summaryrefslogtreecommitdiff
path: root/deps/v8
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2016-12-23 16:30:57 +0100
committerMichaël Zasso <targos@protonmail.com>2017-01-26 22:46:17 +0100
commit2739185b790e040c3b044c577327f5d44bffad4a (patch)
tree29a466999212f4c85958379d9d400eec8a185ba5 /deps/v8
parenta67a04d7654faaa04c8da00e42981ebc9fd0911c (diff)
downloadandroid-node-v8-2739185b790e040c3b044c577327f5d44bffad4a.tar.gz
android-node-v8-2739185b790e040c3b044c577327f5d44bffad4a.tar.bz2
android-node-v8-2739185b790e040c3b044c577327f5d44bffad4a.zip
deps: update V8 to 5.5.372.40
PR-URL: https://github.com/nodejs/node/pull/9618 Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com> Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Diffstat (limited to 'deps/v8')
-rw-r--r--deps/v8/.gitignore7
-rw-r--r--deps/v8/.gn5
-rw-r--r--deps/v8/AUTHORS4
-rw-r--r--deps/v8/BUILD.gn468
-rw-r--r--deps/v8/ChangeLog1984
-rw-r--r--deps/v8/DEPS84
-rw-r--r--deps/v8/OWNERS1
-rw-r--r--deps/v8/PRESUBMIT.py33
-rw-r--r--deps/v8/base/trace_event/common/trace_event_common.h97
-rw-r--r--deps/v8/build_overrides/build.gni8
-rw-r--r--deps/v8/build_overrides/v8.gni16
-rw-r--r--deps/v8/gypfiles/config/win/msvs_dependencies.isolate77
-rwxr-xr-xdeps/v8/gypfiles/get_landmines.py2
-rwxr-xr-xdeps/v8/gypfiles/gyp_v814
-rw-r--r--deps/v8/gypfiles/standalone.gypi6
-rw-r--r--deps/v8/include/DEPS4
-rw-r--r--deps/v8/include/OWNERS5
-rw-r--r--deps/v8/include/libplatform/v8-tracing.h62
-rw-r--r--deps/v8/include/v8-inspector-protocol.h13
-rw-r--r--deps/v8/include/v8-inspector.h267
-rw-r--r--deps/v8/include/v8-platform.h66
-rw-r--r--deps/v8/include/v8-profiler.h28
-rw-r--r--deps/v8/include/v8-util.h17
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8.h983
-rw-r--r--deps/v8/infra/mb/mb_config.pyl254
-rw-r--r--deps/v8/src/address-map.cc2
-rw-r--r--deps/v8/src/address-map.h4
-rw-r--r--deps/v8/src/allocation.h6
-rw-r--r--deps/v8/src/api-arguments-inl.h41
-rw-r--r--deps/v8/src/api-arguments.cc4
-rw-r--r--deps/v8/src/api-arguments.h7
-rw-r--r--deps/v8/src/api-natives.cc92
-rw-r--r--deps/v8/src/api.cc603
-rw-r--r--deps/v8/src/api.h8
-rw-r--r--deps/v8/src/arguments.h7
-rw-r--r--deps/v8/src/arm/OWNERS1
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h2
-rw-r--r--deps/v8/src/arm/assembler-arm.cc576
-rw-r--r--deps/v8/src/arm/assembler-arm.h27
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc375
-rw-r--r--deps/v8/src/arm/codegen-arm.cc2
-rw-r--r--deps/v8/src/arm/constants-arm.h24
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc25
-rw-r--r--deps/v8/src/arm/disasm-arm.cc86
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc26
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc185
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h52
-rw-r--r--deps/v8/src/arm/simulator-arm.cc170
-rw-r--r--deps/v8/src/arm/simulator-arm.h19
-rw-r--r--deps/v8/src/arm64/OWNERS1
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc405
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc26
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc9
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h12
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc3
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h5
-rw-r--r--deps/v8/src/asmjs/asm-js.cc54
-rw-r--r--deps/v8/src/asmjs/asm-js.h19
-rw-r--r--deps/v8/src/asmjs/asm-typer.cc69
-rw-r--r--deps/v8/src/asmjs/asm-typer.h26
-rw-r--r--deps/v8/src/asmjs/asm-types.h4
-rw-r--r--deps/v8/src/asmjs/asm-wasm-builder.cc448
-rw-r--r--deps/v8/src/asmjs/asm-wasm-builder.h7
-rw-r--r--deps/v8/src/assembler.cc49
-rw-r--r--deps/v8/src/assembler.h19
-rw-r--r--deps/v8/src/assert-scope.h8
-rw-r--r--deps/v8/src/ast/OWNERS1
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.cc9
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.h4
-rw-r--r--deps/v8/src/ast/ast-literal-reindexer.cc9
-rw-r--r--deps/v8/src/ast/ast-literal-reindexer.h2
-rw-r--r--deps/v8/src/ast/ast-numbering.cc63
-rw-r--r--deps/v8/src/ast/ast-traversal-visitor.h4
-rw-r--r--deps/v8/src/ast/ast-type-bounds.h14
-rw-r--r--deps/v8/src/ast/ast-types.cc (renamed from deps/v8/src/types.cc)465
-rw-r--r--deps/v8/src/ast/ast-types.h1024
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc68
-rw-r--r--deps/v8/src/ast/ast-value-factory.h27
-rw-r--r--deps/v8/src/ast/ast.cc142
-rw-r--r--deps/v8/src/ast/ast.h653
-rw-r--r--deps/v8/src/ast/compile-time-value.cc56
-rw-r--r--deps/v8/src/ast/compile-time-value.h45
-rw-r--r--deps/v8/src/ast/context-slot-cache.cc7
-rw-r--r--deps/v8/src/ast/context-slot-cache.h1
-rw-r--r--deps/v8/src/ast/modules.cc248
-rw-r--r--deps/v8/src/ast/modules.h124
-rw-r--r--deps/v8/src/ast/prettyprinter.cc75
-rw-r--r--deps/v8/src/ast/prettyprinter.h3
-rw-r--r--deps/v8/src/ast/scopeinfo.cc666
-rw-r--r--deps/v8/src/ast/scopeinfo.h18
-rw-r--r--deps/v8/src/ast/scopes.cc1351
-rw-r--r--deps/v8/src/ast/scopes.h372
-rw-r--r--deps/v8/src/ast/variables.cc45
-rw-r--r--deps/v8/src/ast/variables.h124
-rw-r--r--deps/v8/src/background-parsing-task.cc16
-rw-r--r--deps/v8/src/background-parsing-task.h14
-rw-r--r--deps/v8/src/bailout-reason.h11
-rw-r--r--deps/v8/src/base.isolate8
-rw-r--r--deps/v8/src/base/accounting-allocator.cc44
-rw-r--r--deps/v8/src/base/accounting-allocator.h36
-rw-r--r--deps/v8/src/base/atomic-utils.h16
-rw-r--r--deps/v8/src/base/build_config.h17
-rw-r--r--deps/v8/src/base/hashmap-entry.h54
-rw-r--r--deps/v8/src/base/hashmap.h351
-rw-r--r--deps/v8/src/base/macros.h6
-rw-r--r--deps/v8/src/base/platform/platform-macos.cc5
-rw-r--r--deps/v8/src/basic-block-profiler.h8
-rw-r--r--deps/v8/src/bit-vector.h2
-rw-r--r--deps/v8/src/bootstrapper.cc194
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc226
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc275
-rw-r--r--deps/v8/src/builtins/builtins-array.cc322
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc150
-rw-r--r--deps/v8/src/builtins/builtins-conversion.cc174
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc204
-rw-r--r--deps/v8/src/builtins/builtins-date.cc135
-rw-r--r--deps/v8/src/builtins/builtins-global.cc109
-rw-r--r--deps/v8/src/builtins/builtins-handler.cc21
-rw-r--r--deps/v8/src/builtins/builtins-internal.cc19
-rw-r--r--deps/v8/src/builtins/builtins-interpreter.cc22
-rw-r--r--deps/v8/src/builtins/builtins-iterator.cc17
-rw-r--r--deps/v8/src/builtins/builtins-number.cc138
-rw-r--r--deps/v8/src/builtins/builtins-object.cc31
-rw-r--r--deps/v8/src/builtins/builtins-regexp.cc441
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer.cc8
-rw-r--r--deps/v8/src/builtins/builtins-string.cc1063
-rw-r--r--deps/v8/src/builtins/builtins-utils.h51
-rw-r--r--deps/v8/src/builtins/builtins.h158
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc380
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc260
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc258
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc221
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc218
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc271
-rw-r--r--deps/v8/src/builtins/x87/builtins-x87.cc381
-rw-r--r--deps/v8/src/checks.h5
-rw-r--r--deps/v8/src/code-events.h7
-rw-r--r--deps/v8/src/code-factory.cc64
-rw-r--r--deps/v8/src/code-factory.h6
-rw-r--r--deps/v8/src/code-stub-assembler.cc3089
-rw-r--r--deps/v8/src/code-stub-assembler.h433
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc534
-rw-r--r--deps/v8/src/code-stubs.cc1377
-rw-r--r--deps/v8/src/code-stubs.h505
-rw-r--r--deps/v8/src/codegen.cc6
-rw-r--r--deps/v8/src/collector.h2
-rw-r--r--deps/v8/src/compilation-dependencies.cc2
-rw-r--r--deps/v8/src/compilation-info.cc214
-rw-r--r--deps/v8/src/compilation-info.h400
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc161
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h32
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc2
-rw-r--r--deps/v8/src/compiler.cc653
-rw-r--r--deps/v8/src/compiler.h450
-rw-r--r--deps/v8/src/compiler/access-builder.cc169
-rw-r--r--deps/v8/src/compiler/access-builder.h21
-rw-r--r--deps/v8/src/compiler/access-info.cc118
-rw-r--r--deps/v8/src/compiler/access-info.h22
-rw-r--r--deps/v8/src/compiler/all-nodes.cc23
-rw-r--r--deps/v8/src/compiler/all-nodes.h14
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc279
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc28
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc38
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc129
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.cc256
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.h20
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.cc6
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.h2
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.cc3
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc582
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h53
-rw-r--r--deps/v8/src/compiler/bytecode-loop-analysis.h2
-rw-r--r--deps/v8/src/compiler/c-linkage.cc2
-rw-r--r--deps/v8/src/compiler/code-assembler.cc85
-rw-r--r--deps/v8/src/compiler/code-assembler.h41
-rw-r--r--deps/v8/src/compiler/code-generator-impl.h6
-rw-r--r--deps/v8/src/compiler/code-generator.cc38
-rw-r--r--deps/v8/src/compiler/code-generator.h9
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc8
-rw-r--r--deps/v8/src/compiler/common-operator.cc2
-rw-r--r--deps/v8/src/compiler/common-operator.h4
-rw-r--r--deps/v8/src/compiler/control-equivalence.h2
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.h2
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc584
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h33
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc36
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc44
-rw-r--r--deps/v8/src/compiler/escape-analysis.h2
-rw-r--r--deps/v8/src/compiler/graph-reducer.h2
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc12
-rw-r--r--deps/v8/src/compiler/graph.h4
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc14
-rw-r--r--deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc8
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc6
-rw-r--r--deps/v8/src/compiler/instruction-codes.h1
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.cc30
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.h33
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h6
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc223
-rw-r--r--deps/v8/src/compiler/instruction-selector.h44
-rw-r--r--deps/v8/src/compiler/instruction.cc108
-rw-r--r--deps/v8/src/compiler/instruction.h98
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc14
-rw-r--r--deps/v8/src/compiler/int64-lowering.h2
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc397
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h14
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc53
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h12
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc101
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h6
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc45
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.cc62
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.h4
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc302
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h21
-rw-r--r--deps/v8/src/compiler/js-inlining.cc216
-rw-r--r--deps/v8/src/compiler/js-inlining.h4
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc8
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc336
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h7
-rw-r--r--deps/v8/src/compiler/js-operator.cc79
-rw-r--r--deps/v8/src/compiler/js-operator.h71
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc727
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h18
-rw-r--r--deps/v8/src/compiler/linkage.cc83
-rw-r--r--deps/v8/src/compiler/linkage.h2
-rw-r--r--deps/v8/src/compiler/live-range-separator.cc16
-rw-r--r--deps/v8/src/compiler/live-range-separator.h3
-rw-r--r--deps/v8/src/compiler/liveness-analyzer.h2
-rw-r--r--deps/v8/src/compiler/load-elimination.cc344
-rw-r--r--deps/v8/src/compiler/load-elimination.h49
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc4
-rw-r--r--deps/v8/src/compiler/loop-analysis.h2
-rw-r--r--deps/v8/src/compiler/loop-peeling.cc6
-rw-r--r--deps/v8/src/compiler/loop-variable-optimizer.cc13
-rw-r--r--deps/v8/src/compiler/loop-variable-optimizer.h4
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc667
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.h31
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc87
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.h2
-rw-r--r--deps/v8/src/compiler/machine-operator.cc645
-rw-r--r--deps/v8/src/compiler/machine-operator.h18
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc42
-rw-r--r--deps/v8/src/compiler/memory-optimizer.h2
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc62
-rw-r--r--deps/v8/src/compiler/mips/instruction-codes-mips.h12
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc130
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc61
-rw-r--r--deps/v8/src/compiler/mips64/instruction-codes-mips64.h12
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc134
-rw-r--r--deps/v8/src/compiler/move-optimizer.cc2
-rw-r--r--deps/v8/src/compiler/move-optimizer.h2
-rw-r--r--deps/v8/src/compiler/node-aux-data.h2
-rw-r--r--deps/v8/src/compiler/node-cache.cc4
-rw-r--r--deps/v8/src/compiler/node-matchers.h12
-rw-r--r--deps/v8/src/compiler/node-properties.h2
-rw-r--r--deps/v8/src/compiler/node.h4
-rw-r--r--deps/v8/src/compiler/opcodes.h226
-rw-r--r--deps/v8/src/compiler/operation-typer.cc14
-rw-r--r--deps/v8/src/compiler/operation-typer.h6
-rw-r--r--deps/v8/src/compiler/operator-properties.cc1
-rw-r--r--deps/v8/src/compiler/operator.h2
-rw-r--r--deps/v8/src/compiler/osr.cc18
-rw-r--r--deps/v8/src/compiler/osr.h2
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.cc3
-rw-r--r--deps/v8/src/compiler/pipeline.cc114
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc14
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc4
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc24
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h14
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.cc4
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.h2
-rw-r--r--deps/v8/src/compiler/register-allocator.cc27
-rw-r--r--deps/v8/src/compiler/register-allocator.h4
-rw-r--r--deps/v8/src/compiler/representation-change.cc264
-rw-r--r--deps/v8/src/compiler/representation-change.h23
-rw-r--r--deps/v8/src/compiler/s390/code-generator-s390.cc14
-rw-r--r--deps/v8/src/compiler/s390/instruction-selector-s390.cc7
-rw-r--r--deps/v8/src/compiler/schedule.h2
-rw-r--r--deps/v8/src/compiler/scheduler.cc4
-rw-r--r--deps/v8/src/compiler/scheduler.h2
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc792
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h9
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc24
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc106
-rw-r--r--deps/v8/src/compiler/simplified-operator.h23
-rw-r--r--deps/v8/src/compiler/state-values-utils.h2
-rw-r--r--deps/v8/src/compiler/store-store-elimination.cc20
-rw-r--r--deps/v8/src/compiler/store-store-elimination.h2
-rw-r--r--deps/v8/src/compiler/type-cache.cc (renamed from deps/v8/src/type-cache.cc)5
-rw-r--r--deps/v8/src/compiler/type-cache.h157
-rw-r--r--deps/v8/src/compiler/type-hint-analyzer.cc26
-rw-r--r--deps/v8/src/compiler/type-hint-analyzer.h6
-rw-r--r--deps/v8/src/compiler/typed-optimization.cc253
-rw-r--r--deps/v8/src/compiler/typed-optimization.h73
-rw-r--r--deps/v8/src/compiler/typer.cc845
-rw-r--r--deps/v8/src/compiler/typer.h7
-rw-r--r--deps/v8/src/compiler/types.cc961
-rw-r--r--deps/v8/src/compiler/types.h (renamed from deps/v8/src/types.h)469
-rw-r--r--deps/v8/src/compiler/verifier.cc247
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc402
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h27
-rw-r--r--deps/v8/src/compiler/wasm-linkage.cc4
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc82
-rw-r--r--deps/v8/src/compiler/x64/instruction-codes-x64.h8
-rw-r--r--deps/v8/src/compiler/x64/instruction-scheduler-x64.cc15
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc107
-rw-r--r--deps/v8/src/compiler/x87/code-generator-x87.cc14
-rw-r--r--deps/v8/src/compiler/x87/instruction-selector-x87.cc6
-rw-r--r--deps/v8/src/compiler/zone-pool.cc2
-rw-r--r--deps/v8/src/compiler/zone-pool.h6
-rw-r--r--deps/v8/src/contexts.cc114
-rw-r--r--deps/v8/src/contexts.h141
-rw-r--r--deps/v8/src/counters-inl.h14
-rw-r--r--deps/v8/src/counters.cc49
-rw-r--r--deps/v8/src/counters.h109
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.cc76
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.h57
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc52
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.h2
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.cc87
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.h57
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc63
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h2
-rw-r--r--deps/v8/src/crankshaft/compilation-phase.h5
-rw-r--r--deps/v8/src/crankshaft/hydrogen-bce.cc15
-rw-r--r--deps/v8/src/crankshaft/hydrogen-bce.h2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-flow-engine.h4
-rw-r--r--deps/v8/src/crankshaft/hydrogen-gvn.h4
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.cc21
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.h103
-rw-r--r--deps/v8/src/crankshaft/hydrogen-osr.h2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-types.cc20
-rw-r--r--deps/v8/src/crankshaft/hydrogen-types.h4
-rw-r--r--deps/v8/src/crankshaft/hydrogen.cc509
-rw-r--r--deps/v8/src/crankshaft/hydrogen.h67
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc52
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h2
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.cc77
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.h56
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator.h2
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.cc2
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.h4
-rw-r--r--deps/v8/src/crankshaft/lithium.h2
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc54
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.h2
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.cc76
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.h57
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc54
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h2
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.cc76
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.h57
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc56
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h2
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.cc75
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.h57
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc101
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-codegen-s390.h2
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-s390.cc71
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-s390.h55
-rw-r--r--deps/v8/src/crankshaft/typing.cc167
-rw-r--r--deps/v8/src/crankshaft/typing.h24
-rw-r--r--deps/v8/src/crankshaft/unique.h2
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc52
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.h2
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.cc76
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.h57
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc56
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.h2
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.cc77
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.h56
-rw-r--r--deps/v8/src/d8.cc285
-rw-r--r--deps/v8/src/d8.gyp20
-rw-r--r--deps/v8/src/d8.h43
-rw-r--r--deps/v8/src/dateparser.h2
-rw-r--r--deps/v8/src/debug/arm/debug-arm.cc4
-rw-r--r--deps/v8/src/debug/arm64/debug-arm64.cc4
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc15
-rw-r--r--deps/v8/src/debug/debug-scopes.cc19
-rw-r--r--deps/v8/src/debug/debug-scopes.h2
-rw-r--r--deps/v8/src/debug/debug.cc40
-rw-r--r--deps/v8/src/debug/debug.h11
-rw-r--r--deps/v8/src/debug/ia32/debug-ia32.cc4
-rw-r--r--deps/v8/src/debug/liveedit.cc60
-rw-r--r--deps/v8/src/debug/liveedit.h14
-rw-r--r--deps/v8/src/debug/mips/debug-mips.cc4
-rw-r--r--deps/v8/src/debug/mips64/debug-mips64.cc4
-rw-r--r--deps/v8/src/debug/ppc/debug-ppc.cc4
-rw-r--r--deps/v8/src/debug/s390/debug-s390.cc4
-rw-r--r--deps/v8/src/debug/x64/debug-x64.cc5
-rw-r--r--deps/v8/src/debug/x87/debug-x87.cc4
-rw-r--r--deps/v8/src/deoptimize-reason.cc2
-rw-r--r--deps/v8/src/deoptimize-reason.h4
-rw-r--r--deps/v8/src/deoptimizer.cc286
-rw-r--r--deps/v8/src/deoptimizer.h22
-rw-r--r--deps/v8/src/effects.h24
-rw-r--r--deps/v8/src/elements.cc58
-rw-r--r--deps/v8/src/elements.h3
-rw-r--r--deps/v8/src/execution.cc27
-rw-r--r--deps/v8/src/execution.h13
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc18
-rw-r--r--deps/v8/src/external-reference-table.cc4
-rw-r--r--deps/v8/src/factory.cc158
-rw-r--r--deps/v8/src/factory.h75
-rw-r--r--deps/v8/src/fast-accessor-assembler.cc42
-rw-r--r--deps/v8/src/field-type.cc10
-rw-r--r--deps/v8/src/field-type.h3
-rw-r--r--deps/v8/src/flag-definitions.h137
-rw-r--r--deps/v8/src/frames-inl.h2
-rw-r--r--deps/v8/src/frames.cc15
-rw-r--r--deps/v8/src/frames.h9
-rw-r--r--deps/v8/src/full-codegen/arm/full-codegen-arm.cc176
-rw-r--r--deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc180
-rw-r--r--deps/v8/src/full-codegen/full-codegen.cc171
-rw-r--r--deps/v8/src/full-codegen/full-codegen.h60
-rw-r--r--deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc176
-rw-r--r--deps/v8/src/full-codegen/mips/full-codegen-mips.cc177
-rw-r--r--deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc177
-rw-r--r--deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc177
-rw-r--r--deps/v8/src/full-codegen/s390/full-codegen-s390.cc174
-rw-r--r--deps/v8/src/full-codegen/x64/full-codegen-x64.cc174
-rw-r--r--deps/v8/src/full-codegen/x87/full-codegen-x87.cc176
-rw-r--r--deps/v8/src/gdb-jit.cc3
-rw-r--r--deps/v8/src/globals.h201
-rw-r--r--deps/v8/src/handles.h26
-rw-r--r--deps/v8/src/heap-symbols.h36
-rw-r--r--deps/v8/src/heap/gc-tracer.cc245
-rw-r--r--deps/v8/src/heap/gc-tracer.h99
-rw-r--r--deps/v8/src/heap/heap-inl.h167
-rw-r--r--deps/v8/src/heap/heap.cc912
-rw-r--r--deps/v8/src/heap/heap.h625
-rw-r--r--deps/v8/src/heap/incremental-marking-inl.h10
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc130
-rw-r--r--deps/v8/src/heap/incremental-marking-job.h53
-rw-r--r--deps/v8/src/heap/incremental-marking.cc518
-rw-r--r--deps/v8/src/heap/incremental-marking.h117
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h4
-rw-r--r--deps/v8/src/heap/mark-compact.cc160
-rw-r--r--deps/v8/src/heap/mark-compact.h18
-rw-r--r--deps/v8/src/heap/memory-reducer.cc11
-rw-r--r--deps/v8/src/heap/object-stats.cc74
-rw-r--r--deps/v8/src/heap/object-stats.h1
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h97
-rw-r--r--deps/v8/src/heap/objects-visiting.cc2
-rw-r--r--deps/v8/src/heap/objects-visiting.h10
-rw-r--r--deps/v8/src/heap/remembered-set.cc32
-rw-r--r--deps/v8/src/heap/remembered-set.h36
-rw-r--r--deps/v8/src/heap/scavenge-job.cc2
-rw-r--r--deps/v8/src/heap/slot-set.h267
-rw-r--r--deps/v8/src/heap/spaces-inl.h8
-rw-r--r--deps/v8/src/heap/spaces.cc211
-rw-r--r--deps/v8/src/heap/spaces.h406
-rw-r--r--deps/v8/src/i18n.cc12
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc522
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc30
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc7
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h18
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc44
-rw-r--r--deps/v8/src/ic/arm/ic-arm.cc14
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc45
-rw-r--r--deps/v8/src/ic/arm64/ic-arm64.cc15
-rw-r--r--deps/v8/src/ic/handler-compiler.cc103
-rw-r--r--deps/v8/src/ic/handler-compiler.h22
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc107
-rw-r--r--deps/v8/src/ic/ia32/ic-compiler-ia32.cc21
-rw-r--r--deps/v8/src/ic/ia32/ic-ia32.cc95
-rw-r--r--deps/v8/src/ic/ia32/stub-cache-ia32.cc29
-rw-r--r--deps/v8/src/ic/ic-inl.h1
-rw-r--r--deps/v8/src/ic/ic-state.cc44
-rw-r--r--deps/v8/src/ic/ic-state.h13
-rw-r--r--deps/v8/src/ic/ic.cc208
-rw-r--r--deps/v8/src/ic/ic.h7
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc46
-rw-r--r--deps/v8/src/ic/mips/ic-mips.cc27
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc46
-rw-r--r--deps/v8/src/ic/mips64/ic-mips64.cc27
-rw-r--r--deps/v8/src/ic/ppc/handler-compiler-ppc.cc44
-rw-r--r--deps/v8/src/ic/ppc/ic-ppc.cc14
-rw-r--r--deps/v8/src/ic/s390/handler-compiler-s390.cc42
-rw-r--r--deps/v8/src/ic/s390/ic-s390.cc15
-rw-r--r--deps/v8/src/ic/stub-cache.cc1
-rw-r--r--deps/v8/src/ic/stub-cache.h1
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc56
-rw-r--r--deps/v8/src/ic/x64/ic-x64.cc24
-rw-r--r--deps/v8/src/ic/x87/handler-compiler-x87.cc107
-rw-r--r--deps/v8/src/ic/x87/ic-compiler-x87.cc21
-rw-r--r--deps/v8/src/ic/x87/ic-x87.cc95
-rw-r--r--deps/v8/src/ic/x87/stub-cache-x87.cc29
-rw-r--r--deps/v8/src/identity-map.cc2
-rw-r--r--deps/v8/src/inspector/BUILD.gn228
-rw-r--r--deps/v8/src/inspector/DEPS8
-rw-r--r--deps/v8/src/inspector/OWNERS15
-rw-r--r--deps/v8/src/inspector/PRESUBMIT.py55
-rw-r--r--deps/v8/src/inspector/build/check_injected_script_source.py88
-rw-r--r--deps/v8/src/inspector/build/closure-compiler.tar.gz.sha11
-rwxr-xr-xdeps/v8/src/inspector/build/compile-scripts.py169
-rwxr-xr-xdeps/v8/src/inspector/build/generate_protocol_externs.py246
-rwxr-xr-xdeps/v8/src/inspector/build/rjsmin.py295
-rw-r--r--deps/v8/src/inspector/build/xxd.py28
-rw-r--r--deps/v8/src/inspector/debugger-script.js712
-rw-r--r--deps/v8/src/inspector/debugger_script_externs.js522
-rw-r--r--deps/v8/src/inspector/injected-script-native.cc89
-rw-r--r--deps/v8/src/inspector/injected-script-native.h47
-rw-r--r--deps/v8/src/inspector/injected-script-source.js1076
-rw-r--r--deps/v8/src/inspector/injected-script.cc581
-rw-r--r--deps/v8/src/inspector/injected-script.h207
-rw-r--r--deps/v8/src/inspector/injected_script_externs.js66
-rw-r--r--deps/v8/src/inspector/inspected-context.cc88
-rw-r--r--deps/v8/src/inspector/inspected-context.h64
-rw-r--r--deps/v8/src/inspector/inspector.gyp174
-rw-r--r--deps/v8/src/inspector/inspector.gypi95
-rw-r--r--deps/v8/src/inspector/inspector_protocol_config.json25
-rw-r--r--deps/v8/src/inspector/java-script-call-frame.cc162
-rw-r--r--deps/v8/src/inspector/java-script-call-frame.h82
-rw-r--r--deps/v8/src/inspector/js_protocol-1.2.json997
-rw-r--r--deps/v8/src/inspector/js_protocol.json306
-rw-r--r--deps/v8/src/inspector/protocol-platform.h21
-rw-r--r--deps/v8/src/inspector/remote-object-id.cc76
-rw-r--r--deps/v8/src/inspector/remote-object-id.h58
-rw-r--r--deps/v8/src/inspector/script-breakpoint.h52
-rw-r--r--deps/v8/src/inspector/search-util.cc164
-rw-r--r--deps/v8/src/inspector/search-util.h24
-rw-r--r--deps/v8/src/inspector/string-16.cc518
-rw-r--r--deps/v8/src/inspector/string-16.h133
-rw-r--r--deps/v8/src/inspector/string-util.cc218
-rw-r--r--deps/v8/src/inspector/string-util.h75
-rw-r--r--deps/v8/src/inspector/v8-console-agent-impl.cc79
-rw-r--r--deps/v8/src/inspector/v8-console-agent-impl.h48
-rw-r--r--deps/v8/src/inspector/v8-console-message.cc485
-rw-r--r--deps/v8/src/inspector/v8-console-message.h120
-rw-r--r--deps/v8/src/inspector/v8-console.cc922
-rw-r--r--deps/v8/src/inspector/v8-console.h119
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc1255
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h224
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.cc140
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.h87
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc1002
-rw-r--r--deps/v8/src/inspector/v8-debugger.h160
-rw-r--r--deps/v8/src/inspector/v8-function-call.cc111
-rw-r--r--deps/v8/src/inspector/v8-function-call.h65
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc407
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.h73
-rw-r--r--deps/v8/src/inspector/v8-injected-script-host.cc216
-rw-r--r--deps/v8/src/inspector/v8-injected-script-host.h46
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.cc376
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.h150
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.cc417
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.h126
-rw-r--r--deps/v8/src/inspector/v8-internal-value-type.cc77
-rw-r--r--deps/v8/src/inspector/v8-internal-value-type.h23
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.cc321
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.h74
-rw-r--r--deps/v8/src/inspector/v8-regex.cc93
-rw-r--r--deps/v8/src/inspector/v8-regex.h37
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc738
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.h131
-rw-r--r--deps/v8/src/inspector/v8-schema-agent-impl.cc29
-rw-r--r--deps/v8/src/inspector/v8-schema-agent-impl.h37
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc281
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h99
-rw-r--r--deps/v8/src/inspector/v8-value-copier.cc110
-rw-r--r--deps/v8/src/inspector/v8-value-copier.h24
-rw-r--r--deps/v8/src/interface-descriptors.cc766
-rw-r--r--deps/v8/src/interface-descriptors.h480
-rw-r--r--deps/v8/src/interpreter/OWNERS1
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc684
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h233
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc13
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h1
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc148
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-dead-code-optimizer.cc6
-rw-r--r--deps/v8/src/interpreter/bytecode-dead-code-optimizer.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.cc53
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.h6
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.cc8
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.h11
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc1254
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h56
-rw-r--r--deps/v8/src/interpreter/bytecode-label.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-operands.cc89
-rw-r--r--deps/v8/src/interpreter/bytecode-operands.h126
-rw-r--r--deps/v8/src/interpreter/bytecode-peephole-optimizer.cc16
-rw-r--r--deps/v8/src/interpreter/bytecode-peephole-optimizer.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-pipeline.cc56
-rw-r--r--deps/v8/src/interpreter/bytecode-pipeline.h167
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.cc210
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.h150
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.cc180
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.h38
-rw-r--r--deps/v8/src/interpreter/bytecode-register.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-register.h36
-rw-r--r--deps/v8/src/interpreter/bytecode-traits.h260
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc532
-rw-r--r--deps/v8/src/interpreter/bytecodes.h662
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.cc20
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h8
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.cc14
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.h8
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.h2
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc365
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h21
-rw-r--r--deps/v8/src/interpreter/interpreter.cc528
-rw-r--r--deps/v8/src/interpreter/interpreter.h44
-rw-r--r--deps/v8/src/interpreter/mkpeephole.cc3
-rw-r--r--deps/v8/src/isolate-inl.h24
-rw-r--r--deps/v8/src/isolate.cc490
-rw-r--r--deps/v8/src/isolate.h71
-rw-r--r--deps/v8/src/js/async-await.js180
-rw-r--r--deps/v8/src/js/collection.js10
-rw-r--r--deps/v8/src/js/datetime-format-to-parts.js16
-rw-r--r--deps/v8/src/js/harmony-async-await.js51
-rw-r--r--deps/v8/src/js/i18n.js79
-rw-r--r--deps/v8/src/js/intl-extra.js22
-rw-r--r--deps/v8/src/js/iterator-prototype.js21
-rw-r--r--deps/v8/src/js/prologue.js25
-rw-r--r--deps/v8/src/js/promise.js275
-rw-r--r--deps/v8/src/js/regexp.js340
-rw-r--r--deps/v8/src/js/string-iterator.js98
-rw-r--r--deps/v8/src/js/string.js226
-rw-r--r--deps/v8/src/js/typedarray.js73
-rw-r--r--deps/v8/src/js/v8natives.js56
-rw-r--r--deps/v8/src/json-parser.cc2
-rw-r--r--deps/v8/src/libplatform/default-platform.cc31
-rw-r--r--deps/v8/src/libplatform/default-platform.h20
-rw-r--r--deps/v8/src/libplatform/tracing/trace-object.cc21
-rw-r--r--deps/v8/src/libplatform/tracing/trace-writer.cc15
-rw-r--r--deps/v8/src/libplatform/tracing/trace-writer.h1
-rw-r--r--deps/v8/src/libplatform/tracing/tracing-controller.cc49
-rw-r--r--deps/v8/src/libsampler/sampler.cc2
-rw-r--r--deps/v8/src/lookup-cache-inl.h40
-rw-r--r--deps/v8/src/lookup-cache.cc84
-rw-r--r--deps/v8/src/lookup-cache.h117
-rw-r--r--deps/v8/src/lookup.cc6
-rw-r--r--deps/v8/src/lookup.h46
-rw-r--r--deps/v8/src/machine-type.h44
-rw-r--r--deps/v8/src/messages.cc819
-rw-r--r--deps/v8/src/messages.h189
-rw-r--r--deps/v8/src/mips/assembler-mips.cc38
-rw-r--r--deps/v8/src/mips/assembler-mips.h7
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc350
-rw-r--r--deps/v8/src/mips/constants-mips.cc110
-rw-r--r--deps/v8/src/mips/constants-mips.h297
-rw-r--r--deps/v8/src/mips/disasm-mips.cc29
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc26
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc56
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h12
-rw-r--r--deps/v8/src/mips/simulator-mips.cc401
-rw-r--r--deps/v8/src/mips/simulator-mips.h82
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc39
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h7
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc355
-rw-r--r--deps/v8/src/mips64/constants-mips64.cc112
-rw-r--r--deps/v8/src/mips64/constants-mips64.h304
-rw-r--r--deps/v8/src/mips64/disasm-mips64.cc33
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc26
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc54
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h12
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc413
-rw-r--r--deps/v8/src/mips64/simulator-mips64.h81
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h2
-rw-r--r--deps/v8/src/objects-debug.cc44
-rw-r--r--deps/v8/src/objects-inl.h142
-rw-r--r--deps/v8/src/objects-printer.cc98
-rw-r--r--deps/v8/src/objects.cc808
-rw-r--r--deps/v8/src/objects.h639
-rw-r--r--deps/v8/src/ostreams.h3
-rw-r--r--deps/v8/src/parsing/duplicate-finder.cc145
-rw-r--r--deps/v8/src/parsing/duplicate-finder.h64
-rw-r--r--deps/v8/src/parsing/expression-classifier.h117
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.cc8
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.h2
-rw-r--r--deps/v8/src/parsing/parameter-initializer-rewriter.cc4
-rw-r--r--deps/v8/src/parsing/parameter-initializer-rewriter.h2
-rw-r--r--deps/v8/src/parsing/parse-info.cc20
-rw-r--r--deps/v8/src/parsing/parse-info.h20
-rw-r--r--deps/v8/src/parsing/parser-base.h3974
-rw-r--r--deps/v8/src/parsing/parser.cc4309
-rw-r--r--deps/v8/src/parsing/parser.h1102
-rw-r--r--deps/v8/src/parsing/pattern-rewriter.cc40
-rw-r--r--deps/v8/src/parsing/preparser.cc1175
-rw-r--r--deps/v8/src/parsing/preparser.h1166
-rw-r--r--deps/v8/src/parsing/rewriter.cc3
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc971
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.h184
-rw-r--r--deps/v8/src/parsing/scanner.cc375
-rw-r--r--deps/v8/src/parsing/scanner.h294
-rw-r--r--deps/v8/src/pending-compilation-error-handler.cc1
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc366
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc26
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc8
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h12
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc20
-rw-r--r--deps/v8/src/ppc/simulator-ppc.h16
-rw-r--r--deps/v8/src/profiler/OWNERS1
-rw-r--r--deps/v8/src/profiler/allocation-tracker.cc2
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h1
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc30
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h6
-rw-r--r--deps/v8/src/profiler/profile-generator.h9
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc2
-rw-r--r--deps/v8/src/profiler/profiler-listener.h2
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc4
-rw-r--r--deps/v8/src/profiler/strings-storage.h4
-rw-r--r--deps/v8/src/profiler/tracing-cpu-profiler.cc25
-rw-r--r--deps/v8/src/profiler/tracing-cpu-profiler.h26
-rw-r--r--deps/v8/src/property-details.h1
-rw-r--r--deps/v8/src/regexp/jsregexp.cc1
-rw-r--r--deps/v8/src/regexp/jsregexp.h4
-rw-r--r--deps/v8/src/regexp/regexp-ast.h4
-rw-r--r--deps/v8/src/regexp/regexp-parser.h2
-rw-r--r--deps/v8/src/runtime-profiler.cc155
-rw-r--r--deps/v8/src/runtime-profiler.h10
-rw-r--r--deps/v8/src/runtime/runtime-array.cc8
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc155
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc51
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc4
-rw-r--r--deps/v8/src/runtime/runtime-forin.cc21
-rw-r--r--deps/v8/src/runtime/runtime-function.cc1
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc4
-rw-r--r--deps/v8/src/runtime/runtime-i18n.cc136
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc81
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc3
-rw-r--r--deps/v8/src/runtime/runtime-object.cc59
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc2
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc57
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc132
-rw-r--r--deps/v8/src/runtime/runtime-test.cc35
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc214
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc125
-rw-r--r--deps/v8/src/runtime/runtime.cc72
-rw-r--r--deps/v8/src/runtime/runtime.h104
-rw-r--r--deps/v8/src/s390/code-stubs-s390.cc373
-rw-r--r--deps/v8/src/s390/interface-descriptors-s390.cc24
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.cc9
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.h12
-rw-r--r--deps/v8/src/s390/simulator-s390.cc18
-rw-r--r--deps/v8/src/s390/simulator-s390.h16
-rw-r--r--deps/v8/src/safepoint-table.h2
-rw-r--r--deps/v8/src/signature.h2
-rw-r--r--deps/v8/src/small-pointer-list.h2
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc4
-rw-r--r--deps/v8/src/snapshot/code-serializer.h3
-rw-r--r--deps/v8/src/snapshot/deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/natives.h7
-rw-r--r--deps/v8/src/snapshot/serializer-common.cc2
-rw-r--r--deps/v8/src/snapshot/serializer.cc5
-rw-r--r--deps/v8/src/snapshot/serializer.h2
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc70
-rw-r--r--deps/v8/src/snapshot/snapshot.h16
-rw-r--r--deps/v8/src/snapshot/startup-serializer.h3
-rw-r--r--deps/v8/src/source-position-table.h2
-rw-r--r--deps/v8/src/tracing/trace-event.cc95
-rw-r--r--deps/v8/src/tracing/trace-event.h198
-rw-r--r--deps/v8/src/type-cache.h176
-rw-r--r--deps/v8/src/type-feedback-vector-inl.h104
-rw-r--r--deps/v8/src/type-feedback-vector.cc91
-rw-r--r--deps/v8/src/type-feedback-vector.h104
-rw-r--r--deps/v8/src/type-hints.cc (renamed from deps/v8/src/compiler/type-hints.cc)6
-rw-r--r--deps/v8/src/type-hints.h (renamed from deps/v8/src/compiler/type-hints.h)9
-rw-r--r--deps/v8/src/type-info.cc134
-rw-r--r--deps/v8/src/type-info.h18
-rw-r--r--deps/v8/src/unicode-decoder.h3
-rw-r--r--deps/v8/src/unicode.cc188
-rw-r--r--deps/v8/src/unicode.h7
-rw-r--r--deps/v8/src/utils.cc7
-rw-r--r--deps/v8/src/utils.h19
-rw-r--r--deps/v8/src/v8.gyp121
-rw-r--r--deps/v8/src/value-serializer.cc890
-rw-r--r--deps/v8/src/value-serializer.h99
-rw-r--r--deps/v8/src/wasm/ast-decoder.cc1047
-rw-r--r--deps/v8/src/wasm/ast-decoder.h202
-rw-r--r--deps/v8/src/wasm/decoder.h15
-rw-r--r--deps/v8/src/wasm/module-decoder.cc1079
-rw-r--r--deps/v8/src/wasm/module-decoder.h15
-rw-r--r--deps/v8/src/wasm/switch-logic.h4
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc10
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc3
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc389
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.h24
-rw-r--r--deps/v8/src/wasm/wasm-js.cc472
-rw-r--r--deps/v8/src/wasm/wasm-js.h19
-rw-r--r--deps/v8/src/wasm/wasm-macro-gen.h170
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc (renamed from deps/v8/src/wasm/encoder.cc)239
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h (renamed from deps/v8/src/wasm/encoder.h)128
-rw-r--r--deps/v8/src/wasm/wasm-module.cc2028
-rw-r--r--deps/v8/src/wasm/wasm-module.h385
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc29
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h292
-rw-r--r--deps/v8/src/wasm/wasm-result.cc39
-rw-r--r--deps/v8/src/wasm/wasm-result.h36
-rw-r--r--deps/v8/src/x64/assembler-x64.cc278
-rw-r--r--deps/v8/src/x64/assembler-x64.h262
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc371
-rw-r--r--deps/v8/src/x64/disasm-x64.cc213
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc26
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc9
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h12
-rw-r--r--deps/v8/src/x64/sse-instr.h69
-rw-r--r--deps/v8/src/x87/code-stubs-x87.cc522
-rw-r--r--deps/v8/src/x87/interface-descriptors-x87.cc30
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.cc7
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.h18
-rw-r--r--deps/v8/src/zone/accounting-allocator.cc45
-rw-r--r--deps/v8/src/zone/accounting-allocator.h41
-rw-r--r--deps/v8/src/zone/zone-allocator.h (renamed from deps/v8/src/zone-allocator.h)34
-rw-r--r--deps/v8/src/zone/zone-containers.h (renamed from deps/v8/src/zone-containers.h)27
-rw-r--r--deps/v8/src/zone/zone-segment.cc22
-rw-r--r--deps/v8/src/zone/zone-segment.h61
-rw-r--r--deps/v8/src/zone/zone.cc (renamed from deps/v8/src/zone.cc)80
-rw-r--r--deps/v8/src/zone/zone.h (renamed from deps/v8/src/zone.h)45
-rw-r--r--deps/v8/test/BUILD.gn5
-rw-r--r--deps/v8/test/cctest/BUILD.gn305
-rw-r--r--deps/v8/test/cctest/asmjs/test-asm-typer.cc6
-rw-r--r--deps/v8/test/cctest/ast-types-fuzz.h327
-rw-r--r--deps/v8/test/cctest/cctest.cc74
-rw-r--r--deps/v8/test/cctest/cctest.gyp39
-rw-r--r--deps/v8/test/cctest/cctest.h86
-rw-r--r--deps/v8/test/cctest/cctest.status138
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h5
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.cc211
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.h199
-rw-r--r--deps/v8/test/cctest/compiler/graph-builder-tester.h5
-rw-r--r--deps/v8/test/cctest/compiler/test-code-assembler.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-gap-resolver.cc208
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-js-constant-cache.cc87
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc14
-rw-r--r--deps/v8/test/cctest/compiler/test-jump-threading.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc8
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-multiple-return.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-node.cc36
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc60
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc101
-rw-r--r--deps/v8/test/cctest/compiler/test-run-inlining.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-run-intrinsics.cc10
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsbranches.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jscalls.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsexceptions.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsobjects.cc10
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsops.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc27
-rw-r--r--deps/v8/test/cctest/compiler/test-run-stackcheck.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-run-stubs.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-unwinding-info.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-run-variables.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-simplified-lowering.cc1756
-rw-r--r--deps/v8/test/cctest/heap/heap-tester.h1
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.cc52
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.h8
-rw-r--r--deps/v8/test/cctest/heap/test-alloc.cc9
-rw-r--r--deps/v8/test/cctest/heap/test-array-buffer-tracker.cc6
-rw-r--r--deps/v8/test/cctest/heap/test-compaction.cc20
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc558
-rw-r--r--deps/v8/test/cctest/heap/test-incremental-marking.cc105
-rw-r--r--deps/v8/test/cctest/heap/test-lab.cc6
-rw-r--r--deps/v8/test/cctest/heap/test-mark-compact.cc36
-rw-r--r--deps/v8/test/cctest/heap/test-page-promotion.cc15
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc149
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc79
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h30
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden42
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden518
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden84
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden236
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden74
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden24
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden52
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden66
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden36
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden46
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden30
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden565
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden82
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden18
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden22
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden14
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden18
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden118
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden498
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden12
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden512
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden18
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden34
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden24
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden526
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden46
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden644
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden11
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden50
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden270
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden26
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden201
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden18
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden2066
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden896
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden113
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden516
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden23
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden24
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden286
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden542
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden1082
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden516
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden546
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden250
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden14
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden29
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden39
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden35
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden40
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden17
-rw-r--r--deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc90
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc282
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc4
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc315
-rw-r--r--deps/v8/test/cctest/interpreter/test-source-positions.cc1
-rw-r--r--deps/v8/test/cctest/libplatform/test-tracing.cc112
-rw-r--r--deps/v8/test/cctest/libsampler/test-sampler.cc1
-rw-r--r--deps/v8/test/cctest/parsing/test-scanner-streams.cc448
-rw-r--r--deps/v8/test/cctest/parsing/test-scanner.cc87
-rw-r--r--deps/v8/test/cctest/test-accessors.cc2
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc871
-rw-r--r--deps/v8/test/cctest/test-api.cc798
-rw-r--r--deps/v8/test/cctest/test-api.h1
-rw-r--r--deps/v8/test/cctest/test-array-list.cc7
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc572
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc126
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc123
-rw-r--r--deps/v8/test/cctest/test-ast-types.cc1904
-rw-r--r--deps/v8/test/cctest/test-ast.cc43
-rw-r--r--deps/v8/test/cctest/test-bit-vector.cc2
-rw-r--r--deps/v8/test/cctest/test-code-cache.cc9
-rw-r--r--deps/v8/test/cctest/test-code-layout.cc9
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc50
-rw-r--r--deps/v8/test/cctest/test-compiler.cc26
-rw-r--r--deps/v8/test/cctest/test-conversions.cc15
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc20
-rw-r--r--deps/v8/test/cctest/test-date.cc5
-rw-r--r--deps/v8/test/cctest/test-debug.cc16
-rw-r--r--deps/v8/test/cctest/test-decls.cc23
-rw-r--r--deps/v8/test/cctest/test-deoptimization.cc3
-rw-r--r--deps/v8/test/cctest/test-dictionary.cc4
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc53
-rw-r--r--deps/v8/test/cctest/test-disasm-mips.cc17
-rw-r--r--deps/v8/test/cctest/test-disasm-mips64.cc21
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc74
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc37
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc62
-rw-r--r--deps/v8/test/cctest/test-flags.cc1
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc11
-rw-r--r--deps/v8/test/cctest/test-hashmap.cc6
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc57
-rw-r--r--deps/v8/test/cctest/test-identity-map.cc20
-rw-r--r--deps/v8/test/cctest/test-javascript-arm64.cc1
-rw-r--r--deps/v8/test/cctest/test-js-arm64-variables.cc1
-rw-r--r--deps/v8/test/cctest/test-list.cc2
-rw-r--r--deps/v8/test/cctest/test-liveedit.cc2
-rw-r--r--deps/v8/test/cctest/test-lockers.cc1
-rw-r--r--deps/v8/test/cctest/test-log.cc8
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc53
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc64
-rw-r--r--deps/v8/test/cctest/test-mementos.cc20
-rw-r--r--deps/v8/test/cctest/test-modules.cc111
-rw-r--r--deps/v8/test/cctest/test-object.cc6
-rw-r--r--deps/v8/test/cctest/test-parsing.cc1313
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc4
-rw-r--r--deps/v8/test/cctest/test-random-number-generator.cc2
-rw-r--r--deps/v8/test/cctest/test-representation.cc1
-rw-r--r--deps/v8/test/cctest/test-sampler-api.cc1
-rw-r--r--deps/v8/test/cctest/test-serialize.cc37
-rw-r--r--deps/v8/test/cctest/test-simd.cc9
-rw-r--r--deps/v8/test/cctest/test-slots-buffer.cc133
-rw-r--r--deps/v8/test/cctest/test-strings.cc45
-rw-r--r--deps/v8/test/cctest/test-symbols.cc16
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc2
-rw-r--r--deps/v8/test/cctest/test-threads.cc2
-rw-r--r--deps/v8/test/cctest/test-trace-event.cc13
-rw-r--r--deps/v8/test/cctest/test-transitions.cc4
-rw-r--r--deps/v8/test/cctest/test-types.cc883
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc18
-rw-r--r--deps/v8/test/cctest/test-unique.cc7
-rw-r--r--deps/v8/test/cctest/test-utils.cc1
-rw-r--r--deps/v8/test/cctest/test-weakmaps.cc27
-rw-r--r--deps/v8/test/cctest/test-weaksets.cc25
-rw-r--r--deps/v8/test/cctest/types-fuzz.h126
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-64.cc67
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc48
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc85
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-js.cc9
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc294
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc73
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd.cc49
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc843
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-function-name-table.cc6
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-trap-position.cc9
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h27
-rw-r--r--deps/v8/test/common/DEPS3
-rw-r--r--deps/v8/test/common/wasm/test-signatures.h (renamed from deps/v8/test/cctest/wasm/test-signatures.h)6
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc231
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.h66
-rw-r--r--deps/v8/test/fuzzer/fuzzer.gyp260
-rw-r--r--deps/v8/test/fuzzer/fuzzer.isolate16
-rw-r--r--deps/v8/test/fuzzer/testcfg.py5
-rw-r--r--deps/v8/test/fuzzer/wasm-asmjs.cc10
-rw-r--r--deps/v8/test/fuzzer/wasm-code.cc104
-rw-r--r--deps/v8/test/fuzzer/wasm-data-section.cc9
-rw-r--r--deps/v8/test/fuzzer/wasm-function-sigs-section.cc10
-rw-r--r--deps/v8/test/fuzzer/wasm-globals-section.cc9
-rw-r--r--deps/v8/test/fuzzer/wasm-imports-section.cc9
-rw-r--r--deps/v8/test/fuzzer/wasm-memory-section.cc9
-rw-r--r--deps/v8/test/fuzzer/wasm-names-section.cc10
-rw-r--r--deps/v8/test/fuzzer/wasm-section-fuzzers.cc63
-rw-r--r--deps/v8/test/fuzzer/wasm-section-fuzzers.h16
-rw-r--r--deps/v8/test/fuzzer/wasm-types-section.cc9
-rw-r--r--deps/v8/test/fuzzer/wasm.cc9
-rw-r--r--deps/v8/test/fuzzer/wasm.tar.gz.sha11
-rw-r--r--deps/v8/test/fuzzer/wasm/foo.wasmbin47 -> 0 bytes
-rw-r--r--deps/v8/test/fuzzer/wasm_asmjs.tar.gz.sha11
-rw-r--r--deps/v8/test/fuzzer/wasm_asmjs/foo.wasmbin47 -> 0 bytes
-rw-r--r--deps/v8/test/fuzzer/wasm_code/foo0
-rw-r--r--deps/v8/test/fuzzer/wasm_data_section/foo0
-rw-r--r--deps/v8/test/fuzzer/wasm_function_sigs_section/foo0
-rw-r--r--deps/v8/test/fuzzer/wasm_globals_section/foo0
-rw-r--r--deps/v8/test/fuzzer/wasm_imports_section/foo0
-rw-r--r--deps/v8/test/fuzzer/wasm_memory_section/foo0
-rw-r--r--deps/v8/test/fuzzer/wasm_names_section/foo0
-rw-r--r--deps/v8/test/fuzzer/wasm_types_section/foo0
-rw-r--r--deps/v8/test/inspector/BUILD.gn39
-rw-r--r--deps/v8/test/inspector/DEPS10
-rw-r--r--deps/v8/test/inspector/OWNERS2
-rw-r--r--deps/v8/test/inspector/console/let-const-with-api-expected.txt19
-rw-r--r--deps/v8/test/inspector/console/let-const-with-api.js52
-rw-r--r--deps/v8/test/inspector/console/memory-setter-in-strict-mode-expected.txt9
-rw-r--r--deps/v8/test/inspector/console/memory-setter-in-strict-mode.js13
-rw-r--r--deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash-expected.txt3
-rw-r--r--deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash.js46
-rw-r--r--deps/v8/test/inspector/cpu-profiler/console-profile-expected.txt3
-rw-r--r--deps/v8/test/inspector/cpu-profiler/console-profile.js59
-rw-r--r--deps/v8/test/inspector/cpu-profiler/enable-disable-expected.txt8
-rw-r--r--deps/v8/test/inspector/cpu-profiler/enable-disable.js75
-rw-r--r--deps/v8/test/inspector/cpu-profiler/record-cpu-profile-expected.txt7
-rw-r--r--deps/v8/test/inspector/cpu-profiler/record-cpu-profile.js48
-rw-r--r--deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start-expected.txt2
-rw-r--r--deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start.js12
-rw-r--r--deps/v8/test/inspector/debugger/access-obsolete-frame-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/access-obsolete-frame.js67
-rw-r--r--deps/v8/test/inspector/debugger/async-console-count-doesnt-crash-expected.txt1
-rw-r--r--deps/v8/test/inspector/debugger/async-console-count-doesnt-crash.js10
-rw-r--r--deps/v8/test/inspector/debugger/call-frame-function-location-expected.txt3
-rw-r--r--deps/v8/test/inspector/debugger/call-frame-function-location.js25
-rw-r--r--deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt23
-rw-r--r--deps/v8/test/inspector/debugger/command-line-api-with-bound-function.js64
-rw-r--r--deps/v8/test/inspector/debugger/continue-to-location-expected.txt31
-rw-r--r--deps/v8/test/inspector/debugger/continue-to-location.js114
-rw-r--r--deps/v8/test/inspector/debugger/doesnt-step-into-injected-script-expected.txt17
-rw-r--r--deps/v8/test/inspector/debugger/doesnt-step-into-injected-script.js32
-rw-r--r--deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt19
-rw-r--r--deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name.js42
-rw-r--r--deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt3
-rw-r--r--deps/v8/test/inspector/debugger/script-parsed-hash.js31
-rw-r--r--deps/v8/test/inspector/debugger/set-blackbox-patterns-expected.txt25
-rw-r--r--deps/v8/test/inspector/debugger/set-blackbox-patterns.js59
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt7
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js17
-rw-r--r--deps/v8/test/inspector/debugger/set-script-source-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/set-script-source.js152
-rw-r--r--deps/v8/test/inspector/debugger/step-over-caught-exception-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/step-over-caught-exception.js76
-rw-r--r--deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges-expected.txt54
-rw-r--r--deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js126
-rw-r--r--deps/v8/test/inspector/debugger/update-call-frame-scopes-expected.txt7
-rw-r--r--deps/v8/test/inspector/debugger/update-call-frame-scopes.js63
-rw-r--r--deps/v8/test/inspector/inspector-impl.cc201
-rw-r--r--deps/v8/test/inspector/inspector-impl.h79
-rw-r--r--deps/v8/test/inspector/inspector-test.cc254
-rw-r--r--deps/v8/test/inspector/inspector.gyp41
-rw-r--r--deps/v8/test/inspector/inspector.status7
-rw-r--r--deps/v8/test/inspector/json-parse-expected.txt9
-rw-r--r--deps/v8/test/inspector/json-parse.js14
-rw-r--r--deps/v8/test/inspector/protocol-test.js210
-rw-r--r--deps/v8/test/inspector/runtime/await-promise-expected.txt119
-rw-r--r--deps/v8/test/inspector/runtime/await-promise.js116
-rw-r--r--deps/v8/test/inspector/runtime/call-function-on-async-expected.txt141
-rw-r--r--deps/v8/test/inspector/runtime/call-function-on-async.js129
-rw-r--r--deps/v8/test/inspector/runtime/clear-of-command-line-api-expected.txt177
-rw-r--r--deps/v8/test/inspector/runtime/clear-of-command-line-api.js117
-rw-r--r--deps/v8/test/inspector/runtime/compile-script-expected.txt66
-rw-r--r--deps/v8/test/inspector/runtime/compile-script.js50
-rw-r--r--deps/v8/test/inspector/runtime/console-api-repeated-in-console-expected.txt6
-rw-r--r--deps/v8/test/inspector/runtime/console-api-repeated-in-console.js37
-rw-r--r--deps/v8/test/inspector/runtime/console-deprecated-methods-expected.txt5
-rw-r--r--deps/v8/test/inspector/runtime/console-deprecated-methods.js28
-rw-r--r--deps/v8/test/inspector/runtime/console-line-and-column-expected.txt52
-rw-r--r--deps/v8/test/inspector/runtime/console-line-and-column.js18
-rw-r--r--deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks-expected.txt21
-rw-r--r--deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks.js26
-rw-r--r--deps/v8/test/inspector/runtime/console-timestamp-expected.txt9
-rw-r--r--deps/v8/test/inspector/runtime/console-timestamp.js23
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-async-expected.txt95
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-async-with-wrap-error-expected.txt8
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-async-with-wrap-error.js15
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-async.js58
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero-expected.txt9
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero.js9
-rw-r--r--deps/v8/test/inspector/runtime/exception-thrown-expected.txt117
-rw-r--r--deps/v8/test/inspector/runtime/exception-thrown.js12
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-expected.txt39
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt11
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-on-proxy.js101
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-preview-expected.txt32
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-preview.js25
-rw-r--r--deps/v8/test/inspector/runtime/get-properties.js221
-rw-r--r--deps/v8/test/inspector/runtime/property-on-console-proto-expected.txt12
-rw-r--r--deps/v8/test/inspector/runtime/property-on-console-proto.js25
-rw-r--r--deps/v8/test/inspector/runtime/protocol-works-with-different-locale-expected.txt138
-rw-r--r--deps/v8/test/inspector/runtime/protocol-works-with-different-locale.js40
-rw-r--r--deps/v8/test/inspector/runtime/run-script-async-expected.txt191
-rw-r--r--deps/v8/test/inspector/runtime/run-script-async.js110
-rw-r--r--deps/v8/test/inspector/runtime/set-or-map-entries-expected.txt9
-rw-r--r--deps/v8/test/inspector/runtime/set-or-map-entries.js52
-rw-r--r--deps/v8/test/inspector/task-runner.cc145
-rw-r--r--deps/v8/test/inspector/task-runner.h80
-rw-r--r--deps/v8/test/inspector/testcfg.py109
-rw-r--r--deps/v8/test/intl/assert.js8
-rw-r--r--deps/v8/test/intl/date-format/date-format-to-parts.js20
-rw-r--r--deps/v8/test/intl/date-format/parse-MMMdy.js59
-rw-r--r--deps/v8/test/intl/date-format/parse-invalid-input.js37
-rw-r--r--deps/v8/test/intl/date-format/parse-mdy.js53
-rw-r--r--deps/v8/test/intl/date-format/parse-mdyhms.js65
-rw-r--r--deps/v8/test/intl/extra-flag.js23
-rw-r--r--deps/v8/test/intl/intl.status5
-rw-r--r--deps/v8/test/intl/no-extra-flag.js23
-rw-r--r--deps/v8/test/intl/number-format/parse-decimal.js52
-rw-r--r--deps/v8/test/intl/number-format/parse-invalid-input.js40
-rw-r--r--deps/v8/test/intl/number-format/parse-percent.js46
-rw-r--r--deps/v8/test/js-perf-test/JSTests.json32
-rw-r--r--deps/v8/test/js-perf-test/Object/ObjectTests.json6
-rw-r--r--deps/v8/test/js-perf-test/Object/create.js70
-rw-r--r--deps/v8/test/js-perf-test/Object/run.js3
-rw-r--r--deps/v8/test/js-perf-test/StringIterators/run.js27
-rw-r--r--deps/v8/test/js-perf-test/StringIterators/string-iterator.js239
-rw-r--r--deps/v8/test/message/arrow-invalid-rest-2.out6
-rw-r--r--deps/v8/test/message/arrow-invalid-rest.out6
-rw-r--r--deps/v8/test/message/export-duplicate-as.js4
-rw-r--r--deps/v8/test/message/export-duplicate-as.out4
-rw-r--r--deps/v8/test/message/export-duplicate-default.js1
-rw-r--r--deps/v8/test/message/export-duplicate-default.out2
-rw-r--r--deps/v8/test/message/export-duplicate.js3
-rw-r--r--deps/v8/test/message/export-duplicate.out6
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-binop-lhs.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-binop-rhs.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-comma.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-extends.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-for-in.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-for-of.js16
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-for-of.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-logical-and.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-logical-or.js14
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-logical-or.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-subclass.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-try.js17
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-try.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-inside-member-expr.js14
-rw-r--r--deps/v8/test/message/syntactic-tail-call-inside-member-expr.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-of-eval.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-of-identifier.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-of-new.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-sloppy.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-without-return.out4
-rw-r--r--deps/v8/test/mjsunit/array-indexing-receiver.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-string-char-from-code.js76
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-materialize.js29
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-exception-1.js2219
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-exception-2.js2063
-rw-r--r--deps/v8/test/mjsunit/compiler/instanceof.js133
-rw-r--r--deps/v8/test/mjsunit/compiler/number-isfinite.js29
-rw-r--r--deps/v8/test/mjsunit/compiler/number-isinteger.js30
-rw-r--r--deps/v8/test/mjsunit/compiler/number-isnan.js28
-rw-r--r--deps/v8/test/mjsunit/compiler/number-issafeinteger.js50
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5320.js (renamed from deps/v8/test/intl/number-format/parse-currency.js)31
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-625966.js (renamed from deps/v8/test/message/syntactic-tail-call-of-identifier.js)8
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-626986.js23
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-638132.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-639210.js38
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-644048.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-644633.js (renamed from deps/v8/test/message/syntactic-tail-call-of-new.js)11
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-645851.js19
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-650215.js (renamed from deps/v8/test/message/syntactic-tail-call-in-for-in.js)16
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-compare-negate.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-escape-analysis-indirect.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-math-sign-nan-type.js (renamed from deps/v8/test/message/syntactic-tail-call-in-comma.js)14
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-strict-equals-mixed-feedback.js23
-rw-r--r--deps/v8/test/mjsunit/compiler/string-add-try-catch.js39
-rw-r--r--deps/v8/test/mjsunit/compiler/unsigned-min-max.js37
-rw-r--r--deps/v8/test/mjsunit/debug-function-scopes.js1
-rw-r--r--deps/v8/test/mjsunit/debug-print.js47
-rw-r--r--deps/v8/test/mjsunit/debug-scopes.js116
-rw-r--r--deps/v8/test/mjsunit/element-accessor.js17
-rw-r--r--deps/v8/test/mjsunit/es6/block-sloppy-function.js18
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/promise-all-uncaught.js45
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/promise-race-uncaught.js45
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js20
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js22
-rw-r--r--deps/v8/test/mjsunit/es6/function-name.js5
-rw-r--r--deps/v8/test/mjsunit/es6/promise-thenable-proxy.js23
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-5337.js39
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-650172.js (renamed from deps/v8/test/message/syntactic-tail-call-of-eval.js)7
-rw-r--r--deps/v8/test/mjsunit/es6/string-iterator.js8
-rw-r--r--deps/v8/test/mjsunit/es6/super.js32
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest.js7
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call.js47
-rw-r--r--deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing-sloppy.js410
-rw-r--r--deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing.js393
-rw-r--r--deps/v8/test/mjsunit/es8/syntactic-tail-call-simple.js143
-rw-r--r--deps/v8/test/mjsunit/es8/syntactic-tail-call.js604
-rw-r--r--deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js362
-rw-r--r--deps/v8/test/mjsunit/function-var.js23
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases.js216
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases0.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases1.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases2.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases3.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-caught-exception.js55
-rw-r--r--deps/v8/test/mjsunit/harmony/debug-async-function-async-task-event.js37
-rw-r--r--deps/v8/test/mjsunit/harmony/default-parameter-do-expression.js21
-rw-r--r--deps/v8/test/mjsunit/keyed-load-generic.js20
-rw-r--r--deps/v8/test/mjsunit/lazy-inner-functions.js16
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status150
-rw-r--r--deps/v8/test/mjsunit/modules-circular-valid.js7
-rw-r--r--deps/v8/test/mjsunit/modules-default-name1.js10
-rw-r--r--deps/v8/test/mjsunit/modules-default-name2.js10
-rw-r--r--deps/v8/test/mjsunit/modules-default-name3.js10
-rw-r--r--deps/v8/test/mjsunit/modules-default-name4.js10
-rw-r--r--deps/v8/test/mjsunit/modules-default-name5.js10
-rw-r--r--deps/v8/test/mjsunit/modules-default-name6.js10
-rw-r--r--deps/v8/test/mjsunit/modules-default-name7.js11
-rw-r--r--deps/v8/test/mjsunit/modules-default-name8.js10
-rw-r--r--deps/v8/test/mjsunit/modules-default-name9.js10
-rw-r--r--deps/v8/test/mjsunit/modules-default.js11
-rw-r--r--deps/v8/test/mjsunit/modules-empty-import1.js9
-rw-r--r--deps/v8/test/mjsunit/modules-empty-import2.js9
-rw-r--r--deps/v8/test/mjsunit/modules-empty-import3.js9
-rw-r--r--deps/v8/test/mjsunit/modules-empty-import4.js11
-rw-r--r--deps/v8/test/mjsunit/modules-error-trace.js9
-rw-r--r--deps/v8/test/mjsunit/modules-exports1.js55
-rw-r--r--deps/v8/test/mjsunit/modules-exports2.js31
-rw-r--r--deps/v8/test/mjsunit/modules-exports3.js48
-rw-r--r--deps/v8/test/mjsunit/modules-fail-1.js7
-rw-r--r--deps/v8/test/mjsunit/modules-fail-2.js7
-rw-r--r--deps/v8/test/mjsunit/modules-fail-3.js7
-rw-r--r--deps/v8/test/mjsunit/modules-fail-4.js8
-rw-r--r--deps/v8/test/mjsunit/modules-fail-5.js9
-rw-r--r--deps/v8/test/mjsunit/modules-fail-6.js8
-rw-r--r--deps/v8/test/mjsunit/modules-fail-7.js8
-rw-r--r--deps/v8/test/mjsunit/modules-fail-8.js7
-rw-r--r--deps/v8/test/mjsunit/modules-fail-cyclic-1.js8
-rw-r--r--deps/v8/test/mjsunit/modules-fail-cyclic-2.js8
-rw-r--r--deps/v8/test/mjsunit/modules-fail-cyclic-3.js8
-rw-r--r--deps/v8/test/mjsunit/modules-fail-star-exports-conflict.js10
-rw-r--r--deps/v8/test/mjsunit/modules-imports1.js26
-rw-r--r--deps/v8/test/mjsunit/modules-imports2.js26
-rw-r--r--deps/v8/test/mjsunit/modules-imports3.js38
-rw-r--r--deps/v8/test/mjsunit/modules-imports4.js31
-rw-r--r--deps/v8/test/mjsunit/modules-imports5.js9
-rw-r--r--deps/v8/test/mjsunit/modules-imports6.js25
-rw-r--r--deps/v8/test/mjsunit/modules-imports7.js8
-rw-r--r--deps/v8/test/mjsunit/modules-init1.js9
-rw-r--r--deps/v8/test/mjsunit/modules-init2.js8
-rw-r--r--deps/v8/test/mjsunit/modules-init3.js20
-rw-r--r--deps/v8/test/mjsunit/modules-preparse.js12
-rw-r--r--deps/v8/test/mjsunit/modules-skip-1.js9
-rw-r--r--deps/v8/test/mjsunit/modules-skip-2.js7
-rw-r--r--deps/v8/test/mjsunit/modules-skip-3.js8
-rw-r--r--deps/v8/test/mjsunit/modules-skip-4.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-5.js5
-rw-r--r--deps/v8/test/mjsunit/modules-skip-6.js7
-rw-r--r--deps/v8/test/mjsunit/modules-skip-7.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-circular-valid.js8
-rw-r--r--deps/v8/test/mjsunit/modules-skip-cyclic-3.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-cyclic.js5
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name1.js5
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name2.js5
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name3.js5
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name4.js5
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name5.js5
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name6.js5
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name7.js5
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name8.js5
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name9.js5
-rw-r--r--deps/v8/test/mjsunit/modules-skip-empty-import-aux.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-empty-import.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-init1.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-init3.js20
-rw-r--r--deps/v8/test/mjsunit/modules-skip-star-exports-conflict.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-star-exports-cycle.js6
-rw-r--r--deps/v8/test/mjsunit/modules-star-exports-cycle.js (renamed from deps/v8/test/message/syntactic-tail-call-in-binop-rhs.js)15
-rw-r--r--deps/v8/test/mjsunit/modules-this.js (renamed from deps/v8/test/message/syntactic-tail-call-sloppy.js)6
-rw-r--r--deps/v8/test/mjsunit/regexp-lastIndex.js22
-rw-r--r--deps/v8/test/mjsunit/regexp.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2437.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2438.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-353551.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4577.js (renamed from deps/v8/test/mjsunit/bugs/bug-4577.js)8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5332.js31
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5351.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5357.js (renamed from deps/v8/test/mjsunit/compiler/regress-645179.js)19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5380.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5404.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5405.js28
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5440.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5559.js38
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5648.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-642409.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-645680.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-648373-sloppy-arguments-includesValues.js33
-rw-r--r--deps/v8/test/mjsunit/regress/regress-649067.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-649078.js (renamed from deps/v8/test/message/syntactic-tail-call-in-logical-and.js)14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-651327.js217
-rw-r--r--deps/v8/test/mjsunit/regress/regress-655573.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-662935.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-666046.js57
-rw-r--r--deps/v8/test/mjsunit/regress/regress-abort-context-allocate-params.js941
-rw-r--r--deps/v8/test/mjsunit/regress/regress-abort-preparsing-params.js946
-rw-r--r--deps/v8/test/mjsunit/regress/regress-arguments-liveness-analysis.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-cr-658267.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631027.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-635798.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-635923.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-640497.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-643073.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-644111.js (renamed from deps/v8/test/message/syntactic-tail-call-without-return.js)13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-644245.js (renamed from deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.js)18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-644631.js (renamed from deps/v8/test/message/syntactic-tail-call-in-binop-lhs.js)10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-645103.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-645888.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-647217.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-648737.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-648740.js (renamed from deps/v8/test/message/syntactic-tail-call-in-extends.js)11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-650404.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-650933.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-650973.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-651403-global.js (renamed from deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.js)20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-651403.js (renamed from deps/v8/test/message/syntactic-tail-call-in-subclass.js)18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-652186-global.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-652186-local.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-654723.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-657478.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-663402.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-667689.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-673008.js23
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-02256.js967
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-02862.js107
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-644682.js26
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-647649.js43
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-651961.js24
-rw-r--r--deps/v8/test/mjsunit/stack-overflow-arity-catch-noinline.js (renamed from deps/v8/test/webkit/fast/js/stack-overflow-arrity-catch.js)38
-rw-r--r--deps/v8/test/mjsunit/stack-traces-overflow.js7
-rw-r--r--deps/v8/test/mjsunit/substr.js54
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-expr.js151
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-f32.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-f64.js37
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-i32.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-u32.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm.js88
-rw-r--r--deps/v8/test/mjsunit/wasm/calls.js11
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-management.js50
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-serialization.js32
-rw-r--r--deps/v8/test/mjsunit/wasm/debug-disassembly.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js21
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js383
-rw-r--r--deps/v8/test/mjsunit/wasm/export-table.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi-error.js39
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/frame-inspection.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/function-names.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-frame.js5
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory.js255
-rw-r--r--deps/v8/test/mjsunit/wasm/import-table.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/incrementer.wasmbin66 -> 45 bytes
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-calls.js45
-rw-r--r--deps/v8/test/mjsunit/wasm/instance-gc.js122
-rw-r--r--deps/v8/test/mjsunit/wasm/instantiate-module-basic.js85
-rw-r--r--deps/v8/test/mjsunit/wasm/memory-size.js30
-rw-r--r--deps/v8/test/mjsunit/wasm/memory.js93
-rw-r--r--deps/v8/test/mjsunit/wasm/module-memory.js61
-rw-r--r--deps/v8/test/mjsunit/wasm/parallel_compilation.js13
-rw-r--r--deps/v8/test/mjsunit/wasm/receiver.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/stack.js20
-rw-r--r--deps/v8/test/mjsunit/wasm/stackwalk.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/start-function.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/table.js95
-rw-r--r--deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js48
-rw-r--r--deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/trap-location.js28
-rw-r--r--deps/v8/test/mjsunit/wasm/unicode-validation.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/verify-function-simple.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-constants.js72
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js111
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-object-api.js1
-rw-r--r--deps/v8/test/mozilla/mozilla.status8
-rw-r--r--deps/v8/test/test262/detachArrayBuffer.js2
-rw-r--r--deps/v8/test/test262/harness-adapt.js4
-rw-r--r--deps/v8/test/test262/test262.status202
-rw-r--r--deps/v8/test/unittests/BUILD.gn147
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc217
-rw-r--r--deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc25
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc218
-rw-r--r--deps/v8/test/unittests/compiler/branch-elimination-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc20
-rw-r--r--deps/v8/test/unittests/compiler/control-equivalence-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/escape-analysis-unittest.cc11
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.cc13
-rw-r--r--deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc185
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc15
-rw-r--r--deps/v8/test/unittests/compiler/js-operator-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc336
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc255
-rw-r--r--deps/v8/test/unittests/compiler/load-elimination-unittest.cc263
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc126
-rw-r--r--deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc243
-rw-r--r--deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc242
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc39
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h10
-rw-r--r--deps/v8/test/unittests/compiler/opcodes-unittest.cc67
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc119
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/typed-optimization-unittest.cc226
-rw-r--r--deps/v8/test/unittests/compiler/zone-pool-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc75
-rw-r--r--deps/v8/test/unittests/heap/slot-set-unittest.cc57
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc365
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc8
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc112
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc9
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc12
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc64
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc40
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc254
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc85
-rw-r--r--deps/v8/test/unittests/interpreter/bytecodes-unittest.cc61
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc12
-rw-r--r--deps/v8/test/unittests/test-utils.h7
-rw-r--r--deps/v8/test/unittests/unicode-unittest.cc39
-rw-r--r--deps/v8/test/unittests/unittests.gyp6
-rw-r--r--deps/v8/test/unittests/unittests.status14
-rw-r--r--deps/v8/test/unittests/value-serializer-unittest.cc1098
-rw-r--r--deps/v8/test/unittests/wasm/ast-decoder-unittest.cc2134
-rw-r--r--deps/v8/test/unittests/wasm/control-transfer-unittest.cc406
-rw-r--r--deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc47
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc754
-rw-r--r--deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc78
-rw-r--r--deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc (renamed from deps/v8/test/unittests/wasm/encoder-unittest.cc)14
-rw-r--r--deps/v8/test/webkit/fast/js/stack-overflow-arrity-catch-expected.txt33
-rw-r--r--deps/v8/test/webkit/fast/regex/lastIndex-expected.txt4
-rw-r--r--deps/v8/test/webkit/webkit.status5
-rw-r--r--deps/v8/tools/callstats.html206
-rwxr-xr-xdeps/v8/tools/callstats.py199
-rwxr-xr-xdeps/v8/tools/dev/v8gen.py189
-rw-r--r--deps/v8/tools/gcmole/gcmole.lua64
-rw-r--r--deps/v8/tools/gcmole/run-gcmole.isolate1
-rw-r--r--deps/v8/tools/gen-inlining-tests.py566
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py2
-rwxr-xr-xdeps/v8/tools/grokdump.py206
-rw-r--r--deps/v8/tools/mb/OWNERS3
-rw-r--r--deps/v8/tools/mb/PRESUBMIT.py41
-rw-r--r--deps/v8/tools/mb/README.md22
-rw-r--r--deps/v8/tools/mb/docs/README.md4
-rw-r--r--deps/v8/tools/mb/docs/design_spec.md426
-rw-r--r--deps/v8/tools/mb/docs/user_guide.md297
-rwxr-xr-xdeps/v8/tools/mb/mb8
-rwxr-xr-xdeps/v8/tools/mb/mb.bat6
-rwxr-xr-xdeps/v8/tools/mb/mb.py1500
-rwxr-xr-xdeps/v8/tools/mb/mb_unittest.py572
-rw-r--r--deps/v8/tools/oom_dump/README33
-rw-r--r--deps/v8/tools/oom_dump/SConstruct42
-rw-r--r--deps/v8/tools/oom_dump/oom_dump.cc283
-rwxr-xr-xdeps/v8/tools/presubmit.py19
-rwxr-xr-xdeps/v8/tools/run-perf.sh7
-rwxr-xr-xdeps/v8/tools/run-tests.py22
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py17
-rw-r--r--deps/v8/tools/testrunner/local/variants.py4
-rwxr-xr-xdeps/v8/tools/try_perf.py1
-rw-r--r--deps/v8/tools/turbolizer/constants.js10
-rw-r--r--deps/v8/tools/turbolizer/disassembly-view.js62
-rw-r--r--deps/v8/tools/turbolizer/index.html6
-rw-r--r--deps/v8/tools/turbolizer/text-view.js2
-rw-r--r--deps/v8/tools/turbolizer/turbo-visualizer.css12
-rw-r--r--deps/v8/tools/turbolizer/turbo-visualizer.js11
-rwxr-xr-xdeps/v8/tools/update-wasm-fuzzers.sh56
-rw-r--r--deps/v8/tools/v8heapconst.py4
-rwxr-xr-xdeps/v8/tools/verify_source_deps.py143
1522 files changed, 117616 insertions, 59955 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index ac9b0bd9ac..29fd9226de 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -85,12 +85,13 @@ shell_g
/tools/luci-go/linux64/isolate
/tools/luci-go/mac64/isolate
/tools/luci-go/win64/isolate.exe
-/tools/mb
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
/tools/swarming_client
/tools/visual_studio/Debug
/tools/visual_studio/Release
+/test/fuzzer/wasm
+/test/fuzzer/wasm_asmjs
/v8.log.ll
/xcodebuild
TAGS
@@ -105,3 +106,7 @@ turbo*.cfg
turbo*.dot
turbo*.json
v8.ignition_dispatches_table.json
+/test/fuzzer/wasm.tar.gz
+/test/fuzzer/wasm_asmjs.tar.gz
+/src/inspector/build/closure-compiler.tar.gz
+/src/inspector/build/closure-compiler \ No newline at end of file
diff --git a/deps/v8/.gn b/deps/v8/.gn
index a1c0ff8dce..aee1752d4b 100644
--- a/deps/v8/.gn
+++ b/deps/v8/.gn
@@ -14,8 +14,7 @@ secondary_source = "//build/secondary/"
# matching these patterns (see "gn help label_pattern" for format) will have
# their includes checked for proper dependencies when you run either
# "gn check" or "gn gen --check".
-check_targets = [
-]
+check_targets = []
# These are the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly
@@ -45,7 +44,5 @@ exec_script_whitelist = [
"//build/toolchain/win/BUILD.gn",
"//build/util/branding.gni",
"//build/util/version.gni",
- "//test/cctest/BUILD.gn",
"//test/test262/BUILD.gn",
- "//test/unittests/BUILD.gn",
]
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index d70be77d86..0229c9259e 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -63,6 +63,7 @@ Felix Geisendörfer <haimuiba@gmail.com>
Filipe David Manana <fdmanana@gmail.com>
Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Geoffrey Garside <ggarside@gmail.com>
+Gwang Yoon Hwang <ryumiel@company100.net>
Han Choongwoo <cwhan.tunz@gmail.com>
Hirofumi Mako <mkhrfm@gmail.com>
Honggyu Kim <honggyu.kp@gmail.com>
@@ -95,14 +96,17 @@ Mike Pennisi <mike@mikepennisi.com>
Milton Chiang <milton.chiang@mediatek.com>
Myeong-bo Shim <m0609.shim@samsung.com>
Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
+Noj Vek <nojvek@gmail.com>
Oleksandr Chekhovskyi <oleksandr.chekhovskyi@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
+Peter Rybin <peter.rybin@gmail.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Paul Lind <plind44@gmail.com>
Rafal Krypa <rafal@krypa.net>
Refael Ackermann <refack@gmail.com>
Rene Rebe <rene@exactcode.de>
+Rob Wu <rob@robwu.nl>
Robert Mustacchi <rm@fingolfin.org>
Robert Nagy <robert.nagy@gmail.com>
Ryan Dahl <ry@tinyclouds.org>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index dcefe3706b..06870b6039 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -62,6 +62,9 @@ declare_args() {
# tools/gen-postmortem-metadata.py for details.
v8_postmortem_support = false
+ # Switches off inlining in V8.
+ v8_no_inline = false
+
# Similar to vfp but on MIPS.
v8_can_use_fpu_instructions = true
@@ -94,6 +97,11 @@ if (v8_enable_disassembler == "") {
v8_enable_disassembler = is_debug && !v8_optimized_debug
}
+# Specifies if the target build is a simulator build. Comparing target cpu
+# with v8 target cpu to not affect simulator builds for making cross-compile
+# snapshots.
+is_target_simulator = target_cpu != v8_target_cpu
+
v8_generated_peephole_source = "$target_gen_dir/bytecode-peephole-table.cc"
v8_random_seed = "314159265"
v8_toolset_for_shell = "host"
@@ -107,10 +115,7 @@ config("internal_config") {
include_dirs = [ "." ]
if (is_component_build) {
- defines = [
- "V8_SHARED",
- "BUILDING_V8_SHARED",
- ]
+ defines = [ "BUILDING_V8_SHARED" ]
}
}
@@ -134,12 +139,12 @@ config("libsampler_config") {
# itself.
config("external_config") {
if (is_component_build) {
- defines = [
- "V8_SHARED",
- "USING_V8_SHARED",
- ]
+ defines = [ "USING_V8_SHARED" ]
}
include_dirs = [ "include" ]
+ if (v8_enable_inspector_override) {
+ include_dirs += [ "$target_gen_dir/include" ]
+ }
libs = []
if (is_android && current_toolchain != host_toolchain) {
libs += [ "log" ]
@@ -200,7 +205,7 @@ config("toolchain") {
if (v8_current_cpu == "arm") {
defines += [ "V8_TARGET_ARCH_ARM" ]
- if (arm_version == 7) {
+ if (arm_version >= 7) {
defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ]
}
if (arm_fpu == "vfpv3-d16") {
@@ -233,6 +238,12 @@ config("toolchain") {
defines += [ "V8_TARGET_ARCH_ARM64" ]
}
+ # Mips64el/mipsel simulators.
+ if (is_target_simulator &&
+ (v8_current_cpu == "mipsel" || v8_current_cpu == "mips64el")) {
+ defines += [ "_MIPS_TARGET_SIMULATOR" ]
+ }
+
# TODO(jochen): Add support for mips.
if (v8_current_cpu == "mipsel") {
defines += [ "V8_TARGET_ARCH_MIPS" ]
@@ -343,6 +354,13 @@ config("toolchain") {
} else if (dcheck_always_on) {
defines += [ "DEBUG" ]
}
+
+ if (v8_no_inline) {
+ cflags += [
+ "-fno-inline-functions",
+ "-fno-inline",
+ ]
+ }
}
###############################################################################
@@ -374,17 +392,16 @@ action("js2c") {
"src/js/regexp.js",
"src/js/arraybuffer.js",
"src/js/typedarray.js",
- "src/js/iterator-prototype.js",
"src/js/collection.js",
"src/js/weak-collection.js",
"src/js/collection-iterator.js",
"src/js/promise.js",
"src/js/messages.js",
"src/js/array-iterator.js",
- "src/js/string-iterator.js",
"src/js/templates.js",
"src/js/spread.js",
"src/js/proxy.js",
+ "src/js/async-await.js",
"src/debug/mirrors.js",
"src/debug/debug.js",
"src/debug/liveedit.js",
@@ -427,7 +444,6 @@ action("js2c_experimental") {
sources = [
"src/js/macros.py",
"src/messages.h",
- "src/js/harmony-async-await.js",
"src/js/harmony-atomics.js",
"src/js/harmony-simd.js",
"src/js/harmony-string-padding.js",
@@ -439,8 +455,8 @@ action("js2c_experimental") {
if (v8_enable_i18n_support) {
sources += [
+ "src/js/datetime-format-to-parts.js",
"src/js/icu-case-mapping.js",
- "src/js/intl-extra.js",
]
}
@@ -809,6 +825,8 @@ v8_source_set("v8_base") {
sources = [
"//base/trace_event/common/trace_event_common.h",
+
+ ### gcmole(all) ###
"include/v8-debug.h",
"include/v8-experimental.h",
"include/v8-platform.h",
@@ -857,10 +875,14 @@ v8_source_set("v8_base") {
"src/ast/ast-numbering.h",
"src/ast/ast-traversal-visitor.h",
"src/ast/ast-type-bounds.h",
+ "src/ast/ast-types.cc",
+ "src/ast/ast-types.h",
"src/ast/ast-value-factory.cc",
"src/ast/ast-value-factory.h",
"src/ast/ast.cc",
"src/ast/ast.h",
+ "src/ast/compile-time-value.cc",
+ "src/ast/compile-time-value.h",
"src/ast/context-slot-cache.cc",
"src/ast/context-slot-cache.h",
"src/ast/modules.cc",
@@ -868,7 +890,6 @@ v8_source_set("v8_base") {
"src/ast/prettyprinter.cc",
"src/ast/prettyprinter.h",
"src/ast/scopeinfo.cc",
- "src/ast/scopeinfo.h",
"src/ast/scopes.cc",
"src/ast/scopes.h",
"src/ast/variables.cc",
@@ -904,12 +925,14 @@ v8_source_set("v8_base") {
"src/builtins/builtins-handler.cc",
"src/builtins/builtins-internal.cc",
"src/builtins/builtins-interpreter.cc",
+ "src/builtins/builtins-iterator.cc",
"src/builtins/builtins-json.cc",
"src/builtins/builtins-math.cc",
"src/builtins/builtins-number.cc",
"src/builtins/builtins-object.cc",
"src/builtins/builtins-proxy.cc",
"src/builtins/builtins-reflect.cc",
+ "src/builtins/builtins-regexp.cc",
"src/builtins/builtins-sharedarraybuffer.cc",
"src/builtins/builtins-string.cc",
"src/builtins/builtins-symbol.cc",
@@ -940,6 +963,8 @@ v8_source_set("v8_base") {
"src/compilation-cache.h",
"src/compilation-dependencies.cc",
"src/compilation-dependencies.h",
+ "src/compilation-info.cc",
+ "src/compilation-info.h",
"src/compilation-statistics.cc",
"src/compilation-statistics.h",
"src/compiler-dispatcher/compiler-dispatcher-job.cc",
@@ -1069,6 +1094,8 @@ v8_source_set("v8_base") {
"src/compiler/loop-peeling.h",
"src/compiler/loop-variable-optimizer.cc",
"src/compiler/loop-variable-optimizer.h",
+ "src/compiler/machine-graph-verifier.cc",
+ "src/compiler/machine-graph-verifier.h",
"src/compiler/machine-operator-reducer.cc",
"src/compiler/machine-operator-reducer.h",
"src/compiler/machine-operator.cc",
@@ -1132,12 +1159,16 @@ v8_source_set("v8_base") {
"src/compiler/store-store-elimination.h",
"src/compiler/tail-call-optimization.cc",
"src/compiler/tail-call-optimization.h",
+ "src/compiler/type-cache.cc",
+ "src/compiler/type-cache.h",
"src/compiler/type-hint-analyzer.cc",
"src/compiler/type-hint-analyzer.h",
- "src/compiler/type-hints.cc",
- "src/compiler/type-hints.h",
+ "src/compiler/typed-optimization.cc",
+ "src/compiler/typed-optimization.h",
"src/compiler/typer.cc",
"src/compiler/typer.h",
+ "src/compiler/types.cc",
+ "src/compiler/types.h",
"src/compiler/unwinding-info-writer.h",
"src/compiler/value-numbering-reducer.cc",
"src/compiler/value-numbering-reducer.h",
@@ -1216,6 +1247,7 @@ v8_source_set("v8_base") {
"src/crankshaft/lithium-allocator.h",
"src/crankshaft/lithium-codegen.cc",
"src/crankshaft/lithium-codegen.h",
+ "src/crankshaft/lithium-inl.h",
"src/crankshaft/lithium.cc",
"src/crankshaft/lithium.h",
"src/crankshaft/typing.cc",
@@ -1314,6 +1346,7 @@ v8_source_set("v8_base") {
"src/heap/heap-inl.h",
"src/heap/heap.cc",
"src/heap/heap.h",
+ "src/heap/incremental-marking-inl.h",
"src/heap/incremental-marking-job.cc",
"src/heap/incremental-marking-job.h",
"src/heap/incremental-marking.cc",
@@ -1351,6 +1384,7 @@ v8_source_set("v8_base") {
"src/ic/call-optimization.h",
"src/ic/handler-compiler.cc",
"src/ic/handler-compiler.h",
+ "src/ic/handler-configuration.h",
"src/ic/ic-compiler.cc",
"src/ic/ic-compiler.h",
"src/ic/ic-inl.h",
@@ -1382,12 +1416,13 @@ v8_source_set("v8_base") {
"src/interpreter/bytecode-generator.h",
"src/interpreter/bytecode-label.cc",
"src/interpreter/bytecode-label.h",
+ "src/interpreter/bytecode-operands.cc",
+ "src/interpreter/bytecode-operands.h",
"src/interpreter/bytecode-peephole-optimizer.cc",
"src/interpreter/bytecode-peephole-optimizer.h",
"src/interpreter/bytecode-peephole-table.h",
"src/interpreter/bytecode-pipeline.cc",
"src/interpreter/bytecode-pipeline.h",
- "src/interpreter/bytecode-register-allocator.cc",
"src/interpreter/bytecode-register-allocator.h",
"src/interpreter/bytecode-register-optimizer.cc",
"src/interpreter/bytecode-register-optimizer.h",
@@ -1422,11 +1457,16 @@ v8_source_set("v8_base") {
"src/layout-descriptor.h",
"src/list-inl.h",
"src/list.h",
+ "src/locked-queue-inl.h",
+ "src/locked-queue.h",
"src/log-inl.h",
"src/log-utils.cc",
"src/log-utils.h",
"src/log.cc",
"src/log.h",
+ "src/lookup-cache-inl.h",
+ "src/lookup-cache.cc",
+ "src/lookup-cache.h",
"src/lookup.cc",
"src/lookup.h",
"src/machine-type.cc",
@@ -1444,6 +1484,8 @@ v8_source_set("v8_base") {
"src/objects.h",
"src/ostreams.cc",
"src/ostreams.h",
+ "src/parsing/duplicate-finder.cc",
+ "src/parsing/duplicate-finder.h",
"src/parsing/expression-classifier.h",
"src/parsing/func-name-inferrer.cc",
"src/parsing/func-name-inferrer.h",
@@ -1495,6 +1537,8 @@ v8_source_set("v8_base") {
"src/profiler/strings-storage.h",
"src/profiler/tick-sample.cc",
"src/profiler/tick-sample.h",
+ "src/profiler/tracing-cpu-profiler.cc",
+ "src/profiler/tracing-cpu-profiler.h",
"src/profiler/unbound-queue-inl.h",
"src/profiler/unbound-queue.h",
"src/property-descriptor.cc",
@@ -1601,15 +1645,13 @@ v8_source_set("v8_base") {
"src/transitions-inl.h",
"src/transitions.cc",
"src/transitions.h",
- "src/type-cache.cc",
- "src/type-cache.h",
"src/type-feedback-vector-inl.h",
"src/type-feedback-vector.cc",
"src/type-feedback-vector.h",
+ "src/type-hints.cc",
+ "src/type-hints.h",
"src/type-info.cc",
"src/type-info.h",
- "src/types.cc",
- "src/types.h",
"src/unicode-cache-inl.h",
"src/unicode-cache.h",
"src/unicode-decoder.cc",
@@ -1629,6 +1671,7 @@ v8_source_set("v8_base") {
"src/v8threads.h",
"src/value-serializer.cc",
"src/value-serializer.h",
+ "src/vector.h",
"src/version.cc",
"src/version.h",
"src/vm-state-inl.h",
@@ -1636,8 +1679,6 @@ v8_source_set("v8_base") {
"src/wasm/ast-decoder.cc",
"src/wasm/ast-decoder.h",
"src/wasm/decoder.h",
- "src/wasm/encoder.cc",
- "src/wasm/encoder.h",
"src/wasm/leb-helper.h",
"src/wasm/module-decoder.cc",
"src/wasm/module-decoder.h",
@@ -1654,20 +1695,27 @@ v8_source_set("v8_base") {
"src/wasm/wasm-js.cc",
"src/wasm/wasm-js.h",
"src/wasm/wasm-macro-gen.h",
+ "src/wasm/wasm-module-builder.cc",
+ "src/wasm/wasm-module-builder.h",
"src/wasm/wasm-module.cc",
"src/wasm/wasm-module.h",
"src/wasm/wasm-opcodes.cc",
"src/wasm/wasm-opcodes.h",
"src/wasm/wasm-result.cc",
"src/wasm/wasm-result.h",
- "src/zone-allocator.h",
- "src/zone-containers.h",
- "src/zone.cc",
- "src/zone.h",
+ "src/zone/accounting-allocator.cc",
+ "src/zone/accounting-allocator.h",
+ "src/zone/zone-allocator.h",
+ "src/zone/zone-allocator.h",
+ "src/zone/zone-containers.h",
+ "src/zone/zone-segment.cc",
+ "src/zone/zone-segment.h",
+ "src/zone/zone.cc",
+ "src/zone/zone.h",
]
if (v8_current_cpu == "x86") {
- sources += [
+ sources += [ ### gcmole(arch:ia32) ###
"src/builtins/ia32/builtins-ia32.cc",
"src/compiler/ia32/code-generator-ia32.cc",
"src/compiler/ia32/instruction-codes-ia32.h",
@@ -1696,6 +1744,8 @@ v8_source_set("v8_base") {
"src/ia32/interface-descriptors-ia32.cc",
"src/ia32/macro-assembler-ia32.cc",
"src/ia32/macro-assembler-ia32.h",
+ "src/ia32/simulator-ia32.cc",
+ "src/ia32/simulator-ia32.h",
"src/ic/ia32/access-compiler-ia32.cc",
"src/ic/ia32/handler-compiler-ia32.cc",
"src/ic/ia32/ic-compiler-ia32.cc",
@@ -1705,7 +1755,7 @@ v8_source_set("v8_base") {
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
]
} else if (v8_current_cpu == "x64") {
- sources += [
+ sources += [ ### gcmole(arch:x64) ###
"src/builtins/x64/builtins-x64.cc",
"src/compiler/x64/code-generator-x64.cc",
"src/compiler/x64/instruction-codes-x64.h",
@@ -1728,6 +1778,7 @@ v8_source_set("v8_base") {
"src/ic/x64/stub-cache-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
+ "src/third_party/valgrind/valgrind.h",
"src/x64/assembler-x64-inl.h",
"src/x64/assembler-x64.cc",
"src/x64/assembler-x64.h",
@@ -1744,9 +1795,12 @@ v8_source_set("v8_base") {
"src/x64/interface-descriptors-x64.cc",
"src/x64/macro-assembler-x64.cc",
"src/x64/macro-assembler-x64.h",
+ "src/x64/simulator-x64.cc",
+ "src/x64/simulator-x64.h",
+ "src/x64/sse-instr.h",
]
} else if (v8_current_cpu == "arm") {
- sources += [
+ sources += [ ### gcmole(arch:arm) ###
"src/arm/assembler-arm-inl.h",
"src/arm/assembler-arm.cc",
"src/arm/assembler-arm.h",
@@ -1792,7 +1846,7 @@ v8_source_set("v8_base") {
"src/regexp/arm/regexp-macro-assembler-arm.h",
]
} else if (v8_current_cpu == "arm64") {
- sources += [
+ sources += [ ### gcmole(arch:arm64) ###
"src/arm64/assembler-arm64-inl.h",
"src/arm64/assembler-arm64.cc",
"src/arm64/assembler-arm64.h",
@@ -1850,8 +1904,8 @@ v8_source_set("v8_base") {
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
]
- } else if (v8_current_cpu == "mipsel") {
- sources += [
+ } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
+ sources += [ ### gcmole(arch:mipsel) ###
"src/builtins/mips/builtins-mips.cc",
"src/compiler/mips/code-generator-mips.cc",
"src/compiler/mips/instruction-codes-mips.h",
@@ -1892,8 +1946,8 @@ v8_source_set("v8_base") {
"src/regexp/mips/regexp-macro-assembler-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.h",
]
- } else if (v8_current_cpu == "mips64el") {
- sources += [
+ } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
+ sources += [ ### gcmole(arch:mips64el) ###
"src/builtins/mips64/builtins-mips64.cc",
"src/compiler/mips64/code-generator-mips64.cc",
"src/compiler/mips64/instruction-codes-mips64.h",
@@ -1934,8 +1988,50 @@ v8_source_set("v8_base") {
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
]
+ } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
+ sources += [ ### gcmole(arch:ppc) ###
+ "src/builtins/ppc/builtins-ppc.cc",
+ "src/compiler/ppc/code-generator-ppc.cc",
+ "src/compiler/ppc/instruction-codes-ppc.h",
+ "src/compiler/ppc/instruction-scheduler-ppc.cc",
+ "src/compiler/ppc/instruction-selector-ppc.cc",
+ "src/crankshaft/ppc/lithium-codegen-ppc.cc",
+ "src/crankshaft/ppc/lithium-codegen-ppc.h",
+ "src/crankshaft/ppc/lithium-gap-resolver-ppc.cc",
+ "src/crankshaft/ppc/lithium-gap-resolver-ppc.h",
+ "src/crankshaft/ppc/lithium-ppc.cc",
+ "src/crankshaft/ppc/lithium-ppc.h",
+ "src/debug/ppc/debug-ppc.cc",
+ "src/full-codegen/ppc/full-codegen-ppc.cc",
+ "src/ic/ppc/access-compiler-ppc.cc",
+ "src/ic/ppc/handler-compiler-ppc.cc",
+ "src/ic/ppc/ic-compiler-ppc.cc",
+ "src/ic/ppc/ic-ppc.cc",
+ "src/ic/ppc/stub-cache-ppc.cc",
+ "src/ppc/assembler-ppc-inl.h",
+ "src/ppc/assembler-ppc.cc",
+ "src/ppc/assembler-ppc.h",
+ "src/ppc/code-stubs-ppc.cc",
+ "src/ppc/code-stubs-ppc.h",
+ "src/ppc/codegen-ppc.cc",
+ "src/ppc/codegen-ppc.h",
+ "src/ppc/constants-ppc.cc",
+ "src/ppc/constants-ppc.h",
+ "src/ppc/cpu-ppc.cc",
+ "src/ppc/deoptimizer-ppc.cc",
+ "src/ppc/disasm-ppc.cc",
+ "src/ppc/frames-ppc.cc",
+ "src/ppc/frames-ppc.h",
+ "src/ppc/interface-descriptors-ppc.cc",
+ "src/ppc/macro-assembler-ppc.cc",
+ "src/ppc/macro-assembler-ppc.h",
+ "src/ppc/simulator-ppc.cc",
+ "src/ppc/simulator-ppc.h",
+ "src/regexp/ppc/regexp-macro-assembler-ppc.cc",
+ "src/regexp/ppc/regexp-macro-assembler-ppc.h",
+ ]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
- sources += [
+ sources += [ ### gcmole(arch:s390) ###
"src/builtins/s390/builtins-s390.cc",
"src/compiler/s390/code-generator-s390.cc",
"src/compiler/s390/instruction-codes-s390.h",
@@ -1976,6 +2072,46 @@ v8_source_set("v8_base") {
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
]
+ } else if (v8_current_cpu == "x87") {
+ sources += [ ### gcmole(arch:x87) ###
+ "src/builtins/x87/builtins-x87.cc",
+ "src/compiler/x87/code-generator-x87.cc",
+ "src/compiler/x87/instruction-codes-x87.h",
+ "src/compiler/x87/instruction-scheduler-x87.cc",
+ "src/compiler/x87/instruction-selector-x87.cc",
+ "src/crankshaft/x87/lithium-codegen-x87.cc",
+ "src/crankshaft/x87/lithium-codegen-x87.h",
+ "src/crankshaft/x87/lithium-gap-resolver-x87.cc",
+ "src/crankshaft/x87/lithium-gap-resolver-x87.h",
+ "src/crankshaft/x87/lithium-x87.cc",
+ "src/crankshaft/x87/lithium-x87.h",
+ "src/debug/x87/debug-x87.cc",
+ "src/full-codegen/x87/full-codegen-x87.cc",
+ "src/ic/x87/access-compiler-x87.cc",
+ "src/ic/x87/handler-compiler-x87.cc",
+ "src/ic/x87/ic-compiler-x87.cc",
+ "src/ic/x87/ic-x87.cc",
+ "src/ic/x87/stub-cache-x87.cc",
+ "src/regexp/x87/regexp-macro-assembler-x87.cc",
+ "src/regexp/x87/regexp-macro-assembler-x87.h",
+ "src/x87/assembler-x87-inl.h",
+ "src/x87/assembler-x87.cc",
+ "src/x87/assembler-x87.h",
+ "src/x87/code-stubs-x87.cc",
+ "src/x87/code-stubs-x87.h",
+ "src/x87/codegen-x87.cc",
+ "src/x87/codegen-x87.h",
+ "src/x87/cpu-x87.cc",
+ "src/x87/deoptimizer-x87.cc",
+ "src/x87/disasm-x87.cc",
+ "src/x87/frames-x87.cc",
+ "src/x87/frames-x87.h",
+ "src/x87/interface-descriptors-x87.cc",
+ "src/x87/macro-assembler-x87.cc",
+ "src/x87/macro-assembler-x87.h",
+ "src/x87/simulator-x87.cc",
+ "src/x87/simulator-x87.h",
+ ]
}
configs = [ ":internal_config" ]
@@ -2010,14 +2146,16 @@ v8_source_set("v8_base") {
sources += [ "$target_gen_dir/debug-support.cc" ]
deps += [ ":postmortem-metadata" ]
}
+
+ if (v8_enable_inspector_override) {
+ deps += [ "src/inspector:inspector" ]
+ }
}
v8_source_set("v8_libbase") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
- "src/base/accounting-allocator.cc",
- "src/base/accounting-allocator.h",
"src/base/adapters.h",
"src/base/atomic-utils.h",
"src/base/atomicops.h",
@@ -2035,6 +2173,7 @@ v8_source_set("v8_libbase") {
"src/base/bits.cc",
"src/base/bits.h",
"src/base/build_config.h",
+ "src/base/compiler-specific.h",
"src/base/cpu.cc",
"src/base/cpu.h",
"src/base/debug/stack_trace.cc",
@@ -2048,6 +2187,7 @@ v8_source_set("v8_libbase") {
"src/base/free_deleter.h",
"src/base/functional.cc",
"src/base/functional.h",
+ "src/base/hashmap-entry.h",
"src/base/hashmap.h",
"src/base/ieee754.cc",
"src/base/ieee754.h",
@@ -2200,6 +2340,27 @@ v8_source_set("fuzzer_support") {
configs = [ ":internal_config_base" ]
deps = [
+ ":v8",
+ ]
+
+ public_deps = [
+ ":v8_libplatform",
+ ]
+}
+
+# Used by fuzzers that would require exposing too many symbols for a proper
+# component build.
+v8_source_set("fuzzer_support_nocomponent") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ sources = [
+ "test/fuzzer/fuzzer-support.cc",
+ "test/fuzzer/fuzzer-support.h",
+ ]
+
+ configs = [ ":internal_config_base" ]
+
+ deps = [
":v8_maybe_snapshot",
]
@@ -2247,7 +2408,10 @@ v8_executable("mkpeephole") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
+ "src/interpreter/bytecode-operands.cc",
+ "src/interpreter/bytecode-operands.h",
"src/interpreter/bytecode-peephole-optimizer.h",
+ "src/interpreter/bytecode-traits.h",
"src/interpreter/bytecodes.cc",
"src/interpreter/bytecodes.h",
"src/interpreter/mkpeephole.cc",
@@ -2336,6 +2500,7 @@ if (is_component_build) {
v8_executable("d8") {
sources = [
+ "$target_gen_dir/d8-js.cc",
"src/d8.cc",
"src/d8.h",
]
@@ -2363,9 +2528,6 @@ v8_executable("d8") {
sources += [ "src/d8-windows.cc" ]
}
- if (!is_component_build) {
- sources += [ "$target_gen_dir/d8-js.cc" ]
- }
if (v8_enable_i18n_support) {
deps += [ "//third_party/icu" ]
}
@@ -2516,7 +2678,10 @@ v8_source_set("json_fuzzer") {
":fuzzer_support",
]
- configs = [ ":internal_config" ]
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
}
v8_fuzzer("json_fuzzer") {
@@ -2528,10 +2693,13 @@ v8_source_set("parser_fuzzer") {
]
deps = [
- ":fuzzer_support",
+ ":fuzzer_support_nocomponent",
]
- configs = [ ":internal_config" ]
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
}
v8_fuzzer("parser_fuzzer") {
@@ -2546,12 +2714,38 @@ v8_source_set("regexp_fuzzer") {
":fuzzer_support",
]
- configs = [ ":internal_config" ]
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
}
v8_fuzzer("regexp_fuzzer") {
}
+v8_source_set("wasm_module_runner") {
+ sources = [
+ "test/common/wasm/wasm-module-runner.cc",
+ "test/common/wasm/wasm-module-runner.h",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_source_set("wasm_test_signatures") {
+ sources = [
+ "test/common/wasm/test-signatures.h",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
v8_source_set("wasm_fuzzer") {
sources = [
"test/fuzzer/wasm.cc",
@@ -2559,9 +2753,13 @@ v8_source_set("wasm_fuzzer") {
deps = [
":fuzzer_support",
+ ":wasm_module_runner",
]
- configs = [ ":internal_config" ]
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
}
v8_fuzzer("wasm_fuzzer") {
@@ -2574,10 +2772,186 @@ v8_source_set("wasm_asmjs_fuzzer") {
deps = [
":fuzzer_support",
+ ":wasm_module_runner",
]
- configs = [ ":internal_config" ]
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
}
v8_fuzzer("wasm_asmjs_fuzzer") {
}
+
+v8_source_set("wasm_code_fuzzer") {
+ sources = [
+ "test/fuzzer/wasm-code.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ":wasm_module_runner",
+ ":wasm_test_signatures",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_fuzzer("wasm_code_fuzzer") {
+}
+
+v8_source_set("lib_wasm_section_fuzzer") {
+ sources = [
+ "test/fuzzer/wasm-section-fuzzers.cc",
+ "test/fuzzer/wasm-section-fuzzers.h",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_source_set("wasm_types_section_fuzzer") {
+ sources = [
+ "test/fuzzer/wasm-types-section.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ":lib_wasm_section_fuzzer",
+ ":wasm_module_runner",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_fuzzer("wasm_types_section_fuzzer") {
+}
+
+v8_source_set("wasm_names_section_fuzzer") {
+ sources = [
+ "test/fuzzer/wasm-names-section.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ":lib_wasm_section_fuzzer",
+ ":wasm_module_runner",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_fuzzer("wasm_names_section_fuzzer") {
+}
+
+v8_source_set("wasm_globals_section_fuzzer") {
+ sources = [
+ "test/fuzzer/wasm-globals-section.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ":lib_wasm_section_fuzzer",
+ ":wasm_module_runner",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_fuzzer("wasm_globals_section_fuzzer") {
+}
+
+v8_source_set("wasm_imports_section_fuzzer") {
+ sources = [
+ "test/fuzzer/wasm-imports-section.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ":lib_wasm_section_fuzzer",
+ ":wasm_module_runner",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_fuzzer("wasm_imports_section_fuzzer") {
+}
+
+v8_source_set("wasm_function_sigs_section_fuzzer") {
+ sources = [
+ "test/fuzzer/wasm-function-sigs-section.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ":lib_wasm_section_fuzzer",
+ ":wasm_module_runner",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_fuzzer("wasm_function_sigs_section_fuzzer") {
+}
+
+v8_source_set("wasm_memory_section_fuzzer") {
+ sources = [
+ "test/fuzzer/wasm-memory-section.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ":lib_wasm_section_fuzzer",
+ ":wasm_module_runner",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_fuzzer("wasm_memory_section_fuzzer") {
+}
+
+v8_source_set("wasm_data_section_fuzzer") {
+ sources = [
+ "test/fuzzer/wasm-data-section.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ":lib_wasm_section_fuzzer",
+ ":wasm_module_runner",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_fuzzer("wasm_data_section_fuzzer") {
+}
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index b2a43a1121..40c8537022 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1987 @@
+2016-10-05: Version 5.5.372
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-05: Version 5.5.371
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-05: Version 5.5.370
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-05: Version 5.5.369
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-05: Version 5.5.368
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-05: Version 5.5.367
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.366
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.365
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.364
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.363
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.362
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.361
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.360
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.359
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.358
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.357
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.356
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.355
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.354
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.353
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-04: Version 5.5.352
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-03: Version 5.5.351
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-03: Version 5.5.350
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-03: Version 5.5.349
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-03: Version 5.5.348
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-03: Version 5.5.347
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-03: Version 5.5.346
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-02: Version 5.5.345
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-02: Version 5.5.344
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-02: Version 5.5.343
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-02: Version 5.5.342
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-01: Version 5.5.341
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-01: Version 5.5.340
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-30: Version 5.5.339
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-30: Version 5.5.338
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-30: Version 5.5.337
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-30: Version 5.5.336
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-30: Version 5.5.335
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.334
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.333
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.332
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.331
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.330
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.329
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.328
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.327
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.326
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.325
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.324
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.323
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.322
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.321
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.320
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.319
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.318
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-29: Version 5.5.317
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-28: Version 5.5.316
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-28: Version 5.5.315
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-28: Version 5.5.314
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-28: Version 5.5.313
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-28: Version 5.5.312
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-28: Version 5.5.311
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-28: Version 5.5.310
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-28: Version 5.5.309
+
+ [wasm] Master CL for Binary 0xC changes (Chromium issue 575167).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-27: Version 5.5.308
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-27: Version 5.5.307
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-27: Version 5.5.306
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-27: Version 5.5.305
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-27: Version 5.5.304
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-27: Version 5.5.303
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-27: Version 5.5.302
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-27: Version 5.5.301
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-27: Version 5.5.300
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-27: Version 5.5.299
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-27: Version 5.5.298
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-27: Version 5.5.297
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-27: Version 5.5.296
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.295
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.294
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.293
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.292
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.291
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.290
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.289
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.288
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.287
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.286
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.285
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.284
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.283
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-26: Version 5.5.282
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-24: Version 5.5.281
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-24: Version 5.5.280
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-24: Version 5.5.279
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-24: Version 5.5.278
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-23: Version 5.5.277
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-23: Version 5.5.276
+
+ [wasm] Master CL for Binary 0xC changes (Chromium issue 575167).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-23: Version 5.5.275
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.274
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.273
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.272
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.271
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.270
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.269
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.268
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.267
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.266
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.265
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.264
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.263
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.262
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.261
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.260
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.259
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-22: Version 5.5.258
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.257
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.256
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.255
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.254
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.253
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.252
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.251
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.250
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.249
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.248
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.247
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.246
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.245
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.244
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.243
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.242
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.241
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.240
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.239
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-21: Version 5.5.238
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-20: Version 5.5.237
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-20: Version 5.5.236
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-20: Version 5.5.235
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-20: Version 5.5.234
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-20: Version 5.5.233
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-20: Version 5.5.232
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-20: Version 5.5.231
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-20: Version 5.5.230
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-19: Version 5.5.229
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-19: Version 5.5.228
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-19: Version 5.5.227
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-16: Version 5.5.226
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-16: Version 5.5.225
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-16: Version 5.5.224
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-16: Version 5.5.223
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-16: Version 5.5.222
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-16: Version 5.5.221
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-16: Version 5.5.220
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-16: Version 5.5.219
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-16: Version 5.5.218
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-15: Version 5.5.217
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-15: Version 5.5.216
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-15: Version 5.5.215
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-15: Version 5.5.214
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-15: Version 5.5.213
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-15: Version 5.5.212
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-15: Version 5.5.211
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-15: Version 5.5.210
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-15: Version 5.5.209
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-15: Version 5.5.208
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-15: Version 5.5.207
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.206
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.205
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.204
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.203
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.202
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.201
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.200
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.199
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.198
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.197
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.196
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.195
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.194
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.193
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.192
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.191
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.190
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.189
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.188
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.187
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-14: Version 5.5.186
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.185
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.184
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.183
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.182
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.181
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.180
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.179
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.178
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.177
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.176
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.175
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.174
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.173
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.172
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.171
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-13: Version 5.5.170
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.169
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.168
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.167
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.166
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.165
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.164
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.163
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.162
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.161
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.160
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.159
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.158
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.157
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.156
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.155
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.154
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.153
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.152
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.151
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-12: Version 5.5.150
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-10: Version 5.5.149
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.148
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.147
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.146
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.145
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.144
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.143
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.142
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.141
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.140
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.139
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.138
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.137
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.136
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.135
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.134
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-09: Version 5.5.133
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-08: Version 5.5.132
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-08: Version 5.5.131
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-08: Version 5.5.130
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-08: Version 5.5.129
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-08: Version 5.5.128
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-08: Version 5.5.127
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-08: Version 5.5.126
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-08: Version 5.5.125
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-08: Version 5.5.124
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-08: Version 5.5.123
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-07: Version 5.5.122
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-07: Version 5.5.121
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-07: Version 5.5.120
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-07: Version 5.5.119
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-07: Version 5.5.118
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-07: Version 5.5.117
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-07: Version 5.5.116
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-07: Version 5.5.115
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-07: Version 5.5.114
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-07: Version 5.5.113
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-07: Version 5.5.112
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-07: Version 5.5.111
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-06: Version 5.5.110
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-06: Version 5.5.109
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-06: Version 5.5.108
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-06: Version 5.5.107
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-06: Version 5.5.106
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-06: Version 5.5.105
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-06: Version 5.5.104
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-06: Version 5.5.103
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-06: Version 5.5.102
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-06: Version 5.5.101
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.100
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.99
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.98
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.97
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.96
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.95
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.94
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.93
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.92
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.91
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.90
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.89
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.88
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.87
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-05: Version 5.5.86
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-04: Version 5.5.85
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-04: Version 5.5.84
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-04: Version 5.5.83
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-02: Version 5.5.82
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-02: Version 5.5.81
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-02: Version 5.5.80
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-02: Version 5.5.79
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-02: Version 5.5.78
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-02: Version 5.5.77
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-02: Version 5.5.76
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-02: Version 5.5.75
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-02: Version 5.5.74
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-02: Version 5.5.73
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-02: Version 5.5.72
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-02: Version 5.5.71
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.70
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.69
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.68
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.67
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.66
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.65
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.64
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.63
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.62
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.61
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.60
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.59
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.58
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.57
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.56
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.55
+
+ Performance and stability improvements on all platforms.
+
+
+2016-09-01: Version 5.5.54
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.53
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.52
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.51
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.50
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.49
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.48
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.47
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.46
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.45
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.44
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.43
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.42
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.41
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.40
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.39
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.38
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-31: Version 5.5.37
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.36
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.35
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.34
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.33
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.32
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.31
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.30
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.29
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.28
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.27
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.26
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.25
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.24
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.23
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-30: Version 5.5.22
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.21
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.20
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.19
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.18
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.17
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.16
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.15
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.14
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.13
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.12
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.11
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.10
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.9
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.8
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.7
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-29: Version 5.5.6
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-28: Version 5.5.5
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-28: Version 5.5.4
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-28: Version 5.5.3
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-28: Version 5.5.2
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-26: Version 5.5.1
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-25: Version 5.4.524
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-25: Version 5.4.523
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-25: Version 5.4.522
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-25: Version 5.4.521
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-25: Version 5.4.520
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-25: Version 5.4.519
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-25: Version 5.4.518
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-24: Version 5.4.517
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-24: Version 5.4.516
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-24: Version 5.4.515
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-24: Version 5.4.514
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-24: Version 5.4.513
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-24: Version 5.4.512
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-24: Version 5.4.511
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-24: Version 5.4.510
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-24: Version 5.4.509
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-24: Version 5.4.508
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-24: Version 5.4.507
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-24: Version 5.4.506
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-23: Version 5.4.505
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-23: Version 5.4.504
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-23: Version 5.4.503
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-23: Version 5.4.502
+
+ Performance and stability improvements on all platforms.
+
+
+2016-08-23: Version 5.4.501
+
+ Performance and stability improvements on all platforms.
+
+
2016-08-23: Version 5.4.500
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 6cac01d597..058cd8bea6 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -3,61 +3,62 @@
# all paths in here must match this assumption.
vars = {
- "git_url": "https://chromium.googlesource.com",
+ "chromium_url": "https://chromium.googlesource.com",
}
deps = {
"v8/build":
- Var("git_url") + "/chromium/src/build.git" + "@" + "59daf502c36f20b5c9292f4bd9af85791f8a5884",
+ Var("chromium_url") + "/chromium/src/build.git" + "@" + "475d5b37ded6589c9f8a0d19ced54ddf2e6d14a0",
"v8/tools/gyp":
- Var("git_url") + "/external/gyp.git" + "@" + "702ac58e477214c635d9b541932e75a95d349352",
+ Var("chromium_url") + "/external/gyp.git" + "@" + "e7079f0e0e14108ab0dba58728ff219637458563",
"v8/third_party/icu":
- Var("git_url") + "/chromium/deps/icu.git" + "@" + "2341038bf72869a5683a893a2b319a48ffec7f62",
+ Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "b0bd3ee50bc2e768d7a17cbc60d87f517f024dbe",
"v8/third_party/instrumented_libraries":
- Var("git_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "f15768d7fdf68c0748d20738184120c8ab2e6db7",
+ Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "45f5814b1543e41ea0be54c771e3840ea52cca4a",
"v8/buildtools":
- Var("git_url") + "/chromium/buildtools.git" + "@" + "adb8bf4e8fc92aa1717bf151b862d58e6f27c4f2",
+ Var("chromium_url") + "/chromium/buildtools.git" + "@" + "5fd66957f08bb752dca714a591c84587c9d70762",
"v8/base/trace_event/common":
- Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "315bf1e2d45be7d53346c31cfcc37424a32c30c8",
+ Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "e0fa02a02f61430dae2bddfd89a334ea4389f495",
"v8/third_party/WebKit/Source/platform/inspector_protocol":
- Var("git_url") + "/chromium/src/third_party/WebKit/Source/platform/inspector_protocol.git" + "@" + "547960151fb364dd9a382fa79ffc9abfb184e3d1",
+ Var("chromium_url") + "/chromium/src/third_party/WebKit/Source/platform/inspector_protocol.git" + "@" + "3280c57c4c575ce82ccd13e4a403492fb4ca624b",
"v8/third_party/jinja2":
- Var("git_url") + "/chromium/src/third_party/jinja2.git" + "@" + "2222b31554f03e62600cd7e383376a7c187967a1",
+ Var("chromium_url") + "/chromium/src/third_party/jinja2.git" + "@" + "b61a2c009a579593a259c1b300e0ad02bf48fd78",
"v8/third_party/markupsafe":
- Var("git_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "484a5661041cac13bfc688a26ec5434b05d18961",
- "v8/tools/mb":
- Var('git_url') + '/chromium/src/tools/mb.git' + '@' + "99788b8b516c44d7db25cfb68695bc234fdee5ed",
+ Var("chromium_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "484a5661041cac13bfc688a26ec5434b05d18961",
"v8/tools/swarming_client":
- Var('git_url') + '/external/swarming.client.git' + '@' + "e4288c3040a32f2e7ad92f957668f2ee3d36e5a6",
+ Var('chromium_url') + '/external/swarming.client.git' + '@' + "380e32662312eb107f06fcba6409b0409f8fef72",
"v8/testing/gtest":
- Var("git_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
+ Var("chromium_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
- Var("git_url") + "/external/googlemock.git" + "@" + "0421b6f358139f02e102c9c332ce19a33faf75be",
+ Var("chromium_url") + "/external/googlemock.git" + "@" + "0421b6f358139f02e102c9c332ce19a33faf75be",
"v8/test/benchmarks/data":
- Var("git_url") + "/v8/deps/third_party/benchmarks.git" + "@" + "05d7188267b4560491ff9155c5ee13e207ecd65f",
+ Var("chromium_url") + "/v8/deps/third_party/benchmarks.git" + "@" + "05d7188267b4560491ff9155c5ee13e207ecd65f",
"v8/test/mozilla/data":
- Var("git_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
- "v8/test/simdjs/data": Var("git_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "baf493985cb9ea7cdbd0d68704860a8156de9556",
+ Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
+ "v8/test/simdjs/data": Var("chromium_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "baf493985cb9ea7cdbd0d68704860a8156de9556",
"v8/test/test262/data":
- Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "88bc7fe7586f161201c5f14f55c9c489f82b1b67",
+ Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "29c23844494a7cc2fbebc6948d2cb0bcaddb24e7",
"v8/test/test262/harness":
- Var("git_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "cbd968f54f7a95c6556d53ba852292a4c49d11d8",
+ Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "cbd968f54f7a95c6556d53ba852292a4c49d11d8",
"v8/tools/clang":
- Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "3afb04a8153e40ff00f9eaa14337851c3ab4a368",
+ Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "1f92f999fc374a479e98a189ebdfe25c09484486",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
- Var("git_url") + "/android_tools.git" + "@" + "af1c5a4cd6329ccdcf8c2bc93d9eea02f9d74869",
+ Var("chromium_url") + "/android_tools.git" + "@" + "25d57ead05d3dfef26e9c19b13ed10b0a69829cf",
},
"win": {
"v8/third_party/cygwin":
- Var("git_url") + "/chromium/deps/cygwin.git" + "@" + "c89e446b273697fadf3a10ff1007a97c0b7de6df",
+ Var("chromium_url") + "/chromium/deps/cygwin.git" + "@" + "c89e446b273697fadf3a10ff1007a97c0b7de6df",
}
}
-recursedeps = [ 'v8/third_party/android_tools' ]
+recursedeps = [
+ "v8/buildtools",
+ "v8/third_party/android_tools",
+]
include_rules = [
# Everybody can use some things.
@@ -204,6 +205,39 @@ hooks = [
],
},
{
+ "name": "wasm_fuzzer",
+ "pattern": ".",
+ "action": [ "download_from_google_storage",
+ "--no_resume",
+ "--no_auth",
+ "-u",
+ "--bucket", "v8-wasm-fuzzer",
+ "-s", "v8/test/fuzzer/wasm.tar.gz.sha1",
+ ],
+ },
+ {
+ "name": "wasm_asmjs_fuzzer",
+ "pattern": ".",
+ "action": [ "download_from_google_storage",
+ "--no_resume",
+ "--no_auth",
+ "-u",
+ "--bucket", "v8-wasm-asmjs-fuzzer",
+ "-s", "v8/test/fuzzer/wasm_asmjs.tar.gz.sha1",
+ ],
+ },
+ {
+ "name": "closure_compiler",
+ "pattern": ".",
+ "action": [ "download_from_google_storage",
+ "--no_resume",
+ "--no_auth",
+ "-u",
+ "--bucket", "chromium-v8-closure-compiler",
+ "-s", "v8/src/inspector/build/closure-compiler.tar.gz.sha1",
+ ],
+ },
+ {
# Downloads the current stable linux sysroot to build/linux/ if needed.
# This sysroot updates at about the same rate that the chrome build deps
# change.
@@ -259,6 +293,6 @@ hooks = [
{
# A change to a .gyp, .gypi, or to GYP itself should run the generator.
"pattern": ".",
- "action": ["python", "v8/gypfiles/gyp_v8"],
+ "action": ["python", "v8/gypfiles/gyp_v8", "--running-as-hook"],
},
]
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index 26701eef59..028f4ff12c 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -22,7 +22,6 @@ mtrofin@chromium.org
mvstanton@chromium.org
mythria@chromium.org
neis@chromium.org
-oth@chromium.org
rmcilroy@chromium.org
rossberg@chromium.org
titzer@chromium.org
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 5255ca11fa..78e7482efb 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -216,6 +216,38 @@ def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
return []
+def _CheckMissingFiles(input_api, output_api):
+ """Runs verify_source_deps.py to ensure no files were added that are not in
+ GN.
+ """
+ # We need to wait until we have an input_api object and use this
+ # roundabout construct to import checkdeps because this file is
+ # eval-ed and thus doesn't have __file__.
+ original_sys_path = sys.path
+ try:
+ sys.path = sys.path + [input_api.os_path.join(
+ input_api.PresubmitLocalPath(), 'tools')]
+ from verify_source_deps import missing_gn_files, missing_gyp_files
+ finally:
+ # Restore sys.path to what it was before.
+ sys.path = original_sys_path
+
+ gn_files = missing_gn_files()
+ gyp_files = missing_gyp_files()
+ results = []
+ if gn_files:
+ results.append(output_api.PresubmitError(
+ "You added one or more source files but didn't update the\n"
+ "corresponding BUILD.gn files:\n",
+ gn_files))
+ if gyp_files:
+ results.append(output_api.PresubmitError(
+ "You added one or more source files but didn't update the\n"
+ "corresponding gyp files:\n",
+ gyp_files))
+ return results
+
+
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
@@ -231,6 +263,7 @@ def _CommonChecks(input_api, output_api):
_CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
results.extend(
_CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api))
+ results.extend(_CheckMissingFiles(input_api, output_api))
return results
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index 0c16e7b723..0db92692a0 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -297,8 +297,8 @@
#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope, \
timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_INSTANT, category_group, name, 0, 0, timestamp, \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_INSTANT, category_group, name, timestamp, \
TRACE_EVENT_FLAG_NONE | scope)
// Syntactic sugars for the sampling tracing in the main thread.
@@ -308,8 +308,8 @@
TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \
- TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName)
+#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(category_and_name) \
+ TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, category_and_name)
// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
@@ -395,10 +395,15 @@
TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
arg2_name, arg2_val)
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP0(category_group, name, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE)
+
#define TRACE_EVENT_MARK_WITH_TIMESTAMP1(category_group, name, timestamp, \
arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
#define TRACE_EVENT_COPY_MARK(category_group, name) \
@@ -406,8 +411,8 @@
TRACE_EVENT_FLAG_COPY)
#define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
TRACE_EVENT_FLAG_COPY)
// Similar to TRACE_EVENT_ENDx but with a custom |at| timestamp provided.
@@ -544,6 +549,12 @@
TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_SAMPLE_WITH_ID1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_SAMPLE, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+ arg1_val)
+
// ASYNC_STEP_* APIs should be only used by legacy code. New code should
// consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
// event.
@@ -774,16 +785,19 @@
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
-// with one associated argument. If the category is not enabled, then this
-// does nothing.
+// with none, one or two associated argument. If the category is not enabled,
+// then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+
#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id, \
arg1_name, arg1_val) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
category_group, name, id, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
-// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
-// with 2 associated arguments. If the category is not enabled, then this
-// does nothing.
+
#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
@@ -828,15 +842,6 @@
TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
-// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
-// with 2 associated arguments. If the category is not enabled, then this
-// does nothing.
-#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
- category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
- TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
-
// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
@@ -958,48 +963,47 @@
#define TRACE_EVENT_CLOCK_SYNC_ISSUER(sync_id, issue_ts, issue_end_ts) \
INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \
- issue_end_ts.ToInternalValue(), TRACE_EVENT_FLAG_NONE, \
- "sync_id", sync_id, "issue_ts", issue_ts.ToInternalValue())
+ issue_end_ts, TRACE_EVENT_FLAG_NONE, \
+ "sync_id", sync_id, "issue_ts", issue_ts)
// Macros to track the life time and value of arbitrary client objects.
// See also TraceTrackableObject.
#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, \
- TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+ TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
snapshot) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
- TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+ id, TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
-#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \
- category_group, name, id, timestamp, snapshot) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
- TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \
- TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \
+ category_group, name, id, timestamp, snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ id, TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ "snapshot", snapshot)
#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, \
- TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+ TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
// Records entering and leaving trace event contexts. |category_group| and
// |name| specify the context category and type. |context| is a
// snapshotted context object id.
-#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, \
- TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, \
- TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, context, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, context, \
+ TRACE_EVENT_FLAG_NONE)
#define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
- INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, \
- TRACE_ID_DONT_MANGLE(context))
+ INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context)
// Macro to specify that two trace IDs are identical. For example,
// TRACE_BIND_IDS(
@@ -1083,6 +1087,7 @@
#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned int>(1 << 0))
#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned int>(1 << 1))
+// TODO(crbug.com/639003): Free this bit after ID mangling is deprecated.
#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned int>(1 << 2))
#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned int>(1 << 3))
#define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned int>(1 << 4))
@@ -1093,6 +1098,8 @@
#define TRACE_EVENT_FLAG_FLOW_OUT (static_cast<unsigned int>(1 << 9))
#define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast<unsigned int>(1 << 10))
#define TRACE_EVENT_FLAG_HAS_PROCESS_ID (static_cast<unsigned int>(1 << 11))
+#define TRACE_EVENT_FLAG_HAS_LOCAL_ID (static_cast<unsigned int>(1 << 12))
+#define TRACE_EVENT_FLAG_HAS_GLOBAL_ID (static_cast<unsigned int>(1 << 13))
#define TRACE_EVENT_FLAG_SCOPE_MASK \
(static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
diff --git a/deps/v8/build_overrides/build.gni b/deps/v8/build_overrides/build.gni
index da6d3e0ded..6b8a4ff219 100644
--- a/deps/v8/build_overrides/build.gni
+++ b/deps/v8/build_overrides/build.gni
@@ -16,3 +16,11 @@ build_with_chromium = false
# Some non-Chromium builds don't support building java targets.
enable_java_templates = false
+
+# Some non-Chromium builds don't use Chromium's third_party/binutils.
+linux_use_bundled_binutils_override = true
+
+# Allows different projects to specify their own suppressions files.
+asan_suppressions_file = "//build/sanitizers/asan_suppressions.cc"
+lsan_suppressions_file = "//build/sanitizers/lsan_suppressions.cc"
+tsan_suppressions_file = "//build/sanitizers/tsan_suppressions.cc"
diff --git a/deps/v8/build_overrides/v8.gni b/deps/v8/build_overrides/v8.gni
index fc4a70e579..09ea4570b0 100644
--- a/deps/v8/build_overrides/v8.gni
+++ b/deps/v8/build_overrides/v8.gni
@@ -11,10 +11,8 @@ if (is_android) {
import("//build/config/android/config.gni")
}
-if (((v8_current_cpu == "x86" ||
- v8_current_cpu == "x64" ||
- v8_current_cpu=="x87") &&
- (is_linux || is_mac)) ||
+if (((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
+ v8_current_cpu == "x87") && (is_linux || is_mac)) ||
(v8_current_cpu == "ppc64" && is_linux)) {
v8_enable_gdbjit_default = true
}
@@ -23,4 +21,12 @@ v8_imminent_deprecation_warnings_default = true
# Add simple extras solely for the purpose of the cctests.
v8_extra_library_files = [ "//test/cctest/test-extra.js" ]
-v8_experimental_extra_library_files = [ "//test/cctest/test-experimental-extra.js" ]
+v8_experimental_extra_library_files =
+ [ "//test/cctest/test-experimental-extra.js" ]
+
+declare_args() {
+ # Enable inspector. See include/v8-inspector.h.
+ v8_enable_inspector = false
+}
+
+v8_enable_inspector_override = v8_enable_inspector
diff --git a/deps/v8/gypfiles/config/win/msvs_dependencies.isolate b/deps/v8/gypfiles/config/win/msvs_dependencies.isolate
deleted file mode 100644
index ff92227363..0000000000
--- a/deps/v8/gypfiles/config/win/msvs_dependencies.isolate
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'conditions': [
- # Copy the VS runtime DLLs into the isolate so that they
- # don't have to be preinstalled on the target machine.
- #
- # VS2013 runtimes
- ['OS=="win" and msvs_version==2013 and component=="shared_library" and CONFIGURATION_NAME=="Debug"', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/x64/msvcp120d.dll',
- '<(PRODUCT_DIR)/x64/msvcr120d.dll',
- ],
- },
- }],
- ['OS=="win" and msvs_version==2013 and component=="shared_library" and CONFIGURATION_NAME=="Release"', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/x64/msvcp120.dll',
- '<(PRODUCT_DIR)/x64/msvcr120.dll',
- ],
- },
- }],
- ['OS=="win" and msvs_version==2013 and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/msvcp120d.dll',
- '<(PRODUCT_DIR)/msvcr120d.dll',
- ],
- },
- }],
- ['OS=="win" and msvs_version==2013 and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/msvcp120.dll',
- '<(PRODUCT_DIR)/msvcr120.dll',
- ],
- },
- }],
- # VS2015 runtimes
- ['OS=="win" and msvs_version==2015 and component=="shared_library" and CONFIGURATION_NAME=="Debug"', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/x64/msvcp140d.dll',
- '<(PRODUCT_DIR)/x64/vccorlib140d.dll',
- ],
- },
- }],
- ['OS=="win" and msvs_version==2015 and component=="shared_library" and CONFIGURATION_NAME=="Release"', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/x64/msvcp140.dll',
- '<(PRODUCT_DIR)/x64/vccorlib140.dll',
- ],
- },
- }],
- ['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/msvcp140d.dll',
- '<(PRODUCT_DIR)/vccorlib140d.dll',
- ],
- },
- }],
- ['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/msvcp140.dll',
- '<(PRODUCT_DIR)/vccorlib140.dll',
- ],
- },
- }],
- ],
-} \ No newline at end of file
diff --git a/deps/v8/gypfiles/get_landmines.py b/deps/v8/gypfiles/get_landmines.py
index 9fcca4b968..432dfd7ae5 100755
--- a/deps/v8/gypfiles/get_landmines.py
+++ b/deps/v8/gypfiles/get_landmines.py
@@ -28,6 +28,8 @@ def main():
print 'Clobbering to hopefully resolve problem with mksnapshot'
print 'Clobber after ICU roll.'
print 'Clobber after Android NDK update.'
+ print 'Clober to fix windows build problems.'
+ print 'Clober again to fix windows build problems.'
return 0
diff --git a/deps/v8/gypfiles/gyp_v8 b/deps/v8/gypfiles/gyp_v8
index 8be39d9615..b8b5f742b1 100755
--- a/deps/v8/gypfiles/gyp_v8
+++ b/deps/v8/gypfiles/gyp_v8
@@ -118,10 +118,22 @@ def run_gyp(args):
if __name__ == '__main__':
args = sys.argv[1:]
- if int(os.environ.get('GYP_CHROMIUM_NO_ACTION', 0)):
+ gyp_chromium_no_action = os.environ.get('GYP_CHROMIUM_NO_ACTION')
+ if gyp_chromium_no_action == '1':
print 'Skipping gyp_v8 due to GYP_CHROMIUM_NO_ACTION env var.'
sys.exit(0)
+ running_as_hook = '--running-as-hook'
+ if running_as_hook in args and gyp_chromium_no_action != '0':
+ print 'GYP is now disabled by default in runhooks.\n'
+ print 'If you really want to run this, either run '
+ print '`python gypfiles/gyp_v8` explicitly by hand '
+ print 'or set the environment variable GYP_CHROMIUM_NO_ACTION=0.'
+ sys.exit(0)
+
+ if running_as_hook in args:
+ args.remove(running_as_hook)
+
gyp_environment.set_environment()
# This could give false positives since it doesn't actually do real option
diff --git a/deps/v8/gypfiles/standalone.gypi b/deps/v8/gypfiles/standalone.gypi
index 6599bb8351..7e41ce84ae 100644
--- a/deps/v8/gypfiles/standalone.gypi
+++ b/deps/v8/gypfiles/standalone.gypi
@@ -46,6 +46,7 @@
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.7',
'release_extra_cflags%': '',
+ 'v8_enable_inspector%': 0,
'variables': {
'variables': {
'variables': {
@@ -319,7 +320,7 @@
'android_ndk_root%': '<(base_dir)/third_party/android_tools/ndk/',
'android_host_arch%': "<!(uname -m | sed -e 's/i[3456]86/x86/')",
# Version of the NDK. Used to ensure full rebuilds on NDK rolls.
- 'android_ndk_version%': 'r11c',
+ 'android_ndk_version%': 'r12b',
'host_os%': "<!(uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')",
'os_folder_name%': "<!(uname -s | sed -e 's/Linux/linux/;s/Darwin/darwin/')",
},
@@ -378,6 +379,9 @@
'arm_version%': '<(arm_version)',
'host_os%': '<(host_os)',
+ # Print to stdout on Android.
+ 'v8_android_log_stdout%': 1,
+
'conditions': [
['android_ndk_root==""', {
'variables': {
diff --git a/deps/v8/include/DEPS b/deps/v8/include/DEPS
new file mode 100644
index 0000000000..ca60f841f5
--- /dev/null
+++ b/deps/v8/include/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ # v8-inspector-protocol.h depends on generated files under include/inspector.
+ "+inspector",
+]
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS
index efa3b936d5..07f8a610c5 100644
--- a/deps/v8/include/OWNERS
+++ b/deps/v8/include/OWNERS
@@ -1,2 +1,7 @@
danno@chromium.org
jochen@chromium.org
+
+per-file v8-inspector.h=dgozman@chromium.org
+per-file v8-inspector.h=pfeldman@chromium.org
+per-file v8-inspector-protocol.h=dgozman@chromium.org
+per-file v8-inspector-protocol.h=pfeldman@chromium.org
diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h
index 7646ea5489..e9f4941478 100644
--- a/deps/v8/include/libplatform/v8-tracing.h
+++ b/deps/v8/include/libplatform/v8-tracing.h
@@ -7,9 +7,17 @@
#include <fstream>
#include <memory>
+#include <unordered_set>
#include <vector>
+#include "v8-platform.h" // NOLINT(build/include)
+
namespace v8 {
+
+namespace base {
+class Mutex;
+} // namespace base
+
namespace platform {
namespace tracing {
@@ -28,19 +36,22 @@ class TraceObject {
TraceObject() {}
~TraceObject();
- void Initialize(char phase, const uint8_t* category_enabled_flag,
- const char* name, const char* scope, uint64_t id,
- uint64_t bind_id, int num_args, const char** arg_names,
- const uint8_t* arg_types, const uint64_t* arg_values,
- unsigned int flags);
+ void Initialize(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, int num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values,
+ std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+ unsigned int flags);
void UpdateDuration();
- void InitializeForTesting(char phase, const uint8_t* category_enabled_flag,
- const char* name, const char* scope, uint64_t id,
- uint64_t bind_id, int num_args,
- const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values, unsigned int flags,
- int pid, int tid, int64_t ts, int64_t tts,
- uint64_t duration, uint64_t cpu_duration);
+ void InitializeForTesting(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, int num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values,
+ std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+ unsigned int flags, int pid, int tid, int64_t ts, int64_t tts,
+ uint64_t duration, uint64_t cpu_duration);
int pid() const { return pid_; }
int tid() const { return tid_; }
@@ -56,6 +67,9 @@ class TraceObject {
const char** arg_names() { return arg_names_; }
uint8_t* arg_types() { return arg_types_; }
ArgValue* arg_values() { return arg_values_; }
+ std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables() {
+ return arg_convertables_;
+ }
unsigned int flags() const { return flags_; }
int64_t ts() { return ts_; }
int64_t tts() { return tts_; }
@@ -71,10 +85,12 @@ class TraceObject {
const uint8_t* category_enabled_flag_;
uint64_t id_;
uint64_t bind_id_;
- int num_args_;
+ int num_args_ = 0;
const char* arg_names_[kTraceMaxNumArgs];
uint8_t arg_types_[kTraceMaxNumArgs];
ArgValue arg_values_[kTraceMaxNumArgs];
+ std::unique_ptr<v8::ConvertableToTraceFormat>
+ arg_convertables_[kTraceMaxNumArgs];
char* parameter_copy_storage_ = nullptr;
unsigned int flags_;
int64_t ts_;
@@ -217,21 +233,27 @@ class TracingController {
ENABLED_FOR_ETW_EXPORT = 1 << 3
};
- TracingController() {}
+ TracingController();
+ ~TracingController();
void Initialize(TraceBuffer* trace_buffer);
const uint8_t* GetCategoryGroupEnabled(const char* category_group);
static const char* GetCategoryGroupName(const uint8_t* category_enabled_flag);
- uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
- const char* name, const char* scope, uint64_t id,
- uint64_t bind_id, int32_t num_args,
- const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values, unsigned int flags);
+ uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values,
+ std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+ unsigned int flags);
void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
const char* name, uint64_t handle);
void StartTracing(TraceConfig* trace_config);
void StopTracing();
+ void AddTraceStateObserver(Platform::TraceStateObserver* observer);
+ void RemoveTraceStateObserver(Platform::TraceStateObserver* observer);
+
private:
const uint8_t* GetCategoryGroupEnabledInternal(const char* category_group);
void UpdateCategoryGroupEnabledFlag(size_t category_index);
@@ -239,6 +261,8 @@ class TracingController {
std::unique_ptr<TraceBuffer> trace_buffer_;
std::unique_ptr<TraceConfig> trace_config_;
+ std::unique_ptr<base::Mutex> mutex_;
+ std::unordered_set<Platform::TraceStateObserver*> observers_;
Mode mode_ = DISABLED;
// Disallow copy and assign
diff --git a/deps/v8/include/v8-inspector-protocol.h b/deps/v8/include/v8-inspector-protocol.h
new file mode 100644
index 0000000000..612a2ebc39
--- /dev/null
+++ b/deps/v8/include/v8-inspector-protocol.h
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_V8_INSPECTOR_PROTOCOL_H_
+#define V8_V8_INSPECTOR_PROTOCOL_H_
+
+#include "inspector/Debugger.h" // NOLINT(build/include)
+#include "inspector/Runtime.h" // NOLINT(build/include)
+#include "inspector/Schema.h" // NOLINT(build/include)
+#include "v8-inspector.h" // NOLINT(build/include)
+
+#endif // V8_V8_INSPECTOR_PROTOCOL_H_
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
new file mode 100644
index 0000000000..0855ac101b
--- /dev/null
+++ b/deps/v8/include/v8-inspector.h
@@ -0,0 +1,267 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_V8_INSPECTOR_H_
+#define V8_V8_INSPECTOR_H_
+
+#include <stdint.h>
+#include <cctype>
+
+#include <memory>
+
+#include "v8.h" // NOLINT(build/include)
+
+namespace v8_inspector {
+
+namespace protocol {
+namespace Debugger {
+namespace API {
+class SearchMatch;
+}
+}
+namespace Runtime {
+namespace API {
+class RemoteObject;
+class StackTrace;
+}
+}
+namespace Schema {
+namespace API {
+class Domain;
+}
+}
+} // namespace protocol
+
+class V8_EXPORT StringView {
+ public:
+ StringView() : m_is8Bit(true), m_length(0), m_characters8(nullptr) {}
+
+ StringView(const uint8_t* characters, size_t length)
+ : m_is8Bit(true), m_length(length), m_characters8(characters) {}
+
+ StringView(const uint16_t* characters, size_t length)
+ : m_is8Bit(false), m_length(length), m_characters16(characters) {}
+
+ bool is8Bit() const { return m_is8Bit; }
+ size_t length() const { return m_length; }
+
+ // TODO(dgozman): add DCHECK(m_is8Bit) to accessors once platform can be used
+ // here.
+ const uint8_t* characters8() const { return m_characters8; }
+ const uint16_t* characters16() const { return m_characters16; }
+
+ private:
+ bool m_is8Bit;
+ size_t m_length;
+ union {
+ const uint8_t* m_characters8;
+ const uint16_t* m_characters16;
+ };
+};
+
+class V8_EXPORT StringBuffer {
+ public:
+ virtual ~StringBuffer() {}
+ virtual const StringView& string() = 0;
+ // This method copies contents.
+ static std::unique_ptr<StringBuffer> create(const StringView&);
+};
+
+class V8_EXPORT V8ContextInfo {
+ public:
+ V8ContextInfo(v8::Local<v8::Context> context, int contextGroupId,
+ const StringView& humanReadableName)
+ : context(context),
+ contextGroupId(contextGroupId),
+ humanReadableName(humanReadableName),
+ hasMemoryOnConsole(false) {}
+
+ v8::Local<v8::Context> context;
+ // Each v8::Context is a part of a group. The group id must be non-zero.
+ int contextGroupId;
+ StringView humanReadableName;
+ StringView origin;
+ StringView auxData;
+ bool hasMemoryOnConsole;
+
+ private:
+ // Disallow copying and allocating this one.
+ enum NotNullTagEnum { NotNullLiteral };
+ void* operator new(size_t) = delete;
+ void* operator new(size_t, NotNullTagEnum, void*) = delete;
+ void* operator new(size_t, void*) = delete;
+ V8ContextInfo(const V8ContextInfo&) = delete;
+ V8ContextInfo& operator=(const V8ContextInfo&) = delete;
+};
+
+class V8_EXPORT V8StackTrace {
+ public:
+ virtual bool isEmpty() const = 0;
+ virtual StringView topSourceURL() const = 0;
+ virtual int topLineNumber() const = 0;
+ virtual int topColumnNumber() const = 0;
+ virtual StringView topScriptId() const = 0;
+ virtual StringView topFunctionName() const = 0;
+
+ virtual ~V8StackTrace() {}
+ virtual std::unique_ptr<protocol::Runtime::API::StackTrace>
+ buildInspectorObject() const = 0;
+ virtual std::unique_ptr<StringBuffer> toString() const = 0;
+
+ // Safe to pass between threads, drops async chain.
+ virtual std::unique_ptr<V8StackTrace> clone() = 0;
+};
+
+class V8_EXPORT V8InspectorSession {
+ public:
+ virtual ~V8InspectorSession() {}
+
+ // Cross-context inspectable values (DOM nodes in different worlds, etc.).
+ class V8_EXPORT Inspectable {
+ public:
+ virtual v8::Local<v8::Value> get(v8::Local<v8::Context>) = 0;
+ virtual ~Inspectable() {}
+ };
+ virtual void addInspectedObject(std::unique_ptr<Inspectable>) = 0;
+
+ // Dispatching protocol messages.
+ static bool canDispatchMethod(const StringView& method);
+ virtual void dispatchProtocolMessage(const StringView& message) = 0;
+ virtual std::unique_ptr<StringBuffer> stateJSON() = 0;
+ virtual std::vector<std::unique_ptr<protocol::Schema::API::Domain>>
+ supportedDomains() = 0;
+
+ // Debugger actions.
+ virtual void schedulePauseOnNextStatement(const StringView& breakReason,
+ const StringView& breakDetails) = 0;
+ virtual void cancelPauseOnNextStatement() = 0;
+ virtual void breakProgram(const StringView& breakReason,
+ const StringView& breakDetails) = 0;
+ virtual void setSkipAllPauses(bool) = 0;
+ virtual void resume() = 0;
+ virtual void stepOver() = 0;
+ virtual std::vector<std::unique_ptr<protocol::Debugger::API::SearchMatch>>
+ searchInTextByLines(const StringView& text, const StringView& query,
+ bool caseSensitive, bool isRegex) = 0;
+
+ // Remote objects.
+ virtual std::unique_ptr<protocol::Runtime::API::RemoteObject> wrapObject(
+ v8::Local<v8::Context>, v8::Local<v8::Value>,
+ const StringView& groupName) = 0;
+ virtual bool unwrapObject(std::unique_ptr<StringBuffer>* error,
+ const StringView& objectId, v8::Local<v8::Value>*,
+ v8::Local<v8::Context>*,
+ std::unique_ptr<StringBuffer>* objectGroup) = 0;
+ virtual void releaseObjectGroup(const StringView&) = 0;
+};
+
+enum class V8ConsoleAPIType { kClear, kDebug, kLog, kInfo, kWarning, kError };
+
+class V8_EXPORT V8InspectorClient {
+ public:
+ virtual ~V8InspectorClient() {}
+
+ virtual void runMessageLoopOnPause(int contextGroupId) {}
+ virtual void quitMessageLoopOnPause() {}
+ virtual void runIfWaitingForDebugger(int contextGroupId) {}
+
+ virtual void muteMetrics(int contextGroupId) {}
+ virtual void unmuteMetrics(int contextGroupId) {}
+
+ virtual void beginUserGesture() {}
+ virtual void endUserGesture() {}
+
+ virtual std::unique_ptr<StringBuffer> valueSubtype(v8::Local<v8::Value>) {
+ return nullptr;
+ }
+ virtual bool formatAccessorsAsProperties(v8::Local<v8::Value>) {
+ return false;
+ }
+ virtual bool isInspectableHeapObject(v8::Local<v8::Object>) { return true; }
+
+ virtual v8::Local<v8::Context> ensureDefaultContextInGroup(
+ int contextGroupId) {
+ return v8::Local<v8::Context>();
+ }
+ virtual void beginEnsureAllContextsInGroup(int contextGroupId) {}
+ virtual void endEnsureAllContextsInGroup(int contextGroupId) {}
+
+ virtual void installAdditionalCommandLineAPI(v8::Local<v8::Context>,
+ v8::Local<v8::Object>) {}
+ virtual void consoleAPIMessage(int contextGroupId, V8ConsoleAPIType,
+ const StringView& message,
+ const StringView& url, unsigned lineNumber,
+ unsigned columnNumber, V8StackTrace*) {}
+ virtual v8::MaybeLocal<v8::Value> memoryInfo(v8::Isolate*,
+ v8::Local<v8::Context>) {
+ return v8::MaybeLocal<v8::Value>();
+ }
+
+ virtual void consoleTime(const StringView& title) {}
+ virtual void consoleTimeEnd(const StringView& title) {}
+ virtual void consoleTimeStamp(const StringView& title) {}
+ virtual double currentTimeMS() { return 0; }
+ typedef void (*TimerCallback)(void*);
+ virtual void startRepeatingTimer(double, TimerCallback, void* data) {}
+ virtual void cancelTimer(void* data) {}
+
+ // TODO(dgozman): this was added to support service worker shadow page. We
+ // should not connect at all.
+ virtual bool canExecuteScripts(int contextGroupId) { return true; }
+};
+
+class V8_EXPORT V8Inspector {
+ public:
+ static std::unique_ptr<V8Inspector> create(v8::Isolate*, V8InspectorClient*);
+ virtual ~V8Inspector() {}
+
+ // Contexts instrumentation.
+ virtual void contextCreated(const V8ContextInfo&) = 0;
+ virtual void contextDestroyed(v8::Local<v8::Context>) = 0;
+ virtual void resetContextGroup(int contextGroupId) = 0;
+
+ // Various instrumentation.
+ virtual void willExecuteScript(v8::Local<v8::Context>, int scriptId) = 0;
+ virtual void didExecuteScript(v8::Local<v8::Context>) = 0;
+ virtual void idleStarted() = 0;
+ virtual void idleFinished() = 0;
+
+ // Async stack traces instrumentation.
+ virtual void asyncTaskScheduled(const StringView& taskName, void* task,
+ bool recurring) = 0;
+ virtual void asyncTaskCanceled(void* task) = 0;
+ virtual void asyncTaskStarted(void* task) = 0;
+ virtual void asyncTaskFinished(void* task) = 0;
+ virtual void allAsyncTasksCanceled() = 0;
+
+ // Exceptions instrumentation.
+ virtual unsigned exceptionThrown(
+ v8::Local<v8::Context>, const StringView& message,
+ v8::Local<v8::Value> exception, const StringView& detailedMessage,
+ const StringView& url, unsigned lineNumber, unsigned columnNumber,
+ std::unique_ptr<V8StackTrace>, int scriptId) = 0;
+ virtual void exceptionRevoked(v8::Local<v8::Context>, unsigned exceptionId,
+ const StringView& message) = 0;
+
+ // Connection.
+ class V8_EXPORT Channel {
+ public:
+ virtual ~Channel() {}
+ virtual void sendProtocolResponse(int callId,
+ const StringView& message) = 0;
+ virtual void sendProtocolNotification(const StringView& message) = 0;
+ virtual void flushProtocolNotifications() = 0;
+ };
+ virtual std::unique_ptr<V8InspectorSession> connect(
+ int contextGroupId, Channel*, const StringView& state) = 0;
+
+ // API methods.
+ virtual std::unique_ptr<V8StackTrace> createStackTrace(
+ v8::Local<v8::StackTrace>) = 0;
+ virtual std::unique_ptr<V8StackTrace> captureStackTrace(bool fullStack) = 0;
+};
+
+} // namespace v8_inspector
+
+#endif // V8_V8_INSPECTOR_H_
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 4023a5b234..e11567488b 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -7,6 +7,8 @@
#include <stddef.h>
#include <stdint.h>
+#include <memory>
+#include <string>
namespace v8 {
@@ -17,24 +19,38 @@ class Isolate;
*/
class Task {
public:
- virtual ~Task() {}
+ virtual ~Task() = default;
virtual void Run() = 0;
};
-
/**
-* An IdleTask represents a unit of work to be performed in idle time.
-* The Run method is invoked with an argument that specifies the deadline in
-* seconds returned by MonotonicallyIncreasingTime().
-* The idle task is expected to complete by this deadline.
-*/
+ * An IdleTask represents a unit of work to be performed in idle time.
+ * The Run method is invoked with an argument that specifies the deadline in
+ * seconds returned by MonotonicallyIncreasingTime().
+ * The idle task is expected to complete by this deadline.
+ */
class IdleTask {
public:
- virtual ~IdleTask() {}
+ virtual ~IdleTask() = default;
virtual void Run(double deadline_in_seconds) = 0;
};
+/**
+ * The interface represents complex arguments to trace events.
+ */
+class ConvertableToTraceFormat {
+ public:
+ virtual ~ConvertableToTraceFormat() = default;
+
+ /**
+ * Append the class info to the provided |out| string. The appended
+ * data must be a valid JSON object. Strings must be properly quoted, and
+ * escaped. There is no processing applied to the content after it is
+ * appended.
+ */
+ virtual void AppendAsTraceFormat(std::string* out) const = 0;
+};
/**
* V8 Platform abstraction layer.
@@ -54,7 +70,7 @@ class Platform {
kLongRunningTask
};
- virtual ~Platform() {}
+ virtual ~Platform() = default;
/**
* Gets the number of threads that are used to execute background tasks. Is
@@ -159,11 +175,43 @@ class Platform {
}
/**
+ * Adds a trace event to the platform tracing system. This function call is
+ * usually the result of a TRACE_* macro from trace_event_common.h when
+ * tracing and the category of the particular trace are enabled. It is not
+ * advisable to call this function on its own; it is really only meant to be
+ * used by the trace macros. The returned handle can be used by
+ * UpdateTraceEventDuration to update the duration of COMPLETE events.
+ */
+ virtual uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values,
+ std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
+ unsigned int flags) {
+ return AddTraceEvent(phase, category_enabled_flag, name, scope, id, bind_id,
+ num_args, arg_names, arg_types, arg_values, flags);
+ }
+
+ /**
* Sets the duration field of a COMPLETE trace event. It must be called with
* the handle returned from AddTraceEvent().
**/
virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
const char* name, uint64_t handle) {}
+
+ class TraceStateObserver {
+ public:
+ virtual ~TraceStateObserver() = default;
+ virtual void OnTraceEnabled() = 0;
+ virtual void OnTraceDisabled() = 0;
+ };
+
+ /** Adds tracing state change observer. */
+ virtual void AddTraceStateObserver(TraceStateObserver*) {}
+
+ /** Removes tracing state change observer. */
+ virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
};
} // namespace v8
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index bcb69f3763..6ee0340f3c 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -46,6 +46,20 @@ template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
namespace v8 {
+/**
+ * TracingCpuProfiler monitors tracing being enabled/disabled
+ * and emits CpuProfile trace events once v8.cpu_profile2 tracing category
+ * is enabled. It has no overhead unless the category is enabled.
+ */
+class V8_EXPORT TracingCpuProfiler {
+ public:
+ static std::unique_ptr<TracingCpuProfiler> Create(Isolate*);
+ virtual ~TracingCpuProfiler() = default;
+
+ protected:
+ TracingCpuProfiler() = default;
+};
+
// TickSample captures the information collected for each sample.
struct TickSample {
// Internal profiling (with --prof + tools/$OS-tick-processor) wants to
@@ -131,6 +145,13 @@ class V8_EXPORT CpuProfileNode {
/** Returns function name (empty string for anonymous functions.) */
Local<String> GetFunctionName() const;
+ /**
+ * Returns function name (empty string for anonymous functions.)
+ * The string ownership is *not* passed to the caller. It stays valid until
+ * profile is deleted. The function is thread safe.
+ */
+ const char* GetFunctionNameStr() const;
+
/** Returns id of the script where function is located. */
int GetScriptId() const;
@@ -138,6 +159,13 @@ class V8_EXPORT CpuProfileNode {
Local<String> GetScriptResourceName() const;
/**
+ * Returns resource name for script from where the function originates.
+ * The string ownership is *not* passed to the caller. It stays valid until
+ * profile is deleted. The function is thread safe.
+ */
+ const char* GetScriptResourceNameStr() const;
+
+ /**
* Returns the number, 1-based, of the line where the function originates.
* kNoLineNumberInfo if no line number information is available.
*/
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index 8133fdd49d..99c59fe302 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -206,14 +206,19 @@ class PersistentValueMapBase {
}
/**
- * Call V8::RegisterExternallyReferencedObject with the map value for given
- * key.
+ * Deprecated. Call V8::RegisterExternallyReferencedObject with the map value
+ * for given key.
+ * TODO(hlopko) Remove once migration to reporter is finished.
*/
- void RegisterExternallyReferencedObject(K& key) {
+ void RegisterExternallyReferencedObject(K& key) {}
+
+ /**
+ * Use EmbedderReachableReferenceReporter with the map value for given key.
+ */
+ void RegisterExternallyReferencedObject(
+ EmbedderReachableReferenceReporter* reporter, K& key) {
DCHECK(Contains(key));
- V8::RegisterExternallyReferencedObject(
- reinterpret_cast<internal::Object**>(FromVal(Traits::Get(&impl_, key))),
- reinterpret_cast<internal::Isolate*>(GetIsolate()));
+ reporter->ReportExternalReference(FromVal(Traits::Get(&impl_, key)));
}
/**
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index a39676e377..b216cf04c3 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 5
-#define V8_MINOR_VERSION 4
-#define V8_BUILD_NUMBER 500
-#define V8_PATCH_LEVEL 46
+#define V8_MINOR_VERSION 5
+#define V8_BUILD_NUMBER 372
+#define V8_PATCH_LEVEL 40
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index d7e39adbae..36edf5334a 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -51,7 +51,7 @@
#else // V8_OS_WIN
// Setup for Linux shared library export.
-#if V8_HAS_ATTRIBUTE_VISIBILITY && defined(V8_SHARED)
+#if V8_HAS_ATTRIBUTE_VISIBILITY
# ifdef BUILDING_V8_SHARED
# define V8_EXPORT __attribute__ ((visibility("default")))
# else
@@ -70,6 +70,7 @@ namespace v8 {
class AccessorSignature;
class Array;
+class ArrayBuffer;
class Boolean;
class BooleanObject;
class Context;
@@ -95,6 +96,7 @@ class ObjectTemplate;
class Platform;
class Primitive;
class Promise;
+class PropertyDescriptor;
class Proxy;
class RawOperationDescriptor;
class Script;
@@ -341,7 +343,7 @@ class Local {
#if !defined(V8_IMMINENT_DEPRECATION_WARNINGS)
-// Local is an alias for Local for historical reasons.
+// Handle is an alias for Local for historical reasons.
template <class T>
using Handle = Local<T>;
#endif
@@ -466,6 +468,16 @@ class WeakCallbackInfo {
enum class WeakCallbackType { kParameter, kInternalFields, kFinalizer };
/**
+ * A reporter class that embedder will use to report reachable references found
+ * by EmbedderHeapTracer.
+ */
+class V8_EXPORT EmbedderReachableReferenceReporter {
+ public:
+ virtual void ReportExternalReference(Value* object) = 0;
+ virtual ~EmbedderReachableReferenceReporter() = default;
+};
+
+/**
* An object reference that is independent of any handle scope. Where
* a Local handle only lives as long as the HandleScope in which it was
* allocated, a PersistentBase handle remains valid until it is explicitly
@@ -562,11 +574,18 @@ template <class T> class PersistentBase {
V8_INLINE void ClearWeak() { ClearWeak<void>(); }
/**
+ * Deprecated.
+ * TODO(hlopko): remove once migration to reporter is finished.
+ */
+ V8_INLINE void RegisterExternalReference(Isolate* isolate) const {}
+
+ /**
* Allows the embedder to tell the v8 garbage collector that a certain object
* is alive. Only allowed when the embedder is asked to trace its heap by
* EmbedderHeapTracer.
*/
- V8_INLINE void RegisterExternalReference(Isolate* isolate) const;
+ V8_INLINE void RegisterExternalReference(
+ EmbedderReachableReferenceReporter* reporter) const;
/**
* Marks the reference to this object independent. Garbage collector is free
@@ -615,6 +634,9 @@ template <class T> class PersistentBase {
*/
V8_INLINE uint16_t WrapperClassId() const;
+ PersistentBase(const PersistentBase& other) = delete; // NOLINT
+ void operator=(const PersistentBase&) = delete;
+
private:
friend class Isolate;
friend class Utils;
@@ -630,8 +652,6 @@ template <class T> class PersistentBase {
friend class Object;
explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
- PersistentBase(const PersistentBase& other) = delete; // NOLINT
- void operator=(const PersistentBase&) = delete;
V8_INLINE static T* New(Isolate* isolate, T* that);
T* val_;
@@ -835,11 +855,12 @@ class Global : public PersistentBase<T> {
*/
typedef void MoveOnlyTypeForCPP03;
+ Global(const Global&) = delete;
+ void operator=(const Global&) = delete;
+
private:
template <class F>
friend class ReturnValue;
- Global(const Global&) = delete;
- void operator=(const Global&) = delete;
V8_INLINE T* operator*() const { return this->val_; }
};
@@ -878,6 +899,11 @@ class V8_EXPORT HandleScope {
return reinterpret_cast<Isolate*>(isolate_);
}
+ HandleScope(const HandleScope&) = delete;
+ void operator=(const HandleScope&) = delete;
+ void* operator new(size_t size) = delete;
+ void operator delete(void*, size_t) = delete;
+
protected:
V8_INLINE HandleScope() {}
@@ -891,13 +917,6 @@ class V8_EXPORT HandleScope {
static internal::Object** CreateHandle(internal::HeapObject* heap_object,
internal::Object* value);
- // Make it hard to create heap-allocated or illegal handle scopes by
- // disallowing certain operations.
- HandleScope(const HandleScope&);
- void operator=(const HandleScope&);
- void* operator new(size_t size);
- void operator delete(void*, size_t);
-
internal::Isolate* isolate_;
internal::Object** prev_next_;
internal::Object** prev_limit_;
@@ -932,16 +951,13 @@ class V8_EXPORT EscapableHandleScope : public HandleScope {
return Local<T>(reinterpret_cast<T*>(slot));
}
+ EscapableHandleScope(const EscapableHandleScope&) = delete;
+ void operator=(const EscapableHandleScope&) = delete;
+ void* operator new(size_t size) = delete;
+ void operator delete(void*, size_t) = delete;
+
private:
internal::Object** Escape(internal::Object** escape_value);
-
- // Make it hard to create heap-allocated or illegal handle scopes by
- // disallowing certain operations.
- EscapableHandleScope(const EscapableHandleScope&);
- void operator=(const EscapableHandleScope&);
- void* operator new(size_t size);
- void operator delete(void*, size_t);
-
internal::Object** escape_slot_;
};
@@ -950,14 +966,12 @@ class V8_EXPORT SealHandleScope {
SealHandleScope(Isolate* isolate);
~SealHandleScope();
- private:
- // Make it hard to create heap-allocated or illegal handle scopes by
- // disallowing certain operations.
- SealHandleScope(const SealHandleScope&);
- void operator=(const SealHandleScope&);
- void* operator new(size_t size);
- void operator delete(void*, size_t);
+ SealHandleScope(const SealHandleScope&) = delete;
+ void operator=(const SealHandleScope&) = delete;
+ void* operator new(size_t size) = delete;
+ void operator delete(void*, size_t) = delete;
+ private:
internal::Isolate* const isolate_;
internal::Object** prev_limit_;
int prev_sealed_level_;
@@ -1073,6 +1087,47 @@ class V8_EXPORT UnboundScript {
static const int kNoScriptId = 0;
};
+/**
+ * This is an unfinished experimental feature, and is only exposed
+ * here for internal testing purposes. DO NOT USE.
+ *
+ * A compiled JavaScript module.
+ */
+class V8_EXPORT Module {
+ public:
+ /**
+ * Returns the number of modules requested by this module.
+ */
+ int GetModuleRequestsLength() const;
+
+ /**
+ * Returns the ith module specifier in this module.
+ * i must be < GetModuleRequestsLength() and >= 0.
+ */
+ Local<String> GetModuleRequest(int i) const;
+
+ void SetEmbedderData(Local<Value> data);
+ Local<Value> GetEmbedderData() const;
+
+ typedef MaybeLocal<Module> (*ResolveCallback)(Local<Context> context,
+ Local<String> specifier,
+ Local<Module> referrer,
+ Local<Value> data);
+
+ /**
+ * ModuleDeclarationInstantiation
+ *
+ * Returns false if an exception occurred during instantiation.
+ */
+ V8_WARN_UNUSED_RESULT bool Instantiate(
+ Local<Context> context, ResolveCallback callback,
+ Local<Value> callback_data = Local<Value>());
+
+ /**
+ * ModuleEvaluation
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> Evaluate(Local<Context> context);
+};
/**
* A compiled JavaScript script, tied to a Context which was active when the
@@ -1148,10 +1203,9 @@ class V8_EXPORT ScriptCompiler {
bool rejected;
BufferPolicy buffer_policy;
- private:
- // Prevent copying. Not implemented.
- CachedData(const CachedData&);
- CachedData& operator=(const CachedData&);
+ // Prevent copying.
+ CachedData(const CachedData&) = delete;
+ CachedData& operator=(const CachedData&) = delete;
};
/**
@@ -1171,11 +1225,12 @@ class V8_EXPORT ScriptCompiler {
// alive.
V8_INLINE const CachedData* GetCachedData() const;
+ // Prevent copying.
+ Source(const Source&) = delete;
+ Source& operator=(const Source&) = delete;
+
private:
friend class ScriptCompiler;
- // Prevent copying. Not implemented.
- Source(const Source&);
- Source& operator=(const Source&);
Local<String> source_string;
@@ -1258,11 +1313,11 @@ class V8_EXPORT ScriptCompiler {
internal::StreamedSource* impl() const { return impl_; }
- private:
- // Prevent copying. Not implemented.
- StreamedSource(const StreamedSource&);
- StreamedSource& operator=(const StreamedSource&);
+ // Prevent copying.
+ StreamedSource(const StreamedSource&) = delete;
+ StreamedSource& operator=(const StreamedSource&) = delete;
+ private:
internal::StreamedSource* impl_;
};
@@ -1376,18 +1431,17 @@ class V8_EXPORT ScriptCompiler {
static uint32_t CachedDataVersionTag();
/**
- * Compile an ES6 module.
- *
* This is an unfinished experimental feature, and is only exposed
- * here for internal testing purposes.
- * Only parsing works at the moment. Do not use.
+ * here for internal testing purposes. DO NOT USE.
*
- * TODO(adamk): Script is likely the wrong return value for this;
- * should return some new Module type.
+ * Compile an ES module, returning a Module that encapsulates
+ * the compiled code.
+ *
+ * Corresponds to the ParseModule abstract operation in the
+ * ECMAScript specification.
*/
- static V8_WARN_UNUSED_RESULT MaybeLocal<Script> CompileModule(
- Local<Context> context, Source* source,
- CompileOptions options = kNoCompileOptions);
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Module> CompileModule(
+ Isolate* isolate, Source* source);
/**
* Compile a function for a given context. This is equivalent to running
@@ -1664,6 +1718,174 @@ class V8_EXPORT JSON {
Local<String> gap = Local<String>());
};
+/**
+ * Value serialization compatible with the HTML structured clone algorithm.
+ * The format is backward-compatible (i.e. safe to store to disk).
+ *
+ * WARNING: This API is under development, and changes (including incompatible
+ * changes to the API or wire format) may occur without notice until this
+ * warning is removed.
+ */
+class V8_EXPORT ValueSerializer {
+ public:
+ class V8_EXPORT Delegate {
+ public:
+ virtual ~Delegate() {}
+
+ /*
+ * Handles the case where a DataCloneError would be thrown in the structured
+ * clone spec. Other V8 embedders may throw some other appropriate exception
+ * type.
+ */
+ virtual void ThrowDataCloneError(Local<String> message) = 0;
+
+ /*
+ * The embedder overrides this method to write some kind of host object, if
+ * possible. If not, a suitable exception should be thrown and
+ * Nothing<bool>() returned.
+ */
+ virtual Maybe<bool> WriteHostObject(Isolate* isolate, Local<Object> object);
+ };
+
+ explicit ValueSerializer(Isolate* isolate);
+ ValueSerializer(Isolate* isolate, Delegate* delegate);
+ ~ValueSerializer();
+
+ /*
+ * Writes out a header, which includes the format version.
+ */
+ void WriteHeader();
+
+ /*
+ * Serializes a JavaScript value into the buffer.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> WriteValue(Local<Context> context,
+ Local<Value> value);
+
+ /*
+ * Returns the stored data. This serializer should not be used once the buffer
+ * is released. The contents are undefined if a previous write has failed.
+ */
+ std::vector<uint8_t> ReleaseBuffer();
+
+ /*
+ * Marks an ArrayBuffer as havings its contents transferred out of band.
+ * Pass the corresponding JSArrayBuffer in the deserializing context to
+ * ValueDeserializer::TransferArrayBuffer.
+ */
+ void TransferArrayBuffer(uint32_t transfer_id,
+ Local<ArrayBuffer> array_buffer);
+
+ /*
+ * Similar to TransferArrayBuffer, but for SharedArrayBuffer.
+ */
+ void TransferSharedArrayBuffer(uint32_t transfer_id,
+ Local<SharedArrayBuffer> shared_array_buffer);
+
+ /*
+ * Write raw data in various common formats to the buffer.
+ * Note that integer types are written in base-128 varint format, not with a
+ * binary copy. For use during an override of Delegate::WriteHostObject.
+ */
+ void WriteUint32(uint32_t value);
+ void WriteUint64(uint64_t value);
+ void WriteDouble(double value);
+ void WriteRawBytes(const void* source, size_t length);
+
+ private:
+ ValueSerializer(const ValueSerializer&) = delete;
+ void operator=(const ValueSerializer&) = delete;
+
+ struct PrivateData;
+ PrivateData* private_;
+};
+
+/**
+ * Deserializes values from data written with ValueSerializer, or a compatible
+ * implementation.
+ *
+ * WARNING: This API is under development, and changes (including incompatible
+ * changes to the API or wire format) may occur without notice until this
+ * warning is removed.
+ */
+class V8_EXPORT ValueDeserializer {
+ public:
+ class V8_EXPORT Delegate {
+ public:
+ virtual ~Delegate() {}
+
+ /*
+ * The embedder overrides this method to read some kind of host object, if
+ * possible. If not, a suitable exception should be thrown and
+ * MaybeLocal<Object>() returned.
+ */
+ virtual MaybeLocal<Object> ReadHostObject(Isolate* isolate);
+ };
+
+ ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size);
+ ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size,
+ Delegate* delegate);
+ ~ValueDeserializer();
+
+ /*
+ * Reads and validates a header (including the format version).
+ * May, for example, reject an invalid or unsupported wire format.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> ReadHeader(Local<Context> context);
+ V8_DEPRECATE_SOON("Use Local<Context> version", Maybe<bool> ReadHeader());
+
+ /*
+ * Deserializes a JavaScript value from the buffer.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> ReadValue(Local<Context> context);
+
+ /*
+ * Accepts the array buffer corresponding to the one passed previously to
+ * ValueSerializer::TransferArrayBuffer.
+ */
+ void TransferArrayBuffer(uint32_t transfer_id,
+ Local<ArrayBuffer> array_buffer);
+
+ /*
+ * Similar to TransferArrayBuffer, but for SharedArrayBuffer.
+ * transfer_id exists in the same namespace as unshared ArrayBuffer objects.
+ */
+ void TransferSharedArrayBuffer(uint32_t transfer_id,
+ Local<SharedArrayBuffer> shared_array_buffer);
+
+ /*
+ * Must be called before ReadHeader to enable support for reading the legacy
+ * wire format (i.e., which predates this being shipped).
+ *
+ * Don't use this unless you need to read data written by previous versions of
+ * blink::ScriptValueSerializer.
+ */
+ void SetSupportsLegacyWireFormat(bool supports_legacy_wire_format);
+
+ /*
+ * Reads the underlying wire format version. Likely mostly to be useful to
+ * legacy code reading old wire format versions. Must be called after
+ * ReadHeader.
+ */
+ uint32_t GetWireFormatVersion() const;
+
+ /*
+ * Reads raw data in various common formats to the buffer.
+ * Note that integer types are read in base-128 varint format, not with a
+ * binary copy. For use during an override of Delegate::ReadHostObject.
+ */
+ V8_WARN_UNUSED_RESULT bool ReadUint32(uint32_t* value);
+ V8_WARN_UNUSED_RESULT bool ReadUint64(uint64_t* value);
+ V8_WARN_UNUSED_RESULT bool ReadDouble(double* value);
+ V8_WARN_UNUSED_RESULT bool ReadRawBytes(size_t length, const void** data);
+
+ private:
+ ValueDeserializer(const ValueDeserializer&) = delete;
+ void operator=(const ValueDeserializer&) = delete;
+
+ struct PrivateData;
+ PrivateData* private_;
+};
/**
* A map whose keys are referenced weakly. It is similar to JavaScript WeakMap
@@ -1811,6 +2033,11 @@ class V8_EXPORT Value : public Data {
bool IsRegExp() const;
/**
+ * Returns true if this value is an async function.
+ */
+ bool IsAsyncFunction() const;
+
+ /**
* Returns true if this value is a Generator function.
* This is an experimental feature.
*/
@@ -2207,11 +2434,11 @@ class V8_EXPORT String : public Name {
*/
virtual void Dispose() { delete this; }
- private:
// Disallow copying and assigning.
- ExternalStringResourceBase(const ExternalStringResourceBase&);
- void operator=(const ExternalStringResourceBase&);
+ ExternalStringResourceBase(const ExternalStringResourceBase&) = delete;
+ void operator=(const ExternalStringResourceBase&) = delete;
+ private:
friend class v8::internal::Heap;
};
@@ -2413,13 +2640,14 @@ class V8_EXPORT String : public Name {
char* operator*() { return str_; }
const char* operator*() const { return str_; }
int length() const { return length_; }
+
+ // Disallow copying and assigning.
+ Utf8Value(const Utf8Value&) = delete;
+ void operator=(const Utf8Value&) = delete;
+
private:
char* str_;
int length_;
-
- // Disallow copying and assigning.
- Utf8Value(const Utf8Value&);
- void operator=(const Utf8Value&);
};
/**
@@ -2435,13 +2663,14 @@ class V8_EXPORT String : public Name {
uint16_t* operator*() { return str_; }
const uint16_t* operator*() const { return str_; }
int length() const { return length_; }
+
+ // Disallow copying and assigning.
+ Value(const Value&) = delete;
+ void operator=(const Value&) = delete;
+
private:
uint16_t* str_;
int length_;
-
- // Disallow copying and assigning.
- Value(const Value&);
- void operator=(const Value&);
};
private:
@@ -2575,11 +2804,17 @@ class V8_EXPORT Uint32 : public Integer {
static void CheckCast(v8::Value* obj);
};
-
+/**
+ * PropertyAttribute.
+ */
enum PropertyAttribute {
- None = 0,
- ReadOnly = 1 << 0,
- DontEnum = 1 << 1,
+ /** None. **/
+ None = 0,
+ /** ReadOnly, i.e., not writable. **/
+ ReadOnly = 1 << 0,
+ /** DontEnum, i.e., not enumerable. **/
+ DontEnum = 1 << 1,
+ /** DontDelete, i.e., not configurable. **/
DontDelete = 1 << 2
};
@@ -2693,6 +2928,22 @@ class V8_EXPORT Object : public Value {
Local<Context> context, Local<Name> key, Local<Value> value,
PropertyAttribute attributes = None);
+ // Implements Object.DefineProperty(O, P, Attributes), see Ecma-262 19.1.2.4.
+ //
+ // The defineProperty function is used to add an own property or
+ // update the attributes of an existing own property of an object.
+ //
+ // Both data and accessor descriptors can be used.
+ //
+ // In general, CreateDataProperty is faster, however, does not allow
+ // for specifying attributes or an accessor descriptor.
+ //
+ // The PropertyDescriptor can change when redefining a property.
+ //
+ // Returns true on success.
+ V8_WARN_UNUSED_RESULT Maybe<bool> DefineProperty(
+ Local<Context> context, Local<Name> key, PropertyDescriptor& descriptor);
+
// Sets an own property on this object bypassing interceptors and
// overriding accessors or read-only properties.
//
@@ -2736,6 +2987,21 @@ class V8_EXPORT Object : public Value {
Local<Context> context, Local<String> key);
V8_DEPRECATE_SOON("Use maybe version", bool Has(Local<Value> key));
+ /**
+ * Object::Has() calls the abstract operation HasProperty(O, P) described
+ * in ECMA-262, 7.3.10. Has() returns
+ * true, if the object has the property, either own or on the prototype chain.
+ * Interceptors, i.e., PropertyQueryCallbacks, are called if present.
+ *
+ * Has() has the same side effects as JavaScript's `variable in object`.
+ * For example, calling Has() on a revoked proxy will throw an exception.
+ *
+ * \note Has() converts the key to a name, which possibly calls back into
+ * JavaScript.
+ *
+ * See also v8::Object::HasOwnProperty() and
+ * v8::Object::HasRealNamedProperty().
+ */
V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
Local<Value> key);
@@ -2900,12 +3166,31 @@ class V8_EXPORT Object : public Value {
// Testers for local properties.
V8_DEPRECATED("Use maybe version", bool HasOwnProperty(Local<String> key));
+
+ /**
+ * HasOwnProperty() is like JavaScript's Object.prototype.hasOwnProperty().
+ *
+ * See also v8::Object::Has() and v8::Object::HasRealNamedProperty().
+ */
V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
Local<Name> key);
V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
uint32_t index);
V8_DEPRECATE_SOON("Use maybe version",
bool HasRealNamedProperty(Local<String> key));
+ /**
+ * Use HasRealNamedProperty() if you want to check if an object has an own
+ * property without causing side effects, i.e., without calling interceptors.
+ *
+ * This function is similar to v8::Object::HasOwnProperty(), but it does not
+ * call interceptors.
+ *
+ * \note Consider using non-masking interceptors, i.e., the interceptors are
+ * not called if the receiver has the real named property. See
+ * `v8::PropertyHandlerFlags::kNonMasking`.
+ *
+ * See also v8::Object::Has().
+ */
V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedProperty(Local<Context> context,
Local<Name> key);
V8_DEPRECATE_SOON("Use maybe version",
@@ -2988,6 +3273,12 @@ class V8_EXPORT Object : public Value {
*/
Local<Context> CreationContext();
+ /** Same as above, but works for Persistents */
+ V8_INLINE static Local<Context> CreationContext(
+ const PersistentBase<Object>& object) {
+ return object.val_->CreationContext();
+ }
+
/**
* Checks whether a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
@@ -3236,12 +3527,91 @@ class FunctionCallbackInfo {
template<typename T>
class PropertyCallbackInfo {
public:
+ /**
+ * \return The isolate of the property access.
+ */
V8_INLINE Isolate* GetIsolate() const;
+
+ /**
+ * \return The data set in the configuration, i.e., in
+ * `NamedPropertyHandlerConfiguration` or
+ * `IndexedPropertyHandlerConfiguration.`
+ */
V8_INLINE Local<Value> Data() const;
+
+ /**
+ * \return The receiver. In many cases, this is the object on which the
+ * property access was intercepted. When using
+ * `Reflect.Get`, `Function.prototype.call`, or similar functions, it is the
+ * object passed in as receiver or thisArg.
+ *
+ * \code
+ * void GetterCallback(Local<Name> name,
+ * const v8::PropertyCallbackInfo<v8::Value>& info) {
+ * auto context = info.GetIsolate()->GetCurrentContext();
+ *
+ * v8::Local<v8::Value> a_this =
+ * info.This()
+ * ->GetRealNamedProperty(context, v8_str("a"))
+ * .ToLocalChecked();
+ * v8::Local<v8::Value> a_holder =
+ * info.Holder()
+ * ->GetRealNamedProperty(context, v8_str("a"))
+ * .ToLocalChecked();
+ *
+ * CHECK(v8_str("r")->Equals(context, a_this).FromJust());
+ * CHECK(v8_str("obj")->Equals(context, a_holder).FromJust());
+ *
+ * info.GetReturnValue().Set(name);
+ * }
+ *
+ * v8::Local<v8::FunctionTemplate> templ =
+ * v8::FunctionTemplate::New(isolate);
+ * templ->InstanceTemplate()->SetHandler(
+ * v8::NamedPropertyHandlerConfiguration(GetterCallback));
+ * LocalContext env;
+ * env->Global()
+ * ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ * .ToLocalChecked()
+ * ->NewInstance(env.local())
+ * .ToLocalChecked())
+ * .FromJust();
+ *
+ * CompileRun("obj.a = 'obj'; var r = {a: 'r'}; Reflect.get(obj, 'x', r)");
+ * \endcode
+ */
V8_INLINE Local<Object> This() const;
+
+ /**
+ * \return The object in the prototype chain of the receiver that has the
+ * interceptor. Suppose you have `x` and its prototype is `y`, and `y`
+ * has an interceptor. Then `info.This()` is `x` and `info.Holder()` is `y`.
+ * The Holder() could be a hidden object (the global object, rather
+ * than the global proxy).
+ *
+ * \note For security reasons, do not pass the object back into the runtime.
+ */
V8_INLINE Local<Object> Holder() const;
+
+ /**
+ * \return The return value of the callback.
+ * Can be changed by calling Set().
+ * \code
+ * info.GetReturnValue().Set(...)
+ * \endcode
+ *
+ */
V8_INLINE ReturnValue<T> GetReturnValue() const;
+
+ /**
+ * \return True if the intercepted function should throw if an error occurs.
+ * Usually, `true` corresponds to `'use strict'`.
+ *
+ * \note Always `false` when intercepting `Reflect.Set()`
+ * independent of the language mode.
+ */
V8_INLINE bool ShouldThrowOnError() const;
+
// This shouldn't be public, but the arm compiler needs it.
static const int kArgsLength = 7;
@@ -3431,6 +3801,78 @@ class V8_EXPORT Promise : public Object {
static void CheckCast(Value* obj);
};
+/**
+ * An instance of a Property Descriptor, see Ecma-262 6.2.4.
+ *
+ * Properties in a descriptor are present or absent. If you do not set
+ * `enumerable`, `configurable`, and `writable`, they are absent. If `value`,
+ * `get`, or `set` are absent, but you must specify them in the constructor, use
+ * empty handles.
+ *
+ * Accessors `get` and `set` must be callable or undefined if they are present.
+ *
+ * \note Only query properties if they are present, i.e., call `x()` only if
+ * `has_x()` returns true.
+ *
+ * \code
+ * // var desc = {writable: false}
+ * v8::PropertyDescriptor d(Local<Value>()), false);
+ * d.value(); // error, value not set
+ * if (d.has_writable()) {
+ * d.writable(); // false
+ * }
+ *
+ * // var desc = {value: undefined}
+ * v8::PropertyDescriptor d(v8::Undefined(isolate));
+ *
+ * // var desc = {get: undefined}
+ * v8::PropertyDescriptor d(v8::Undefined(isolate), Local<Value>()));
+ * \endcode
+ */
+class V8_EXPORT PropertyDescriptor {
+ public:
+ // GenericDescriptor
+ PropertyDescriptor();
+
+ // DataDescriptor
+ PropertyDescriptor(Local<Value> value);
+
+ // DataDescriptor with writable property
+ PropertyDescriptor(Local<Value> value, bool writable);
+
+ // AccessorDescriptor
+ PropertyDescriptor(Local<Value> get, Local<Value> set);
+
+ ~PropertyDescriptor();
+
+ Local<Value> value() const;
+ bool has_value() const;
+
+ Local<Value> get() const;
+ bool has_get() const;
+ Local<Value> set() const;
+ bool has_set() const;
+
+ void set_enumerable(bool enumerable);
+ bool enumerable() const;
+ bool has_enumerable() const;
+
+ void set_configurable(bool configurable);
+ bool configurable() const;
+ bool has_configurable() const;
+
+ bool writable() const;
+ bool has_writable() const;
+
+ struct PrivateData;
+ PrivateData* get_private() const { return private_; }
+
+ PropertyDescriptor(const PropertyDescriptor&) = delete;
+ void operator=(const PropertyDescriptor&) = delete;
+
+ private:
+ PrivateData* private_;
+};
/**
* An instance of the built-in Proxy constructor (ECMA-262, 6th Edition,
@@ -3444,7 +3886,7 @@ class V8_EXPORT Proxy : public Object {
void Revoke();
/**
- * Creates a new empty Map.
+ * Creates a new Proxy for the target object.
*/
static MaybeLocal<Proxy> New(Local<Context> context,
Local<Object> local_target,
@@ -4296,36 +4738,115 @@ typedef void (*NamedPropertyEnumeratorCallback)(
// TODO(dcarney): Deprecate and remove previous typedefs, and replace
// GenericNamedPropertyFooCallback with just NamedPropertyFooCallback.
+
/**
- * GenericNamedProperty[Getter|Setter] are used as interceptors on object.
- * See ObjectTemplate::SetNamedPropertyHandler.
+ * Interceptor for get requests on an object.
+ *
+ * Use `info.GetReturnValue().Set()` to set the return value of the
+ * intercepted get request.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict`' mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * \code
+ * void GetterCallback(
+ * Local<Name> name,
+ * const v8::PropertyCallbackInfo<v8::Value>& info) {
+ * info.GetReturnValue().Set(v8_num(42));
+ * }
+ *
+ * v8::Local<v8::FunctionTemplate> templ =
+ * v8::FunctionTemplate::New(isolate);
+ * templ->InstanceTemplate()->SetHandler(
+ * v8::NamedPropertyHandlerConfiguration(GetterCallback));
+ * LocalContext env;
+ * env->Global()
+ * ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ * .ToLocalChecked()
+ * ->NewInstance(env.local())
+ * .ToLocalChecked())
+ * .FromJust();
+ * v8::Local<v8::Value> result = CompileRun("obj.a = 17; obj.a");
+ * CHECK(v8_num(42)->Equals(env.local(), result).FromJust());
+ * \endcode
+ *
+ * See also `ObjectTemplate::SetHandler`.
*/
typedef void (*GenericNamedPropertyGetterCallback)(
Local<Name> property, const PropertyCallbackInfo<Value>& info);
-
/**
- * Returns the value if the setter intercepts the request.
- * Otherwise, returns an empty handle.
+ * Interceptor for set requests on an object.
+ *
+ * Use `info.GetReturnValue()` to indicate whether the request was intercepted
+ * or not. If the setter successfully intercepts the request, i.e., if the
+ * request should not be further executed, call
+ * `info.GetReturnValue().Set(value)`. If the setter
+ * did not intercept the request, i.e., if the request should be handled as
+ * if no interceptor is present, do not not call `Set()`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param value The value which the property will have if the request
+ * is not intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * See also
+ * `ObjectTemplate::SetHandler.`
*/
typedef void (*GenericNamedPropertySetterCallback)(
Local<Name> property, Local<Value> value,
const PropertyCallbackInfo<Value>& info);
-
/**
- * Returns a non-empty handle if the interceptor intercepts the request.
- * The result is an integer encoding property attributes (like v8::None,
- * v8::DontEnum, etc.)
+ * Intercepts all requests that query the attributes of the
+ * property, e.g., getOwnPropertyDescriptor(), propertyIsEnumerable(), and
+ * defineProperty().
+ *
+ * Use `info.GetReturnValue().Set(value)` to set the property attributes. The
+ * value is an interger encoding a `v8::PropertyAttribute`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * \note Some functions query the property attributes internally, even though
+ * they do not return the attributes. For example, `hasOwnProperty()` can
+ * trigger this interceptor depending on the state of the object.
+ *
+ * See also
+ * `ObjectTemplate::SetHandler.`
*/
typedef void (*GenericNamedPropertyQueryCallback)(
Local<Name> property, const PropertyCallbackInfo<Integer>& info);
-
/**
- * Returns a non-empty handle if the deleter intercepts the request.
- * The return value is true if the property could be deleted and false
- * otherwise.
+ * Interceptor for delete requests on an object.
+ *
+ * Use `info.GetReturnValue()` to indicate whether the request was intercepted
+ * or not. If the deleter successfully intercepts the request, i.e., if the
+ * request should not be further executed, call
+ * `info.GetReturnValue().Set(value)` with a boolean `value`. The `value` is
+ * used as the return value of `delete`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * \note If you need to mimic the behavior of `delete`, i.e., throw in strict
+ * mode instead of returning false, use `info.ShouldThrowOnError()` to determine
+ * if you are in strict mode.
+ *
+ * See also `ObjectTemplate::SetHandler.`
*/
typedef void (*GenericNamedPropertyDeleterCallback)(
Local<Name> property, const PropertyCallbackInfo<Boolean>& info);
@@ -4338,52 +4859,99 @@ typedef void (*GenericNamedPropertyDeleterCallback)(
typedef void (*GenericNamedPropertyEnumeratorCallback)(
const PropertyCallbackInfo<Array>& info);
+/**
+ * Interceptor for defineProperty requests on an object.
+ *
+ * Use `info.GetReturnValue()` to indicate whether the request was intercepted
+ * or not. If the definer successfully intercepts the request, i.e., if the
+ * request should not be further executed, call
+ * `info.GetReturnValue().Set(value)`. If the definer
+ * did not intercept the request, i.e., if the request should be handled as
+ * if no interceptor is present, do not not call `Set()`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param desc The property descriptor which is used to define the
+ * property if the request is not intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * See also `ObjectTemplate::SetHandler`.
+ */
+typedef void (*GenericNamedPropertyDefinerCallback)(
+ Local<Name> property, const PropertyDescriptor& desc,
+ const PropertyCallbackInfo<Value>& info);
+
+/**
+ * Interceptor for getOwnPropertyDescriptor requests on an object.
+ *
+ * Use `info.GetReturnValue().Set()` to set the return value of the
+ * intercepted request. The return value must be an object that
+ * can be converted to a PropertyDescriptor, e.g., a `v8::value` returned from
+ * `v8::Object::getOwnPropertyDescriptor`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * \note If GetOwnPropertyDescriptor is intercepted, it will
+ * always return true, i.e., indicate that the property was found.
+ *
+ * See also `ObjectTemplate::SetHandler`.
+ */
+typedef void (*GenericNamedPropertyDescriptorCallback)(
+ Local<Name> property, const PropertyCallbackInfo<Value>& info);
/**
- * Returns the value of the property if the getter intercepts the
- * request. Otherwise, returns an empty handle.
+ * See `v8::GenericNamedPropertyGetterCallback`.
*/
typedef void (*IndexedPropertyGetterCallback)(
uint32_t index,
const PropertyCallbackInfo<Value>& info);
-
/**
- * Returns the value if the setter intercepts the request.
- * Otherwise, returns an empty handle.
+ * See `v8::GenericNamedPropertySetterCallback`.
*/
typedef void (*IndexedPropertySetterCallback)(
uint32_t index,
Local<Value> value,
const PropertyCallbackInfo<Value>& info);
-
/**
- * Returns a non-empty handle if the interceptor intercepts the request.
- * The result is an integer encoding property attributes.
+ * See `v8::GenericNamedPropertyQueryCallback`.
*/
typedef void (*IndexedPropertyQueryCallback)(
uint32_t index,
const PropertyCallbackInfo<Integer>& info);
-
/**
- * Returns a non-empty handle if the deleter intercepts the request.
- * The return value is true if the property could be deleted and false
- * otherwise.
+ * See `v8::GenericNamedPropertyDeleterCallback`.
*/
typedef void (*IndexedPropertyDeleterCallback)(
uint32_t index,
const PropertyCallbackInfo<Boolean>& info);
-
/**
- * Returns an array containing the indices of the properties the
- * indexed property getter intercepts.
+ * See `v8::GenericNamedPropertyEnumeratorCallback`.
*/
typedef void (*IndexedPropertyEnumeratorCallback)(
const PropertyCallbackInfo<Array>& info);
+/**
+ * See `v8::GenericNamedPropertyDefinerCallback`.
+ */
+typedef void (*IndexedPropertyDefinerCallback)(
+ uint32_t index, const PropertyDescriptor& desc,
+ const PropertyCallbackInfo<Value>& info);
+
+/**
+ * See `v8::GenericNamedPropertyDescriptorCallback`.
+ */
+typedef void (*IndexedPropertyDescriptorCallback)(
+ uint32_t index, const PropertyCallbackInfo<Value>& info);
/**
* Access type specification.
@@ -4617,23 +5185,37 @@ class V8_EXPORT FunctionTemplate : public Template {
friend class ObjectTemplate;
};
-
+/**
+ * Configuration flags for v8::NamedPropertyHandlerConfiguration or
+ * v8::IndexedPropertyHandlerConfiguration.
+ */
enum class PropertyHandlerFlags {
+ /**
+ * None.
+ */
kNone = 0,
- // See ALL_CAN_READ above.
+
+ /**
+ * See ALL_CAN_READ above.
+ */
kAllCanRead = 1,
- // Will not call into interceptor for properties on the receiver or prototype
- // chain. Currently only valid for named interceptors.
+
+ /** Will not call into interceptor for properties on the receiver or prototype
+ * chain, i.e., only call into interceptor for properties that do not exist.
+ * Currently only valid for named interceptors.
+ */
kNonMasking = 1 << 1,
- // Will not call into interceptor for symbol lookup. Only meaningful for
- // named interceptors.
+
+ /**
+ * Will not call into interceptor for symbol lookup. Only meaningful for
+ * named interceptors.
+ */
kOnlyInterceptStrings = 1 << 2,
};
-
struct NamedPropertyHandlerConfiguration {
NamedPropertyHandlerConfiguration(
- /** Note: getter is required **/
+ /** Note: getter is required */
GenericNamedPropertyGetterCallback getter = 0,
GenericNamedPropertySetterCallback setter = 0,
GenericNamedPropertyQueryCallback query = 0,
@@ -4646,6 +5228,27 @@ struct NamedPropertyHandlerConfiguration {
query(query),
deleter(deleter),
enumerator(enumerator),
+ definer(0),
+ descriptor(0),
+ data(data),
+ flags(flags) {}
+
+ NamedPropertyHandlerConfiguration(
+ GenericNamedPropertyGetterCallback getter,
+ GenericNamedPropertySetterCallback setter,
+ GenericNamedPropertyDescriptorCallback descriptor,
+ GenericNamedPropertyDeleterCallback deleter,
+ GenericNamedPropertyEnumeratorCallback enumerator,
+ GenericNamedPropertyDefinerCallback definer,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(0),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(definer),
+ descriptor(descriptor),
data(data),
flags(flags) {}
@@ -4654,6 +5257,8 @@ struct NamedPropertyHandlerConfiguration {
GenericNamedPropertyQueryCallback query;
GenericNamedPropertyDeleterCallback deleter;
GenericNamedPropertyEnumeratorCallback enumerator;
+ GenericNamedPropertyDefinerCallback definer;
+ GenericNamedPropertyDescriptorCallback descriptor;
Local<Value> data;
PropertyHandlerFlags flags;
};
@@ -4661,7 +5266,7 @@ struct NamedPropertyHandlerConfiguration {
struct IndexedPropertyHandlerConfiguration {
IndexedPropertyHandlerConfiguration(
- /** Note: getter is required **/
+ /** Note: getter is required */
IndexedPropertyGetterCallback getter = 0,
IndexedPropertySetterCallback setter = 0,
IndexedPropertyQueryCallback query = 0,
@@ -4674,6 +5279,27 @@ struct IndexedPropertyHandlerConfiguration {
query(query),
deleter(deleter),
enumerator(enumerator),
+ definer(0),
+ descriptor(0),
+ data(data),
+ flags(flags) {}
+
+ IndexedPropertyHandlerConfiguration(
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter,
+ IndexedPropertyDescriptorCallback descriptor,
+ IndexedPropertyDeleterCallback deleter,
+ IndexedPropertyEnumeratorCallback enumerator,
+ IndexedPropertyDefinerCallback definer,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(0),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(definer),
+ descriptor(descriptor),
data(data),
flags(flags) {}
@@ -4682,6 +5308,8 @@ struct IndexedPropertyHandlerConfiguration {
IndexedPropertyQueryCallback query;
IndexedPropertyDeleterCallback deleter;
IndexedPropertyEnumeratorCallback enumerator;
+ IndexedPropertyDefinerCallback definer;
+ IndexedPropertyDescriptorCallback descriptor;
Local<Value> data;
PropertyHandlerFlags flags;
};
@@ -4977,6 +5605,10 @@ class V8_EXPORT Extension { // NOLINT
void set_auto_enable(bool value) { auto_enable_ = value; }
bool auto_enable() { return auto_enable_; }
+ // Disallow copying and assigning.
+ Extension(const Extension&) = delete;
+ void operator=(const Extension&) = delete;
+
private:
const char* name_;
size_t source_length_; // expected to initialize before source_
@@ -4984,10 +5616,6 @@ class V8_EXPORT Extension { // NOLINT
int dep_count_;
const char** deps_;
bool auto_enable_;
-
- // Disallow copying and assigning.
- Extension(const Extension&);
- void operator=(const Extension&);
};
@@ -5213,13 +5841,13 @@ class V8_EXPORT MicrotasksScope {
*/
static bool IsRunningMicrotasks(Isolate* isolate);
+ // Prevent copying.
+ MicrotasksScope(const MicrotasksScope&) = delete;
+ MicrotasksScope& operator=(const MicrotasksScope&) = delete;
+
private:
internal::Isolate* const isolate_;
bool run_;
-
- // Prevent copying.
- MicrotasksScope(const MicrotasksScope&);
- MicrotasksScope& operator=(const MicrotasksScope&);
};
@@ -5520,8 +6148,8 @@ enum class MemoryPressureLevel { kNone, kModerate, kCritical };
* Interface for tracing through the embedder heap. During the v8 garbage
* collection, v8 collects hidden fields of all potential wrappers, and at the
* end of its marking phase iterates the collection and asks the embedder to
- * trace through its heap and call PersistentBase::RegisterExternalReference on
- * each js object reachable from any of the given wrappers.
+ * trace through its heap and use reporter to report each js object reachable
+ * from any of the given wrappers.
*
* Before the first call to the TraceWrappersFrom function TracePrologue will be
* called. When the garbage collection cycle is finished, TraceEpilogue will be
@@ -5530,36 +6158,49 @@ enum class MemoryPressureLevel { kNone, kModerate, kCritical };
class V8_EXPORT EmbedderHeapTracer {
public:
enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
+
struct AdvanceTracingActions {
explicit AdvanceTracingActions(ForceCompletionAction force_completion_)
: force_completion(force_completion_) {}
ForceCompletionAction force_completion;
};
+
/**
- * V8 will call this method with internal fields of found wrappers.
- * Embedder is expected to store them in it's marking deque and trace
- * reachable wrappers from them when asked by AdvanceTracing method.
+ * V8 will call this method with internal fields of found wrappers. The
+ * embedder is expected to store them in its marking deque and trace
+ * reachable wrappers from them when called through |AdvanceTracing|.
*/
virtual void RegisterV8References(
const std::vector<std::pair<void*, void*> >& internal_fields) = 0;
+
+ /**
+ * Deprecated.
+ * TODO(hlopko) Remove once the migration to reporter is finished.
+ */
+ virtual void TracePrologue() {}
+
/**
- * V8 will call this method at the beginning of the gc cycle.
+ * V8 will call this method at the beginning of a GC cycle. Embedder is
+ * expected to use EmbedderReachableReferenceReporter for reporting all
+ * reachable v8 objects.
*/
- virtual void TracePrologue() = 0;
+ virtual void TracePrologue(EmbedderReachableReferenceReporter* reporter) {}
+
/**
* Embedder is expected to trace its heap starting from wrappers reported by
- * RegisterV8References method, and call
- * PersistentBase::RegisterExternalReference() on all reachable wrappers.
+ * RegisterV8References method, and use reporter for all reachable wrappers.
* Embedder is expected to stop tracing by the given deadline.
*
* Returns true if there is still work to do.
*/
virtual bool AdvanceTracing(double deadline_in_ms,
AdvanceTracingActions actions) = 0;
+
/**
- * V8 will call this method at the end of the gc cycle. Allocation is *not*
- * allowed in the TraceEpilogue.
+ * V8 will call this method at the end of a GC cycle.
+ *
+ * Note that allocation is *not* allowed within |TraceEpilogue|.
*/
virtual void TraceEpilogue() = 0;
@@ -5574,6 +6215,11 @@ class V8_EXPORT EmbedderHeapTracer {
*/
virtual void AbortTracing() {}
+ /**
+ * Returns the number of wrappers that are still to be traced by the embedder.
+ */
+ virtual size_t NumberOfWrappersToTrace() { return 0; }
+
protected:
virtual ~EmbedderHeapTracer() = default;
};
@@ -5671,12 +6317,12 @@ class V8_EXPORT Isolate {
~Scope() { isolate_->Exit(); }
+ // Prevent copying of Scope objects.
+ Scope(const Scope&) = delete;
+ Scope& operator=(const Scope&) = delete;
+
private:
Isolate* const isolate_;
-
- // Prevent copying of Scope objects.
- Scope(const Scope&);
- Scope& operator=(const Scope&);
};
@@ -5690,14 +6336,15 @@ class V8_EXPORT Isolate {
DisallowJavascriptExecutionScope(Isolate* isolate, OnFailure on_failure);
~DisallowJavascriptExecutionScope();
+ // Prevent copying of Scope objects.
+ DisallowJavascriptExecutionScope(const DisallowJavascriptExecutionScope&) =
+ delete;
+ DisallowJavascriptExecutionScope& operator=(
+ const DisallowJavascriptExecutionScope&) = delete;
+
private:
bool on_failure_;
void* internal_;
-
- // Prevent copying of Scope objects.
- DisallowJavascriptExecutionScope(const DisallowJavascriptExecutionScope&);
- DisallowJavascriptExecutionScope& operator=(
- const DisallowJavascriptExecutionScope&);
};
@@ -5709,14 +6356,15 @@ class V8_EXPORT Isolate {
explicit AllowJavascriptExecutionScope(Isolate* isolate);
~AllowJavascriptExecutionScope();
+ // Prevent copying of Scope objects.
+ AllowJavascriptExecutionScope(const AllowJavascriptExecutionScope&) =
+ delete;
+ AllowJavascriptExecutionScope& operator=(
+ const AllowJavascriptExecutionScope&) = delete;
+
private:
void* internal_throws_;
void* internal_assert_;
-
- // Prevent copying of Scope objects.
- AllowJavascriptExecutionScope(const AllowJavascriptExecutionScope&);
- AllowJavascriptExecutionScope& operator=(
- const AllowJavascriptExecutionScope&);
};
/**
@@ -5728,13 +6376,14 @@ class V8_EXPORT Isolate {
explicit SuppressMicrotaskExecutionScope(Isolate* isolate);
~SuppressMicrotaskExecutionScope();
- private:
- internal::Isolate* const isolate_;
-
// Prevent copying of Scope objects.
- SuppressMicrotaskExecutionScope(const SuppressMicrotaskExecutionScope&);
+ SuppressMicrotaskExecutionScope(const SuppressMicrotaskExecutionScope&) =
+ delete;
SuppressMicrotaskExecutionScope& operator=(
- const SuppressMicrotaskExecutionScope&);
+ const SuppressMicrotaskExecutionScope&) = delete;
+
+ private:
+ internal::Isolate* const isolate_;
};
/**
@@ -6460,17 +7109,17 @@ class V8_EXPORT Isolate {
*/
bool IsInUse();
+ Isolate() = delete;
+ ~Isolate() = delete;
+ Isolate(const Isolate&) = delete;
+ Isolate& operator=(const Isolate&) = delete;
+ void* operator new(size_t size) = delete;
+ void operator delete(void*, size_t) = delete;
+
private:
template <class K, class V, class Traits>
friend class PersistentValueMapBase;
- Isolate();
- Isolate(const Isolate&);
- ~Isolate();
- Isolate& operator=(const Isolate&);
- void* operator new(size_t size);
- void operator delete(void*, size_t);
-
void SetObjectGroupId(internal::Object** object, UniqueId id);
void SetReferenceFromGroup(UniqueId id, internal::Object** object);
void SetReference(internal::Object** parent, internal::Object** child);
@@ -6857,8 +7506,6 @@ class V8_EXPORT V8 {
int* index);
static Local<Value> GetEternal(Isolate* isolate, int index);
- static void RegisterExternallyReferencedObject(internal::Object** object,
- internal::Isolate* isolate);
template <class K, class V, class T>
friend class PersistentValueMapBase;
@@ -6925,12 +7572,12 @@ class SnapshotCreator {
*/
StartupData CreateBlob(FunctionCodeHandling function_code_handling);
+ // Disallow copying and assigning.
+ SnapshotCreator(const SnapshotCreator&) = delete;
+ void operator=(const SnapshotCreator&) = delete;
+
private:
void* data_;
-
- // Disallow copying and assigning.
- SnapshotCreator(const SnapshotCreator&);
- void operator=(const SnapshotCreator&);
};
/**
@@ -7134,15 +7781,14 @@ class V8_EXPORT TryCatch {
return handler->js_stack_comparable_address_;
}
+ TryCatch(const TryCatch&) = delete;
+ void operator=(const TryCatch&) = delete;
+ void* operator new(size_t size) = delete;
+ void operator delete(void*, size_t) = delete;
+
private:
void ResetInternal();
- // Make it hard to create heap-allocated TryCatch blocks.
- TryCatch(const TryCatch&);
- void operator=(const TryCatch&);
- void* operator new(size_t size);
- void operator delete(void*, size_t);
-
v8::internal::Isolate* isolate_;
v8::TryCatch* next_;
void* exception_;
@@ -7498,16 +8144,16 @@ class V8_EXPORT Locker {
*/
static bool IsActive();
+ // Disallow copying and assigning.
+ Locker(const Locker&) = delete;
+ void operator=(const Locker&) = delete;
+
private:
void Initialize(Isolate* isolate);
bool has_lock_;
bool top_level_;
internal::Isolate* isolate_;
-
- // Disallow copying and assigning.
- Locker(const Locker&);
- void operator=(const Locker&);
};
@@ -7643,8 +8289,8 @@ class Internals {
static const int kNodeIsPartiallyDependentShift = 4;
static const int kNodeIsActiveShift = 4;
- static const int kJSObjectType = 0xb7;
- static const int kJSApiObjectType = 0xb6;
+ static const int kJSObjectType = 0xb9;
+ static const int kJSApiObjectType = 0xb8;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
@@ -7920,11 +8566,10 @@ P* PersistentBase<T>::ClearWeak() {
}
template <class T>
-void PersistentBase<T>::RegisterExternalReference(Isolate* isolate) const {
+void PersistentBase<T>::RegisterExternalReference(
+ EmbedderReachableReferenceReporter* reporter) const {
if (IsEmpty()) return;
- V8::RegisterExternallyReferencedObject(
- reinterpret_cast<internal::Object**>(this->val_),
- reinterpret_cast<internal::Isolate*>(isolate));
+ reporter->ReportExternalReference(this->val_);
}
template <class T>
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index edfd254316..2747be5909 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -9,12 +9,18 @@
# Bots are ordered by appearance on waterfall.
'masters': {
'developer_default': {
+ 'arm.debug': 'default_debug_arm',
+ 'arm.optdebug': 'default_optdebug_arm',
+ 'arm.release': 'default_release_arm',
+ 'arm64.debug': 'default_debug_arm64',
+ 'arm64.optdebug': 'default_optdebug_arm64',
+ 'arm64.release': 'default_release_arm64',
+ 'ia32.debug': 'default_debug_x86',
+ 'ia32.optdebug': 'default_optdebug_x86',
+ 'ia32.release': 'default_release_x86',
'x64.debug': 'default_debug_x64',
'x64.optdebug': 'default_optdebug_x64',
'x64.release': 'default_release_x64',
- 'x86.debug': 'default_debug_x86',
- 'x86.optdebug': 'default_optdebug_x86',
- 'x86.release': 'default_release_x86',
},
'client.dart.fyi': {
@@ -32,7 +38,7 @@
'V8 Linux - nosnap builder': 'gn_release_x86_no_snap',
'V8 Linux - nosnap debug builder': 'gn_debug_x86_no_snap',
'V8 Linux - shared': 'gn_release_x86_shared_verify_heap',
- 'V8 Linux - noi18n - debug': 'gyp_debug_x86_no_i18n',
+ 'V8 Linux - noi18n - debug': 'gn_debug_x86_no_i18n',
# Linux64.
'V8 Linux64 - builder': 'gn_release_x64',
'V8 Linux64 - debug builder': 'gn_debug_x64_valgrind',
@@ -40,34 +46,35 @@
'V8 Linux64 - internal snapshot': 'gn_release_x64_internal',
'V8 Linux64 - gyp': 'gyp_release_x64',
# Windows.
- 'V8 Win32 - builder': 'gyp_release_x86_minimal_symbols',
- 'V8 Win32 - debug builder': 'gyp_debug_x86_minimal_symbols',
+ 'V8 Win32 - builder': 'gn_release_x86_minimal_symbols',
+ 'V8 Win32 - debug builder': 'gn_debug_x86_minimal_symbols',
'V8 Win32 - nosnap - shared':
- 'gyp_release_x86_no_snap_shared_minimal_symbols',
- 'V8 Win64': 'gyp_release_x64_minimal_symbols',
- 'V8 Win64 - debug': 'gyp_debug_x64_minimal_symbols',
- 'V8 Win64 - clang': 'gyp_release_x64_clang',
+ 'gn_release_x86_no_snap_shared_minimal_symbols',
+ 'V8 Win64': 'gn_release_x64_minimal_symbols',
+ 'V8 Win64 - debug': 'gn_debug_x64_minimal_symbols',
+ # TODO(machenbach): Switch plugins on when errors are fixed.
+ 'V8 Win64 - clang': 'gn_release_x64_clang',
# Mac.
'V8 Mac': 'gn_release_x86',
'V8 Mac - debug': 'gn_debug_x86',
'V8 Mac64': 'gn_release_x64',
'V8 Mac64 - debug': 'gn_debug_x64',
'V8 Mac GC Stress': 'gn_debug_x86',
- 'V8 Mac64 ASAN': 'gyp_release_x64_asan',
+ 'V8 Mac64 ASAN': 'gn_release_x64_asan_no_lsan',
# Sanitizers.
- 'V8 Linux64 ASAN': 'gyp_release_x64_asan',
+ 'V8 Linux64 ASAN': 'gn_release_x64_asan',
'V8 Linux64 TSAN': 'gn_release_x64_tsan',
'V8 Linux - arm64 - sim - MSAN': 'gn_release_simulate_arm64_msan',
# Clusterfuzz.
'V8 Linux64 ASAN no inline - release builder':
- 'gyp_release_x64_asan_symbolized_edge_verify_heap',
- 'V8 Linux64 ASAN - debug builder': 'gyp_debug_x64_asan_edge',
+ 'gn_release_x64_asan_symbolized_edge_verify_heap',
+ 'V8 Linux64 ASAN - debug builder': 'gn_debug_x64_asan_edge',
'V8 Linux64 ASAN arm64 - debug builder':
- 'gyp_debug_simulate_arm64_asan_edge',
+ 'gn_debug_simulate_arm64_asan_edge',
'V8 Linux ASAN arm - debug builder':
- 'gyp_debug_simulate_arm_asan_edge',
+ 'gn_debug_simulate_arm_asan_edge',
'V8 Linux ASAN mipsel - debug builder':
- 'gyp_debug_simulate_mipsel_asan_edge',
+ 'gn_debug_simulate_mipsel_asan_edge',
# Misc.
'V8 Linux gcc 4.8': 'gn_release_x86_gcc',
# FYI.
@@ -86,13 +93,13 @@
'client.v8.ports': {
# Arm.
- 'V8 Arm - builder': 'gyp_release_arm',
- 'V8 Arm - debug builder': 'gyp_debug_arm',
- 'V8 Android Arm - builder': 'gyp_release_android_arm',
- 'V8 Linux - arm - sim': 'gyp_release_simulate_arm',
- 'V8 Linux - arm - sim - debug': 'gyp_debug_simulate_arm',
+ 'V8 Arm - builder': 'gn_release_arm',
+ 'V8 Arm - debug builder': 'gn_debug_arm',
+ 'V8 Android Arm - builder': 'gn_release_android_arm',
+ 'V8 Linux - arm - sim': 'gn_release_simulate_arm',
+ 'V8 Linux - arm - sim - debug': 'gn_debug_simulate_arm',
# Arm64.
- 'V8 Android Arm64 - builder': 'gyp_release_android_arm64',
+ 'V8 Android Arm64 - builder': 'gn_release_android_arm64',
'V8 Linux - arm64 - sim': 'gn_release_simulate_arm64',
'V8 Linux - arm64 - sim - debug': 'gn_debug_simulate_arm64',
'V8 Linux - arm64 - sim - nosnap - debug':
@@ -100,8 +107,8 @@
'V8 Linux - arm64 - sim - gc stress': 'gn_debug_simulate_arm64',
# Mips.
'V8 Mips - builder': 'gyp_release_mips_no_snap_no_i18n',
- 'V8 Linux - mipsel - sim - builder': 'gyp_release_simulate_mipsel',
- 'V8 Linux - mips64el - sim - builder': 'gyp_release_simulate_mips64el',
+ 'V8 Linux - mipsel - sim - builder': 'gn_release_simulate_mipsel',
+ 'V8 Linux - mips64el - sim - builder': 'gn_release_simulate_mips64el',
# PPC.
'V8 Linux - ppc - sim': 'gyp_release_simulate_ppc',
'V8 Linux - ppc64 - sim': 'gyp_release_simulate_ppc64',
@@ -117,18 +124,18 @@
'V8 Linux - beta branch - debug': 'gn_debug_x86',
'V8 Linux - stable branch': 'gn_release_x86',
'V8 Linux - stable branch - debug': 'gn_debug_x86',
- 'V8 Linux64 - beta branch': 'gyp_release_x64',
+ 'V8 Linux64 - beta branch': 'gn_release_x64',
'V8 Linux64 - beta branch - debug': 'gn_debug_x64',
'V8 Linux64 - stable branch': 'gn_release_x64',
'V8 Linux64 - stable branch - debug': 'gn_debug_x64',
- 'V8 arm - sim - beta branch': 'gyp_release_simulate_arm',
- 'V8 arm - sim - beta branch - debug': 'gyp_debug_simulate_arm',
- 'V8 arm - sim - stable branch': 'gyp_release_simulate_arm',
- 'V8 arm - sim - stable branch - debug': 'gyp_debug_simulate_arm',
- 'V8 mips64el - sim - beta branch': 'gyp_release_simulate_mips64el',
- 'V8 mips64el - sim - stable branch': 'gyp_release_simulate_mips64el',
- 'V8 mipsel - sim - beta branch': 'gyp_release_simulate_mipsel',
- 'V8 mipsel - sim - stable branch': 'gyp_release_simulate_mipsel',
+ 'V8 arm - sim - beta branch': 'gn_release_simulate_arm',
+ 'V8 arm - sim - beta branch - debug': 'gn_debug_simulate_arm',
+ 'V8 arm - sim - stable branch': 'gn_release_simulate_arm',
+ 'V8 arm - sim - stable branch - debug': 'gn_debug_simulate_arm',
+ 'V8 mips64el - sim - beta branch': 'gn_release_simulate_mips64el',
+ 'V8 mips64el - sim - stable branch': 'gn_release_simulate_mips64el',
+ 'V8 mipsel - sim - beta branch': 'gn_release_simulate_mipsel',
+ 'V8 mipsel - sim - stable branch': 'gn_release_simulate_mipsel',
'V8 ppc - sim - beta branch': 'gyp_release_simulate_ppc',
'V8 ppc - sim - stable branch': 'gyp_release_simulate_ppc',
'V8 ppc64 - sim - beta branch': 'gyp_release_simulate_ppc64',
@@ -143,8 +150,8 @@
'v8_linux_avx2_dbg': 'gn_debug_x86_trybot',
'v8_linux_nodcheck_rel_ng': 'gn_release_x86_minimal_symbols',
'v8_linux_dbg_ng': 'gn_debug_x86_trybot',
- 'v8_linux_noi18n_rel_ng': 'gyp_release_x86_no_i18n_trybot',
- 'v8_linux_gc_stress_dbg': 'gyp_debug_x86_trybot',
+ 'v8_linux_noi18n_rel_ng': 'gn_release_x86_no_i18n_trybot',
+ 'v8_linux_gc_stress_dbg': 'gn_debug_x86_trybot',
'v8_linux_nosnap_rel': 'gn_release_x86_no_snap_trybot',
'v8_linux_nosnap_dbg': 'gn_debug_x86_no_snap_trybot',
'v8_linux_gcc_compile_rel': 'gn_release_x86_gcc_minimal_symbols',
@@ -153,34 +160,34 @@
'v8_linux64_gyp_rel_ng': 'gyp_release_x64',
'v8_linux64_avx2_rel_ng': 'gn_release_x64_trybot',
'v8_linux64_avx2_dbg': 'gn_debug_x64_trybot',
- 'v8_linux64_asan_rel_ng': 'gyp_release_x64_asan_minimal_symbols',
+ 'v8_linux64_asan_rel_ng': 'gn_release_x64_asan_minimal_symbols',
'v8_linux64_msan_rel': 'gn_release_simulate_arm64_msan_minimal_symbols',
'v8_linux64_sanitizer_coverage_rel':
'gyp_release_x64_asan_minimal_symbols_coverage',
'v8_linux64_tsan_rel': 'gn_release_x64_tsan_minimal_symbols',
- 'v8_win_dbg': 'gyp_debug_x86_trybot',
- 'v8_win_compile_dbg': 'gyp_debug_x86_trybot',
- 'v8_win_rel_ng': 'gyp_release_x86_trybot',
+ 'v8_win_dbg': 'gn_debug_x86_trybot',
+ 'v8_win_compile_dbg': 'gn_debug_x86_trybot',
+ 'v8_win_rel_ng': 'gn_release_x86_trybot',
'v8_win_nosnap_shared_rel_ng':
- 'gyp_release_x86_no_snap_shared_minimal_symbols',
- 'v8_win64_dbg': 'gyp_debug_x64_minimal_symbols',
- 'v8_win64_rel_ng': 'gyp_release_x64_trybot',
+ 'gn_release_x86_no_snap_shared_minimal_symbols',
+ 'v8_win64_dbg': 'gn_debug_x64_minimal_symbols',
+ 'v8_win64_rel_ng': 'gn_release_x64_trybot',
'v8_mac_rel_ng': 'gn_release_x86_trybot',
'v8_mac_dbg': 'gn_debug_x86_trybot',
'v8_mac_gc_stress_dbg': 'gn_debug_x86_trybot',
'v8_mac64_rel': 'gn_release_x64_trybot',
'v8_mac64_dbg': 'gn_debug_x64_minimal_symbols',
- 'v8_mac64_asan_rel': 'gyp_release_x64_asan',
- 'v8_linux_arm_rel_ng': 'gyp_release_simulate_arm_trybot',
- 'v8_linux_arm_dbg': 'gyp_debug_simulate_arm',
- 'v8_linux_arm_armv8a_rel': 'gyp_release_simulate_arm_trybot',
- 'v8_linux_arm_armv8a_dbg': 'gyp_debug_simulate_arm',
+ 'v8_mac64_asan_rel': 'gn_release_x64_asan_no_lsan',
+ 'v8_linux_arm_rel_ng': 'gn_release_simulate_arm_trybot',
+ 'v8_linux_arm_dbg': 'gn_debug_simulate_arm',
+ 'v8_linux_arm_armv8a_rel': 'gn_release_simulate_arm_trybot',
+ 'v8_linux_arm_armv8a_dbg': 'gn_debug_simulate_arm',
'v8_linux_arm64_rel_ng': 'gn_release_simulate_arm64_trybot',
'v8_linux_arm64_dbg': 'gn_debug_simulate_arm64',
'v8_linux_arm64_gc_stress_dbg': 'gn_debug_simulate_arm64',
- 'v8_linux_mipsel_compile_rel': 'gyp_release_simulate_mipsel',
- 'v8_linux_mips64el_compile_rel': 'gyp_release_simulate_mips64el',
- 'v8_android_arm_compile_rel': 'gyp_release_android_arm',
+ 'v8_linux_mipsel_compile_rel': 'gn_release_simulate_mipsel',
+ 'v8_linux_mips64el_compile_rel': 'gn_release_simulate_mips64el',
+ 'v8_android_arm_compile_rel': 'gn_release_android_arm',
},
},
@@ -189,6 +196,20 @@
# gyp/gn, release/debug, arch type, other values alphabetically.
'configs': {
# Developer default configs.
+ 'default_debug_arm': [
+ 'gn', 'debug', 'simulate_arm', 'v8_enable_slow_dchecks',
+ 'v8_full_debug'],
+ 'default_optdebug_arm': [
+ 'gn', 'debug', 'simulate_arm', 'v8_enable_slow_dchecks'],
+ 'default_release_arm': [
+ 'gn', 'release', 'simulate_arm'],
+ 'default_debug_arm64': [
+ 'gn', 'debug', 'simulate_arm64', 'v8_enable_slow_dchecks',
+ 'v8_full_debug'],
+ 'default_optdebug_arm64': [
+ 'gn', 'debug', 'simulate_arm64', 'v8_enable_slow_dchecks'],
+ 'default_release_arm64': [
+ 'gn', 'release', 'simulate_arm64'],
'default_debug_x64': [
'gn', 'debug', 'x64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_x64': [
@@ -204,12 +225,24 @@
# GN debug configs for simulators.
+ 'gn_debug_simulate_arm': [
+ 'gn', 'debug_bot', 'simulate_arm', 'swarming'],
+ 'gn_debug_simulate_arm_asan_edge': [
+ 'gn', 'debug_bot', 'simulate_arm', 'asan', 'edge'],
'gn_debug_simulate_arm64': [
'gn', 'debug_bot', 'simulate_arm64', 'swarming'],
+ 'gn_debug_simulate_arm64_asan_edge': [
+ 'gn', 'debug_bot', 'simulate_arm64', 'asan', 'lsan', 'edge'],
'gn_debug_simulate_arm64_no_snap': [
'gn', 'debug_bot', 'simulate_arm64', 'swarming', 'v8_snapshot_none'],
+ 'gn_debug_simulate_mipsel_asan_edge': [
+ 'gn', 'debug_bot', 'simulate_mipsel', 'asan', 'edge'],
# GN release configs for simulators.
+ 'gn_release_simulate_arm': [
+ 'gn', 'release_bot', 'simulate_arm', 'swarming'],
+ 'gn_release_simulate_arm_trybot': [
+ 'gn', 'release_trybot', 'simulate_arm', 'swarming'],
'gn_release_simulate_arm64': [
'gn', 'release_bot', 'simulate_arm64', 'swarming'],
'gn_release_simulate_arm64_msan': [
@@ -219,12 +252,44 @@
'swarming'],
'gn_release_simulate_arm64_trybot': [
'gn', 'release_trybot', 'simulate_arm64', 'swarming'],
+ 'gn_release_simulate_mipsel': [
+ 'gn', 'release_bot', 'simulate_mipsel', 'swarming'],
+ 'gn_release_simulate_mips64el': [
+ 'gn', 'release_bot', 'simulate_mips64el', 'swarming'],
+
+ # GN debug configs for arm.
+ 'gn_debug_arm': [
+ 'gn', 'debug_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'],
+
+ # GN release configs for arm.
+ 'gn_release_arm': [
+ 'gn', 'release_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'],
+ 'gn_release_android_arm': [
+ 'gn', 'release_bot', 'arm', 'android', 'crosscompile',
+ 'minimal_symbols', 'swarming'],
+ 'gn_release_android_arm64': [
+ 'gn', 'release_bot', 'arm64', 'android', 'crosscompile',
+ 'minimal_symbols', 'swarming'],
# GN release configs for x64.
'gn_release_x64': [
'gn', 'release_bot', 'x64', 'swarming'],
+ 'gn_release_x64_asan': [
+ 'gn', 'release_bot', 'x64', 'asan', 'lsan', 'swarming'],
+ 'gn_release_x64_asan_minimal_symbols': [
+ 'gn', 'release_bot', 'x64', 'asan', 'lsan', 'minimal_symbols',
+ 'swarming'],
+ 'gn_release_x64_asan_no_lsan': [
+ 'gn', 'release_bot', 'x64', 'asan', 'swarming'],
+ 'gn_release_x64_asan_symbolized_edge_verify_heap': [
+ 'gn', 'release_bot', 'x64', 'asan', 'edge', 'lsan', 'symbolized',
+ 'v8_verify_heap'],
+ 'gn_release_x64_clang': [
+ 'gn', 'release_bot', 'x64', 'clang', 'swarming'],
'gn_release_x64_internal': [
'gn', 'release_bot', 'x64', 'swarming', 'v8_snapshot_internal'],
+ 'gn_release_x64_minimal_symbols': [
+ 'gn', 'release_bot', 'x64', 'minimal_symbols', 'swarming'],
'gn_release_x64_trybot': [
'gn', 'release_trybot', 'x64', 'swarming'],
'gn_release_x64_tsan': [
@@ -235,6 +300,8 @@
# GN debug configs for x64.
'gn_debug_x64': [
'gn', 'debug_bot', 'x64', 'swarming'],
+ 'gn_debug_x64_asan_edge': [
+ 'gn', 'debug_bot', 'x64', 'asan', 'lsan', 'edge'],
'gn_debug_x64_custom': [
'gn', 'debug_bot', 'x64', 'swarming', 'v8_snapshot_custom'],
'gn_debug_x64_minimal_symbols': [
@@ -247,6 +314,10 @@
# GN debug configs for x86.
'gn_debug_x86': [
'gn', 'debug_bot', 'x86', 'swarming'],
+ 'gn_debug_x86_minimal_symbols': [
+ 'gn', 'debug_bot', 'x86', 'minimal_symbols', 'swarming'],
+ 'gn_debug_x86_no_i18n': [
+ 'gn', 'debug_bot', 'x86', 'v8_no_i18n'],
'gn_debug_x86_no_snap': [
'gn', 'debug_bot', 'x86', 'swarming', 'v8_snapshot_none'],
'gn_debug_x86_no_snap_trybot': [
@@ -267,8 +338,13 @@
'gn', 'release_trybot', 'x86', 'gcmole', 'swarming'],
'gn_release_x86_minimal_symbols': [
'gn', 'release_bot', 'x86', 'minimal_symbols', 'swarming'],
+ 'gn_release_x86_no_i18n_trybot': [
+ 'gn', 'release_trybot', 'x86', 'swarming', 'v8_no_i18n'],
'gn_release_x86_no_snap': [
'gn', 'release_bot', 'x86', 'swarming', 'v8_snapshot_none'],
+ 'gn_release_x86_no_snap_shared_minimal_symbols': [
+ 'gn', 'release', 'x86', 'goma', 'minimal_symbols', 'shared', 'swarming',
+ 'v8_snapshot_none'],
'gn_release_x86_no_snap_trybot': [
'gn', 'release_trybot', 'x86', 'swarming', 'v8_snapshot_none'],
'gn_release_x86_shared_verify_heap': [
@@ -276,65 +352,25 @@
'gn_release_x86_trybot': [
'gn', 'release_trybot', 'x86', 'swarming'],
- # Gyp debug configs for arm.
- 'gyp_debug_arm': [
- 'gyp', 'debug_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'],
-
# Gyp debug configs for simulators.
- 'gyp_debug_simulate_arm': [
- 'gyp', 'debug_bot', 'simulate_arm', 'swarming'],
- 'gyp_debug_simulate_arm_asan_edge': [
- 'gyp', 'debug_bot', 'simulate_arm', 'asan', 'edge'],
- 'gyp_debug_simulate_arm64_asan_edge': [
- 'gyp', 'debug_bot', 'simulate_arm64', 'asan', 'lsan', 'edge'],
- 'gyp_debug_simulate_mipsel_asan_edge': [
- 'gyp', 'debug_bot', 'simulate_mipsel', 'asan', 'edge'],
'gyp_debug_simulate_x87_no_snap': [
'gyp', 'debug_bot', 'simulate_x87', 'swarming', 'v8_snapshot_none'],
- # Gyp debug configs for x64.
- 'gyp_debug_x64_asan_edge': [
- 'gyp', 'debug_bot', 'x64', 'asan', 'lsan', 'edge'],
- 'gyp_debug_x64_minimal_symbols': [
- 'gyp', 'debug_bot', 'x64', 'minimal_symbols', 'swarming'],
-
# Gyp debug configs for x86.
'gyp_debug_x86': [
'gyp', 'debug_bot', 'x86', 'swarming'],
- 'gyp_debug_x86_minimal_symbols': [
- 'gyp', 'debug_bot', 'x86', 'minimal_symbols', 'swarming'],
- 'gyp_debug_x86_trybot': [
- 'gyp', 'debug_trybot', 'x86', 'swarming'],
- 'gyp_debug_x86_no_i18n': [
- 'gyp', 'debug_bot', 'x86', 'v8_no_i18n'],
'gyp_debug_x86_vtunejit': [
'gyp', 'debug_bot', 'x86', 'v8_enable_vtunejit'],
'gyp_full_debug_x86': [
'gyp', 'debug', 'x86', 'goma', 'static', 'v8_enable_slow_dchecks',
'v8_full_debug'],
- # Gyp release configs for arm.
- 'gyp_release_arm': [
- 'gyp', 'release_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'],
- 'gyp_release_android_arm': [
- 'gyp', 'release_bot', 'arm', 'android', 'crosscompile', 'swarming'],
- 'gyp_release_android_arm64': [
- 'gyp', 'release_bot', 'arm64', 'android', 'crosscompile', 'swarming'],
-
# Gyp release configs for mips.
'gyp_release_mips_no_snap_no_i18n': [
'gyp', 'release', 'mips', 'crosscompile', 'static', 'v8_no_i18n',
'v8_snapshot_none'],
# Gyp release configs for simulators.
- 'gyp_release_simulate_arm': [
- 'gyp', 'release_bot', 'simulate_arm', 'swarming'],
- 'gyp_release_simulate_arm_trybot': [
- 'gyp', 'release_trybot', 'simulate_arm', 'swarming'],
- 'gyp_release_simulate_mipsel': [
- 'gyp', 'release_bot', 'simulate_mipsel', 'swarming'],
- 'gyp_release_simulate_mips64el': [
- 'gyp', 'release_bot', 'simulate_mips64el', 'swarming'],
'gyp_release_simulate_ppc': [
'gyp', 'release_bot', 'simulate_ppc', 'swarming'],
'gyp_release_simulate_ppc64': [
@@ -347,44 +383,21 @@
# Gyp release configs for x64.
'gyp_release_x64': [
'gyp', 'release_bot', 'x64', 'swarming'],
- 'gyp_release_x64_asan': [
- 'gyp', 'release_bot', 'x64', 'asan', 'lsan', 'swarming'],
- 'gyp_release_x64_asan_minimal_symbols': [
- 'gyp', 'release_bot', 'x64', 'asan', 'lsan', 'minimal_symbols',
- 'swarming'],
'gyp_release_x64_asan_minimal_symbols_coverage': [
'gyp', 'release_bot', 'x64', 'asan', 'bb', 'coverage', 'lsan',
'minimal_symbols', 'swarming'],
- 'gyp_release_x64_asan_symbolized_edge_verify_heap': [
- 'gyp', 'release_bot', 'x64', 'asan', 'edge', 'lsan', 'symbolized',
- 'v8_verify_heap'],
'gyp_release_x64_cfi_symbolized': [
'gyp', 'release_bot', 'x64', 'cfi', 'swarming', 'symbolized'],
- 'gyp_release_x64_clang': [
- 'gyp', 'release_bot', 'x64', 'clang', 'swarming'],
'gyp_release_x64_gcc_coverage': [
'gyp', 'release_bot', 'x64', 'coverage', 'gcc'],
- 'gyp_release_x64_minimal_symbols': [
- 'gyp', 'release_bot', 'x64', 'minimal_symbols', 'swarming'],
- 'gyp_release_x64_trybot': [
- 'gyp', 'release_trybot', 'x64', 'swarming'],
# Gyp release configs for x86.
'gyp_release_x86_disassembler': [
'gyp', 'release_bot', 'x86', 'v8_enable_disassembler'],
'gyp_release_x86_interpreted_regexp': [
'gyp', 'release_bot', 'x86', 'v8_interpreted_regexp'],
- 'gyp_release_x86_minimal_symbols': [
- 'gyp', 'release_bot', 'x86', 'minimal_symbols', 'swarming'],
- 'gyp_release_x86_no_i18n_trybot': [
- 'gyp', 'release_trybot', 'x86', 'swarming', 'v8_no_i18n'],
- 'gyp_release_x86_no_snap_shared_minimal_symbols': [
- 'gyp', 'release', 'x86', 'goma', 'minimal_symbols', 'shared', 'swarming',
- 'v8_snapshot_none'],
'gyp_release_x86_predictable': [
'gyp', 'release_bot', 'x86', 'v8_enable_verify_predictable'],
- 'gyp_release_x86_trybot': [
- 'gyp', 'release_trybot', 'x86', 'swarming'],
},
'mixins': {
@@ -444,7 +457,7 @@
'debug_bot': {
'mixins': [
- 'debug', 'static', 'goma', 'v8_enable_slow_dchecks',
+ 'debug', 'shared', 'goma', 'v8_enable_slow_dchecks',
'v8_optimized_debug'],
},
@@ -533,7 +546,8 @@
},
'simulate_mipsel': {
- 'gn_args': 'target_cpu="x86" v8_target_cpu="mipsel"',
+ 'gn_args':
+ 'target_cpu="x86" v8_target_cpu="mipsel" mips_arch_variant="r2"',
'gyp_defines': 'target_arch=ia32 v8_target_arch=mipsel',
},
@@ -577,9 +591,9 @@
'gyp_defines': 'test_isolation_mode=prepare',
},
- # TODO(machenbach): Remove the symbolized config after the bots are gone.
+ # TODO(machenbach): Remove the symbolized config after the bots are gone.
'symbolized': {
- 'gn_args': 'symbolized=true',
+ 'gn_args': 'v8_no_inline=true',
'gyp_defines':
'release_extra_cflags="-fno-inline-functions -fno-inline"',
},
@@ -595,8 +609,8 @@
},
'v8_no_i18n': {
- 'gn_args': 'v8_enable_i18n_support=false',
- 'gyp_defines': 'v8_enable_i18n_support=0',
+ 'gn_args': 'v8_enable_i18n_support=false icu_use_data_file=false',
+ 'gyp_defines': 'v8_enable_i18n_support=0 icu_use_data_file_flag=0',
},
'v8_enable_disassembler': {
diff --git a/deps/v8/src/address-map.cc b/deps/v8/src/address-map.cc
index 61292bf562..3122b33693 100644
--- a/deps/v8/src/address-map.cc
+++ b/deps/v8/src/address-map.cc
@@ -13,7 +13,7 @@ namespace internal {
RootIndexMap::RootIndexMap(Isolate* isolate) {
map_ = isolate->root_index_map();
if (map_ != NULL) return;
- map_ = new base::HashMap(base::HashMap::PointersMatch);
+ map_ = new base::HashMap();
for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
Object* root = isolate->heap()->root(root_index);
diff --git a/deps/v8/src/address-map.h b/deps/v8/src/address-map.h
index 0ce93d24e8..95e9cb064b 100644
--- a/deps/v8/src/address-map.h
+++ b/deps/v8/src/address-map.h
@@ -189,9 +189,7 @@ class SerializerReference {
class SerializerReferenceMap : public AddressMapBase {
public:
SerializerReferenceMap()
- : no_allocation_(),
- map_(base::HashMap::PointersMatch),
- attached_reference_index_(0) {}
+ : no_allocation_(), map_(), attached_reference_index_(0) {}
SerializerReference Lookup(HeapObject* obj) {
base::HashMap::Entry* entry = LookupEntry(&map_, obj, false);
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 8581cc9e9d..a92b71f08e 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -13,10 +13,10 @@ namespace internal {
// Called when allocation routines fail to allocate.
// This function should not return, but should terminate the current
// processing.
-void FatalProcessOutOfMemory(const char* message);
+V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(const char* message);
// Superclass for classes managed with new & delete.
-class Malloced {
+class V8_EXPORT_PRIVATE Malloced {
public:
void* operator new(size_t size) { return New(size); }
void operator delete(void* p) { Delete(p); }
@@ -72,7 +72,7 @@ void DeleteArray(T* array) {
// The normal strdup functions use malloc. These versions of StrDup
// and StrNDup uses new and calls the FatalProcessOutOfMemory handler
// if allocation fails.
-char* StrDup(const char* str);
+V8_EXPORT_PRIVATE char* StrDup(const char* str);
char* StrNDup(const char* str, int n);
diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api-arguments-inl.h
index eefdf35adc..bf72fc4e6f 100644
--- a/deps/v8/src/api-arguments-inl.h
+++ b/deps/v8/src/api-arguments-inl.h
@@ -20,8 +20,6 @@ namespace internal {
Handle<Name> name) { \
Isolate* isolate = this->isolate(); \
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED( \
- isolate, &tracing::TraceEventStatsTable::Function); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ApiReturn> info(begin()); \
@@ -46,8 +44,6 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
uint32_t index) { \
Isolate* isolate = this->isolate(); \
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED( \
- isolate, &tracing::TraceEventStatsTable::Function); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ApiReturn> info(begin()); \
@@ -68,9 +64,6 @@ Handle<Object> PropertyCallbackArguments::Call(
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate,
- &tracing::TraceEventStatsTable::GenericNamedPropertySetterCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Value> info(begin());
@@ -80,14 +73,27 @@ Handle<Object> PropertyCallbackArguments::Call(
return GetReturnValue<Object>(isolate);
}
+Handle<Object> PropertyCallbackArguments::Call(
+ GenericNamedPropertyDefinerCallback f, Handle<Name> name,
+ const v8::PropertyDescriptor& desc) {
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(
+ isolate, &RuntimeCallStats::GenericNamedPropertyDefinerCallback);
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+ PropertyCallbackInfo<v8::Value> info(begin());
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-define", holder(), *name));
+ f(v8::Utils::ToLocal(name), desc, info);
+ return GetReturnValue<Object>(isolate);
+}
+
Handle<Object> PropertyCallbackArguments::Call(IndexedPropertySetterCallback f,
uint32_t index,
Handle<Object> value) {
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::IndexedPropertySetterCallback);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::IndexedPropertySetterCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Value> info(begin());
@@ -97,13 +103,26 @@ Handle<Object> PropertyCallbackArguments::Call(IndexedPropertySetterCallback f,
return GetReturnValue<Object>(isolate);
}
+Handle<Object> PropertyCallbackArguments::Call(
+ IndexedPropertyDefinerCallback f, uint32_t index,
+ const v8::PropertyDescriptor& desc) {
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(
+ isolate, &RuntimeCallStats::IndexedPropertyDefinerCallback);
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+ PropertyCallbackInfo<v8::Value> info(begin());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-define", holder(), index));
+ f(index, desc, info);
+ return GetReturnValue<Object>(isolate);
+}
+
void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
Handle<Name> name, Handle<Object> value) {
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::AccessorNameSetterCallback);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::AccessorNameSetterCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<void> info(begin());
diff --git a/deps/v8/src/api-arguments.cc b/deps/v8/src/api-arguments.cc
index 6e347c7b62..f8d6c8fcc3 100644
--- a/deps/v8/src/api-arguments.cc
+++ b/deps/v8/src/api-arguments.cc
@@ -13,8 +13,6 @@ namespace internal {
Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionCallback);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &internal::tracing::TraceEventStatsTable::FunctionCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);
@@ -26,8 +24,6 @@ Handle<JSObject> PropertyCallbackArguments::Call(
IndexedPropertyEnumeratorCallback f) {
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::PropertyCallback);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &internal::tracing::TraceEventStatsTable::PropertyCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Array> info(begin());
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index 0dfe61824a..9e01f3ae7d 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -119,9 +119,16 @@ class PropertyCallbackArguments
inline Handle<Object> Call(GenericNamedPropertySetterCallback f,
Handle<Name> name, Handle<Object> value);
+ inline Handle<Object> Call(GenericNamedPropertyDefinerCallback f,
+ Handle<Name> name,
+ const v8::PropertyDescriptor& desc);
+
inline Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
Handle<Object> value);
+ inline Handle<Object> Call(IndexedPropertyDefinerCallback f, uint32_t index,
+ const v8::PropertyDescriptor& desc);
+
inline void Call(AccessorNameSetterCallback f, Handle<Name> name,
Handle<Object> value);
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 0f3c3b69ed..ea2cce5c88 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -17,42 +17,39 @@ namespace {
class InvokeScope {
public:
- explicit InvokeScope(Isolate* isolate) : save_context_(isolate) {}
+ explicit InvokeScope(Isolate* isolate)
+ : isolate_(isolate), save_context_(isolate) {}
~InvokeScope() {
- Isolate* isolate = save_context_.isolate();
- bool has_exception = isolate->has_pending_exception();
+ bool has_exception = isolate_->has_pending_exception();
if (has_exception) {
- isolate->ReportPendingMessages();
+ isolate_->ReportPendingMessages();
} else {
- isolate->clear_pending_message();
+ isolate_->clear_pending_message();
}
}
private:
+ Isolate* isolate_;
SaveContext save_context_;
};
-enum class CacheCheck { kCheck, kSkip };
-
-MaybeHandle<JSObject> InstantiateObject(
- Isolate* isolate, Handle<ObjectTemplateInfo> data,
- Handle<JSReceiver> new_target, CacheCheck cache_check = CacheCheck::kCheck,
- bool is_hidden_prototype = false);
+MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
+ Handle<ObjectTemplateInfo> data,
+ Handle<JSReceiver> new_target,
+ bool is_hidden_prototype);
-MaybeHandle<JSFunction> InstantiateFunction(
- Isolate* isolate, Handle<FunctionTemplateInfo> data,
- CacheCheck cache_check = CacheCheck::kCheck,
- Handle<Name> name = Handle<Name>());
+MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
+ Handle<FunctionTemplateInfo> data,
+ Handle<Name> name = Handle<Name>());
MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
Handle<Name> name = Handle<Name>()) {
if (data->IsFunctionTemplateInfo()) {
return InstantiateFunction(isolate,
- Handle<FunctionTemplateInfo>::cast(data),
- CacheCheck::kCheck, name);
+ Handle<FunctionTemplateInfo>::cast(data), name);
} else if (data->IsObjectTemplateInfo()) {
return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data),
- Handle<JSReceiver>());
+ Handle<JSReceiver>(), false);
} else {
return data;
}
@@ -199,15 +196,14 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
Handle<FixedArray> array =
isolate->factory()->NewFixedArray(max_number_of_properties);
- info = *data;
- while (info != nullptr) {
+ for (Handle<TemplateInfoT> temp(*data); *temp != nullptr;
+ temp = handle(temp->GetParent(isolate), isolate)) {
// Accumulate accessors.
- Object* maybe_properties = info->property_accessors();
+ Object* maybe_properties = temp->property_accessors();
if (!maybe_properties->IsUndefined(isolate)) {
valid_descriptors = AccessorInfo::AppendUnique(
handle(maybe_properties, isolate), array, valid_descriptors);
}
- info = info->GetParent(isolate);
}
// Install accumulated accessors.
@@ -339,17 +335,9 @@ bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo* info,
return fun->context()->native_context() == isolate->raw_native_context();
}
-MaybeHandle<JSObject> InstantiateObjectWithInvokeScope(
- Isolate* isolate, Handle<ObjectTemplateInfo> info,
- Handle<JSReceiver> new_target) {
- InvokeScope invoke_scope(isolate);
- return InstantiateObject(isolate, info, new_target, CacheCheck::kSkip);
-}
-
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<ObjectTemplateInfo> info,
Handle<JSReceiver> new_target,
- CacheCheck cache_check,
bool is_hidden_prototype) {
Handle<JSFunction> constructor;
int serial_number = Smi::cast(info->serial_number())->value();
@@ -363,7 +351,7 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
}
// Fast path.
Handle<JSObject> result;
- if (serial_number && cache_check == CacheCheck::kCheck) {
+ if (serial_number) {
if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
return isolate->factory()->CopyJSObject(result);
}
@@ -397,6 +385,7 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
if (info->immutable_proto()) {
JSObject::SetImmutableProto(object);
}
+ // TODO(dcarney): is this necessary?
JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
if (serial_number) {
@@ -406,18 +395,12 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
return result;
}
-MaybeHandle<JSFunction> InstantiateFunctionWithInvokeScope(
- Isolate* isolate, Handle<FunctionTemplateInfo> info) {
- InvokeScope invoke_scope(isolate);
- return InstantiateFunction(isolate, info, CacheCheck::kSkip);
-}
MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
- CacheCheck cache_check,
Handle<Name> name) {
int serial_number = Smi::cast(data->serial_number())->value();
- if (serial_number && cache_check == CacheCheck::kCheck) {
+ if (serial_number) {
Handle<JSObject> result;
if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
return Handle<JSFunction>::cast(result);
@@ -434,8 +417,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
InstantiateObject(
isolate,
handle(ObjectTemplateInfo::cast(prototype_templ), isolate),
- Handle<JSReceiver>(), CacheCheck::kCheck,
- data->hidden_prototype()),
+ Handle<JSReceiver>(), data->hidden_prototype()),
JSFunction);
}
Object* parent = data->parent_template();
@@ -505,31 +487,17 @@ void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
} // namespace
MaybeHandle<JSFunction> ApiNatives::InstantiateFunction(
- Handle<FunctionTemplateInfo> info) {
- Isolate* isolate = info->GetIsolate();
- int serial_number = Smi::cast(info->serial_number())->value();
- if (serial_number) {
- Handle<JSObject> result;
- if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
- return Handle<JSFunction>::cast(result);
- }
- }
- return InstantiateFunctionWithInvokeScope(isolate, info);
+ Handle<FunctionTemplateInfo> data) {
+ Isolate* isolate = data->GetIsolate();
+ InvokeScope invoke_scope(isolate);
+ return ::v8::internal::InstantiateFunction(isolate, data);
}
MaybeHandle<JSObject> ApiNatives::InstantiateObject(
- Handle<ObjectTemplateInfo> info, Handle<JSReceiver> new_target) {
- Isolate* isolate = info->GetIsolate();
- int serial_number = Smi::cast(info->serial_number())->value();
- if (serial_number && !new_target.is_null() &&
- IsSimpleInstantiation(isolate, *info, *new_target)) {
- // Fast path.
- Handle<JSObject> result;
- if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
- return isolate->factory()->CopyJSObject(result);
- }
- }
- return InstantiateObjectWithInvokeScope(isolate, info, new_target);
+ Handle<ObjectTemplateInfo> data, Handle<JSReceiver> new_target) {
+ Isolate* isolate = data->GetIsolate();
+ InvokeScope invoke_scope(isolate);
+ return ::v8::internal::InstantiateObject(isolate, data, new_target, false);
}
MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 6858a325c4..44933b965b 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -24,6 +24,7 @@
#include "src/base/functional.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
+#include "src/base/safe_conversions.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/char-predicates-inl.h"
@@ -68,6 +69,7 @@
#include "src/unicode-inl.h"
#include "src/v8.h"
#include "src/v8threads.h"
+#include "src/value-serializer.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
#include "src/wasm/wasm-module.h"
@@ -77,9 +79,6 @@ namespace v8 {
#define LOG_API(isolate, class_name, function_name) \
i::RuntimeCallTimerScope _runtime_timer( \
isolate, &i::RuntimeCallStats::API_##class_name##_##function_name); \
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED( \
- isolate, &internal::tracing::TraceEventStatsTable:: \
- API_##class_name##_##function_name); \
LOG(isolate, ApiEntryCall("v8::" #class_name "::" #function_name))
#define ENTER_V8(isolate) i::VMState<v8::OTHER> __state__((isolate))
@@ -105,6 +104,16 @@ namespace v8 {
PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, function_name, \
bailout_value, HandleScopeClass, do_callback);
+#define PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE( \
+ category, name, context, class_name, function_name, bailout_value, \
+ HandleScopeClass, do_callback) \
+ auto isolate = context.IsEmpty() \
+ ? i::Isolate::Current() \
+ : reinterpret_cast<i::Isolate*>(context->GetIsolate()); \
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, category, name); \
+ PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, function_name, \
+ bailout_value, HandleScopeClass, do_callback);
+
#define PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, class_name, function_name, \
T) \
PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(), class_name, \
@@ -126,6 +135,10 @@ namespace v8 {
PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
Nothing<T>(), i::HandleScope, false)
+#define PREPARE_FOR_EXECUTION_BOOL(context, class_name, function_name) \
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
+ false, i::HandleScope, false)
+
#define EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, value) \
do { \
if (has_pending_exception) { \
@@ -142,6 +155,8 @@ namespace v8 {
#define RETURN_ON_FAILED_EXECUTION_PRIMITIVE(T) \
EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, Nothing<T>())
+#define RETURN_ON_FAILED_EXECUTION_BOOL() \
+ EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, false)
#define RETURN_TO_LOCAL_UNCHECKED(maybe_local, T) \
return maybe_local.FromMaybe(Local<T>());
@@ -513,7 +528,8 @@ StartupData SnapshotCreator::CreateBlob(
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
- isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
+ isolate->heap()->CollectAllAvailableGarbage(
+ i::GarbageCollectionReason::kSnapshotCreator);
isolate->heap()->CompactWeakFixedArrays();
i::DisallowHeapAllocation no_gc_from_here_on;
@@ -770,11 +786,6 @@ i::Object** V8::CopyPersistent(i::Object** obj) {
return result.location();
}
-void V8::RegisterExternallyReferencedObject(i::Object** object,
- i::Isolate* isolate) {
- isolate->heap()->RegisterExternallyReferencedObject(object);
-}
-
void V8::MakeWeak(i::Object** location, void* parameter,
int internal_field_index1, int internal_field_index2,
WeakCallbackInfo<void>::Callback weak_callback) {
@@ -1503,12 +1514,17 @@ void ObjectTemplate::SetAccessor(v8::Local<Name> name,
signature, i::FLAG_disable_old_api_accessors);
}
-template <typename Getter, typename Setter, typename Query, typename Deleter,
- typename Enumerator>
+template <typename Getter, typename Setter, typename Query, typename Descriptor,
+ typename Deleter, typename Enumerator, typename Definer>
static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
i::Isolate* isolate, Getter getter, Setter setter, Query query,
- Deleter remover, Enumerator enumerator, Local<Value> data,
- PropertyHandlerFlags flags) {
+ Descriptor descriptor, Deleter remover, Enumerator enumerator,
+ Definer definer, Local<Value> data, PropertyHandlerFlags flags) {
+ DCHECK(query == nullptr ||
+ descriptor == nullptr); // Either intercept attributes or descriptor.
+ DCHECK(query == nullptr ||
+ definer ==
+ nullptr); // Only use descriptor callback with definer callback.
auto obj = i::Handle<i::InterceptorInfo>::cast(
isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE));
obj->set_flags(0);
@@ -1516,8 +1532,10 @@ static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+ if (descriptor != 0) SET_FIELD_WRAPPED(obj, set_descriptor, descriptor);
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
+ if (definer != 0) SET_FIELD_WRAPPED(obj, set_definer, definer);
obj->set_can_intercept_symbols(
!(static_cast<int>(flags) &
static_cast<int>(PropertyHandlerFlags::kOnlyInterceptStrings)));
@@ -1533,40 +1551,37 @@ static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
return obj;
}
-template <typename Getter, typename Setter, typename Query, typename Deleter,
- typename Enumerator>
-static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
- Getter getter, Setter setter,
- Query query, Deleter remover,
- Enumerator enumerator,
- Local<Value> data,
- PropertyHandlerFlags flags) {
+template <typename Getter, typename Setter, typename Query, typename Descriptor,
+ typename Deleter, typename Enumerator, typename Definer>
+static void ObjectTemplateSetNamedPropertyHandler(
+ ObjectTemplate* templ, Getter getter, Setter setter, Query query,
+ Descriptor descriptor, Deleter remover, Enumerator enumerator,
+ Definer definer, Local<Value> data, PropertyHandlerFlags flags) {
i::Isolate* isolate = Utils::OpenHandle(templ)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, templ);
EnsureNotInstantiated(cons, "ObjectTemplateSetNamedPropertyHandler");
- auto obj = CreateInterceptorInfo(isolate, getter, setter, query, remover,
- enumerator, data, flags);
+ auto obj = CreateInterceptorInfo(isolate, getter, setter, query, descriptor,
+ remover, enumerator, definer, data, flags);
cons->set_named_property_handler(*obj);
}
-
void ObjectTemplate::SetNamedPropertyHandler(
NamedPropertyGetterCallback getter, NamedPropertySetterCallback setter,
NamedPropertyQueryCallback query, NamedPropertyDeleterCallback remover,
NamedPropertyEnumeratorCallback enumerator, Local<Value> data) {
ObjectTemplateSetNamedPropertyHandler(
- this, getter, setter, query, remover, enumerator, data,
+ this, getter, setter, query, nullptr, remover, enumerator, nullptr, data,
PropertyHandlerFlags::kOnlyInterceptStrings);
}
-
void ObjectTemplate::SetHandler(
const NamedPropertyHandlerConfiguration& config) {
ObjectTemplateSetNamedPropertyHandler(
- this, config.getter, config.setter, config.query, config.deleter,
- config.enumerator, config.data, config.flags);
+ this, config.getter, config.setter, config.query, config.descriptor,
+ config.deleter, config.enumerator, config.definer, config.data,
+ config.flags);
}
@@ -1626,13 +1641,14 @@ void ObjectTemplate::SetAccessCheckCallbackAndHandler(
SET_FIELD_WRAPPED(info, set_callback, callback);
auto named_interceptor = CreateInterceptorInfo(
isolate, named_handler.getter, named_handler.setter, named_handler.query,
- named_handler.deleter, named_handler.enumerator, named_handler.data,
- named_handler.flags);
+ named_handler.descriptor, named_handler.deleter, named_handler.enumerator,
+ named_handler.definer, named_handler.data, named_handler.flags);
info->set_named_interceptor(*named_interceptor);
auto indexed_interceptor = CreateInterceptorInfo(
isolate, indexed_handler.getter, indexed_handler.setter,
- indexed_handler.query, indexed_handler.deleter,
- indexed_handler.enumerator, indexed_handler.data, indexed_handler.flags);
+ indexed_handler.query, indexed_handler.descriptor,
+ indexed_handler.deleter, indexed_handler.enumerator,
+ indexed_handler.definer, indexed_handler.data, indexed_handler.flags);
info->set_indexed_interceptor(*indexed_interceptor);
if (data.IsEmpty()) {
@@ -1651,9 +1667,10 @@ void ObjectTemplate::SetHandler(
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetHandler");
- auto obj = CreateInterceptorInfo(
- isolate, config.getter, config.setter, config.query, config.deleter,
- config.enumerator, config.data, config.flags);
+ auto obj = CreateInterceptorInfo(isolate, config.getter, config.setter,
+ config.query, config.descriptor,
+ config.deleter, config.enumerator,
+ config.definer, config.data, config.flags);
cons->set_indexed_property_handler(*obj);
}
@@ -1834,17 +1851,19 @@ Local<Value> UnboundScript::GetSourceMappingURL() {
MaybeLocal<Value> Script::Run(Local<Context> context) {
- PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Script, Run, Value)
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
+ "v8", "V8.Execute", context, Script, Run, MaybeLocal<Value>(),
+ InternalEscapableScope, true);
i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
auto fun = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(this));
+
i::Handle<i::Object> receiver = isolate->global_proxy();
Local<Value> result;
- has_pending_exception =
- !ToLocal<Value>(i::Execution::Call(isolate, fun, receiver, 0, NULL),
- &result);
+ has_pending_exception = !ToLocal<Value>(
+ i::Execution::Call(isolate, fun, receiver, 0, nullptr), &result);
+
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@@ -1866,6 +1885,58 @@ Local<UnboundScript> Script::GetUnboundScript() {
i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared()));
}
+int Module::GetModuleRequestsLength() const {
+ i::Handle<i::Module> self = Utils::OpenHandle(this);
+ return self->info()->module_requests()->length();
+}
+
+Local<String> Module::GetModuleRequest(int i) const {
+ CHECK_GE(i, 0);
+ i::Handle<i::Module> self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ i::Handle<i::FixedArray> module_requests(self->info()->module_requests(),
+ isolate);
+ CHECK_LT(i, module_requests->length());
+ return ToApiHandle<String>(i::handle(module_requests->get(i), isolate));
+}
+
+void Module::SetEmbedderData(Local<Value> data) {
+ Utils::OpenHandle(this)->set_embedder_data(*Utils::OpenHandle(*data));
+}
+
+Local<Value> Module::GetEmbedderData() const {
+ auto self = Utils::OpenHandle(this);
+ return ToApiHandle<Value>(
+ i::handle(self->embedder_data(), self->GetIsolate()));
+}
+
+bool Module::Instantiate(Local<Context> context,
+ Module::ResolveCallback callback,
+ Local<Value> callback_data) {
+ PREPARE_FOR_EXECUTION_BOOL(context, Module, Instantiate);
+ has_pending_exception = !i::Module::Instantiate(
+ Utils::OpenHandle(this), context, callback, callback_data);
+ RETURN_ON_FAILED_EXECUTION_BOOL();
+ return true;
+}
+
+MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
+ "v8", "V8.Execute", context, Module, Evaluate, MaybeLocal<Value>(),
+ InternalEscapableScope, true);
+ i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
+ i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
+ i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+
+ i::Handle<i::Module> self = Utils::OpenHandle(this);
+ // It's an API error to call Evaluate before Instantiate.
+ CHECK(self->code()->IsJSFunction());
+
+ Local<Value> result;
+ has_pending_exception = !ToLocal(i::Module::Evaluate(self), &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
Isolate* v8_isolate, Source* source, CompileOptions options,
@@ -1976,16 +2047,16 @@ Local<Script> ScriptCompiler::Compile(
RETURN_TO_LOCAL_UNCHECKED(Compile(context, source, options), Script);
}
+MaybeLocal<Module> ScriptCompiler::CompileModule(Isolate* isolate,
+ Source* source) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-MaybeLocal<Script> ScriptCompiler::CompileModule(Local<Context> context,
- Source* source,
- CompileOptions options) {
- auto isolate = context->GetIsolate();
- auto maybe = CompileUnboundInternal(isolate, source, options, true);
- Local<UnboundScript> generic;
- if (!maybe.ToLocal(&generic)) return MaybeLocal<Script>();
- v8::Context::Scope scope(context);
- return generic->BindToCurrentContext();
+ auto maybe = CompileUnboundInternal(isolate, source, kNoCompileOptions, true);
+ Local<UnboundScript> unbound;
+ if (!maybe.ToLocal(&unbound)) return MaybeLocal<Module>();
+
+ i::Handle<i::SharedFunctionInfo> shared = Utils::OpenHandle(*unbound);
+ return ToApiHandle<Module>(i_isolate->factory()->NewModule(shared));
}
@@ -2084,7 +2155,13 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
Utils::OpenHandle(*context_extensions[i]);
if (!extension->IsJSObject()) return Local<Function>();
i::Handle<i::JSFunction> closure(context->closure(), isolate);
- context = factory->NewWithContext(closure, context, extension);
+ context = factory->NewWithContext(
+ closure, context,
+ i::ScopeInfo::CreateForWithScope(
+ isolate, context->IsNativeContext()
+ ? i::Handle<i::ScopeInfo>::null()
+ : i::Handle<i::ScopeInfo>(context->scope_info())),
+ extension);
}
i::Handle<i::Object> name_obj;
@@ -2138,6 +2215,9 @@ Local<Function> ScriptCompiler::CompileFunctionInContext(
ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
Isolate* v8_isolate, StreamedSource* source, CompileOptions options) {
+ if (!i::FLAG_script_streaming) {
+ return nullptr;
+ }
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
return new i::BackgroundParsingTask(source->impl(), options,
i::FLAG_stack_size, isolate);
@@ -2171,17 +2251,19 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
}
source->info->set_script(script);
- source->info->set_context(isolate->native_context());
- // Create a canonical handle scope before internalizing parsed values if
- // compiling bytecode. This is required for off-thread bytecode generation.
- std::unique_ptr<i::CanonicalHandleScope> canonical;
- if (i::FLAG_ignition) canonical.reset(new i::CanonicalHandleScope(isolate));
+ {
+ // Create a canonical handle scope if compiling ignition bytecode. This is
+ // required by the constant array builder to de-duplicate objects without
+ // dereferencing handles.
+ std::unique_ptr<i::CanonicalHandleScope> canonical;
+ if (i::FLAG_ignition) canonical.reset(new i::CanonicalHandleScope(isolate));
- // Do the parsing tasks which need to be done on the main thread. This will
- // also handle parse errors.
- source->parser->Internalize(isolate, script,
- source->info->literal() == nullptr);
+ // Do the parsing tasks which need to be done on the main thread. This will
+ // also handle parse errors.
+ source->parser->Internalize(isolate, script,
+ source->info->literal() == nullptr);
+ }
source->parser->HandleSourceURLComments(isolate, script);
i::Handle<i::SharedFunctionInfo> result;
@@ -2192,9 +2274,10 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
}
has_pending_exception = result.is_null();
if (has_pending_exception) isolate->ReportPendingMessages();
- RETURN_ON_FAILED_EXECUTION(Script);
- source->info->clear_script(); // because script goes out of scope.
+ source->Release();
+
+ RETURN_ON_FAILED_EXECUTION(Script);
Local<UnboundScript> generic = ToApiHandle<UnboundScript>(result);
if (generic.IsEmpty()) return Local<Script>();
@@ -2263,8 +2346,8 @@ v8::TryCatch::TryCatch()
ResetInternal();
// Special handling for simulators which have a separate JS stack.
js_stack_comparable_address_ =
- reinterpret_cast<void*>(v8::internal::SimulatorStack::RegisterCTryCatch(
- isolate_, v8::internal::GetCurrentStackPosition()));
+ reinterpret_cast<void*>(i::SimulatorStack::RegisterCTryCatch(
+ isolate_, i::GetCurrentStackPosition()));
isolate_->RegisterTryCatchHandler(this);
}
@@ -2280,8 +2363,8 @@ v8::TryCatch::TryCatch(v8::Isolate* isolate)
ResetInternal();
// Special handling for simulators which have a separate JS stack.
js_stack_comparable_address_ =
- reinterpret_cast<void*>(v8::internal::SimulatorStack::RegisterCTryCatch(
- isolate_, v8::internal::GetCurrentStackPosition()));
+ reinterpret_cast<void*>(i::SimulatorStack::RegisterCTryCatch(
+ isolate_, i::GetCurrentStackPosition()));
isolate_->RegisterTryCatchHandler(this);
}
@@ -2300,7 +2383,7 @@ v8::TryCatch::~TryCatch() {
isolate_->RestorePendingMessageFromTryCatch(this);
}
isolate_->UnregisterTryCatchHandler(this);
- v8::internal::SimulatorStack::UnregisterCTryCatch(isolate_);
+ i::SimulatorStack::UnregisterCTryCatch(isolate_);
reinterpret_cast<Isolate*>(isolate_)->ThrowException(exc);
DCHECK(!isolate_->thread_local_top()->rethrowing_message_);
} else {
@@ -2311,7 +2394,7 @@ v8::TryCatch::~TryCatch() {
isolate_->CancelScheduledExceptionFromTryCatch(this);
}
isolate_->UnregisterTryCatchHandler(this);
- v8::internal::SimulatorStack::UnregisterCTryCatch(isolate_);
+ i::SimulatorStack::UnregisterCTryCatch(isolate_);
}
}
@@ -2832,6 +2915,205 @@ MaybeLocal<String> JSON::Stringify(Local<Context> context,
RETURN_ESCAPED(result);
}
+// --- V a l u e S e r i a l i z a t i o n ---
+
+Maybe<bool> ValueSerializer::Delegate::WriteHostObject(Isolate* v8_isolate,
+ Local<Object> object) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->ScheduleThrow(*isolate->factory()->NewError(
+ isolate->error_function(), i::MessageTemplate::kDataCloneError,
+ Utils::OpenHandle(*object)));
+ return Nothing<bool>();
+}
+
+struct ValueSerializer::PrivateData {
+ explicit PrivateData(i::Isolate* i, ValueSerializer::Delegate* delegate)
+ : isolate(i), serializer(i, delegate) {}
+ i::Isolate* isolate;
+ i::ValueSerializer serializer;
+};
+
+ValueSerializer::ValueSerializer(Isolate* isolate)
+ : ValueSerializer(isolate, nullptr) {}
+
+ValueSerializer::ValueSerializer(Isolate* isolate, Delegate* delegate)
+ : private_(
+ new PrivateData(reinterpret_cast<i::Isolate*>(isolate), delegate)) {}
+
+ValueSerializer::~ValueSerializer() { delete private_; }
+
+void ValueSerializer::WriteHeader() { private_->serializer.WriteHeader(); }
+
+Maybe<bool> ValueSerializer::WriteValue(Local<Context> context,
+ Local<Value> value) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, ValueSerializer, WriteValue, bool);
+ i::Handle<i::Object> object = Utils::OpenHandle(*value);
+ Maybe<bool> result = private_->serializer.WriteObject(object);
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
+}
+
+std::vector<uint8_t> ValueSerializer::ReleaseBuffer() {
+ return private_->serializer.ReleaseBuffer();
+}
+
+void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
+ Local<ArrayBuffer> array_buffer) {
+ private_->serializer.TransferArrayBuffer(transfer_id,
+ Utils::OpenHandle(*array_buffer));
+}
+
+void ValueSerializer::TransferSharedArrayBuffer(
+ uint32_t transfer_id, Local<SharedArrayBuffer> shared_array_buffer) {
+ private_->serializer.TransferArrayBuffer(
+ transfer_id, Utils::OpenHandle(*shared_array_buffer));
+}
+
+void ValueSerializer::WriteUint32(uint32_t value) {
+ private_->serializer.WriteUint32(value);
+}
+
+void ValueSerializer::WriteUint64(uint64_t value) {
+ private_->serializer.WriteUint64(value);
+}
+
+void ValueSerializer::WriteDouble(double value) {
+ private_->serializer.WriteDouble(value);
+}
+
+void ValueSerializer::WriteRawBytes(const void* source, size_t length) {
+ private_->serializer.WriteRawBytes(source, length);
+}
+
+MaybeLocal<Object> ValueDeserializer::Delegate::ReadHostObject(
+ Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->ScheduleThrow(*isolate->factory()->NewError(
+ isolate->error_function(),
+ i::MessageTemplate::kDataCloneDeserializationError));
+ return MaybeLocal<Object>();
+}
+
+struct ValueDeserializer::PrivateData {
+ PrivateData(i::Isolate* i, i::Vector<const uint8_t> data, Delegate* delegate)
+ : isolate(i), deserializer(i, data, delegate) {}
+ i::Isolate* isolate;
+ i::ValueDeserializer deserializer;
+ bool has_aborted = false;
+ bool supports_legacy_wire_format = false;
+};
+
+ValueDeserializer::ValueDeserializer(Isolate* isolate, const uint8_t* data,
+ size_t size)
+ : ValueDeserializer(isolate, data, size, nullptr) {}
+
+ValueDeserializer::ValueDeserializer(Isolate* isolate, const uint8_t* data,
+ size_t size, Delegate* delegate) {
+ if (base::IsValueInRangeForNumericType<int>(size)) {
+ private_ = new PrivateData(
+ reinterpret_cast<i::Isolate*>(isolate),
+ i::Vector<const uint8_t>(data, static_cast<int>(size)), delegate);
+ } else {
+ private_ = new PrivateData(reinterpret_cast<i::Isolate*>(isolate),
+ i::Vector<const uint8_t>(nullptr, 0), nullptr);
+ private_->has_aborted = true;
+ }
+}
+
+ValueDeserializer::~ValueDeserializer() { delete private_; }
+
+Maybe<bool> ValueDeserializer::ReadHeader(Local<Context> context) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, ValueDeserializer, ReadHeader, bool);
+
+ // We could have aborted during the constructor.
+ // If so, ReadHeader is where we report it.
+ if (private_->has_aborted) {
+ isolate->Throw(*isolate->factory()->NewError(
+ i::MessageTemplate::kDataCloneDeserializationError));
+ has_pending_exception = true;
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ }
+
+ bool read_header = false;
+ has_pending_exception = !private_->deserializer.ReadHeader().To(&read_header);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ DCHECK(read_header);
+
+ // TODO(jbroman): Today, all wire formats are "legacy". When a more supported
+ // format is added, compare the version of the internal serializer to the
+ // minimum non-legacy version number.
+ if (!private_->supports_legacy_wire_format) {
+ isolate->Throw(*isolate->factory()->NewError(
+ i::MessageTemplate::kDataCloneDeserializationVersionError));
+ has_pending_exception = true;
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ }
+
+ return Just(true);
+}
+
+Maybe<bool> ValueDeserializer::ReadHeader() {
+ Isolate* isolate = reinterpret_cast<Isolate*>(private_->isolate);
+ return ReadHeader(isolate->GetEnteredContext());
+}
+
+void ValueDeserializer::SetSupportsLegacyWireFormat(
+ bool supports_legacy_wire_format) {
+ private_->supports_legacy_wire_format = supports_legacy_wire_format;
+}
+
+uint32_t ValueDeserializer::GetWireFormatVersion() const {
+ CHECK(!private_->has_aborted);
+ return private_->deserializer.GetWireFormatVersion();
+}
+
+MaybeLocal<Value> ValueDeserializer::ReadValue(Local<Context> context) {
+ CHECK(!private_->has_aborted);
+ PREPARE_FOR_EXECUTION(context, ValueDeserializer, ReadValue, Value);
+ i::MaybeHandle<i::Object> result;
+ if (GetWireFormatVersion() > 0) {
+ result = private_->deserializer.ReadObject();
+ } else {
+ result =
+ private_->deserializer.ReadObjectUsingEntireBufferForLegacyFormat();
+ }
+ Local<Value> value;
+ has_pending_exception = !ToLocal(result, &value);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(value);
+}
+
+void ValueDeserializer::TransferArrayBuffer(uint32_t transfer_id,
+ Local<ArrayBuffer> array_buffer) {
+ CHECK(!private_->has_aborted);
+ private_->deserializer.TransferArrayBuffer(transfer_id,
+ Utils::OpenHandle(*array_buffer));
+}
+
+void ValueDeserializer::TransferSharedArrayBuffer(
+ uint32_t transfer_id, Local<SharedArrayBuffer> shared_array_buffer) {
+ CHECK(!private_->has_aborted);
+ private_->deserializer.TransferArrayBuffer(
+ transfer_id, Utils::OpenHandle(*shared_array_buffer));
+}
+
+bool ValueDeserializer::ReadUint32(uint32_t* value) {
+ return private_->deserializer.ReadUint32(value);
+}
+
+bool ValueDeserializer::ReadUint64(uint64_t* value) {
+ return private_->deserializer.ReadUint64(value);
+}
+
+bool ValueDeserializer::ReadDouble(double* value) {
+ return private_->deserializer.ReadDouble(value);
+}
+
+bool ValueDeserializer::ReadRawBytes(size_t length, const void** data) {
+ return private_->deserializer.ReadRawBytes(length, data);
+}
+
// --- D a t a ---
bool Value::FullIsUndefined() const {
@@ -3019,12 +3301,18 @@ bool Value::IsRegExp() const {
return obj->IsJSRegExp();
}
+bool Value::IsAsyncFunction() const {
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (!obj->IsJSFunction()) return false;
+ i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(obj);
+ return i::IsAsyncFunction(func->shared()->kind());
+}
bool Value::IsGeneratorFunction() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (!obj->IsJSFunction()) return false;
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(obj);
- return func->shared()->is_generator();
+ return i::IsGeneratorFunction(func->shared()->kind());
}
@@ -3662,6 +3950,98 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
return result;
}
+struct v8::PropertyDescriptor::PrivateData {
+ PrivateData() : desc() {}
+ i::PropertyDescriptor desc;
+};
+
+v8::PropertyDescriptor::PropertyDescriptor() : private_(new PrivateData()) {}
+
+// DataDescriptor
+v8::PropertyDescriptor::PropertyDescriptor(v8::Local<v8::Value> value)
+ : private_(new PrivateData()) {
+ private_->desc.set_value(Utils::OpenHandle(*value, true));
+}
+
+// DataDescriptor with writable field
+v8::PropertyDescriptor::PropertyDescriptor(v8::Local<v8::Value> value,
+ bool writable)
+ : private_(new PrivateData()) {
+ private_->desc.set_value(Utils::OpenHandle(*value, true));
+ private_->desc.set_writable(writable);
+}
+
+// AccessorDescriptor
+v8::PropertyDescriptor::PropertyDescriptor(v8::Local<v8::Value> get,
+ v8::Local<v8::Value> set)
+ : private_(new PrivateData()) {
+ DCHECK(get.IsEmpty() || get->IsUndefined() || get->IsFunction());
+ DCHECK(set.IsEmpty() || set->IsUndefined() || set->IsFunction());
+ private_->desc.set_get(Utils::OpenHandle(*get, true));
+ private_->desc.set_set(Utils::OpenHandle(*set, true));
+}
+
+v8::PropertyDescriptor::~PropertyDescriptor() { delete private_; }
+
+v8::Local<Value> v8::PropertyDescriptor::value() const {
+ DCHECK(private_->desc.has_value());
+ return Utils::ToLocal(private_->desc.value());
+}
+
+v8::Local<Value> v8::PropertyDescriptor::get() const {
+ DCHECK(private_->desc.has_get());
+ return Utils::ToLocal(private_->desc.get());
+}
+
+v8::Local<Value> v8::PropertyDescriptor::set() const {
+ DCHECK(private_->desc.has_set());
+ return Utils::ToLocal(private_->desc.set());
+}
+
+bool v8::PropertyDescriptor::has_value() const {
+ return private_->desc.has_value();
+}
+bool v8::PropertyDescriptor::has_get() const {
+ return private_->desc.has_get();
+}
+bool v8::PropertyDescriptor::has_set() const {
+ return private_->desc.has_set();
+}
+
+bool v8::PropertyDescriptor::writable() const {
+ DCHECK(private_->desc.has_writable());
+ return private_->desc.writable();
+}
+
+bool v8::PropertyDescriptor::has_writable() const {
+ return private_->desc.has_writable();
+}
+
+void v8::PropertyDescriptor::set_enumerable(bool enumerable) {
+ private_->desc.set_enumerable(enumerable);
+}
+
+bool v8::PropertyDescriptor::enumerable() const {
+ DCHECK(private_->desc.has_enumerable());
+ return private_->desc.enumerable();
+}
+
+bool v8::PropertyDescriptor::has_enumerable() const {
+ return private_->desc.has_enumerable();
+}
+
+void v8::PropertyDescriptor::set_configurable(bool configurable) {
+ private_->desc.set_configurable(configurable);
+}
+
+bool v8::PropertyDescriptor::configurable() const {
+ DCHECK(private_->desc.has_configurable());
+ return private_->desc.configurable();
+}
+
+bool v8::PropertyDescriptor::has_configurable() const {
+ return private_->desc.has_configurable();
+}
Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
v8::Local<Name> key,
@@ -3672,13 +4052,6 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- if (self->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context()),
- i::Handle<i::JSObject>::cast(self))) {
- isolate->ReportFailedAccessCheck(i::Handle<i::JSObject>::cast(self));
- return Nothing<bool>();
- }
-
i::PropertyDescriptor desc;
desc.set_writable(!(attributes & v8::ReadOnly));
desc.set_enumerable(!(attributes & v8::DontEnum));
@@ -3691,6 +4064,19 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
return success;
}
+Maybe<bool> v8::Object::DefineProperty(v8::Local<v8::Context> context,
+ v8::Local<Name> key,
+ PropertyDescriptor& descriptor) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, DefineProperty, bool);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
+
+ Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
+ isolate, self, key_obj, &descriptor.get_private()->desc,
+ i::Object::DONT_THROW);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return success;
+}
MUST_USE_RESULT
static i::MaybeHandle<i::Object> DefineObjectProperty(
@@ -4408,9 +4794,10 @@ bool v8::Object::IsConstructor() {
MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
Local<Value> recv, int argc,
Local<Value> argv[]) {
- PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Object, CallAsFunction, Value);
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
+ "v8", "V8.Execute", context, Object, CallAsFunction, MaybeLocal<Value>(),
+ InternalEscapableScope, true);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
auto self = Utils::OpenHandle(this);
auto recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
@@ -4434,10 +4821,10 @@ Local<v8::Value> Object::CallAsFunction(v8::Local<v8::Value> recv, int argc,
MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
Local<Value> argv[]) {
- PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Object, CallAsConstructor,
- Value);
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
+ "v8", "V8.Execute", context, Object, CallAsConstructor,
+ MaybeLocal<Value>(), InternalEscapableScope, true);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -4485,9 +4872,10 @@ Local<v8::Object> Function::NewInstance() const {
MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
v8::Local<v8::Value> argv[]) const {
- PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Function, NewInstance, Object);
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
+ "v8", "V8.Execute", context, Function, NewInstance, MaybeLocal<Object>(),
+ InternalEscapableScope, true);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -4509,9 +4897,10 @@ Local<v8::Object> Function::NewInstance(int argc,
MaybeLocal<v8::Value> Function::Call(Local<Context> context,
v8::Local<v8::Value> recv, int argc,
v8::Local<v8::Value> argv[]) {
- PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Function, Call, Value);
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
+ "v8", "V8.Execute", context, Function, Call, MaybeLocal<Value>(),
+ InternalEscapableScope, true);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
@@ -5708,8 +6097,8 @@ Local<Context> NewContext(v8::Isolate* external_isolate,
v8::MaybeLocal<Value> global_object,
size_t context_snapshot_index) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
- LOG_API(isolate, Context, New);
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.NewContext");
+ LOG_API(isolate, Context, New);
i::HandleScope scope(isolate);
ExtensionConfiguration no_extensions;
if (extensions == NULL) extensions = &no_extensions;
@@ -6820,8 +7209,9 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Deserialize(
if (!maybe_compiled_part.ToHandle(&compiled_part)) {
return MaybeLocal<WasmCompiledModule>();
}
- return Local<WasmCompiledModule>::Cast(Utils::ToLocal(
- i::wasm::CreateCompiledModuleObject(i_isolate, compiled_part)));
+ return Local<WasmCompiledModule>::Cast(
+ Utils::ToLocal(i::wasm::CreateCompiledModuleObject(
+ i_isolate, compiled_part, i::wasm::ModuleOrigin::kWasmOrigin)));
}
// static
@@ -7234,8 +7624,7 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
void Isolate::ReportExternalAllocationLimitReached() {
i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
if (heap->gc_state() != i::Heap::NOT_IN_GC) return;
- heap->ReportExternalMemoryPressure(
- "external memory allocation limit reached.");
+ heap->ReportExternalMemoryPressure();
}
@@ -7303,27 +7692,24 @@ v8::Local<Value> Isolate::ThrowException(v8::Local<v8::Value> value) {
void Isolate::SetObjectGroupId(internal::Object** object, UniqueId id) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
internal_isolate->global_handles()->SetObjectGroupId(
- v8::internal::Handle<v8::internal::Object>(object).location(),
- id);
+ i::Handle<i::Object>(object).location(), id);
}
void Isolate::SetReferenceFromGroup(UniqueId id, internal::Object** object) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
internal_isolate->global_handles()->SetReferenceFromGroup(
- id,
- v8::internal::Handle<v8::internal::Object>(object).location());
+ id, i::Handle<i::Object>(object).location());
}
void Isolate::SetReference(internal::Object** parent,
internal::Object** child) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
- i::Object** parent_location =
- v8::internal::Handle<v8::internal::Object>(parent).location();
+ i::Object** parent_location = i::Handle<i::Object>(parent).location();
internal_isolate->global_handles()->SetReference(
reinterpret_cast<i::HeapObject**>(parent_location),
- v8::internal::Handle<v8::internal::Object>(child).location());
+ i::Handle<i::Object>(child).location());
}
@@ -7398,13 +7784,13 @@ void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) {
CHECK(i::FLAG_expose_gc);
if (type == kMinorGarbageCollection) {
reinterpret_cast<i::Isolate*>(this)->heap()->CollectGarbage(
- i::NEW_SPACE, "Isolate::RequestGarbageCollection",
+ i::NEW_SPACE, i::GarbageCollectionReason::kTesting,
kGCCallbackFlagForced);
} else {
DCHECK_EQ(kFullGarbageCollection, type);
reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage(
i::Heap::kAbortIncrementalMarkingMask,
- "Isolate::RequestGarbageCollection", kGCCallbackFlagForced);
+ i::GarbageCollectionReason::kTesting, kGCCallbackFlagForced);
}
}
@@ -7833,7 +8219,8 @@ void Isolate::LowMemoryNotification() {
i::HistogramTimerScope idle_notification_scope(
isolate->counters()->gc_low_memory_notification());
TRACE_EVENT0("v8", "V8.GCLowMemoryNotification");
- isolate->heap()->CollectAllAvailableGarbage("low memory notification");
+ isolate->heap()->CollectAllAvailableGarbage(
+ i::GarbageCollectionReason::kLowMemoryNotification);
}
}
@@ -7857,8 +8244,7 @@ void Isolate::IsolateInBackgroundNotification() {
void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- return isolate->heap()->MemoryPressureNotification(level,
- Locker::IsLocked(this));
+ isolate->heap()->MemoryPressureNotification(level, Locker::IsLocked(this));
}
void Isolate::SetRAILMode(RAILMode rail_mode) {
@@ -8325,6 +8711,10 @@ Local<String> CpuProfileNode::GetFunctionName() const {
}
}
+const char* CpuProfileNode::GetFunctionNameStr() const {
+ const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+ return node->entry()->name();
+}
int CpuProfileNode::GetScriptId() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
@@ -8332,7 +8722,6 @@ int CpuProfileNode::GetScriptId() const {
return entry->script_id();
}
-
Local<String> CpuProfileNode::GetScriptResourceName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
i::Isolate* isolate = node->isolate();
@@ -8340,6 +8729,10 @@ Local<String> CpuProfileNode::GetScriptResourceName() const {
node->entry()->resource_name()));
}
+const char* CpuProfileNode::GetScriptResourceNameStr() const {
+ const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+ return node->entry()->resource_name();
+}
int CpuProfileNode::GetLineNumber() const {
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number();
@@ -8966,9 +9359,6 @@ void InvokeAccessorGetterCallback(
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::AccessorGetterCallback);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate,
- &internal::tracing::TraceEventStatsTable::AccessorGetterCallback);
Address getter_address = reinterpret_cast<Address>(reinterpret_cast<intptr_t>(
getter));
VMState<EXTERNAL> state(isolate);
@@ -8982,9 +9372,6 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::InvokeFunctionCallback);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate,
- &internal::tracing::TraceEventStatsTable::InvokeFunctionCallback);
Address callback_address =
reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
VMState<EXTERNAL> state(isolate);
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index ede7ba9adc..22c10dda14 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -69,7 +69,6 @@ class RegisteredExtension {
static RegisteredExtension* first_extension_;
};
-
#define OPEN_HANDLE_LIST(V) \
V(Template, TemplateInfo) \
V(FunctionTemplate, FunctionTemplateInfo) \
@@ -101,6 +100,7 @@ class RegisteredExtension {
V(Symbol, Symbol) \
V(Script, JSFunction) \
V(UnboundScript, SharedFunctionInfo) \
+ V(Module, Module) \
V(Function, JSReceiver) \
V(Message, JSMessageObject) \
V(Context, Context) \
@@ -124,6 +124,8 @@ class Utils {
v8::internal::Handle<v8::internal::Context> obj);
static inline Local<Value> ToLocal(
v8::internal::Handle<v8::internal::Object> obj);
+ static inline Local<Module> ToLocal(
+ v8::internal::Handle<v8::internal::Module> obj);
static inline Local<Name> ToLocal(
v8::internal::Handle<v8::internal::Name> obj);
static inline Local<String> ToLocal(
@@ -136,6 +138,8 @@ class Utils {
v8::internal::Handle<v8::internal::JSReceiver> obj);
static inline Local<Object> ToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
+ static inline Local<Function> ToLocal(
+ v8::internal::Handle<v8::internal::JSFunction> obj);
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<Map> ToLocal(
@@ -284,12 +288,14 @@ inline bool ToLocal(v8::internal::MaybeHandle<v8::internal::Object> maybe,
MAKE_TO_LOCAL(ToLocal, Context, Context)
MAKE_TO_LOCAL(ToLocal, Object, Value)
+MAKE_TO_LOCAL(ToLocal, Module, Module)
MAKE_TO_LOCAL(ToLocal, Name, Name)
MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSReceiver, Object)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
+MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, JSMap, Map)
MAKE_TO_LOCAL(ToLocal, JSSet, Set)
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 9c629ce936..92c7075ee5 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -81,21 +81,20 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
-// TODO(cbruni): Convert the IsContext CHECK back to a DCHECK.
#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
\
V8_NOINLINE static Type Stats_##Name(int args_length, Object** args_object, \
Isolate* isolate) { \
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Name); \
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
+ "V8.Runtime_" #Name); \
Arguments args(args_length, args_object); \
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED( \
- isolate, &tracing::TraceEventStatsTable::Name); \
return __RT_impl_##Name(args, isolate); \
} \
\
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
- CHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
+ DCHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
CLOBBER_DOUBLE_REGISTERS(); \
if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() || \
FLAG_runtime_call_stats)) { \
diff --git a/deps/v8/src/arm/OWNERS b/deps/v8/src/arm/OWNERS
deleted file mode 100644
index 906a5ce641..0000000000
--- a/deps/v8/src/arm/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-rmcilroy@chromium.org
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index b1f33e009e..bc501b1099 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -46,7 +46,7 @@
namespace v8 {
namespace internal {
-bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
+bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsSimd128() { return false; }
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 78ffe25390..ee02027610 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -46,85 +46,192 @@
namespace v8 {
namespace internal {
-// Get the CPU features enabled by the build. For cross compilation the
-// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
-// can be defined to enable ARMv7 and VFPv3 instructions when building the
-// snapshot.
-static unsigned CpuFeaturesImpliedByCompiler() {
- unsigned answer = 0;
-#ifdef CAN_USE_ARMV8_INSTRUCTIONS
- if (FLAG_enable_armv8) {
- answer |= 1u << ARMv8;
- // ARMv8 always features VFP and NEON.
- answer |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
- answer |= 1u << SUDIV;
+static const unsigned kArmv6 = 0u;
+static const unsigned kArmv7 = kArmv6 | (1u << ARMv7);
+static const unsigned kArmv7WithSudiv = kArmv7 | (1u << ARMv7_SUDIV);
+static const unsigned kArmv8 = kArmv7WithSudiv | (1u << ARMv8);
+
+static unsigned CpuFeaturesFromCommandLine() {
+ unsigned result;
+ if (strcmp(FLAG_arm_arch, "armv8") == 0) {
+ result = kArmv8;
+ } else if (strcmp(FLAG_arm_arch, "armv7+sudiv") == 0) {
+ result = kArmv7WithSudiv;
+ } else if (strcmp(FLAG_arm_arch, "armv7") == 0) {
+ result = kArmv7;
+ } else if (strcmp(FLAG_arm_arch, "armv6") == 0) {
+ result = kArmv6;
+ } else {
+ fprintf(stderr, "Error: unrecognised value for --arm-arch ('%s').\n",
+ FLAG_arm_arch);
+ fprintf(stderr,
+ "Supported values are: armv8\n"
+ " armv7+sudiv\n"
+ " armv7\n"
+ " armv6\n");
+ CHECK(false);
+ }
+
+ // If any of the old (deprecated) flags are specified, print a warning, but
+ // otherwise try to respect them for now.
+ // TODO(jbramley): When all the old bots have been updated, remove this.
+ if (FLAG_enable_armv7.has_value || FLAG_enable_vfp3.has_value ||
+ FLAG_enable_32dregs.has_value || FLAG_enable_neon.has_value ||
+ FLAG_enable_sudiv.has_value || FLAG_enable_armv8.has_value) {
+ // As an approximation of the old behaviour, set the default values from the
+ // arm_arch setting, then apply the flags over the top.
+ bool enable_armv7 = (result & (1u << ARMv7)) != 0;
+ bool enable_vfp3 = (result & (1u << ARMv7)) != 0;
+ bool enable_32dregs = (result & (1u << ARMv7)) != 0;
+ bool enable_neon = (result & (1u << ARMv7)) != 0;
+ bool enable_sudiv = (result & (1u << ARMv7_SUDIV)) != 0;
+ bool enable_armv8 = (result & (1u << ARMv8)) != 0;
+ if (FLAG_enable_armv7.has_value) {
+ fprintf(stderr,
+ "Warning: --enable_armv7 is deprecated. "
+ "Use --arm_arch instead.\n");
+ enable_armv7 = FLAG_enable_armv7.value;
+ }
+ if (FLAG_enable_vfp3.has_value) {
+ fprintf(stderr,
+ "Warning: --enable_vfp3 is deprecated. "
+ "Use --arm_arch instead.\n");
+ enable_vfp3 = FLAG_enable_vfp3.value;
+ }
+ if (FLAG_enable_32dregs.has_value) {
+ fprintf(stderr,
+ "Warning: --enable_32dregs is deprecated. "
+ "Use --arm_arch instead.\n");
+ enable_32dregs = FLAG_enable_32dregs.value;
+ }
+ if (FLAG_enable_neon.has_value) {
+ fprintf(stderr,
+ "Warning: --enable_neon is deprecated. "
+ "Use --arm_arch instead.\n");
+ enable_neon = FLAG_enable_neon.value;
+ }
+ if (FLAG_enable_sudiv.has_value) {
+ fprintf(stderr,
+ "Warning: --enable_sudiv is deprecated. "
+ "Use --arm_arch instead.\n");
+ enable_sudiv = FLAG_enable_sudiv.value;
+ }
+ if (FLAG_enable_armv8.has_value) {
+ fprintf(stderr,
+ "Warning: --enable_armv8 is deprecated. "
+ "Use --arm_arch instead.\n");
+ enable_armv8 = FLAG_enable_armv8.value;
+ }
+ // Emulate the old implications.
+ if (enable_armv8) {
+ enable_vfp3 = true;
+ enable_neon = true;
+ enable_32dregs = true;
+ enable_sudiv = true;
+ }
+ // Select the best available configuration.
+ if (enable_armv7 && enable_vfp3 && enable_32dregs && enable_neon) {
+ if (enable_sudiv) {
+ if (enable_armv8) {
+ result = kArmv8;
+ } else {
+ result = kArmv7WithSudiv;
+ }
+ } else {
+ result = kArmv7;
+ }
+ } else {
+ result = kArmv6;
+ }
}
-#endif // CAN_USE_ARMV8_INSTRUCTIONS
-#ifdef CAN_USE_ARMV7_INSTRUCTIONS
- if (FLAG_enable_armv7) answer |= 1u << ARMv7;
-#endif // CAN_USE_ARMV7_INSTRUCTIONS
-#ifdef CAN_USE_VFP3_INSTRUCTIONS
- if (FLAG_enable_vfp3) answer |= 1u << VFP3 | 1u << ARMv7;
-#endif // CAN_USE_VFP3_INSTRUCTIONS
-#ifdef CAN_USE_VFP32DREGS
- if (FLAG_enable_32dregs) answer |= 1u << VFP32DREGS;
-#endif // CAN_USE_VFP32DREGS
-#ifdef CAN_USE_NEON
- if (FLAG_enable_neon) answer |= 1u << NEON;
-#endif // CAN_USE_VFP32DREGS
+ return result;
+}
+
+// Get the CPU features enabled by the build.
+// For cross compilation the preprocessor symbols such as
+// CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS can be used to
+// enable ARMv7 and VFPv3 instructions when building the snapshot. However,
+// these flags should be consistent with a supported ARM configuration:
+// "armv6": ARMv6 + VFPv2
+// "armv7": ARMv7 + VFPv3-D32 + NEON
+// "armv7+sudiv": ARMv7 + VFPv4-D32 + NEON + SUDIV
+// "armv8": ARMv8 (+ all of the above)
+static constexpr unsigned CpuFeaturesFromCompiler() {
+// TODO(jbramley): Once the build flags are simplified, these tests should
+// also be simplified.
+
+// Check *architectural* implications.
+#if defined(CAN_USE_ARMV8_INSTRUCTIONS) && !defined(CAN_USE_ARMV7_INSTRUCTIONS)
+#error "CAN_USE_ARMV8_INSTRUCTIONS should imply CAN_USE_ARMV7_INSTRUCTIONS"
+#endif
+#if defined(CAN_USE_ARMV8_INSTRUCTIONS) && !defined(CAN_USE_SUDIV)
+#error "CAN_USE_ARMV8_INSTRUCTIONS should imply CAN_USE_SUDIV"
+#endif
+#if defined(CAN_USE_ARMV7_INSTRUCTIONS) != defined(CAN_USE_VFP3_INSTRUCTIONS)
+// V8 requires VFP, and all ARMv7 devices with VFP have VFPv3. Similarly,
+// VFPv3 isn't available before ARMv7.
+#error "CAN_USE_ARMV7_INSTRUCTIONS should match CAN_USE_VFP3_INSTRUCTIONS"
+#endif
+#if defined(CAN_USE_NEON) && !defined(CAN_USE_ARMV7_INSTRUCTIONS)
+#error "CAN_USE_NEON should imply CAN_USE_ARMV7_INSTRUCTIONS"
+#endif
- return answer;
+// Find compiler-implied features.
+#if defined(CAN_USE_ARMV8_INSTRUCTIONS) && \
+ defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \
+ defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS)
+ return kArmv8;
+#elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \
+ defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS)
+ return kArmv7WithSudiv;
+#elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_NEON) && \
+ defined(CAN_USE_VFP3_INSTRUCTIONS)
+ return kArmv7;
+#else
+ return kArmv6;
+#endif
}
void CpuFeatures::ProbeImpl(bool cross_compile) {
- supported_ |= CpuFeaturesImpliedByCompiler();
dcache_line_size_ = 64;
+ unsigned command_line = CpuFeaturesFromCommandLine();
// Only use statically determined features for cross compile (snapshot).
- if (cross_compile) return;
+ if (cross_compile) {
+ supported_ |= command_line & CpuFeaturesFromCompiler();
+ return;
+ }
#ifndef __arm__
// For the simulator build, use whatever the flags specify.
- if (FLAG_enable_armv8) {
- supported_ |= 1u << ARMv8;
- // ARMv8 always features VFP and NEON.
- supported_ |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
- supported_ |= 1u << SUDIV;
- if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
- }
- if (FLAG_enable_armv7) {
- supported_ |= 1u << ARMv7;
- if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
- if (FLAG_enable_neon) supported_ |= 1u << NEON | 1u << VFP32DREGS;
- if (FLAG_enable_sudiv) supported_ |= 1u << SUDIV;
- if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
- if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
- }
+ supported_ |= command_line;
#else // __arm__
// Probe for additional features at runtime.
base::CPU cpu;
- if (FLAG_enable_vfp3 && cpu.has_vfp3()) {
- // This implementation also sets the VFP flags if runtime
- // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
- // 0406B, page A1-6.
- supported_ |= 1u << VFP3 | 1u << ARMv7;
+ // Runtime detection is slightly fuzzy, and some inferences are necessary.
+ unsigned runtime = kArmv6;
+ // NEON and VFPv3 imply at least ARMv7-A.
+ if (cpu.has_neon() && cpu.has_vfp3_d32()) {
+ DCHECK(cpu.has_vfp3());
+ runtime |= kArmv7;
+ if (cpu.has_idiva()) {
+ runtime |= kArmv7WithSudiv;
+ if (cpu.architecture() >= 8) {
+ runtime |= kArmv8;
+ }
+ }
}
- if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
- if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
+ // Use the best of the features found by CPU detection and those inferred from
+ // the build system. In both cases, restrict available features using the
+ // command-line. Note that the command-line flags are very permissive (kArmv8)
+ // by default.
+ supported_ |= command_line & CpuFeaturesFromCompiler();
+ supported_ |= command_line & runtime;
- if (cpu.architecture() >= 7) {
- if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
- if (FLAG_enable_armv8 && cpu.architecture() >= 8) {
- supported_ |= 1u << ARMv8;
- }
- // Use movw/movt for QUALCOMM ARMv7 cores.
- if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
- supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
- }
- }
+ // Additional tuning options.
// ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
if (cpu.implementer() == base::CPU::ARM &&
@@ -132,11 +239,10 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
cpu.part() == base::CPU::ARM_CORTEX_A9)) {
dcache_line_size_ = 32;
}
-
- if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
#endif
- DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
+ DCHECK_IMPLIES(IsSupported(ARMv7_SUDIV), IsSupported(ARMv7));
+ DCHECK_IMPLIES(IsSupported(ARMv8), IsSupported(ARMv7_SUDIV));
}
@@ -195,13 +301,10 @@ void CpuFeatures::PrintTarget() {
void CpuFeatures::PrintFeatures() {
- printf(
- "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d "
- "MOVW_MOVT_IMMEDIATE_LOADS=%d",
- CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
- CpuFeatures::IsSupported(VFP3), CpuFeatures::IsSupported(VFP32DREGS),
- CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV),
- CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
+ printf("ARMv8=%d ARMv7=%d VFPv3=%d VFP32DREGS=%d NEON=%d SUDIV=%d",
+ CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
+ CpuFeatures::IsSupported(VFPv3), CpuFeatures::IsSupported(VFP32DREGS),
+ CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV));
#ifdef __arm__
bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
@@ -209,7 +312,7 @@ void CpuFeatures::PrintFeatures() {
#else
bool eabi_hardfloat = false;
#endif
- printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
+ printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
}
@@ -481,6 +584,12 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
ClearRecordedAstId();
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ // Register objects tend to be abstracted and survive between scopes, so
+ // it's awkward to use CpuFeatures::VFP32DREGS with CpuFeatureScope. To make
+ // its use consistent with other features, we always enable it if we can.
+ EnableCpuFeature(VFP32DREGS);
+ }
}
@@ -860,10 +969,12 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (target16_1 == 0) {
CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
1, CodePatcher::DONT_FLUSH);
+ CpuFeatureScope scope(patcher.masm(), ARMv7);
patcher.masm()->movw(dst, target16_0);
} else {
CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
2, CodePatcher::DONT_FLUSH);
+ CpuFeatureScope scope(patcher.masm(), ARMv7);
patcher.masm()->movw(dst, target16_0);
patcher.masm()->movt(dst, target16_1);
}
@@ -1075,13 +1186,10 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
static bool use_mov_immediate_load(const Operand& x,
const Assembler* assembler) {
- if (FLAG_enable_embedded_constant_pool && assembler != NULL &&
+ DCHECK(assembler != nullptr);
+ if (FLAG_enable_embedded_constant_pool &&
!assembler->is_constant_pool_available()) {
return true;
- } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
- (assembler == NULL || !assembler->predictable_code_size())) {
- // Prefer movw / movt to constant pool if it is more efficient on the CPU.
- return true;
} else if (x.must_output_reloc_info(assembler)) {
// Prefer constant pool if data is likely to be patched.
return false;
@@ -1094,6 +1202,7 @@ static bool use_mov_immediate_load(const Operand& x,
int Operand::instructions_required(const Assembler* assembler,
Instr instr) const {
+ DCHECK(assembler != nullptr);
if (rm_.is_valid()) return 1;
uint32_t dummy1, dummy2;
if (must_output_reloc_info(assembler) ||
@@ -1105,8 +1214,7 @@ int Operand::instructions_required(const Assembler* assembler,
if (use_mov_immediate_load(*this, assembler)) {
// A movw / movt or mov / orr immediate load.
instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
- } else if (assembler != NULL &&
- assembler->ConstantPoolAccessIsInOverflow()) {
+ } else if (assembler->ConstantPoolAccessIsInOverflow()) {
// An overflowed constant pool load.
instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
} else {
@@ -1140,6 +1248,7 @@ void Assembler::move_32_bit_immediate(Register rd,
if (use_mov_immediate_load(x, this)) {
Register target = rd.code() == pc.code() ? ip : rd;
if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatureScope scope(this, ARMv7);
if (!FLAG_enable_embedded_constant_pool &&
x.must_output_reloc_info(this)) {
// Make sure the movw/movt doesn't get separated.
@@ -1166,6 +1275,7 @@ void Assembler::move_32_bit_immediate(Register rd,
Register target = rd.code() == pc.code() ? ip : rd;
// Emit instructions to load constant pool offset.
if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatureScope scope(this, ARMv7);
movw(target, 0, cond);
movt(target, 0, cond);
} else {
@@ -1376,8 +1486,7 @@ void Assembler::bl(int branch_offset, Condition cond) {
emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
-
-void Assembler::blx(int branch_offset) { // v5 and above
+void Assembler::blx(int branch_offset) {
DCHECK((branch_offset & 1) == 0);
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
@@ -1385,14 +1494,12 @@ void Assembler::blx(int branch_offset) { // v5 and above
emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
-
-void Assembler::blx(Register target, Condition cond) { // v5 and above
+void Assembler::blx(Register target, Condition cond) {
DCHECK(!target.is(pc));
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
-
-void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
+void Assembler::bx(Register target, Condition cond) {
DCHECK(!target.is(pc)); // use of pc is actually allowed, but discouraged
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
@@ -1548,13 +1655,13 @@ void Assembler::mov_label_offset(Register dst, Label* label) {
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
- DCHECK(CpuFeatures::IsSupported(ARMv7));
+ DCHECK(IsEnabled(ARMv7));
emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
- DCHECK(CpuFeatures::IsSupported(ARMv7));
+ DCHECK(IsEnabled(ARMv7));
emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
@@ -1684,7 +1791,6 @@ void Assembler::umull(Register dstL,
// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
- // v5 and above.
DCHECK(!dst.is(pc) && !src.is(pc));
emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
15*B8 | CLZ | src.code());
@@ -1724,8 +1830,7 @@ void Assembler::ubfx(Register dst,
int lsb,
int width,
Condition cond) {
- // v7 and above.
- DCHECK(CpuFeatures::IsSupported(ARMv7));
+ DCHECK(IsEnabled(ARMv7));
DCHECK(!dst.is(pc) && !src.is(pc));
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
@@ -1744,8 +1849,7 @@ void Assembler::sbfx(Register dst,
int lsb,
int width,
Condition cond) {
- // v7 and above.
- DCHECK(CpuFeatures::IsSupported(ARMv7));
+ DCHECK(IsEnabled(ARMv7));
DCHECK(!dst.is(pc) && !src.is(pc));
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
@@ -1759,8 +1863,7 @@ void Assembler::sbfx(Register dst,
// to zero, preserving the value of the other bits.
// bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
- // v7 and above.
- DCHECK(CpuFeatures::IsSupported(ARMv7));
+ DCHECK(IsEnabled(ARMv7));
DCHECK(!dst.is(pc));
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
@@ -1778,8 +1881,7 @@ void Assembler::bfi(Register dst,
int lsb,
int width,
Condition cond) {
- // v7 and above.
- DCHECK(CpuFeatures::IsSupported(ARMv7));
+ DCHECK(IsEnabled(ARMv7));
DCHECK(!dst.is(pc) && !src.is(pc));
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
@@ -2176,8 +2278,7 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#endif // def __arm__
}
-
-void Assembler::bkpt(uint32_t imm16) { // v5 and above
+void Assembler::bkpt(uint32_t imm16) {
DCHECK(is_uint16(imm16));
emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
}
@@ -2190,17 +2291,38 @@ void Assembler::svc(uint32_t imm24, Condition cond) {
void Assembler::dmb(BarrierOption option) {
- emit(kSpecialCondition | 0x57ff*B12 | 5*B4 | option);
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ // Details available in ARM DDI 0406C.b, A8-378.
+ emit(kSpecialCondition | 0x57ff * B12 | 5 * B4 | option);
+ } else {
+ // Details available in ARM DDI 0406C.b, B3-1750.
+ // CP15DMB: CRn=c7, opc1=0, CRm=c10, opc2=5, Rt is ignored.
+ mcr(p15, 0, r0, cr7, cr10, 5);
+ }
}
void Assembler::dsb(BarrierOption option) {
- emit(kSpecialCondition | 0x57ff*B12 | 4*B4 | option);
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ // Details available in ARM DDI 0406C.b, A8-380.
+ emit(kSpecialCondition | 0x57ff * B12 | 4 * B4 | option);
+ } else {
+ // Details available in ARM DDI 0406C.b, B3-1750.
+ // CP15DSB: CRn=c7, opc1=0, CRm=c10, opc2=4, Rt is ignored.
+ mcr(p15, 0, r0, cr7, cr10, 4);
+ }
}
void Assembler::isb(BarrierOption option) {
- emit(kSpecialCondition | 0x57ff*B12 | 6*B4 | option);
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ // Details available in ARM DDI 0406C.b, A8-389.
+ emit(kSpecialCondition | 0x57ff * B12 | 6 * B4 | option);
+ } else {
+ // Details available in ARM DDI 0406C.b, B3-1750.
+ // CP15ISB: CRn=c7, opc1=0, CRm=c5, opc2=4, Rt is ignored.
+ mcr(p15, 0, r0, cr7, cr5, 4);
+ }
}
@@ -2217,13 +2339,8 @@ void Assembler::cdp(Coprocessor coproc,
crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}
-
-void Assembler::cdp2(Coprocessor coproc,
- int opcode_1,
- CRegister crd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
+void Assembler::cdp2(Coprocessor coproc, int opcode_1, CRegister crd,
+ CRegister crn, CRegister crm, int opcode_2) {
cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
}
@@ -2240,13 +2357,8 @@ void Assembler::mcr(Coprocessor coproc,
rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
-
-void Assembler::mcr2(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
+void Assembler::mcr2(Coprocessor coproc, int opcode_1, Register rd,
+ CRegister crn, CRegister crm, int opcode_2) {
mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
@@ -2263,13 +2375,8 @@ void Assembler::mrc(Coprocessor coproc,
rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
-
-void Assembler::mrc2(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
+void Assembler::mrc2(Coprocessor coproc, int opcode_1, Register rd,
+ CRegister crn, CRegister crm, int opcode_2) {
mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
@@ -2295,20 +2402,13 @@ void Assembler::ldc(Coprocessor coproc,
coproc*B8 | (option & 255));
}
-
-void Assembler::ldc2(Coprocessor coproc,
- CRegister crd,
- const MemOperand& src,
- LFlag l) { // v5 and above
+void Assembler::ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
+ LFlag l) {
ldc(coproc, crd, src, l, kSpecialCondition);
}
-
-void Assembler::ldc2(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l) { // v5 and above
+void Assembler::ldc2(Coprocessor coproc, CRegister crd, Register rn, int option,
+ LFlag l) {
ldc(coproc, crd, rn, option, l, kSpecialCondition);
}
@@ -2323,6 +2423,7 @@ void Assembler::vldr(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-924.
// cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
// Vd(15-12) | 1011(11-8) | offset
+ DCHECK(VfpRegisterIsAvailable(dst));
int u = 1;
if (offset < 0) {
CHECK(offset != kMinInt);
@@ -2353,6 +2454,7 @@ void Assembler::vldr(const DwVfpRegister dst,
void Assembler::vldr(const DwVfpRegister dst,
const MemOperand& operand,
const Condition cond) {
+ DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(operand.am_ == Offset);
if (operand.rm().is_valid()) {
add(ip, operand.rn(),
@@ -2420,6 +2522,7 @@ void Assembler::vstr(const DwVfpRegister src,
// Instruction details available in ARM DDI 0406C.b, A8-1082.
// cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
// Vd(15-12) | 1011(11-8) | (offset/4)
+ DCHECK(VfpRegisterIsAvailable(src));
int u = 1;
if (offset < 0) {
CHECK(offset != kMinInt);
@@ -2450,6 +2553,7 @@ void Assembler::vstr(const DwVfpRegister src,
void Assembler::vstr(const DwVfpRegister src,
const MemOperand& operand,
const Condition cond) {
+ DCHECK(VfpRegisterIsAvailable(src));
DCHECK(operand.am_ == Offset);
if (operand.rm().is_valid()) {
add(ip, operand.rn(),
@@ -2508,16 +2612,13 @@ void Assembler::vstr(const SwVfpRegister src,
}
}
-
-void Assembler::vldm(BlockAddrMode am,
- Register base,
- DwVfpRegister first,
- DwVfpRegister last,
- Condition cond) {
+void Assembler::vldm(BlockAddrMode am, Register base, DwVfpRegister first,
+ DwVfpRegister last, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-922.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
DCHECK_LE(first.code(), last.code());
+ DCHECK(VfpRegisterIsAvailable(last));
DCHECK(am == ia || am == ia_w || am == db_w);
DCHECK(!base.is(pc));
@@ -2529,16 +2630,13 @@ void Assembler::vldm(BlockAddrMode am,
0xB*B8 | count*2);
}
-
-void Assembler::vstm(BlockAddrMode am,
- Register base,
- DwVfpRegister first,
- DwVfpRegister last,
- Condition cond) {
+void Assembler::vstm(BlockAddrMode am, Register base, DwVfpRegister first,
+ DwVfpRegister last, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-1080.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
DCHECK_LE(first.code(), last.code());
+ DCHECK(VfpRegisterIsAvailable(last));
DCHECK(am == ia || am == ia_w || am == db_w);
DCHECK(!base.is(pc));
@@ -2550,11 +2648,8 @@ void Assembler::vstm(BlockAddrMode am,
0xB*B8 | count*2);
}
-void Assembler::vldm(BlockAddrMode am,
- Register base,
- SwVfpRegister first,
- SwVfpRegister last,
- Condition cond) {
+void Assembler::vldm(BlockAddrMode am, Register base, SwVfpRegister first,
+ SwVfpRegister last, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count/2)
@@ -2569,12 +2664,8 @@ void Assembler::vldm(BlockAddrMode am,
0xA*B8 | count);
}
-
-void Assembler::vstm(BlockAddrMode am,
- Register base,
- SwVfpRegister first,
- SwVfpRegister last,
- Condition cond) {
+void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
+ SwVfpRegister last, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count/2)
@@ -2602,8 +2693,6 @@ static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
- DCHECK(CpuFeatures::IsSupported(VFP3));
-
// VMOV can accept an immediate of the form:
//
// +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
@@ -2652,7 +2741,8 @@ static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
void Assembler::vmov(const SwVfpRegister dst, float imm) {
uint32_t enc;
- if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
+ if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
+ CpuFeatureScope scope(this, VFPv3);
// The float can be encoded in the instruction.
//
// Sd = immediate
@@ -2672,6 +2762,8 @@ void Assembler::vmov(const SwVfpRegister dst, float imm) {
void Assembler::vmov(const DwVfpRegister dst,
double imm,
const Register scratch) {
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(!scratch.is(ip));
uint32_t enc;
// If the embedded constant pool is disabled, we can use the normal, inline
// constant pool. If the embedded constant pool is enabled (via
@@ -2679,7 +2771,8 @@ void Assembler::vmov(const DwVfpRegister dst,
// pointer (pp) is valid.
bool can_use_pool =
!FLAG_enable_embedded_constant_pool || is_constant_pool_available();
- if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
+ if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
+ CpuFeatureScope scope(this, VFPv3);
// The double can be encoded in the instruction.
//
// Dd = immediate
@@ -2689,7 +2782,9 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (FLAG_enable_vldr_imm && can_use_pool) {
+ } else if (CpuFeatures::IsSupported(ARMv7) && FLAG_enable_vldr_imm &&
+ can_use_pool) {
+ CpuFeatureScope scope(this, ARMv7);
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
@@ -2732,6 +2827,7 @@ void Assembler::vmov(const DwVfpRegister dst,
vmov(dst, VmovIndexLo, ip);
if (((lo & 0xffff) == (hi & 0xffff)) &&
CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatureScope scope(this, ARMv7);
movt(ip, hi >> 16);
} else {
mov(ip, Operand(hi));
@@ -2767,6 +2863,8 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-938.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
// 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -2784,6 +2882,7 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-940.
// cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
// Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
+ DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(index.index == 0 || index.index == 1);
int vd, d;
dst.split_code(&vd, &d);
@@ -2800,6 +2899,7 @@ void Assembler::vmov(const Register dst,
// Instruction details available in ARM DDI 0406C.b, A8.8.342.
// cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
// Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
+ DCHECK(VfpRegisterIsAvailable(src));
DCHECK(index.index == 0 || index.index == 1);
int vn, n;
src.split_code(&vn, &n);
@@ -2816,6 +2916,7 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+ DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(!src1.is(pc) && !src2.is(pc));
int vm, m;
dst.split_code(&vm, &m);
@@ -2832,6 +2933,7 @@ void Assembler::vmov(const Register dst1,
// Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+ DCHECK(VfpRegisterIsAvailable(src));
DCHECK(!dst1.is(pc) && !dst2.is(pc));
int vm, m;
src.split_code(&vm, &m);
@@ -2985,6 +3087,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
+ DCHECK(VfpRegisterIsAvailable(dst));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
@@ -3001,6 +3104,7 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
+ DCHECK(VfpRegisterIsAvailable(dst));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
@@ -3027,6 +3131,7 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
+ DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
@@ -3035,6 +3140,7 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
+ DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
@@ -3043,6 +3149,7 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
+ DCHECK(VfpRegisterIsAvailable(dst));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
@@ -3051,6 +3158,7 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
+ DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
@@ -3061,8 +3169,9 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-874.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
// 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
+ DCHECK(IsEnabled(VFPv3));
+ DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(fraction_bits > 0 && fraction_bits <= 32);
- DCHECK(CpuFeatures::IsSupported(VFP3));
int vd, d;
dst.split_code(&vd, &d);
int imm5 = 32 - fraction_bits;
@@ -3079,6 +3188,8 @@ void Assembler::vneg(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-968.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
// 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3110,6 +3221,8 @@ void Assembler::vabs(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-524.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
// 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3142,6 +3255,9 @@ void Assembler::vadd(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-830.
// cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src1));
+ DCHECK(VfpRegisterIsAvailable(src2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@@ -3180,6 +3296,9 @@ void Assembler::vsub(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-1086.
// cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src1));
+ DCHECK(VfpRegisterIsAvailable(src2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@@ -3218,6 +3337,9 @@ void Assembler::vmul(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-960.
// cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src1));
+ DCHECK(VfpRegisterIsAvailable(src2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@@ -3254,6 +3376,9 @@ void Assembler::vmla(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-932.
// cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src1));
+ DCHECK(VfpRegisterIsAvailable(src2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@@ -3288,6 +3413,9 @@ void Assembler::vmls(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-932.
// cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src1));
+ DCHECK(VfpRegisterIsAvailable(src2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@@ -3324,6 +3452,9 @@ void Assembler::vdiv(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-882.
// cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src1));
+ DCHECK(VfpRegisterIsAvailable(src2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@@ -3360,6 +3491,8 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406C.b, A8-864.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(VfpRegisterIsAvailable(src1));
+ DCHECK(VfpRegisterIsAvailable(src2));
int vd, d;
src1.split_code(&vd, &d);
int vm, m;
@@ -3391,6 +3524,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406C.b, A8-864.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
+ DCHECK(VfpRegisterIsAvailable(src1));
DCHECK(src2 == 0.0);
int vd, d;
src1.split_code(&vd, &d);
@@ -3411,12 +3545,76 @@ void Assembler::vcmp(const SwVfpRegister src1, const float src2,
0x5 * B9 | B6);
}
+void Assembler::vmaxnm(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2) {
+ // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(IsEnabled(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
+ 0x5 * B9 | B8 | n * B7 | m * B5 | vm);
+}
+
+void Assembler::vmaxnm(const SwVfpRegister dst, const SwVfpRegister src1,
+ const SwVfpRegister src2) {
+ // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(IsEnabled(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
+ 0x5 * B9 | n * B7 | m * B5 | vm);
+}
+
+void Assembler::vminnm(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2) {
+ // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(IsEnabled(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
+ 0x5 * B9 | B8 | n * B7 | B6 | m * B5 | vm);
+}
+
+void Assembler::vminnm(const SwVfpRegister dst, const SwVfpRegister src1,
+ const SwVfpRegister src2) {
+ // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(IsEnabled(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
+ 0x5 * B9 | n * B7 | B6 | m * B5 | vm);
+}
+
void Assembler::vsel(Condition cond, const DwVfpRegister dst,
const DwVfpRegister src1, const DwVfpRegister src2) {
// cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
// vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=1(8) | N(7) |
// 0(6) | M(5) | 0(4) | Vm(3-0)
- DCHECK(CpuFeatures::IsSupported(ARMv8));
+ DCHECK(IsEnabled(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@@ -3448,7 +3646,7 @@ void Assembler::vsel(Condition cond, const SwVfpRegister dst,
// cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
// vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=0(8) | N(7) |
// 0(6) | M(5) | 0(4) | Vm(3-0)
- DCHECK(CpuFeatures::IsSupported(ARMv8));
+ DCHECK(IsEnabled(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@@ -3481,6 +3679,8 @@ void Assembler::vsqrt(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-1058.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3524,7 +3724,7 @@ void Assembler::vrinta(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
- DCHECK(CpuFeatures::IsSupported(ARMv8));
+ DCHECK(IsEnabled(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3538,7 +3738,7 @@ void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
- DCHECK(CpuFeatures::IsSupported(ARMv8));
+ DCHECK(IsEnabled(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3552,7 +3752,7 @@ void Assembler::vrintn(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
- DCHECK(CpuFeatures::IsSupported(ARMv8));
+ DCHECK(IsEnabled(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3566,7 +3766,7 @@ void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
- DCHECK(CpuFeatures::IsSupported(ARMv8));
+ DCHECK(IsEnabled(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3580,7 +3780,7 @@ void Assembler::vrintp(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
- DCHECK(CpuFeatures::IsSupported(ARMv8));
+ DCHECK(IsEnabled(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3594,7 +3794,7 @@ void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
- DCHECK(CpuFeatures::IsSupported(ARMv8));
+ DCHECK(IsEnabled(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3608,7 +3808,7 @@ void Assembler::vrintm(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
- DCHECK(CpuFeatures::IsSupported(ARMv8));
+ DCHECK(IsEnabled(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3622,7 +3822,7 @@ void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
- DCHECK(CpuFeatures::IsSupported(ARMv8));
+ DCHECK(IsEnabled(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3636,7 +3836,7 @@ void Assembler::vrintz(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond) {
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- DCHECK(CpuFeatures::IsSupported(ARMv8));
+ DCHECK(IsEnabled(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3650,7 +3850,7 @@ void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond) {
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- DCHECK(CpuFeatures::IsSupported(ARMv8));
+ DCHECK(IsEnabled(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3668,7 +3868,7 @@ void Assembler::vld1(NeonSize size,
// Instruction details available in ARM DDI 0406C.b, A8.8.320.
// 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
// Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
- DCHECK(CpuFeatures::IsSupported(NEON));
+ DCHECK(IsEnabled(NEON));
int vd, d;
dst.base().split_code(&vd, &d);
emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
@@ -3682,7 +3882,7 @@ void Assembler::vst1(NeonSize size,
// Instruction details available in ARM DDI 0406C.b, A8.8.404.
// 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
// Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
- DCHECK(CpuFeatures::IsSupported(NEON));
+ DCHECK(IsEnabled(NEON));
int vd, d;
src.base().split_code(&vd, &d);
emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
@@ -3694,7 +3894,7 @@ void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.346.
// 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
// 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
- DCHECK(CpuFeatures::IsSupported(NEON));
+ DCHECK(IsEnabled(NEON));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -3703,6 +3903,29 @@ void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
(dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}
+void Assembler::vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
+ DCHECK(VfpRegisterIsAvailable(srcdst0));
+ DCHECK(VfpRegisterIsAvailable(srcdst1));
+ DCHECK(!srcdst0.is(kScratchDoubleReg));
+ DCHECK(!srcdst1.is(kScratchDoubleReg));
+
+ if (srcdst0.is(srcdst1)) return; // Swapping aliased registers emits nothing.
+
+ if (CpuFeatures::IsSupported(NEON)) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.418.
+ // 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
+ // Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ srcdst0.split_code(&vd, &d);
+ int vm, m;
+ srcdst1.split_code(&vm, &m);
+ emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | m * B5 | vm);
+ } else {
+ vmov(kScratchDoubleReg, srcdst0);
+ vmov(srcdst0, srcdst1);
+ vmov(srcdst1, kScratchDoubleReg);
+ }
+}
// Pseudo instructions.
void Assembler::nop(int type) {
@@ -4208,6 +4431,7 @@ void Assembler::PatchConstantPoolAccessInstruction(
Instr instr = instr_at(pc);
if (access == ConstantPoolEntry::OVERFLOWED) {
if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatureScope scope(this, ARMv7);
// Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
Instr next_instr = instr_at(pc + kInstrSize);
DCHECK((IsMovW(instr) && Instruction::ImmedMovwMovtValue(instr) == 0));
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 0b9cd91733..e5448f79ae 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -1022,7 +1022,8 @@ class Assembler : public AssemblerBase {
void bkpt(uint32_t imm16); // v5 and above
void svc(uint32_t imm24, Condition cond = al);
- // Synchronization instructions
+ // Synchronization instructions.
+ // On ARMv6, an equivalent CP15 operation will be used.
void dmb(BarrierOption option);
void dsb(BarrierOption option);
void isb(BarrierOption option);
@@ -1258,6 +1259,19 @@ class Assembler : public AssemblerBase {
void vcmp(const SwVfpRegister src1, const float src2,
const Condition cond = al);
+ void vmaxnm(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2);
+ void vmaxnm(const SwVfpRegister dst,
+ const SwVfpRegister src1,
+ const SwVfpRegister src2);
+ void vminnm(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2);
+ void vminnm(const SwVfpRegister dst,
+ const SwVfpRegister src1,
+ const SwVfpRegister src2);
+
// VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
void vsel(const Condition cond,
const DwVfpRegister dst,
@@ -1289,8 +1303,8 @@ class Assembler : public AssemblerBase {
const Condition cond = al);
// Support for NEON.
- // All these APIs support D0 to D31 and Q0 to Q15.
+ // All these APIs support D0 to D31 and Q0 to Q15.
void vld1(NeonSize size,
const NeonListOperand& dst,
const NeonMemOperand& src);
@@ -1299,6 +1313,9 @@ class Assembler : public AssemblerBase {
const NeonMemOperand& dst);
void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
+ // Currently, vswp supports only D0 to D31.
+ void vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
+
// Pseudo instructions
// Different nop operations are used by the code generator to detect certain
@@ -1586,6 +1603,12 @@ class Assembler : public AssemblerBase {
(pc_offset() < no_const_pool_before_);
}
+ bool VfpRegisterIsAvailable(DwVfpRegister reg) {
+ DCHECK(reg.is_valid());
+ return IsEnabled(VFP32DREGS) ||
+ (reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters);
+ }
+
private:
int next_buffer_check_; // pc offset of next buffer check
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 264f24f8da..de6803fa6f 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -553,17 +553,14 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// 3) Fall through to both_loaded_as_doubles.
// 4) Jump to lhs_not_nan.
// In cases 3 and 4 we have found out we were dealing with a number-number
- // comparison. If VFP3 is supported the double values of the numbers have
- // been loaded into d7 and d6. Otherwise, the double values have been loaded
- // into r0, r1, r2, and r3.
+ // comparison. The double values of the numbers have been loaded into d7 (lhs)
+ // and d6 (rhs).
EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
__ bind(&both_loaded_as_doubles);
- // The arguments have been converted to doubles and stored in d6 and d7, if
- // VFP3 is supported, or in r0, r1, r2, and r3.
+ // The arguments have been converted to doubles and stored in d6 and d7.
__ bind(&lhs_not_nan);
Label no_nan;
- // ARMv7 VFP3 instructions to implement double precision comparison.
__ VFPCompareAndSetFlags(d7, d6);
Label nan;
__ b(vs, &nan);
@@ -1646,7 +1643,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- Label done_initialize_count, done_increment_count;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
@@ -1666,7 +1662,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Register weak_value = r9;
__ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
__ cmp(r1, weak_value);
- __ b(eq, &done_increment_count);
+ __ b(eq, &done);
__ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
__ b(eq, &done);
__ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
@@ -1689,7 +1685,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &megamorphic);
- __ jmp(&done_increment_count);
+ __ jmp(&done);
__ bind(&miss);
@@ -1718,32 +1714,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
- __ b(&done_initialize_count);
+ __ b(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
- __ bind(&done_initialize_count);
- // Initialize the call counter.
- __ Move(r5, Operand(Smi::FromInt(1)));
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
- __ b(&done);
-
- __ bind(&done_increment_count);
+ __ bind(&done);
- // Increment the call count for monomorphic function calls.
+ // Increment the call count for all function calls.
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ add(r5, r5, Operand(FixedArray::kHeaderSize + kPointerSize));
__ ldr(r4, FieldMemOperand(r5, 0));
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r5, 0));
-
- __ bind(&done);
}
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
@@ -1785,6 +1771,17 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+// Note: feedback_vector and slot are clobbered after the call.
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+ Register slot) {
+ __ add(feedback_vector, feedback_vector,
+ Operand::PointerOffsetFromSmiKey(slot));
+ __ add(feedback_vector, feedback_vector,
+ Operand(FixedArray::kHeaderSize + kPointerSize));
+ __ ldr(slot, FieldMemOperand(feedback_vector, 0));
+ __ add(slot, slot, Operand(Smi::FromInt(1)));
+ __ str(slot, FieldMemOperand(feedback_vector, 0));
+}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// r1 - function
@@ -1798,11 +1795,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ mov(r0, Operand(arg_count()));
// Increment the call count for monomorphic function calls.
- __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
- __ ldr(r3, FieldMemOperand(r2, 0));
- __ add(r3, r3, Operand(Smi::FromInt(1)));
- __ str(r3, FieldMemOperand(r2, 0));
+ IncrementCallCount(masm, r2, r3);
__ mov(r2, r4);
__ mov(r3, r1);
@@ -1815,7 +1808,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// r1 - function
// r3 - slot id (Smi)
// r2 - vector
- Label extra_checks_or_miss, call, call_function;
+ Label extra_checks_or_miss, call, call_function, call_count_incremented;
int argc = arg_count();
ParameterCount actual(argc);
@@ -1845,14 +1838,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(r1, &extra_checks_or_miss);
+ __ bind(&call_function);
+
// Increment the call count for monomorphic function calls.
- __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
- __ ldr(r3, FieldMemOperand(r2, 0));
- __ add(r3, r3, Operand(Smi::FromInt(1)));
- __ str(r3, FieldMemOperand(r2, 0));
+ IncrementCallCount(masm, r2, r3);
- __ bind(&call_function);
__ mov(r0, Operand(argc));
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
@@ -1893,6 +1883,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
__ bind(&call);
+
+ // Increment the call count for megamorphic function calls.
+ IncrementCallCount(masm, r2, r3);
+
+ __ bind(&call_count_incremented);
__ mov(r0, Operand(argc));
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1919,11 +1914,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ cmp(r4, ip);
__ b(ne, &miss);
- // Initialize the call counter.
- __ Move(r5, Operand(Smi::FromInt(1)));
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
-
// Store the function. Use a stub since we need a frame for allocation.
// r2 - vector
// r3 - slot
@@ -1931,9 +1921,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(r2);
+ __ Push(r3);
__ Push(cp, r1);
__ CallStub(&create_stub);
__ Pop(cp, r1);
+ __ Pop(r3);
+ __ Pop(r2);
}
__ jmp(&call_function);
@@ -1943,7 +1937,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
GenerateMiss(masm);
- __ jmp(&call);
+ __ jmp(&call_count_incremented);
}
@@ -2131,291 +2125,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
}
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // lr: return address
- // sp[0]: to
- // sp[4]: from
- // sp[8]: string
-
- // This stub is called from the native-call %_SubString(...), so
- // nothing can be assumed about the arguments. It is tested that:
- // "string" is a sequential string,
- // both "from" and "to" are smis, and
- // 0 <= from <= to <= string.length.
- // If any of these assumptions fail, we call the runtime system.
-
- const int kToOffset = 0 * kPointerSize;
- const int kFromOffset = 1 * kPointerSize;
- const int kStringOffset = 2 * kPointerSize;
-
- __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
- STATIC_ASSERT(kFromOffset == kToOffset + 4);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-
- // Arithmetic shift right by one un-smi-tags. In this case we rotate right
- // instead because we bail out on non-smi values: ROR and ASR are equivalent
- // for smis but they set the flags in a way that's easier to optimize.
- __ mov(r2, Operand(r2, ROR, 1), SetCC);
- __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
- // If either to or from had the smi tag bit set, then C is set now, and N
- // has the same value: we rotated by 1, so the bottom bit is now the top bit.
- // We want to bailout to runtime here if From is negative. In that case, the
- // next instruction is not executed and we fall through to bailing out to
- // runtime.
- // Executed if both r2 and r3 are untagged integers.
- __ sub(r2, r2, Operand(r3), SetCC, cc);
- // One of the above un-smis or the above SUB could have set N==1.
- __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to.
-
- // Make sure first argument is a string.
- __ ldr(r0, MemOperand(sp, kStringOffset));
- __ JumpIfSmi(r0, &runtime);
- Condition is_string = masm->IsObjectStringType(r0, r1);
- __ b(NegateCondition(is_string), &runtime);
-
- Label single_char;
- __ cmp(r2, Operand(1));
- __ b(eq, &single_char);
-
- // Short-cut for the case of trivial substring.
- Label return_r0;
- // r0: original string
- // r2: result string length
- __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
- __ cmp(r2, Operand(r4, ASR, 1));
- // Return original string.
- __ b(eq, &return_r0);
- // Longer than original string's length or negative: unsafe arguments.
- __ b(hi, &runtime);
- // Shorter than original string's length: an actual substring.
-
- // Deal with different string types: update the index if necessary
- // and put the underlying string into r5.
- // r0: original string
- // r1: instance type
- // r2: length
- // r3: from index (untagged)
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ tst(r1, Operand(kIsIndirectStringMask));
- __ b(eq, &seq_or_external_string);
-
- __ tst(r1, Operand(kSlicedNotConsMask));
- __ b(ne, &sliced_string);
- // Cons string. Check whether it is flat, then fetch first part.
- __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
- __ CompareRoot(r5, Heap::kempty_stringRootIndex);
- __ b(ne, &runtime);
- __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
- // Update instance type.
- __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
- __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
- __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
- // Update instance type.
- __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the expected register.
- __ mov(r5, r0);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // r5: underlying subject string
- // r1: instance type of underlying subject string
- // r2: length
- // r3: adjusted start index (untagged)
- __ cmp(r2, Operand(SlicedString::kMinLength));
- // Short slice. Copy instead of slicing.
- __ b(lt, &copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ tst(r1, Operand(kStringEncodingMask));
- __ b(eq, &two_byte_slice);
- __ AllocateOneByteSlicedString(r0, r2, r6, r4, &runtime);
- __ jmp(&set_slice_header);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
- __ bind(&set_slice_header);
- __ mov(r3, Operand(r3, LSL, 1));
- __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
- __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
- __ jmp(&return_r0);
-
- __ bind(&copy_routine);
- }
-
- // r5: underlying subject string
- // r1: instance type of underlying subject string
- // r2: length
- // r3: adjusted start index (untagged)
- Label two_byte_sequential, sequential_string, allocate_result;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r1, Operand(kExternalStringTag));
- __ b(eq, &sequential_string);
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(r1, Operand(kShortExternalStringTag));
- __ b(ne, &runtime);
- __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
- // r5 already points to the first character of underlying string.
- __ jmp(&allocate_result);
-
- __ bind(&sequential_string);
- // Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&allocate_result);
- // Sequential acii string. Allocate the result.
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ tst(r1, Operand(kStringEncodingMask));
- __ b(eq, &two_byte_sequential);
-
- // Allocate and copy the resulting one-byte string.
- __ AllocateOneByteString(r0, r2, r4, r6, r1, &runtime);
-
- // Locate first character of substring to copy.
- __ add(r5, r5, r3);
- // Locate first character of result.
- __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // r0: result string
- // r1: first character of result string
- // r2: result string length
- // r5: first character of substring to copy
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharacters(
- masm, r1, r5, r2, r3, String::ONE_BYTE_ENCODING);
- __ jmp(&return_r0);
-
- // Allocate and copy the resulting two-byte string.
- __ bind(&two_byte_sequential);
- __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
-
- // Locate first character of substring to copy.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ add(r5, r5, Operand(r3, LSL, 1));
- // Locate first character of result.
- __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // r0: result string.
- // r1: first character of result.
- // r2: result length.
- // r5: first character of substring to copy.
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharacters(
- masm, r1, r5, r2, r3, String::TWO_BYTE_ENCODING);
-
- __ bind(&return_r0);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
- __ Drop(3);
- __ Ret();
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString);
-
- __ bind(&single_char);
- // r0: original string
- // r1: instance type
- // r2: length
- // r3: from index (untagged)
- __ SmiTag(r3, r3);
- StringCharAtGenerator generator(r0, r3, r2, r0, &runtime, &runtime, &runtime,
- RECEIVER_IS_STRING);
- generator.GenerateFast(masm);
- __ Drop(3);
- __ Ret();
- generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
- // The ToString stub takes one argument in r0.
- Label is_number;
- __ JumpIfSmi(r0, &is_number);
-
- __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
- // r0: receiver
- // r1: receiver instance type
- __ Ret(lo);
-
- Label not_heap_number;
- __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
- __ b(ne, &not_heap_number);
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ cmp(r1, Operand(ODDBALL_TYPE));
- __ b(ne, &not_oddball);
- __ ldr(r0, FieldMemOperand(r0, Oddball::kToStringOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
- // The ToName stub takes one argument in r0.
- Label is_number;
- __ JumpIfSmi(r0, &is_number);
-
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ CompareObjectType(r0, r1, r1, LAST_NAME_TYPE);
- // r0: receiver
- // r1: receiver instance type
- __ Ret(ls);
-
- Label not_heap_number;
- __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
- __ b(ne, &not_heap_number);
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ cmp(r1, Operand(ODDBALL_TYPE));
- __ b(ne, &not_oddball);
- __ ldr(r0, FieldMemOperand(r0, Oddball::kToStringOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kToName);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -3275,16 +2984,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_scratch;
- __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
- __ ldr(regs_.scratch1(),
- MemOperand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
- __ str(regs_.scratch1(),
- MemOperand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ b(mi, &need_incremental);
-
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -3712,7 +3411,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
__ ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
// Load the map into the correct register.
- DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
__ mov(feedback, too_far);
__ add(pc, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4425,7 +4124,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ cmp(r6, Operand(Page::kMaxRegularHeapObjectSize));
+ __ cmp(r6, Operand(kMaxRegularHeapObjectSize));
__ b(gt, &too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -4763,7 +4462,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ cmp(r6, Operand(Page::kMaxRegularHeapObjectSize));
+ __ cmp(r6, Operand(kMaxRegularHeapObjectSize));
__ b(gt, &too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 75801454e8..e63da5c766 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -39,6 +39,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
Label less_4;
if (CpuFeatures::IsSupported(NEON)) {
+ CpuFeatureScope scope(&masm, NEON);
Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
Label size_less_than_8;
__ pld(MemOperand(src, 0));
@@ -193,6 +194,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
Register src = r1;
Register chars = r2;
if (CpuFeatures::IsSupported(NEON)) {
+ CpuFeatureScope scope(&masm, NEON);
Register temp = r3;
Label loop;
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index a1620516a2..2bade20fed 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -477,40 +477,42 @@ class Instruction {
*reinterpret_cast<Instr*>(this) = value;
}
- // Read one particular bit out of the instruction bits.
+ // Extract a single bit from the instruction bits and return it as bit 0 in
+ // the result.
inline int Bit(int nr) const {
return (InstructionBits() >> nr) & 1;
}
- // Read a bit field's value out of the instruction bits.
+ // Extract a bit field <hi:lo> from the instruction bits and return it in the
+ // least-significant bits of the result.
inline int Bits(int hi, int lo) const {
return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
}
- // Read a bit field out of the instruction bits.
+ // Read a bit field <hi:lo>, leaving its position unchanged in the result.
inline int BitField(int hi, int lo) const {
return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
}
// Static support.
- // Read one particular bit out of the instruction bits.
+ // Extract a single bit from the instruction bits and return it as bit 0 in
+ // the result.
static inline int Bit(Instr instr, int nr) {
return (instr >> nr) & 1;
}
- // Read the value of a bit field out of the instruction bits.
+ // Extract a bit field <hi:lo> from the instruction bits and return it in the
+ // least-significant bits of the result.
static inline int Bits(Instr instr, int hi, int lo) {
return (instr >> lo) & ((2 << (hi - lo)) - 1);
}
-
- // Read a bit field out of the instruction bits.
+ // Read a bit field <hi:lo>, leaving its position unchanged in the result.
static inline int BitField(Instr instr, int hi, int lo) {
return instr & (((2 << (hi - lo)) - 1) << lo);
}
-
// Accessors for the different named fields used in the ARM encoding.
// The naming of these accessor corresponds to figure A3-1.
//
@@ -525,13 +527,11 @@ class Instruction {
// Generally applicable fields
- inline Condition ConditionValue() const {
- return static_cast<Condition>(Bits(31, 28));
- }
+ inline int ConditionValue() const { return Bits(31, 28); }
inline Condition ConditionField() const {
return static_cast<Condition>(BitField(31, 28));
}
- DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionValue);
+ DECLARE_STATIC_TYPED_ACCESSOR(int, ConditionValue);
DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
inline int TypeValue() const { return Bits(27, 25); }
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index c569e6615b..e49fed97c8 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -119,14 +119,20 @@ void Deoptimizer::TableEntryGenerator::Generate() {
DCHECK(kDoubleRegZero.code() == 14);
DCHECK(kScratchDoubleReg.code() == 15);
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(ip);
-
- // Push registers d0-d15, and possibly d16-d31, on the stack.
- // If d16-d31 are not pushed, decrease the stack pointer instead.
- __ vstm(db_w, sp, d16, d31, ne);
- __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
- __ vstm(db_w, sp, d0, d15);
+ {
+ // We use a run-time check for VFP32DREGS.
+ CpuFeatureScope scope(masm(), VFP32DREGS,
+ CpuFeatureScope::kDontCheckSupported);
+
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(ip);
+
+ // Push registers d0-d15, and possibly d16-d31, on the stack.
+ // If d16-d31 are not pushed, decrease the stack pointer instead.
+ __ vstm(db_w, sp, d16, d31, ne);
+ __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
+ __ vstm(db_w, sp, d0, d15);
+ }
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -259,9 +265,6 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ cmp(r4, r1);
__ b(lt, &outer_push_loop);
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(ip);
-
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 1e1c75d8b8..e408e85da3 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -105,6 +105,8 @@ class Decoder {
void DecodeType6(Instruction* instr);
// Type 7 includes special Debugger instructions.
int DecodeType7(Instruction* instr);
+ // CP15 coprocessor instructions.
+ void DecodeTypeCP15(Instruction* instr);
// For VFP support.
void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instruction* instr);
@@ -1279,18 +1281,16 @@ void Decoder::DecodeType3(Instruction* instr) {
break;
}
}
- if (FLAG_enable_sudiv) {
- if (instr->Bits(5, 4) == 0x1) {
- if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
- if (instr->Bit(21) == 0x1) {
- // UDIV (in V8 notation matching ARM ISA format) rn = rm/rs
- Format(instr, "udiv'cond'b 'rn, 'rm, 'rs");
- } else {
- // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
- Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
- }
- break;
+ if (instr->Bits(5, 4) == 0x1) {
+ if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+ if (instr->Bit(21) == 0x1) {
+ // UDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+ Format(instr, "udiv'cond'b 'rn, 'rm, 'rs");
+ } else {
+ // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+ Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
}
+ break;
}
}
Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
@@ -1374,7 +1374,18 @@ int Decoder::DecodeType7(Instruction* instr) {
Format(instr, "svc'cond 'svc");
}
} else {
- DecodeTypeVFP(instr);
+ switch (instr->CoprocessorValue()) {
+ case 10: // Fall through.
+ case 11:
+ DecodeTypeVFP(instr);
+ break;
+ case 15:
+ DecodeTypeCP15(instr);
+ break;
+ default:
+ Unknown(instr);
+ break;
+ }
}
return Instruction::kInstrSize;
}
@@ -1556,6 +1567,34 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
}
}
+void Decoder::DecodeTypeCP15(Instruction* instr) {
+ VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0));
+ VERIFY(instr->CoprocessorValue() == 15);
+
+ if (instr->Bit(4) == 1) {
+ int crn = instr->Bits(19, 16);
+ int crm = instr->Bits(3, 0);
+ int opc1 = instr->Bits(23, 21);
+ int opc2 = instr->Bits(7, 5);
+ if ((opc1 == 0) && (crn == 7)) {
+ // ARMv6 memory barrier operations.
+ // Details available in ARM DDI 0406C.b, B3-1750.
+ if ((crm == 10) && (opc2 == 5)) {
+ Format(instr, "mcr'cond (CP15DMB)");
+ } else if ((crm == 10) && (opc2 == 4)) {
+ Format(instr, "mcr'cond (CP15DSB)");
+ } else if ((crm == 5) && (opc2 == 4)) {
+ Format(instr, "mcr'cond (CP15ISB)");
+ } else {
+ Unknown(instr);
+ }
+ } else {
+ Unknown(instr);
+ }
+ } else {
+ Unknown(instr);
+ }
+}
void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
Instruction* instr) {
@@ -1786,6 +1825,13 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
+ } else if ((instr->Bits(21, 16) == 0x32) && (instr->Bits(11, 7) == 0) &&
+ (instr->Bit(4) == 0)) {
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ char rtype = (instr->Bit(6) == 0) ? 'd' : 'q';
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vswp %c%d, %c%d", rtype, Vd, rtype, Vm);
} else {
Unknown(instr);
}
@@ -1898,6 +1944,22 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
UNREACHABLE(); // Case analysis is exhaustive.
break;
}
+ } else if ((instr->Opc1Value() == 0x4) && (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(4) == 0x0)) {
+ // VMAXNM, VMINNM (floating-point)
+ if (instr->SzValue() == 0x1) {
+ if (instr->Bit(6) == 0x1) {
+ Format(instr, "vminnm.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vmaxnm.f64 'Dd, 'Dn, 'Dm");
+ }
+ } else {
+ if (instr->Bit(6) == 0x1) {
+ Format(instr, "vminnm.f32 'Sd, 'Sn, 'Sm");
+ } else {
+ Format(instr, "vmaxnm.f32 'Sd, 'Sn, 'Sm");
+ }
+ }
} else {
Unknown(instr);
}
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index d26804a4ff..a002b8d44a 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -42,13 +42,9 @@ const Register StoreDescriptor::SlotRegister() { return r4; }
const Register StoreWithVectorDescriptor::VectorRegister() { return r3; }
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return r4; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return r3; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return r5; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return r3; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return r4; }
+const Register StoreTransitionDescriptor::VectorRegister() { return r3; }
+const Register StoreTransitionDescriptor::MapRegister() { return r5; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
@@ -375,7 +371,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
@@ -414,7 +410,19 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
r0, // argument count (not including receiver)
r3, // new target
r1, // constructor to call
- r2 // address of the first argument
+ r2, // allocation site feedback if available, undefined otherwise
+ r4 // address of the first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r0, // argument count (not including receiver)
+ r1, // target to call checked to be Array function
+ r2, // allocation site feedback if available, undefined otherwise
+ r3 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index a08673d462..00f8ab5cf5 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -250,15 +250,17 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
}
}
-void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src) {
+void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
+ Condition cond) {
if (!dst.is(src)) {
- vmov(dst, src);
+ vmov(dst, src, cond);
}
}
-void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
+void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
+ Condition cond) {
if (!dst.is(src)) {
- vmov(dst, src);
+ vmov(dst, src, cond);
}
}
@@ -285,6 +287,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
!src2.must_output_reloc_info(this) &&
CpuFeatures::IsSupported(ARMv7) &&
base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
+ CpuFeatureScope scope(this, ARMv7);
ubfx(dst, src1, 0,
WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
} else {
@@ -303,6 +306,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
}
} else {
+ CpuFeatureScope scope(this, ARMv7);
ubfx(dst, src1, lsb, width, cond);
}
}
@@ -323,6 +327,7 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
}
} else {
+ CpuFeatureScope scope(this, ARMv7);
sbfx(dst, src1, lsb, width, cond);
}
}
@@ -346,6 +351,7 @@ void MacroAssembler::Bfi(Register dst,
mov(scratch, Operand(scratch, LSL, lsb));
orr(dst, dst, scratch);
} else {
+ CpuFeatureScope scope(this, ARMv7);
bfi(dst, src, lsb, width, cond);
}
}
@@ -358,6 +364,7 @@ void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, src, Operand(mask));
} else {
+ CpuFeatureScope scope(this, ARMv7);
Move(dst, src, cond);
bfc(dst, lsb, width, cond);
}
@@ -404,15 +411,6 @@ void MacroAssembler::Store(Register src,
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
- if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
- isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
- !predictable_code_size()) {
- // The CPU supports fast immediate values, and this root will never
- // change. We will load it as a relocatable immediate value.
- Handle<Object> root = isolate()->heap()->root_handle(index);
- mov(destination, Operand(root), LeaveCC, cond);
- return;
- }
ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}
@@ -430,9 +428,7 @@ void MacroAssembler::InNewSpace(Register object,
Condition cond,
Label* branch) {
DCHECK(cond == eq || cond == ne);
- const int mask =
- (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
- CheckPageFlag(object, scratch, mask, cond, branch);
+ CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
}
@@ -1054,6 +1050,7 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
vmov(dst, VmovIndexLo, src);
}
}
+
void MacroAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
@@ -1971,7 +1968,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
@@ -2049,7 +2046,6 @@ void MacroAssembler::Allocate(int object_size,
// point, so we cannot just use add().
DCHECK(object_size > 0);
Register source = result;
- Condition cond = al;
int shift = 0;
while (object_size != 0) {
if (((object_size >> shift) & 0x03) == 0) {
@@ -2060,9 +2056,8 @@ void MacroAssembler::Allocate(int object_size,
shift += 8;
Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1);
- add(result_end, source, bits_operand, LeaveCC, cond);
+ add(result_end, source, bits_operand);
source = result_end;
- cond = cc;
}
}
@@ -2226,7 +2221,7 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
void MacroAssembler::FastAllocate(int object_size, Register result,
Register scratch1, Register scratch2,
AllocationFlags flags) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
@@ -2261,7 +2256,6 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
// this point, so we cannot just use add().
DCHECK(object_size > 0);
Register source = result;
- Condition cond = al;
int shift = 0;
while (object_size != 0) {
if (((object_size >> shift) & 0x03) == 0) {
@@ -2272,9 +2266,8 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
shift += 8;
Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1);
- add(result_end, source, bits_operand, LeaveCC, cond);
+ add(result_end, source, bits_operand);
source = result_end;
- cond = cc;
}
}
@@ -2650,7 +2643,8 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFPv3)) {
+ CpuFeatureScope scope(this, VFPv3);
vmov(value.low(), smi);
vcvt_f64_s32(value, 1);
} else {
@@ -2807,6 +2801,7 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
+ CpuFeatureScope scope(this, ARMv7);
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
SmiUntag(dst, src);
@@ -3416,6 +3411,7 @@ void MacroAssembler::CheckFor32DRegs(Register scratch) {
void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
+ CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
CheckFor32DRegs(scratch);
vstm(db_w, location, d16, d31, ne);
sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
@@ -3424,12 +3420,151 @@ void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
+ CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
CheckFor32DRegs(scratch);
vldm(ia_w, location, d0, d15);
vldm(ia_w, location, d16, d31, ne);
add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
}
+template <typename T>
+void MacroAssembler::FloatMaxHelper(T result, T left, T right,
+ Label* out_of_line) {
+ // This trivial case is caught sooner, so that the out-of-line code can be
+ // completely avoided.
+ DCHECK(!left.is(right));
+
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ VFPCompareAndSetFlags(left, right);
+ b(vs, out_of_line);
+ vmaxnm(result, left, right);
+ } else {
+ Label done;
+ VFPCompareAndSetFlags(left, right);
+ b(vs, out_of_line);
+ // Avoid a conditional instruction if the result register is unique.
+ bool aliased_result_reg = result.is(left) || result.is(right);
+ Move(result, right, aliased_result_reg ? mi : al);
+ Move(result, left, gt);
+ b(ne, &done);
+ // Left and right are equal, but check for +/-0.
+ VFPCompareAndSetFlags(left, 0.0);
+ b(eq, out_of_line);
+ // The arguments are equal and not zero, so it doesn't matter which input we
+ // pick. We have already moved one input into the result (if it didn't
+ // already alias) so there's nothing more to do.
+ bind(&done);
+ }
+}
+
+template <typename T>
+void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
+ DCHECK(!left.is(right));
+
+ // ARMv8: At least one of left and right is a NaN.
+ // Anything else: At least one of left and right is a NaN, or both left and
+ // right are zeroes with unknown sign.
+
+ // If left and right are +/-0, select the one with the most positive sign.
+ // If left or right are NaN, vadd propagates the appropriate one.
+ vadd(result, left, right);
+}
+
+template <typename T>
+void MacroAssembler::FloatMinHelper(T result, T left, T right,
+ Label* out_of_line) {
+ // This trivial case is caught sooner, so that the out-of-line code can be
+ // completely avoided.
+ DCHECK(!left.is(right));
+
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ VFPCompareAndSetFlags(left, right);
+ b(vs, out_of_line);
+ vminnm(result, left, right);
+ } else {
+ Label done;
+ VFPCompareAndSetFlags(left, right);
+ b(vs, out_of_line);
+ // Avoid a conditional instruction if the result register is unique.
+ bool aliased_result_reg = result.is(left) || result.is(right);
+ Move(result, left, aliased_result_reg ? mi : al);
+ Move(result, right, gt);
+ b(ne, &done);
+ // Left and right are equal, but check for +/-0.
+ VFPCompareAndSetFlags(left, 0.0);
+ // If the arguments are equal and not zero, it doesn't matter which input we
+ // pick. We have already moved one input into the result (if it didn't
+ // already alias) so there's nothing more to do.
+ b(ne, &done);
+ // At this point, both left and right are either 0 or -0.
+ // We could use a single 'vorr' instruction here if we had NEON support.
+ // The algorithm used is -((-L) + (-R)), which is most efficiently expressed
+ // as -((-L) - R).
+ if (left.is(result)) {
+ DCHECK(!right.is(result));
+ vneg(result, left);
+ vsub(result, result, right);
+ vneg(result, result);
+ } else {
+ DCHECK(!left.is(result));
+ vneg(result, right);
+ vsub(result, result, left);
+ vneg(result, result);
+ }
+ bind(&done);
+ }
+}
+
+template <typename T>
+void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
+ DCHECK(!left.is(right));
+
+ // At least one of left and right is a NaN. Use vadd to propagate the NaN
+ // appropriately. +/-0 is handled inline.
+ vadd(result, left, right);
+}
+
+void MacroAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
+ SwVfpRegister right, Label* out_of_line) {
+ FloatMaxHelper(result, left, right, out_of_line);
+}
+
+void MacroAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
+ SwVfpRegister right, Label* out_of_line) {
+ FloatMinHelper(result, left, right, out_of_line);
+}
+
+void MacroAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
+ DwVfpRegister right, Label* out_of_line) {
+ FloatMaxHelper(result, left, right, out_of_line);
+}
+
+void MacroAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
+ DwVfpRegister right, Label* out_of_line) {
+ FloatMinHelper(result, left, right, out_of_line);
+}
+
+void MacroAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
+ SwVfpRegister right) {
+ FloatMaxOutOfLineHelper(result, left, right);
+}
+
+void MacroAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
+ SwVfpRegister right) {
+ FloatMinOutOfLineHelper(result, left, right);
+}
+
+void MacroAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
+ DwVfpRegister right) {
+ FloatMaxOutOfLineHelper(result, left, right);
+}
+
+void MacroAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
+ DwVfpRegister right) {
+ FloatMinOutOfLineHelper(result, left, right);
+}
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first, Register second, Register scratch1, Register scratch2,
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 2f1b3c2cae..d524d84674 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -123,6 +123,18 @@ class MacroAssembler: public Assembler {
void CallDeoptimizer(Address target);
static int CallDeoptimizerSize();
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+ // below the caller's sp.
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 0) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ UNIMPLEMENTED();
+ }
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count, Condition cond = al);
@@ -170,8 +182,8 @@ class MacroAssembler: public Assembler {
mov(dst, src, sbit, cond);
}
}
- void Move(SwVfpRegister dst, SwVfpRegister src);
- void Move(DwVfpRegister dst, DwVfpRegister src);
+ void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al);
+ void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al);
void Load(Register dst, const MemOperand& src, Representation r);
void Store(Register src, const MemOperand& dst, Representation r);
@@ -1082,6 +1094,32 @@ class MacroAssembler: public Assembler {
// values to location, restoring [d0..(d15|d31)].
void RestoreFPRegs(Register location, Register scratch);
+ // Perform a floating-point min or max operation with the
+ // (IEEE-754-compatible) semantics of ARM64's fmin/fmax. Some cases, typically
+ // NaNs or +/-0.0, are expected to be rare and are handled in out-of-line
+ // code. The specific behaviour depends on supported instructions.
+ //
+ // These functions assume (and assert) that !left.is(right). It is permitted
+ // for the result to alias either input register.
+ void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
+ Label* out_of_line);
+ void FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
+ Label* out_of_line);
+ void FloatMax(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
+ Label* out_of_line);
+ void FloatMin(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
+ Label* out_of_line);
+
+ // Generate out-of-line cases for the macros above.
+ void FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
+ SwVfpRegister right);
+ void FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
+ SwVfpRegister right);
+ void FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
+ DwVfpRegister right);
+ void FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
+ DwVfpRegister right);
+
// ---------------------------------------------------------------------------
// Runtime calls
@@ -1513,6 +1551,16 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+ // Implementation helpers for FloatMin and FloatMax.
+ template <typename T>
+ void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
+ template <typename T>
+ void FloatMinHelper(T result, T left, T right, Label* out_of_line);
+ template <typename T>
+ void FloatMaxOutOfLineHelper(T result, T left, T right);
+ template <typename T>
+ void FloatMinOutOfLineHelper(T result, T left, T right);
+
bool generating_stub_;
bool has_frame_;
// This handle will be patched with the code object on installation.
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index cfcc5b16c5..331a7e9dfd 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -575,8 +575,8 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
-void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
- size_t size) {
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+ void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
int intra_line = (start & CachePage::kLineMask);
start -= intra_line;
@@ -596,7 +596,8 @@ void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
}
}
-CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page) {
base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
@@ -607,7 +608,8 @@ CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+ intptr_t start, int size) {
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
@@ -619,7 +621,8 @@ void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
-void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -652,7 +655,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
- i_cache_ = new base::HashMap(&ICacheMatch);
+ i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
@@ -783,7 +786,8 @@ class Redirection {
// static
-void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
+ Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
@@ -2886,26 +2890,24 @@ void Simulator::DecodeType3(Instruction* instr) {
return;
}
}
- if (FLAG_enable_sudiv) {
- if (instr->Bits(5, 4) == 0x1) {
- if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
- // (s/u)div (in V8 notation matching ARM ISA format) rn = rm/rs
- // Format(instr, "'(s/u)div'cond'b 'rn, 'rm, 'rs);
- int rm = instr->RmValue();
- int32_t rm_val = get_register(rm);
- int rs = instr->RsValue();
- int32_t rs_val = get_register(rs);
- int32_t ret_val = 0;
- // udiv
- if (instr->Bit(21) == 0x1) {
- ret_val = bit_cast<int32_t>(base::bits::UnsignedDiv32(
- bit_cast<uint32_t>(rm_val), bit_cast<uint32_t>(rs_val)));
- } else {
- ret_val = base::bits::SignedDiv32(rm_val, rs_val);
- }
- set_register(rn, ret_val);
- return;
+ if (instr->Bits(5, 4) == 0x1) {
+ if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+ // (s/u)div (in V8 notation matching ARM ISA format) rn = rm/rs
+ // Format(instr, "'(s/u)div'cond'b 'rn, 'rm, 'rs);
+ int rm = instr->RmValue();
+ int32_t rm_val = get_register(rm);
+ int rs = instr->RsValue();
+ int32_t rs_val = get_register(rs);
+ int32_t ret_val = 0;
+ // udiv
+ if (instr->Bit(21) == 0x1) {
+ ret_val = bit_cast<int32_t>(base::bits::UnsignedDiv32(
+ bit_cast<uint32_t>(rm_val), bit_cast<uint32_t>(rs_val)));
+ } else {
+ ret_val = base::bits::SignedDiv32(rm_val, rs_val);
}
+ set_register(rn, ret_val);
+ return;
}
}
// Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
@@ -3026,7 +3028,17 @@ void Simulator::DecodeType7(Instruction* instr) {
if (instr->Bit(24) == 1) {
SoftwareInterrupt(instr);
} else {
- DecodeTypeVFP(instr);
+ switch (instr->CoprocessorValue()) {
+ case 10: // Fall through.
+ case 11:
+ DecodeTypeVFP(instr);
+ break;
+ case 15:
+ DecodeTypeCP15(instr);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
}
}
@@ -3335,6 +3347,31 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
}
}
+void Simulator::DecodeTypeCP15(Instruction* instr) {
+ DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0));
+ DCHECK(instr->CoprocessorValue() == 15);
+
+ if (instr->Bit(4) == 1) {
+ // mcr
+ int crn = instr->Bits(19, 16);
+ int crm = instr->Bits(3, 0);
+ int opc1 = instr->Bits(23, 21);
+ int opc2 = instr->Bits(7, 5);
+ if ((opc1 == 0) && (crn == 7)) {
+ // ARMv6 memory barrier operations.
+ // Details available in ARM DDI 0406C.b, B3-1750.
+ if (((crm == 10) && (opc2 == 5)) || // CP15DMB
+ ((crm == 10) && (opc2 == 4)) || // CP15DSB
+ ((crm == 5) && (opc2 == 4))) { // CP15ISB
+ // These are ignored by the simulator for now.
+ } else {
+ UNIMPLEMENTED();
+ }
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+}
void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
Instruction* instr) {
@@ -3750,6 +3787,21 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
e++;
}
set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+ } else if ((instr->Bits(21, 16) == 0x32) && (instr->Bits(11, 7) == 0) &&
+ (instr->Bit(4) == 0)) {
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ if (instr->Bit(6) == 0) {
+ // vswp Dd, Dm.
+ uint64_t dval, mval;
+ get_d_register(vd, &dval);
+ get_d_register(vm, &mval);
+ set_d_register(vm, &dval);
+ set_d_register(vd, &mval);
+ } else {
+ // Q register vswp unimplemented.
+ UNIMPLEMENTED();
+ }
} else {
UNIMPLEMENTED();
}
@@ -3848,6 +3900,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
} else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
// dsb, dmb, isb: ignore instruction for now.
// TODO(binji): implement
+ // Also refer to the ARMv6 CP15 equivalents in DecodeTypeCP15.
} else {
UNIMPLEMENTED();
}
@@ -3908,6 +3961,69 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
+ } else if ((instr->Opc1Value() == 0x4) && (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(4) == 0x0)) {
+ if (instr->SzValue() == 0x1) {
+ int m = instr->VFPMRegValue(kDoublePrecision);
+ int n = instr->VFPNRegValue(kDoublePrecision);
+ int d = instr->VFPDRegValue(kDoublePrecision);
+ double dn_value = get_double_from_d_register(n);
+ double dm_value = get_double_from_d_register(m);
+ double dd_value;
+ if (instr->Bit(6) == 0x1) { // vminnm
+ if ((dn_value < dm_value) || std::isnan(dm_value)) {
+ dd_value = dn_value;
+ } else if ((dm_value < dn_value) || std::isnan(dn_value)) {
+ dd_value = dm_value;
+ } else {
+ DCHECK_EQ(dn_value, dm_value);
+ // Make sure that we pick the most negative sign for +/-0.
+ dd_value = std::signbit(dn_value) ? dn_value : dm_value;
+ }
+ } else { // vmaxnm
+ if ((dn_value > dm_value) || std::isnan(dm_value)) {
+ dd_value = dn_value;
+ } else if ((dm_value > dn_value) || std::isnan(dn_value)) {
+ dd_value = dm_value;
+ } else {
+ DCHECK_EQ(dn_value, dm_value);
+ // Make sure that we pick the most positive sign for +/-0.
+ dd_value = std::signbit(dn_value) ? dm_value : dn_value;
+ }
+ }
+ dd_value = canonicalizeNaN(dd_value);
+ set_d_register_from_double(d, dd_value);
+ } else {
+ int m = instr->VFPMRegValue(kSinglePrecision);
+ int n = instr->VFPNRegValue(kSinglePrecision);
+ int d = instr->VFPDRegValue(kSinglePrecision);
+ float sn_value = get_float_from_s_register(n);
+ float sm_value = get_float_from_s_register(m);
+ float sd_value;
+ if (instr->Bit(6) == 0x1) { // vminnm
+ if ((sn_value < sm_value) || std::isnan(sm_value)) {
+ sd_value = sn_value;
+ } else if ((sm_value < sn_value) || std::isnan(sn_value)) {
+ sd_value = sm_value;
+ } else {
+ DCHECK_EQ(sn_value, sm_value);
+ // Make sure that we pick the most negative sign for +/-0.
+ sd_value = std::signbit(sn_value) ? sn_value : sm_value;
+ }
+ } else { // vmaxnm
+ if ((sn_value > sm_value) || std::isnan(sm_value)) {
+ sd_value = sn_value;
+ } else if ((sm_value > sn_value) || std::isnan(sn_value)) {
+ sd_value = sm_value;
+ } else {
+ DCHECK_EQ(sn_value, sm_value);
+ // Make sure that we pick the most positive sign for +/-0.
+ sd_value = std::signbit(sn_value) ? sm_value : sn_value;
+ }
+ }
+ sd_value = canonicalizeNaN(sd_value);
+ set_s_register_from_float(d, sd_value);
+ }
} else {
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 71b8e40862..7435b77255 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -200,7 +200,7 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
- static void TearDown(base::HashMap* i_cache, Redirection* first);
+ static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
@@ -222,7 +222,8 @@ class Simulator {
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
- static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
+ static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+ size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_lr, end_sim_pc).
@@ -327,6 +328,9 @@ class Simulator {
void DecodeType6(Instruction* instr);
void DecodeType7(Instruction* instr);
+ // CP15 coprocessor instructions.
+ void DecodeTypeCP15(Instruction* instr);
+
// Support for VFP.
void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instruction* instr);
@@ -341,9 +345,12 @@ class Simulator {
void InstructionDecode(Instruction* instr);
// ICache.
- static void CheckICache(base::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
- static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
+ static void CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr);
+ static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+ int size);
+ static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page);
// Runtime call support.
static void* RedirectExternalReference(
@@ -403,7 +410,7 @@ class Simulator {
char* last_debugger_input_;
// Icache simulation
- base::HashMap* i_cache_;
+ base::CustomMatcherHashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
diff --git a/deps/v8/src/arm64/OWNERS b/deps/v8/src/arm64/OWNERS
deleted file mode 100644
index 906a5ce641..0000000000
--- a/deps/v8/src/arm64/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-rmcilroy@chromium.org
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 5f103bc20c..ca5ea8035a 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -1089,6 +1089,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Ldr(cp, MemOperand(cp));
__ Mov(jssp, Operand(pending_handler_sp_address));
__ Ldr(jssp, MemOperand(jssp));
+ __ Mov(csp, jssp);
__ Mov(fp, Operand(pending_handler_fp_address));
__ Ldr(fp, MemOperand(fp));
@@ -1845,7 +1846,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
// feedback_vector : the feedback vector
// index : slot in feedback vector (smi)
Label initialize, done, miss, megamorphic, not_array_function;
- Label done_initialize_count, done_increment_count;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
@@ -1868,7 +1868,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
Label check_allocation_site;
__ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
__ Cmp(function, feedback_value);
- __ B(eq, &done_increment_count);
+ __ B(eq, &done);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ B(eq, &done);
__ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
@@ -1890,7 +1890,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ Cmp(function, scratch1);
__ B(ne, &megamorphic);
- __ B(&done_increment_count);
+ __ B(&done);
__ Bind(&miss);
@@ -1921,33 +1921,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
feedback_vector, index, new_target);
- __ B(&done_initialize_count);
+ __ B(&done);
__ Bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
feedback_vector, index, new_target);
- __ bind(&done_initialize_count);
- // Initialize the call counter.
- __ Mov(scratch1, Operand(Smi::FromInt(1)));
- __ Adds(scratch2, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Str(scratch1,
- FieldMemOperand(scratch2, FixedArray::kHeaderSize + kPointerSize));
- __ b(&done);
-
- __ bind(&done_increment_count);
+ __ Bind(&done);
- // Increment the call count for monomorphic function calls.
+ // Increment the call count for all function calls.
__ Add(scratch1, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Add(scratch1, scratch1, Operand(FixedArray::kHeaderSize + kPointerSize));
__ Ldr(scratch2, FieldMemOperand(scratch1, 0));
__ Add(scratch2, scratch2, Operand(Smi::FromInt(1)));
__ Str(scratch2, FieldMemOperand(scratch1, 0));
-
- __ Bind(&done);
}
@@ -1995,6 +1984,17 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+// Note: feedback_vector and slot are clobbered after the call.
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+ Register slot) {
+ __ Add(feedback_vector, feedback_vector,
+ Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Add(feedback_vector, feedback_vector,
+ Operand(FixedArray::kHeaderSize + kPointerSize));
+ __ Ldr(slot, FieldMemOperand(feedback_vector, 0));
+ __ Add(slot, slot, Operand(Smi::FromInt(1)));
+ __ Str(slot, FieldMemOperand(feedback_vector, 0));
+}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// x1 - function
@@ -2014,13 +2014,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ Mov(x0, Operand(arg_count()));
// Increment the call count for monomorphic function calls.
- __ Add(feedback_vector, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Add(feedback_vector, feedback_vector,
- Operand(FixedArray::kHeaderSize + kPointerSize));
- __ Ldr(index, FieldMemOperand(feedback_vector, 0));
- __ Add(index, index, Operand(Smi::FromInt(1)));
- __ Str(index, FieldMemOperand(feedback_vector, 0));
+ IncrementCallCount(masm, feedback_vector, index);
// Set up arguments for the array constructor stub.
Register allocation_site_arg = feedback_vector;
@@ -2038,7 +2032,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// x1 - function
// x3 - slot id (Smi)
// x2 - vector
- Label extra_checks_or_miss, call, call_function;
+ Label extra_checks_or_miss, call, call_function, call_count_incremented;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2073,16 +2067,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(function, &extra_checks_or_miss);
+ __ Bind(&call_function);
+
// Increment the call count for monomorphic function calls.
- __ Add(feedback_vector, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Add(feedback_vector, feedback_vector,
- Operand(FixedArray::kHeaderSize + kPointerSize));
- __ Ldr(index, FieldMemOperand(feedback_vector, 0));
- __ Add(index, index, Operand(Smi::FromInt(1)));
- __ Str(index, FieldMemOperand(feedback_vector, 0));
+ IncrementCallCount(masm, feedback_vector, index);
- __ Bind(&call_function);
__ Mov(x0, argc);
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
@@ -2106,6 +2095,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ jmp(&miss);
}
+ // TODO(mvstanton): the code below is effectively disabled. Investigate.
__ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
// We are going megamorphic. If the feedback is a JSFunction, it is fine
@@ -2118,6 +2108,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
__ Bind(&call);
+
+ // Increment the call count for megamorphic function calls.
+ IncrementCallCount(masm, feedback_vector, index);
+
+ __ Bind(&call_count_incremented);
__ Mov(x0, argc);
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -2143,12 +2138,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Cmp(x4, x5);
__ B(ne, &miss);
- // Initialize the call counter.
- __ Mov(x5, Smi::FromInt(1));
- __ Adds(x4, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize + kPointerSize));
-
// Store the function. Use a stub since we need a frame for allocation.
// x2 - vector
// x3 - slot
@@ -2156,9 +2145,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(feedback_vector, index);
+
__ Push(cp, function);
__ CallStub(&create_stub);
__ Pop(cp, function);
+
+ __ Pop(feedback_vector, index);
}
__ B(&call_function);
@@ -2168,7 +2161,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
GenerateMiss(masm);
- __ B(&call);
+ // The runtime increments the call count in the vector for us.
+ __ B(&call_count_incremented);
}
@@ -2681,321 +2675,6 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
}
-void SubStringStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("SubStringStub::Generate");
- Label runtime;
-
- // Stack frame on entry.
- // lr: return address
- // jssp[0]: substring "to" offset
- // jssp[8]: substring "from" offset
- // jssp[16]: pointer to string object
-
- // This stub is called from the native-call %_SubString(...), so
- // nothing can be assumed about the arguments. It is tested that:
- // "string" is a sequential string,
- // both "from" and "to" are smis, and
- // 0 <= from <= to <= string.length (in debug mode.)
- // If any of these assumptions fail, we call the runtime system.
-
- static const int kToOffset = 0 * kPointerSize;
- static const int kFromOffset = 1 * kPointerSize;
- static const int kStringOffset = 2 * kPointerSize;
-
- Register to = x0;
- Register from = x15;
- Register input_string = x10;
- Register input_length = x11;
- Register input_type = x12;
- Register result_string = x0;
- Register result_length = x1;
- Register temp = x3;
-
- __ Peek(to, kToOffset);
- __ Peek(from, kFromOffset);
-
- // Check that both from and to are smis. If not, jump to runtime.
- __ JumpIfEitherNotSmi(from, to, &runtime);
- __ SmiUntag(from);
- __ SmiUntag(to);
-
- // Calculate difference between from and to. If to < from, branch to runtime.
- __ Subs(result_length, to, from);
- __ B(mi, &runtime);
-
- // Check from is positive.
- __ Tbnz(from, kWSignBit, &runtime);
-
- // Make sure first argument is a string.
- __ Peek(input_string, kStringOffset);
- __ JumpIfSmi(input_string, &runtime);
- __ IsObjectJSStringType(input_string, input_type, &runtime);
-
- Label single_char;
- __ Cmp(result_length, 1);
- __ B(eq, &single_char);
-
- // Short-cut for the case of trivial substring.
- Label return_x0;
- __ Ldrsw(input_length,
- UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
-
- __ Cmp(result_length, input_length);
- __ CmovX(x0, input_string, eq);
- // Return original string.
- __ B(eq, &return_x0);
-
- // Longer than original string's length or negative: unsafe arguments.
- __ B(hi, &runtime);
-
- // Shorter than original string's length: an actual substring.
-
- // x0 to substring end character offset
- // x1 result_length length of substring result
- // x10 input_string pointer to input string object
- // x10 unpacked_string pointer to unpacked string object
- // x11 input_length length of input string
- // x12 input_type instance type of input string
- // x15 from substring start character offset
-
- // Deal with different string types: update the index if necessary and put
- // the underlying string into register unpacked_string.
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- Label update_instance_type;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
-
- // Test for string types, and branch/fall through to appropriate unpacking
- // code.
- __ Tst(input_type, kIsIndirectStringMask);
- __ B(eq, &seq_or_external_string);
- __ Tst(input_type, kSlicedNotConsMask);
- __ B(ne, &sliced_string);
-
- Register unpacked_string = input_string;
-
- // Cons string. Check whether it is flat, then fetch first part.
- __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
- __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
- __ Ldr(unpacked_string,
- FieldMemOperand(input_string, ConsString::kFirstOffset));
- __ B(&update_instance_type);
-
- __ Bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ Ldrsw(temp,
- UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
- __ Add(from, from, temp);
- __ Ldr(unpacked_string,
- FieldMemOperand(input_string, SlicedString::kParentOffset));
-
- __ Bind(&update_instance_type);
- __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
- __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
- // Now control must go to &underlying_unpacked. Since the no code is generated
- // before then we fall through instead of generating a useless branch.
-
- __ Bind(&seq_or_external_string);
- // Sequential or external string. Registers unpacked_string and input_string
- // alias, so there's nothing to do here.
- // Note that if code is added here, the above code must be updated.
-
- // x0 result_string pointer to result string object (uninit)
- // x1 result_length length of substring result
- // x10 unpacked_string pointer to unpacked string object
- // x11 input_length length of input string
- // x12 input_type instance type of input string
- // x15 from substring start character offset
- __ Bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- __ Cmp(result_length, SlicedString::kMinLength);
- // Short slice. Copy instead of slicing.
- __ B(lt, &copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyway due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
- __ AllocateOneByteSlicedString(result_string, result_length, x3, x4,
- &runtime);
- __ B(&set_slice_header);
-
- __ Bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
- &runtime);
-
- __ Bind(&set_slice_header);
- __ SmiTag(from);
- __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
- __ Str(unpacked_string,
- FieldMemOperand(result_string, SlicedString::kParentOffset));
- __ B(&return_x0);
-
- __ Bind(&copy_routine);
- }
-
- // x0 result_string pointer to result string object (uninit)
- // x1 result_length length of substring result
- // x10 unpacked_string pointer to unpacked string object
- // x11 input_length length of input string
- // x12 input_type instance type of input string
- // x13 unpacked_char0 pointer to first char of unpacked string (uninit)
- // x13 substring_char0 pointer to first char of substring (uninit)
- // x14 result_char0 pointer to first char of result (uninit)
- // x15 from substring start character offset
- Register unpacked_char0 = x13;
- Register substring_char0 = x13;
- Register result_char0 = x14;
- Label two_byte_sequential, sequential_string, allocate_result;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
-
- __ Tst(input_type, kExternalStringTag);
- __ B(eq, &sequential_string);
-
- __ Tst(input_type, kShortExternalStringTag);
- __ B(ne, &runtime);
- __ Ldr(unpacked_char0,
- FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
- // unpacked_char0 points to the first character of the underlying string.
- __ B(&allocate_result);
-
- __ Bind(&sequential_string);
- // Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Add(unpacked_char0, unpacked_string,
- SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
- __ Bind(&allocate_result);
- // Sequential one-byte string. Allocate the result.
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
-
- // Allocate and copy the resulting one-byte string.
- __ AllocateOneByteString(result_string, result_length, x3, x4, x5, &runtime);
-
- // Locate first character of substring to copy.
- __ Add(substring_char0, unpacked_char0, from);
-
- // Locate first character of result.
- __ Add(result_char0, result_string,
- SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
- __ B(&return_x0);
-
- // Allocate and copy the resulting two-byte string.
- __ Bind(&two_byte_sequential);
- __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
-
- // Locate first character of substring to copy.
- __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
-
- // Locate first character of result.
- __ Add(result_char0, result_string,
- SeqTwoByteString::kHeaderSize - kHeapObjectTag);
-
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- __ Add(result_length, result_length, result_length);
- __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
-
- __ Bind(&return_x0);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
- __ Drop(3);
- __ Ret();
-
- __ Bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString);
-
- __ bind(&single_char);
- // x1: result_length
- // x10: input_string
- // x12: input_type
- // x15: from (untagged)
- __ SmiTag(from);
- StringCharAtGenerator generator(input_string, from, result_length, x0,
- &runtime, &runtime, &runtime,
- RECEIVER_IS_STRING);
- generator.GenerateFast(masm);
- __ Drop(3);
- __ Ret();
- generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
- // The ToString stub takes one argument in x0.
- Label is_number;
- __ JumpIfSmi(x0, &is_number);
-
- Label not_string;
- __ JumpIfObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE, &not_string, hs);
- // x0: receiver
- // x1: receiver instance type
- __ Ret();
- __ Bind(&not_string);
-
- Label not_heap_number;
- __ Cmp(x1, HEAP_NUMBER_TYPE);
- __ B(ne, &not_heap_number);
- __ Bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ Bind(&not_heap_number);
-
- Label not_oddball;
- __ Cmp(x1, ODDBALL_TYPE);
- __ B(ne, &not_oddball);
- __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
- __ Ret();
- __ Bind(&not_oddball);
-
- __ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
- // The ToName stub takes one argument in x0.
- Label is_number;
- __ JumpIfSmi(x0, &is_number);
-
- Label not_name;
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ JumpIfObjectType(x0, x1, x1, LAST_NAME_TYPE, &not_name, hi);
- // x0: receiver
- // x1: receiver instance type
- __ Ret();
- __ Bind(&not_name);
-
- Label not_heap_number;
- __ Cmp(x1, HEAP_NUMBER_TYPE);
- __ B(ne, &not_heap_number);
- __ Bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ Bind(&not_heap_number);
-
- Label not_oddball;
- __ Cmp(x1, ODDBALL_TYPE);
- __ B(ne, &not_oddball);
- __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
- __ Ret();
- __ Bind(&not_oddball);
-
- __ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kToName);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -3195,16 +2874,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_scratch;
- Register mem_chunk = regs_.scratch0();
- Register counter = regs_.scratch1();
- __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
- __ Ldr(counter,
- MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
- __ Subs(counter, counter, 1);
- __ Str(counter,
- MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
- __ B(mi, &need_incremental);
-
// If the object is not black we don't have to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -3655,7 +3324,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
__ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
// Load the map into the correct register.
- DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
__ mov(feedback, too_far);
__ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
__ Jump(receiver_map);
@@ -4673,7 +4342,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ Bind(&allocate);
- __ Cmp(x6, Operand(Page::kMaxRegularHeapObjectSize));
+ __ Cmp(x6, Operand(kMaxRegularHeapObjectSize));
__ B(gt, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -5093,7 +4762,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ Bind(&allocate);
- __ Cmp(x6, Operand(Page::kMaxRegularHeapObjectSize));
+ __ Cmp(x6, Operand(kMaxRegularHeapObjectSize));
__ B(gt, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 881d2d83be..d7bc3de01a 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -42,13 +42,9 @@ const Register StoreDescriptor::SlotRegister() { return x4; }
const Register StoreWithVectorDescriptor::VectorRegister() { return x3; }
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return x4; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return x3; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return x5; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return x3; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return x4; }
+const Register StoreTransitionDescriptor::VectorRegister() { return x3; }
+const Register StoreTransitionDescriptor::MapRegister() { return x5; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return x2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return x0; }
@@ -407,7 +403,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
@@ -446,7 +442,19 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
x0, // argument count (not including receiver)
x3, // new target
x1, // constructor to call
- x2 // address of the first argument
+ x2, // allocation site feedback if available, undefined otherwise
+ x4 // address of the first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ x0, // argument count (not including receiver)
+ x1, // target to call checked to be Array function
+ x2, // allocation site feedback if available, undefined otherwise
+ x3 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index f674dd53e7..87ea1eb9d5 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -1571,9 +1571,8 @@ void MacroAssembler::InNewSpace(Register object,
Label* branch) {
DCHECK(cond == eq || cond == ne);
UseScratchRegisterScope temps(this);
- const int mask =
- (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
- CheckPageFlag(object, temps.AcquireSameSizeAs(object), mask, cond, branch);
+ CheckPageFlag(object, temps.AcquireSameSizeAs(object),
+ MemoryChunk::kIsInNewSpaceMask, cond, branch);
}
@@ -3037,7 +3036,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
@@ -3196,7 +3195,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
void MacroAssembler::FastAllocate(int object_size, Register result,
Register scratch1, Register scratch2,
AllocationFlags flags) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK(!AreAliased(result, scratch1, scratch2));
DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 06e9a1d9bb..37e9926e9b 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -742,6 +742,18 @@ class MacroAssembler : public Assembler {
// csp must be aligned to 16 bytes.
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+ // below the caller's sp.
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 0) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ UNIMPLEMENTED();
+ }
+
// Claim or drop stack space without actually accessing memory.
//
// In debug mode, both of these will write invalid data into the claimed or
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index f5595a8ed1..83b4cf7ee8 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -524,7 +524,8 @@ class Redirection {
// static
-void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
+ Redirection* first) {
Redirection::DeleteChain(first);
}
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index d4901098ef..c8c715a067 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -151,7 +151,8 @@ typedef SimRegisterBase SimFPRegister; // v0-v31
class Simulator : public DecoderVisitor {
public:
- static void FlushICache(base::HashMap* i_cache, void* start, size_t size) {
+ static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+ size_t size) {
USE(i_cache);
USE(start);
USE(size);
@@ -167,7 +168,7 @@ class Simulator : public DecoderVisitor {
static void Initialize(Isolate* isolate);
- static void TearDown(base::HashMap* i_cache, Redirection* first);
+ static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
static Simulator* current(v8::internal::Isolate* isolate);
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index e94d91730e..a1af1af368 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -16,9 +16,9 @@
#include "src/objects.h"
#include "src/parsing/parse-info.h"
-#include "src/wasm/encoder.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-result.h"
@@ -30,29 +30,6 @@ namespace v8 {
namespace internal {
namespace {
-i::MaybeHandle<i::FixedArray> CompileModule(
- i::Isolate* isolate, const byte* start, const byte* end,
- ErrorThrower* thrower,
- internal::wasm::ModuleOrigin origin = i::wasm::kWasmOrigin) {
- // Decode but avoid a redundant pass over function bodies for verification.
- // Verification will happen during compilation.
- i::Zone zone(isolate->allocator());
- internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
- isolate, &zone, start, end, false, origin);
-
- i::MaybeHandle<i::FixedArray> compiled_module;
- if (result.failed() && origin == internal::wasm::kAsmJsOrigin) {
- thrower->Error("Asm.js converted module failed to decode");
- } else if (result.failed()) {
- thrower->Failed("", result);
- } else {
- compiled_module = result.val->CompileFunctions(isolate, thrower);
- }
-
- if (result.val) delete result.val;
- return compiled_module;
-}
-
Handle<i::Object> StdlibMathMember(i::Isolate* isolate,
Handle<JSReceiver> stdlib,
Handle<Name> name) {
@@ -187,9 +164,9 @@ MaybeHandle<FixedArray> AsmJs::ConvertAsmToWasm(ParseInfo* info) {
i::Handle<i::FixedArray> foreign_globals;
auto module = builder.Run(&foreign_globals);
- i::MaybeHandle<i::FixedArray> compiled =
- CompileModule(info->isolate(), module->begin(), module->end(), &thrower,
- internal::wasm::kAsmJsOrigin);
+ i::MaybeHandle<i::JSObject> compiled = wasm::CreateModuleObjectFromBytes(
+ info->isolate(), module->begin(), module->end(), &thrower,
+ internal::wasm::kAsmJsOrigin);
DCHECK(!compiled.is_null());
wasm::AsmTyper::StdlibSet uses = typer.StdlibUses();
@@ -223,24 +200,25 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
Handle<FixedArray> wasm_data,
Handle<JSArrayBuffer> memory,
Handle<JSReceiver> foreign) {
- i::Handle<i::FixedArray> compiled(i::FixedArray::cast(wasm_data->get(0)));
+ i::Handle<i::JSObject> module(i::JSObject::cast(wasm_data->get(0)));
i::Handle<i::FixedArray> foreign_globals(
i::FixedArray::cast(wasm_data->get(1)));
ErrorThrower thrower(isolate, "Asm.js -> WebAssembly instantiation");
i::MaybeHandle<i::JSObject> maybe_module_object =
- i::wasm::WasmModule::Instantiate(isolate, compiled, foreign, memory);
+ i::wasm::WasmModule::Instantiate(isolate, &thrower, module, foreign,
+ memory);
if (maybe_module_object.is_null()) {
return MaybeHandle<Object>();
}
- i::Handle<i::Name> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("__foreign_init__")));
+ i::Handle<i::Name> init_name(isolate->factory()->InternalizeUtf8String(
+ wasm::AsmWasmBuilder::foreign_init_name));
i::Handle<i::Object> module_object = maybe_module_object.ToHandleChecked();
i::MaybeHandle<i::Object> maybe_init =
- i::Object::GetProperty(module_object, name);
+ i::Object::GetProperty(module_object, init_name);
DCHECK(!maybe_init.is_null());
i::Handle<i::Object> init = maybe_init.ToHandleChecked();
@@ -265,10 +243,18 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
i::MaybeHandle<i::Object> retval = i::Execution::Call(
isolate, init, undefined, foreign_globals->length(), foreign_args_array);
delete[] foreign_args_array;
-
DCHECK(!retval.is_null());
- return maybe_module_object;
+ i::Handle<i::Name> single_function_name(
+ isolate->factory()->InternalizeUtf8String(
+ wasm::AsmWasmBuilder::single_function_name));
+ i::MaybeHandle<i::Object> single_function =
+ i::Object::GetProperty(module_object, single_function_name);
+ if (!single_function.is_null() &&
+ !single_function.ToHandleChecked()->IsUndefined(isolate)) {
+ return single_function;
+ }
+ return module_object;
}
} // namespace internal
diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h
index 44bf04df9e..a2c5cec280 100644
--- a/deps/v8/src/asmjs/asm-js.h
+++ b/deps/v8/src/asmjs/asm-js.h
@@ -5,24 +5,21 @@
#ifndef V8_ASMJS_ASM_JS_H_
#define V8_ASMJS_ASM_JS_H_
-#ifndef V8_SHARED
-#include "src/allocation.h"
-#include "src/base/hashmap.h"
-#else
-#include "include/v8.h"
-#include "src/base/compiler-specific.h"
-#endif // !V8_SHARED
-#include "src/parsing/parser.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
+
+class JSArrayBuffer;
+class ParseInfo;
+
// Interface to compile and instantiate for asmjs.
class AsmJs {
public:
- static MaybeHandle<FixedArray> ConvertAsmToWasm(i::ParseInfo* info);
- static bool IsStdlibValid(i::Isolate* isolate, Handle<FixedArray> wasm_data,
+ static MaybeHandle<FixedArray> ConvertAsmToWasm(ParseInfo* info);
+ static bool IsStdlibValid(Isolate* isolate, Handle<FixedArray> wasm_data,
Handle<JSReceiver> stdlib);
- static MaybeHandle<Object> InstantiateAsmWasm(i::Isolate* isolate,
+ static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,
Handle<FixedArray> wasm_data,
Handle<JSArrayBuffer> memory,
Handle<JSReceiver> foreign);
diff --git a/deps/v8/src/asmjs/asm-typer.cc b/deps/v8/src/asmjs/asm-typer.cc
index 1d070a0207..94cc4dbfd1 100644
--- a/deps/v8/src/asmjs/asm-typer.cc
+++ b/deps/v8/src/asmjs/asm-typer.cc
@@ -17,7 +17,6 @@
#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/globals.h"
-#include "src/type-cache.h"
#include "src/utils.h"
#define FAIL(node, msg) \
@@ -129,14 +128,13 @@ AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
script_(script),
root_(root),
forward_definitions_(zone),
+ ffi_use_signatures_(zone),
stdlib_types_(zone),
stdlib_math_types_(zone),
module_info_(VariableInfo::ForSpecialSymbol(zone_, kModule)),
- global_scope_(ZoneHashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity,
+ global_scope_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
- local_scope_(ZoneHashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity,
+ local_scope_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
stack_limit_(isolate->stack_guard()->real_climit()),
node_types_(zone_),
@@ -330,8 +328,8 @@ AsmTyper::VariableInfo* AsmTyper::ImportLookup(Property* import) {
return i->second;
}
-AsmTyper::VariableInfo* AsmTyper::Lookup(Variable* variable) {
- ZoneHashMap* scope = in_function_ ? &local_scope_ : &global_scope_;
+AsmTyper::VariableInfo* AsmTyper::Lookup(Variable* variable) const {
+ const ZoneHashMap* scope = in_function_ ? &local_scope_ : &global_scope_;
ZoneHashMap::Entry* entry =
scope->Lookup(variable, ComputePointerHash(variable));
if (entry == nullptr && in_function_) {
@@ -424,6 +422,8 @@ AsmType* AsmTyper::TypeOf(AstNode* node) const {
return AsmType::None();
}
+AsmType* AsmTyper::TypeOf(Variable* v) const { return Lookup(v)->type(); }
+
AsmTyper::StandardMember AsmTyper::VariableAsStandardMember(Variable* var) {
auto* var_info = Lookup(var);
if (var_info == nullptr) {
@@ -606,8 +606,10 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
if (estatement != nullptr) {
Assignment* assignment = estatement->expression()->AsAssignment();
if (assignment != nullptr && assignment->target()->IsVariableProxy() &&
- assignment->target()->AsVariableProxy()->var()->mode() ==
- CONST_LEGACY) {
+ assignment->target()
+ ->AsVariableProxy()
+ ->var()
+ ->is_sloppy_function_name()) {
use_asm_directive = iter.Next();
}
}
@@ -760,7 +762,7 @@ AsmType* AsmTyper::ValidateGlobalDeclaration(Assignment* assign) {
bool global_variable = false;
if (value->IsLiteral() || value->IsCall()) {
AsmType* type = nullptr;
- RECURSE(type = VariableTypeAnnotations(value));
+ RECURSE(type = VariableTypeAnnotations(value, true));
target_info = new (zone_) VariableInfo(type);
target_info->set_mutability(VariableInfo::kMutableGlobal);
global_variable = true;
@@ -1509,7 +1511,7 @@ AsmType* AsmTyper::ValidateCompareOperation(CompareOperation* cmp) {
}
namespace {
-bool IsNegate(BinaryOperation* binop) {
+bool IsInvert(BinaryOperation* binop) {
if (binop->op() != Token::BIT_XOR) {
return false;
}
@@ -1524,7 +1526,7 @@ bool IsNegate(BinaryOperation* binop) {
}
bool IsUnaryMinus(BinaryOperation* binop) {
- // *VIOLATION* The parser replaces uses of +x with x*1.0.
+ // *VIOLATION* The parser replaces uses of -x with x*-1.
if (binop->op() != Token::MUL) {
return false;
}
@@ -1570,7 +1572,7 @@ AsmType* AsmTyper::ValidateBinaryOperation(BinaryOperation* expr) {
}
if (IsUnaryMinus(expr)) {
- // *VIOLATION* the parser converts -x to x * -1.0.
+ // *VIOLATION* the parser converts -x to x * -1.
AsmType* left_type;
RECURSE(left_type = ValidateExpression(expr->left()));
SetTypeOf(expr->right(), left_type);
@@ -1595,11 +1597,11 @@ AsmType* AsmTyper::ValidateBinaryOperation(BinaryOperation* expr) {
case Token::BIT_AND:
return ValidateBitwiseANDExpression(expr);
case Token::BIT_XOR:
- if (IsNegate(expr)) {
+ if (IsInvert(expr)) {
auto* left = expr->left();
auto* left_as_binop = left->AsBinaryOperation();
- if (left_as_binop != nullptr && IsNegate(left_as_binop)) {
+ if (left_as_binop != nullptr && IsInvert(left_as_binop)) {
// This is the special ~~ operator.
AsmType* left_type;
RECURSE(left_type = ValidateExpression(left_as_binop->left()));
@@ -1660,6 +1662,12 @@ AsmType* AsmTyper::ValidateNumericLiteral(Literal* literal) {
return AsmType::Double();
}
+ // The parser collapses expressions like !0 and !123 to true/false.
+ // We therefore need to permit these as alternate versions of 0 / 1.
+ if (literal->raw_value()->IsTrue() || literal->raw_value()->IsFalse()) {
+ return AsmType::Int();
+ }
+
uint32_t value;
if (!literal->value()->ToUint32(&value)) {
int32_t value;
@@ -2305,9 +2313,20 @@ AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
FAIL(call, "Calling something that's not a function.");
}
- if (callee_type->AsFFIType() != nullptr &&
- return_type == AsmType::Float()) {
- FAIL(call, "Foreign functions can't return float.");
+ if (callee_type->AsFFIType() != nullptr) {
+ if (return_type == AsmType::Float()) {
+ FAIL(call, "Foreign functions can't return float.");
+ }
+ // Record FFI use signature, since the asm->wasm translator must know
+ // all uses up-front.
+ ffi_use_signatures_.emplace_back(
+ FFIUseSignature(call_var_proxy->var(), zone_));
+ FFIUseSignature* sig = &ffi_use_signatures_.back();
+ sig->return_type_ = return_type;
+ sig->arg_types_.reserve(args.size());
+ for (size_t i = 0; i < args.size(); ++i) {
+ sig->arg_types_.emplace_back(args[i]);
+ }
}
if (!callee_type->CanBeInvokedWith(return_type, args)) {
@@ -2662,7 +2681,8 @@ AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
// 5.4 VariableTypeAnnotations
// Also used for 5.5 GlobalVariableTypeAnnotations
-AsmType* AsmTyper::VariableTypeAnnotations(Expression* initializer) {
+AsmType* AsmTyper::VariableTypeAnnotations(Expression* initializer,
+ bool global) {
if (auto* literal = initializer->AsLiteral()) {
if (literal->raw_value()->ContainsDot()) {
SetTypeOf(initializer, AsmType::Double());
@@ -2703,10 +2723,13 @@ AsmType* AsmTyper::VariableTypeAnnotations(Expression* initializer) {
"to fround.");
}
- if (!src_expr->raw_value()->ContainsDot()) {
- FAIL(initializer,
- "Invalid float type annotation - expected literal argument to be a "
- "floating point literal.");
+ // Float constants must contain dots in local, but not in globals.
+ if (!global) {
+ if (!src_expr->raw_value()->ContainsDot()) {
+ FAIL(initializer,
+ "Invalid float type annotation - expected literal argument to be a "
+ "floating point literal.");
+ }
}
return AsmType::Float();
diff --git a/deps/v8/src/asmjs/asm-typer.h b/deps/v8/src/asmjs/asm-typer.h
index 6b9c70cf00..942ca21370 100644
--- a/deps/v8/src/asmjs/asm-typer.h
+++ b/deps/v8/src/asmjs/asm-typer.h
@@ -12,12 +12,12 @@
#include "src/allocation.h"
#include "src/asmjs/asm-types.h"
#include "src/ast/ast-type-bounds.h"
+#include "src/ast/ast-types.h"
#include "src/ast/ast.h"
#include "src/effects.h"
#include "src/type-info.h"
-#include "src/types.h"
-#include "src/zone-containers.h"
-#include "src/zone.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -73,12 +73,26 @@ class AsmTyper final {
const char* error_message() const { return error_message_; }
AsmType* TypeOf(AstNode* node) const;
+ AsmType* TypeOf(Variable* v) const;
StandardMember VariableAsStandardMember(Variable* var);
typedef std::unordered_set<StandardMember, std::hash<int> > StdlibSet;
StdlibSet StdlibUses() const { return stdlib_uses_; }
+ // Each FFI import has a usage-site signature associated with it.
+ struct FFIUseSignature {
+ Variable* var;
+ ZoneVector<AsmType*> arg_types_;
+ AsmType* return_type_;
+ FFIUseSignature(Variable* v, Zone* zone)
+ : var(v), arg_types_(zone), return_type_(nullptr) {}
+ };
+
+ const ZoneVector<FFIUseSignature>& FFIUseSignatures() {
+ return ffi_use_signatures_;
+ }
+
private:
friend class v8::internal::wasm::AsmTyperHarnessBuilder;
@@ -192,7 +206,7 @@ class AsmTyper final {
// Lookup(Delta, Gamma, x)
//
// Delta is the global_scope_ member, and Gamma, local_scope_.
- VariableInfo* Lookup(Variable* variable);
+ VariableInfo* Lookup(Variable* variable) const;
// All of the ValidateXXX methods below return AsmType::None() in case of
// validation failure.
@@ -292,8 +306,9 @@ class AsmTyper final {
// 5.2 ReturnTypeAnnotations
AsmType* ReturnTypeAnnotations(ReturnStatement* statement);
// 5.4 VariableTypeAnnotations
- AsmType* VariableTypeAnnotations(Expression* initializer);
// 5.5 GlobalVariableTypeAnnotations
+ AsmType* VariableTypeAnnotations(Expression* initializer,
+ bool global = false);
AsmType* ImportExpression(Property* import);
AsmType* NewHeapView(CallNew* new_heap_view);
@@ -306,6 +321,7 @@ class AsmTyper final {
AsmType* return_type_ = nullptr;
ZoneVector<VariableInfo*> forward_definitions_;
+ ZoneVector<FFIUseSignature> ffi_use_signatures_;
ObjectTypeMap stdlib_types_;
ObjectTypeMap stdlib_math_types_;
diff --git a/deps/v8/src/asmjs/asm-types.h b/deps/v8/src/asmjs/asm-types.h
index c307bf534b..6fe42013c0 100644
--- a/deps/v8/src/asmjs/asm-types.h
+++ b/deps/v8/src/asmjs/asm-types.h
@@ -8,8 +8,8 @@
#include <string>
#include "src/base/macros.h"
-#include "src/zone-containers.h"
-#include "src/zone.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/asmjs/asm-wasm-builder.cc b/deps/v8/src/asmjs/asm-wasm-builder.cc
index 6419459307..091f7935f6 100644
--- a/deps/v8/src/asmjs/asm-wasm-builder.cc
+++ b/deps/v8/src/asmjs/asm-wasm-builder.cc
@@ -32,6 +32,7 @@ namespace wasm {
} while (false)
enum AsmScope { kModuleScope, kInitScope, kFuncScope, kExportScope };
+enum ValueFate { kDrop, kLeaveOnStack };
struct ForeignVariable {
Handle<Name> name;
@@ -43,14 +44,11 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
public:
AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal,
AsmTyper* typer)
- : local_variables_(base::HashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity,
+ : local_variables_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
- functions_(base::HashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity,
+ functions_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
- global_variables_(base::HashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity,
+ global_variables_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
scope_(kModuleScope),
builder_(new (zone) WasmModuleBuilder(zone)),
@@ -61,46 +59,43 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
typer_(typer),
breakable_blocks_(zone),
foreign_variables_(zone),
- init_function_index_(0),
- foreign_init_function_index_(0),
+ init_function_(nullptr),
+ foreign_init_function_(nullptr),
next_table_index_(0),
- function_tables_(base::HashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity,
+ function_tables_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
imported_function_table_(this) {
InitializeAstVisitor(isolate);
}
void InitializeInitFunction() {
- init_function_index_ = builder_->AddFunction();
FunctionSig::Builder b(zone(), 0, 0);
- current_function_builder_ = builder_->FunctionAt(init_function_index_);
- current_function_builder_->SetSignature(b.Build());
- builder_->MarkStartFunction(init_function_index_);
- current_function_builder_ = nullptr;
+ init_function_ = builder_->AddFunction(b.Build());
+ builder_->MarkStartFunction(init_function_);
}
void BuildForeignInitFunction() {
- foreign_init_function_index_ = builder_->AddFunction();
+ foreign_init_function_ = builder_->AddFunction();
FunctionSig::Builder b(zone(), 0, foreign_variables_.size());
for (auto i = foreign_variables_.begin(); i != foreign_variables_.end();
++i) {
b.AddParam(i->type);
}
- current_function_builder_ =
- builder_->FunctionAt(foreign_init_function_index_);
- current_function_builder_->SetExported();
+ foreign_init_function_->SetExported();
std::string raw_name = "__foreign_init__";
- current_function_builder_->SetName(raw_name.data(),
- static_cast<int>(raw_name.size()));
- current_function_builder_->SetSignature(b.Build());
+ foreign_init_function_->SetName(
+ AsmWasmBuilder::foreign_init_name,
+ static_cast<int>(strlen(AsmWasmBuilder::foreign_init_name)));
+
+ foreign_init_function_->SetName(raw_name.data(),
+ static_cast<int>(raw_name.size()));
+ foreign_init_function_->SetSignature(b.Build());
for (size_t pos = 0; pos < foreign_variables_.size(); ++pos) {
- current_function_builder_->EmitGetLocal(static_cast<uint32_t>(pos));
+ foreign_init_function_->EmitGetLocal(static_cast<uint32_t>(pos));
ForeignVariable* fv = &foreign_variables_[pos];
uint32_t index = LookupOrInsertGlobal(fv->var, fv->type);
- current_function_builder_->EmitWithVarInt(kExprSetGlobal, index);
+ foreign_init_function_->EmitWithVarInt(kExprSetGlobal, index);
}
- current_function_builder_ = nullptr;
}
i::Handle<i::FixedArray> GetForeignArgs() {
@@ -124,8 +119,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitFunctionDeclaration(FunctionDeclaration* decl) {
DCHECK_EQ(kModuleScope, scope_);
DCHECK_NULL(current_function_builder_);
- uint32_t index = LookupOrInsertFunction(decl->proxy()->var());
- current_function_builder_ = builder_->FunctionAt(index);
+ current_function_builder_ = LookupOrInsertFunction(decl->proxy()->var());
scope_ = kFuncScope;
RECURSE(Visit(decl->fun()));
scope_ = kModuleScope;
@@ -157,8 +151,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
}
if (scope_ == kFuncScope) {
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
- false);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock);
RECURSE(VisitStatements(stmt->statements()));
} else {
RECURSE(VisitStatements(stmt->statements()));
@@ -171,10 +164,12 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
public:
BlockVisitor(AsmWasmBuilderImpl* builder, BreakableStatement* stmt,
- WasmOpcode opcode, bool is_loop)
+ WasmOpcode opcode)
: builder_(builder) {
- builder_->breakable_blocks_.push_back(std::make_pair(stmt, is_loop));
- builder_->current_function_builder_->Emit(opcode);
+ builder_->breakable_blocks_.push_back(
+ std::make_pair(stmt, opcode == kExprLoop));
+ // block and loops have a type immediate.
+ builder_->current_function_builder_->EmitWithU8(opcode, kLocalVoid);
}
~BlockVisitor() {
builder_->current_function_builder_->Emit(kExprEnd);
@@ -183,7 +178,32 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
};
void VisitExpressionStatement(ExpressionStatement* stmt) {
- RECURSE(Visit(stmt->expression()));
+ VisitForEffect(stmt->expression());
+ }
+
+ void VisitForEffect(Expression* expr) {
+ if (expr->IsAssignment()) {
+ // Don't emit drops for assignments. Instead use SetLocal/GetLocal.
+ VisitAssignment(expr->AsAssignment(), kDrop);
+ return;
+ }
+ if (expr->IsCall()) {
+ // Only emit a drop if the call has a non-void return value.
+ if (VisitCallExpression(expr->AsCall()) && scope_ == kFuncScope) {
+ current_function_builder_->Emit(kExprDrop);
+ }
+ return;
+ }
+ if (expr->IsBinaryOperation()) {
+ BinaryOperation* binop = expr->AsBinaryOperation();
+ if (binop->op() == Token::COMMA) {
+ VisitForEffect(binop->left());
+ VisitForEffect(binop->right());
+ return;
+ }
+ }
+ RECURSE(Visit(expr));
+ if (scope_ == kFuncScope) current_function_builder_->Emit(kExprDrop);
}
void VisitEmptyStatement(EmptyStatement* stmt) {}
@@ -193,7 +213,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitIfStatement(IfStatement* stmt) {
DCHECK_EQ(kFuncScope, scope_);
RECURSE(Visit(stmt->condition()));
- current_function_builder_->Emit(kExprIf);
+ current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
// WASM ifs come with implement blocks for both arms.
breakable_blocks_.push_back(std::make_pair(nullptr, false));
if (stmt->HasThenStatement()) {
@@ -207,48 +227,26 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
breakable_blocks_.pop_back();
}
- void VisitContinueStatement(ContinueStatement* stmt) {
+ void DoBreakOrContinue(BreakableStatement* target, bool is_continue) {
DCHECK_EQ(kFuncScope, scope_);
- DCHECK_NOT_NULL(stmt->target());
- int i = static_cast<int>(breakable_blocks_.size()) - 1;
- int block_distance = 0;
- for (; i >= 0; i--) {
+ for (int i = static_cast<int>(breakable_blocks_.size()) - 1; i >= 0; --i) {
auto elem = breakable_blocks_.at(i);
- if (elem.first == stmt->target()) {
- DCHECK(elem.second);
- break;
- } else if (elem.second) {
- block_distance += 2;
- } else {
- block_distance += 1;
+ if (elem.first == target && elem.second == is_continue) {
+ int block_distance = static_cast<int>(breakable_blocks_.size() - i - 1);
+ current_function_builder_->Emit(kExprBr);
+ current_function_builder_->EmitVarInt(block_distance);
+ return;
}
}
- DCHECK(i >= 0);
- current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
- current_function_builder_->EmitVarInt(block_distance);
+ UNREACHABLE(); // statement not found
+ }
+
+ void VisitContinueStatement(ContinueStatement* stmt) {
+ DoBreakOrContinue(stmt->target(), true);
}
void VisitBreakStatement(BreakStatement* stmt) {
- DCHECK_EQ(kFuncScope, scope_);
- DCHECK_NOT_NULL(stmt->target());
- int i = static_cast<int>(breakable_blocks_.size()) - 1;
- int block_distance = 0;
- for (; i >= 0; i--) {
- auto elem = breakable_blocks_.at(i);
- if (elem.first == stmt->target()) {
- if (elem.second) {
- block_distance++;
- }
- break;
- } else if (elem.second) {
- block_distance += 2;
- } else {
- block_distance += 1;
- }
- }
- DCHECK(i >= 0);
- current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
- current_function_builder_->EmitVarInt(block_distance);
+ DoBreakOrContinue(stmt->target(), false);
}
void VisitReturnStatement(ReturnStatement* stmt) {
@@ -258,9 +256,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
scope_ = kModuleScope;
} else if (scope_ == kFuncScope) {
RECURSE(Visit(stmt->expression()));
- uint8_t arity =
- TypeOf(stmt->expression()) == kAstStmt ? ARITY_0 : ARITY_1;
- current_function_builder_->EmitWithU8(kExprReturn, arity);
+ current_function_builder_->Emit(kExprReturn);
} else {
UNREACHABLE();
}
@@ -276,7 +272,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
VisitVariableProxy(tag);
current_function_builder_->EmitI32Const(node->begin);
current_function_builder_->Emit(kExprI32LtS);
- current_function_builder_->Emit(kExprIf);
+ current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
if_depth++;
breakable_blocks_.push_back(std::make_pair(nullptr, false));
HandleCase(node->left, case_to_block, tag, default_block, if_depth);
@@ -286,7 +282,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
VisitVariableProxy(tag);
current_function_builder_->EmitI32Const(node->end);
current_function_builder_->Emit(kExprI32GtS);
- current_function_builder_->Emit(kExprIf);
+ current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
if_depth++;
breakable_blocks_.push_back(std::make_pair(nullptr, false));
HandleCase(node->right, case_to_block, tag, default_block, if_depth);
@@ -296,9 +292,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
VisitVariableProxy(tag);
current_function_builder_->EmitI32Const(node->begin);
current_function_builder_->Emit(kExprI32Eq);
- current_function_builder_->Emit(kExprIf);
+ current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
DCHECK(case_to_block.find(node->begin) != case_to_block.end());
- current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
+ current_function_builder_->Emit(kExprBr);
current_function_builder_->EmitVarInt(1 + if_depth +
case_to_block[node->begin]);
current_function_builder_->Emit(kExprEnd);
@@ -310,22 +306,22 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
} else {
VisitVariableProxy(tag);
}
- current_function_builder_->EmitWithU8(kExprBrTable, ARITY_0);
+ current_function_builder_->Emit(kExprBrTable);
current_function_builder_->EmitVarInt(node->end - node->begin + 1);
- for (int v = node->begin; v <= node->end; v++) {
+ for (int v = node->begin; v <= node->end; ++v) {
if (case_to_block.find(v) != case_to_block.end()) {
- byte break_code[] = {BR_TARGET(if_depth + case_to_block[v])};
- current_function_builder_->EmitCode(break_code, sizeof(break_code));
+ uint32_t target = if_depth + case_to_block[v];
+ current_function_builder_->EmitVarInt(target);
} else {
- byte break_code[] = {BR_TARGET(if_depth + default_block)};
- current_function_builder_->EmitCode(break_code, sizeof(break_code));
+ uint32_t target = if_depth + default_block;
+ current_function_builder_->EmitVarInt(target);
}
if (v == kMaxInt) {
break;
}
}
- byte break_code[] = {BR_TARGET(if_depth + default_block)};
- current_function_builder_->EmitCode(break_code, sizeof(break_code));
+ uint32_t target = if_depth + default_block;
+ current_function_builder_->EmitVarInt(target);
}
while (if_depth-- != prev_if_depth) {
@@ -342,14 +338,14 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (case_count == 0) {
return;
}
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock);
ZoneVector<BlockVisitor*> blocks(zone_);
ZoneVector<int32_t> cases(zone_);
ZoneMap<int, unsigned int> case_to_block(zone_);
bool has_default = false;
- for (int i = case_count - 1; i >= 0; i--) {
+ for (int i = case_count - 1; i >= 0; --i) {
CaseClause* clause = clauses->at(i);
- blocks.push_back(new BlockVisitor(this, nullptr, kExprBlock, false));
+ blocks.push_back(new BlockVisitor(this, nullptr, kExprBlock));
if (!clause->is_default()) {
Literal* label = clause->label()->AsLiteral();
Handle<Object> value = label->value();
@@ -366,12 +362,12 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
if (!has_default || case_count > 1) {
int default_block = has_default ? case_count - 1 : case_count;
- BlockVisitor switch_logic_block(this, nullptr, kExprBlock, false);
+ BlockVisitor switch_logic_block(this, nullptr, kExprBlock);
CaseNode* root = OrderCases(&cases, zone_);
HandleCase(root, case_to_block, tag, default_block, 0);
if (root->left != nullptr || root->right != nullptr ||
root->begin == root->end) {
- current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
+ current_function_builder_->Emit(kExprBr);
current_function_builder_->EmitVarInt(default_block);
}
}
@@ -388,22 +384,24 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitDoWhileStatement(DoWhileStatement* stmt) {
DCHECK_EQ(kFuncScope, scope_);
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
+ BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
+ BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
RECURSE(Visit(stmt->body()));
RECURSE(Visit(stmt->cond()));
- current_function_builder_->Emit(kExprIf);
- current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 1);
+ current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
+ current_function_builder_->EmitWithU8(kExprBr, 1);
current_function_builder_->Emit(kExprEnd);
}
void VisitWhileStatement(WhileStatement* stmt) {
DCHECK_EQ(kFuncScope, scope_);
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
+ BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
+ BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
RECURSE(Visit(stmt->cond()));
breakable_blocks_.push_back(std::make_pair(nullptr, false));
- current_function_builder_->Emit(kExprIf);
+ current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
RECURSE(Visit(stmt->body()));
- current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 1);
+ current_function_builder_->EmitWithU8(kExprBr, 1);
current_function_builder_->Emit(kExprEnd);
breakable_blocks_.pop_back();
}
@@ -413,13 +411,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (stmt->init() != nullptr) {
RECURSE(Visit(stmt->init()));
}
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
+ BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
+ BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
if (stmt->cond() != nullptr) {
RECURSE(Visit(stmt->cond()));
current_function_builder_->Emit(kExprI32Eqz);
- current_function_builder_->Emit(kExprIf);
- current_function_builder_->Emit(kExprNop);
- current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 2);
+ current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
+ current_function_builder_->EmitWithU8(kExprBr, 2);
current_function_builder_->Emit(kExprEnd);
}
if (stmt->body() != nullptr) {
@@ -428,8 +426,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (stmt->next() != nullptr) {
RECURSE(Visit(stmt->next()));
}
- current_function_builder_->Emit(kExprNop);
- current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 0);
+ current_function_builder_->EmitWithU8(kExprBr, 0);
}
void VisitForInStatement(ForInStatement* stmt) { UNREACHABLE(); }
@@ -446,19 +443,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
DeclarationScope* scope = expr->scope();
if (scope_ == kFuncScope) {
if (auto* func_type = typer_->TypeOf(expr)->AsFunctionType()) {
- // Build the signature for the function.
- LocalType return_type = TypeFrom(func_type->ReturnType());
+ // Add the parameters for the function.
const auto& arguments = func_type->Arguments();
- FunctionSig::Builder b(zone(), return_type == kAstStmt ? 0 : 1,
- arguments.size());
- if (return_type != kAstStmt) b.AddReturn(return_type);
for (int i = 0; i < expr->parameter_count(); ++i) {
LocalType type = TypeFrom(arguments[i]);
DCHECK_NE(kAstStmt, type);
- b.AddParam(type);
InsertParameter(scope->parameter(i), type, i);
}
- current_function_builder_->SetSignature(b.Build());
} else {
UNREACHABLE();
}
@@ -476,7 +467,24 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
RECURSE(Visit(expr->condition()));
// WASM ifs come with implicit blocks for both arms.
breakable_blocks_.push_back(std::make_pair(nullptr, false));
- current_function_builder_->Emit(kExprIf);
+ LocalTypeCode type;
+ switch (TypeOf(expr)) {
+ case kAstI32:
+ type = kLocalI32;
+ break;
+ case kAstI64:
+ type = kLocalI64;
+ break;
+ case kAstF32:
+ type = kLocalF32;
+ break;
+ case kAstF64:
+ type = kLocalF64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ current_function_builder_->EmitWithU8(kExprIf, type);
RECURSE(Visit(expr->then_expression()));
current_function_builder_->Emit(kExprElse);
RECURSE(Visit(expr->else_expression()));
@@ -551,12 +559,22 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->EmitGetLocal(
LookupOrInsertLocal(var, var_type));
}
+ } else if (scope_ == kExportScope) {
+ Variable* var = expr->var();
+ DCHECK(var->is_function());
+ WasmFunctionBuilder* function = LookupOrInsertFunction(var);
+ function->SetExported();
+ function->SetName(
+ AsmWasmBuilder::single_function_name,
+ static_cast<int>(strlen(AsmWasmBuilder::single_function_name)));
}
}
void VisitLiteral(Literal* expr) {
Handle<Object> value = expr->value();
- if (!value->IsNumber() || (scope_ != kFuncScope && scope_ != kInitScope)) {
+ if (!(value->IsNumber() || expr->raw_value()->IsTrue() ||
+ expr->raw_value()->IsFalse()) ||
+ (scope_ != kFuncScope && scope_ != kInitScope)) {
return;
}
AsmType* type = typer_->TypeOf(expr);
@@ -577,10 +595,40 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
int32_t i = static_cast<int32_t>(u);
byte code[] = {WASM_I32V(i)};
current_function_builder_->EmitCode(code, sizeof(code));
+ } else if (type->IsA(AsmType::Int())) {
+ // The parser can collapse !0, !1 etc to true / false.
+ // Allow these as int literals.
+ if (expr->raw_value()->IsTrue()) {
+ byte code[] = {WASM_I32V(1)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ } else if (expr->raw_value()->IsFalse()) {
+ byte code[] = {WASM_I32V(0)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ } else if (expr->raw_value()->IsNumber()) {
+ // This can happen when -x becomes x * -1 (due to the parser).
+ int32_t i = 0;
+ if (!value->ToInt32(&i) || i != -1) {
+ UNREACHABLE();
+ }
+ byte code[] = {WASM_I32V(i)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ } else {
+ UNREACHABLE();
+ }
} else if (type->IsA(AsmType::Double())) {
+ // TODO(bradnelson): Pattern match the case where negation occurs and
+ // emit f64.neg instead.
double val = expr->raw_value()->AsNumber();
byte code[] = {WASM_F64(val)};
current_function_builder_->EmitCode(code, sizeof(code));
+ } else if (type->IsA(AsmType::Float())) {
+ // This can happen when -fround(x) becomes fround(x) * 1.0[float]
+ // (due to the parser).
+ // TODO(bradnelson): Pattern match this and emit f32.neg instead.
+ double val = expr->raw_value()->AsNumber();
+ DCHECK_EQ(-1.0, val);
+ byte code[] = {WASM_F32(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
} else {
UNREACHABLE();
}
@@ -601,11 +649,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
DCHECK(name->IsPropertyName());
const AstRawString* raw_name = name->AsRawPropertyName();
if (var->is_function()) {
- uint32_t index = LookupOrInsertFunction(var);
- builder_->FunctionAt(index)->SetExported();
- builder_->FunctionAt(index)->SetName(
- reinterpret_cast<const char*>(raw_name->raw_data()),
- raw_name->length());
+ WasmFunctionBuilder* function = LookupOrInsertFunction(var);
+ function->SetExported();
+ function->SetName(reinterpret_cast<const char*>(raw_name->raw_data()),
+ raw_name->length());
}
}
}
@@ -613,7 +660,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitArrayLiteral(ArrayLiteral* expr) { UNREACHABLE(); }
void LoadInitFunction() {
- current_function_builder_ = builder_->FunctionAt(init_function_index_);
+ current_function_builder_ = init_function_;
scope_ = kInitScope;
}
@@ -642,7 +689,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
for (int i = 0; i < funcs->values()->length(); ++i) {
VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
DCHECK_NOT_NULL(func);
- builder_->AddIndirectFunction(LookupOrInsertFunction(func->var()));
+ builder_->AddIndirectFunction(
+ LookupOrInsertFunction(func->var())->func_index());
}
}
@@ -684,20 +732,20 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
public:
explicit ImportedFunctionTable(AsmWasmBuilderImpl* builder)
- : table_(base::HashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity,
+ : table_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(builder->zone())),
builder_(builder) {}
void AddImport(Variable* v, const char* name, int name_length) {
ImportedFunctionIndices* indices = new (builder_->zone())
ImportedFunctionIndices(name, name_length, builder_->zone());
- ZoneHashMap::Entry* entry = table_.LookupOrInsert(
+ auto* entry = table_.LookupOrInsert(
v, ComputePointerHash(v), ZoneAllocationPolicy(builder_->zone()));
entry->value = indices;
}
- uint32_t GetFunctionIndex(Variable* v, FunctionSig* sig) {
+ // Get a function's index (or allocate if new).
+ uint32_t LookupOrInsertImport(Variable* v, FunctionSig* sig) {
ZoneHashMap::Entry* entry = table_.Lookup(v, ComputePointerHash(v));
DCHECK_NOT_NULL(entry);
ImportedFunctionIndices* indices =
@@ -774,7 +822,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
RECURSE(Visit(value));
}
- void EmitAssignment(Assignment* expr, MachineType type) {
+ void EmitAssignment(Assignment* expr, MachineType type, ValueFate fate) {
// Match the left hand side of the assignment.
VariableProxy* target_var = expr->target()->AsVariableProxy();
if (target_var != nullptr) {
@@ -783,11 +831,19 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
LocalType var_type = TypeOf(expr);
DCHECK_NE(kAstStmt, var_type);
if (var->IsContextSlot()) {
- current_function_builder_->EmitWithVarInt(
- kExprSetGlobal, LookupOrInsertGlobal(var, var_type));
+ uint32_t index = LookupOrInsertGlobal(var, var_type);
+ current_function_builder_->EmitWithVarInt(kExprSetGlobal, index);
+ if (fate == kLeaveOnStack) {
+ current_function_builder_->EmitWithVarInt(kExprGetGlobal, index);
+ }
} else {
- current_function_builder_->EmitSetLocal(
- LookupOrInsertLocal(var, var_type));
+ if (fate == kDrop) {
+ current_function_builder_->EmitSetLocal(
+ LookupOrInsertLocal(var, var_type));
+ } else {
+ current_function_builder_->EmitTeeLocal(
+ LookupOrInsertLocal(var, var_type));
+ }
}
}
@@ -799,6 +855,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
->IsA(AsmType::Float32Array())) {
current_function_builder_->Emit(kExprF32ConvertF64);
}
+ // Note that unlike StoreMem, AsmjsStoreMem ignores out-of-bounds writes.
WasmOpcode opcode;
if (type == MachineType::Int8()) {
opcode = kExprI32AsmjsStoreMem8;
@@ -820,6 +877,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
UNREACHABLE();
}
current_function_builder_->Emit(opcode);
+ if (fate == kDrop) {
+ // Asm.js stores to memory leave their result on the stack.
+ current_function_builder_->Emit(kExprDrop);
+ }
}
if (target_var == nullptr && target_prop == nullptr) {
@@ -828,12 +889,16 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
void VisitAssignment(Assignment* expr) {
+ VisitAssignment(expr, kLeaveOnStack);
+ }
+
+ void VisitAssignment(Assignment* expr, ValueFate fate) {
bool as_init = false;
if (scope_ == kModuleScope) {
// Skip extra assignment inserted by the parser when in this form:
// (function Module(a, b, c) {... })
if (expr->target()->IsVariableProxy() &&
- expr->target()->AsVariableProxy()->var()->mode() == CONST_LEGACY) {
+ expr->target()->AsVariableProxy()->var()->is_sloppy_function_name()) {
return;
}
Property* prop = expr->value()->AsProperty();
@@ -873,12 +938,12 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
if (as_init) LoadInitFunction();
- MachineType mtype;
+ MachineType mtype = MachineType::None();
bool is_nop = false;
EmitAssignmentLhs(expr->target(), &mtype);
EmitAssignmentRhs(expr->target(), expr->value(), &is_nop);
if (!is_nop) {
- EmitAssignment(expr, mtype);
+ EmitAssignment(expr, mtype, fate);
}
if (as_init) UnLoadInitFunction();
}
@@ -1099,24 +1164,24 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathAbs: {
if (call_type == kAstI32) {
- uint32_t tmp = current_function_builder_->AddLocal(kAstI32);
+ WasmTemporary tmp(current_function_builder_, kAstI32);
// if set_local(tmp, x) < 0
Visit(call->arguments()->at(0));
- current_function_builder_->EmitSetLocal(tmp);
+ current_function_builder_->EmitTeeLocal(tmp.index());
byte code[] = {WASM_I8(0)};
current_function_builder_->EmitCode(code, sizeof(code));
current_function_builder_->Emit(kExprI32LtS);
- current_function_builder_->Emit(kExprIf);
+ current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
// then (0 - tmp)
current_function_builder_->EmitCode(code, sizeof(code));
- current_function_builder_->EmitGetLocal(tmp);
+ current_function_builder_->EmitGetLocal(tmp.index());
current_function_builder_->Emit(kExprI32Sub);
// else tmp
current_function_builder_->Emit(kExprElse);
- current_function_builder_->EmitGetLocal(tmp);
+ current_function_builder_->EmitGetLocal(tmp.index());
// end
current_function_builder_->Emit(kExprEnd);
@@ -1134,25 +1199,25 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
case AsmTyper::kMathMin: {
// TODO(bradnelson): Change wasm to match Math.min in asm.js mode.
if (call_type == kAstI32) {
- uint32_t tmp_x = current_function_builder_->AddLocal(kAstI32);
- uint32_t tmp_y = current_function_builder_->AddLocal(kAstI32);
+ WasmTemporary tmp_x(current_function_builder_, kAstI32);
+ WasmTemporary tmp_y(current_function_builder_, kAstI32);
// if set_local(tmp_x, x) < set_local(tmp_y, y)
Visit(call->arguments()->at(0));
- current_function_builder_->EmitSetLocal(tmp_x);
+ current_function_builder_->EmitTeeLocal(tmp_x.index());
Visit(call->arguments()->at(1));
- current_function_builder_->EmitSetLocal(tmp_y);
+ current_function_builder_->EmitTeeLocal(tmp_y.index());
current_function_builder_->Emit(kExprI32LeS);
- current_function_builder_->Emit(kExprIf);
+ current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
// then tmp_x
- current_function_builder_->EmitGetLocal(tmp_x);
+ current_function_builder_->EmitGetLocal(tmp_x.index());
// else tmp_y
current_function_builder_->Emit(kExprElse);
- current_function_builder_->EmitGetLocal(tmp_y);
+ current_function_builder_->EmitGetLocal(tmp_y.index());
current_function_builder_->Emit(kExprEnd);
} else if (call_type == kAstF32) {
@@ -1169,26 +1234,26 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
case AsmTyper::kMathMax: {
// TODO(bradnelson): Change wasm to match Math.max in asm.js mode.
if (call_type == kAstI32) {
- uint32_t tmp_x = current_function_builder_->AddLocal(kAstI32);
- uint32_t tmp_y = current_function_builder_->AddLocal(kAstI32);
+ WasmTemporary tmp_x(current_function_builder_, kAstI32);
+ WasmTemporary tmp_y(current_function_builder_, kAstI32);
// if set_local(tmp_x, x) < set_local(tmp_y, y)
Visit(call->arguments()->at(0));
- current_function_builder_->EmitSetLocal(tmp_x);
+ current_function_builder_->EmitTeeLocal(tmp_x.index());
Visit(call->arguments()->at(1));
- current_function_builder_->EmitSetLocal(tmp_y);
+ current_function_builder_->EmitTeeLocal(tmp_y.index());
current_function_builder_->Emit(kExprI32LeS);
- current_function_builder_->Emit(kExprIf);
+ current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
// then tmp_y
- current_function_builder_->EmitGetLocal(tmp_y);
+ current_function_builder_->EmitGetLocal(tmp_y.index());
// else tmp_x
current_function_builder_->Emit(kExprElse);
- current_function_builder_->EmitGetLocal(tmp_x);
+ current_function_builder_->EmitGetLocal(tmp_x.index());
current_function_builder_->Emit(kExprEnd);
} else if (call_type == kAstF32) {
@@ -1267,18 +1332,23 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
}
- void VisitCall(Call* expr) {
+ void VisitCall(Call* expr) { VisitCallExpression(expr); }
+
+ bool VisitCallExpression(Call* expr) {
Call::CallType call_type = expr->GetCallType();
+ bool returns_value = true;
switch (call_type) {
case Call::OTHER_CALL: {
- DCHECK_EQ(kFuncScope, scope_);
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != nullptr) {
+ DCHECK(kFuncScope == scope_ ||
+ typer_->VariableAsStandardMember(proxy->var()) ==
+ AsmTyper::kMathFround);
if (VisitStdlibFunction(expr, proxy)) {
- return;
+ return true;
}
}
- uint32_t index;
+ DCHECK(kFuncScope == scope_);
VariableProxy* vp = expr->expression()->AsVariableProxy();
DCHECK_NOT_NULL(vp);
if (typer_->TypeOf(vp)->AsFFIType() != nullptr) {
@@ -1288,22 +1358,24 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
args->length());
if (return_type != kAstStmt) {
sig.AddReturn(return_type);
+ } else {
+ returns_value = false;
}
for (int i = 0; i < args->length(); ++i) {
sig.AddParam(TypeOf(args->at(i)));
}
- index =
- imported_function_table_.GetFunctionIndex(vp->var(), sig.Build());
+ uint32_t index = imported_function_table_.LookupOrInsertImport(
+ vp->var(), sig.Build());
VisitCallArgs(expr);
- current_function_builder_->Emit(kExprCallImport);
- current_function_builder_->EmitVarInt(expr->arguments()->length());
+ current_function_builder_->Emit(kExprCallFunction);
current_function_builder_->EmitVarInt(index);
} else {
- index = LookupOrInsertFunction(vp->var());
+ WasmFunctionBuilder* function = LookupOrInsertFunction(vp->var());
VisitCallArgs(expr);
current_function_builder_->Emit(kExprCallFunction);
- current_function_builder_->EmitVarInt(expr->arguments()->length());
- current_function_builder_->EmitVarInt(index);
+ current_function_builder_->EmitDirectCallIndex(
+ function->func_index());
+ returns_value = function->signature()->return_count() > 0;
}
break;
}
@@ -1314,18 +1386,28 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
VariableProxy* var = p->obj()->AsVariableProxy();
DCHECK_NOT_NULL(var);
FunctionTableIndices* indices = LookupFunctionTable(var->var());
- RECURSE(Visit(p->key()));
+ Visit(p->key()); // TODO(titzer): should use RECURSE()
+
+ // We have to use a temporary for the correct order of evaluation.
current_function_builder_->EmitI32Const(indices->start_index);
current_function_builder_->Emit(kExprI32Add);
+ WasmTemporary tmp(current_function_builder_, kAstI32);
+ current_function_builder_->EmitSetLocal(tmp.index());
+
VisitCallArgs(expr);
+
+ current_function_builder_->EmitGetLocal(tmp.index());
current_function_builder_->Emit(kExprCallIndirect);
- current_function_builder_->EmitVarInt(expr->arguments()->length());
current_function_builder_->EmitVarInt(indices->signature_index);
+ returns_value =
+ builder_->GetSignature(indices->signature_index)->return_count() >
+ 0;
break;
}
default:
UNREACHABLE();
}
+ return returns_value;
}
void VisitCallNew(CallNew* expr) { UNREACHABLE(); }
@@ -1511,16 +1593,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
RECURSE(Visit(GetLeft(expr)));
} else {
if (expr->op() == Token::COMMA) {
- current_function_builder_->Emit(kExprBlock);
+ RECURSE(VisitForEffect(expr->left()));
+ RECURSE(Visit(expr->right()));
+ return;
}
-
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- if (expr->op() == Token::COMMA) {
- current_function_builder_->Emit(kExprEnd);
- }
-
switch (expr->op()) {
BINOP_CASE(Token::ADD, Add, NON_SIGNED_BINOP, true);
BINOP_CASE(Token::SUB, Sub, NON_SIGNED_BINOP, true);
@@ -1720,18 +1799,33 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
return (reinterpret_cast<IndexContainer*>(entry->value))->index;
}
- uint32_t LookupOrInsertFunction(Variable* v) {
+ WasmFunctionBuilder* LookupOrInsertFunction(Variable* v) {
DCHECK_NOT_NULL(builder_);
ZoneHashMap::Entry* entry = functions_.Lookup(v, ComputePointerHash(v));
if (entry == nullptr) {
- uint32_t index = builder_->AddFunction();
- IndexContainer* container = new (zone()) IndexContainer();
- container->index = index;
+ auto* func_type = typer_->TypeOf(v)->AsFunctionType();
+ DCHECK_NOT_NULL(func_type);
+ // Build the signature for the function.
+ LocalType return_type = TypeFrom(func_type->ReturnType());
+ const auto& arguments = func_type->Arguments();
+ FunctionSig::Builder b(zone(), return_type == kAstStmt ? 0 : 1,
+ arguments.size());
+ if (return_type != kAstStmt) b.AddReturn(return_type);
+ for (int i = 0; i < static_cast<int>(arguments.size()); ++i) {
+ LocalType type = TypeFrom(arguments[i]);
+ DCHECK_NE(kAstStmt, type);
+ b.AddParam(type);
+ }
+
+ WasmFunctionBuilder* function = builder_->AddFunction(b.Build());
entry = functions_.LookupOrInsert(v, ComputePointerHash(v),
ZoneAllocationPolicy(zone()));
- entry->value = container;
+ function->SetName(
+ reinterpret_cast<const char*>(v->raw_name()->raw_data()),
+ v->raw_name()->length());
+ entry->value = function;
}
- return (reinterpret_cast<IndexContainer*>(entry->value))->index;
+ return (reinterpret_cast<WasmFunctionBuilder*>(entry->value));
}
LocalType TypeOf(Expression* expr) { return TypeFrom(typer_->TypeOf(expr)); }
@@ -1766,8 +1860,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
AsmTyper* typer_;
ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
ZoneVector<ForeignVariable> foreign_variables_;
- uint32_t init_function_index_;
- uint32_t foreign_init_function_index_;
+ WasmFunctionBuilder* init_function_;
+ WasmFunctionBuilder* foreign_init_function_;
uint32_t next_table_index_;
ZoneHashMap function_tables_;
ImportedFunctionTable imported_function_table_;
@@ -1792,6 +1886,10 @@ ZoneBuffer* AsmWasmBuilder::Run(i::Handle<i::FixedArray>* foreign_args) {
impl.builder_->WriteTo(*buffer);
return buffer;
}
+
+const char* AsmWasmBuilder::foreign_init_name = "__foreign_init__";
+const char* AsmWasmBuilder::single_function_name = "__single_function__";
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/asmjs/asm-wasm-builder.h b/deps/v8/src/asmjs/asm-wasm-builder.h
index 3276c887b0..9f85dfaf07 100644
--- a/deps/v8/src/asmjs/asm-wasm-builder.h
+++ b/deps/v8/src/asmjs/asm-wasm-builder.h
@@ -8,8 +8,8 @@
#include "src/allocation.h"
#include "src/asmjs/asm-typer.h"
#include "src/objects.h"
-#include "src/wasm/encoder.h"
-#include "src/zone.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -24,6 +24,9 @@ class AsmWasmBuilder {
AsmTyper* typer);
ZoneBuffer* Run(Handle<FixedArray>* foreign_args);
+ static const char* foreign_init_name;
+ static const char* single_function_name;
+
private:
Isolate* isolate_;
Zone* zone_;
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 83dbbe8134..b44bc06ba3 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -120,7 +120,7 @@ double min_int;
double one_half;
double minus_one_half;
double negative_infinity;
-double the_hole_nan;
+uint64_t the_hole_nan;
double uint32_bias;
};
@@ -190,6 +190,7 @@ void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
if (size == 0) return;
#if defined(USE_SIMULATOR)
+ base::LockGuard<base::Mutex> lock_guard(isolate->simulator_i_cache_mutex());
Simulator::FlushICache(isolate->simulator_i_cache(), start, size);
#else
CpuFeatures::FlushICache(start, size);
@@ -233,22 +234,14 @@ PredictableCodeSizeScope::~PredictableCodeSizeScope() {
// Implementation of CpuFeatureScope
#ifdef DEBUG
-CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
+CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
+ CheckPolicy check)
: assembler_(assembler) {
- DCHECK(CpuFeatures::IsSupported(f));
+ DCHECK_IMPLIES(check == kCheckSupported, CpuFeatures::IsSupported(f));
old_enabled_ = assembler_->enabled_cpu_features();
- uint64_t mask = static_cast<uint64_t>(1) << f;
- // TODO(svenpanne) This special case below doesn't belong here!
-#if V8_TARGET_ARCH_ARM
- // ARMv7 is implied by VFP3.
- if (f == VFP3) {
- mask |= static_cast<uint64_t>(1) << ARMv7;
- }
-#endif
- assembler_->set_enabled_cpu_features(old_enabled_ | mask);
+ assembler_->EnableCpuFeature(f);
}
-
CpuFeatureScope::~CpuFeatureScope() {
assembler_->set_enabled_cpu_features(old_enabled_);
}
@@ -350,19 +343,18 @@ void RelocInfo::update_wasm_memory_reference(
DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
if (IsWasmMemoryReference(rmode_)) {
Address updated_reference;
- DCHECK(old_size == 0 || Memory::IsAddressInRange(
- old_base, wasm_memory_reference(), old_size));
+ DCHECK_GE(wasm_memory_reference(), old_base);
updated_reference = new_base + (wasm_memory_reference() - old_base);
- DCHECK(new_size == 0 ||
- Memory::IsAddressInRange(new_base, updated_reference, new_size));
+ // The reference is not checked here but at runtime. Validity of references
+ // may change over time.
unchecked_update_wasm_memory_reference(updated_reference,
icache_flush_mode);
} else if (IsWasmMemorySizeReference(rmode_)) {
- uint32_t updated_size_reference;
- DCHECK(old_size == 0 || wasm_memory_size_reference() <= old_size);
- updated_size_reference =
- new_size + (wasm_memory_size_reference() - old_size);
- DCHECK(updated_size_reference <= new_size);
+ uint32_t current_size_reference = wasm_memory_size_reference();
+ DCHECK(old_size == 0 || current_size_reference <= old_size);
+ uint32_t offset = old_size - current_size_reference;
+ DCHECK_GE(new_size, offset);
+ uint32_t updated_size_reference = new_size - offset;
unchecked_update_wasm_memory_size(updated_size_reference,
icache_flush_mode);
} else {
@@ -930,7 +922,7 @@ void ExternalReference::SetUp() {
double_constants.min_int = kMinInt;
double_constants.one_half = 0.5;
double_constants.minus_one_half = -0.5;
- double_constants.the_hole_nan = bit_cast<double>(kHoleNanInt64);
+ double_constants.the_hole_nan = kHoleNanInt64;
double_constants.negative_infinity = -V8_INFINITY;
double_constants.uint32_bias =
static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
@@ -1601,17 +1593,6 @@ ExternalReference ExternalReference::debug_after_break_target_address(
}
-ExternalReference ExternalReference::virtual_handler_register(
- Isolate* isolate) {
- return ExternalReference(isolate->virtual_handler_register_address());
-}
-
-
-ExternalReference ExternalReference::virtual_slot_register(Isolate* isolate) {
- return ExternalReference(isolate->virtual_slot_register_address());
-}
-
-
ExternalReference ExternalReference::runtime_function_table_address(
Isolate* isolate) {
return ExternalReference(
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 77beac12a2..a925032e2d 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -80,9 +80,14 @@ class AssemblerBase: public Malloced {
void set_enabled_cpu_features(uint64_t features) {
enabled_cpu_features_ = features;
}
+ // Features are usually enabled by CpuFeatureScope, which also asserts that
+ // the features are supported before they are enabled.
bool IsEnabled(CpuFeature f) {
return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
}
+ void EnableCpuFeature(CpuFeature f) {
+ enabled_cpu_features_ |= (static_cast<uint64_t>(1) << f);
+ }
bool is_constant_pool_available() const {
if (FLAG_enable_embedded_constant_pool) {
@@ -184,15 +189,22 @@ class PredictableCodeSizeScope {
// Enable a specified feature within a scope.
class CpuFeatureScope BASE_EMBEDDED {
public:
+ enum CheckPolicy {
+ kCheckSupported,
+ kDontCheckSupported,
+ };
+
#ifdef DEBUG
- CpuFeatureScope(AssemblerBase* assembler, CpuFeature f);
+ CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
+ CheckPolicy check = kCheckSupported);
~CpuFeatureScope();
private:
AssemblerBase* assembler_;
uint64_t old_enabled_;
#else
- CpuFeatureScope(AssemblerBase* assembler, CpuFeature f) {}
+ CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
+ CheckPolicy check = kCheckSupported) {}
#endif
};
@@ -1035,9 +1047,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference invoke_function_callback(Isolate* isolate);
static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
- static ExternalReference virtual_handler_register(Isolate* isolate);
- static ExternalReference virtual_slot_register(Isolate* isolate);
-
static ExternalReference runtime_function_table_address(Isolate* isolate);
Address address() const { return reinterpret_cast<Address>(address_); }
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
index 84e6990b04..fde49f8406 100644
--- a/deps/v8/src/assert-scope.h
+++ b/deps/v8/src/assert-scope.h
@@ -7,6 +7,7 @@
#include <stdint.h>
#include "src/base/macros.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -33,14 +34,13 @@ enum PerIsolateAssertType {
COMPILATION_ASSERT
};
-
template <PerThreadAssertType kType, bool kAllow>
class PerThreadAssertScope {
public:
- PerThreadAssertScope();
- ~PerThreadAssertScope();
+ V8_EXPORT_PRIVATE PerThreadAssertScope();
+ V8_EXPORT_PRIVATE ~PerThreadAssertScope();
- static bool IsAllowed();
+ V8_EXPORT_PRIVATE static bool IsAllowed();
private:
PerThreadAssertData* data_;
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index 65a00bc68c..b4e1473f83 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -3,6 +3,7 @@ set noparent
adamk@chromium.org
bmeurer@chromium.org
littledan@chromium.org
+marja@chromium.org
mstarzinger@chromium.org
rossberg@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/ast/ast-expression-rewriter.cc b/deps/v8/src/ast/ast-expression-rewriter.cc
index 7bb8f08192..c4fa71be0a 100644
--- a/deps/v8/src/ast/ast-expression-rewriter.cc
+++ b/deps/v8/src/ast/ast-expression-rewriter.cc
@@ -201,11 +201,10 @@ void AstExpressionRewriter::VisitClassLiteral(ClassLiteral* node) {
AST_REWRITE_PROPERTY(FunctionLiteral, node, constructor);
ZoneList<typename ClassLiteral::Property*>* properties = node->properties();
for (int i = 0; i < properties->length(); i++) {
- VisitObjectLiteralProperty(properties->at(i));
+ VisitLiteralProperty(properties->at(i));
}
}
-
void AstExpressionRewriter::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
REWRITE_THIS(node);
@@ -243,13 +242,11 @@ void AstExpressionRewriter::VisitObjectLiteral(ObjectLiteral* node) {
REWRITE_THIS(node);
ZoneList<typename ObjectLiteral::Property*>* properties = node->properties();
for (int i = 0; i < properties->length(); i++) {
- VisitObjectLiteralProperty(properties->at(i));
+ VisitLiteralProperty(properties->at(i));
}
}
-
-void AstExpressionRewriter::VisitObjectLiteralProperty(
- ObjectLiteralProperty* property) {
+void AstExpressionRewriter::VisitLiteralProperty(LiteralProperty* property) {
if (property == nullptr) return;
AST_REWRITE_PROPERTY(Expression, property, key);
AST_REWRITE_PROPERTY(Expression, property, value);
diff --git a/deps/v8/src/ast/ast-expression-rewriter.h b/deps/v8/src/ast/ast-expression-rewriter.h
index ac45d76b30..dfed3e195d 100644
--- a/deps/v8/src/ast/ast-expression-rewriter.h
+++ b/deps/v8/src/ast/ast-expression-rewriter.h
@@ -9,7 +9,7 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/type-info.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -33,7 +33,7 @@ class AstExpressionRewriter : public AstVisitor<AstExpressionRewriter> {
virtual void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitExpressions(ZoneList<Expression*>* expressions);
- virtual void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
+ virtual void VisitLiteralProperty(LiteralProperty* property);
protected:
virtual bool RewriteExpression(Expression* expr) = 0;
diff --git a/deps/v8/src/ast/ast-literal-reindexer.cc b/deps/v8/src/ast/ast-literal-reindexer.cc
index a349ae0e97..81a5225fdc 100644
--- a/deps/v8/src/ast/ast-literal-reindexer.cc
+++ b/deps/v8/src/ast/ast-literal-reindexer.cc
@@ -249,21 +249,18 @@ void AstLiteralReindexer::VisitClassLiteral(ClassLiteral* node) {
VisitVariableProxy(node->class_variable_proxy());
}
for (int i = 0; i < node->properties()->length(); i++) {
- VisitObjectLiteralProperty(node->properties()->at(i));
+ VisitLiteralProperty(node->properties()->at(i));
}
}
-
void AstLiteralReindexer::VisitObjectLiteral(ObjectLiteral* node) {
UpdateIndex(node);
for (int i = 0; i < node->properties()->length(); i++) {
- VisitObjectLiteralProperty(node->properties()->at(i));
+ VisitLiteralProperty(node->properties()->at(i));
}
}
-
-void AstLiteralReindexer::VisitObjectLiteralProperty(
- ObjectLiteralProperty* node) {
+void AstLiteralReindexer::VisitLiteralProperty(LiteralProperty* node) {
Visit(node->key());
Visit(node->value());
}
diff --git a/deps/v8/src/ast/ast-literal-reindexer.h b/deps/v8/src/ast/ast-literal-reindexer.h
index b33e0c541c..4e0ca6bef5 100644
--- a/deps/v8/src/ast/ast-literal-reindexer.h
+++ b/deps/v8/src/ast/ast-literal-reindexer.h
@@ -26,7 +26,7 @@ class AstLiteralReindexer final : public AstVisitor<AstLiteralReindexer> {
void VisitStatements(ZoneList<Statement*>* statements);
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void VisitArguments(ZoneList<Expression*>* arguments);
- void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
+ void VisitLiteralProperty(LiteralProperty* property);
void UpdateIndex(MaterializedLiteral* literal) {
literal->literal_index_ = next_index_++;
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index 1b9905a2c6..e1b11f655a 100644
--- a/deps/v8/src/ast/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -39,7 +39,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
void VisitStatements(ZoneList<Statement*>* statements);
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void VisitArguments(ZoneList<Expression*>* arguments);
- void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
+ void VisitLiteralProperty(LiteralProperty* property);
int ReserveIdRange(int n) {
int tmp = next_id_;
@@ -233,14 +233,6 @@ void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
void AstNumberingVisitor::VisitBlock(Block* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Block::num_ids()));
-
- if (FLAG_ignition && node->scope() != nullptr &&
- node->scope()->NeedsContext()) {
- // Create ScopeInfo while on the main thread to avoid allocation during
- // potentially concurrent bytecode generation.
- node->scope()->GetScopeInfo(isolate_);
- }
-
if (node->scope() != NULL) VisitDeclarations(node->scope()->declarations());
VisitStatements(node->statements());
}
@@ -257,6 +249,27 @@ void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(CallRuntime::num_ids()));
VisitArguments(node->arguments());
+ // To support catch prediction within async/await:
+ //
+ // The AstNumberingVisitor is when catch prediction currently occurs, and it
+ // is the only common point that has access to this information. The parser
+ // just doesn't know yet. Take the following two cases of catch prediction:
+ //
+ // try { await fn(); } catch (e) { }
+ // try { await fn(); } finally { }
+ //
+ // When parsing the await that we want to mark as caught or uncaught, it's
+ // not yet known whether it will be followed by a 'finally' or a 'catch.
+ // The AstNumberingVisitor is what learns whether it is caught. To make
+ // the information available later to the runtime, the AstNumberingVisitor
+ // has to stash it somewhere. Changing the runtime function into another
+ // one in ast-numbering seemed like a simple and straightforward solution to
+ // that problem.
+ if (node->is_jsruntime() &&
+ node->context_index() == Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX &&
+ catch_prediction_ == HandlerTable::ASYNC_AWAIT) {
+ node->set_context_index(Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX);
+ }
}
@@ -370,6 +383,7 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
node->set_base_id(ReserveIdRange(CompareOperation::num_ids()));
Visit(node->left());
Visit(node->right());
+ ReserveFeedbackSlots(node);
}
@@ -444,6 +458,7 @@ void AstNumberingVisitor::VisitCaseClause(CaseClause* node) {
node->set_base_id(ReserveIdRange(CaseClause::num_ids()));
if (!node->is_default()) Visit(node->label());
VisitStatements(node->statements());
+ ReserveFeedbackSlots(node);
}
@@ -470,7 +485,7 @@ void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
VisitVariableProxy(node->class_variable_proxy());
}
for (int i = 0; i < node->properties()->length(); i++) {
- VisitObjectLiteralProperty(node->properties()->at(i));
+ VisitLiteralProperty(node->properties()->at(i));
}
ReserveFeedbackSlots(node);
}
@@ -480,7 +495,7 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(node->num_ids()));
for (int i = 0; i < node->properties()->length(); i++) {
- VisitObjectLiteralProperty(node->properties()->at(i));
+ VisitLiteralProperty(node->properties()->at(i));
}
node->BuildConstantProperties(isolate_);
// Mark all computed expressions that are bound to a key that
@@ -490,15 +505,12 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
ReserveFeedbackSlots(node);
}
-
-void AstNumberingVisitor::VisitObjectLiteralProperty(
- ObjectLiteralProperty* node) {
+void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
if (node->is_computed_name()) DisableCrankshaft(kComputedPropertyName);
Visit(node->key());
Visit(node->value());
}
-
void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(node->num_ids()));
@@ -570,27 +582,22 @@ void AstNumberingVisitor::VisitRewritableExpression(
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DeclarationScope* scope = node->scope();
if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
- if (scope->calls_eval()) DisableOptimization(kFunctionCallsEval);
+ if (scope->calls_eval()) DisableCrankshaft(kFunctionCallsEval);
if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
DisableCrankshaft(kContextAllocatedArguments);
}
- int rest_index;
- if (scope->rest_parameter(&rest_index)) {
+ if (scope->rest_parameter() != nullptr) {
DisableCrankshaft(kRestParameter);
}
- if (FLAG_ignition && scope->NeedsContext() && scope->is_script_scope()) {
- // Create ScopeInfo while on the main thread to avoid allocation during
- // potentially concurrent bytecode generation.
- node->scope()->GetScopeInfo(isolate_);
- }
-
if (IsGeneratorFunction(node->kind()) || IsAsyncFunction(node->kind())) {
- // TODO(neis): We may want to allow Turbofan optimization here if
- // --turbo-from-bytecode is set and we know that Ignition is used.
- // Unfortunately we can't express that here.
- DisableOptimization(kGenerator);
+ // Generators can be optimized if --turbo-from-bytecode is set.
+ if (FLAG_turbo_from_bytecode) {
+ DisableCrankshaft(kGenerator);
+ } else {
+ DisableOptimization(kGenerator);
+ }
}
VisitDeclarations(scope->declarations());
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index 0f2976c4ca..e0f88e19a9 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -447,9 +447,9 @@ void AstTraversalVisitor<Subclass>::VisitClassLiteral(ClassLiteral* expr) {
RECURSE_EXPRESSION(Visit(expr->extends()));
}
RECURSE_EXPRESSION(Visit(expr->constructor()));
- ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+ ZoneList<ClassLiteralProperty*>* props = expr->properties();
for (int i = 0; i < props->length(); ++i) {
- ObjectLiteralProperty* prop = props->at(i);
+ ClassLiteralProperty* prop = props->at(i);
if (!prop->key()->IsLiteral()) {
RECURSE_EXPRESSION(Visit(prop->key()));
}
diff --git a/deps/v8/src/ast/ast-type-bounds.h b/deps/v8/src/ast/ast-type-bounds.h
index ec26fdfc02..0d1a3c8498 100644
--- a/deps/v8/src/ast/ast-type-bounds.h
+++ b/deps/v8/src/ast/ast-type-bounds.h
@@ -7,8 +7,8 @@
#ifndef V8_AST_AST_TYPE_BOUNDS_H_
#define V8_AST_AST_TYPE_BOUNDS_H_
-#include "src/types.h"
-#include "src/zone-containers.h"
+#include "src/ast/ast-types.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -20,18 +20,18 @@ class AstTypeBounds {
explicit AstTypeBounds(Zone* zone) : bounds_map_(zone) {}
~AstTypeBounds() {}
- Bounds get(Expression* expression) const {
- ZoneMap<Expression*, Bounds>::const_iterator i =
+ AstBounds get(Expression* expression) const {
+ ZoneMap<Expression*, AstBounds>::const_iterator i =
bounds_map_.find(expression);
- return (i != bounds_map_.end()) ? i->second : Bounds::Unbounded();
+ return (i != bounds_map_.end()) ? i->second : AstBounds::Unbounded();
}
- void set(Expression* expression, Bounds bounds) {
+ void set(Expression* expression, AstBounds bounds) {
bounds_map_[expression] = bounds;
}
private:
- ZoneMap<Expression*, Bounds> bounds_map_;
+ ZoneMap<Expression*, AstBounds> bounds_map_;
};
} // namespace internal
diff --git a/deps/v8/src/types.cc b/deps/v8/src/ast/ast-types.cc
index c978dac5c2..a075e8e787 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/ast/ast-types.cc
@@ -4,7 +4,7 @@
#include <iomanip>
-#include "src/types.h"
+#include "src/ast/ast-types.h"
#include "src/handles-inl.h"
#include "src/ostreams.h"
@@ -12,21 +12,20 @@
namespace v8 {
namespace internal {
-
// NOTE: If code is marked as being a "shortcut", this means that removing
// the code won't affect the semantics of the surrounding function definition.
// static
-bool Type::IsInteger(i::Object* x) {
- return x->IsNumber() && Type::IsInteger(x->Number());
+bool AstType::IsInteger(i::Object* x) {
+ return x->IsNumber() && AstType::IsInteger(x->Number());
}
// -----------------------------------------------------------------------------
// Range-related helper functions.
-bool RangeType::Limits::IsEmpty() { return this->min > this->max; }
+bool AstRangeType::Limits::IsEmpty() { return this->min > this->max; }
-RangeType::Limits RangeType::Limits::Intersect(Limits lhs, Limits rhs) {
+AstRangeType::Limits AstRangeType::Limits::Intersect(Limits lhs, Limits rhs) {
DisallowHeapAllocation no_allocation;
Limits result(lhs);
if (lhs.min < rhs.min) result.min = rhs.min;
@@ -34,7 +33,7 @@ RangeType::Limits RangeType::Limits::Intersect(Limits lhs, Limits rhs) {
return result;
}
-RangeType::Limits RangeType::Limits::Union(Limits lhs, Limits rhs) {
+AstRangeType::Limits AstRangeType::Limits::Union(Limits lhs, Limits rhs) {
DisallowHeapAllocation no_allocation;
if (lhs.IsEmpty()) return rhs;
if (rhs.IsEmpty()) return lhs;
@@ -44,38 +43,36 @@ RangeType::Limits RangeType::Limits::Union(Limits lhs, Limits rhs) {
return result;
}
-bool Type::Overlap(RangeType* lhs, RangeType* rhs) {
+bool AstType::Overlap(AstRangeType* lhs, AstRangeType* rhs) {
DisallowHeapAllocation no_allocation;
- return !RangeType::Limits::Intersect(RangeType::Limits(lhs),
- RangeType::Limits(rhs))
+ return !AstRangeType::Limits::Intersect(AstRangeType::Limits(lhs),
+ AstRangeType::Limits(rhs))
.IsEmpty();
}
-bool Type::Contains(RangeType* lhs, RangeType* rhs) {
+bool AstType::Contains(AstRangeType* lhs, AstRangeType* rhs) {
DisallowHeapAllocation no_allocation;
return lhs->Min() <= rhs->Min() && rhs->Max() <= lhs->Max();
}
-bool Type::Contains(RangeType* lhs, ConstantType* rhs) {
+bool AstType::Contains(AstRangeType* lhs, AstConstantType* rhs) {
DisallowHeapAllocation no_allocation;
- return IsInteger(*rhs->Value()) &&
- lhs->Min() <= rhs->Value()->Number() &&
+ return IsInteger(*rhs->Value()) && lhs->Min() <= rhs->Value()->Number() &&
rhs->Value()->Number() <= lhs->Max();
}
-bool Type::Contains(RangeType* range, i::Object* val) {
+bool AstType::Contains(AstRangeType* range, i::Object* val) {
DisallowHeapAllocation no_allocation;
- return IsInteger(val) &&
- range->Min() <= val->Number() && val->Number() <= range->Max();
+ return IsInteger(val) && range->Min() <= val->Number() &&
+ val->Number() <= range->Max();
}
-
// -----------------------------------------------------------------------------
// Min and Max computation.
-double Type::Min() {
+double AstType::Min() {
DCHECK(this->SemanticIs(Number()));
- if (this->IsBitset()) return BitsetType::Min(this->AsBitset());
+ if (this->IsBitset()) return AstBitsetType::Min(this->AsBitset());
if (this->IsUnion()) {
double min = +V8_INFINITY;
for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
@@ -89,9 +86,9 @@ double Type::Min() {
return 0;
}
-double Type::Max() {
+double AstType::Max() {
DCHECK(this->SemanticIs(Number()));
- if (this->IsBitset()) return BitsetType::Max(this->AsBitset());
+ if (this->IsBitset()) return AstBitsetType::Max(this->AsBitset());
if (this->IsUnion()) {
double max = -V8_INFINITY;
for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
@@ -105,13 +102,11 @@ double Type::Max() {
return 0;
}
-
// -----------------------------------------------------------------------------
// Glb and lub computation.
-
// The largest bitset subsumed by this type.
-Type::bitset BitsetType::Glb(Type* type) {
+AstType::bitset AstBitsetType::Glb(AstType* type) {
DisallowHeapAllocation no_allocation;
// Fast case.
if (IsBitset(type)) {
@@ -119,19 +114,18 @@ Type::bitset BitsetType::Glb(Type* type) {
} else if (type->IsUnion()) {
SLOW_DCHECK(type->AsUnion()->Wellformed());
return type->AsUnion()->Get(0)->BitsetGlb() |
- SEMANTIC(type->AsUnion()->Get(1)->BitsetGlb()); // Shortcut.
+ AST_SEMANTIC(type->AsUnion()->Get(1)->BitsetGlb()); // Shortcut.
} else if (type->IsRange()) {
- bitset glb = SEMANTIC(
- BitsetType::Glb(type->AsRange()->Min(), type->AsRange()->Max()));
- return glb | REPRESENTATION(type->BitsetLub());
+ bitset glb = AST_SEMANTIC(
+ AstBitsetType::Glb(type->AsRange()->Min(), type->AsRange()->Max()));
+ return glb | AST_REPRESENTATION(type->BitsetLub());
} else {
return type->Representation();
}
}
-
// The smallest bitset subsuming this type, possibly not a proper one.
-Type::bitset BitsetType::Lub(Type* type) {
+AstType::bitset AstBitsetType::Lub(AstType* type) {
DisallowHeapAllocation no_allocation;
if (IsBitset(type)) return type->AsBitset();
if (type->IsUnion()) {
@@ -140,7 +134,7 @@ Type::bitset BitsetType::Lub(Type* type) {
int bitset = type->AsUnion()->Get(0)->BitsetLub();
for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
// Other elements only contribute their semantic part.
- bitset |= SEMANTIC(type->AsUnion()->Get(i)->BitsetLub());
+ bitset |= AST_SEMANTIC(type->AsUnion()->Get(i)->BitsetLub());
}
return bitset;
}
@@ -155,7 +149,7 @@ Type::bitset BitsetType::Lub(Type* type) {
return kNone;
}
-Type::bitset BitsetType::Lub(i::Map* map) {
+AstType::bitset AstBitsetType::Lub(i::Map* map) {
DisallowHeapAllocation no_allocation;
switch (map->instance_type()) {
case STRING_TYPE:
@@ -214,7 +208,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_DATE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
- case JS_MODULE_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
@@ -224,6 +217,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_MAP_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
+ case JS_STRING_ITERATOR_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
@@ -250,6 +244,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case SCRIPT_TYPE:
case CODE_TYPE:
case PROPERTY_CELL_TYPE:
+ case MODULE_TYPE:
return kOtherInternal & kTaggedPointer;
// Remaining instance types are unsupported for now. If any of them do
@@ -265,6 +260,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case ACCESS_CHECK_INFO_TYPE:
case INTERCEPTOR_INFO_TYPE:
case CALL_HANDLER_INFO_TYPE:
+ case PROMISE_CONTAINER_TYPE:
case FUNCTION_TEMPLATE_INFO_TYPE:
case OBJECT_TEMPLATE_INFO_TYPE:
case SIGNATURE_INFO_TYPE:
@@ -278,7 +274,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case CELL_TYPE:
case WEAK_CELL_TYPE:
case PROTOTYPE_INFO_TYPE:
- case SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE:
+ case CONTEXT_EXTENSION_TYPE:
UNREACHABLE();
return kNone;
}
@@ -286,16 +282,16 @@ Type::bitset BitsetType::Lub(i::Map* map) {
return kNone;
}
-Type::bitset BitsetType::Lub(i::Object* value) {
+AstType::bitset AstBitsetType::Lub(i::Object* value) {
DisallowHeapAllocation no_allocation;
if (value->IsNumber()) {
return Lub(value->Number()) &
- (value->IsSmi() ? kTaggedSigned : kTaggedPointer);
+ (value->IsSmi() ? kTaggedSigned : kTaggedPointer);
}
return Lub(i::HeapObject::cast(value)->map());
}
-Type::bitset BitsetType::Lub(double value) {
+AstType::bitset AstBitsetType::Lub(double value) {
DisallowHeapAllocation no_allocation;
if (i::IsMinusZero(value)) return kMinusZero;
if (std::isnan(value)) return kNaN;
@@ -303,9 +299,8 @@ Type::bitset BitsetType::Lub(double value) {
return kOtherNumber;
}
-
// Minimum values of plain numeric bitsets.
-const BitsetType::Boundary BitsetType::BoundariesArray[] = {
+const AstBitsetType::Boundary AstBitsetType::BoundariesArray[] = {
{kOtherNumber, kPlainNumber, -V8_INFINITY},
{kOtherSigned32, kNegative32, kMinInt},
{kNegative31, kNegative31, -0x40000000},
@@ -314,45 +309,47 @@ const BitsetType::Boundary BitsetType::BoundariesArray[] = {
{kOtherUnsigned32, kUnsigned32, 0x80000000},
{kOtherNumber, kPlainNumber, static_cast<double>(kMaxUInt32) + 1}};
-const BitsetType::Boundary* BitsetType::Boundaries() { return BoundariesArray; }
+const AstBitsetType::Boundary* AstBitsetType::Boundaries() {
+ return BoundariesArray;
+}
-size_t BitsetType::BoundariesSize() {
+size_t AstBitsetType::BoundariesSize() {
// Windows doesn't like arraysize here.
// return arraysize(BoundariesArray);
return 7;
}
-Type::bitset BitsetType::ExpandInternals(Type::bitset bits) {
+AstType::bitset AstBitsetType::ExpandInternals(AstType::bitset bits) {
DisallowHeapAllocation no_allocation;
- if (!(bits & SEMANTIC(kPlainNumber))) return bits; // Shortcut.
+ if (!(bits & AST_SEMANTIC(kPlainNumber))) return bits; // Shortcut.
const Boundary* boundaries = Boundaries();
for (size_t i = 0; i < BoundariesSize(); ++i) {
- DCHECK(BitsetType::Is(boundaries[i].internal, boundaries[i].external));
- if (bits & SEMANTIC(boundaries[i].internal))
- bits |= SEMANTIC(boundaries[i].external);
+ DCHECK(AstBitsetType::Is(boundaries[i].internal, boundaries[i].external));
+ if (bits & AST_SEMANTIC(boundaries[i].internal))
+ bits |= AST_SEMANTIC(boundaries[i].external);
}
return bits;
}
-Type::bitset BitsetType::Lub(double min, double max) {
+AstType::bitset AstBitsetType::Lub(double min, double max) {
DisallowHeapAllocation no_allocation;
int lub = kNone;
const Boundary* mins = Boundaries();
for (size_t i = 1; i < BoundariesSize(); ++i) {
if (min < mins[i].min) {
- lub |= mins[i-1].internal;
+ lub |= mins[i - 1].internal;
if (max < mins[i].min) return lub;
}
}
return lub | mins[BoundariesSize() - 1].internal;
}
-Type::bitset BitsetType::NumberBits(bitset bits) {
- return SEMANTIC(bits & kPlainNumber);
+AstType::bitset AstBitsetType::NumberBits(bitset bits) {
+ return AST_SEMANTIC(bits & kPlainNumber);
}
-Type::bitset BitsetType::Glb(double min, double max) {
+AstType::bitset AstBitsetType::Glb(double min, double max) {
DisallowHeapAllocation no_allocation;
int glb = kNone;
const Boundary* mins = Boundaries();
@@ -368,16 +365,16 @@ Type::bitset BitsetType::Glb(double min, double max) {
}
// OtherNumber also contains float numbers, so it can never be
// in the greatest lower bound.
- return glb & ~(SEMANTIC(kOtherNumber));
+ return glb & ~(AST_SEMANTIC(kOtherNumber));
}
-double BitsetType::Min(bitset bits) {
+double AstBitsetType::Min(bitset bits) {
DisallowHeapAllocation no_allocation;
- DCHECK(Is(SEMANTIC(bits), kNumber));
+ DCHECK(Is(AST_SEMANTIC(bits), kNumber));
const Boundary* mins = Boundaries();
- bool mz = SEMANTIC(bits & kMinusZero);
+ bool mz = AST_SEMANTIC(bits & kMinusZero);
for (size_t i = 0; i < BoundariesSize(); ++i) {
- if (Is(SEMANTIC(mins[i].internal), bits)) {
+ if (Is(AST_SEMANTIC(mins[i].internal), bits)) {
return mz ? std::min(0.0, mins[i].min) : mins[i].min;
}
}
@@ -385,50 +382,49 @@ double BitsetType::Min(bitset bits) {
return std::numeric_limits<double>::quiet_NaN();
}
-double BitsetType::Max(bitset bits) {
+double AstBitsetType::Max(bitset bits) {
DisallowHeapAllocation no_allocation;
- DCHECK(Is(SEMANTIC(bits), kNumber));
+ DCHECK(Is(AST_SEMANTIC(bits), kNumber));
const Boundary* mins = Boundaries();
- bool mz = SEMANTIC(bits & kMinusZero);
- if (BitsetType::Is(SEMANTIC(mins[BoundariesSize() - 1].internal), bits)) {
+ bool mz = AST_SEMANTIC(bits & kMinusZero);
+ if (AstBitsetType::Is(AST_SEMANTIC(mins[BoundariesSize() - 1].internal),
+ bits)) {
return +V8_INFINITY;
}
for (size_t i = BoundariesSize() - 1; i-- > 0;) {
- if (Is(SEMANTIC(mins[i].internal), bits)) {
- return mz ?
- std::max(0.0, mins[i+1].min - 1) : mins[i+1].min - 1;
+ if (Is(AST_SEMANTIC(mins[i].internal), bits)) {
+ return mz ? std::max(0.0, mins[i + 1].min - 1) : mins[i + 1].min - 1;
}
}
if (mz) return 0;
return std::numeric_limits<double>::quiet_NaN();
}
-
// -----------------------------------------------------------------------------
// Predicates.
-bool Type::SimplyEquals(Type* that) {
+bool AstType::SimplyEquals(AstType* that) {
DisallowHeapAllocation no_allocation;
if (this->IsClass()) {
- return that->IsClass()
- && *this->AsClass()->Map() == *that->AsClass()->Map();
+ return that->IsClass() &&
+ *this->AsClass()->Map() == *that->AsClass()->Map();
}
if (this->IsConstant()) {
- return that->IsConstant()
- && *this->AsConstant()->Value() == *that->AsConstant()->Value();
+ return that->IsConstant() &&
+ *this->AsConstant()->Value() == *that->AsConstant()->Value();
}
if (this->IsContext()) {
- return that->IsContext()
- && this->AsContext()->Outer()->Equals(that->AsContext()->Outer());
+ return that->IsContext() &&
+ this->AsContext()->Outer()->Equals(that->AsContext()->Outer());
}
if (this->IsArray()) {
- return that->IsArray()
- && this->AsArray()->Element()->Equals(that->AsArray()->Element());
+ return that->IsArray() &&
+ this->AsArray()->Element()->Equals(that->AsArray()->Element());
}
if (this->IsFunction()) {
if (!that->IsFunction()) return false;
- FunctionType* this_fun = this->AsFunction();
- FunctionType* that_fun = that->AsFunction();
+ AstFunctionType* this_fun = this->AsFunction();
+ AstFunctionType* that_fun = that->AsFunction();
if (this_fun->Arity() != that_fun->Arity() ||
!this_fun->Result()->Equals(that_fun->Result()) ||
!this_fun->Receiver()->Equals(that_fun->Receiver())) {
@@ -441,8 +437,8 @@ bool Type::SimplyEquals(Type* that) {
}
if (this->IsTuple()) {
if (!that->IsTuple()) return false;
- TupleType* this_tuple = this->AsTuple();
- TupleType* that_tuple = that->AsTuple();
+ AstTupleType* this_tuple = this->AsTuple();
+ AstTupleType* that_tuple = that->AsTuple();
if (this_tuple->Arity() != that_tuple->Arity()) {
return false;
}
@@ -455,26 +451,25 @@ bool Type::SimplyEquals(Type* that) {
return false;
}
-Type::bitset Type::Representation() {
- return REPRESENTATION(this->BitsetLub());
+AstType::bitset AstType::Representation() {
+ return AST_REPRESENTATION(this->BitsetLub());
}
-
// Check if [this] <= [that].
-bool Type::SlowIs(Type* that) {
+bool AstType::SlowIs(AstType* that) {
DisallowHeapAllocation no_allocation;
// Fast bitset cases
if (that->IsBitset()) {
- return BitsetType::Is(this->BitsetLub(), that->AsBitset());
+ return AstBitsetType::Is(this->BitsetLub(), that->AsBitset());
}
if (this->IsBitset()) {
- return BitsetType::Is(this->AsBitset(), that->BitsetGlb());
+ return AstBitsetType::Is(this->AsBitset(), that->BitsetGlb());
}
// Check the representations.
- if (!BitsetType::Is(Representation(), that->Representation())) {
+ if (!AstBitsetType::Is(Representation(), that->Representation())) {
return false;
}
@@ -482,19 +477,19 @@ bool Type::SlowIs(Type* that) {
return SemanticIs(that);
}
-
-// Check if SEMANTIC([this]) <= SEMANTIC([that]). The result of the method
+// Check if AST_SEMANTIC([this]) <= AST_SEMANTIC([that]). The result of the
+// method
// should be independent of the representation axis of the types.
-bool Type::SemanticIs(Type* that) {
+bool AstType::SemanticIs(AstType* that) {
DisallowHeapAllocation no_allocation;
if (this == that) return true;
if (that->IsBitset()) {
- return BitsetType::Is(SEMANTIC(this->BitsetLub()), that->AsBitset());
+ return AstBitsetType::Is(AST_SEMANTIC(this->BitsetLub()), that->AsBitset());
}
if (this->IsBitset()) {
- return BitsetType::Is(SEMANTIC(this->AsBitset()), that->BitsetGlb());
+ return AstBitsetType::Is(AST_SEMANTIC(this->AsBitset()), that->BitsetGlb());
}
// (T1 \/ ... \/ Tn) <= T if (T1 <= T) /\ ... /\ (Tn <= T)
@@ -525,7 +520,7 @@ bool Type::SemanticIs(Type* that) {
}
// Most precise _current_ type of a value (usually its class).
-Type* Type::NowOf(i::Object* value, Zone* zone) {
+AstType* AstType::NowOf(i::Object* value, Zone* zone) {
if (value->IsSmi() ||
i::HeapObject::cast(value)->map()->instance_type() == HEAP_NUMBER_TYPE) {
return Of(value, zone);
@@ -533,7 +528,7 @@ Type* Type::NowOf(i::Object* value, Zone* zone) {
return Class(i::handle(i::HeapObject::cast(value)->map()), zone);
}
-bool Type::NowContains(i::Object* value) {
+bool AstType::NowContains(i::Object* value) {
DisallowHeapAllocation no_allocation;
if (this->IsAny()) return true;
if (value->IsHeapObject()) {
@@ -545,7 +540,7 @@ bool Type::NowContains(i::Object* value) {
return this->Contains(value);
}
-bool Type::NowIs(Type* that) {
+bool AstType::NowIs(AstType* that) {
DisallowHeapAllocation no_allocation;
// TODO(rossberg): this is incorrect for
@@ -563,27 +558,25 @@ bool Type::NowIs(Type* that) {
return this->Is(that);
}
-
// Check if [this] contains only (currently) stable classes.
-bool Type::NowStable() {
+bool AstType::NowStable() {
DisallowHeapAllocation no_allocation;
return !this->IsClass() || this->AsClass()->Map()->is_stable();
}
-
// Check if [this] and [that] overlap.
-bool Type::Maybe(Type* that) {
+bool AstType::Maybe(AstType* that) {
DisallowHeapAllocation no_allocation;
// Take care of the representation part (and also approximate
// the semantic part).
- if (!BitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub()))
+ if (!AstBitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub()))
return false;
return SemanticMaybe(that);
}
-bool Type::SemanticMaybe(Type* that) {
+bool AstType::SemanticMaybe(AstType* that) {
DisallowHeapAllocation no_allocation;
// (T1 \/ ... \/ Tn) overlaps T if (T1 overlaps T) \/ ... \/ (Tn overlaps T)
@@ -602,7 +595,8 @@ bool Type::SemanticMaybe(Type* that) {
return false;
}
- if (!BitsetType::SemanticIsInhabited(this->BitsetLub() & that->BitsetLub()))
+ if (!AstBitsetType::SemanticIsInhabited(this->BitsetLub() &
+ that->BitsetLub()))
return false;
if (this->IsBitset() && that->IsBitset()) return true;
@@ -617,12 +611,12 @@ bool Type::SemanticMaybe(Type* that) {
return Overlap(this->AsRange(), that->AsRange());
}
if (that->IsBitset()) {
- bitset number_bits = BitsetType::NumberBits(that->AsBitset());
- if (number_bits == BitsetType::kNone) {
+ bitset number_bits = AstBitsetType::NumberBits(that->AsBitset());
+ if (number_bits == AstBitsetType::kNone) {
return false;
}
- double min = std::max(BitsetType::Min(number_bits), this->Min());
- double max = std::min(BitsetType::Max(number_bits), this->Max());
+ double min = std::max(AstBitsetType::Min(number_bits), this->Min());
+ double max = std::min(AstBitsetType::Max(number_bits), this->Max());
return min <= max;
}
}
@@ -635,9 +629,8 @@ bool Type::SemanticMaybe(Type* that) {
return this->SimplyEquals(that);
}
-
// Return the range in [this], or [NULL].
-Type* Type::GetRange() {
+AstType* AstType::GetRange() {
DisallowHeapAllocation no_allocation;
if (this->IsRange()) return this;
if (this->IsUnion() && this->AsUnion()->Get(1)->IsRange()) {
@@ -646,19 +639,19 @@ Type* Type::GetRange() {
return NULL;
}
-bool Type::Contains(i::Object* value) {
+bool AstType::Contains(i::Object* value) {
DisallowHeapAllocation no_allocation;
for (Iterator<i::Object> it = this->Constants(); !it.Done(); it.Advance()) {
if (*it.Current() == value) return true;
}
if (IsInteger(value)) {
- Type* range = this->GetRange();
+ AstType* range = this->GetRange();
if (range != NULL && Contains(range->AsRange(), value)) return true;
}
- return BitsetType::New(BitsetType::Lub(value))->Is(this);
+ return AstBitsetType::New(AstBitsetType::Lub(value))->Is(this);
}
-bool UnionType::Wellformed() {
+bool AstUnionType::Wellformed() {
DisallowHeapAllocation no_allocation;
// This checks the invariants of the union representation:
// 1. There are at least two elements.
@@ -668,7 +661,7 @@ bool UnionType::Wellformed() {
// 5. No element (except the bitset) is a subtype of any other.
// 6. If there is a range, then the bitset type does not contain
// plain number bits.
- DCHECK(this->Length() >= 2); // (1)
+ DCHECK(this->Length() >= 2); // (1)
DCHECK(this->Get(0)->IsBitset()); // (2a)
for (int i = 0; i < this->Length(); ++i) {
@@ -681,26 +674,23 @@ bool UnionType::Wellformed() {
}
}
DCHECK(!this->Get(1)->IsRange() ||
- (BitsetType::NumberBits(this->Get(0)->AsBitset()) ==
- BitsetType::kNone)); // (6)
+ (AstBitsetType::NumberBits(this->Get(0)->AsBitset()) ==
+ AstBitsetType::kNone)); // (6)
return true;
}
-
// -----------------------------------------------------------------------------
// Union and intersection
-
static bool AddIsSafe(int x, int y) {
- return x >= 0 ?
- y <= std::numeric_limits<int>::max() - x :
- y >= std::numeric_limits<int>::min() - x;
+ return x >= 0 ? y <= std::numeric_limits<int>::max() - x
+ : y >= std::numeric_limits<int>::min() - x;
}
-Type* Type::Intersect(Type* type1, Type* type2, Zone* zone) {
+AstType* AstType::Intersect(AstType* type1, AstType* type2, Zone* zone) {
// Fast case: bit sets.
if (type1->IsBitset() && type2->IsBitset()) {
- return BitsetType::New(type1->AsBitset() & type2->AsBitset());
+ return AstBitsetType::New(type1->AsBitset() & type2->AsBitset());
}
// Fast case: top or bottom types.
@@ -731,38 +721,39 @@ Type* Type::Intersect(Type* type1, Type* type2, Zone* zone) {
}
bitset bits =
- SEMANTIC(type1->BitsetGlb() & type2->BitsetGlb()) | representation;
+ AST_SEMANTIC(type1->BitsetGlb() & type2->BitsetGlb()) | representation;
int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
if (!AddIsSafe(size1, size2)) return Any();
int size = size1 + size2;
if (!AddIsSafe(size, 2)) return Any();
size += 2;
- Type* result_type = UnionType::New(size, zone);
- UnionType* result = result_type->AsUnion();
+ AstType* result_type = AstUnionType::New(size, zone);
+ AstUnionType* result = result_type->AsUnion();
size = 0;
// Deal with bitsets.
- result->Set(size++, BitsetType::New(bits));
+ result->Set(size++, AstBitsetType::New(bits));
- RangeType::Limits lims = RangeType::Limits::Empty();
+ AstRangeType::Limits lims = AstRangeType::Limits::Empty();
size = IntersectAux(type1, type2, result, size, &lims, zone);
// If the range is not empty, then insert it into the union and
// remove the number bits from the bitset.
if (!lims.IsEmpty()) {
- size = UpdateRange(RangeType::New(lims, representation, zone), result, size,
- zone);
+ size = UpdateRange(AstRangeType::New(lims, representation, zone), result,
+ size, zone);
// Remove the number bits.
- bitset number_bits = BitsetType::NumberBits(bits);
+ bitset number_bits = AstBitsetType::NumberBits(bits);
bits &= ~number_bits;
- result->Set(0, BitsetType::New(bits));
+ result->Set(0, AstBitsetType::New(bits));
}
return NormalizeUnion(result_type, size, zone);
}
-int Type::UpdateRange(Type* range, UnionType* result, int size, Zone* zone) {
+int AstType::UpdateRange(AstType* range, AstUnionType* result, int size,
+ Zone* zone) {
if (size == 1) {
result->Set(size++, range);
} else {
@@ -772,7 +763,7 @@ int Type::UpdateRange(Type* range, UnionType* result, int size, Zone* zone) {
}
// Remove any components that just got subsumed.
- for (int i = 2; i < size; ) {
+ for (int i = 2; i < size;) {
if (result->Get(i)->SemanticIs(range)) {
result->Set(i, result->Get(--size));
} else {
@@ -782,26 +773,27 @@ int Type::UpdateRange(Type* range, UnionType* result, int size, Zone* zone) {
return size;
}
-RangeType::Limits Type::ToLimits(bitset bits, Zone* zone) {
- bitset number_bits = BitsetType::NumberBits(bits);
+AstRangeType::Limits AstType::ToLimits(bitset bits, Zone* zone) {
+ bitset number_bits = AstBitsetType::NumberBits(bits);
- if (number_bits == BitsetType::kNone) {
- return RangeType::Limits::Empty();
+ if (number_bits == AstBitsetType::kNone) {
+ return AstRangeType::Limits::Empty();
}
- return RangeType::Limits(BitsetType::Min(number_bits),
- BitsetType::Max(number_bits));
+ return AstRangeType::Limits(AstBitsetType::Min(number_bits),
+ AstBitsetType::Max(number_bits));
}
-RangeType::Limits Type::IntersectRangeAndBitset(Type* range, Type* bitset,
- Zone* zone) {
- RangeType::Limits range_lims(range->AsRange());
- RangeType::Limits bitset_lims = ToLimits(bitset->AsBitset(), zone);
- return RangeType::Limits::Intersect(range_lims, bitset_lims);
+AstRangeType::Limits AstType::IntersectRangeAndBitset(AstType* range,
+ AstType* bitset,
+ Zone* zone) {
+ AstRangeType::Limits range_lims(range->AsRange());
+ AstRangeType::Limits bitset_lims = ToLimits(bitset->AsBitset(), zone);
+ return AstRangeType::Limits::Intersect(range_lims, bitset_lims);
}
-int Type::IntersectAux(Type* lhs, Type* rhs, UnionType* result, int size,
- RangeType::Limits* lims, Zone* zone) {
+int AstType::IntersectAux(AstType* lhs, AstType* rhs, AstUnionType* result,
+ int size, AstRangeType::Limits* lims, Zone* zone) {
if (lhs->IsUnion()) {
for (int i = 0, n = lhs->AsUnion()->Length(); i < n; ++i) {
size =
@@ -817,31 +809,33 @@ int Type::IntersectAux(Type* lhs, Type* rhs, UnionType* result, int size,
return size;
}
- if (!BitsetType::SemanticIsInhabited(lhs->BitsetLub() & rhs->BitsetLub())) {
+ if (!AstBitsetType::SemanticIsInhabited(lhs->BitsetLub() &
+ rhs->BitsetLub())) {
return size;
}
if (lhs->IsRange()) {
if (rhs->IsBitset()) {
- RangeType::Limits lim = IntersectRangeAndBitset(lhs, rhs, zone);
+ AstRangeType::Limits lim = IntersectRangeAndBitset(lhs, rhs, zone);
if (!lim.IsEmpty()) {
- *lims = RangeType::Limits::Union(lim, *lims);
+ *lims = AstRangeType::Limits::Union(lim, *lims);
}
return size;
}
if (rhs->IsClass()) {
- *lims =
- RangeType::Limits::Union(RangeType::Limits(lhs->AsRange()), *lims);
+ *lims = AstRangeType::Limits::Union(AstRangeType::Limits(lhs->AsRange()),
+ *lims);
}
if (rhs->IsConstant() && Contains(lhs->AsRange(), rhs->AsConstant())) {
return AddToUnion(rhs, result, size, zone);
}
if (rhs->IsRange()) {
- RangeType::Limits lim = RangeType::Limits::Intersect(
- RangeType::Limits(lhs->AsRange()), RangeType::Limits(rhs->AsRange()));
+ AstRangeType::Limits lim =
+ AstRangeType::Limits::Intersect(AstRangeType::Limits(lhs->AsRange()),
+ AstRangeType::Limits(rhs->AsRange()));
if (!lim.IsEmpty()) {
- *lims = RangeType::Limits::Union(lim, *lims);
+ *lims = AstRangeType::Limits::Union(lim, *lims);
}
}
return size;
@@ -862,29 +856,29 @@ int Type::IntersectAux(Type* lhs, Type* rhs, UnionType* result, int size,
return size;
}
-
// Make sure that we produce a well-formed range and bitset:
// If the range is non-empty, the number bits in the bitset should be
// clear. Moreover, if we have a canonical range (such as Signed32),
// we want to produce a bitset rather than a range.
-Type* Type::NormalizeRangeAndBitset(Type* range, bitset* bits, Zone* zone) {
+AstType* AstType::NormalizeRangeAndBitset(AstType* range, bitset* bits,
+ Zone* zone) {
// Fast path: If the bitset does not mention numbers, we can just keep the
// range.
- bitset number_bits = BitsetType::NumberBits(*bits);
+ bitset number_bits = AstBitsetType::NumberBits(*bits);
if (number_bits == 0) {
return range;
}
// If the range is semantically contained within the bitset, return None and
// leave the bitset untouched.
- bitset range_lub = SEMANTIC(range->BitsetLub());
- if (BitsetType::Is(range_lub, *bits)) {
+ bitset range_lub = AST_SEMANTIC(range->BitsetLub());
+ if (AstBitsetType::Is(range_lub, *bits)) {
return None();
}
// Slow path: reconcile the bitset range and the range.
- double bitset_min = BitsetType::Min(number_bits);
- double bitset_max = BitsetType::Max(number_bits);
+ double bitset_min = AstBitsetType::Min(number_bits);
+ double bitset_max = AstBitsetType::Max(number_bits);
double range_min = range->Min();
double range_max = range->Max();
@@ -905,13 +899,13 @@ Type* Type::NormalizeRangeAndBitset(Type* range, bitset* bits, Zone* zone) {
if (bitset_max > range_max) {
range_max = bitset_max;
}
- return RangeType::New(range_min, range_max, BitsetType::kNone, zone);
+ return AstRangeType::New(range_min, range_max, AstBitsetType::kNone, zone);
}
-Type* Type::Union(Type* type1, Type* type2, Zone* zone) {
+AstType* AstType::Union(AstType* type1, AstType* type2, Zone* zone) {
// Fast case: bit sets.
if (type1->IsBitset() && type2->IsBitset()) {
- return BitsetType::New(type1->AsBitset() | type2->AsBitset());
+ return AstBitsetType::New(type1->AsBitset() | type2->AsBitset());
}
// Fast case: top or bottom types.
@@ -936,30 +930,30 @@ Type* Type::Union(Type* type1, Type* type2, Zone* zone) {
int size = size1 + size2;
if (!AddIsSafe(size, 2)) return Any();
size += 2;
- Type* result_type = UnionType::New(size, zone);
- UnionType* result = result_type->AsUnion();
+ AstType* result_type = AstUnionType::New(size, zone);
+ AstUnionType* result = result_type->AsUnion();
size = 0;
// Compute the new bitset.
- bitset new_bitset = SEMANTIC(type1->BitsetGlb() | type2->BitsetGlb());
+ bitset new_bitset = AST_SEMANTIC(type1->BitsetGlb() | type2->BitsetGlb());
// Deal with ranges.
- Type* range = None();
- Type* range1 = type1->GetRange();
- Type* range2 = type2->GetRange();
+ AstType* range = None();
+ AstType* range1 = type1->GetRange();
+ AstType* range2 = type2->GetRange();
if (range1 != NULL && range2 != NULL) {
- RangeType::Limits lims =
- RangeType::Limits::Union(RangeType::Limits(range1->AsRange()),
- RangeType::Limits(range2->AsRange()));
- Type* union_range = RangeType::New(lims, representation, zone);
+ AstRangeType::Limits lims =
+ AstRangeType::Limits::Union(AstRangeType::Limits(range1->AsRange()),
+ AstRangeType::Limits(range2->AsRange()));
+ AstType* union_range = AstRangeType::New(lims, representation, zone);
range = NormalizeRangeAndBitset(union_range, &new_bitset, zone);
} else if (range1 != NULL) {
range = NormalizeRangeAndBitset(range1, &new_bitset, zone);
} else if (range2 != NULL) {
range = NormalizeRangeAndBitset(range2, &new_bitset, zone);
}
- new_bitset = SEMANTIC(new_bitset) | representation;
- Type* bits = BitsetType::New(new_bitset);
+ new_bitset = AST_SEMANTIC(new_bitset) | representation;
+ AstType* bits = AstBitsetType::New(new_bitset);
result->Set(size++, bits);
if (!range->IsNone()) result->Set(size++, range);
@@ -968,10 +962,10 @@ Type* Type::Union(Type* type1, Type* type2, Zone* zone) {
return NormalizeUnion(result_type, size, zone);
}
-
// Add [type] to [result] unless [type] is bitset, range, or already subsumed.
// Return new size of [result].
-int Type::AddToUnion(Type* type, UnionType* result, int size, Zone* zone) {
+int AstType::AddToUnion(AstType* type, AstUnionType* result, int size,
+ Zone* zone) {
if (type->IsBitset() || type->IsRange()) return size;
if (type->IsUnion()) {
for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
@@ -986,8 +980,8 @@ int Type::AddToUnion(Type* type, UnionType* result, int size, Zone* zone) {
return size;
}
-Type* Type::NormalizeUnion(Type* union_type, int size, Zone* zone) {
- UnionType* unioned = union_type->AsUnion();
+AstType* AstType::NormalizeUnion(AstType* union_type, int size, Zone* zone) {
+ AstUnionType* unioned = union_type->AsUnion();
DCHECK(size >= 1);
DCHECK(unioned->Get(0)->IsBitset());
// If the union has just one element, return it.
@@ -996,15 +990,15 @@ Type* Type::NormalizeUnion(Type* union_type, int size, Zone* zone) {
}
bitset bits = unioned->Get(0)->AsBitset();
// If the union only consists of a range, we can get rid of the union.
- if (size == 2 && SEMANTIC(bits) == BitsetType::kNone) {
- bitset representation = REPRESENTATION(bits);
+ if (size == 2 && AST_SEMANTIC(bits) == AstBitsetType::kNone) {
+ bitset representation = AST_REPRESENTATION(bits);
if (representation == unioned->Get(1)->Representation()) {
return unioned->Get(1);
}
if (unioned->Get(1)->IsRange()) {
- return RangeType::New(unioned->Get(1)->AsRange()->Min(),
- unioned->Get(1)->AsRange()->Max(),
- unioned->Get(0)->AsBitset(), zone);
+ return AstRangeType::New(unioned->Get(1)->AsRange()->Min(),
+ unioned->Get(1)->AsRange()->Max(),
+ unioned->Get(0)->AsBitset(), zone);
}
}
unioned->Shrink(size);
@@ -1012,26 +1006,23 @@ Type* Type::NormalizeUnion(Type* union_type, int size, Zone* zone) {
return union_type;
}
-
// -----------------------------------------------------------------------------
// Component extraction
// static
-Type* Type::Representation(Type* t, Zone* zone) {
- return BitsetType::New(t->Representation());
+AstType* AstType::Representation(AstType* t, Zone* zone) {
+ return AstBitsetType::New(t->Representation());
}
-
// static
-Type* Type::Semantic(Type* t, Zone* zone) {
- return Intersect(t, BitsetType::New(BitsetType::kSemantic), zone);
+AstType* AstType::Semantic(AstType* t, Zone* zone) {
+ return Intersect(t, AstBitsetType::New(AstBitsetType::kSemantic), zone);
}
-
// -----------------------------------------------------------------------------
// Iteration.
-int Type::NumClasses() {
+int AstType::NumClasses() {
DisallowHeapAllocation no_allocation;
if (this->IsClass()) {
return 1;
@@ -1046,7 +1037,7 @@ int Type::NumClasses() {
}
}
-int Type::NumConstants() {
+int AstType::NumConstants() {
DisallowHeapAllocation no_allocation;
if (this->IsConstant()) {
return 1;
@@ -1062,48 +1053,47 @@ int Type::NumConstants() {
}
template <class T>
-Type* Type::Iterator<T>::get_type() {
+AstType* AstType::Iterator<T>::get_type() {
DCHECK(!Done());
return type_->IsUnion() ? type_->AsUnion()->Get(index_) : type_;
}
-
// C++ cannot specialise nested templates, so we have to go through this
// contortion with an auxiliary template to simulate it.
template <class T>
struct TypeImplIteratorAux {
- static bool matches(Type* type);
- static i::Handle<T> current(Type* type);
+ static bool matches(AstType* type);
+ static i::Handle<T> current(AstType* type);
};
template <>
struct TypeImplIteratorAux<i::Map> {
- static bool matches(Type* type) { return type->IsClass(); }
- static i::Handle<i::Map> current(Type* type) {
+ static bool matches(AstType* type) { return type->IsClass(); }
+ static i::Handle<i::Map> current(AstType* type) {
return type->AsClass()->Map();
}
};
template <>
struct TypeImplIteratorAux<i::Object> {
- static bool matches(Type* type) { return type->IsConstant(); }
- static i::Handle<i::Object> current(Type* type) {
+ static bool matches(AstType* type) { return type->IsConstant(); }
+ static i::Handle<i::Object> current(AstType* type) {
return type->AsConstant()->Value();
}
};
template <class T>
-bool Type::Iterator<T>::matches(Type* type) {
+bool AstType::Iterator<T>::matches(AstType* type) {
return TypeImplIteratorAux<T>::matches(type);
}
template <class T>
-i::Handle<T> Type::Iterator<T>::Current() {
+i::Handle<T> AstType::Iterator<T>::Current() {
return TypeImplIteratorAux<T>::current(get_type());
}
template <class T>
-void Type::Iterator<T>::Advance() {
+void AstType::Iterator<T>::Advance() {
DisallowHeapAllocation no_allocation;
++index_;
if (type_->IsUnion()) {
@@ -1116,31 +1106,33 @@ void Type::Iterator<T>::Advance() {
index_ = -1;
}
-
// -----------------------------------------------------------------------------
// Printing.
-const char* BitsetType::Name(bitset bits) {
+const char* AstBitsetType::Name(bitset bits) {
switch (bits) {
- case REPRESENTATION(kAny): return "Any";
- #define RETURN_NAMED_REPRESENTATION_TYPE(type, value) \
- case REPRESENTATION(k##type): return #type;
- REPRESENTATION_BITSET_TYPE_LIST(RETURN_NAMED_REPRESENTATION_TYPE)
- #undef RETURN_NAMED_REPRESENTATION_TYPE
-
- #define RETURN_NAMED_SEMANTIC_TYPE(type, value) \
- case SEMANTIC(k##type): return #type;
- SEMANTIC_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
- INTERNAL_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
- #undef RETURN_NAMED_SEMANTIC_TYPE
+ case AST_REPRESENTATION(kAny):
+ return "Any";
+#define RETURN_NAMED_REPRESENTATION_TYPE(type, value) \
+ case AST_REPRESENTATION(k##type): \
+ return #type;
+ AST_REPRESENTATION_BITSET_TYPE_LIST(RETURN_NAMED_REPRESENTATION_TYPE)
+#undef RETURN_NAMED_REPRESENTATION_TYPE
+
+#define RETURN_NAMED_SEMANTIC_TYPE(type, value) \
+ case AST_SEMANTIC(k##type): \
+ return #type;
+ AST_SEMANTIC_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
+ AST_INTERNAL_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
+#undef RETURN_NAMED_SEMANTIC_TYPE
default:
return NULL;
}
}
-void BitsetType::Print(std::ostream& os, // NOLINT
- bitset bits) {
+void AstBitsetType::Print(std::ostream& os, // NOLINT
+ bitset bits) {
DisallowHeapAllocation no_allocation;
const char* name = Name(bits);
if (name != NULL) {
@@ -1150,13 +1142,13 @@ void BitsetType::Print(std::ostream& os, // NOLINT
// clang-format off
static const bitset named_bitsets[] = {
-#define BITSET_CONSTANT(type, value) REPRESENTATION(k##type),
- REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
+#define BITSET_CONSTANT(type, value) AST_REPRESENTATION(k##type),
+ AST_REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
#undef BITSET_CONSTANT
-#define BITSET_CONSTANT(type, value) SEMANTIC(k##type),
- INTERNAL_BITSET_TYPE_LIST(BITSET_CONSTANT)
- SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
+#define BITSET_CONSTANT(type, value) AST_SEMANTIC(k##type),
+ AST_INTERNAL_BITSET_TYPE_LIST(BITSET_CONSTANT)
+ AST_SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
#undef BITSET_CONSTANT
};
// clang-format on
@@ -1176,14 +1168,14 @@ void BitsetType::Print(std::ostream& os, // NOLINT
os << ")";
}
-void Type::PrintTo(std::ostream& os, PrintDimension dim) {
+void AstType::PrintTo(std::ostream& os, PrintDimension dim) {
DisallowHeapAllocation no_allocation;
if (dim != REPRESENTATION_DIM) {
if (this->IsBitset()) {
- BitsetType::Print(os, SEMANTIC(this->AsBitset()));
+ AstBitsetType::Print(os, AST_SEMANTIC(this->AsBitset()));
} else if (this->IsClass()) {
os << "Class(" << static_cast<void*>(*this->AsClass()->Map()) << " < ";
- BitsetType::New(BitsetType::Lub(this))->PrintTo(os, dim);
+ AstBitsetType::New(AstBitsetType::Lub(this))->PrintTo(os, dim);
os << ")";
} else if (this->IsConstant()) {
os << "Constant(" << Brief(*this->AsConstant()->Value()) << ")";
@@ -1201,7 +1193,7 @@ void Type::PrintTo(std::ostream& os, PrintDimension dim) {
} else if (this->IsUnion()) {
os << "(";
for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
- Type* type_i = this->AsUnion()->Get(i);
+ AstType* type_i = this->AsUnion()->Get(i);
if (i > 0) os << " | ";
type_i->PrintTo(os, dim);
}
@@ -1225,7 +1217,7 @@ void Type::PrintTo(std::ostream& os, PrintDimension dim) {
} else if (this->IsTuple()) {
os << "<";
for (int i = 0, n = this->AsTuple()->Arity(); i < n; ++i) {
- Type* type_i = this->AsTuple()->Element(i);
+ AstType* type_i = this->AsTuple()->Element(i);
if (i > 0) os << ", ";
type_i->PrintTo(os, dim);
}
@@ -1236,34 +1228,33 @@ void Type::PrintTo(std::ostream& os, PrintDimension dim) {
}
if (dim == BOTH_DIMS) os << "/";
if (dim != SEMANTIC_DIM) {
- BitsetType::Print(os, REPRESENTATION(this->BitsetLub()));
+ AstBitsetType::Print(os, AST_REPRESENTATION(this->BitsetLub()));
}
}
-
#ifdef DEBUG
-void Type::Print() {
+void AstType::Print() {
OFStream os(stdout);
PrintTo(os);
os << std::endl;
}
-void BitsetType::Print(bitset bits) {
+void AstBitsetType::Print(bitset bits) {
OFStream os(stdout);
Print(os, bits);
os << std::endl;
}
#endif
-BitsetType::bitset BitsetType::SignedSmall() {
+AstBitsetType::bitset AstBitsetType::SignedSmall() {
return i::SmiValuesAre31Bits() ? kSigned31 : kSigned32;
}
-BitsetType::bitset BitsetType::UnsignedSmall() {
+AstBitsetType::bitset AstBitsetType::UnsignedSmall() {
return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
}
#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
- Type* Type::Name(Isolate* isolate, Zone* zone) { \
+ AstType* AstType::Name(Isolate* isolate, Zone* zone) { \
return Class(i::handle(isolate->heap()->name##_map()), zone); \
}
SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
@@ -1272,8 +1263,8 @@ SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
// -----------------------------------------------------------------------------
// Instantiations.
-template class Type::Iterator<i::Map>;
-template class Type::Iterator<i::Object>;
+template class AstType::Iterator<i::Map>;
+template class AstType::Iterator<i::Object>;
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/ast-types.h b/deps/v8/src/ast/ast-types.h
new file mode 100644
index 0000000000..0b6e23ffda
--- /dev/null
+++ b/deps/v8/src/ast/ast-types.h
@@ -0,0 +1,1024 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_AST_TYPES_H_
+#define V8_AST_AST_TYPES_H_
+
+#include "src/conversions.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+// SUMMARY
+//
+// A simple type system for compiler-internal use. It is based entirely on
+// union types, and all subtyping hence amounts to set inclusion. Besides the
+// obvious primitive types and some predefined unions, the type language also
+// can express class types (a.k.a. specific maps) and singleton types (i.e.,
+// concrete constants).
+//
+// Types consist of two dimensions: semantic (value range) and representation.
+// Both are related through subtyping.
+//
+//
+// SEMANTIC DIMENSION
+//
+// The following equations and inequations hold for the semantic axis:
+//
+// None <= T
+// T <= Any
+//
+// Number = Signed32 \/ Unsigned32 \/ Double
+// Smi <= Signed32
+// Name = String \/ Symbol
+// UniqueName = InternalizedString \/ Symbol
+// InternalizedString < String
+//
+// Receiver = Object \/ Proxy
+// Array < Object
+// Function < Object
+// RegExp < Object
+// OtherUndetectable < Object
+// DetectableReceiver = Receiver - OtherUndetectable
+//
+// Class(map) < T iff instance_type(map) < T
+// Constant(x) < T iff instance_type(map(x)) < T
+// Array(T) < Array
+// Function(R, S, T0, T1, ...) < Function
+// Context(T) < Internal
+//
+// Both structural Array and Function types are invariant in all parameters;
+// relaxing this would make Union and Intersect operations more involved.
+// There is no subtyping relation between Array, Function, or Context types
+// and respective Constant types, since these types cannot be reconstructed
+// for arbitrary heap values.
+// Note also that Constant(x) < Class(map(x)) does _not_ hold, since x's map can
+// change! (Its instance type cannot, however.)
+// TODO(rossberg): the latter is not currently true for proxies, because of fix,
+// but will hold once we implement direct proxies.
+// However, we also define a 'temporal' variant of the subtyping relation that
+// considers the _current_ state only, i.e., Constant(x) <_now Class(map(x)).
+//
+//
+// REPRESENTATIONAL DIMENSION
+//
+// For the representation axis, the following holds:
+//
+// None <= R
+// R <= Any
+//
+// UntaggedInt = UntaggedInt1 \/ UntaggedInt8 \/
+// UntaggedInt16 \/ UntaggedInt32
+// UntaggedFloat = UntaggedFloat32 \/ UntaggedFloat64
+// UntaggedNumber = UntaggedInt \/ UntaggedFloat
+// Untagged = UntaggedNumber \/ UntaggedPtr
+// Tagged = TaggedInt \/ TaggedPtr
+//
+// Subtyping relates the two dimensions, for example:
+//
+// Number <= Tagged \/ UntaggedNumber
+// Object <= TaggedPtr \/ UntaggedPtr
+//
+// That holds because the semantic type constructors defined by the API create
+// types that allow for all possible representations, and dually, the ones for
+// representation types initially include all semantic ranges. Representations
+// can then e.g. be narrowed for a given semantic type using intersection:
+//
+// SignedSmall /\ TaggedInt (a 'smi')
+// Number /\ TaggedPtr (a heap number)
+//
+//
+// RANGE TYPES
+//
+// A range type represents a continuous integer interval by its minimum and
+// maximum value. Either value may be an infinity, in which case that infinity
+// itself is also included in the range. A range never contains NaN or -0.
+//
+// If a value v happens to be an integer n, then Constant(v) is considered a
+// subtype of Range(n, n) (and therefore also a subtype of any larger range).
+// In order to avoid large unions, however, it is usually a good idea to use
+// Range rather than Constant.
+//
+//
+// PREDICATES
+//
+// There are two main functions for testing types:
+//
+// T1->Is(T2) -- tests whether T1 is included in T2 (i.e., T1 <= T2)
+// T1->Maybe(T2) -- tests whether T1 and T2 overlap (i.e., T1 /\ T2 =/= 0)
+//
+// Typically, the former is to be used to select representations (e.g., via
+// T->Is(SignedSmall())), and the latter to check whether a specific case needs
+// handling (e.g., via T->Maybe(Number())).
+//
+// There is no functionality to discover whether a type is a leaf in the
+// lattice. That is intentional. It should always be possible to refine the
+// lattice (e.g., splitting up number types further) without invalidating any
+// existing assumptions or tests.
+// Consequently, do not normally use Equals for type tests, always use Is!
+//
+// The NowIs operator implements state-sensitive subtying, as described above.
+// Any compilation decision based on such temporary properties requires runtime
+// guarding!
+//
+//
+// PROPERTIES
+//
+// Various formal properties hold for constructors, operators, and predicates
+// over types. For example, constructors are injective and subtyping is a
+// complete partial order.
+//
+// See test/cctest/test-types.cc for a comprehensive executable specification,
+// especially with respect to the properties of the more exotic 'temporal'
+// constructors and predicates (those prefixed 'Now').
+//
+//
+// IMPLEMENTATION
+//
+// Internally, all 'primitive' types, and their unions, are represented as
+// bitsets. Bit 0 is reserved for tagging. Class is a heap pointer to the
+// respective map. Only structured types require allocation.
+// Note that the bitset representation is closed under both Union and Intersect.
+
+// -----------------------------------------------------------------------------
+// Values for bitset types
+
+// clang-format off
+
+#define AST_MASK_BITSET_TYPE_LIST(V) \
+ V(Representation, 0xffc00000u) \
+ V(Semantic, 0x003ffffeu)
+
+#define AST_REPRESENTATION(k) ((k) & AstBitsetType::kRepresentation)
+#define AST_SEMANTIC(k) ((k) & AstBitsetType::kSemantic)
+
+#define AST_REPRESENTATION_BITSET_TYPE_LIST(V) \
+ V(None, 0) \
+ V(UntaggedBit, 1u << 22 | kSemantic) \
+ V(UntaggedIntegral8, 1u << 23 | kSemantic) \
+ V(UntaggedIntegral16, 1u << 24 | kSemantic) \
+ V(UntaggedIntegral32, 1u << 25 | kSemantic) \
+ V(UntaggedFloat32, 1u << 26 | kSemantic) \
+ V(UntaggedFloat64, 1u << 27 | kSemantic) \
+ V(UntaggedSimd128, 1u << 28 | kSemantic) \
+ V(UntaggedPointer, 1u << 29 | kSemantic) \
+ V(TaggedSigned, 1u << 30 | kSemantic) \
+ V(TaggedPointer, 1u << 31 | kSemantic) \
+ \
+ V(UntaggedIntegral, kUntaggedBit | kUntaggedIntegral8 | \
+ kUntaggedIntegral16 | kUntaggedIntegral32) \
+ V(UntaggedFloat, kUntaggedFloat32 | kUntaggedFloat64) \
+ V(UntaggedNumber, kUntaggedIntegral | kUntaggedFloat) \
+ V(Untagged, kUntaggedNumber | kUntaggedPointer) \
+ V(Tagged, kTaggedSigned | kTaggedPointer)
+
+#define AST_INTERNAL_BITSET_TYPE_LIST(V) \
+ V(OtherUnsigned31, 1u << 1 | AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherUnsigned32, 1u << 2 | AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherSigned32, 1u << 3 | AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherNumber, 1u << 4 | AST_REPRESENTATION(kTagged | kUntaggedNumber))
+
+#define AST_SEMANTIC_BITSET_TYPE_LIST(V) \
+ V(Negative31, 1u << 5 | \
+ AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(Null, 1u << 6 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(Undefined, 1u << 7 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(Boolean, 1u << 8 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(Unsigned30, 1u << 9 | \
+ AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(MinusZero, 1u << 10 | \
+ AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(NaN, 1u << 11 | \
+ AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(Symbol, 1u << 12 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(InternalizedString, 1u << 13 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(OtherString, 1u << 14 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(Simd, 1u << 15 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(OtherObject, 1u << 17 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(OtherUndetectable, 1u << 16 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(Proxy, 1u << 18 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(Function, 1u << 19 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(Hole, 1u << 20 | AST_REPRESENTATION(kTaggedPointer)) \
+ V(OtherInternal, 1u << 21 | \
+ AST_REPRESENTATION(kTagged | kUntagged)) \
+ \
+ V(Signed31, kUnsigned30 | kNegative31) \
+ V(Signed32, kSigned31 | kOtherUnsigned31 | \
+ kOtherSigned32) \
+ V(Signed32OrMinusZero, kSigned32 | kMinusZero) \
+ V(Signed32OrMinusZeroOrNaN, kSigned32 | kMinusZero | kNaN) \
+ V(Negative32, kNegative31 | kOtherSigned32) \
+ V(Unsigned31, kUnsigned30 | kOtherUnsigned31) \
+ V(Unsigned32, kUnsigned30 | kOtherUnsigned31 | \
+ kOtherUnsigned32) \
+ V(Unsigned32OrMinusZero, kUnsigned32 | kMinusZero) \
+ V(Unsigned32OrMinusZeroOrNaN, kUnsigned32 | kMinusZero | kNaN) \
+ V(Integral32, kSigned32 | kUnsigned32) \
+ V(PlainNumber, kIntegral32 | kOtherNumber) \
+ V(OrderedNumber, kPlainNumber | kMinusZero) \
+ V(MinusZeroOrNaN, kMinusZero | kNaN) \
+ V(Number, kOrderedNumber | kNaN) \
+ V(String, kInternalizedString | kOtherString) \
+ V(UniqueName, kSymbol | kInternalizedString) \
+ V(Name, kSymbol | kString) \
+ V(BooleanOrNumber, kBoolean | kNumber) \
+ V(BooleanOrNullOrNumber, kBooleanOrNumber | kNull) \
+ V(BooleanOrNullOrUndefined, kBoolean | kNull | kUndefined) \
+ V(NullOrNumber, kNull | kNumber) \
+ V(NullOrUndefined, kNull | kUndefined) \
+ V(Undetectable, kNullOrUndefined | kOtherUndetectable) \
+ V(NumberOrOddball, kNumber | kNullOrUndefined | kBoolean | kHole) \
+ V(NumberOrSimdOrString, kNumber | kSimd | kString) \
+ V(NumberOrString, kNumber | kString) \
+ V(NumberOrUndefined, kNumber | kUndefined) \
+ V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
+ V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
+ V(DetectableReceiver, kFunction | kOtherObject | kProxy) \
+ V(Object, kFunction | kOtherObject | kOtherUndetectable) \
+ V(Receiver, kObject | kProxy) \
+ V(StringOrReceiver, kString | kReceiver) \
+ V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
+ kReceiver) \
+ V(Internal, kHole | kOtherInternal) \
+ V(NonInternal, kPrimitive | kReceiver) \
+ V(NonNumber, kUnique | kString | kInternal) \
+ V(Any, 0xfffffffeu)
+
+// clang-format on
+
+/*
+ * The following diagrams show how integers (in the mathematical sense) are
+ * divided among the different atomic numerical types.
+ *
+ * ON OS32 N31 U30 OU31 OU32 ON
+ * ______[_______[_______[_______[_______[_______[_______
+ * -2^31 -2^30 0 2^30 2^31 2^32
+ *
+ * E.g., OtherUnsigned32 (OU32) covers all integers from 2^31 to 2^32-1.
+ *
+ * Some of the atomic numerical bitsets are internal only (see
+ * INTERNAL_BITSET_TYPE_LIST). To a types user, they should only occur in
+ * union with certain other bitsets. For instance, OtherNumber should only
+ * occur as part of PlainNumber.
+ */
+
+#define AST_PROPER_BITSET_TYPE_LIST(V) \
+ AST_REPRESENTATION_BITSET_TYPE_LIST(V) \
+ AST_SEMANTIC_BITSET_TYPE_LIST(V)
+
+#define AST_BITSET_TYPE_LIST(V) \
+ AST_MASK_BITSET_TYPE_LIST(V) \
+ AST_REPRESENTATION_BITSET_TYPE_LIST(V) \
+ AST_INTERNAL_BITSET_TYPE_LIST(V) \
+ AST_SEMANTIC_BITSET_TYPE_LIST(V)
+
+class AstType;
+
+// -----------------------------------------------------------------------------
+// Bitset types (internal).
+
+class AstBitsetType {
+ public:
+ typedef uint32_t bitset; // Internal
+
+ enum : uint32_t {
+#define DECLARE_TYPE(type, value) k##type = (value),
+ AST_BITSET_TYPE_LIST(DECLARE_TYPE)
+#undef DECLARE_TYPE
+ kUnusedEOL = 0
+ };
+
+ static bitset SignedSmall();
+ static bitset UnsignedSmall();
+
+ bitset Bitset() {
+ return static_cast<bitset>(reinterpret_cast<uintptr_t>(this) ^ 1u);
+ }
+
+ static bool IsInhabited(bitset bits) {
+ return AST_SEMANTIC(bits) != kNone && AST_REPRESENTATION(bits) != kNone;
+ }
+
+ static bool SemanticIsInhabited(bitset bits) {
+ return AST_SEMANTIC(bits) != kNone;
+ }
+
+ static bool Is(bitset bits1, bitset bits2) {
+ return (bits1 | bits2) == bits2;
+ }
+
+ static double Min(bitset);
+ static double Max(bitset);
+
+ static bitset Glb(AstType* type); // greatest lower bound that's a bitset
+ static bitset Glb(double min, double max);
+ static bitset Lub(AstType* type); // least upper bound that's a bitset
+ static bitset Lub(i::Map* map);
+ static bitset Lub(i::Object* value);
+ static bitset Lub(double value);
+ static bitset Lub(double min, double max);
+ static bitset ExpandInternals(bitset bits);
+
+ static const char* Name(bitset);
+ static void Print(std::ostream& os, bitset); // NOLINT
+#ifdef DEBUG
+ static void Print(bitset);
+#endif
+
+ static bitset NumberBits(bitset bits);
+
+ static bool IsBitset(AstType* type) {
+ return reinterpret_cast<uintptr_t>(type) & 1;
+ }
+
+ static AstType* NewForTesting(bitset bits) { return New(bits); }
+
+ private:
+ friend class AstType;
+
+ static AstType* New(bitset bits) {
+ return reinterpret_cast<AstType*>(static_cast<uintptr_t>(bits | 1u));
+ }
+
+ struct Boundary {
+ bitset internal;
+ bitset external;
+ double min;
+ };
+ static const Boundary BoundariesArray[];
+ static inline const Boundary* Boundaries();
+ static inline size_t BoundariesSize();
+};
+
+// -----------------------------------------------------------------------------
+// Superclass for non-bitset types (internal).
+class AstTypeBase {
+ protected:
+ friend class AstType;
+
+ enum Kind {
+ kClass,
+ kConstant,
+ kContext,
+ kArray,
+ kFunction,
+ kTuple,
+ kUnion,
+ kRange
+ };
+
+ Kind kind() const { return kind_; }
+ explicit AstTypeBase(Kind kind) : kind_(kind) {}
+
+ static bool IsKind(AstType* type, Kind kind) {
+ if (AstBitsetType::IsBitset(type)) return false;
+ AstTypeBase* base = reinterpret_cast<AstTypeBase*>(type);
+ return base->kind() == kind;
+ }
+
+ // The hacky conversion to/from AstType*.
+ static AstType* AsType(AstTypeBase* type) {
+ return reinterpret_cast<AstType*>(type);
+ }
+ static AstTypeBase* FromType(AstType* type) {
+ return reinterpret_cast<AstTypeBase*>(type);
+ }
+
+ private:
+ Kind kind_;
+};
+
+// -----------------------------------------------------------------------------
+// Class types.
+
+class AstClassType : public AstTypeBase {
+ public:
+ i::Handle<i::Map> Map() { return map_; }
+
+ private:
+ friend class AstType;
+ friend class AstBitsetType;
+
+ static AstType* New(i::Handle<i::Map> map, Zone* zone) {
+ return AsType(new (zone->New(sizeof(AstClassType)))
+ AstClassType(AstBitsetType::Lub(*map), map));
+ }
+
+ static AstClassType* cast(AstType* type) {
+ DCHECK(IsKind(type, kClass));
+ return static_cast<AstClassType*>(FromType(type));
+ }
+
+ AstClassType(AstBitsetType::bitset bitset, i::Handle<i::Map> map)
+ : AstTypeBase(kClass), bitset_(bitset), map_(map) {}
+
+ AstBitsetType::bitset Lub() { return bitset_; }
+
+ AstBitsetType::bitset bitset_;
+ Handle<i::Map> map_;
+};
+
+// -----------------------------------------------------------------------------
+// Constant types.
+
+class AstConstantType : public AstTypeBase {
+ public:
+ i::Handle<i::Object> Value() { return object_; }
+
+ private:
+ friend class AstType;
+ friend class AstBitsetType;
+
+ static AstType* New(i::Handle<i::Object> value, Zone* zone) {
+ AstBitsetType::bitset bitset = AstBitsetType::Lub(*value);
+ return AsType(new (zone->New(sizeof(AstConstantType)))
+ AstConstantType(bitset, value));
+ }
+
+ static AstConstantType* cast(AstType* type) {
+ DCHECK(IsKind(type, kConstant));
+ return static_cast<AstConstantType*>(FromType(type));
+ }
+
+ AstConstantType(AstBitsetType::bitset bitset, i::Handle<i::Object> object)
+ : AstTypeBase(kConstant), bitset_(bitset), object_(object) {}
+
+ AstBitsetType::bitset Lub() { return bitset_; }
+
+ AstBitsetType::bitset bitset_;
+ Handle<i::Object> object_;
+};
+// TODO(neis): Also cache value if numerical.
+// TODO(neis): Allow restricting the representation.
+
+// -----------------------------------------------------------------------------
+// Range types.
+
+class AstRangeType : public AstTypeBase {
+ public:
+ struct Limits {
+ double min;
+ double max;
+ Limits(double min, double max) : min(min), max(max) {}
+ explicit Limits(AstRangeType* range)
+ : min(range->Min()), max(range->Max()) {}
+ bool IsEmpty();
+ static Limits Empty() { return Limits(1, 0); }
+ static Limits Intersect(Limits lhs, Limits rhs);
+ static Limits Union(Limits lhs, Limits rhs);
+ };
+
+ double Min() { return limits_.min; }
+ double Max() { return limits_.max; }
+
+ private:
+ friend class AstType;
+ friend class AstBitsetType;
+ friend class AstUnionType;
+
+ static AstType* New(double min, double max,
+ AstBitsetType::bitset representation, Zone* zone) {
+ return New(Limits(min, max), representation, zone);
+ }
+
+ static bool IsInteger(double x) {
+ return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
+ }
+
+ static AstType* New(Limits lim, AstBitsetType::bitset representation,
+ Zone* zone) {
+ DCHECK(IsInteger(lim.min) && IsInteger(lim.max));
+ DCHECK(lim.min <= lim.max);
+ DCHECK(AST_REPRESENTATION(representation) == representation);
+ AstBitsetType::bitset bits =
+ AST_SEMANTIC(AstBitsetType::Lub(lim.min, lim.max)) | representation;
+
+ return AsType(new (zone->New(sizeof(AstRangeType)))
+ AstRangeType(bits, lim));
+ }
+
+ static AstRangeType* cast(AstType* type) {
+ DCHECK(IsKind(type, kRange));
+ return static_cast<AstRangeType*>(FromType(type));
+ }
+
+ AstRangeType(AstBitsetType::bitset bitset, Limits limits)
+ : AstTypeBase(kRange), bitset_(bitset), limits_(limits) {}
+
+ AstBitsetType::bitset Lub() { return bitset_; }
+
+ AstBitsetType::bitset bitset_;
+ Limits limits_;
+};
+
+// -----------------------------------------------------------------------------
+// Context types.
+
+class AstContextType : public AstTypeBase {
+ public:
+ AstType* Outer() { return outer_; }
+
+ private:
+ friend class AstType;
+
+ static AstType* New(AstType* outer, Zone* zone) {
+ return AsType(new (zone->New(sizeof(AstContextType)))
+ AstContextType(outer)); // NOLINT
+ }
+
+ static AstContextType* cast(AstType* type) {
+ DCHECK(IsKind(type, kContext));
+ return static_cast<AstContextType*>(FromType(type));
+ }
+
+ explicit AstContextType(AstType* outer)
+ : AstTypeBase(kContext), outer_(outer) {}
+
+ AstType* outer_;
+};
+
+// -----------------------------------------------------------------------------
+// Array types.
+
+class AstArrayType : public AstTypeBase {
+ public:
+ AstType* Element() { return element_; }
+
+ private:
+ friend class AstType;
+
+ explicit AstArrayType(AstType* element)
+ : AstTypeBase(kArray), element_(element) {}
+
+ static AstType* New(AstType* element, Zone* zone) {
+ return AsType(new (zone->New(sizeof(AstArrayType))) AstArrayType(element));
+ }
+
+ static AstArrayType* cast(AstType* type) {
+ DCHECK(IsKind(type, kArray));
+ return static_cast<AstArrayType*>(FromType(type));
+ }
+
+ AstType* element_;
+};
+
+// -----------------------------------------------------------------------------
+// Superclass for types with variable number of type fields.
+class AstStructuralType : public AstTypeBase {
+ public:
+ int LengthForTesting() { return Length(); }
+
+ protected:
+ friend class AstType;
+
+ int Length() { return length_; }
+
+ AstType* Get(int i) {
+ DCHECK(0 <= i && i < this->Length());
+ return elements_[i];
+ }
+
+ void Set(int i, AstType* type) {
+ DCHECK(0 <= i && i < this->Length());
+ elements_[i] = type;
+ }
+
+ void Shrink(int length) {
+ DCHECK(2 <= length && length <= this->Length());
+ length_ = length;
+ }
+
+ AstStructuralType(Kind kind, int length, i::Zone* zone)
+ : AstTypeBase(kind), length_(length) {
+ elements_ =
+ reinterpret_cast<AstType**>(zone->New(sizeof(AstType*) * length));
+ }
+
+ private:
+ int length_;
+ AstType** elements_;
+};
+
+// -----------------------------------------------------------------------------
+// Function types.
+
+class AstFunctionType : public AstStructuralType {
+ public:
+ int Arity() { return this->Length() - 2; }
+ AstType* Result() { return this->Get(0); }
+ AstType* Receiver() { return this->Get(1); }
+ AstType* Parameter(int i) { return this->Get(2 + i); }
+
+ void InitParameter(int i, AstType* type) { this->Set(2 + i, type); }
+
+ private:
+ friend class AstType;
+
+ AstFunctionType(AstType* result, AstType* receiver, int arity, Zone* zone)
+ : AstStructuralType(kFunction, 2 + arity, zone) {
+ Set(0, result);
+ Set(1, receiver);
+ }
+
+ static AstType* New(AstType* result, AstType* receiver, int arity,
+ Zone* zone) {
+ return AsType(new (zone->New(sizeof(AstFunctionType)))
+ AstFunctionType(result, receiver, arity, zone));
+ }
+
+ static AstFunctionType* cast(AstType* type) {
+ DCHECK(IsKind(type, kFunction));
+ return static_cast<AstFunctionType*>(FromType(type));
+ }
+};
+
+// -----------------------------------------------------------------------------
+// Tuple types.
+
+class AstTupleType : public AstStructuralType {
+ public:
+ int Arity() { return this->Length(); }
+ AstType* Element(int i) { return this->Get(i); }
+
+ void InitElement(int i, AstType* type) { this->Set(i, type); }
+
+ private:
+ friend class AstType;
+
+ AstTupleType(int length, Zone* zone)
+ : AstStructuralType(kTuple, length, zone) {}
+
+ static AstType* New(int length, Zone* zone) {
+ return AsType(new (zone->New(sizeof(AstTupleType)))
+ AstTupleType(length, zone));
+ }
+
+ static AstTupleType* cast(AstType* type) {
+ DCHECK(IsKind(type, kTuple));
+ return static_cast<AstTupleType*>(FromType(type));
+ }
+};
+
+// -----------------------------------------------------------------------------
+// Union types (internal).
+// A union is a structured type with the following invariants:
+// - its length is at least 2
+// - at most one field is a bitset, and it must go into index 0
+// - no field is a union
+// - no field is a subtype of any other field
+class AstUnionType : public AstStructuralType {
+ private:
+ friend AstType;
+ friend AstBitsetType;
+
+ AstUnionType(int length, Zone* zone)
+ : AstStructuralType(kUnion, length, zone) {}
+
+ static AstType* New(int length, Zone* zone) {
+ return AsType(new (zone->New(sizeof(AstUnionType)))
+ AstUnionType(length, zone));
+ }
+
+ static AstUnionType* cast(AstType* type) {
+ DCHECK(IsKind(type, kUnion));
+ return static_cast<AstUnionType*>(FromType(type));
+ }
+
+ bool Wellformed();
+};
+
+class AstType {
+ public:
+ typedef AstBitsetType::bitset bitset; // Internal
+
+// Constructors.
+#define DEFINE_TYPE_CONSTRUCTOR(type, value) \
+ static AstType* type() { return AstBitsetType::New(AstBitsetType::k##type); }
+ AST_PROPER_BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
+#undef DEFINE_TYPE_CONSTRUCTOR
+
+ static AstType* SignedSmall() {
+ return AstBitsetType::New(AstBitsetType::SignedSmall());
+ }
+ static AstType* UnsignedSmall() {
+ return AstBitsetType::New(AstBitsetType::UnsignedSmall());
+ }
+
+ static AstType* Class(i::Handle<i::Map> map, Zone* zone) {
+ return AstClassType::New(map, zone);
+ }
+ static AstType* Constant(i::Handle<i::Object> value, Zone* zone) {
+ return AstConstantType::New(value, zone);
+ }
+ static AstType* Range(double min, double max, Zone* zone) {
+ return AstRangeType::New(min, max,
+ AST_REPRESENTATION(AstBitsetType::kTagged |
+ AstBitsetType::kUntaggedNumber),
+ zone);
+ }
+ static AstType* Context(AstType* outer, Zone* zone) {
+ return AstContextType::New(outer, zone);
+ }
+ static AstType* Array(AstType* element, Zone* zone) {
+ return AstArrayType::New(element, zone);
+ }
+ static AstType* Function(AstType* result, AstType* receiver, int arity,
+ Zone* zone) {
+ return AstFunctionType::New(result, receiver, arity, zone);
+ }
+ static AstType* Function(AstType* result, Zone* zone) {
+ return Function(result, Any(), 0, zone);
+ }
+ static AstType* Function(AstType* result, AstType* param0, Zone* zone) {
+ AstType* function = Function(result, Any(), 1, zone);
+ function->AsFunction()->InitParameter(0, param0);
+ return function;
+ }
+ static AstType* Function(AstType* result, AstType* param0, AstType* param1,
+ Zone* zone) {
+ AstType* function = Function(result, Any(), 2, zone);
+ function->AsFunction()->InitParameter(0, param0);
+ function->AsFunction()->InitParameter(1, param1);
+ return function;
+ }
+ static AstType* Function(AstType* result, AstType* param0, AstType* param1,
+ AstType* param2, Zone* zone) {
+ AstType* function = Function(result, Any(), 3, zone);
+ function->AsFunction()->InitParameter(0, param0);
+ function->AsFunction()->InitParameter(1, param1);
+ function->AsFunction()->InitParameter(2, param2);
+ return function;
+ }
+ static AstType* Function(AstType* result, int arity, AstType** params,
+ Zone* zone) {
+ AstType* function = Function(result, Any(), arity, zone);
+ for (int i = 0; i < arity; ++i) {
+ function->AsFunction()->InitParameter(i, params[i]);
+ }
+ return function;
+ }
+ static AstType* Tuple(AstType* first, AstType* second, AstType* third,
+ Zone* zone) {
+ AstType* tuple = AstTupleType::New(3, zone);
+ tuple->AsTuple()->InitElement(0, first);
+ tuple->AsTuple()->InitElement(1, second);
+ tuple->AsTuple()->InitElement(2, third);
+ return tuple;
+ }
+
+#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
+ static AstType* Name(Isolate* isolate, Zone* zone);
+ SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
+#undef CONSTRUCT_SIMD_TYPE
+
+ static AstType* Union(AstType* type1, AstType* type2, Zone* zone);
+ static AstType* Intersect(AstType* type1, AstType* type2, Zone* zone);
+
+ static AstType* Of(double value, Zone* zone) {
+ return AstBitsetType::New(
+ AstBitsetType::ExpandInternals(AstBitsetType::Lub(value)));
+ }
+ static AstType* Of(i::Object* value, Zone* zone) {
+ return AstBitsetType::New(
+ AstBitsetType::ExpandInternals(AstBitsetType::Lub(value)));
+ }
+ static AstType* Of(i::Handle<i::Object> value, Zone* zone) {
+ return Of(*value, zone);
+ }
+
+ static AstType* For(i::Map* map) {
+ return AstBitsetType::New(
+ AstBitsetType::ExpandInternals(AstBitsetType::Lub(map)));
+ }
+ static AstType* For(i::Handle<i::Map> map) { return For(*map); }
+
+ // Extraction of components.
+ static AstType* Representation(AstType* t, Zone* zone);
+ static AstType* Semantic(AstType* t, Zone* zone);
+
+ // Predicates.
+ bool IsInhabited() { return AstBitsetType::IsInhabited(this->BitsetLub()); }
+
+ bool Is(AstType* that) { return this == that || this->SlowIs(that); }
+ bool Maybe(AstType* that);
+ bool Equals(AstType* that) { return this->Is(that) && that->Is(this); }
+
+ // Equivalent to Constant(val)->Is(this), but avoiding allocation.
+ bool Contains(i::Object* val);
+ bool Contains(i::Handle<i::Object> val) { return this->Contains(*val); }
+
+ // State-dependent versions of the above that consider subtyping between
+ // a constant and its map class.
+ static AstType* NowOf(i::Object* value, Zone* zone);
+ static AstType* NowOf(i::Handle<i::Object> value, Zone* zone) {
+ return NowOf(*value, zone);
+ }
+ bool NowIs(AstType* that);
+ bool NowContains(i::Object* val);
+ bool NowContains(i::Handle<i::Object> val) { return this->NowContains(*val); }
+
+ bool NowStable();
+
+ // Inspection.
+ bool IsRange() { return IsKind(AstTypeBase::kRange); }
+ bool IsClass() { return IsKind(AstTypeBase::kClass); }
+ bool IsConstant() { return IsKind(AstTypeBase::kConstant); }
+ bool IsContext() { return IsKind(AstTypeBase::kContext); }
+ bool IsArray() { return IsKind(AstTypeBase::kArray); }
+ bool IsFunction() { return IsKind(AstTypeBase::kFunction); }
+ bool IsTuple() { return IsKind(AstTypeBase::kTuple); }
+
+ AstClassType* AsClass() { return AstClassType::cast(this); }
+ AstConstantType* AsConstant() { return AstConstantType::cast(this); }
+ AstRangeType* AsRange() { return AstRangeType::cast(this); }
+ AstContextType* AsContext() { return AstContextType::cast(this); }
+ AstArrayType* AsArray() { return AstArrayType::cast(this); }
+ AstFunctionType* AsFunction() { return AstFunctionType::cast(this); }
+ AstTupleType* AsTuple() { return AstTupleType::cast(this); }
+
+ // Minimum and maximum of a numeric type.
+ // These functions do not distinguish between -0 and +0. If the type equals
+ // kNaN, they return NaN; otherwise kNaN is ignored. Only call these
+ // functions on subtypes of Number.
+ double Min();
+ double Max();
+
+ // Extracts a range from the type: if the type is a range or a union
+ // containing a range, that range is returned; otherwise, NULL is returned.
+ AstType* GetRange();
+
+ static bool IsInteger(i::Object* x);
+ static bool IsInteger(double x) {
+ return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
+ }
+
+ int NumClasses();
+ int NumConstants();
+
+ template <class T>
+ class Iterator {
+ public:
+ bool Done() const { return index_ < 0; }
+ i::Handle<T> Current();
+ void Advance();
+
+ private:
+ friend class AstType;
+
+ Iterator() : index_(-1) {}
+ explicit Iterator(AstType* type) : type_(type), index_(-1) { Advance(); }
+
+ inline bool matches(AstType* type);
+ inline AstType* get_type();
+
+ AstType* type_;
+ int index_;
+ };
+
+ Iterator<i::Map> Classes() {
+ if (this->IsBitset()) return Iterator<i::Map>();
+ return Iterator<i::Map>(this);
+ }
+ Iterator<i::Object> Constants() {
+ if (this->IsBitset()) return Iterator<i::Object>();
+ return Iterator<i::Object>(this);
+ }
+
+ // Printing.
+
+ enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM };
+
+ void PrintTo(std::ostream& os, PrintDimension dim = BOTH_DIMS); // NOLINT
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ // Helpers for testing.
+ bool IsBitsetForTesting() { return IsBitset(); }
+ bool IsUnionForTesting() { return IsUnion(); }
+ bitset AsBitsetForTesting() { return AsBitset(); }
+ AstUnionType* AsUnionForTesting() { return AsUnion(); }
+
+ private:
+ // Friends.
+ template <class>
+ friend class Iterator;
+ friend AstBitsetType;
+ friend AstUnionType;
+
+ // Internal inspection.
+ bool IsKind(AstTypeBase::Kind kind) {
+ return AstTypeBase::IsKind(this, kind);
+ }
+
+ bool IsNone() { return this == None(); }
+ bool IsAny() { return this == Any(); }
+ bool IsBitset() { return AstBitsetType::IsBitset(this); }
+ bool IsUnion() { return IsKind(AstTypeBase::kUnion); }
+
+ bitset AsBitset() {
+ DCHECK(this->IsBitset());
+ return reinterpret_cast<AstBitsetType*>(this)->Bitset();
+ }
+ AstUnionType* AsUnion() { return AstUnionType::cast(this); }
+
+ bitset Representation();
+
+ // Auxiliary functions.
+ bool SemanticMaybe(AstType* that);
+
+ bitset BitsetGlb() { return AstBitsetType::Glb(this); }
+ bitset BitsetLub() { return AstBitsetType::Lub(this); }
+
+ bool SlowIs(AstType* that);
+ bool SemanticIs(AstType* that);
+
+ static bool Overlap(AstRangeType* lhs, AstRangeType* rhs);
+ static bool Contains(AstRangeType* lhs, AstRangeType* rhs);
+ static bool Contains(AstRangeType* range, AstConstantType* constant);
+ static bool Contains(AstRangeType* range, i::Object* val);
+
+ static int UpdateRange(AstType* type, AstUnionType* result, int size,
+ Zone* zone);
+
+ static AstRangeType::Limits IntersectRangeAndBitset(AstType* range,
+ AstType* bits,
+ Zone* zone);
+ static AstRangeType::Limits ToLimits(bitset bits, Zone* zone);
+
+ bool SimplyEquals(AstType* that);
+
+ static int AddToUnion(AstType* type, AstUnionType* result, int size,
+ Zone* zone);
+ static int IntersectAux(AstType* type, AstType* other, AstUnionType* result,
+ int size, AstRangeType::Limits* limits, Zone* zone);
+ static AstType* NormalizeUnion(AstType* unioned, int size, Zone* zone);
+ static AstType* NormalizeRangeAndBitset(AstType* range, bitset* bits,
+ Zone* zone);
+};
+
+// -----------------------------------------------------------------------------
+// Type bounds. A simple struct to represent a pair of lower/upper types.
+
+struct AstBounds {
+ AstType* lower;
+ AstType* upper;
+
+ AstBounds()
+ : // Make sure accessing uninitialized bounds crashes big-time.
+ lower(nullptr),
+ upper(nullptr) {}
+ explicit AstBounds(AstType* t) : lower(t), upper(t) {}
+ AstBounds(AstType* l, AstType* u) : lower(l), upper(u) {
+ DCHECK(lower->Is(upper));
+ }
+
+ // Unrestricted bounds.
+ static AstBounds Unbounded() {
+ return AstBounds(AstType::None(), AstType::Any());
+ }
+
+ // Meet: both b1 and b2 are known to hold.
+ static AstBounds Both(AstBounds b1, AstBounds b2, Zone* zone) {
+ AstType* lower = AstType::Union(b1.lower, b2.lower, zone);
+ AstType* upper = AstType::Intersect(b1.upper, b2.upper, zone);
+ // Lower bounds are considered approximate, correct as necessary.
+ if (!lower->Is(upper)) lower = upper;
+ return AstBounds(lower, upper);
+ }
+
+ // Join: either b1 or b2 is known to hold.
+ static AstBounds Either(AstBounds b1, AstBounds b2, Zone* zone) {
+ AstType* lower = AstType::Intersect(b1.lower, b2.lower, zone);
+ AstType* upper = AstType::Union(b1.upper, b2.upper, zone);
+ return AstBounds(lower, upper);
+ }
+
+ static AstBounds NarrowLower(AstBounds b, AstType* t, Zone* zone) {
+ AstType* lower = AstType::Union(b.lower, t, zone);
+ // Lower bounds are considered approximate, correct as necessary.
+ if (!lower->Is(b.upper)) lower = b.upper;
+ return AstBounds(lower, b.upper);
+ }
+ static AstBounds NarrowUpper(AstBounds b, AstType* t, Zone* zone) {
+ AstType* lower = b.lower;
+ AstType* upper = AstType::Intersect(b.upper, t, zone);
+ // Lower bounds are considered approximate, correct as necessary.
+ if (!lower->Is(upper)) lower = upper;
+ return AstBounds(lower, upper);
+ }
+
+ bool Narrows(AstBounds that) {
+ return that.lower->Is(this->lower) && this->upper->Is(that.upper);
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_AST_AST_TYPES_H_
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index a271751839..33ccec7fa8 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -237,28 +237,14 @@ AstRawString* AstValueFactory::GetTwoByteStringInternal(
const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
- // For the FlatContent to stay valid, we shouldn't do any heap
- // allocation. Make sure we won't try to internalize the string in GetString.
AstRawString* result = NULL;
- Isolate* saved_isolate = isolate_;
- isolate_ = NULL;
- {
- DisallowHeapAllocation no_gc;
- String::FlatContent content = literal->GetFlatContent();
- if (content.IsOneByte()) {
- result = GetOneByteStringInternal(content.ToOneByteVector());
- } else {
- DCHECK(content.IsTwoByte());
- result = GetTwoByteStringInternal(content.ToUC16Vector());
- }
- }
- isolate_ = saved_isolate;
- if (strings_ != nullptr && isolate_) {
- // Only the string we are creating is uninternalized at this point.
- DCHECK_EQ(result, strings_);
- DCHECK_NULL(strings_->next());
- result->Internalize(isolate_);
- ResetStrings();
+ DisallowHeapAllocation no_gc;
+ String::FlatContent content = literal->GetFlatContent();
+ if (content.IsOneByte()) {
+ result = GetOneByteStringInternal(content.ToOneByteVector());
+ } else {
+ DCHECK(content.IsTwoByte());
+ result = GetTwoByteStringInternal(content.ToUC16Vector());
}
return result;
}
@@ -274,15 +260,40 @@ const AstConsString* AstValueFactory::NewConsString(
return new_string;
}
-
-void AstValueFactory::Internalize(Isolate* isolate) {
- if (isolate_) {
- DCHECK_NULL(strings_);
- DCHECK_NULL(values_);
- // Everything is already internalized.
- return;
+const AstRawString* AstValueFactory::ConcatStrings(const AstRawString* left,
+ const AstRawString* right) {
+ int left_length = left->length();
+ int right_length = right->length();
+ const unsigned char* left_data = left->raw_data();
+ const unsigned char* right_data = right->raw_data();
+ if (left->is_one_byte() && right->is_one_byte()) {
+ uint8_t* buffer = zone_->NewArray<uint8_t>(left_length + right_length);
+ memcpy(buffer, left_data, left_length);
+ memcpy(buffer + left_length, right_data, right_length);
+ Vector<const uint8_t> literal(buffer, left_length + right_length);
+ return GetOneByteStringInternal(literal);
+ } else {
+ uint16_t* buffer = zone_->NewArray<uint16_t>(left_length + right_length);
+ if (left->is_one_byte()) {
+ for (int i = 0; i < left_length; ++i) {
+ buffer[i] = left_data[i];
+ }
+ } else {
+ memcpy(buffer, left_data, 2 * left_length);
+ }
+ if (right->is_one_byte()) {
+ for (int i = 0; i < right_length; ++i) {
+ buffer[i + left_length] = right_data[i];
+ }
+ } else {
+ memcpy(buffer + left_length, right_data, 2 * right_length);
+ }
+ Vector<const uint16_t> literal(buffer, left_length + right_length);
+ return GetTwoByteStringInternal(literal);
}
+}
+void AstValueFactory::Internalize(Isolate* isolate) {
// Strings need to be internalized before values, because values refer to
// strings.
for (AstString* current = strings_; current != nullptr;) {
@@ -295,7 +306,6 @@ void AstValueFactory::Internalize(Isolate* isolate) {
current->Internalize(isolate);
current = next;
}
- isolate_ = isolate;
ResetStrings();
values_ = nullptr;
}
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index da209e122c..bc3eca264e 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -283,8 +283,8 @@ class AstValue : public ZoneObject {
F(default, "default") \
F(done, "done") \
F(dot, ".") \
+ F(dot_class_field_init, ".class-field-init") \
F(dot_for, ".for") \
- F(dot_generator, ".generator") \
F(dot_generator_object, ".generator_object") \
F(dot_iterator, ".iterator") \
F(dot_result, ".result") \
@@ -326,7 +326,6 @@ class AstValueFactory {
values_(nullptr),
strings_end_(&strings_),
zone_(zone),
- isolate_(NULL),
hash_seed_(hash_seed) {
ResetStrings();
#define F(name, str) name##_string_ = NULL;
@@ -352,11 +351,10 @@ class AstValueFactory {
const AstRawString* GetString(Handle<String> literal);
const AstConsString* NewConsString(const AstString* left,
const AstString* right);
+ const AstRawString* ConcatStrings(const AstRawString* left,
+ const AstRawString* right);
void Internalize(Isolate* isolate);
- bool IsInternalized() {
- return isolate_ != NULL;
- }
#define F(name, str) \
const AstRawString* name##_string() { \
@@ -384,21 +382,13 @@ class AstValueFactory {
private:
AstValue* AddValue(AstValue* value) {
- if (isolate_) {
- value->Internalize(isolate_);
- } else {
- value->set_next(values_);
- values_ = value;
- }
+ value->set_next(values_);
+ values_ = value;
return value;
}
AstString* AddString(AstString* string) {
- if (isolate_) {
- string->Internalize(isolate_);
- } else {
- *strings_end_ = string;
- strings_end_ = string->next_location();
- }
+ *strings_end_ = string;
+ strings_end_ = string->next_location();
return string;
}
void ResetStrings() {
@@ -413,7 +403,7 @@ class AstValueFactory {
static bool AstRawStringCompare(void* a, void* b);
// All strings are copied here, one after another (no NULLs inbetween).
- base::HashMap string_table_;
+ base::CustomMatcherHashMap string_table_;
// For keeping track of all AstValues and AstRawStrings we've created (so that
// they can be internalized later).
AstValue* values_;
@@ -422,7 +412,6 @@ class AstValueFactory {
AstString* strings_;
AstString** strings_end_;
Zone* zone_;
- Isolate* isolate_;
uint32_t hash_seed_;
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 06037f4e6d..97d1f9d770 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -6,6 +6,7 @@
#include <cmath> // For isfinite.
+#include "src/ast/compile-time-value.h"
#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
#include "src/base/hashmap.h"
@@ -13,7 +14,6 @@
#include "src/code-stubs.h"
#include "src/contexts.h"
#include "src/conversions.h"
-#include "src/parsing/parser.h"
#include "src/property-details.h"
#include "src/property.h"
#include "src/string-stream.h"
@@ -83,18 +83,14 @@ bool Expression::IsNullLiteral() const {
}
bool Expression::IsUndefinedLiteral() const {
- if (IsLiteral()) {
- if (AsLiteral()->raw_value()->IsUndefined()) {
- return true;
- }
- }
+ if (IsLiteral() && AsLiteral()->raw_value()->IsUndefined()) return true;
const VariableProxy* var_proxy = AsVariableProxy();
- if (var_proxy == NULL) return false;
+ if (var_proxy == nullptr) return false;
Variable* var = var_proxy->var();
// The global identifier "undefined" is immutable. Everything
// else could be reassigned.
- return var != NULL && var->IsUnallocatedOrGlobalSlot() &&
+ return var != NULL && var->IsUnallocated() &&
var_proxy->raw_name()->IsOneByteEqualTo("undefined");
}
@@ -166,36 +162,32 @@ bool Statement::IsJump() const {
VariableProxy::VariableProxy(Variable* var, int start_position,
int end_position)
: Expression(start_position, kVariableProxy),
- bit_field_(IsThisField::encode(var->is_this()) |
- IsAssignedField::encode(false) |
- IsResolvedField::encode(false)),
end_position_(end_position),
raw_name_(var->raw_name()),
next_unresolved_(nullptr) {
+ bit_field_ |= IsThisField::encode(var->is_this()) |
+ IsAssignedField::encode(false) | IsResolvedField::encode(false);
BindTo(var);
}
VariableProxy::VariableProxy(const AstRawString* name,
- Variable::Kind variable_kind, int start_position,
+ VariableKind variable_kind, int start_position,
int end_position)
: Expression(start_position, kVariableProxy),
- bit_field_(IsThisField::encode(variable_kind == Variable::THIS) |
- IsAssignedField::encode(false) |
- IsResolvedField::encode(false)),
end_position_(end_position),
raw_name_(name),
- next_unresolved_(nullptr) {}
+ next_unresolved_(nullptr) {
+ bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
+ IsAssignedField::encode(false) | IsResolvedField::encode(false);
+}
VariableProxy::VariableProxy(const VariableProxy* copy_from)
: Expression(copy_from->position(), kVariableProxy),
- bit_field_(copy_from->bit_field_),
end_position_(copy_from->end_position_),
next_unresolved_(nullptr) {
- if (copy_from->is_resolved()) {
- var_ = copy_from->var_;
- } else {
- raw_name_ = copy_from->raw_name_;
- }
+ bit_field_ = copy_from->bit_field_;
+ DCHECK(!copy_from->is_resolved());
+ raw_name_ = copy_from->raw_name_;
}
void VariableProxy::BindTo(Variable* var) {
@@ -253,12 +245,13 @@ void ForInStatement::AssignFeedbackVectorSlots(Isolate* isolate,
Assignment::Assignment(Token::Value op, Expression* target, Expression* value,
int pos)
: Expression(pos, kAssignment),
- bit_field_(
- IsUninitializedField::encode(false) | KeyTypeField::encode(ELEMENT) |
- StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op)),
target_(target),
value_(value),
- binary_operation_(NULL) {}
+ binary_operation_(NULL) {
+ bit_field_ |= IsUninitializedField::encode(false) |
+ KeyTypeField::encode(ELEMENT) |
+ StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
+}
void Assignment::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
@@ -273,7 +266,7 @@ void CountOperation::AssignFeedbackVectorSlots(Isolate* isolate,
AssignVectorSlots(expression(), spec, &slot_);
// Assign a slot to collect feedback about binary operations. Used only in
// ignition. Fullcodegen uses AstId to record type feedback.
- binary_operation_slot_ = spec->AddGeneralSlot();
+ binary_operation_slot_ = spec->AddInterpreterBinaryOpICSlot();
}
@@ -320,6 +313,7 @@ LanguageMode FunctionLiteral::language_mode() const {
return scope()->language_mode();
}
+FunctionKind FunctionLiteral::kind() const { return scope()->function_kind(); }
bool FunctionLiteral::NeedsHomeObject(Expression* expr) {
if (expr == nullptr || !expr->IsFunctionLiteral()) return false;
@@ -327,27 +321,16 @@ bool FunctionLiteral::NeedsHomeObject(Expression* expr) {
return expr->AsFunctionLiteral()->scope()->NeedsHomeObject();
}
-
ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value,
- Kind kind, bool is_static,
- bool is_computed_name)
- : key_(key),
- value_(value),
+ Kind kind, bool is_computed_name)
+ : LiteralProperty(key, value, is_computed_name),
kind_(kind),
- emit_store_(true),
- is_static_(is_static),
- is_computed_name_(is_computed_name) {}
-
+ emit_store_(true) {}
ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
Expression* key, Expression* value,
- bool is_static,
bool is_computed_name)
- : key_(key),
- value_(value),
- emit_store_(true),
- is_static_(is_static),
- is_computed_name_(is_computed_name) {
+ : LiteralProperty(key, value, is_computed_name), emit_store_(true) {
if (!is_computed_name &&
key->AsLiteral()->raw_value()->EqualsString(
ast_value_factory->proto_string())) {
@@ -361,13 +344,20 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
}
}
-bool ObjectLiteralProperty::NeedsSetFunctionName() const {
+bool LiteralProperty::NeedsSetFunctionName() const {
return is_computed_name_ &&
(value_->IsAnonymousFunctionDefinition() ||
(value_->IsFunctionLiteral() &&
IsConciseMethod(value_->AsFunctionLiteral()->kind())));
}
+ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
+ Kind kind, bool is_static,
+ bool is_computed_name)
+ : LiteralProperty(key, value, is_computed_name),
+ kind_(kind),
+ is_static_(is_static) {}
+
void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
@@ -379,7 +369,7 @@ void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
}
for (int i = 0; i < properties()->length(); i++) {
- ObjectLiteral::Property* property = properties()->at(i);
+ ClassLiteral::Property* property = properties()->at(i);
Expression* value = property->value();
if (FunctionLiteral::NeedsHomeObject(value)) {
property->SetSlot(spec->AddStoreICSlot());
@@ -387,8 +377,7 @@ void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
}
}
-
-bool ObjectLiteral::Property::IsCompileTimeValue() {
+bool ObjectLiteral::Property::IsCompileTimeValue() const {
return kind_ == CONSTANT ||
(kind_ == MATERIALIZED_LITERAL &&
CompileTimeValue::IsCompileTimeValue(value_));
@@ -399,11 +388,7 @@ void ObjectLiteral::Property::set_emit_store(bool emit_store) {
emit_store_ = emit_store;
}
-
-bool ObjectLiteral::Property::emit_store() {
- return emit_store_;
-}
-
+bool ObjectLiteral::Property::emit_store() const { return emit_store_; }
void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
@@ -473,8 +458,8 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
ZoneAllocationPolicy allocator(zone);
- ZoneHashMap table(Literal::Match, ZoneHashMap::kDefaultHashMapCapacity,
- allocator);
+ CustomMatcherZoneHashMap table(
+ Literal::Match, ZoneHashMap::kDefaultHashMapCapacity, allocator);
for (int i = properties()->length() - 1; i >= 0; i--) {
ObjectLiteral::Property* property = properties()->at(i);
if (property->is_computed_name()) continue;
@@ -551,7 +536,7 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
// TODO(verwaest): Remove once we can store them inline.
if (FLAG_track_double_fields &&
(value->IsNumber() || value->IsUninitialized(isolate))) {
- may_store_doubles_ = true;
+ bit_field_ = MayStoreDoublesField::update(bit_field_, true);
}
is_simple = is_simple && !value->IsUninitialized(isolate);
@@ -578,9 +563,11 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
}
constant_properties_ = constant_properties;
- fast_elements_ =
- (max_element_index <= 32) || ((2 * elements) >= max_element_index);
- has_elements_ = elements > 0;
+ bit_field_ = FastElementsField::update(
+ bit_field_,
+ (max_element_index <= 32) || ((2 * elements) >= max_element_index));
+ bit_field_ = HasElementsField::update(bit_field_, elements > 0);
+
set_is_simple(is_simple);
set_depth(depth_acc);
}
@@ -662,8 +649,7 @@ void ArrayLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSlotCache* cache) {
// This logic that computes the number of slots needed for vector store
// ics must mirror FullCodeGenerator::VisitArrayLiteral.
- int array_index = 0;
- for (; array_index < values()->length(); array_index++) {
+ for (int array_index = 0; array_index < values()->length(); array_index++) {
Expression* subexpr = values()->at(array_index);
DCHECK(!subexpr->IsSpread());
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
@@ -731,7 +717,7 @@ void BinaryOperation::AssignFeedbackVectorSlots(
case Token::OR:
return;
default:
- type_feedback_slot_ = spec->AddGeneralSlot();
+ type_feedback_slot_ = spec->AddInterpreterBinaryOpICSlot();
return;
}
}
@@ -741,6 +727,20 @@ static bool IsTypeof(Expression* expr) {
return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
}
+void CompareOperation::AssignFeedbackVectorSlots(
+ Isolate* isolate, FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache_) {
+ // Feedback vector slot is only used by interpreter for binary operations.
+ // Full-codegen uses AstId to record type feedback.
+ switch (op()) {
+ // instanceof and in do not collect type feedback.
+ case Token::INSTANCEOF:
+ case Token::IN:
+ return;
+ default:
+ type_feedback_slot_ = spec->AddInterpreterCompareICSlot();
+ }
+}
// Check for the pattern: typeof <expression> equals <string literal>.
static bool MatchLiteralCompareTypeof(Expression* left,
@@ -759,8 +759,8 @@ static bool MatchLiteralCompareTypeof(Expression* left,
bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
Handle<String>* check) {
- return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) ||
- MatchLiteralCompareTypeof(right_, op_, left_, expr, check);
+ return MatchLiteralCompareTypeof(left_, op(), right_, expr, check) ||
+ MatchLiteralCompareTypeof(right_, op(), left_, expr, check);
}
@@ -790,8 +790,8 @@ static bool MatchLiteralCompareUndefined(Expression* left,
}
bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
- return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
- MatchLiteralCompareUndefined(right_, op_, left_, expr);
+ return MatchLiteralCompareUndefined(left_, op(), right_, expr) ||
+ MatchLiteralCompareUndefined(right_, op(), left_, expr);
}
@@ -809,8 +809,8 @@ static bool MatchLiteralCompareNull(Expression* left,
bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
- return MatchLiteralCompareNull(left_, op_, right_, expr) ||
- MatchLiteralCompareNull(right_, op_, left_, expr);
+ return MatchLiteralCompareNull(left_, op(), right_, expr) ||
+ MatchLiteralCompareNull(right_, op(), left_, expr);
}
@@ -913,7 +913,7 @@ Call::CallType Call::GetCallType() const {
if (proxy != NULL) {
if (is_possibly_eval()) {
return POSSIBLY_EVAL_CALL;
- } else if (proxy->var()->IsUnallocatedOrGlobalSlot()) {
+ } else if (proxy->var()->IsUnallocated()) {
return GLOBAL_CALL;
} else if (proxy->var()->IsLookupSlot()) {
return LOOKUP_SLOT_CALL;
@@ -940,7 +940,13 @@ CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements,
: Expression(pos, kCaseClause),
label_(label),
statements_(statements),
- compare_type_(Type::None()) {}
+ compare_type_(AstType::None()) {}
+
+void CaseClause::AssignFeedbackVectorSlots(Isolate* isolate,
+ FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache) {
+ type_feedback_slot_ = spec->AddInterpreterCompareICSlot();
+}
uint32_t Literal::Hash() {
return raw_value()->IsString()
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 1b80d3f36d..a6661becf2 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -5,6 +5,7 @@
#ifndef V8_AST_AST_H_
#define V8_AST_AST_H_
+#include "src/ast/ast-types.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/modules.h"
#include "src/ast/variables.h"
@@ -17,7 +18,6 @@
#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
#include "src/small-pointer-list.h"
-#include "src/types.h"
#include "src/utils.h"
namespace v8 {
@@ -126,17 +126,11 @@ AST_NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
-// Typedef only introduced to avoid unreadable code.
-typedef ZoneList<Handle<String>> ZoneStringList;
-typedef ZoneList<Handle<Object>> ZoneObjectList;
-
-
class FeedbackVectorSlotCache {
public:
explicit FeedbackVectorSlotCache(Zone* zone)
: zone_(zone),
- hash_map_(base::HashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity,
+ hash_map_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)) {}
void Put(Variable* variable, FeedbackVectorSlot slot) {
@@ -192,7 +186,7 @@ class AstNode: public ZoneObject {
void* operator new(size_t size, Zone* zone) { return zone->New(size); }
- NodeType node_type() const { return node_type_; }
+ NodeType node_type() const { return NodeTypeField::decode(bit_field_); }
int position() const { return position_; }
#ifdef DEBUG
@@ -211,19 +205,20 @@ class AstNode: public ZoneObject {
IterationStatement* AsIterationStatement();
MaterializedLiteral* AsMaterializedLiteral();
- protected:
- AstNode(int position, NodeType type)
- : position_(position), node_type_(type) {}
-
private:
// Hidden to prevent accidental usage. It would have to load the
// current zone from the TLS.
void* operator new(size_t size);
int position_;
- NodeType node_type_;
- // Ends with NodeType which is uint8_t sized. Deriving classes in turn begin
- // sub-int32_t-sized fields for optimum packing efficiency.
+ class NodeTypeField : public BitField<NodeType, 0, 6> {};
+
+ protected:
+ uint32_t bit_field_;
+ static const uint8_t kNextBitFieldIndex = NodeTypeField::kNext;
+
+ AstNode(int position, NodeType type)
+ : position_(position), bit_field_(NodeTypeField::encode(type)) {}
};
@@ -234,6 +229,8 @@ class Statement : public AstNode {
protected:
Statement(int position, NodeType type) : AstNode(position, type) {}
+
+ static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
};
@@ -349,11 +346,18 @@ class Expression : public AstNode {
BailoutId id() const { return BailoutId(local_id(0)); }
TypeFeedbackId test_id() const { return TypeFeedbackId(local_id(1)); }
+ private:
+ int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+
+ int base_id_;
+ class ToBooleanTypesField
+ : public BitField<uint16_t, AstNode::kNextBitFieldIndex, 9> {};
+
protected:
Expression(int pos, NodeType type)
- : AstNode(pos, type),
- bit_field_(0),
- base_id_(BailoutId::None().ToInt()) {}
+ : AstNode(pos, type), base_id_(BailoutId::None().ToInt()) {
+ bit_field_ = ToBooleanTypesField::update(bit_field_, 0);
+ }
static int parent_num_ids() { return 0; }
void set_to_boolean_types(uint16_t types) {
@@ -364,12 +368,7 @@ class Expression : public AstNode {
return base_id_;
}
- private:
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
- uint16_t bit_field_;
- int base_id_;
- class ToBooleanTypesField : public BitField16<uint16_t, 0, 9> {};
+ static const uint8_t kNextBitFieldIndex = ToBooleanTypesField::kNext;
};
@@ -389,7 +388,7 @@ class BreakableStatement : public Statement {
// Testers.
bool is_target_for_anonymous() const {
- return breakable_type_ == TARGET_FOR_ANONYMOUS;
+ return BreakableTypeField::decode(bit_field_) == TARGET_FOR_ANONYMOUS;
}
void set_base_id(int id) { base_id_ = id; }
@@ -397,14 +396,28 @@ class BreakableStatement : public Statement {
BailoutId EntryId() const { return BailoutId(local_id(0)); }
BailoutId ExitId() const { return BailoutId(local_id(1)); }
+ private:
+ int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+
+ BreakableType breakableType() const {
+ return BreakableTypeField::decode(bit_field_);
+ }
+
+ int base_id_;
+ Label break_target_;
+ ZoneList<const AstRawString*>* labels_;
+
+ class BreakableTypeField
+ : public BitField<BreakableType, Statement::kNextBitFieldIndex, 1> {};
+
protected:
BreakableStatement(ZoneList<const AstRawString*>* labels,
BreakableType breakable_type, int position, NodeType type)
: Statement(position, type),
- breakable_type_(breakable_type),
base_id_(BailoutId::None().ToInt()),
labels_(labels) {
DCHECK(labels == NULL || labels->length() > 0);
+ bit_field_ |= BreakableTypeField::encode(breakable_type);
}
static int parent_num_ids() { return 0; }
@@ -413,20 +426,16 @@ class BreakableStatement : public Statement {
return base_id_;
}
- private:
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
- BreakableType breakable_type_;
- int base_id_;
- Label break_target_;
- ZoneList<const AstRawString*>* labels_;
+ static const uint8_t kNextBitFieldIndex = BreakableTypeField::kNext;
};
class Block final : public BreakableStatement {
public:
ZoneList<Statement*>* statements() { return &statements_; }
- bool ignore_completion_value() const { return ignore_completion_value_; }
+ bool ignore_completion_value() const {
+ return IgnoreCompletionField::decode(bit_field_);
+ }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId DeclsId() const { return BailoutId(local_id(0)); }
@@ -446,14 +455,20 @@ class Block final : public BreakableStatement {
bool ignore_completion_value, int pos)
: BreakableStatement(labels, TARGET_FOR_NAMED_ONLY, pos, kBlock),
statements_(capacity, zone),
- ignore_completion_value_(ignore_completion_value),
- scope_(NULL) {}
+ scope_(NULL) {
+ bit_field_ |= IgnoreCompletionField::encode(ignore_completion_value);
+ }
static int parent_num_ids() { return BreakableStatement::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
ZoneList<Statement*> statements_;
- bool ignore_completion_value_;
Scope* scope_;
+
+ class IgnoreCompletionField
+ : public BitField<bool, BreakableStatement::kNextBitFieldIndex, 1> {};
+
+ protected:
+ static const uint8_t kNextBitFieldIndex = IgnoreCompletionField::kNext;
};
@@ -469,6 +484,9 @@ class DoExpression final : public Expression {
}
bool IsAnonymousFunctionDefinition() const;
+ protected:
+ static const uint8_t kNextBitFieldIndex = Expression::kNextBitFieldIndex;
+
private:
friend class AstNodeFactory;
@@ -498,6 +516,8 @@ class Declaration : public AstNode {
Declaration(VariableProxy* proxy, Scope* scope, int pos, NodeType type)
: AstNode(pos, type), proxy_(proxy), scope_(scope) {}
+ static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
+
private:
VariableProxy* proxy_;
@@ -561,6 +581,9 @@ class IterationStatement : public BreakableStatement {
static int parent_num_ids() { return BreakableStatement::num_ids(); }
void Initialize(Statement* body) { body_ = body; }
+ static const uint8_t kNextBitFieldIndex =
+ BreakableStatement::kNextBitFieldIndex;
+
private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
@@ -715,16 +738,19 @@ class ForInStatement final : public ForEachStatement {
}
enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
- ForInType for_in_type() const { return for_in_type_; }
- void set_for_in_type(ForInType type) { for_in_type_ = type; }
+ ForInType for_in_type() const { return ForInTypeField::decode(bit_field_); }
+ void set_for_in_type(ForInType type) {
+ bit_field_ = ForInTypeField::update(bit_field_, type);
+ }
- static int num_ids() { return parent_num_ids() + 6; }
+ static int num_ids() { return parent_num_ids() + 7; }
BailoutId BodyId() const { return BailoutId(local_id(0)); }
BailoutId EnumId() const { return BailoutId(local_id(1)); }
BailoutId ToObjectId() const { return BailoutId(local_id(2)); }
BailoutId PrepareId() const { return BailoutId(local_id(3)); }
BailoutId FilterId() const { return BailoutId(local_id(4)); }
BailoutId AssignmentId() const { return BailoutId(local_id(5)); }
+ BailoutId IncrementId() const { return BailoutId(local_id(6)); }
BailoutId ContinueId() const { return EntryId(); }
BailoutId StackCheckId() const { return BodyId(); }
@@ -734,16 +760,23 @@ class ForInStatement final : public ForEachStatement {
ForInStatement(ZoneList<const AstRawString*>* labels, int pos)
: ForEachStatement(labels, pos, kForInStatement),
each_(nullptr),
- subject_(nullptr),
- for_in_type_(SLOW_FOR_IN) {}
+ subject_(nullptr) {
+ bit_field_ = ForInTypeField::update(bit_field_, SLOW_FOR_IN);
+ }
+
static int parent_num_ids() { return ForEachStatement::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Expression* each_;
Expression* subject_;
- ForInType for_in_type_;
FeedbackVectorSlot each_slot_;
FeedbackVectorSlot for_in_feedback_slot_;
+
+ class ForInTypeField
+ : public BitField<ForInType, ForEachStatement::kNextBitFieldIndex, 1> {};
+
+ protected:
+ static const uint8_t kNextBitFieldIndex = ForInTypeField::kNext;
};
@@ -938,8 +971,18 @@ class CaseClause final : public Expression {
BailoutId EntryId() const { return BailoutId(local_id(0)); }
TypeFeedbackId CompareId() { return TypeFeedbackId(local_id(1)); }
- Type* compare_type() { return compare_type_; }
- void set_compare_type(Type* type) { compare_type_ = type; }
+ AstType* compare_type() { return compare_type_; }
+ void set_compare_type(AstType* type) { compare_type_ = type; }
+
+ // CaseClause will have both a slot in the feedback vector and the
+ // TypeFeedbackId to record the type information. TypeFeedbackId is used by
+ // full codegen and the feedback vector slot is used by interpreter.
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache);
+
+ FeedbackVectorSlot CompareOperationFeedbackSlot() {
+ return type_feedback_slot_;
+ }
private:
friend class AstNodeFactory;
@@ -951,7 +994,8 @@ class CaseClause final : public Expression {
Expression* label_;
Label body_target_;
ZoneList<Statement*>* statements_;
- Type* compare_type_;
+ AstType* compare_type_;
+ FeedbackVectorSlot type_feedback_slot_;
};
@@ -1241,17 +1285,27 @@ class MaterializedLiteral : public Expression {
return depth_;
}
+ private:
+ int depth_ : 31;
+ int literal_index_;
+
+ friend class AstLiteralReindexer;
+
+ class IsSimpleField
+ : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
+
protected:
MaterializedLiteral(int literal_index, int pos, NodeType type)
- : Expression(pos, type),
- is_simple_(false),
- depth_(0),
- literal_index_(literal_index) {}
+ : Expression(pos, type), depth_(0), literal_index_(literal_index) {
+ bit_field_ |= IsSimpleField::encode(false);
+ }
// A materialized literal is simple if the values consist of only
// constants and simple object and array literals.
- bool is_simple() const { return is_simple_; }
- void set_is_simple(bool is_simple) { is_simple_ = is_simple; }
+ bool is_simple() const { return IsSimpleField::decode(bit_field_); }
+ void set_is_simple(bool is_simple) {
+ bit_field_ = IsSimpleField::update(bit_field_, is_simple);
+ }
friend class CompileTimeValue;
void set_depth(int depth) {
@@ -1271,19 +1325,45 @@ class MaterializedLiteral : public Expression {
// in the object literal boilerplate.
Handle<Object> GetBoilerplateValue(Expression* expression, Isolate* isolate);
- private:
- bool is_simple_ : 1;
- int depth_ : 31;
- int literal_index_;
-
- friend class AstLiteralReindexer;
+ static const uint8_t kNextBitFieldIndex = IsSimpleField::kNext;
};
+// Common supertype for ObjectLiteralProperty and ClassLiteralProperty
+class LiteralProperty : public ZoneObject {
+ public:
+ Expression* key() const { return key_; }
+ Expression* value() const { return value_; }
+ void set_key(Expression* e) { key_ = e; }
+ void set_value(Expression* e) { value_ = e; }
+
+ bool is_computed_name() const { return is_computed_name_; }
+
+ FeedbackVectorSlot GetSlot(int offset = 0) const {
+ DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
+ return slots_[offset];
+ }
+
+ void SetSlot(FeedbackVectorSlot slot, int offset = 0) {
+ DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
+ slots_[offset] = slot;
+ }
+
+ bool NeedsSetFunctionName() const;
+
+ protected:
+ LiteralProperty(Expression* key, Expression* value, bool is_computed_name)
+ : key_(key), value_(value), is_computed_name_(is_computed_name) {}
+
+ Expression* key_;
+ Expression* value_;
+ FeedbackVectorSlot slots_[2];
+ bool is_computed_name_;
+};
// Property is used for passing information
// about an object literal's properties from the parser
// to the code generator.
-class ObjectLiteralProperty final : public ZoneObject {
+class ObjectLiteralProperty final : public LiteralProperty {
public:
enum Kind : uint8_t {
CONSTANT, // Property with constant value (compile time).
@@ -1294,54 +1374,29 @@ class ObjectLiteralProperty final : public ZoneObject {
PROTOTYPE // Property is __proto__.
};
- Expression* key() { return key_; }
- Expression* value() { return value_; }
- Kind kind() { return kind_; }
-
- void set_key(Expression* e) { key_ = e; }
- void set_value(Expression* e) { value_ = e; }
+ Kind kind() const { return kind_; }
// Type feedback information.
- bool IsMonomorphic() { return !receiver_type_.is_null(); }
- Handle<Map> GetReceiverType() { return receiver_type_; }
+ bool IsMonomorphic() const { return !receiver_type_.is_null(); }
+ Handle<Map> GetReceiverType() const { return receiver_type_; }
- bool IsCompileTimeValue();
+ bool IsCompileTimeValue() const;
void set_emit_store(bool emit_store);
- bool emit_store();
-
- bool is_static() const { return is_static_; }
- bool is_computed_name() const { return is_computed_name_; }
-
- FeedbackVectorSlot GetSlot(int offset = 0) const {
- DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
- return slots_[offset];
- }
- void SetSlot(FeedbackVectorSlot slot, int offset = 0) {
- DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
- slots_[offset] = slot;
- }
+ bool emit_store() const;
void set_receiver_type(Handle<Map> map) { receiver_type_ = map; }
- bool NeedsSetFunctionName() const;
-
private:
friend class AstNodeFactory;
ObjectLiteralProperty(Expression* key, Expression* value, Kind kind,
- bool is_static, bool is_computed_name);
- ObjectLiteralProperty(AstValueFactory* ast_value_factory, Expression* key,
- Expression* value, bool is_static,
bool is_computed_name);
+ ObjectLiteralProperty(AstValueFactory* ast_value_factory, Expression* key,
+ Expression* value, bool is_computed_name);
- Expression* key_;
- Expression* value_;
- FeedbackVectorSlot slots_[2];
Kind kind_;
bool emit_store_;
- bool is_static_;
- bool is_computed_name_;
Handle<Map> receiver_type_;
};
@@ -1357,9 +1412,11 @@ class ObjectLiteral final : public MaterializedLiteral {
}
int properties_count() const { return boilerplate_properties_; }
ZoneList<Property*>* properties() const { return properties_; }
- bool fast_elements() const { return fast_elements_; }
- bool may_store_doubles() const { return may_store_doubles_; }
- bool has_elements() const { return has_elements_; }
+ bool fast_elements() const { return FastElementsField::decode(bit_field_); }
+ bool may_store_doubles() const {
+ return MayStoreDoublesField::decode(bit_field_);
+ }
+ bool has_elements() const { return HasElementsField::decode(bit_field_); }
bool has_shallow_properties() const {
return depth() == 1 && !has_elements() && !may_store_doubles();
}
@@ -1429,33 +1486,42 @@ class ObjectLiteral final : public MaterializedLiteral {
uint32_t boilerplate_properties, int pos)
: MaterializedLiteral(literal_index, pos, kObjectLiteral),
boilerplate_properties_(boilerplate_properties),
- fast_elements_(false),
- has_elements_(false),
- may_store_doubles_(false),
- properties_(properties) {}
+ properties_(properties) {
+ bit_field_ |= FastElementsField::encode(false) |
+ HasElementsField::encode(false) |
+ MayStoreDoublesField::encode(false);
+ }
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- uint32_t boilerplate_properties_ : 29;
- bool fast_elements_ : 1;
- bool has_elements_ : 1;
- bool may_store_doubles_ : 1;
+ uint32_t boilerplate_properties_;
FeedbackVectorSlot slot_;
Handle<FixedArray> constant_properties_;
ZoneList<Property*>* properties_;
+
+ class FastElementsField
+ : public BitField<bool, MaterializedLiteral::kNextBitFieldIndex, 1> {};
+ class HasElementsField : public BitField<bool, FastElementsField::kNext, 1> {
+ };
+ class MayStoreDoublesField
+ : public BitField<bool, HasElementsField::kNext, 1> {};
+
+ protected:
+ static const uint8_t kNextBitFieldIndex = MayStoreDoublesField::kNext;
};
// A map from property names to getter/setter pairs allocated in the zone.
class AccessorTable
: public base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
+ bool (*)(void*, void*),
ZoneAllocationPolicy> {
public:
explicit AccessorTable(Zone* zone)
: base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
- ZoneAllocationPolicy>(Literal::Match,
- ZoneAllocationPolicy(zone)),
+ bool (*)(void*, void*), ZoneAllocationPolicy>(
+ Literal::Match, ZoneAllocationPolicy(zone)),
zone_(zone) {}
Iterator lookup(Literal* literal) {
@@ -1628,19 +1694,19 @@ class VariableProxy final : public Expression {
friend class AstNodeFactory;
VariableProxy(Variable* var, int start_position, int end_position);
- VariableProxy(const AstRawString* name, Variable::Kind variable_kind,
+ VariableProxy(const AstRawString* name, VariableKind variable_kind,
int start_position, int end_position);
explicit VariableProxy(const VariableProxy* copy_from);
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- class IsThisField : public BitField8<bool, 0, 1> {};
- class IsAssignedField : public BitField8<bool, 1, 1> {};
- class IsResolvedField : public BitField8<bool, 2, 1> {};
- class IsNewTargetField : public BitField8<bool, 3, 1> {};
+ class IsThisField : public BitField<bool, Expression::kNextBitFieldIndex, 1> {
+ };
+ class IsAssignedField : public BitField<bool, IsThisField::kNext, 1> {};
+ class IsResolvedField : public BitField<bool, IsAssignedField::kNext, 1> {};
+ class IsNewTargetField : public BitField<bool, IsResolvedField::kNext, 1> {};
- uint8_t bit_field_;
// Position is stored in the AstNode superclass, but VariableProxy needs to
// know its end position too (for error messages). It cannot be inferred from
// the variable name length because it can contain escapes.
@@ -1737,22 +1803,24 @@ class Property final : public Expression {
friend class AstNodeFactory;
Property(Expression* obj, Expression* key, int pos)
- : Expression(pos, kProperty),
- bit_field_(IsForCallField::encode(false) |
- IsStringAccessField::encode(false) |
- InlineCacheStateField::encode(UNINITIALIZED)),
- obj_(obj),
- key_(key) {}
+ : Expression(pos, kProperty), obj_(obj), key_(key) {
+ bit_field_ |= IsForCallField::encode(false) |
+ IsStringAccessField::encode(false) |
+ InlineCacheStateField::encode(UNINITIALIZED);
+ }
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- class IsForCallField : public BitField8<bool, 0, 1> {};
- class IsStringAccessField : public BitField8<bool, 1, 1> {};
- class KeyTypeField : public BitField8<IcCheckType, 2, 1> {};
- class InlineCacheStateField : public BitField8<InlineCacheState, 3, 4> {};
+ class IsForCallField
+ : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
+ class IsStringAccessField : public BitField<bool, IsForCallField::kNext, 1> {
+ };
+ class KeyTypeField
+ : public BitField<IcCheckType, IsStringAccessField::kNext, 1> {};
+ class InlineCacheStateField
+ : public BitField<InlineCacheState, KeyTypeField::kNext, 4> {};
- uint8_t bit_field_;
FeedbackVectorSlot property_feedback_slot_;
Expression* obj_;
Expression* key_;
@@ -1789,15 +1857,6 @@ class Call final : public Expression {
return !target_.is_null();
}
- bool global_call() const {
- VariableProxy* proxy = expression_->AsVariableProxy();
- return proxy != NULL && proxy->var()->IsUnallocatedOrGlobalSlot();
- }
-
- bool known_global_function() const {
- return global_call() && !target_.is_null();
- }
-
Handle<JSFunction> target() { return target_; }
Handle<AllocationSite> allocation_site() { return allocation_site_; }
@@ -1867,11 +1926,12 @@ class Call final : public Expression {
Call(Expression* expression, ZoneList<Expression*>* arguments, int pos,
PossiblyEval possibly_eval)
: Expression(pos, kCall),
- bit_field_(
- IsUninitializedField::encode(false) |
- IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL)),
expression_(expression),
arguments_(arguments) {
+ bit_field_ |=
+ IsUninitializedField::encode(false) |
+ IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL);
+
if (expression->IsProperty()) {
expression->AsProperty()->mark_for_call();
}
@@ -1880,11 +1940,11 @@ class Call final : public Expression {
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- class IsUninitializedField : public BitField8<bool, 0, 1> {};
- class IsTailField : public BitField8<bool, 1, 1> {};
- class IsPossiblyEvalField : public BitField8<bool, 2, 1> {};
+ class IsUninitializedField
+ : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
+ class IsTailField : public BitField<bool, IsUninitializedField::kNext, 1> {};
+ class IsPossiblyEvalField : public BitField<bool, IsTailField::kNext, 1> {};
- uint8_t bit_field_;
FeedbackVectorSlot ic_slot_;
FeedbackVectorSlot stub_slot_;
Expression* expression_;
@@ -1904,10 +1964,9 @@ class CallNew final : public Expression {
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
- callnew_feedback_slot_ = spec->AddGeneralSlot();
- // Construct calls have two slots, one right after the other.
- // The second slot stores the call count for monomorphic calls.
- spec->AddGeneralSlot();
+ // CallNew stores feedback in the exact same way as Call. We can
+ // piggyback on the type feedback infrastructure for calls.
+ callnew_feedback_slot_ = spec->AddCallICSlot();
}
FeedbackVectorSlot CallNewFeedbackSlot() {
@@ -1915,7 +1974,7 @@ class CallNew final : public Expression {
return callnew_feedback_slot_;
}
- bool IsMonomorphic() const { return is_monomorphic_; }
+ bool IsMonomorphic() const { return IsMonomorphicField::decode(bit_field_); }
Handle<JSFunction> target() const { return target_; }
Handle<AllocationSite> allocation_site() const {
return allocation_site_;
@@ -1928,11 +1987,13 @@ class CallNew final : public Expression {
void set_allocation_site(Handle<AllocationSite> site) {
allocation_site_ = site;
}
- void set_is_monomorphic(bool monomorphic) { is_monomorphic_ = monomorphic; }
+ void set_is_monomorphic(bool monomorphic) {
+ bit_field_ = IsMonomorphicField::update(bit_field_, monomorphic);
+ }
void set_target(Handle<JSFunction> target) { target_ = target; }
void SetKnownGlobalTarget(Handle<JSFunction> target) {
target_ = target;
- is_monomorphic_ = true;
+ set_is_monomorphic(true);
}
private:
@@ -1940,19 +2001,22 @@ class CallNew final : public Expression {
CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
: Expression(pos, kCallNew),
- is_monomorphic_(false),
expression_(expression),
- arguments_(arguments) {}
+ arguments_(arguments) {
+ bit_field_ |= IsMonomorphicField::encode(false);
+ }
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- bool is_monomorphic_;
FeedbackVectorSlot callnew_feedback_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
Handle<AllocationSite> allocation_site_;
+
+ class IsMonomorphicField
+ : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
};
@@ -1969,6 +2033,10 @@ class CallRuntime final : public Expression {
DCHECK(is_jsruntime());
return context_index_;
}
+ void set_context_index(int index) {
+ DCHECK(is_jsruntime());
+ context_index_ = index;
+ }
const Runtime::Function* function() const {
DCHECK(!is_jsruntime());
return function_;
@@ -2006,7 +2074,7 @@ class CallRuntime final : public Expression {
class UnaryOperation final : public Expression {
public:
- Token::Value op() const { return op_; }
+ Token::Value op() const { return OperatorField::decode(bit_field_); }
Expression* expression() const { return expression_; }
void set_expression(Expression* e) { expression_ = e; }
@@ -2022,21 +2090,24 @@ class UnaryOperation final : public Expression {
friend class AstNodeFactory;
UnaryOperation(Token::Value op, Expression* expression, int pos)
- : Expression(pos, kUnaryOperation), op_(op), expression_(expression) {
+ : Expression(pos, kUnaryOperation), expression_(expression) {
+ bit_field_ |= OperatorField::encode(op);
DCHECK(Token::IsUnaryOp(op));
}
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- Token::Value op_;
Expression* expression_;
+
+ class OperatorField
+ : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
};
class BinaryOperation final : public Expression {
public:
- Token::Value op() const { return static_cast<Token::Value>(op_); }
+ Token::Value op() const { return OperatorField::decode(bit_field_); }
Expression* left() const { return left_; }
void set_left(Expression* e) { left_ = e; }
Expression* right() const { return right_; }
@@ -2090,18 +2161,17 @@ class BinaryOperation final : public Expression {
BinaryOperation(Token::Value op, Expression* left, Expression* right, int pos)
: Expression(pos, kBinaryOperation),
- op_(static_cast<byte>(op)),
has_fixed_right_arg_(false),
fixed_right_arg_value_(0),
left_(left),
right_(right) {
+ bit_field_ |= OperatorField::encode(op);
DCHECK(Token::IsBinaryOp(op));
}
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- const byte op_; // actually Token::Value
// TODO(rossberg): the fixed arg should probably be represented as a Constant
// type for the RHS. Currenty it's actually a Maybe<int>
bool has_fixed_right_arg_;
@@ -2110,6 +2180,9 @@ class BinaryOperation final : public Expression {
Expression* right_;
Handle<AllocationSite> allocation_site_;
FeedbackVectorSlot type_feedback_slot_;
+
+ class OperatorField
+ : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
};
@@ -2132,14 +2205,14 @@ class CountOperation final : public Expression {
KeyedAccessStoreMode GetStoreMode() const {
return StoreModeField::decode(bit_field_);
}
- Type* type() const { return type_; }
+ AstType* type() const { return type_; }
void set_key_type(IcCheckType type) {
bit_field_ = KeyTypeField::update(bit_field_, type);
}
void set_store_mode(KeyedAccessStoreMode mode) {
bit_field_ = StoreModeField::update(bit_field_, mode);
}
- void set_type(Type* type) { type_ = type; }
+ void set_type(AstType* type) { type_ = type; }
static int num_ids() { return parent_num_ids() + 4; }
BailoutId AssignmentId() const { return BailoutId(local_id(0)); }
@@ -2164,27 +2237,25 @@ class CountOperation final : public Expression {
friend class AstNodeFactory;
CountOperation(Token::Value op, bool is_prefix, Expression* expr, int pos)
- : Expression(pos, kCountOperation),
- bit_field_(
- IsPrefixField::encode(is_prefix) | KeyTypeField::encode(ELEMENT) |
- StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op)),
- type_(NULL),
- expression_(expr) {}
+ : Expression(pos, kCountOperation), type_(NULL), expression_(expr) {
+ bit_field_ |=
+ IsPrefixField::encode(is_prefix) | KeyTypeField::encode(ELEMENT) |
+ StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
+ }
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- class IsPrefixField : public BitField16<bool, 0, 1> {};
- class KeyTypeField : public BitField16<IcCheckType, 1, 1> {};
- class StoreModeField : public BitField16<KeyedAccessStoreMode, 2, 3> {};
- class TokenField : public BitField16<Token::Value, 5, 8> {};
+ class IsPrefixField
+ : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
+ class KeyTypeField : public BitField<IcCheckType, IsPrefixField::kNext, 1> {};
+ class StoreModeField
+ : public BitField<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
+ class TokenField : public BitField<Token::Value, StoreModeField::kNext, 7> {};
- // Starts with 16-bit field, which should get packed together with
- // Expression's trailing 16-bit field.
- uint16_t bit_field_;
FeedbackVectorSlot slot_;
FeedbackVectorSlot binary_operation_slot_;
- Type* type_;
+ AstType* type_;
Expression* expression_;
SmallMapList receiver_types_;
};
@@ -2192,7 +2263,7 @@ class CountOperation final : public Expression {
class CompareOperation final : public Expression {
public:
- Token::Value op() const { return op_; }
+ Token::Value op() const { return OperatorField::decode(bit_field_); }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
@@ -2204,8 +2275,18 @@ class CompareOperation final : public Expression {
TypeFeedbackId CompareOperationFeedbackId() const {
return TypeFeedbackId(local_id(0));
}
- Type* combined_type() const { return combined_type_; }
- void set_combined_type(Type* type) { combined_type_ = type; }
+ AstType* combined_type() const { return combined_type_; }
+ void set_combined_type(AstType* type) { combined_type_ = type; }
+
+ // CompareOperation will have both a slot in the feedback vector and the
+ // TypeFeedbackId to record the type information. TypeFeedbackId is used
+ // by full codegen and the feedback vector slot is used by interpreter.
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache);
+
+ FeedbackVectorSlot CompareOperationFeedbackSlot() const {
+ return type_feedback_slot_;
+ }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -2218,21 +2299,23 @@ class CompareOperation final : public Expression {
CompareOperation(Token::Value op, Expression* left, Expression* right,
int pos)
: Expression(pos, kCompareOperation),
- op_(op),
left_(left),
right_(right),
- combined_type_(Type::None()) {
+ combined_type_(AstType::None()) {
+ bit_field_ |= OperatorField::encode(op);
DCHECK(Token::IsCompareOp(op));
}
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- Token::Value op_;
Expression* left_;
Expression* right_;
- Type* combined_type_;
+ AstType* combined_type_;
+ FeedbackVectorSlot type_feedback_slot_;
+ class OperatorField
+ : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
};
@@ -2356,17 +2439,14 @@ class Assignment final : public Expression {
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- class IsUninitializedField : public BitField16<bool, 0, 1> {};
+ class IsUninitializedField
+ : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
class KeyTypeField
- : public BitField16<IcCheckType, IsUninitializedField::kNext, 1> {};
+ : public BitField<IcCheckType, IsUninitializedField::kNext, 1> {};
class StoreModeField
- : public BitField16<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
- class TokenField : public BitField16<Token::Value, StoreModeField::kNext, 8> {
- };
+ : public BitField<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
+ class TokenField : public BitField<Token::Value, StoreModeField::kNext, 7> {};
- // Starts with 16-bit field, which should get packed together with
- // Expression's trailing 16-bit field.
- uint16_t bit_field_;
FeedbackVectorSlot slot_;
Expression* target_;
Expression* value_;
@@ -2393,14 +2473,14 @@ class Assignment final : public Expression {
class RewritableExpression final : public Expression {
public:
Expression* expression() const { return expr_; }
- bool is_rewritten() const { return is_rewritten_; }
+ bool is_rewritten() const { return IsRewrittenField::decode(bit_field_); }
void Rewrite(Expression* new_expression) {
DCHECK(!is_rewritten());
DCHECK_NOT_NULL(new_expression);
DCHECK(!new_expression->IsRewritableExpression());
expr_ = new_expression;
- is_rewritten_ = true;
+ bit_field_ = IsRewrittenField::update(bit_field_, true);
}
static int num_ids() { return parent_num_ids(); }
@@ -2410,15 +2490,17 @@ class RewritableExpression final : public Expression {
explicit RewritableExpression(Expression* expression)
: Expression(expression->position(), kRewritableExpression),
- is_rewritten_(false),
expr_(expression) {
+ bit_field_ |= IsRewrittenField::encode(false);
DCHECK(!expression->IsRewritableExpression());
}
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- bool is_rewritten_;
Expression* expr_;
+
+ class IsRewrittenField
+ : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
};
// Our Yield is different from the JS yield in that it "returns" its argument as
@@ -2430,8 +2512,11 @@ class Yield final : public Expression {
Expression* generator_object() const { return generator_object_; }
Expression* expression() const { return expression_; }
+ OnException on_exception() const {
+ return OnExceptionField::decode(bit_field_);
+ }
bool rethrow_on_exception() const {
- return on_exception_ == kOnExceptionRethrow;
+ return on_exception() == kOnExceptionRethrow;
}
int yield_id() const { return yield_id_; }
@@ -2445,15 +2530,18 @@ class Yield final : public Expression {
Yield(Expression* generator_object, Expression* expression, int pos,
OnException on_exception)
: Expression(pos, kYield),
- on_exception_(on_exception),
yield_id_(-1),
generator_object_(generator_object),
- expression_(expression) {}
+ expression_(expression) {
+ bit_field_ |= OnExceptionField::encode(on_exception);
+ }
- OnException on_exception_;
int yield_id_;
Expression* generator_object_;
Expression* expression_;
+
+ class OnExceptionField
+ : public BitField<OnException, Expression::kNextBitFieldIndex, 1> {};
};
@@ -2547,14 +2635,14 @@ class FunctionLiteral final : public Expression {
inferred_name_ = Handle<String>();
}
- bool pretenure() const { return Pretenure::decode(bitfield_); }
- void set_pretenure() { bitfield_ = Pretenure::update(bitfield_, true); }
+ bool pretenure() const { return Pretenure::decode(bit_field_); }
+ void set_pretenure() { bit_field_ = Pretenure::update(bit_field_, true); }
bool has_duplicate_parameters() const {
- return HasDuplicateParameters::decode(bitfield_);
+ return HasDuplicateParameters::decode(bit_field_);
}
- bool is_function() const { return IsFunction::decode(bitfield_); }
+ bool is_function() const { return IsFunction::decode(bit_field_); }
// This is used as a heuristic on when to eagerly compile a function
// literal. We consider the following constructs as hints that the
@@ -2562,25 +2650,25 @@ class FunctionLiteral final : public Expression {
// - (function() { ... })();
// - var x = function() { ... }();
bool should_eager_compile() const {
- return ShouldEagerCompile::decode(bitfield_);
+ return ShouldEagerCompile::decode(bit_field_);
}
void set_should_eager_compile() {
- bitfield_ = ShouldEagerCompile::update(bitfield_, true);
+ bit_field_ = ShouldEagerCompile::update(bit_field_, true);
}
// A hint that we expect this function to be called (exactly) once,
// i.e. we suspect it's an initialization function.
bool should_be_used_once_hint() const {
- return ShouldBeUsedOnceHint::decode(bitfield_);
+ return ShouldNotBeUsedOnceHintField::decode(bit_field_);
}
void set_should_be_used_once_hint() {
- bitfield_ = ShouldBeUsedOnceHint::update(bitfield_, true);
+ bit_field_ = ShouldNotBeUsedOnceHintField::update(bit_field_, true);
}
FunctionType function_type() const {
- return FunctionTypeBits::decode(bitfield_);
+ return FunctionTypeBits::decode(bit_field_);
}
- FunctionKind kind() const { return FunctionKindBits::decode(bitfield_); }
+ FunctionKind kind() const;
int ast_node_count() { return ast_properties_.node_count(); }
AstProperties::Flags flags() const { return ast_properties_.flags(); }
@@ -2590,10 +2678,12 @@ class FunctionLiteral final : public Expression {
const FeedbackVectorSpec* feedback_vector_spec() const {
return ast_properties_.get_spec();
}
- bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
- BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+ bool dont_optimize() { return dont_optimize_reason() != kNoReason; }
+ BailoutReason dont_optimize_reason() {
+ return DontOptimizeReasonField::decode(bit_field_);
+ }
void set_dont_optimize_reason(BailoutReason reason) {
- dont_optimize_reason_ = reason;
+ bit_field_ = DontOptimizeReasonField::update(bit_field_, reason);
}
bool IsAnonymousFunctionDefinition() const {
@@ -2603,6 +2693,21 @@ class FunctionLiteral final : public Expression {
int yield_count() { return yield_count_; }
void set_yield_count(int yield_count) { yield_count_ = yield_count; }
+ bool requires_class_field_init() {
+ return RequiresClassFieldInit::decode(bit_field_);
+ }
+ void set_requires_class_field_init(bool requires_class_field_init) {
+ bit_field_ =
+ RequiresClassFieldInit::update(bit_field_, requires_class_field_init);
+ }
+ bool is_class_field_initializer() {
+ return IsClassFieldInitializer::decode(bit_field_);
+ }
+ void set_is_class_field_initializer(bool is_class_field_initializer) {
+ bit_field_ =
+ IsClassFieldInitializer::update(bit_field_, is_class_field_initializer);
+ }
+
private:
friend class AstNodeFactory;
@@ -2612,10 +2717,9 @@ class FunctionLiteral final : public Expression {
int expected_property_count, int parameter_count,
FunctionType function_type,
ParameterFlag has_duplicate_parameters,
- EagerCompileHint eager_compile_hint, FunctionKind kind,
- int position, bool is_function)
+ EagerCompileHint eager_compile_hint, int position,
+ bool is_function)
: Expression(position, kFunctionLiteral),
- dont_optimize_reason_(kNoReason),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
parameter_count_(parameter_count),
@@ -2626,29 +2730,32 @@ class FunctionLiteral final : public Expression {
body_(body),
raw_inferred_name_(ast_value_factory->empty_string()),
ast_properties_(zone) {
- bitfield_ =
+ bit_field_ |=
FunctionTypeBits::encode(function_type) | Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters ==
kHasDuplicateParameters) |
IsFunction::encode(is_function) |
ShouldEagerCompile::encode(eager_compile_hint == kShouldEagerCompile) |
- FunctionKindBits::encode(kind) | ShouldBeUsedOnceHint::encode(false);
- DCHECK(IsValidFunctionKind(kind));
- }
-
- class FunctionTypeBits : public BitField16<FunctionType, 0, 2> {};
- class Pretenure : public BitField16<bool, 2, 1> {};
- class HasDuplicateParameters : public BitField16<bool, 3, 1> {};
- class IsFunction : public BitField16<bool, 4, 1> {};
- class ShouldEagerCompile : public BitField16<bool, 5, 1> {};
- class ShouldBeUsedOnceHint : public BitField16<bool, 6, 1> {};
- class FunctionKindBits : public BitField16<FunctionKind, 7, 9> {};
-
- // Start with 16-bit field, which should get packed together
- // with Expression's trailing 16-bit field.
- uint16_t bitfield_;
-
- BailoutReason dont_optimize_reason_;
+ RequiresClassFieldInit::encode(false) |
+ ShouldNotBeUsedOnceHintField::encode(false) |
+ DontOptimizeReasonField::encode(kNoReason) |
+ IsClassFieldInitializer::encode(false);
+ }
+
+ class FunctionTypeBits
+ : public BitField<FunctionType, Expression::kNextBitFieldIndex, 2> {};
+ class Pretenure : public BitField<bool, FunctionTypeBits::kNext, 1> {};
+ class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
+ class IsFunction : public BitField<bool, HasDuplicateParameters::kNext, 1> {};
+ class ShouldEagerCompile : public BitField<bool, IsFunction::kNext, 1> {};
+ class ShouldNotBeUsedOnceHintField
+ : public BitField<bool, ShouldEagerCompile::kNext, 1> {};
+ class RequiresClassFieldInit
+ : public BitField<bool, ShouldNotBeUsedOnceHintField::kNext, 1> {};
+ class IsClassFieldInitializer
+ : public BitField<bool, RequiresClassFieldInit::kNext, 1> {};
+ class DontOptimizeReasonField
+ : public BitField<BailoutReason, IsClassFieldInitializer::kNext, 8> {};
int materialized_literal_count_;
int expected_property_count_;
@@ -2664,10 +2771,29 @@ class FunctionLiteral final : public Expression {
AstProperties ast_properties_;
};
+// Property is used for passing information
+// about a class literal's properties from the parser to the code generator.
+class ClassLiteralProperty final : public LiteralProperty {
+ public:
+ enum Kind : uint8_t { METHOD, GETTER, SETTER, FIELD };
+
+ Kind kind() const { return kind_; }
+
+ bool is_static() const { return is_static_; }
+
+ private:
+ friend class AstNodeFactory;
+
+ ClassLiteralProperty(Expression* key, Expression* value, Kind kind,
+ bool is_static, bool is_computed_name);
+
+ Kind kind_;
+ bool is_static_;
+};
class ClassLiteral final : public Expression {
public:
- typedef ObjectLiteralProperty Property;
+ typedef ClassLiteralProperty Property;
VariableProxy* class_variable_proxy() const { return class_variable_proxy_; }
Expression* extends() const { return extends_; }
@@ -2678,6 +2804,13 @@ class ClassLiteral final : public Expression {
int start_position() const { return position(); }
int end_position() const { return end_position_; }
+ VariableProxy* static_initializer_proxy() const {
+ return static_initializer_proxy_;
+ }
+ void set_static_initializer_proxy(VariableProxy* proxy) {
+ static_initializer_proxy_ = proxy;
+ }
+
BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
BailoutId PrototypeId() { return BailoutId(local_id(1)); }
@@ -2712,7 +2845,8 @@ class ClassLiteral final : public Expression {
class_variable_proxy_(class_variable_proxy),
extends_(extends),
constructor_(constructor),
- properties_(properties) {}
+ properties_(properties),
+ static_initializer_proxy_(nullptr) {}
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
@@ -2724,6 +2858,7 @@ class ClassLiteral final : public Expression {
Expression* extends_;
FunctionLiteral* constructor_;
ZoneList<Property*>* properties_;
+ VariableProxy* static_initializer_proxy_;
};
@@ -3097,6 +3232,16 @@ class AstNodeFactory final BASE_EMBEDDED {
try_block, scope, variable, catch_block, HandlerTable::DESUGARING, pos);
}
+ TryCatchStatement* NewTryCatchStatementForAsyncAwait(Block* try_block,
+ Scope* scope,
+ Variable* variable,
+ Block* catch_block,
+ int pos) {
+ return new (zone_)
+ TryCatchStatement(try_block, scope, variable, catch_block,
+ HandlerTable::ASYNC_AWAIT, pos);
+ }
+
TryFinallyStatement* NewTryFinallyStatement(Block* try_block,
Block* finally_block, int pos) {
return new (zone_) TryFinallyStatement(try_block, finally_block, pos);
@@ -3110,9 +3255,9 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) EmptyStatement(pos);
}
- SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(
- Statement* statement, Scope* scope) {
- return new (zone_) SloppyBlockFunctionStatement(statement, scope);
+ SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(Scope* scope) {
+ return new (zone_) SloppyBlockFunctionStatement(
+ NewEmptyStatement(kNoSourcePosition), scope);
}
CaseClause* NewCaseClause(
@@ -3163,17 +3308,16 @@ class AstNodeFactory final BASE_EMBEDDED {
ObjectLiteral::Property* NewObjectLiteralProperty(
Expression* key, Expression* value, ObjectLiteralProperty::Kind kind,
- bool is_static, bool is_computed_name) {
+ bool is_computed_name) {
return new (zone_)
- ObjectLiteral::Property(key, value, kind, is_static, is_computed_name);
+ ObjectLiteral::Property(key, value, kind, is_computed_name);
}
ObjectLiteral::Property* NewObjectLiteralProperty(Expression* key,
Expression* value,
- bool is_static,
bool is_computed_name) {
return new (zone_) ObjectLiteral::Property(ast_value_factory_, key, value,
- is_static, is_computed_name);
+ is_computed_name);
}
RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern, int flags,
@@ -3201,7 +3345,7 @@ class AstNodeFactory final BASE_EMBEDDED {
}
VariableProxy* NewVariableProxy(const AstRawString* name,
- Variable::Kind variable_kind,
+ VariableKind variable_kind,
int start_position = kNoSourcePosition,
int end_position = kNoSourcePosition) {
DCHECK_NOT_NULL(name);
@@ -3318,13 +3462,12 @@ class AstNodeFactory final BASE_EMBEDDED {
int expected_property_count, int parameter_count,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
- FunctionLiteral::EagerCompileHint eager_compile_hint, FunctionKind kind,
- int position) {
- return new (zone_) FunctionLiteral(
- zone_, name, ast_value_factory_, scope, body,
- materialized_literal_count, expected_property_count, parameter_count,
- function_type, has_duplicate_parameters, eager_compile_hint, kind,
- position, true);
+ FunctionLiteral::EagerCompileHint eager_compile_hint, int position) {
+ return new (zone_) FunctionLiteral(zone_, name, ast_value_factory_, scope,
+ body, materialized_literal_count,
+ expected_property_count, parameter_count,
+ function_type, has_duplicate_parameters,
+ eager_compile_hint, position, true);
}
// Creates a FunctionLiteral representing a top-level script, the
@@ -3332,19 +3475,26 @@ class AstNodeFactory final BASE_EMBEDDED {
// the Function constructor.
FunctionLiteral* NewScriptOrEvalFunctionLiteral(
DeclarationScope* scope, ZoneList<Statement*>* body,
- int materialized_literal_count, int expected_property_count) {
+ int materialized_literal_count, int expected_property_count,
+ int parameter_count) {
return new (zone_) FunctionLiteral(
zone_, ast_value_factory_->empty_string(), ast_value_factory_, scope,
- body, materialized_literal_count, expected_property_count, 0,
- FunctionLiteral::kAnonymousExpression,
+ body, materialized_literal_count, expected_property_count,
+ parameter_count, FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::kShouldLazyCompile, FunctionKind::kNormalFunction, 0,
- false);
+ FunctionLiteral::kShouldLazyCompile, 0, false);
+ }
+
+ ClassLiteral::Property* NewClassLiteralProperty(
+ Expression* key, Expression* value, ClassLiteralProperty::Kind kind,
+ bool is_static, bool is_computed_name) {
+ return new (zone_)
+ ClassLiteral::Property(key, value, kind, is_static, is_computed_name);
}
ClassLiteral* NewClassLiteral(VariableProxy* proxy, Expression* extends,
FunctionLiteral* constructor,
- ZoneList<ObjectLiteral::Property*>* properties,
+ ZoneList<ClassLiteral::Property*>* properties,
int start_position, int end_position) {
return new (zone_) ClassLiteral(proxy, extends, constructor, properties,
start_position, end_position);
@@ -3396,7 +3546,8 @@ class AstNodeFactory final BASE_EMBEDDED {
}
}
- ~BodyScope() { factory_->zone_ = prev_zone_; }
+ void Reset() { factory_->zone_ = prev_zone_; }
+ ~BodyScope() { Reset(); }
private:
AstNodeFactory* factory_;
diff --git a/deps/v8/src/ast/compile-time-value.cc b/deps/v8/src/ast/compile-time-value.cc
new file mode 100644
index 0000000000..eda536b716
--- /dev/null
+++ b/deps/v8/src/ast/compile-time-value.cc
@@ -0,0 +1,56 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ast/compile-time-value.h"
+
+#include "src/ast/ast.h"
+#include "src/factory.h"
+#include "src/handles-inl.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
+ if (expression->IsLiteral()) return true;
+ MaterializedLiteral* lit = expression->AsMaterializedLiteral();
+ return lit != NULL && lit->is_simple();
+}
+
+Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
+ Expression* expression) {
+ Factory* factory = isolate->factory();
+ DCHECK(IsCompileTimeValue(expression));
+ Handle<FixedArray> result = factory->NewFixedArray(2, TENURED);
+ ObjectLiteral* object_literal = expression->AsObjectLiteral();
+ if (object_literal != NULL) {
+ DCHECK(object_literal->is_simple());
+ if (object_literal->fast_elements()) {
+ result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
+ } else {
+ result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_SLOW_ELEMENTS));
+ }
+ result->set(kElementsSlot, *object_literal->constant_properties());
+ } else {
+ ArrayLiteral* array_literal = expression->AsArrayLiteral();
+ DCHECK(array_literal != NULL && array_literal->is_simple());
+ result->set(kLiteralTypeSlot, Smi::FromInt(ARRAY_LITERAL));
+ result->set(kElementsSlot, *array_literal->constant_elements());
+ }
+ return result;
+}
+
+CompileTimeValue::LiteralType CompileTimeValue::GetLiteralType(
+ Handle<FixedArray> value) {
+ Smi* literal_type = Smi::cast(value->get(kLiteralTypeSlot));
+ return static_cast<LiteralType>(literal_type->value());
+}
+
+Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
+ return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ast/compile-time-value.h b/deps/v8/src/ast/compile-time-value.h
new file mode 100644
index 0000000000..27351b79cc
--- /dev/null
+++ b/deps/v8/src/ast/compile-time-value.h
@@ -0,0 +1,45 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_COMPILE_TIME_VALUE
+#define V8_AST_COMPILE_TIME_VALUE
+
+#include "src/allocation.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Expression;
+
+// Support for handling complex values (array and object literals) that
+// can be fully handled at compile time.
+class CompileTimeValue : public AllStatic {
+ public:
+ enum LiteralType {
+ OBJECT_LITERAL_FAST_ELEMENTS,
+ OBJECT_LITERAL_SLOW_ELEMENTS,
+ ARRAY_LITERAL
+ };
+
+ static bool IsCompileTimeValue(Expression* expression);
+
+ // Get the value as a compile time value.
+ static Handle<FixedArray> GetValue(Isolate* isolate, Expression* expression);
+
+ // Get the type of a compile time value returned by GetValue().
+ static LiteralType GetLiteralType(Handle<FixedArray> value);
+
+ // Get the elements array of a compile time value returned by GetValue().
+ static Handle<FixedArray> GetElements(Handle<FixedArray> value);
+
+ private:
+ static const int kLiteralTypeSlot = 0;
+ static const int kElementsSlot = 1;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_AST_COMPILE_TIME_VALUE
diff --git a/deps/v8/src/ast/context-slot-cache.cc b/deps/v8/src/ast/context-slot-cache.cc
index 43bd6d6b19..b1387e10f5 100644
--- a/deps/v8/src/ast/context-slot-cache.cc
+++ b/deps/v8/src/ast/context-slot-cache.cc
@@ -8,6 +8,13 @@
#include "src/ast/scopes.h"
#include "src/bootstrapper.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ast/context-slot-cache.h b/deps/v8/src/ast/context-slot-cache.h
index 8e9d1f7a8b..4345a65a3d 100644
--- a/deps/v8/src/ast/context-slot-cache.h
+++ b/deps/v8/src/ast/context-slot-cache.h
@@ -7,7 +7,6 @@
#include "src/allocation.h"
#include "src/ast/modules.h"
-#include "src/ast/variables.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index cd47c00b92..2d28d5564b 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -12,49 +12,35 @@ namespace internal {
void ModuleDescriptor::AddImport(
const AstRawString* import_name, const AstRawString* local_name,
const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
- DCHECK_NOT_NULL(import_name);
- DCHECK_NOT_NULL(local_name);
- DCHECK_NOT_NULL(module_request);
- ModuleEntry* entry = new (zone) ModuleEntry(loc);
+ Entry* entry = new (zone) Entry(loc);
entry->local_name = local_name;
entry->import_name = import_name;
- entry->module_request = module_request;
- regular_imports_.insert(std::make_pair(entry->local_name, entry));
- // We don't care if there's already an entry for this local name, as in that
- // case we will report an error when declaring the variable.
+ entry->module_request = AddModuleRequest(module_request);
+ AddRegularImport(entry);
}
void ModuleDescriptor::AddStarImport(
const AstRawString* local_name, const AstRawString* module_request,
Scanner::Location loc, Zone* zone) {
- DCHECK_NOT_NULL(local_name);
- DCHECK_NOT_NULL(module_request);
- ModuleEntry* entry = new (zone) ModuleEntry(loc);
+ Entry* entry = new (zone) Entry(loc);
entry->local_name = local_name;
- entry->module_request = module_request;
- special_imports_.Add(entry, zone);
+ entry->module_request = AddModuleRequest(module_request);
+ AddNamespaceImport(entry, zone);
}
-
-void ModuleDescriptor::AddEmptyImport(
- const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
- DCHECK_NOT_NULL(module_request);
- ModuleEntry* entry = new (zone) ModuleEntry(loc);
- entry->module_request = module_request;
- special_imports_.Add(entry, zone);
+void ModuleDescriptor::AddEmptyImport(const AstRawString* module_request) {
+ AddModuleRequest(module_request);
}
void ModuleDescriptor::AddExport(
const AstRawString* local_name, const AstRawString* export_name,
Scanner::Location loc, Zone* zone) {
- DCHECK_NOT_NULL(local_name);
- DCHECK_NOT_NULL(export_name);
- ModuleEntry* entry = new (zone) ModuleEntry(loc);
+ Entry* entry = new (zone) Entry(loc);
entry->export_name = export_name;
entry->local_name = local_name;
- exports_.Add(entry, zone);
+ AddRegularExport(entry);
}
@@ -63,40 +49,186 @@ void ModuleDescriptor::AddExport(
const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
DCHECK_NOT_NULL(import_name);
DCHECK_NOT_NULL(export_name);
- DCHECK_NOT_NULL(module_request);
- ModuleEntry* entry = new (zone) ModuleEntry(loc);
+ Entry* entry = new (zone) Entry(loc);
entry->export_name = export_name;
entry->import_name = import_name;
- entry->module_request = module_request;
- exports_.Add(entry, zone);
+ entry->module_request = AddModuleRequest(module_request);
+ AddSpecialExport(entry, zone);
}
void ModuleDescriptor::AddStarExport(
const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
- DCHECK_NOT_NULL(module_request);
- ModuleEntry* entry = new (zone) ModuleEntry(loc);
- entry->module_request = module_request;
- exports_.Add(entry, zone);
+ Entry* entry = new (zone) Entry(loc);
+ entry->module_request = AddModuleRequest(module_request);
+ AddSpecialExport(entry, zone);
+}
+
+namespace {
+
+Handle<Object> ToStringOrUndefined(Isolate* isolate, const AstRawString* s) {
+ return (s == nullptr)
+ ? Handle<Object>::cast(isolate->factory()->undefined_value())
+ : Handle<Object>::cast(s->string());
+}
+
+const AstRawString* FromStringOrUndefined(Isolate* isolate,
+ AstValueFactory* avfactory,
+ Handle<Object> object) {
+ if (object->IsUndefined(isolate)) return nullptr;
+ return avfactory->GetString(Handle<String>::cast(object));
+}
+
+} // namespace
+
+Handle<ModuleInfoEntry> ModuleDescriptor::Entry::Serialize(
+ Isolate* isolate) const {
+ CHECK(Smi::IsValid(module_request)); // TODO(neis): Check earlier?
+ return ModuleInfoEntry::New(
+ isolate, ToStringOrUndefined(isolate, export_name),
+ ToStringOrUndefined(isolate, local_name),
+ ToStringOrUndefined(isolate, import_name),
+ Handle<Object>(Smi::FromInt(module_request), isolate));
}
-void ModuleDescriptor::MakeIndirectExportsExplicit() {
- for (auto entry : exports_) {
- if (entry->export_name == nullptr) continue;
- if (entry->import_name != nullptr) continue;
+ModuleDescriptor::Entry* ModuleDescriptor::Entry::Deserialize(
+ Isolate* isolate, AstValueFactory* avfactory,
+ Handle<ModuleInfoEntry> entry) {
+ Entry* result = new (avfactory->zone()) Entry(Scanner::Location::invalid());
+ result->export_name = FromStringOrUndefined(
+ isolate, avfactory, handle(entry->export_name(), isolate));
+ result->local_name = FromStringOrUndefined(
+ isolate, avfactory, handle(entry->local_name(), isolate));
+ result->import_name = FromStringOrUndefined(
+ isolate, avfactory, handle(entry->import_name(), isolate));
+ result->module_request = Smi::cast(entry->module_request())->value();
+ return result;
+}
+
+Handle<FixedArray> ModuleDescriptor::SerializeRegularExports(Isolate* isolate,
+ Zone* zone) const {
+ // We serialize regular exports in a way that lets us later iterate over their
+ // local names and for each local name immediately access all its export
+ // names. (Regular exports have neither import name nor module request.)
+
+ ZoneVector<Handle<Object>> data(zone);
+ data.reserve(2 * regular_exports_.size());
+
+ for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
+ // Find out how many export names this local name has.
+ auto next = it;
+ int size = 0;
+ do {
+ ++next;
+ ++size;
+ } while (next != regular_exports_.end() && next->first == it->first);
+
+ Handle<FixedArray> export_names = isolate->factory()->NewFixedArray(size);
+ data.push_back(it->second->local_name->string());
+ data.push_back(export_names);
+
+ // Collect the export names.
+ int i = 0;
+ for (; it != next; ++it) {
+ export_names->set(i++, *it->second->export_name->string());
+ }
+ DCHECK_EQ(i, size);
+
+ // Continue with the next distinct key.
+ DCHECK(it == next);
+ }
+
+ // We cannot create the FixedArray earlier because we only now know the
+ // precise size (the number of unique keys in regular_exports).
+ int size = static_cast<int>(data.size());
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(size);
+ for (int i = 0; i < size; ++i) {
+ result->set(i, *data[i]);
+ }
+ return result;
+}
+
+void ModuleDescriptor::DeserializeRegularExports(Isolate* isolate,
+ AstValueFactory* avfactory,
+ Handle<FixedArray> data) {
+ for (int i = 0, length_i = data->length(); i < length_i;) {
+ Handle<String> local_name(String::cast(data->get(i++)), isolate);
+ Handle<FixedArray> export_names(FixedArray::cast(data->get(i++)), isolate);
+
+ for (int j = 0, length_j = export_names->length(); j < length_j; ++j) {
+ Handle<String> export_name(String::cast(export_names->get(j)), isolate);
+
+ Entry* entry =
+ new (avfactory->zone()) Entry(Scanner::Location::invalid());
+ entry->local_name = avfactory->GetString(local_name);
+ entry->export_name = avfactory->GetString(export_name);
+
+ AddRegularExport(entry);
+ }
+ }
+}
+
+void ModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) {
+ for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
+ Entry* entry = it->second;
DCHECK_NOT_NULL(entry->local_name);
- auto it = regular_imports_.find(entry->local_name);
- if (it != regular_imports_.end()) {
- // Found an indirect export.
- DCHECK_NOT_NULL(it->second->module_request);
- DCHECK_NOT_NULL(it->second->import_name);
- entry->import_name = it->second->import_name;
- entry->module_request = it->second->module_request;
+ auto import = regular_imports_.find(entry->local_name);
+ if (import != regular_imports_.end()) {
+ // Found an indirect export. Patch export entry and move it from regular
+ // to special.
+ DCHECK_NULL(entry->import_name);
+ DCHECK_LT(entry->module_request, 0);
+ DCHECK_NOT_NULL(import->second->import_name);
+ DCHECK_LE(0, import->second->module_request);
+ DCHECK_LT(import->second->module_request,
+ static_cast<int>(module_requests_.size()));
+ entry->import_name = import->second->import_name;
+ entry->module_request = import->second->module_request;
entry->local_name = nullptr;
+ AddSpecialExport(entry, zone);
+ it = regular_exports_.erase(it);
+ } else {
+ it++;
}
}
}
+namespace {
+
+const ModuleDescriptor::Entry* BetterDuplicate(
+ const ModuleDescriptor::Entry* candidate,
+ ZoneMap<const AstRawString*, const ModuleDescriptor::Entry*>& export_names,
+ const ModuleDescriptor::Entry* current_duplicate) {
+ DCHECK_NOT_NULL(candidate->export_name);
+ DCHECK(candidate->location.IsValid());
+ auto insert_result =
+ export_names.insert(std::make_pair(candidate->export_name, candidate));
+ if (insert_result.second) return current_duplicate;
+ if (current_duplicate == nullptr) {
+ current_duplicate = insert_result.first->second;
+ }
+ return (candidate->location.beg_pos > current_duplicate->location.beg_pos)
+ ? candidate
+ : current_duplicate;
+}
+
+} // namespace
+
+const ModuleDescriptor::Entry* ModuleDescriptor::FindDuplicateExport(
+ Zone* zone) const {
+ const ModuleDescriptor::Entry* duplicate = nullptr;
+ ZoneMap<const AstRawString*, const ModuleDescriptor::Entry*> export_names(
+ zone);
+ for (const auto& elem : regular_exports_) {
+ duplicate = BetterDuplicate(elem.second, export_names, duplicate);
+ }
+ for (auto entry : special_exports_) {
+ if (entry->export_name == nullptr) continue; // Star export.
+ duplicate = BetterDuplicate(entry, export_names, duplicate);
+ }
+ return duplicate;
+}
+
bool ModuleDescriptor::Validate(ModuleScope* module_scope,
PendingCompilationErrorHandler* error_handler,
Zone* zone) {
@@ -105,29 +237,19 @@ bool ModuleDescriptor::Validate(ModuleScope* module_scope,
// Report error iff there are duplicate exports.
{
- ZoneAllocationPolicy allocator(zone);
- ZoneHashMap* export_names = new (zone->New(sizeof(ZoneHashMap)))
- ZoneHashMap(ZoneHashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity, allocator);
- for (auto entry : exports_) {
- if (entry->export_name == nullptr) continue;
- AstRawString* key = const_cast<AstRawString*>(entry->export_name);
- ZoneHashMap::Entry* p =
- export_names->LookupOrInsert(key, key->hash(), allocator);
- DCHECK_NOT_NULL(p);
- if (p->value != nullptr) {
- error_handler->ReportMessageAt(
- entry->location.beg_pos, entry->location.end_pos,
- MessageTemplate::kDuplicateExport, entry->export_name);
- return false;
- }
- p->value = key; // Anything but nullptr.
+ const Entry* entry = FindDuplicateExport(zone);
+ if (entry != nullptr) {
+ error_handler->ReportMessageAt(
+ entry->location.beg_pos, entry->location.end_pos,
+ MessageTemplate::kDuplicateExport, entry->export_name);
+ return false;
}
}
// Report error iff there are exports of non-existent local names.
- for (auto entry : exports_) {
- if (entry->local_name == nullptr) continue;
+ for (const auto& elem : regular_exports_) {
+ const Entry* entry = elem.second;
+ DCHECK_NOT_NULL(entry->local_name);
if (module_scope->LookupLocal(entry->local_name) == nullptr) {
error_handler->ReportMessageAt(
entry->location.beg_pos, entry->location.end_pos,
@@ -136,7 +258,7 @@ bool ModuleDescriptor::Validate(ModuleScope* module_scope,
}
}
- MakeIndirectExportsExplicit();
+ MakeIndirectExportsExplicit(zone);
return true;
}
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index c8f7aa3793..4d36735fa8 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -7,19 +7,26 @@
#include "src/parsing/scanner.h" // Only for Scanner::Location.
#include "src/pending-compilation-error-handler.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
class AstRawString;
-
+class ModuleInfoEntry;
class ModuleDescriptor : public ZoneObject {
public:
explicit ModuleDescriptor(Zone* zone)
- : exports_(1, zone), special_imports_(1, zone), regular_imports_(zone) {}
+ : module_requests_(zone),
+ special_exports_(1, zone),
+ namespace_imports_(1, zone),
+ regular_exports_(zone),
+ regular_imports_(zone) {}
+
+ // The following Add* methods are high-level convenience functions for use by
+ // the parser.
// import x from "foo.js";
// import {x} from "foo.js";
@@ -37,9 +44,7 @@ class ModuleDescriptor : public ZoneObject {
// import "foo.js";
// import {} from "foo.js";
// export {} from "foo.js"; (sic!)
- void AddEmptyImport(
- const AstRawString* module_request, const Scanner::Location loc,
- Zone* zone);
+ void AddEmptyImport(const AstRawString* module_request);
// export {x};
// export {x as y};
@@ -67,38 +72,107 @@ class ModuleDescriptor : public ZoneObject {
bool Validate(ModuleScope* module_scope,
PendingCompilationErrorHandler* error_handler, Zone* zone);
- struct ModuleEntry : public ZoneObject {
+ struct Entry : public ZoneObject {
const Scanner::Location location;
const AstRawString* export_name;
const AstRawString* local_name;
const AstRawString* import_name;
- const AstRawString* module_request;
-
- explicit ModuleEntry(Scanner::Location loc)
+ // The module_request value records the order in which modules are
+ // requested. It also functions as an index into the ModuleInfo's array of
+ // module specifiers and into the Module's array of requested modules. A
+ // negative value means no module request.
+ int module_request;
+
+ // TODO(neis): Remove local_name component?
+ explicit Entry(Scanner::Location loc)
: location(loc),
export_name(nullptr),
local_name(nullptr),
import_name(nullptr),
- module_request(nullptr) {}
+ module_request(-1) {}
+
+ // (De-)serialization support.
+ // Note that the location value is not preserved as it's only needed by the
+ // parser. (A Deserialize'd entry has an invalid location.)
+ Handle<ModuleInfoEntry> Serialize(Isolate* isolate) const;
+ static Entry* Deserialize(Isolate* isolate, AstValueFactory* avfactory,
+ Handle<ModuleInfoEntry> entry);
};
- const ZoneList<ModuleEntry*>& exports() const { return exports_; }
+ // Module requests.
+ const ZoneMap<const AstRawString*, int>& module_requests() const {
+ return module_requests_;
+ }
- // Empty imports and namespace imports.
- const ZoneList<const ModuleEntry*>& special_imports() const {
- return special_imports_;
+ // Namespace imports.
+ const ZoneList<const Entry*>& namespace_imports() const {
+ return namespace_imports_;
}
// All the remaining imports, indexed by local name.
- const ZoneMap<const AstRawString*, const ModuleEntry*>& regular_imports()
- const {
+ const ZoneMap<const AstRawString*, const Entry*>& regular_imports() const {
return regular_imports_;
}
+ // Star exports and explicitly indirect exports.
+ const ZoneList<const Entry*>& special_exports() const {
+ return special_exports_;
+ }
+
+ // All the remaining exports, indexed by local name.
+ // After canonicalization (see Validate), these are exactly the local exports.
+ const ZoneMultimap<const AstRawString*, Entry*>& regular_exports() const {
+ return regular_exports_;
+ }
+
+ void AddRegularExport(Entry* entry) {
+ DCHECK_NOT_NULL(entry->export_name);
+ DCHECK_NOT_NULL(entry->local_name);
+ DCHECK_NULL(entry->import_name);
+ DCHECK_LT(entry->module_request, 0);
+ regular_exports_.insert(std::make_pair(entry->local_name, entry));
+ }
+
+ void AddSpecialExport(const Entry* entry, Zone* zone) {
+ DCHECK_NULL(entry->local_name);
+ DCHECK_LE(0, entry->module_request);
+ special_exports_.Add(entry, zone);
+ }
+
+ void AddRegularImport(const Entry* entry) {
+ DCHECK_NOT_NULL(entry->import_name);
+ DCHECK_NOT_NULL(entry->local_name);
+ DCHECK_NULL(entry->export_name);
+ DCHECK_LE(0, entry->module_request);
+ regular_imports_.insert(std::make_pair(entry->local_name, entry));
+ // We don't care if there's already an entry for this local name, as in that
+ // case we will report an error when declaring the variable.
+ }
+
+ void AddNamespaceImport(const Entry* entry, Zone* zone) {
+ DCHECK_NULL(entry->import_name);
+ DCHECK_NULL(entry->export_name);
+ DCHECK_NOT_NULL(entry->local_name);
+ DCHECK_LE(0, entry->module_request);
+ namespace_imports_.Add(entry, zone);
+ }
+
+ Handle<FixedArray> SerializeRegularExports(Isolate* isolate,
+ Zone* zone) const;
+ void DeserializeRegularExports(Isolate* isolate, AstValueFactory* avfactory,
+ Handle<FixedArray> data);
+
private:
- ZoneList<ModuleEntry*> exports_;
- ZoneList<const ModuleEntry*> special_imports_;
- ZoneMap<const AstRawString*, const ModuleEntry*> regular_imports_;
+ // TODO(neis): Use STL datastructure instead of ZoneList?
+ ZoneMap<const AstRawString*, int> module_requests_;
+ ZoneList<const Entry*> special_exports_;
+ ZoneList<const Entry*> namespace_imports_;
+ ZoneMultimap<const AstRawString*, Entry*> regular_exports_;
+ ZoneMap<const AstRawString*, const Entry*> regular_imports_;
+
+ // If there are multiple export entries with the same export name, return the
+ // last of them (in source order). Otherwise return nullptr.
+ const Entry* FindDuplicateExport(Zone* zone) const;
// Find any implicitly indirect exports and make them explicit.
//
@@ -116,7 +190,15 @@ class ModuleDescriptor : public ZoneObject {
// into:
// import {a as b} from "X"; export {a as c} from "X";
// (The import entry is never deleted.)
- void MakeIndirectExportsExplicit();
+ void MakeIndirectExportsExplicit(Zone* zone);
+
+ int AddModuleRequest(const AstRawString* specifier) {
+ DCHECK_NOT_NULL(specifier);
+ auto it = module_requests_
+ .insert(std::make_pair(specifier, module_requests_.size()))
+ .first;
+ return it->second;
+ }
};
} // namespace internal
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index f19ee23de1..874c15991e 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -9,6 +9,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
#include "src/base/platform/platform.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -603,8 +604,8 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
PrintLiteralIndented(info, value, true);
} else {
EmbeddedVector<char, 256> buf;
- int pos = SNPrintF(buf, "%s (mode = %s", info,
- Variable::Mode2String(var->mode()));
+ int pos =
+ SNPrintF(buf, "%s (mode = %s", info, VariableMode2String(var->mode()));
SNPrintF(buf + pos, ")");
PrintLiteralIndented(buf.start(), value, true);
}
@@ -870,6 +871,9 @@ void AstPrinter::PrintTryStatement(TryStatement* node) {
case HandlerTable::DESUGARING:
prediction = "DESUGARING";
break;
+ case HandlerTable::ASYNC_AWAIT:
+ prediction = "ASYNC_AWAIT";
+ break;
}
Print(" %s\n", prediction);
}
@@ -897,34 +901,27 @@ void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
if (node->extends() != nullptr) {
PrintIndentedVisit("EXTENDS", node->extends());
}
- PrintProperties(node->properties());
+ PrintClassProperties(node->properties());
}
-
-void AstPrinter::PrintProperties(
- ZoneList<ObjectLiteral::Property*>* properties) {
+void AstPrinter::PrintClassProperties(
+ ZoneList<ClassLiteral::Property*>* properties) {
for (int i = 0; i < properties->length(); i++) {
- ObjectLiteral::Property* property = properties->at(i);
+ ClassLiteral::Property* property = properties->at(i);
const char* prop_kind = nullptr;
switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- prop_kind = "CONSTANT";
+ case ClassLiteral::Property::METHOD:
+ prop_kind = "METHOD";
break;
- case ObjectLiteral::Property::COMPUTED:
- prop_kind = "COMPUTED";
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- prop_kind = "MATERIALIZED_LITERAL";
- break;
- case ObjectLiteral::Property::PROTOTYPE:
- prop_kind = "PROTOTYPE";
- break;
- case ObjectLiteral::Property::GETTER:
+ case ClassLiteral::Property::GETTER:
prop_kind = "GETTER";
break;
- case ObjectLiteral::Property::SETTER:
+ case ClassLiteral::Property::SETTER:
prop_kind = "SETTER";
break;
+ case ClassLiteral::Property::FIELD:
+ prop_kind = "FIELD";
+ break;
}
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "PROPERTY%s - %s", property->is_static() ? " - STATIC" : "",
@@ -986,7 +983,40 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "literal_index = %d\n", node->literal_index());
PrintIndented(buf.start());
- PrintProperties(node->properties());
+ PrintObjectProperties(node->properties());
+}
+
+void AstPrinter::PrintObjectProperties(
+ ZoneList<ObjectLiteral::Property*>* properties) {
+ for (int i = 0; i < properties->length(); i++) {
+ ObjectLiteral::Property* property = properties->at(i);
+ const char* prop_kind = nullptr;
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ prop_kind = "CONSTANT";
+ break;
+ case ObjectLiteral::Property::COMPUTED:
+ prop_kind = "COMPUTED";
+ break;
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ prop_kind = "MATERIALIZED_LITERAL";
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ prop_kind = "PROTOTYPE";
+ break;
+ case ObjectLiteral::Property::GETTER:
+ prop_kind = "GETTER";
+ break;
+ case ObjectLiteral::Property::SETTER:
+ prop_kind = "SETTER";
+ break;
+ }
+ EmbeddedVector<char, 128> buf;
+ SNPrintF(buf, "PROPERTY - %s", prop_kind);
+ IndentedScope prop(this, buf.start());
+ PrintIndentedVisit("KEY", properties->at(i)->key());
+ PrintIndentedVisit("VALUE", properties->at(i)->value());
+ }
}
@@ -1028,9 +1058,6 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
case VariableLocation::CONTEXT:
SNPrintF(buf + pos, " context[%d]", var->index());
break;
- case VariableLocation::GLOBAL:
- SNPrintF(buf + pos, " global[%d]", var->index());
- break;
case VariableLocation::LOOKUP:
SNPrintF(buf + pos, " lookup");
break;
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index 9b0e22abc2..2d553babde 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -93,7 +93,8 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
Variable* var,
Handle<Object> value);
void PrintLabelsIndented(ZoneList<const AstRawString*>* labels);
- void PrintProperties(ZoneList<ObjectLiteral::Property*>* properties);
+ void PrintObjectProperties(ZoneList<ObjectLiteral::Property*>* properties);
+ void PrintClassProperties(ZoneList<ClassLiteral::Property*>* properties);
void PrintTryStatement(TryStatement* try_statement);
void inc_indent() { indent_++; }
diff --git a/deps/v8/src/ast/scopeinfo.cc b/deps/v8/src/ast/scopeinfo.cc
index 7189de3372..5354b8d737 100644
--- a/deps/v8/src/ast/scopeinfo.cc
+++ b/deps/v8/src/ast/scopeinfo.cc
@@ -2,33 +2,92 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast/scopeinfo.h"
-
#include <stdlib.h>
#include "src/ast/context-slot-cache.h"
#include "src/ast/scopes.h"
+#include "src/ast/variables.h"
#include "src/bootstrapper.h"
namespace v8 {
namespace internal {
+// An entry in ModuleVariableEntries consists of several slots:
+enum ModuleVariableEntryOffset {
+ kModuleVariableNameOffset,
+ kModuleVariableIndexOffset,
+ kModuleVariablePropertiesOffset,
+ kModuleVariableEntryLength // Sentinel value.
+};
-Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
- Scope* scope) {
- // Collect stack and context locals.
- ZoneList<Variable*> stack_locals(scope->StackLocalCount(), zone);
- ZoneList<Variable*> context_locals(scope->ContextLocalCount(), zone);
- ZoneList<Variable*> context_globals(scope->ContextGlobalCount(), zone);
+#ifdef DEBUG
+bool ScopeInfo::Equals(ScopeInfo* other) const {
+ if (length() != other->length()) return false;
+ for (int index = 0; index < length(); ++index) {
+ Object* entry = get(index);
+ Object* other_entry = other->get(index);
+ if (entry->IsSmi()) {
+ if (entry != other_entry) return false;
+ } else {
+ if (HeapObject::cast(entry)->map()->instance_type() !=
+ HeapObject::cast(other_entry)->map()->instance_type()) {
+ return false;
+ }
+ if (entry->IsString()) {
+ if (!String::cast(entry)->Equals(String::cast(other_entry))) {
+ return false;
+ }
+ } else if (entry->IsScopeInfo()) {
+ if (!ScopeInfo::cast(entry)->Equals(ScopeInfo::cast(other_entry))) {
+ return false;
+ }
+ } else if (entry->IsModuleInfo()) {
+ if (!ModuleInfo::cast(entry)->Equals(ModuleInfo::cast(other_entry))) {
+ return false;
+ }
+ } else {
+ UNREACHABLE();
+ return false;
+ }
+ }
+ }
+ return true;
+}
+#endif
+
+Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
+ MaybeHandle<ScopeInfo> outer_scope) {
+ // Collect variables.
+ ZoneList<Variable*>* locals = scope->locals();
+ int stack_local_count = 0;
+ int context_local_count = 0;
+ int module_vars_count = 0;
+ // Stack allocated block scope variables are allocated in the parent
+ // declaration scope, but are recorded in the block scope's scope info. First
+ // slot index indicates at which offset a particular scope starts in the
+ // parent declaration scope.
+ int first_slot_index = 0;
+ for (int i = 0; i < locals->length(); i++) {
+ Variable* var = locals->at(i);
+ switch (var->location()) {
+ case VariableLocation::LOCAL:
+ if (stack_local_count == 0) first_slot_index = var->index();
+ stack_local_count++;
+ break;
+ case VariableLocation::CONTEXT:
+ context_local_count++;
+ break;
+ case VariableLocation::MODULE:
+ module_vars_count++;
+ break;
+ default:
+ break;
+ }
+ }
+ DCHECK(module_vars_count == 0 || scope->is_module_scope());
- scope->CollectStackAndContextLocals(&stack_locals, &context_locals,
- &context_globals);
- const int stack_local_count = stack_locals.length();
- const int context_local_count = context_locals.length();
- const int context_global_count = context_globals.length();
// Make sure we allocate the correct amount.
DCHECK_EQ(scope->ContextLocalCount(), context_local_count);
- DCHECK_EQ(scope->ContextGlobalCount(), context_global_count);
// Determine use and location of the "this" binding if it is present.
VariableAllocationInfo receiver_info;
@@ -53,7 +112,6 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
// Determine use and location of the function variable if it is present.
VariableAllocationInfo function_name_info;
- VariableMode function_variable_mode;
if (scope->is_function_scope() &&
scope->AsDeclarationScope()->function_var() != nullptr) {
Variable* var = scope->AsDeclarationScope()->function_var();
@@ -65,20 +123,21 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
DCHECK(var->IsStackLocal());
function_name_info = STACK;
}
- function_variable_mode = var->mode();
} else {
function_name_info = NONE;
- function_variable_mode = VAR;
}
- DCHECK(context_global_count == 0 || scope->scope_type() == SCRIPT_SCOPE);
const bool has_function_name = function_name_info != NONE;
const bool has_receiver = receiver_info == STACK || receiver_info == CONTEXT;
const int parameter_count = scope->num_parameters();
+ const bool has_outer_scope_info = !outer_scope.is_null();
const int length = kVariablePartIndex + parameter_count +
(1 + stack_local_count) + 2 * context_local_count +
- 2 * context_global_count +
- (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0);
+ (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0) +
+ (has_outer_scope_info ? 1 : 0) +
+ (scope->is_module_scope()
+ ? 2 + kModuleVariableEntryLength * module_vars_count
+ : 0);
Factory* factory = isolate->factory();
Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
@@ -96,27 +155,29 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
}
// Encode the flags.
- int flags = ScopeTypeField::encode(scope->scope_type()) |
- CallsEvalField::encode(scope->calls_eval()) |
- LanguageModeField::encode(scope->language_mode()) |
- DeclarationScopeField::encode(scope->is_declaration_scope()) |
- ReceiverVariableField::encode(receiver_info) |
- HasNewTargetField::encode(has_new_target) |
- FunctionVariableField::encode(function_name_info) |
- FunctionVariableMode::encode(function_variable_mode) |
- AsmModuleField::encode(asm_module) |
- AsmFunctionField::encode(asm_function) |
- HasSimpleParametersField::encode(has_simple_parameters) |
- FunctionKindField::encode(function_kind);
+ int flags =
+ ScopeTypeField::encode(scope->scope_type()) |
+ CallsEvalField::encode(scope->calls_eval()) |
+ LanguageModeField::encode(scope->language_mode()) |
+ DeclarationScopeField::encode(scope->is_declaration_scope()) |
+ ReceiverVariableField::encode(receiver_info) |
+ HasNewTargetField::encode(has_new_target) |
+ FunctionVariableField::encode(function_name_info) |
+ AsmModuleField::encode(asm_module) |
+ AsmFunctionField::encode(asm_function) |
+ HasSimpleParametersField::encode(has_simple_parameters) |
+ FunctionKindField::encode(function_kind) |
+ HasOuterScopeInfoField::encode(has_outer_scope_info) |
+ IsDebugEvaluateScopeField::encode(scope->is_debug_evaluate_scope());
scope_info->SetFlags(flags);
+
scope_info->SetParameterCount(parameter_count);
scope_info->SetStackLocalCount(stack_local_count);
scope_info->SetContextLocalCount(context_local_count);
- scope_info->SetContextGlobalCount(context_global_count);
int index = kVariablePartIndex;
// Add parameters.
- DCHECK(index == scope_info->ParameterEntriesIndex());
+ DCHECK_EQ(index, scope_info->ParameterNamesIndex());
if (scope->is_declaration_scope()) {
for (int i = 0; i < parameter_count; ++i) {
scope_info->set(index++,
@@ -124,68 +185,66 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
}
}
- // Add stack locals' names. We are assuming that the stack locals'
- // slots are allocated in increasing order, so we can simply add
- // them to the ScopeInfo object.
- int first_slot_index;
- if (stack_local_count > 0) {
- first_slot_index = stack_locals[0]->index();
- } else {
- first_slot_index = 0;
- }
- DCHECK(index == scope_info->StackLocalFirstSlotIndex());
+ // Add stack locals' names, context locals' names and info, module variables'
+ // names and info. We are assuming that the stack locals' slots are allocated
+ // in increasing order, so we can simply add them to the ScopeInfo object.
+ // Context locals are added using their index.
+ DCHECK_EQ(index, scope_info->StackLocalFirstSlotIndex());
scope_info->set(index++, Smi::FromInt(first_slot_index));
- DCHECK(index == scope_info->StackLocalEntriesIndex());
- for (int i = 0; i < stack_local_count; ++i) {
- DCHECK(stack_locals[i]->index() == first_slot_index + i);
- scope_info->set(index++, *stack_locals[i]->name());
- }
-
- // Due to usage analysis, context-allocated locals are not necessarily in
- // increasing order: Some of them may be parameters which are allocated before
- // the non-parameter locals. When the non-parameter locals are sorted
- // according to usage, the allocated slot indices may not be in increasing
- // order with the variable list anymore. Thus, we first need to sort them by
- // context slot index before adding them to the ScopeInfo object.
- context_locals.Sort(&Variable::CompareIndex);
-
- // Add context locals' names.
- DCHECK(index == scope_info->ContextLocalNameEntriesIndex());
- for (int i = 0; i < context_local_count; ++i) {
- scope_info->set(index++, *context_locals[i]->name());
- }
-
- // Add context globals' names.
- DCHECK(index == scope_info->ContextGlobalNameEntriesIndex());
- for (int i = 0; i < context_global_count; ++i) {
- scope_info->set(index++, *context_globals[i]->name());
- }
-
- // Add context locals' info.
- DCHECK(index == scope_info->ContextLocalInfoEntriesIndex());
- for (int i = 0; i < context_local_count; ++i) {
- Variable* var = context_locals[i];
- uint32_t value =
- ContextLocalMode::encode(var->mode()) |
- ContextLocalInitFlag::encode(var->initialization_flag()) |
- ContextLocalMaybeAssignedFlag::encode(var->maybe_assigned());
- scope_info->set(index++, Smi::FromInt(value));
- }
-
- // Add context globals' info.
- DCHECK(index == scope_info->ContextGlobalInfoEntriesIndex());
- for (int i = 0; i < context_global_count; ++i) {
- Variable* var = context_globals[i];
- // TODO(ishell): do we need this kind of info for globals here?
- uint32_t value =
- ContextLocalMode::encode(var->mode()) |
- ContextLocalInitFlag::encode(var->initialization_flag()) |
- ContextLocalMaybeAssignedFlag::encode(var->maybe_assigned());
- scope_info->set(index++, Smi::FromInt(value));
+ DCHECK_EQ(index, scope_info->StackLocalNamesIndex());
+
+ int stack_local_base = index;
+ int context_local_base = stack_local_base + stack_local_count;
+ int context_local_info_base = context_local_base + context_local_count;
+ int module_var_entry = scope_info->ModuleVariablesIndex();
+
+ for (int i = 0; i < locals->length(); ++i) {
+ Variable* var = locals->at(i);
+ switch (var->location()) {
+ case VariableLocation::LOCAL: {
+ int local_index = var->index() - first_slot_index;
+ DCHECK_LE(0, local_index);
+ DCHECK_LT(local_index, stack_local_count);
+ scope_info->set(stack_local_base + local_index, *var->name());
+ break;
+ }
+ case VariableLocation::CONTEXT: {
+ // Due to duplicate parameters, context locals aren't guaranteed to come
+ // in order.
+ int local_index = var->index() - Context::MIN_CONTEXT_SLOTS;
+ DCHECK_LE(0, local_index);
+ DCHECK_LT(local_index, context_local_count);
+ uint32_t info = VariableModeField::encode(var->mode()) |
+ InitFlagField::encode(var->initialization_flag()) |
+ MaybeAssignedFlagField::encode(var->maybe_assigned());
+ scope_info->set(context_local_base + local_index, *var->name());
+ scope_info->set(context_local_info_base + local_index,
+ Smi::FromInt(info));
+ break;
+ }
+ case VariableLocation::MODULE: {
+ scope_info->set(module_var_entry + kModuleVariableNameOffset,
+ *var->name());
+ scope_info->set(module_var_entry + kModuleVariableIndexOffset,
+ Smi::FromInt(var->index()));
+ uint32_t properties =
+ VariableModeField::encode(var->mode()) |
+ InitFlagField::encode(var->initialization_flag()) |
+ MaybeAssignedFlagField::encode(var->maybe_assigned());
+ scope_info->set(module_var_entry + kModuleVariablePropertiesOffset,
+ Smi::FromInt(properties));
+ module_var_entry += kModuleVariableEntryLength;
+ break;
+ }
+ default:
+ break;
+ }
}
+ index += stack_local_count + 2 * context_local_count;
+
// If the receiver is allocated, add its index.
- DCHECK(index == scope_info->ReceiverEntryIndex());
+ DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
if (has_receiver) {
int var_index = scope->AsDeclarationScope()->receiver()->index();
scope_info->set(index++, Smi::FromInt(var_index));
@@ -194,7 +253,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
}
// If present, add the function variable name and its index.
- DCHECK(index == scope_info->FunctionNameEntryIndex());
+ DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
if (has_function_name) {
int var_index = scope->AsDeclarationScope()->function_var()->index();
scope_info->set(index++,
@@ -204,75 +263,130 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
var_index == scope_info->ContextLength() - 1);
}
- DCHECK(index == scope_info->length());
- DCHECK(scope->num_parameters() == scope_info->ParameterCount());
- DCHECK(scope->num_heap_slots() == scope_info->ContextLength() ||
- (scope->num_heap_slots() == kVariablePartIndex &&
- scope_info->ContextLength() == 0));
+ // If present, add the outer scope info.
+ DCHECK(index == scope_info->OuterScopeInfoIndex());
+ if (has_outer_scope_info) {
+ scope_info->set(index++, *outer_scope.ToHandleChecked());
+ }
+
+ // Module-specific information (only for module scopes).
+ if (scope->is_module_scope()) {
+ Handle<ModuleInfo> module_info =
+ ModuleInfo::New(isolate, zone, scope->AsModuleScope()->module());
+ DCHECK_EQ(index, scope_info->ModuleInfoIndex());
+ scope_info->set(index++, *module_info);
+ DCHECK_EQ(index, scope_info->ModuleVariableCountIndex());
+ scope_info->set(index++, Smi::FromInt(module_vars_count));
+ DCHECK_EQ(index, scope_info->ModuleVariablesIndex());
+ // The variable entries themselves have already been written above.
+ index += kModuleVariableEntryLength * module_vars_count;
+ }
+
+ DCHECK_EQ(index, scope_info->length());
+ DCHECK_EQ(scope->num_parameters(), scope_info->ParameterCount());
+ DCHECK_EQ(scope->num_heap_slots(), scope_info->ContextLength());
return scope_info;
}
+Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
+ Isolate* isolate, MaybeHandle<ScopeInfo> outer_scope) {
+ const bool has_outer_scope_info = !outer_scope.is_null();
+ const int length = kVariablePartIndex + 1 + (has_outer_scope_info ? 1 : 0);
+
+ Factory* factory = isolate->factory();
+ Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
+
+ // Encode the flags.
+ int flags =
+ ScopeTypeField::encode(WITH_SCOPE) | CallsEvalField::encode(false) |
+ LanguageModeField::encode(SLOPPY) | DeclarationScopeField::encode(false) |
+ ReceiverVariableField::encode(NONE) | HasNewTargetField::encode(false) |
+ FunctionVariableField::encode(NONE) | AsmModuleField::encode(false) |
+ AsmFunctionField::encode(false) | HasSimpleParametersField::encode(true) |
+ FunctionKindField::encode(kNormalFunction) |
+ HasOuterScopeInfoField::encode(has_outer_scope_info) |
+ IsDebugEvaluateScopeField::encode(false);
+ scope_info->SetFlags(flags);
+
+ scope_info->SetParameterCount(0);
+ scope_info->SetStackLocalCount(0);
+ scope_info->SetContextLocalCount(0);
+
+ int index = kVariablePartIndex;
+ DCHECK_EQ(index, scope_info->ParameterNamesIndex());
+ DCHECK_EQ(index, scope_info->StackLocalFirstSlotIndex());
+ scope_info->set(index++, Smi::FromInt(0));
+ DCHECK_EQ(index, scope_info->StackLocalNamesIndex());
+ DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
+ DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
+ DCHECK(index == scope_info->OuterScopeInfoIndex());
+ if (has_outer_scope_info) {
+ scope_info->set(index++, *outer_scope.ToHandleChecked());
+ }
+ DCHECK_EQ(index, scope_info->length());
+ DCHECK_EQ(0, scope_info->ParameterCount());
+ DCHECK_EQ(Context::MIN_CONTEXT_SLOTS, scope_info->ContextLength());
+ return scope_info;
+}
Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
DCHECK(isolate->bootstrapper()->IsActive());
const int stack_local_count = 0;
const int context_local_count = 1;
- const int context_global_count = 0;
const bool has_simple_parameters = true;
const VariableAllocationInfo receiver_info = CONTEXT;
const VariableAllocationInfo function_name_info = NONE;
- const VariableMode function_variable_mode = VAR;
const bool has_function_name = false;
const bool has_receiver = true;
+ const bool has_outer_scope_info = false;
const int parameter_count = 0;
const int length = kVariablePartIndex + parameter_count +
(1 + stack_local_count) + 2 * context_local_count +
- 2 * context_global_count +
- (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0);
+ (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0) +
+ (has_outer_scope_info ? 1 : 0);
Factory* factory = isolate->factory();
Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
// Encode the flags.
- int flags = ScopeTypeField::encode(SCRIPT_SCOPE) |
- CallsEvalField::encode(false) |
- LanguageModeField::encode(SLOPPY) |
- DeclarationScopeField::encode(true) |
- ReceiverVariableField::encode(receiver_info) |
- FunctionVariableField::encode(function_name_info) |
- FunctionVariableMode::encode(function_variable_mode) |
- AsmModuleField::encode(false) | AsmFunctionField::encode(false) |
- HasSimpleParametersField::encode(has_simple_parameters) |
- FunctionKindField::encode(FunctionKind::kNormalFunction);
+ int flags =
+ ScopeTypeField::encode(SCRIPT_SCOPE) | CallsEvalField::encode(false) |
+ LanguageModeField::encode(SLOPPY) | DeclarationScopeField::encode(true) |
+ ReceiverVariableField::encode(receiver_info) |
+ FunctionVariableField::encode(function_name_info) |
+ AsmModuleField::encode(false) | AsmFunctionField::encode(false) |
+ HasSimpleParametersField::encode(has_simple_parameters) |
+ FunctionKindField::encode(FunctionKind::kNormalFunction) |
+ HasOuterScopeInfoField::encode(has_outer_scope_info) |
+ IsDebugEvaluateScopeField::encode(false);
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
scope_info->SetStackLocalCount(stack_local_count);
scope_info->SetContextLocalCount(context_local_count);
- scope_info->SetContextGlobalCount(context_global_count);
int index = kVariablePartIndex;
const int first_slot_index = 0;
- DCHECK(index == scope_info->StackLocalFirstSlotIndex());
+ DCHECK_EQ(index, scope_info->StackLocalFirstSlotIndex());
scope_info->set(index++, Smi::FromInt(first_slot_index));
- DCHECK(index == scope_info->StackLocalEntriesIndex());
+ DCHECK_EQ(index, scope_info->StackLocalNamesIndex());
// Here we add info for context-allocated "this".
- DCHECK(index == scope_info->ContextLocalNameEntriesIndex());
+ DCHECK_EQ(index, scope_info->ContextLocalNamesIndex());
scope_info->set(index++, *isolate->factory()->this_string());
- DCHECK(index == scope_info->ContextLocalInfoEntriesIndex());
- const uint32_t value = ContextLocalMode::encode(CONST) |
- ContextLocalInitFlag::encode(kCreatedInitialized) |
- ContextLocalMaybeAssignedFlag::encode(kNotAssigned);
+ DCHECK_EQ(index, scope_info->ContextLocalInfosIndex());
+ const uint32_t value = VariableModeField::encode(CONST) |
+ InitFlagField::encode(kCreatedInitialized) |
+ MaybeAssignedFlagField::encode(kNotAssigned);
scope_info->set(index++, Smi::FromInt(value));
// And here we record that this scopeinfo binds a receiver.
- DCHECK(index == scope_info->ReceiverEntryIndex());
+ DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
const int receiver_index = Context::MIN_CONTEXT_SLOTS + 0;
scope_info->set(index++, Smi::FromInt(receiver_index));
- DCHECK(index == scope_info->FunctionNameEntryIndex());
-
+ DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
+ DCHECK_EQ(index, scope_info->OuterScopeInfoIndex());
DCHECK_EQ(index, scope_info->length());
DCHECK_EQ(scope_info->ParameterCount(), 0);
DCHECK_EQ(scope_info->ContextLength(), Context::MIN_CONTEXT_SLOTS + 1);
@@ -282,12 +396,12 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
ScopeInfo* ScopeInfo::Empty(Isolate* isolate) {
- return reinterpret_cast<ScopeInfo*>(isolate->heap()->empty_fixed_array());
+ return isolate->heap()->empty_scope_info();
}
ScopeType ScopeInfo::scope_type() {
- DCHECK(length() > 0);
+ DCHECK_LT(0, length());
return ScopeTypeField::decode(Flags());
}
@@ -325,19 +439,17 @@ int ScopeInfo::StackSlotCount() {
int ScopeInfo::ContextLength() {
if (length() > 0) {
int context_locals = ContextLocalCount();
- int context_globals = ContextGlobalCount();
bool function_name_context_slot =
FunctionVariableField::decode(Flags()) == CONTEXT;
- bool has_context = context_locals > 0 || context_globals > 0 ||
- function_name_context_slot ||
+ bool has_context = context_locals > 0 || function_name_context_slot ||
scope_type() == WITH_SCOPE ||
(scope_type() == BLOCK_SCOPE && CallsSloppyEval() &&
- is_declaration_scope()) ||
+ is_declaration_scope()) ||
(scope_type() == FUNCTION_SCOPE && CallsSloppyEval()) ||
scope_type() == MODULE_SCOPE;
if (has_context) {
- return Context::MIN_CONTEXT_SLOTS + context_locals + context_globals +
+ return Context::MIN_CONTEXT_SLOTS + context_locals +
(function_name_context_slot ? 1 : 0);
}
}
@@ -375,6 +487,30 @@ bool ScopeInfo::HasFunctionName() {
}
}
+bool ScopeInfo::HasOuterScopeInfo() {
+ if (length() > 0) {
+ return HasOuterScopeInfoField::decode(Flags());
+ } else {
+ return false;
+ }
+}
+
+bool ScopeInfo::IsDebugEvaluateScope() {
+ if (length() > 0) {
+ return IsDebugEvaluateScopeField::decode(Flags());
+ } else {
+ return false;
+ }
+}
+
+void ScopeInfo::SetIsDebugEvaluateScope() {
+ if (length() > 0) {
+ DCHECK_EQ(scope_type(), WITH_SCOPE);
+ SetFlags(Flags() | IsDebugEvaluateScopeField::encode(true));
+ } else {
+ UNREACHABLE();
+ }
+}
bool ScopeInfo::HasHeapAllocatedLocals() {
if (length() > 0) {
@@ -392,68 +528,85 @@ bool ScopeInfo::HasContext() {
String* ScopeInfo::FunctionName() {
DCHECK(HasFunctionName());
- return String::cast(get(FunctionNameEntryIndex()));
+ return String::cast(get(FunctionNameInfoIndex()));
}
+ScopeInfo* ScopeInfo::OuterScopeInfo() {
+ DCHECK(HasOuterScopeInfo());
+ return ScopeInfo::cast(get(OuterScopeInfoIndex()));
+}
+
+ModuleInfo* ScopeInfo::ModuleDescriptorInfo() {
+ DCHECK(scope_type() == MODULE_SCOPE);
+ return ModuleInfo::cast(get(ModuleInfoIndex()));
+}
String* ScopeInfo::ParameterName(int var) {
- DCHECK(0 <= var && var < ParameterCount());
- int info_index = ParameterEntriesIndex() + var;
+ DCHECK_LE(0, var);
+ DCHECK_LT(var, ParameterCount());
+ int info_index = ParameterNamesIndex() + var;
return String::cast(get(info_index));
}
String* ScopeInfo::LocalName(int var) {
- DCHECK(0 <= var && var < LocalCount());
- DCHECK(StackLocalEntriesIndex() + StackLocalCount() ==
- ContextLocalNameEntriesIndex());
- int info_index = StackLocalEntriesIndex() + var;
+ DCHECK_LE(0, var);
+ DCHECK_LT(var, LocalCount());
+ DCHECK(StackLocalNamesIndex() + StackLocalCount() ==
+ ContextLocalNamesIndex());
+ int info_index = StackLocalNamesIndex() + var;
return String::cast(get(info_index));
}
String* ScopeInfo::StackLocalName(int var) {
- DCHECK(0 <= var && var < StackLocalCount());
- int info_index = StackLocalEntriesIndex() + var;
+ DCHECK_LE(0, var);
+ DCHECK_LT(var, StackLocalCount());
+ int info_index = StackLocalNamesIndex() + var;
return String::cast(get(info_index));
}
int ScopeInfo::StackLocalIndex(int var) {
- DCHECK(0 <= var && var < StackLocalCount());
+ DCHECK_LE(0, var);
+ DCHECK_LT(var, StackLocalCount());
int first_slot_index = Smi::cast(get(StackLocalFirstSlotIndex()))->value();
return first_slot_index + var;
}
String* ScopeInfo::ContextLocalName(int var) {
- DCHECK(0 <= var && var < ContextLocalCount() + ContextGlobalCount());
- int info_index = ContextLocalNameEntriesIndex() + var;
+ DCHECK_LE(0, var);
+ DCHECK_LT(var, ContextLocalCount());
+ int info_index = ContextLocalNamesIndex() + var;
return String::cast(get(info_index));
}
VariableMode ScopeInfo::ContextLocalMode(int var) {
- DCHECK(0 <= var && var < ContextLocalCount() + ContextGlobalCount());
- int info_index = ContextLocalInfoEntriesIndex() + var;
+ DCHECK_LE(0, var);
+ DCHECK_LT(var, ContextLocalCount());
+ int info_index = ContextLocalInfosIndex() + var;
int value = Smi::cast(get(info_index))->value();
- return ContextLocalMode::decode(value);
+ return VariableModeField::decode(value);
}
InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
- DCHECK(0 <= var && var < ContextLocalCount() + ContextGlobalCount());
- int info_index = ContextLocalInfoEntriesIndex() + var;
+ DCHECK_LE(0, var);
+ DCHECK_LT(var, ContextLocalCount());
+ int info_index = ContextLocalInfosIndex() + var;
int value = Smi::cast(get(info_index))->value();
- return ContextLocalInitFlag::decode(value);
+ return InitFlagField::decode(value);
}
MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) {
- DCHECK(0 <= var && var < ContextLocalCount() + ContextGlobalCount());
- int info_index = ContextLocalInfoEntriesIndex() + var;
+ DCHECK_LE(0, var);
+ DCHECK_LT(var, ContextLocalCount());
+ int info_index = ContextLocalInfosIndex() + var;
int value = Smi::cast(get(info_index))->value();
- return ContextLocalMaybeAssignedFlag::decode(value);
+ return MaybeAssignedFlagField::decode(value);
}
bool ScopeInfo::VariableIsSynthetic(String* name) {
@@ -470,8 +623,8 @@ int ScopeInfo::StackSlotIndex(String* name) {
DCHECK(name->IsInternalizedString());
if (length() > 0) {
int first_slot_index = Smi::cast(get(StackLocalFirstSlotIndex()))->value();
- int start = StackLocalEntriesIndex();
- int end = StackLocalEntriesIndex() + StackLocalCount();
+ int start = StackLocalNamesIndex();
+ int end = start + StackLocalCount();
for (int i = start; i < end; ++i) {
if (name == get(i)) {
return i - start + first_slot_index;
@@ -481,27 +634,54 @@ int ScopeInfo::StackSlotIndex(String* name) {
return -1;
}
+int ScopeInfo::ModuleIndex(Handle<String> name, VariableMode* mode,
+ InitializationFlag* init_flag,
+ MaybeAssignedFlag* maybe_assigned_flag) {
+ DCHECK_EQ(scope_type(), MODULE_SCOPE);
+ DCHECK(name->IsInternalizedString());
+ DCHECK_NOT_NULL(mode);
+ DCHECK_NOT_NULL(init_flag);
+ DCHECK_NOT_NULL(maybe_assigned_flag);
+
+ int module_vars_count = Smi::cast(get(ModuleVariableCountIndex()))->value();
+ int entry = ModuleVariablesIndex();
+ for (int i = 0; i < module_vars_count; ++i) {
+ if (*name == get(entry + kModuleVariableNameOffset)) {
+ int index = Smi::cast(get(entry + kModuleVariableIndexOffset))->value();
+ int properties =
+ Smi::cast(get(entry + kModuleVariablePropertiesOffset))->value();
+ *mode = VariableModeField::decode(properties);
+ *init_flag = InitFlagField::decode(properties);
+ *maybe_assigned_flag = MaybeAssignedFlagField::decode(properties);
+ return index;
+ }
+ entry += kModuleVariableEntryLength;
+ }
+
+ return -1;
+}
int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
Handle<String> name, VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
DCHECK(name->IsInternalizedString());
- DCHECK(mode != NULL);
- DCHECK(init_flag != NULL);
+ DCHECK_NOT_NULL(mode);
+ DCHECK_NOT_NULL(init_flag);
+ DCHECK_NOT_NULL(maybe_assigned_flag);
+
if (scope_info->length() > 0) {
ContextSlotCache* context_slot_cache =
scope_info->GetIsolate()->context_slot_cache();
int result = context_slot_cache->Lookup(*scope_info, *name, mode, init_flag,
maybe_assigned_flag);
if (result != ContextSlotCache::kNotFound) {
- DCHECK(result < scope_info->ContextLength());
+ DCHECK_LT(result, scope_info->ContextLength());
return result;
}
- int start = scope_info->ContextLocalNameEntriesIndex();
- int end = scope_info->ContextLocalNameEntriesIndex() +
- scope_info->ContextLocalCount();
+ int start = scope_info->ContextLocalNamesIndex();
+ int end = start + scope_info->ContextLocalCount();
for (int i = start; i < end; ++i) {
if (*name == scope_info->get(i)) {
int var = i - start;
@@ -512,7 +692,7 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
context_slot_cache->Update(scope_info, name, *mode, *init_flag,
*maybe_assigned_flag, result);
- DCHECK(result < scope_info->ContextLength());
+ DCHECK_LT(result, scope_info->ContextLength());
return result;
}
}
@@ -520,46 +700,14 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
context_slot_cache->Update(scope_info, name, TEMPORARY,
kNeedsInitialization, kNotAssigned, -1);
}
- return -1;
-}
-
-int ScopeInfo::ContextGlobalSlotIndex(Handle<ScopeInfo> scope_info,
- Handle<String> name, VariableMode* mode,
- InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag) {
- DCHECK(name->IsInternalizedString());
- DCHECK(mode != NULL);
- DCHECK(init_flag != NULL);
- if (scope_info->length() > 0) {
- // This is to ensure that ContextLocalMode() and co. queries would work.
- DCHECK_EQ(scope_info->ContextGlobalNameEntriesIndex(),
- scope_info->ContextLocalNameEntriesIndex() +
- scope_info->ContextLocalCount());
- int base = scope_info->ContextLocalNameEntriesIndex();
- int start = scope_info->ContextGlobalNameEntriesIndex();
- int end = scope_info->ContextGlobalNameEntriesIndex() +
- scope_info->ContextGlobalCount();
- for (int i = start; i < end; ++i) {
- if (*name == scope_info->get(i)) {
- int var = i - base;
- *mode = scope_info->ContextLocalMode(var);
- *init_flag = scope_info->ContextLocalInitFlag(var);
- *maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
- int result = Context::MIN_CONTEXT_SLOTS + var;
- DCHECK(result < scope_info->ContextLength());
- return result;
- }
- }
- }
return -1;
}
-
String* ScopeInfo::ContextSlotName(int slot_index) {
int const var = slot_index - Context::MIN_CONTEXT_SLOTS;
DCHECK_LE(0, var);
- DCHECK_LT(var, ContextLocalCount() + ContextGlobalCount());
+ DCHECK_LT(var, ContextLocalCount());
return ContextLocalName(var);
}
@@ -572,8 +720,8 @@ int ScopeInfo::ParameterIndex(String* name) {
// last declaration of that parameter is used
// inside a function (and thus we need to look
// at the last index). Was bug# 1110337.
- int start = ParameterEntriesIndex();
- int end = ParameterEntriesIndex() + ParameterCount();
+ int start = ParameterNamesIndex();
+ int end = start + ParameterCount();
for (int i = end - 1; i >= start; --i) {
if (name == get(i)) {
return i - start;
@@ -586,19 +734,16 @@ int ScopeInfo::ParameterIndex(String* name) {
int ScopeInfo::ReceiverContextSlotIndex() {
if (length() > 0 && ReceiverVariableField::decode(Flags()) == CONTEXT)
- return Smi::cast(get(ReceiverEntryIndex()))->value();
+ return Smi::cast(get(ReceiverInfoIndex()))->value();
return -1;
}
-
-int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
+int ScopeInfo::FunctionContextSlotIndex(String* name) {
DCHECK(name->IsInternalizedString());
- DCHECK(mode != NULL);
if (length() > 0) {
if (FunctionVariableField::decode(Flags()) == CONTEXT &&
FunctionName() == name) {
- *mode = FunctionVariableMode::decode(Flags());
- return Smi::cast(get(FunctionNameEntryIndex() + 1))->value();
+ return Smi::cast(get(FunctionNameInfoIndex() + 1))->value();
}
}
return -1;
@@ -609,51 +754,45 @@ FunctionKind ScopeInfo::function_kind() {
return FunctionKindField::decode(Flags());
}
-
-int ScopeInfo::ParameterEntriesIndex() {
- DCHECK(length() > 0);
+int ScopeInfo::ParameterNamesIndex() {
+ DCHECK_LT(0, length());
return kVariablePartIndex;
}
int ScopeInfo::StackLocalFirstSlotIndex() {
- return ParameterEntriesIndex() + ParameterCount();
+ return ParameterNamesIndex() + ParameterCount();
}
+int ScopeInfo::StackLocalNamesIndex() { return StackLocalFirstSlotIndex() + 1; }
-int ScopeInfo::StackLocalEntriesIndex() {
- return StackLocalFirstSlotIndex() + 1;
+int ScopeInfo::ContextLocalNamesIndex() {
+ return StackLocalNamesIndex() + StackLocalCount();
}
-
-int ScopeInfo::ContextLocalNameEntriesIndex() {
- return StackLocalEntriesIndex() + StackLocalCount();
+int ScopeInfo::ContextLocalInfosIndex() {
+ return ContextLocalNamesIndex() + ContextLocalCount();
}
-
-int ScopeInfo::ContextGlobalNameEntriesIndex() {
- return ContextLocalNameEntriesIndex() + ContextLocalCount();
+int ScopeInfo::ReceiverInfoIndex() {
+ return ContextLocalInfosIndex() + ContextLocalCount();
}
-
-int ScopeInfo::ContextLocalInfoEntriesIndex() {
- return ContextGlobalNameEntriesIndex() + ContextGlobalCount();
+int ScopeInfo::FunctionNameInfoIndex() {
+ return ReceiverInfoIndex() + (HasAllocatedReceiver() ? 1 : 0);
}
-
-int ScopeInfo::ContextGlobalInfoEntriesIndex() {
- return ContextLocalInfoEntriesIndex() + ContextLocalCount();
+int ScopeInfo::OuterScopeInfoIndex() {
+ return FunctionNameInfoIndex() + (HasFunctionName() ? 2 : 0);
}
-
-int ScopeInfo::ReceiverEntryIndex() {
- return ContextGlobalInfoEntriesIndex() + ContextGlobalCount();
+int ScopeInfo::ModuleInfoIndex() {
+ return OuterScopeInfoIndex() + (HasOuterScopeInfo() ? 1 : 0);
}
+int ScopeInfo::ModuleVariableCountIndex() { return ModuleInfoIndex() + 1; }
-int ScopeInfo::FunctionNameEntryIndex() {
- return ReceiverEntryIndex() + (HasAllocatedReceiver() ? 1 : 0);
-}
+int ScopeInfo::ModuleVariablesIndex() { return ModuleVariableCountIndex() + 1; }
#ifdef DEBUG
@@ -686,19 +825,84 @@ void ScopeInfo::Print() {
PrintF("{");
if (length() > 0) {
- PrintList("parameters", 0, ParameterEntriesIndex(),
- ParameterEntriesIndex() + ParameterCount(), this);
- PrintList("stack slots", 0, StackLocalEntriesIndex(),
- StackLocalEntriesIndex() + StackLocalCount(), this);
+ PrintList("parameters", 0, ParameterNamesIndex(),
+ ParameterNamesIndex() + ParameterCount(), this);
+ PrintList("stack slots", 0, StackLocalNamesIndex(),
+ StackLocalNamesIndex() + StackLocalCount(), this);
PrintList("context slots", Context::MIN_CONTEXT_SLOTS,
- ContextLocalNameEntriesIndex(),
- ContextLocalNameEntriesIndex() + ContextLocalCount(), this);
+ ContextLocalNamesIndex(),
+ ContextLocalNamesIndex() + ContextLocalCount(), this);
+ // TODO(neis): Print module stuff if present.
}
PrintF("}\n");
}
#endif // DEBUG
+Handle<ModuleInfoEntry> ModuleInfoEntry::New(Isolate* isolate,
+ Handle<Object> export_name,
+ Handle<Object> local_name,
+ Handle<Object> import_name,
+ Handle<Object> module_request) {
+ Handle<ModuleInfoEntry> result = isolate->factory()->NewModuleInfoEntry();
+ result->set(kExportNameIndex, *export_name);
+ result->set(kLocalNameIndex, *local_name);
+ result->set(kImportNameIndex, *import_name);
+ result->set(kModuleRequestIndex, *module_request);
+ return result;
+}
+
+Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
+ ModuleDescriptor* descr) {
+ // Serialize module requests.
+ Handle<FixedArray> module_requests = isolate->factory()->NewFixedArray(
+ static_cast<int>(descr->module_requests().size()));
+ for (const auto& elem : descr->module_requests()) {
+ module_requests->set(elem.second, *elem.first->string());
+ }
+
+ // Serialize special exports.
+ Handle<FixedArray> special_exports =
+ isolate->factory()->NewFixedArray(descr->special_exports().length());
+ {
+ int i = 0;
+ for (auto entry : descr->special_exports()) {
+ special_exports->set(i++, *entry->Serialize(isolate));
+ }
+ }
+
+ // Serialize namespace imports.
+ Handle<FixedArray> namespace_imports =
+ isolate->factory()->NewFixedArray(descr->namespace_imports().length());
+ {
+ int i = 0;
+ for (auto entry : descr->namespace_imports()) {
+ namespace_imports->set(i++, *entry->Serialize(isolate));
+ }
+ }
+
+ // Serialize regular exports.
+ Handle<FixedArray> regular_exports =
+ descr->SerializeRegularExports(isolate, zone);
+
+ // Serialize regular imports.
+ Handle<FixedArray> regular_imports = isolate->factory()->NewFixedArray(
+ static_cast<int>(descr->regular_imports().size()));
+ {
+ int i = 0;
+ for (const auto& elem : descr->regular_imports()) {
+ regular_imports->set(i++, *elem.second->Serialize(isolate));
+ }
+ }
+
+ Handle<ModuleInfo> result = isolate->factory()->NewModuleInfo();
+ result->set(kModuleRequestsIndex, *module_requests);
+ result->set(kSpecialExportsIndex, *special_exports);
+ result->set(kRegularExportsIndex, *regular_exports);
+ result->set(kNamespaceImportsIndex, *namespace_imports);
+ result->set(kRegularImportsIndex, *regular_imports);
+ return result;
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/scopeinfo.h b/deps/v8/src/ast/scopeinfo.h
deleted file mode 100644
index 515c88b7de..0000000000
--- a/deps/v8/src/ast/scopeinfo.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_AST_SCOPEINFO_H_
-#define V8_AST_SCOPEINFO_H_
-
-#include "src/allocation.h"
-#include "src/ast/modules.h"
-#include "src/ast/variables.h"
-
-namespace v8 {
-namespace internal {
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_AST_SCOPEINFO_H_
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 7689786ce4..c531ef5901 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -7,6 +7,7 @@
#include <set>
#include "src/accessors.h"
+#include "src/ast/ast.h"
#include "src/bootstrapper.h"
#include "src/messages.h"
#include "src/parsing/parse-info.h"
@@ -24,11 +25,11 @@ namespace internal {
// this is ensured.
VariableMap::VariableMap(Zone* zone)
- : ZoneHashMap(ZoneHashMap::PointersMatch, 8, ZoneAllocationPolicy(zone)) {}
+ : ZoneHashMap(8, ZoneAllocationPolicy(zone)) {}
Variable* VariableMap::Declare(Zone* zone, Scope* scope,
const AstRawString* name, VariableMode mode,
- Variable::Kind kind,
+ VariableKind kind,
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag,
bool* added) {
@@ -41,13 +42,27 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
if (added) *added = p->value == nullptr;
if (p->value == nullptr) {
// The variable has not been declared yet -> insert it.
- DCHECK(p->key == name);
+ DCHECK_EQ(name, p->key);
p->value = new (zone) Variable(scope, name, mode, kind, initialization_flag,
maybe_assigned_flag);
}
return reinterpret_cast<Variable*>(p->value);
}
+void VariableMap::Remove(Variable* var) {
+ const AstRawString* name = var->raw_name();
+ ZoneHashMap::Remove(const_cast<AstRawString*>(name), name->hash());
+}
+
+void VariableMap::Add(Zone* zone, Variable* var) {
+ const AstRawString* name = var->raw_name();
+ Entry* p =
+ ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
+ ZoneAllocationPolicy(zone));
+ DCHECK_NULL(p->value);
+ DCHECK_EQ(name, p->key);
+ p->value = var;
+}
Variable* VariableMap::Lookup(const AstRawString* name) {
Entry* p = ZoneHashMap::Lookup(const_cast<AstRawString*>(name), name->hash());
@@ -60,7 +75,7 @@ Variable* VariableMap::Lookup(const AstRawString* name) {
}
SloppyBlockFunctionMap::SloppyBlockFunctionMap(Zone* zone)
- : ZoneHashMap(ZoneHashMap::PointersMatch, 8, ZoneAllocationPolicy(zone)) {}
+ : ZoneHashMap(8, ZoneAllocationPolicy(zone)) {}
void SloppyBlockFunctionMap::Declare(Zone* zone, const AstRawString* name,
SloppyBlockFunctionStatement* stmt) {
@@ -81,7 +96,7 @@ Scope::Scope(Zone* zone)
: zone_(zone),
outer_scope_(nullptr),
variables_(zone),
- ordered_variables_(4, zone),
+ locals_(4, zone),
decls_(4, zone),
scope_type_(SCRIPT_SCOPE) {
SetDefaults();
@@ -91,7 +106,7 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type)
: zone_(zone),
outer_scope_(outer_scope),
variables_(zone),
- ordered_variables_(4, zone),
+ locals_(4, zone),
decls_(4, zone),
scope_type_(scope_type) {
DCHECK_NE(SCRIPT_SCOPE, scope_type);
@@ -106,15 +121,21 @@ Scope::Snapshot::Snapshot(Scope* scope)
: outer_scope_(scope),
top_inner_scope_(scope->inner_scope_),
top_unresolved_(scope->unresolved_),
- top_temp_(scope->GetClosureScope()->temps()->length()) {}
+ top_local_(scope->GetClosureScope()->locals_.length()),
+ top_decl_(scope->GetClosureScope()->decls_.length()) {}
-DeclarationScope::DeclarationScope(Zone* zone)
+DeclarationScope::DeclarationScope(Zone* zone,
+ AstValueFactory* ast_value_factory)
: Scope(zone),
function_kind_(kNormalFunction),
- temps_(4, zone),
params_(4, zone),
sloppy_block_function_map_(zone) {
+ DCHECK_EQ(scope_type_, SCRIPT_SCOPE);
SetDefaults();
+
+ // Make sure that if we don't find the global 'this', it won't be declared as
+ // a regular dynamic global by predeclaring it with the right variable kind.
+ DeclareDynamicGlobal(ast_value_factory->this_string(), THIS_VARIABLE);
}
DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
@@ -122,73 +143,117 @@ DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
FunctionKind function_kind)
: Scope(zone, outer_scope, scope_type),
function_kind_(function_kind),
- temps_(4, zone),
params_(4, zone),
sloppy_block_function_map_(zone) {
+ DCHECK_NE(scope_type, SCRIPT_SCOPE);
SetDefaults();
asm_function_ = outer_scope_->IsAsmModule();
}
-ModuleScope::ModuleScope(Zone* zone, DeclarationScope* script_scope,
+ModuleScope::ModuleScope(DeclarationScope* script_scope,
AstValueFactory* ast_value_factory)
- : DeclarationScope(zone, script_scope, MODULE_SCOPE) {
+ : DeclarationScope(ast_value_factory->zone(), script_scope, MODULE_SCOPE,
+ kModule) {
+ Zone* zone = ast_value_factory->zone();
module_descriptor_ = new (zone) ModuleDescriptor(zone);
set_language_mode(STRICT);
DeclareThis(ast_value_factory);
}
-Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
- Handle<ScopeInfo> scope_info)
+ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
+ AstValueFactory* avfactory)
+ : DeclarationScope(avfactory->zone(), MODULE_SCOPE, scope_info) {
+ Zone* zone = avfactory->zone();
+ ModuleInfo* module_info = scope_info->ModuleDescriptorInfo();
+
+ set_language_mode(STRICT);
+ module_descriptor_ = new (zone) ModuleDescriptor(zone);
+
+ // Deserialize special exports.
+ Handle<FixedArray> special_exports(module_info->special_exports(), isolate);
+ for (int i = 0, n = special_exports->length(); i < n; ++i) {
+ Handle<ModuleInfoEntry> serialized_entry(
+ ModuleInfoEntry::cast(special_exports->get(i)), isolate);
+ module_descriptor_->AddSpecialExport(
+ ModuleDescriptor::Entry::Deserialize(isolate, avfactory,
+ serialized_entry),
+ avfactory->zone());
+ }
+
+ // Deserialize regular exports.
+ Handle<FixedArray> regular_exports(module_info->regular_exports(), isolate);
+ module_descriptor_->DeserializeRegularExports(isolate, avfactory,
+ regular_exports);
+
+ // Deserialize namespace imports.
+ Handle<FixedArray> namespace_imports(module_info->namespace_imports(),
+ isolate);
+ for (int i = 0, n = namespace_imports->length(); i < n; ++i) {
+ Handle<ModuleInfoEntry> serialized_entry(
+ ModuleInfoEntry::cast(namespace_imports->get(i)), isolate);
+ module_descriptor_->AddNamespaceImport(
+ ModuleDescriptor::Entry::Deserialize(isolate, avfactory,
+ serialized_entry),
+ avfactory->zone());
+ }
+
+ // Deserialize regular imports.
+ Handle<FixedArray> regular_imports(module_info->regular_imports(), isolate);
+ for (int i = 0, n = regular_imports->length(); i < n; ++i) {
+ Handle<ModuleInfoEntry> serialized_entry(
+ ModuleInfoEntry::cast(regular_imports->get(i)), isolate);
+ module_descriptor_->AddRegularImport(ModuleDescriptor::Entry::Deserialize(
+ isolate, avfactory, serialized_entry));
+ }
+}
+
+Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
: zone_(zone),
outer_scope_(nullptr),
variables_(zone),
- ordered_variables_(0, zone),
+ locals_(0, zone),
decls_(0, zone),
scope_info_(scope_info),
scope_type_(scope_type) {
+ DCHECK(!scope_info.is_null());
SetDefaults();
#ifdef DEBUG
already_resolved_ = true;
#endif
- if (scope_type == WITH_SCOPE) {
- DCHECK(scope_info.is_null());
- } else {
- if (scope_info->CallsEval()) RecordEvalCall();
- set_language_mode(scope_info->language_mode());
- num_heap_slots_ = scope_info->ContextLength();
- }
+ if (scope_info->CallsEval()) RecordEvalCall();
+ set_language_mode(scope_info->language_mode());
+ num_heap_slots_ = scope_info->ContextLength();
DCHECK_LE(Context::MIN_CONTEXT_SLOTS, num_heap_slots_);
-
- if (inner_scope != nullptr) AddInnerScope(inner_scope);
}
-DeclarationScope::DeclarationScope(Zone* zone, Scope* inner_scope,
- ScopeType scope_type,
+DeclarationScope::DeclarationScope(Zone* zone, ScopeType scope_type,
Handle<ScopeInfo> scope_info)
- : Scope(zone, inner_scope, scope_type, scope_info),
+ : Scope(zone, scope_type, scope_info),
function_kind_(scope_info->function_kind()),
- temps_(0, zone),
params_(0, zone),
sloppy_block_function_map_(zone) {
+ DCHECK_NE(scope_type, SCRIPT_SCOPE);
SetDefaults();
}
-Scope::Scope(Zone* zone, Scope* inner_scope,
- const AstRawString* catch_variable_name)
+Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
+ Handle<ScopeInfo> scope_info)
: zone_(zone),
outer_scope_(nullptr),
variables_(zone),
- ordered_variables_(0, zone),
+ locals_(0, zone),
decls_(0, zone),
+ scope_info_(scope_info),
scope_type_(CATCH_SCOPE) {
SetDefaults();
#ifdef DEBUG
already_resolved_ = true;
#endif
- if (inner_scope != nullptr) AddInnerScope(inner_scope);
- Variable* variable =
- variables_.Declare(zone, this, catch_variable_name, VAR, Variable::NORMAL,
- kCreatedInitialized);
+ // Cache the catch variable, even though it's also available via the
+ // scope_info, as the parser expects that a catch scope always has the catch
+ // variable as first and only variable.
+ Variable* variable = Declare(zone, this, catch_variable_name, VAR,
+ NORMAL_VARIABLE, kCreatedInitialized);
AllocateHeapSlot(variable);
}
@@ -200,31 +265,30 @@ void DeclarationScope::SetDefaults() {
force_eager_compilation_ = false;
has_arguments_parameter_ = false;
scope_uses_super_property_ = false;
+ has_rest_ = false;
receiver_ = nullptr;
new_target_ = nullptr;
function_ = nullptr;
arguments_ = nullptr;
this_function_ = nullptr;
arity_ = 0;
- rest_index_ = -1;
}
void Scope::SetDefaults() {
#ifdef DEBUG
scope_name_ = nullptr;
already_resolved_ = false;
+ needs_migration_ = false;
#endif
inner_scope_ = nullptr;
sibling_ = nullptr;
unresolved_ = nullptr;
- dynamics_ = nullptr;
start_position_ = kNoSourcePosition;
end_position_ = kNoSourcePosition;
num_stack_slots_ = 0;
num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
- num_global_slots_ = 0;
set_language_mode(SLOPPY);
@@ -237,6 +301,8 @@ void Scope::SetDefaults() {
force_context_allocation_ = false;
is_declaration_scope_ = false;
+
+ is_lazily_parsed_ = false;
}
bool Scope::HasSimpleParameters() {
@@ -244,6 +310,16 @@ bool Scope::HasSimpleParameters() {
return !scope->is_function_scope() || scope->has_simple_parameters();
}
+void DeclarationScope::set_asm_module() {
+ asm_module_ = true;
+ // Mark any existing inner function scopes as asm function scopes.
+ for (Scope* inner = inner_scope_; inner != nullptr; inner = inner->sibling_) {
+ if (inner->is_function_scope()) {
+ inner->AsDeclarationScope()->set_asm_function();
+ }
+ }
+}
+
bool Scope::IsAsmModule() const {
return is_function_scope() && AsDeclarationScope()->asm_module();
}
@@ -253,137 +329,77 @@ bool Scope::IsAsmFunction() const {
}
Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
- Context* context,
+ ScopeInfo* scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
DeserializationMode deserialization_mode) {
// Reconstruct the outer scope chain from a closure's context chain.
Scope* current_scope = nullptr;
Scope* innermost_scope = nullptr;
- while (!context->IsNativeContext()) {
- if (context->IsWithContext() || context->IsDebugEvaluateContext()) {
+ Scope* outer_scope = nullptr;
+ while (scope_info) {
+ if (scope_info->scope_type() == WITH_SCOPE) {
// For scope analysis, debug-evaluate is equivalent to a with scope.
- Scope* with_scope = new (zone)
- Scope(zone, current_scope, WITH_SCOPE, Handle<ScopeInfo>());
+ outer_scope = new (zone) Scope(zone, WITH_SCOPE, handle(scope_info));
+
// TODO(yangguo): Remove once debug-evaluate properly keeps track of the
// function scope in which we are evaluating.
- if (context->IsDebugEvaluateContext()) {
- with_scope->set_is_debug_evaluate_scope();
+ if (scope_info->IsDebugEvaluateScope()) {
+ outer_scope->set_is_debug_evaluate_scope();
}
- current_scope = with_scope;
- } else if (context->IsScriptContext()) {
- Handle<ScopeInfo> scope_info(context->scope_info(), isolate);
- DCHECK_EQ(scope_info->scope_type(), SCRIPT_SCOPE);
- current_scope = new (zone)
- DeclarationScope(zone, current_scope, SCRIPT_SCOPE, scope_info);
- } else if (context->IsFunctionContext()) {
- Handle<ScopeInfo> scope_info(context->closure()->shared()->scope_info(),
- isolate);
+ } else if (scope_info->scope_type() == SCRIPT_SCOPE) {
+ // If we reach a script scope, it's the outermost scope. Install the
+ // scope info of this script context onto the existing script scope to
+ // avoid nesting script scopes.
+ if (deserialization_mode == DeserializationMode::kIncludingVariables) {
+ script_scope->SetScriptScopeInfo(handle(scope_info));
+ }
+ DCHECK(!scope_info->HasOuterScopeInfo());
+ break;
+ } else if (scope_info->scope_type() == FUNCTION_SCOPE ||
+ scope_info->scope_type() == EVAL_SCOPE) {
// TODO(neis): For an eval scope, we currently create an ordinary function
// context. This is wrong and needs to be fixed.
// https://bugs.chromium.org/p/v8/issues/detail?id=5295
- DCHECK(scope_info->scope_type() == FUNCTION_SCOPE ||
- scope_info->scope_type() == EVAL_SCOPE);
- DeclarationScope* function_scope = new (zone)
- DeclarationScope(zone, current_scope, FUNCTION_SCOPE, scope_info);
- if (scope_info->IsAsmFunction()) function_scope->set_asm_function();
- if (scope_info->IsAsmModule()) function_scope->set_asm_module();
- current_scope = function_scope;
- } else if (context->IsBlockContext()) {
- Handle<ScopeInfo> scope_info(context->scope_info(), isolate);
- DCHECK_EQ(scope_info->scope_type(), BLOCK_SCOPE);
+ outer_scope =
+ new (zone) DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info));
+ if (scope_info->IsAsmFunction())
+ outer_scope->AsDeclarationScope()->set_asm_function();
+ if (scope_info->IsAsmModule())
+ outer_scope->AsDeclarationScope()->set_asm_module();
+ } else if (scope_info->scope_type() == BLOCK_SCOPE) {
if (scope_info->is_declaration_scope()) {
- current_scope = new (zone)
- DeclarationScope(zone, current_scope, BLOCK_SCOPE, scope_info);
+ outer_scope =
+ new (zone) DeclarationScope(zone, BLOCK_SCOPE, handle(scope_info));
} else {
- current_scope =
- new (zone) Scope(zone, current_scope, BLOCK_SCOPE, scope_info);
+ outer_scope = new (zone) Scope(zone, BLOCK_SCOPE, handle(scope_info));
}
+ } else if (scope_info->scope_type() == MODULE_SCOPE) {
+ outer_scope = new (zone)
+ ModuleScope(isolate, handle(scope_info), ast_value_factory);
} else {
- DCHECK(context->IsCatchContext());
- String* name = context->catch_name();
- current_scope =
- new (zone) Scope(zone, current_scope,
- ast_value_factory->GetString(handle(name, isolate)));
+ DCHECK_EQ(scope_info->scope_type(), CATCH_SCOPE);
+ DCHECK_EQ(scope_info->LocalCount(), 1);
+ String* name = scope_info->LocalName(0);
+ outer_scope = new (zone)
+ Scope(zone, ast_value_factory->GetString(handle(name, isolate)),
+ handle(scope_info));
}
- if (deserialization_mode == DeserializationMode::kDeserializeOffHeap) {
- current_scope->DeserializeScopeInfo(isolate, ast_value_factory);
+ if (deserialization_mode == DeserializationMode::kScopesOnly) {
+ outer_scope->scope_info_ = Handle<ScopeInfo>::null();
}
+ if (current_scope != nullptr) {
+ outer_scope->AddInnerScope(current_scope);
+ }
+ current_scope = outer_scope;
if (innermost_scope == nullptr) innermost_scope = current_scope;
- context = context->previous();
+ scope_info = scope_info->HasOuterScopeInfo() ? scope_info->OuterScopeInfo()
+ : nullptr;
}
+ if (innermost_scope == nullptr) return script_scope;
script_scope->AddInnerScope(current_scope);
- script_scope->PropagateScopeInfo();
- return (innermost_scope == NULL) ? script_scope : innermost_scope;
-}
-
-void Scope::DeserializeScopeInfo(Isolate* isolate,
- AstValueFactory* ast_value_factory) {
- if (scope_info_.is_null()) return;
-
- DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
-
- std::set<const AstRawString*> names_seen;
- // Internalize context local & globals variables.
- for (int var = 0; var < scope_info_->ContextLocalCount() +
- scope_info_->ContextGlobalCount();
- ++var) {
- Handle<String> name_handle(scope_info_->ContextLocalName(var), isolate);
- const AstRawString* name = ast_value_factory->GetString(name_handle);
- if (!names_seen.insert(name).second) continue;
- int index = Context::MIN_CONTEXT_SLOTS + var;
- VariableMode mode = scope_info_->ContextLocalMode(var);
- InitializationFlag init_flag = scope_info_->ContextLocalInitFlag(var);
- MaybeAssignedFlag maybe_assigned_flag =
- scope_info_->ContextLocalMaybeAssignedFlag(var);
- VariableLocation location = var < scope_info_->ContextLocalCount()
- ? VariableLocation::CONTEXT
- : VariableLocation::GLOBAL;
- Variable::Kind kind = Variable::NORMAL;
- if (index == scope_info_->ReceiverContextSlotIndex()) {
- kind = Variable::THIS;
- }
-
- Variable* result = variables_.Declare(zone(), this, name, mode, kind,
- init_flag, maybe_assigned_flag);
- result->AllocateTo(location, index);
- }
-
- // We must read parameters from the end since for multiply declared
- // parameters the value of the last declaration of that parameter is used
- // inside a function (and thus we need to look at the last index). Was bug#
- // 1110337.
- for (int index = scope_info_->ParameterCount() - 1; index >= 0; --index) {
- Handle<String> name_handle(scope_info_->ParameterName(index), isolate);
- const AstRawString* name = ast_value_factory->GetString(name_handle);
- if (!names_seen.insert(name).second) continue;
-
- VariableMode mode = DYNAMIC;
- InitializationFlag init_flag = kCreatedInitialized;
- MaybeAssignedFlag maybe_assigned_flag = kMaybeAssigned;
- VariableLocation location = VariableLocation::LOOKUP;
- Variable::Kind kind = Variable::NORMAL;
-
- Variable* result = variables_.Declare(zone(), this, name, mode, kind,
- init_flag, maybe_assigned_flag);
- result->AllocateTo(location, index);
- }
-
- // Internalize function proxy for this scope.
- if (scope_info_->HasFunctionName()) {
- Handle<String> name_handle(scope_info_->FunctionName(), isolate);
- const AstRawString* name = ast_value_factory->GetString(name_handle);
- VariableMode mode;
- int index = scope_info_->FunctionContextSlotIndex(*name_handle, &mode);
- if (index >= 0) {
- Variable* result = AsDeclarationScope()->DeclareFunctionVar(name);
- DCHECK_EQ(mode, result->mode());
- result->AllocateTo(VariableLocation::CONTEXT, index);
- }
- }
-
- scope_info_ = Handle<ScopeInfo>::null();
+ return innermost_scope;
}
DeclarationScope* Scope::AsDeclarationScope() {
@@ -410,10 +426,124 @@ int Scope::num_parameters() const {
return is_declaration_scope() ? AsDeclarationScope()->num_parameters() : 0;
}
-void Scope::Analyze(ParseInfo* info) {
+void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
+ DCHECK(is_sloppy(language_mode()));
+ DCHECK(is_function_scope() || is_eval_scope() || is_script_scope() ||
+ (is_block_scope() && outer_scope()->is_function_scope()));
+ DCHECK(HasSimpleParameters() || is_block_scope());
+ bool has_simple_parameters = HasSimpleParameters();
+ // For each variable which is used as a function declaration in a sloppy
+ // block,
+ SloppyBlockFunctionMap* map = sloppy_block_function_map();
+ for (ZoneHashMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
+ AstRawString* name = static_cast<AstRawString*>(p->key);
+
+ // If the variable wouldn't conflict with a lexical declaration
+ // or parameter,
+
+ // Check if there's a conflict with a parameter.
+ // This depends on the fact that functions always have a scope solely to
+ // hold complex parameters, and the names local to that scope are
+ // precisely the names of the parameters. IsDeclaredParameter(name) does
+ // not hold for names declared by complex parameters, nor are those
+ // bindings necessarily declared lexically, so we have to check for them
+ // explicitly. On the other hand, if there are not complex parameters,
+ // it is sufficient to just check IsDeclaredParameter.
+ if (!has_simple_parameters) {
+ if (outer_scope_->LookupLocal(name) != nullptr) {
+ continue;
+ }
+ } else {
+ if (IsDeclaredParameter(name)) {
+ continue;
+ }
+ }
+
+ bool var_created = false;
+
+ // Write in assignments to var for each block-scoped function declaration
+ auto delegates = static_cast<SloppyBlockFunctionStatement*>(p->value);
+
+ DeclarationScope* decl_scope = this;
+ while (decl_scope->is_eval_scope()) {
+ decl_scope = decl_scope->outer_scope()->GetDeclarationScope();
+ }
+ Scope* outer_scope = decl_scope->outer_scope();
+
+ for (SloppyBlockFunctionStatement* delegate = delegates;
+ delegate != nullptr; delegate = delegate->next()) {
+ // Check if there's a conflict with a lexical declaration
+ Scope* query_scope = delegate->scope()->outer_scope();
+ Variable* var = nullptr;
+ bool should_hoist = true;
+
+ // Note that we perform this loop for each delegate named 'name',
+ // which may duplicate work if those delegates share scopes.
+ // It is not sufficient to just do a Lookup on query_scope: for
+ // example, that does not prevent hoisting of the function in
+ // `{ let e; try {} catch (e) { function e(){} } }`
+ do {
+ var = query_scope->LookupLocal(name);
+ if (var != nullptr && IsLexicalVariableMode(var->mode())) {
+ should_hoist = false;
+ break;
+ }
+ query_scope = query_scope->outer_scope();
+ } while (query_scope != outer_scope);
+
+ if (!should_hoist) continue;
+
+ // Declare a var-style binding for the function in the outer scope
+ if (!var_created) {
+ var_created = true;
+ VariableProxy* proxy = factory->NewVariableProxy(name, NORMAL_VARIABLE);
+ Declaration* declaration =
+ factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
+ // Based on the preceding check, it doesn't matter what we pass as
+ // allow_harmony_restrictive_generators and
+ // sloppy_mode_block_scope_function_redefinition.
+ bool ok = true;
+ DeclareVariable(declaration, VAR,
+ Variable::DefaultInitializationFlag(VAR), false,
+ nullptr, &ok);
+ CHECK(ok); // Based on the preceding check, this should not fail
+ }
+
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, NewUnresolved(factory, name),
+ delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
+ Statement* statement =
+ factory->NewExpressionStatement(assignment, kNoSourcePosition);
+ delegate->set_statement(statement);
+ }
+ }
+}
+
+void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
DCHECK(info->literal() != NULL);
DeclarationScope* scope = info->literal()->scope();
+ Handle<ScopeInfo> outer_scope_info;
+ if (info->maybe_outer_scope_info().ToHandle(&outer_scope_info)) {
+ if (scope->outer_scope()) {
+ DeclarationScope* script_scope = new (info->zone())
+ DeclarationScope(info->zone(), info->ast_value_factory());
+ info->set_script_scope(script_scope);
+ scope->ReplaceOuterScope(Scope::DeserializeScopeChain(
+ info->isolate(), info->zone(), *outer_scope_info, script_scope,
+ info->ast_value_factory(),
+ Scope::DeserializationMode::kIncludingVariables));
+ } else {
+ DCHECK_EQ(outer_scope_info->scope_type(), SCRIPT_SCOPE);
+ scope->SetScriptScopeInfo(outer_scope_info);
+ }
+ }
+
+ if (scope->is_eval_scope() && is_sloppy(scope->language_mode())) {
+ AstNodeFactory factory(info->ast_value_factory());
+ scope->HoistSloppyBlockFunctions(&factory);
+ }
+
// We are compiling one of three cases:
// 1) top-level code,
// 2) a function/eval/module on the top-level
@@ -422,10 +552,13 @@ void Scope::Analyze(ParseInfo* info) {
scope->outer_scope()->scope_type() == SCRIPT_SCOPE ||
scope->outer_scope()->already_resolved_);
- // Allocate the variables.
- {
- AstNodeFactory ast_node_factory(info->ast_value_factory());
- scope->AllocateVariables(info, &ast_node_factory);
+ scope->AllocateVariables(info, mode);
+
+ // Ensuring that the outer script scope has a scope info avoids having
+ // special case for native contexts vs other contexts.
+ if (info->script_scope()->scope_info_.is_null()) {
+ info->script_scope()->scope_info_ =
+ handle(ScopeInfo::Empty(info->isolate()));
}
#ifdef DEBUG
@@ -446,41 +579,91 @@ void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) {
bool subclass_constructor = IsSubclassConstructor(function_kind_);
Variable* var = Declare(
zone(), this, ast_value_factory->this_string(),
- subclass_constructor ? CONST : VAR, Variable::THIS,
+ subclass_constructor ? CONST : VAR, THIS_VARIABLE,
subclass_constructor ? kNeedsInitialization : kCreatedInitialized);
receiver_ = var;
}
+void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) {
+ DCHECK(is_function_scope());
+ DCHECK(!is_arrow_scope());
+
+ arguments_ = LookupLocal(ast_value_factory->arguments_string());
+ if (arguments_ == nullptr) {
+ // Declare 'arguments' variable which exists in all non arrow functions.
+ // Note that it might never be accessed, in which case it won't be
+ // allocated during variable allocation.
+ arguments_ = Declare(zone(), this, ast_value_factory->arguments_string(),
+ VAR, NORMAL_VARIABLE, kCreatedInitialized);
+ } else if (IsLexicalVariableMode(arguments_->mode())) {
+ // Check if there's lexically declared variable named arguments to avoid
+ // redeclaration. See ES#sec-functiondeclarationinstantiation, step 20.
+ arguments_ = nullptr;
+ }
+}
+
void DeclarationScope::DeclareDefaultFunctionVariables(
AstValueFactory* ast_value_factory) {
DCHECK(is_function_scope());
DCHECK(!is_arrow_scope());
- // Declare 'arguments' variable which exists in all non arrow functions.
- // Note that it might never be accessed, in which case it won't be
- // allocated during variable allocation.
- arguments_ = Declare(zone(), this, ast_value_factory->arguments_string(), VAR,
- Variable::ARGUMENTS, kCreatedInitialized);
+ DeclareThis(ast_value_factory);
new_target_ = Declare(zone(), this, ast_value_factory->new_target_string(),
- CONST, Variable::NORMAL, kCreatedInitialized);
+ CONST, NORMAL_VARIABLE, kCreatedInitialized);
if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
IsAccessorFunction(function_kind_)) {
this_function_ =
Declare(zone(), this, ast_value_factory->this_function_string(), CONST,
- Variable::NORMAL, kCreatedInitialized);
+ NORMAL_VARIABLE, kCreatedInitialized);
}
}
Variable* DeclarationScope::DeclareFunctionVar(const AstRawString* name) {
DCHECK(is_function_scope());
DCHECK_NULL(function_);
- VariableMode mode = is_strict(language_mode()) ? CONST : CONST_LEGACY;
- function_ = new (zone())
- Variable(this, name, mode, Variable::NORMAL, kCreatedInitialized);
+ DCHECK_NULL(variables_.Lookup(name));
+ VariableKind kind = is_sloppy(language_mode()) ? SLOPPY_FUNCTION_NAME_VARIABLE
+ : NORMAL_VARIABLE;
+ function_ =
+ new (zone()) Variable(this, name, CONST, kind, kCreatedInitialized);
+ if (calls_sloppy_eval()) {
+ NonLocal(name, DYNAMIC);
+ } else {
+ variables_.Add(zone(), function_);
+ }
return function_;
}
+bool Scope::HasBeenRemoved() const {
+ // TODO(neis): Store this information somewhere instead of calculating it.
+
+ if (!is_block_scope()) return false; // Shortcut.
+
+ Scope* parent = outer_scope();
+ if (parent == nullptr) {
+ DCHECK(is_script_scope());
+ return false;
+ }
+
+ Scope* sibling = parent->inner_scope();
+ for (; sibling != nullptr; sibling = sibling->sibling()) {
+ if (sibling == this) return false;
+ }
+
+ DCHECK_NULL(inner_scope_);
+ return true;
+}
+
+Scope* Scope::GetUnremovedScope() {
+ Scope* scope = this;
+ while (scope != nullptr && scope->HasBeenRemoved()) {
+ scope = scope->outer_scope();
+ }
+ DCHECK_NOT_NULL(scope);
+ return scope;
+}
+
Scope* Scope::FinalizeBlockScope() {
DCHECK(is_block_scope());
@@ -530,7 +713,7 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
DCHECK_EQ(new_parent, new_parent->GetClosureScope());
DCHECK_NULL(new_parent->inner_scope_);
DCHECK_NULL(new_parent->unresolved_);
- DCHECK_EQ(0, new_parent->temps()->length());
+ DCHECK_EQ(0, new_parent->locals_.length());
Scope* inner_scope = new_parent->sibling_;
if (inner_scope != top_inner_scope_) {
for (; inner_scope->sibling() != top_inner_scope_;
@@ -557,25 +740,31 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
outer_scope_->unresolved_ = top_unresolved_;
}
- if (outer_scope_->GetClosureScope()->temps()->length() != top_temp_) {
- ZoneList<Variable*>* temps = outer_scope_->GetClosureScope()->temps();
- for (int i = top_temp_; i < temps->length(); i++) {
- Variable* temp = temps->at(i);
- DCHECK_EQ(temp->scope(), temp->scope()->GetClosureScope());
- DCHECK_NE(temp->scope(), new_parent);
- temp->set_scope(new_parent);
- new_parent->AddTemporary(temp);
+ // TODO(verwaest): This currently only moves do-expression declared variables
+ // in default arguments that weren't already previously declared with the same
+ // name in the closure-scope. See
+ // test/mjsunit/harmony/default-parameter-do-expression.js.
+ DeclarationScope* outer_closure = outer_scope_->GetClosureScope();
+ for (int i = top_local_; i < outer_closure->locals_.length(); i++) {
+ Variable* local = outer_closure->locals_.at(i);
+ DCHECK(local->mode() == TEMPORARY || local->mode() == VAR);
+ DCHECK_EQ(local->scope(), local->scope()->GetClosureScope());
+ DCHECK_NE(local->scope(), new_parent);
+ local->set_scope(new_parent);
+ new_parent->AddLocal(local);
+ if (local->mode() == VAR) {
+ outer_closure->variables_.Remove(local);
+ new_parent->variables_.Add(new_parent->zone(), local);
}
- temps->Rewind(top_temp_);
}
+ outer_closure->locals_.Rewind(top_local_);
+ outer_closure->decls_.Rewind(top_decl_);
}
void Scope::ReplaceOuterScope(Scope* outer) {
DCHECK_NOT_NULL(outer);
DCHECK_NOT_NULL(outer_scope_);
DCHECK(!already_resolved_);
- DCHECK(!outer->already_resolved_);
- DCHECK(!outer_scope_->already_resolved_);
outer_scope_->RemoveInnerScope(this);
outer->AddInnerScope(this);
outer_scope_ = outer;
@@ -589,57 +778,44 @@ void Scope::PropagateUsageFlagsToScope(Scope* other) {
if (calls_eval()) other->RecordEvalCall();
}
-
-Variable* Scope::LookupLocal(const AstRawString* name) {
- Variable* result = variables_.Lookup(name);
- if (result != NULL || scope_info_.is_null()) {
- return result;
- }
+Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
Handle<String> name_handle = name->string();
// The Scope is backed up by ScopeInfo. This means it cannot operate in a
// heap-independent mode, and all strings must be internalized immediately. So
// it's ok to get the Handle<String> here.
// If we have a serialized scope info, we might find the variable there.
// There should be no local slot with the given name.
- DCHECK(scope_info_->StackSlotIndex(*name_handle) < 0);
+ DCHECK_LT(scope_info_->StackSlotIndex(*name_handle), 0);
- // Check context slot lookup.
VariableMode mode;
- VariableLocation location = VariableLocation::CONTEXT;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
+
+ VariableLocation location = VariableLocation::CONTEXT;
int index = ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode,
&init_flag, &maybe_assigned_flag);
- if (index < 0) {
- location = VariableLocation::GLOBAL;
- index = ScopeInfo::ContextGlobalSlotIndex(scope_info_, name_handle, &mode,
- &init_flag, &maybe_assigned_flag);
+ if (index < 0 && scope_type() == MODULE_SCOPE) {
+ location = VariableLocation::MODULE;
+ index = scope_info_->ModuleIndex(name_handle, &mode, &init_flag,
+ &maybe_assigned_flag);
}
+
if (index < 0) {
- // Check parameters.
- index = scope_info_->ParameterIndex(*name_handle);
- if (index < 0) return NULL;
-
- mode = DYNAMIC;
- location = VariableLocation::LOOKUP;
- init_flag = kCreatedInitialized;
- // Be conservative and flag parameters as maybe assigned. Better information
- // would require ScopeInfo to serialize the maybe_assigned bit also for
- // parameters.
- maybe_assigned_flag = kMaybeAssigned;
- } else {
- DCHECK(location != VariableLocation::GLOBAL ||
- (is_script_scope() && IsDeclaredVariableMode(mode) &&
- !IsLexicalVariableMode(mode)));
+ index = scope_info_->FunctionContextSlotIndex(*name_handle);
+ if (index < 0) return nullptr; // Nowhere found.
+ Variable* var = AsDeclarationScope()->DeclareFunctionVar(name);
+ DCHECK_EQ(CONST, var->mode());
+ var->AllocateTo(VariableLocation::CONTEXT, index);
+ return variables_.Lookup(name);
}
- Variable::Kind kind = Variable::NORMAL;
+ VariableKind kind = NORMAL_VARIABLE;
if (location == VariableLocation::CONTEXT &&
index == scope_info_->ReceiverContextSlotIndex()) {
- kind = Variable::THIS;
+ kind = THIS_VARIABLE;
}
// TODO(marja, rossberg): Correctly declare FUNCTION, CLASS, NEW_TARGET, and
- // ARGUMENTS bindings as their corresponding Variable::Kind.
+ // ARGUMENTS bindings as their corresponding VariableKind.
Variable* var = variables_.Declare(zone(), this, name, mode, kind, init_flag,
maybe_assigned_flag);
@@ -647,24 +823,6 @@ Variable* Scope::LookupLocal(const AstRawString* name) {
return var;
}
-Variable* DeclarationScope::LookupFunctionVar(const AstRawString* name) {
- if (function_ != nullptr && function_->raw_name() == name) {
- return function_;
- } else if (!scope_info_.is_null()) {
- // If we are backed by a scope info, try to lookup the variable there.
- VariableMode mode;
- int index = scope_info_->FunctionContextSlotIndex(*(name->string()), &mode);
- if (index < 0) return nullptr;
- Variable* var = DeclareFunctionVar(name);
- DCHECK_EQ(mode, var->mode());
- var->AllocateTo(VariableLocation::CONTEXT, index);
- return var;
- } else {
- return nullptr;
- }
-}
-
-
Variable* Scope::Lookup(const AstRawString* name) {
for (Scope* scope = this;
scope != NULL;
@@ -679,21 +837,22 @@ Variable* DeclarationScope::DeclareParameter(
const AstRawString* name, VariableMode mode, bool is_optional, bool is_rest,
bool* is_duplicate, AstValueFactory* ast_value_factory) {
DCHECK(!already_resolved_);
- DCHECK(is_function_scope());
+ DCHECK(is_function_scope() || is_module_scope());
+ DCHECK(!has_rest_);
DCHECK(!is_optional || !is_rest);
Variable* var;
if (mode == TEMPORARY) {
var = NewTemporary(name);
} else {
- var = Declare(zone(), this, name, mode, Variable::NORMAL,
- kCreatedInitialized);
+ var =
+ Declare(zone(), this, name, mode, NORMAL_VARIABLE, kCreatedInitialized);
// TODO(wingo): Avoid O(n^2) check.
*is_duplicate = IsDeclaredParameter(name);
}
if (!is_optional && !is_rest && arity_ == params_.length()) {
++arity_;
}
- if (is_rest) rest_index_ = num_parameters();
+ has_rest_ = is_rest;
params_.Add(var, zone());
if (name == ast_value_factory->arguments_string()) {
has_arguments_parameter_ = true;
@@ -702,7 +861,7 @@ Variable* DeclarationScope::DeclareParameter(
}
Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
- InitializationFlag init_flag, Variable::Kind kind,
+ InitializationFlag init_flag, VariableKind kind,
MaybeAssignedFlag maybe_assigned_flag) {
DCHECK(!already_resolved_);
// This function handles VAR, LET, and CONST modes. DYNAMIC variables are
@@ -713,10 +872,138 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
maybe_assigned_flag);
}
+Variable* Scope::DeclareVariable(
+ Declaration* declaration, VariableMode mode, InitializationFlag init,
+ bool allow_harmony_restrictive_generators,
+ bool* sloppy_mode_block_scope_function_redefinition, bool* ok) {
+ DCHECK(IsDeclaredVariableMode(mode));
+ DCHECK(!already_resolved_);
+
+ if (mode == VAR && !is_declaration_scope()) {
+ return GetDeclarationScope()->DeclareVariable(
+ declaration, mode, init, allow_harmony_restrictive_generators,
+ sloppy_mode_block_scope_function_redefinition, ok);
+ }
+ DCHECK(!is_catch_scope());
+ DCHECK(!is_with_scope());
+ DCHECK(is_declaration_scope() ||
+ (IsLexicalVariableMode(mode) && is_block_scope()));
+
+ VariableProxy* proxy = declaration->proxy();
+ DCHECK(proxy->raw_name() != NULL);
+ const AstRawString* name = proxy->raw_name();
+ bool is_function_declaration = declaration->IsFunctionDeclaration();
+
+ Variable* var = nullptr;
+ if (is_eval_scope() && is_sloppy(language_mode()) && mode == VAR) {
+ // In a var binding in a sloppy direct eval, pollute the enclosing scope
+ // with this new binding by doing the following:
+ // The proxy is bound to a lookup variable to force a dynamic declaration
+ // using the DeclareEvalVar or DeclareEvalFunction runtime functions.
+ VariableKind kind = NORMAL_VARIABLE;
+ // TODO(sigurds) figure out if kNotAssigned is OK here
+ var = new (zone()) Variable(this, name, mode, kind, init, kNotAssigned);
+ var->AllocateTo(VariableLocation::LOOKUP, -1);
+ } else {
+ // Declare the variable in the declaration scope.
+ var = LookupLocal(name);
+ if (var == NULL) {
+ // Declare the name.
+ VariableKind kind = NORMAL_VARIABLE;
+ if (is_function_declaration) {
+ kind = FUNCTION_VARIABLE;
+ }
+ var = DeclareLocal(name, mode, init, kind, kNotAssigned);
+ } else if (IsLexicalVariableMode(mode) ||
+ IsLexicalVariableMode(var->mode())) {
+ // Allow duplicate function decls for web compat, see bug 4693.
+ bool duplicate_allowed = false;
+ if (is_sloppy(language_mode()) && is_function_declaration &&
+ var->is_function()) {
+ DCHECK(IsLexicalVariableMode(mode) &&
+ IsLexicalVariableMode(var->mode()));
+ // If the duplication is allowed, then the var will show up
+ // in the SloppyBlockFunctionMap and the new FunctionKind
+ // will be a permitted duplicate.
+ FunctionKind function_kind =
+ declaration->AsFunctionDeclaration()->fun()->kind();
+ duplicate_allowed =
+ GetDeclarationScope()->sloppy_block_function_map()->Lookup(
+ const_cast<AstRawString*>(name), name->hash()) != nullptr &&
+ !IsAsyncFunction(function_kind) &&
+ !(allow_harmony_restrictive_generators &&
+ IsGeneratorFunction(function_kind));
+ }
+ if (duplicate_allowed) {
+ *sloppy_mode_block_scope_function_redefinition = true;
+ } else {
+ // The name was declared in this scope before; check for conflicting
+ // re-declarations. We have a conflict if either of the declarations
+ // is not a var (in script scope, we also have to ignore legacy const
+ // for compatibility). There is similar code in runtime.cc in the
+ // Declare functions. The function CheckConflictingVarDeclarations
+ // checks for var and let bindings from different scopes whereas this
+ // is a check for conflicting declarations within the same scope. This
+ // check also covers the special case
+ //
+ // function () { let x; { var x; } }
+ //
+ // because the var declaration is hoisted to the function scope where
+ // 'x' is already bound.
+ DCHECK(IsDeclaredVariableMode(var->mode()));
+ // In harmony we treat re-declarations as early errors. See
+ // ES5 16 for a definition of early errors.
+ *ok = false;
+ return nullptr;
+ }
+ } else if (mode == VAR) {
+ var->set_maybe_assigned();
+ }
+ }
+ DCHECK_NOT_NULL(var);
+
+ // We add a declaration node for every declaration. The compiler
+ // will only generate code if necessary. In particular, declarations
+ // for inner local variables that do not represent functions won't
+ // result in any generated code.
+ //
+ // This will lead to multiple declaration nodes for the
+ // same variable if it is declared several times. This is not a
+ // semantic issue, but it may be a performance issue since it may
+ // lead to repeated DeclareEvalVar or DeclareEvalFunction calls.
+ decls_.Add(declaration, zone());
+ proxy->BindTo(var);
+ return var;
+}
+
+VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
+ const AstRawString* name,
+ int start_position, int end_position,
+ VariableKind kind) {
+ // Note that we must not share the unresolved variables with
+ // the same name because they may be removed selectively via
+ // RemoveUnresolved().
+ DCHECK(!already_resolved_);
+ DCHECK_EQ(!needs_migration_, factory->zone() == zone());
+ VariableProxy* proxy =
+ factory->NewVariableProxy(name, kind, start_position, end_position);
+ proxy->set_next_unresolved(unresolved_);
+ unresolved_ = proxy;
+ return proxy;
+}
+
+void Scope::AddUnresolved(VariableProxy* proxy) {
+ DCHECK(!already_resolved_);
+ DCHECK(!proxy->is_resolved());
+ proxy->set_next_unresolved(unresolved_);
+ unresolved_ = proxy;
+}
+
Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
- Variable::Kind kind) {
+ VariableKind kind) {
DCHECK(is_script_scope());
- return Declare(zone(), this, name, DYNAMIC_GLOBAL, kind, kCreatedInitialized);
+ return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind,
+ kCreatedInitialized);
}
@@ -739,24 +1026,34 @@ bool Scope::RemoveUnresolved(VariableProxy* var) {
return false;
}
+bool Scope::RemoveUnresolved(const AstRawString* name) {
+ if (unresolved_->raw_name() == name) {
+ VariableProxy* removed = unresolved_;
+ unresolved_ = unresolved_->next_unresolved();
+ removed->set_next_unresolved(nullptr);
+ return true;
+ }
+ VariableProxy* current = unresolved_;
+ while (current != nullptr) {
+ VariableProxy* next = current->next_unresolved();
+ if (next->raw_name() == name) {
+ current->set_next_unresolved(next->next_unresolved());
+ next->set_next_unresolved(nullptr);
+ return true;
+ }
+ current = next;
+ }
+ return false;
+}
Variable* Scope::NewTemporary(const AstRawString* name) {
DeclarationScope* scope = GetClosureScope();
- Variable* var = new(zone()) Variable(scope,
- name,
- TEMPORARY,
- Variable::NORMAL,
- kCreatedInitialized);
- scope->AddTemporary(var);
+ Variable* var = new (zone())
+ Variable(scope, name, TEMPORARY, NORMAL_VARIABLE, kCreatedInitialized);
+ scope->AddLocal(var);
return var;
}
-void Scope::AddDeclaration(Declaration* declaration) {
- DCHECK(!already_resolved_);
- decls_.Add(declaration, zone());
-}
-
-
Declaration* Scope::CheckConflictingVarDeclarations() {
int length = decls_.length();
for (int i = 0; i < length; i++) {
@@ -806,63 +1103,34 @@ Declaration* Scope::CheckLexDeclarationsConflictingWith(
return nullptr;
}
-void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
- ZoneList<Variable*>* context_locals,
- ZoneList<Variable*>* context_globals) {
- DCHECK(stack_locals != NULL);
- DCHECK(context_locals != NULL);
- DCHECK(context_globals != NULL);
-
- // Collect temporaries which are always allocated on the stack, unless the
- // context as a whole has forced context allocation.
- if (is_declaration_scope()) {
- ZoneList<Variable*>* temps = AsDeclarationScope()->temps();
- for (int i = 0; i < temps->length(); i++) {
- Variable* var = (*temps)[i];
- if (var->is_used()) {
- if (var->IsContextSlot()) {
- DCHECK(has_forced_context_allocation());
- context_locals->Add(var, zone());
- } else if (var->IsStackLocal()) {
- stack_locals->Add(var, zone());
- } else {
- DCHECK(var->IsParameter());
- }
- }
- }
- }
+void DeclarationScope::AllocateVariables(ParseInfo* info, AnalyzeMode mode) {
+ ResolveVariablesRecursively(info);
+ AllocateVariablesRecursively();
- for (int i = 0; i < ordered_variables_.length(); i++) {
- Variable* var = ordered_variables_[i];
- if (var->IsStackLocal()) {
- stack_locals->Add(var, zone());
- } else if (var->IsContextSlot()) {
- context_locals->Add(var, zone());
- } else if (var->IsGlobalSlot()) {
- context_globals->Add(var, zone());
- }
+ MaybeHandle<ScopeInfo> outer_scope;
+ for (const Scope* s = outer_scope_; s != nullptr; s = s->outer_scope_) {
+ if (s->scope_info_.is_null()) continue;
+ outer_scope = s->scope_info_;
+ break;
+ }
+ AllocateScopeInfosRecursively(info->isolate(), mode, outer_scope);
+ // The debugger expects all shared function infos to contain a scope info.
+ // Since the top-most scope will end up in a shared function info, make sure
+ // it has one, even if it doesn't need a scope info.
+ // TODO(jochen|yangguo): Remove this requirement.
+ if (scope_info_.is_null()) {
+ scope_info_ = ScopeInfo::Create(info->isolate(), zone(), this, outer_scope);
}
}
-void DeclarationScope::AllocateVariables(ParseInfo* info,
- AstNodeFactory* factory) {
- // 1) Propagate scope information.
- PropagateScopeInfo();
-
- // 2) Resolve variables.
- ResolveVariablesRecursively(info, factory);
-
- // 3) Allocate variables.
- AllocateVariablesRecursively();
-}
-
-
-bool Scope::AllowsLazyParsing() const {
- // If we are inside a block scope, we must parse eagerly to find out how
- // to allocate variables on the block scope. At this point, declarations may
- // not have yet been parsed.
+bool Scope::AllowsLazyParsingWithoutUnresolvedVariables() const {
+ // If we are inside a block scope, we must find unresolved variables in the
+ // inner scopes to find out how to allocate variables on the block scope. At
+ // this point, declarations may not have yet been parsed.
for (const Scope* s = this; s != nullptr; s = s->outer_scope_) {
if (s->is_block_scope()) return false;
+ // TODO(marja): Refactor parsing modes: also add s->is_function_scope()
+ // here.
}
return true;
}
@@ -932,6 +1200,16 @@ DeclarationScope* Scope::GetClosureScope() {
return scope->AsDeclarationScope();
}
+ModuleScope* Scope::GetModuleScope() {
+ Scope* scope = this;
+ DCHECK(!scope->is_script_scope());
+ while (!scope->is_module_scope()) {
+ scope = scope->outer_scope();
+ DCHECK_NOT_NULL(scope);
+ }
+ return scope->AsModuleScope();
+}
+
DeclarationScope* Scope::GetReceiverScope() {
Scope* scope = this;
while (!scope->is_script_scope() &&
@@ -942,18 +1220,17 @@ DeclarationScope* Scope::GetReceiverScope() {
return scope->AsDeclarationScope();
}
-
-
-Handle<ScopeInfo> Scope::GetScopeInfo(Isolate* isolate) {
- if (scope_info_.is_null()) {
- scope_info_ = ScopeInfo::Create(isolate, zone(), this);
+Scope* Scope::GetOuterScopeWithContext() {
+ Scope* scope = outer_scope_;
+ while (scope && !scope->NeedsContext()) {
+ scope = scope->outer_scope();
}
- return scope_info_;
+ return scope;
}
Handle<StringSet> DeclarationScope::CollectNonLocals(
ParseInfo* info, Handle<StringSet> non_locals) {
- VariableProxy* free_variables = FetchFreeVariables(this, info);
+ VariableProxy* free_variables = FetchFreeVariables(this, true, info);
for (VariableProxy* proxy = free_variables; proxy != nullptr;
proxy = proxy->next_unresolved()) {
non_locals = StringSet::Add(non_locals, proxy->name());
@@ -961,38 +1238,73 @@ Handle<StringSet> DeclarationScope::CollectNonLocals(
return non_locals;
}
-void DeclarationScope::AnalyzePartially(DeclarationScope* migrate_to,
- AstNodeFactory* ast_node_factory) {
- // Gather info from inner scopes.
- PropagateScopeInfo();
+void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
+ bool aborted) {
+ DCHECK(is_function_scope());
- // Try to resolve unresolved variables for this Scope and migrate those which
- // cannot be resolved inside. It doesn't make sense to try to resolve them in
- // the outer Scopes here, because they are incomplete.
- for (VariableProxy* proxy = FetchFreeVariables(this); proxy != nullptr;
- proxy = proxy->next_unresolved()) {
- DCHECK(!proxy->is_resolved());
- VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
- migrate_to->AddUnresolved(copy);
+ // Reset all non-trivial members.
+ decls_.Rewind(0);
+ locals_.Rewind(0);
+ sloppy_block_function_map_.Clear();
+ variables_.Clear();
+ // Make sure we won't walk the scope tree from here on.
+ inner_scope_ = nullptr;
+ unresolved_ = nullptr;
+
+ // TODO(verwaest): We should properly preparse the parameters (no declarations
+ // should be created), and reparse on abort.
+ if (aborted) {
+ if (!IsArrowFunction(function_kind_)) {
+ DeclareDefaultFunctionVariables(ast_value_factory);
+ }
+ // Recreate declarations for parameters.
+ for (int i = 0; i < params_.length(); i++) {
+ Variable* var = params_[i];
+ if (var->mode() == TEMPORARY) {
+ locals_.Add(var, zone());
+ } else if (variables_.Lookup(var->raw_name()) == nullptr) {
+ variables_.Add(zone(), var);
+ locals_.Add(var, zone());
+ }
+ }
+ } else {
+ params_.Rewind(0);
}
- // Push scope data up to migrate_to. Note that migrate_to and this Scope
- // describe the same Scope, just in different Zones.
- PropagateUsageFlagsToScope(migrate_to);
- if (scope_uses_super_property_) migrate_to->scope_uses_super_property_ = true;
- if (inner_scope_calls_eval_) migrate_to->inner_scope_calls_eval_ = true;
+#ifdef DEBUG
+ needs_migration_ = false;
+#endif
+
+ is_lazily_parsed_ = !aborted;
+}
+
+void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
DCHECK(!force_eager_compilation_);
- migrate_to->set_start_position(start_position_);
- migrate_to->set_end_position(end_position_);
- migrate_to->set_language_mode(language_mode());
- migrate_to->arity_ = arity_;
- migrate_to->force_context_allocation_ = force_context_allocation_;
- outer_scope_->RemoveInnerScope(this);
- DCHECK_EQ(outer_scope_, migrate_to->outer_scope_);
- DCHECK_EQ(outer_scope_->zone(), migrate_to->zone());
- DCHECK_EQ(NeedsHomeObject(), migrate_to->NeedsHomeObject());
- DCHECK_EQ(asm_function_, migrate_to->asm_function_);
- DCHECK_EQ(arguments() != nullptr, migrate_to->arguments() != nullptr);
+ VariableProxy* unresolved = nullptr;
+
+ if (!outer_scope_->is_script_scope()) {
+ // Try to resolve unresolved variables for this Scope and migrate those
+ // which cannot be resolved inside. It doesn't make sense to try to resolve
+ // them in the outer Scopes here, because they are incomplete.
+ for (VariableProxy* proxy =
+ FetchFreeVariables(this, !FLAG_lazy_inner_functions);
+ proxy != nullptr; proxy = proxy->next_unresolved()) {
+ DCHECK(!proxy->is_resolved());
+ VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
+ copy->set_next_unresolved(unresolved);
+ unresolved = copy;
+ }
+
+ // Clear arguments_ if unused. This is used as a signal for optimization.
+ if (arguments_ != nullptr &&
+ !(MustAllocate(arguments_) && !has_arguments_parameter_)) {
+ arguments_ = nullptr;
+ }
+ }
+
+ ResetAfterPreparsing(ast_node_factory->ast_value_factory(), false);
+
+ unresolved_ = unresolved;
}
#ifdef DEBUG
@@ -1040,9 +1352,6 @@ static void PrintLocation(Variable* var) {
case VariableLocation::CONTEXT:
PrintF("context[%d]", var->index());
break;
- case VariableLocation::GLOBAL:
- PrintF("global[%d]", var->index());
- break;
case VariableLocation::LOOKUP:
PrintF("lookup");
break;
@@ -1055,7 +1364,7 @@ static void PrintLocation(Variable* var) {
static void PrintVar(int indent, Variable* var) {
if (var->is_used() || !var->IsUnallocated()) {
- Indent(indent, Variable::Mode2String(var->mode()));
+ Indent(indent, VariableMode2String(var->mode()));
PrintF(" ");
if (var->raw_name()->IsEmpty())
PrintF(".%p", reinterpret_cast<void*>(var));
@@ -1077,14 +1386,16 @@ static void PrintVar(int indent, Variable* var) {
}
}
-
-static void PrintMap(int indent, VariableMap* map) {
- for (VariableMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
+static void PrintMap(int indent, VariableMap* map, bool locals) {
+ for (VariableMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
- if (var == NULL) {
- Indent(indent, "<?>\n");
- } else {
- PrintVar(indent, var);
+ bool local = !IsDynamicVariableMode(var->mode());
+ if (locals ? local : !local) {
+ if (var == nullptr) {
+ Indent(indent, "<?>\n");
+ } else {
+ PrintVar(indent, var);
+ }
}
}
}
@@ -1143,14 +1454,14 @@ void Scope::Print(int n) {
Indent(n1, "// scope uses 'super' property\n");
}
if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
+ if (is_lazily_parsed_) Indent(n1, "// lazily parsed\n");
if (num_stack_slots_ > 0) {
Indent(n1, "// ");
PrintF("%d stack slots\n", num_stack_slots_);
}
if (num_heap_slots_ > 0) {
Indent(n1, "// ");
- PrintF("%d heap slots (including %d global slots)\n", num_heap_slots_,
- num_global_slots_);
+ PrintF("%d heap slots\n", num_heap_slots_);
}
// Print locals.
@@ -1159,28 +1470,12 @@ void Scope::Print(int n) {
PrintVar(n1, function);
}
- if (is_declaration_scope()) {
- bool printed_header = false;
- ZoneList<Variable*>* temps = AsDeclarationScope()->temps();
- for (int i = 0; i < temps->length(); i++) {
- if (!printed_header) {
- printed_header = true;
- Indent(n1, "// temporary vars:\n");
- }
- PrintVar(n1, (*temps)[i]);
- }
- }
-
if (variables_.Start() != NULL) {
Indent(n1, "// local vars:\n");
- PrintMap(n1, &variables_);
- }
+ PrintMap(n1, &variables_, true);
- if (dynamics_ != NULL) {
Indent(n1, "// dynamic vars:\n");
- PrintMap(n1, dynamics_->GetMap(DYNAMIC));
- PrintMap(n1, dynamics_->GetMap(DYNAMIC_LOCAL));
- PrintMap(n1, dynamics_->GetMap(DYNAMIC_GLOBAL));
+ PrintMap(n1, &variables_, false);
}
// Print inner scopes (disable by providing negative n).
@@ -1208,34 +1503,26 @@ void Scope::CheckScopePositions() {
}
void Scope::CheckZones() {
+ DCHECK(!needs_migration_);
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
CHECK_EQ(scope->zone(), zone());
+ scope->CheckZones();
}
}
#endif // DEBUG
Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
- if (dynamics_ == NULL) dynamics_ = new (zone()) DynamicScopePart(zone());
- VariableMap* map = dynamics_->GetMap(mode);
- Variable* var = map->Lookup(name);
- if (var == NULL) {
- // Declare a new non-local.
- DCHECK(!IsLexicalVariableMode(mode));
- var = map->Declare(zone(), NULL, name, mode, Variable::NORMAL,
- kCreatedInitialized);
- // Allocate it by giving it a dynamic lookup.
- var->AllocateTo(VariableLocation::LOOKUP, -1);
- }
+ // Declare a new non-local.
+ DCHECK(IsDynamicVariableMode(mode));
+ Variable* var = variables_.Declare(zone(), NULL, name, mode, NORMAL_VARIABLE,
+ kCreatedInitialized);
+ // Allocate it by giving it a dynamic lookup.
+ var->AllocateTo(VariableLocation::LOOKUP, -1);
return var;
}
-Variable* Scope::LookupRecursive(VariableProxy* proxy,
- BindingKind* binding_kind,
- AstNodeFactory* factory,
- Scope* outer_scope_end) {
+Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
DCHECK_NE(outer_scope_end, this);
- DCHECK_NOT_NULL(binding_kind);
- DCHECK_EQ(UNBOUND, *binding_kind);
// Short-cut: whenever we find a debug-evaluate scope, just look everything up
// dynamically. Debug-evaluate doesn't properly create scope info for the
// lookups it does. It may not have a valid 'this' declaration, and anything
@@ -1243,10 +1530,7 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy,
// variables.
// TODO(yangguo): Remove once debug-evaluate creates proper ScopeInfo for the
// scopes in which it's evaluating.
- if (is_debug_evaluate_scope_) {
- *binding_kind = DYNAMIC_LOOKUP;
- return nullptr;
- }
+ if (is_debug_evaluate_scope_) return NonLocal(proxy->raw_name(), DYNAMIC);
// Try to find the variable in this scope.
Variable* var = LookupLocal(proxy->raw_name());
@@ -1254,54 +1538,49 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy,
// We found a variable and we are done. (Even if there is an 'eval' in this
// scope which introduces the same variable again, the resulting variable
// remains the same.)
- if (var != nullptr) {
- *binding_kind = BOUND;
- return var;
- }
-
- // We did not find a variable locally. Check against the function variable, if
- // any.
- if (is_function_scope()) {
- var = AsDeclarationScope()->LookupFunctionVar(proxy->raw_name());
- if (var != nullptr) {
- *binding_kind = calls_sloppy_eval() ? BOUND_EVAL_SHADOWED : BOUND;
- return var;
- }
- }
-
- if (outer_scope_ != outer_scope_end) {
- var = outer_scope_->LookupRecursive(proxy, binding_kind, factory,
- outer_scope_end);
- if (*binding_kind == BOUND && is_function_scope()) {
+ if (var != nullptr) return var;
+
+ if (outer_scope_ == outer_scope_end) {
+ // We may just be trying to find all free variables. In that case, don't
+ // declare them in the outer scope.
+ if (!is_script_scope()) return nullptr;
+ // No binding has been found. Declare a variable on the global object.
+ return AsDeclarationScope()->DeclareDynamicGlobal(proxy->raw_name(),
+ NORMAL_VARIABLE);
+ }
+
+ DCHECK(!is_script_scope());
+
+ var = outer_scope_->LookupRecursive(proxy, outer_scope_end);
+
+ // The variable could not be resolved statically.
+ if (var == nullptr) return var;
+
+ if (is_function_scope() && !var->is_dynamic()) {
+ var->ForceContextAllocation();
+ }
+ // "this" can't be shadowed by "eval"-introduced bindings or by "with"
+ // scopes.
+ // TODO(wingo): There are other variables in this category; add them.
+ if (var->is_this()) return var;
+
+ if (is_with_scope()) {
+ // The current scope is a with scope, so the variable binding can not be
+ // statically resolved. However, note that it was necessary to do a lookup
+ // in the outer scope anyway, because if a binding exists in an outer
+ // scope, the associated variable has to be marked as potentially being
+ // accessed from inside of an inner with scope (the property may not be in
+ // the 'with' object).
+ if (!var->is_dynamic() && var->IsUnallocated()) {
+ DCHECK(!already_resolved_);
+ var->set_is_used();
var->ForceContextAllocation();
+ if (proxy->is_assigned()) var->set_maybe_assigned();
}
- // "this" can't be shadowed by "eval"-introduced bindings or by "with"
- // scopes.
- // TODO(wingo): There are other variables in this category; add them.
- if (var != nullptr && var->is_this()) return var;
-
- if (is_with_scope()) {
- // The current scope is a with scope, so the variable binding can not be
- // statically resolved. However, note that it was necessary to do a lookup
- // in the outer scope anyway, because if a binding exists in an outer
- // scope, the associated variable has to be marked as potentially being
- // accessed from inside of an inner with scope (the property may not be in
- // the 'with' object).
- if (var != nullptr && var->IsUnallocated()) {
- DCHECK(!already_resolved_);
- var->set_is_used();
- var->ForceContextAllocation();
- if (proxy->is_assigned()) var->set_maybe_assigned();
- }
- *binding_kind = DYNAMIC_LOOKUP;
- return nullptr;
- }
- } else {
- DCHECK(!is_with_scope());
- DCHECK(is_function_scope() || is_script_scope() || is_eval_scope());
+ return NonLocal(proxy->raw_name(), DYNAMIC);
}
- if (calls_sloppy_eval() && is_declaration_scope() && !is_script_scope()) {
+ if (calls_sloppy_eval() && is_declaration_scope()) {
// A variable binding may have been found in an outer scope, but the current
// scope makes a sloppy 'eval' call, so the found variable may not be the
// correct one (the 'eval' may introduce a binding with the same name). In
@@ -1309,40 +1588,58 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy,
// scopes that can host var bindings (declaration scopes) need be considered
// here (this excludes block and catch scopes), and variable lookups at
// script scope are always dynamic.
- if (*binding_kind == BOUND) {
- *binding_kind = BOUND_EVAL_SHADOWED;
- } else if (*binding_kind == UNBOUND) {
- *binding_kind = UNBOUND_EVAL_SHADOWED;
+ if (var->IsGlobalObjectProperty()) {
+ return NonLocal(proxy->raw_name(), DYNAMIC_GLOBAL);
}
+
+ if (var->is_dynamic()) return var;
+
+ Variable* invalidated = var;
+ var = NonLocal(proxy->raw_name(), DYNAMIC_LOCAL);
+ var->set_local_if_not_shadowed(invalidated);
}
return var;
}
-void Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy,
- AstNodeFactory* factory) {
+void Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy) {
DCHECK(info->script_scope()->is_script_scope());
-
- // If the proxy is already resolved there's nothing to do
- // (functions and consts may be resolved by the parser).
- if (proxy->is_resolved()) return;
-
- // Otherwise, try to resolve the variable.
- BindingKind binding_kind = UNBOUND;
- Variable* var = LookupRecursive(proxy, &binding_kind, factory);
-
- ResolveTo(info, binding_kind, proxy, var);
+ DCHECK(!proxy->is_resolved());
+ Variable* var = LookupRecursive(proxy, nullptr);
+ ResolveTo(info, proxy, var);
+
+ if (FLAG_lazy_inner_functions) {
+ if (info != nullptr && info->is_native()) return;
+ // Pessimistically force context allocation for all variables to which inner
+ // scope variables could potentially resolve to.
+ Scope* scope = GetClosureScope()->outer_scope_;
+ while (scope != nullptr && scope->scope_info_.is_null()) {
+ var = scope->LookupLocal(proxy->raw_name());
+ if (var != nullptr) {
+ // Since we don't lazy parse inner arrow functions, inner functions
+ // cannot refer to the outer "this".
+ if (!var->is_dynamic() && !var->is_this() &&
+ !var->has_forced_context_allocation()) {
+ var->ForceContextAllocation();
+ var->set_is_used();
+ // We don't know what the (potentially lazy parsed) inner function
+ // does with the variable; pessimistically assume that it's assigned.
+ var->set_maybe_assigned();
+ }
+ }
+ scope = scope->outer_scope_;
+ }
+ }
}
-void Scope::ResolveTo(ParseInfo* info, BindingKind binding_kind,
- VariableProxy* proxy, Variable* var) {
+void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
#ifdef DEBUG
if (info->script_is_native()) {
// To avoid polluting the global object in native scripts
// - Variables must not be allocated to the global scope.
CHECK_NOT_NULL(outer_scope());
// - Variables must be bound locally or unallocated.
- if (BOUND != binding_kind) {
+ if (var->IsGlobalObjectProperty()) {
// The following variable name may be minified. If so, disable
// minification in js2c.py for better output.
Handle<String> name = proxy->raw_name()->string();
@@ -1357,85 +1654,44 @@ void Scope::ResolveTo(ParseInfo* info, BindingKind binding_kind,
}
#endif
- switch (binding_kind) {
- case BOUND:
- break;
-
- case BOUND_EVAL_SHADOWED:
- // We either found a variable binding that might be shadowed by eval or
- // gave up on it (e.g. by encountering a local with the same in the outer
- // scope which was not promoted to a context, this can happen if we use
- // debugger to evaluate arbitrary expressions at a break point).
- if (var->IsGlobalObjectProperty()) {
- var = NonLocal(proxy->raw_name(), DYNAMIC_GLOBAL);
- } else if (var->is_dynamic()) {
- var = NonLocal(proxy->raw_name(), DYNAMIC);
- } else {
- Variable* invalidated = var;
- var = NonLocal(proxy->raw_name(), DYNAMIC_LOCAL);
- var->set_local_if_not_shadowed(invalidated);
- }
- break;
-
- case UNBOUND:
- // No binding has been found. Declare a variable on the global object.
- var = info->script_scope()->DeclareDynamicGlobal(proxy->raw_name(),
- Variable::NORMAL);
- break;
-
- case UNBOUND_EVAL_SHADOWED:
- // No binding has been found. But some scope makes a sloppy 'eval' call.
- var = NonLocal(proxy->raw_name(), DYNAMIC_GLOBAL);
- break;
-
- case DYNAMIC_LOOKUP:
- // The variable could not be resolved statically.
- var = NonLocal(proxy->raw_name(), DYNAMIC);
- break;
- }
-
- DCHECK(var != NULL);
+ DCHECK_NOT_NULL(var);
if (proxy->is_assigned()) var->set_maybe_assigned();
-
proxy->BindTo(var);
}
-void Scope::ResolveVariablesRecursively(ParseInfo* info,
- AstNodeFactory* factory) {
+void Scope::ResolveVariablesRecursively(ParseInfo* info) {
DCHECK(info->script_scope()->is_script_scope());
// Resolve unresolved variables for this scope.
for (VariableProxy* proxy = unresolved_; proxy != nullptr;
proxy = proxy->next_unresolved()) {
- ResolveVariable(info, proxy, factory);
+ ResolveVariable(info, proxy);
}
// Resolve unresolved variables for inner scopes.
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- scope->ResolveVariablesRecursively(info, factory);
+ scope->ResolveVariablesRecursively(info);
}
}
VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
- ParseInfo* info,
+ bool try_to_resolve, ParseInfo* info,
VariableProxy* stack) {
for (VariableProxy *proxy = unresolved_, *next = nullptr; proxy != nullptr;
proxy = next) {
next = proxy->next_unresolved();
- if (proxy->is_resolved()) continue;
- // Note that we pass nullptr as AstNodeFactory: this phase should not create
- // any new AstNodes, since none of the Scopes involved are backed up by
- // ScopeInfo.
- BindingKind binding_kind = UNBOUND;
- Variable* var = LookupRecursive(proxy, &binding_kind, nullptr,
- max_outer_scope->outer_scope());
+ DCHECK(!proxy->is_resolved());
+ Variable* var = nullptr;
+ if (try_to_resolve) {
+ var = LookupRecursive(proxy, max_outer_scope->outer_scope());
+ }
if (var == nullptr) {
proxy->set_next_unresolved(stack);
stack = proxy;
} else if (info != nullptr) {
- DCHECK_NE(UNBOUND, binding_kind);
- DCHECK_NE(UNBOUND_EVAL_SHADOWED, binding_kind);
- ResolveTo(info, binding_kind, proxy, var);
+ ResolveTo(info, proxy, var);
+ } else {
+ var->set_is_used();
}
}
@@ -1443,22 +1699,13 @@ VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
unresolved_ = nullptr;
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- stack = scope->FetchFreeVariables(max_outer_scope, info, stack);
+ stack =
+ scope->FetchFreeVariables(max_outer_scope, try_to_resolve, info, stack);
}
return stack;
}
-void Scope::PropagateScopeInfo() {
- for (Scope* inner = inner_scope_; inner != nullptr; inner = inner->sibling_) {
- inner->PropagateScopeInfo();
- if (IsAsmModule() && inner->is_function_scope()) {
- inner->AsDeclarationScope()->set_asm_function();
- }
- }
-}
-
-
bool Scope::MustAllocate(Variable* var) {
DCHECK(var->location() != VariableLocation::MODULE);
// Give var a read/write use if there is a chance it might be accessed
@@ -1511,8 +1758,8 @@ void DeclarationScope::AllocateParameterLocals() {
bool uses_sloppy_arguments = false;
- // Functions have 'arguments' declared implicitly in all non arrow functions.
if (arguments_ != nullptr) {
+ DCHECK(!is_arrow_scope());
// 'arguments' is used. Unless there is also a parameter called
// 'arguments', we must be conservative and allocate all parameters to
// the context assuming they will be captured by the arguments object.
@@ -1533,21 +1780,18 @@ void DeclarationScope::AllocateParameterLocals() {
// allocate the arguments object by nulling out arguments_.
arguments_ = nullptr;
}
-
- } else {
- DCHECK(is_arrow_scope());
}
// The same parameter may occur multiple times in the parameters_ list.
// If it does, and if it is not copied into the context object, it must
// receive the highest parameter index for that parameter; thus iteration
// order is relevant!
- for (int i = params_.length() - 1; i >= 0; --i) {
- if (i == rest_index_) continue;
+ for (int i = num_parameters() - 1; i >= 0; --i) {
Variable* var = params_[i];
-
- DCHECK(var->scope() == this);
+ DCHECK(!has_rest_ || var != rest_parameter());
+ DCHECK_EQ(this, var->scope());
if (uses_sloppy_arguments) {
+ var->set_is_used();
var->ForceContextAllocation();
}
AllocateParameter(var, i);
@@ -1567,8 +1811,6 @@ void DeclarationScope::AllocateParameter(Variable* var, int index) {
var->AllocateTo(VariableLocation::PARAMETER, index);
}
}
- } else {
- DCHECK(!var->IsGlobalSlot());
}
}
@@ -1590,38 +1832,9 @@ void Scope::AllocateNonParameterLocal(Variable* var) {
}
}
-void Scope::AllocateDeclaredGlobal(Variable* var) {
- DCHECK(var->scope() == this);
- if (var->IsUnallocated()) {
- if (var->IsStaticGlobalObjectProperty()) {
- DCHECK_EQ(-1, var->index());
- DCHECK(var->name()->IsString());
- var->AllocateTo(VariableLocation::GLOBAL, num_heap_slots_++);
- num_global_slots_++;
- } else {
- // There must be only DYNAMIC_GLOBAL in the script scope.
- DCHECK(!is_script_scope() || DYNAMIC_GLOBAL == var->mode());
- }
- }
-}
-
void Scope::AllocateNonParameterLocalsAndDeclaredGlobals() {
- // All variables that have no rewrite yet are non-parameter locals.
- if (is_declaration_scope()) {
- ZoneList<Variable*>* temps = AsDeclarationScope()->temps();
- for (int i = 0; i < temps->length(); i++) {
- AllocateNonParameterLocal((*temps)[i]);
- }
- }
-
- for (int i = 0; i < ordered_variables_.length(); i++) {
- AllocateNonParameterLocal(ordered_variables_[i]);
- }
-
- if (FLAG_global_var_shortcuts) {
- for (int i = 0; i < ordered_variables_.length(); i++) {
- AllocateDeclaredGlobal(ordered_variables_[i]);
- }
+ for (int i = 0; i < locals_.length(); i++) {
+ AllocateNonParameterLocal(locals_[i]);
}
if (is_declaration_scope()) {
@@ -1638,8 +1851,8 @@ void DeclarationScope::AllocateLocals() {
AllocateNonParameterLocal(function_);
}
- DCHECK(!has_rest_parameter() || !MustAllocate(params_[rest_index_]) ||
- !params_[rest_index_]->IsUnallocated());
+ DCHECK(!has_rest_ || !MustAllocate(rest_parameter()) ||
+ !rest_parameter()->IsUnallocated());
if (new_target_ != nullptr && !MustAllocate(new_target_)) {
new_target_ = nullptr;
@@ -1651,23 +1864,23 @@ void DeclarationScope::AllocateLocals() {
}
void ModuleScope::AllocateModuleVariables() {
- for (auto it = module()->regular_imports().begin();
- it != module()->regular_imports().end(); ++it) {
- Variable* var = LookupLocal(it->second->local_name);
+ for (const auto& it : module()->regular_imports()) {
+ Variable* var = LookupLocal(it.first);
// TODO(neis): Use a meaningful index.
var->AllocateTo(VariableLocation::MODULE, 42);
}
- for (auto entry : module()->exports()) {
- if (entry->local_name == nullptr) continue;
- Variable* var = LookupLocal(entry->local_name);
- var->AllocateTo(VariableLocation::MODULE, 42);
+ for (const auto& it : module()->regular_exports()) {
+ Variable* var = LookupLocal(it.first);
+ var->AllocateTo(VariableLocation::MODULE, 0);
}
}
void Scope::AllocateVariablesRecursively() {
DCHECK(!already_resolved_);
DCHECK_EQ(0, num_stack_slots_);
+ // Don't allocate variables of preparsed scopes.
+ if (is_lazily_parsed_) return;
// Allocate variables for inner scopes.
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
@@ -1708,6 +1921,23 @@ void Scope::AllocateVariablesRecursively() {
DCHECK(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
}
+void Scope::AllocateScopeInfosRecursively(Isolate* isolate, AnalyzeMode mode,
+ MaybeHandle<ScopeInfo> outer_scope) {
+ DCHECK(scope_info_.is_null());
+ if (mode == AnalyzeMode::kDebugger || NeedsScopeInfo()) {
+ scope_info_ = ScopeInfo::Create(isolate, zone(), this, outer_scope);
+ }
+
+ // The ScopeInfo chain should mirror the context chain, so we only link to
+ // the next outer scope that needs a context.
+ MaybeHandle<ScopeInfo> next_outer_scope = outer_scope;
+ if (NeedsContext()) next_outer_scope = scope_info_;
+
+ // Allocate ScopeInfos for inner scopes.
+ for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ scope->AllocateScopeInfosRecursively(isolate, mode, next_outer_scope);
+ }
+}
int Scope::StackLocalCount() const {
Variable* function =
@@ -1723,12 +1953,9 @@ int Scope::ContextLocalCount() const {
is_function_scope() ? AsDeclarationScope()->function_var() : nullptr;
bool is_function_var_in_context =
function != nullptr && function->IsContextSlot();
- return num_heap_slots() - Context::MIN_CONTEXT_SLOTS - num_global_slots() -
+ return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
(is_function_var_in_context ? 1 : 0);
}
-
-int Scope::ContextGlobalCount() const { return num_global_slots(); }
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 8c00927421..0acff8ac32 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -5,15 +5,22 @@
#ifndef V8_AST_SCOPES_H_
#define V8_AST_SCOPES_H_
-#include "src/ast/ast.h"
#include "src/base/hashmap.h"
#include "src/globals.h"
-#include "src/zone.h"
+#include "src/objects.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
+class AstNodeFactory;
+class AstValueFactory;
+class AstRawString;
+class Declaration;
class ParseInfo;
+class SloppyBlockFunctionStatement;
+class StringSet;
+class VariableProxy;
// A hash map to support fast variable declaration and lookup.
class VariableMap: public ZoneHashMap {
@@ -21,34 +28,14 @@ class VariableMap: public ZoneHashMap {
explicit VariableMap(Zone* zone);
Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
- VariableMode mode, Variable::Kind kind,
+ VariableMode mode, VariableKind kind,
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
bool* added = nullptr);
Variable* Lookup(const AstRawString* name);
-};
-
-
-// The dynamic scope part holds hash maps for the variables that will
-// be looked up dynamically from within eval and with scopes. The objects
-// are allocated on-demand from Scope::NonLocal to avoid wasting memory
-// and setup time for scopes that don't need them.
-class DynamicScopePart : public ZoneObject {
- public:
- explicit DynamicScopePart(Zone* zone) {
- for (int i = 0; i < 3; i++)
- maps_[i] = new(zone->New(sizeof(VariableMap))) VariableMap(zone);
- }
-
- VariableMap* GetMap(VariableMode mode) {
- int index = mode - DYNAMIC;
- DCHECK(index >= 0 && index < 3);
- return maps_[index];
- }
-
- private:
- VariableMap *maps_[3];
+ void Remove(Variable* var);
+ void Add(Zone* zone, Variable* var);
};
@@ -60,6 +47,7 @@ class SloppyBlockFunctionMap : public ZoneHashMap {
SloppyBlockFunctionStatement* statement);
};
+enum class AnalyzeMode { kRegular, kDebugger };
// Global invariants after AST construction: Each reference (i.e. identifier)
// to a JavaScript variable (including global properties) is represented by a
@@ -86,6 +74,7 @@ class Scope: public ZoneObject {
void SetScopeName(const AstRawString* scope_name) {
scope_name_ = scope_name;
}
+ void set_needs_migration() { needs_migration_ = true; }
#endif
// TODO(verwaest): Is this needed on Scope?
@@ -106,18 +95,14 @@ class Scope: public ZoneObject {
Scope* outer_scope_;
Scope* top_inner_scope_;
VariableProxy* top_unresolved_;
- int top_temp_;
+ int top_local_;
+ int top_decl_;
};
- // Compute top scope and allocate variables. For lazy compilation the top
- // scope only contains the single lazily compiled function, so this
- // doesn't re-allocate variables repeatedly.
- static void Analyze(ParseInfo* info);
-
- enum class DeserializationMode { kDeserializeOffHeap, kKeepScopeInfo };
+ enum class DeserializationMode { kIncludingVariables, kScopesOnly };
static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone,
- Context* context,
+ ScopeInfo* scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
DeserializationMode deserialization_mode);
@@ -127,6 +112,11 @@ class Scope: public ZoneObject {
// tree and its children are reparented.
Scope* FinalizeBlockScope();
+ bool HasBeenRemoved() const;
+
+ // Find the first scope that hasn't been removed.
+ Scope* GetUnremovedScope();
+
// Inserts outer_scope into this scope's scope chain (and removes this
// from the current outer_scope_'s inner scope list).
// Assumes outer_scope_ is non-null.
@@ -142,7 +132,13 @@ class Scope: public ZoneObject {
// Declarations
// Lookup a variable in this scope. Returns the variable or NULL if not found.
- Variable* LookupLocal(const AstRawString* name);
+ Variable* LookupLocal(const AstRawString* name) {
+ Variable* result = variables_.Lookup(name);
+ if (result != nullptr || scope_info_.is_null()) return result;
+ return LookupInScopeInfo(name);
+ }
+
+ Variable* LookupInScopeInfo(const AstRawString* name);
// Lookup a variable in this scope or outer scopes.
// Returns the variable or NULL if not found.
@@ -151,36 +147,28 @@ class Scope: public ZoneObject {
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
- InitializationFlag init_flag, Variable::Kind kind,
+ InitializationFlag init_flag, VariableKind kind,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
+ Variable* DeclareVariable(Declaration* declaration, VariableMode mode,
+ InitializationFlag init,
+ bool allow_harmony_restrictive_generators,
+ bool* sloppy_mode_block_scope_function_redefinition,
+ bool* ok);
+
// Declarations list.
ZoneList<Declaration*>* declarations() { return &decls_; }
+ ZoneList<Variable*>* locals() { return &locals_; }
+
// Create a new unresolved variable.
VariableProxy* NewUnresolved(AstNodeFactory* factory,
const AstRawString* name,
int start_position = kNoSourcePosition,
int end_position = kNoSourcePosition,
- Variable::Kind kind = Variable::NORMAL) {
- // Note that we must not share the unresolved variables with
- // the same name because they may be removed selectively via
- // RemoveUnresolved().
- DCHECK(!already_resolved_);
- DCHECK_EQ(factory->zone(), zone());
- VariableProxy* proxy =
- factory->NewVariableProxy(name, kind, start_position, end_position);
- proxy->set_next_unresolved(unresolved_);
- unresolved_ = proxy;
- return proxy;
- }
+ VariableKind kind = NORMAL_VARIABLE);
- void AddUnresolved(VariableProxy* proxy) {
- DCHECK(!already_resolved_);
- DCHECK(!proxy->is_resolved());
- proxy->set_next_unresolved(unresolved_);
- unresolved_ = proxy;
- }
+ void AddUnresolved(VariableProxy* proxy);
// Remove a unresolved variable. During parsing, an unresolved variable
// may have been added optimistically, but then only the variable name
@@ -189,6 +177,7 @@ class Scope: public ZoneObject {
// allocated globally as a "ghost" variable. RemoveUnresolved removes
// such a variable again if it was added; otherwise this is a no-op.
bool RemoveUnresolved(VariableProxy* var);
+ bool RemoveUnresolved(const AstRawString* name);
// Creates a new temporary variable in this scope's TemporaryScope. The
// name is only used for printing and cannot be used to find the variable.
@@ -198,11 +187,6 @@ class Scope: public ZoneObject {
// TODO(verwaest): Move to DeclarationScope?
Variable* NewTemporary(const AstRawString* name);
- // Adds the specific declaration node to the list of declarations in
- // this scope. The declarations are processed as part of entering
- // the scope; see codegen.cc:ProcessDeclarations.
- void AddDeclaration(Declaration* declaration);
-
// ---------------------------------------------------------------------------
// Illegal redeclaration support.
@@ -223,10 +207,15 @@ class Scope: public ZoneObject {
// Scope-specific info.
// Inform the scope and outer scopes that the corresponding code contains an
- // eval call.
+ // eval call. We don't record eval calls from innner scopes in the outer most
+ // script scope, as we only see those when parsing eagerly. If we recorded the
+ // calls then, the outer most script scope would look different depending on
+ // whether we parsed eagerly or not which is undesirable.
void RecordEvalCall() {
scope_calls_eval_ = true;
- for (Scope* scope = this; scope != nullptr; scope = scope->outer_scope()) {
+ inner_scope_calls_eval_ = true;
+ for (Scope* scope = outer_scope(); scope && !scope->is_script_scope();
+ scope = scope->outer_scope()) {
scope->inner_scope_calls_eval_ = true;
}
}
@@ -353,24 +342,16 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Variable allocation.
- // Collect stack and context allocated local variables in this scope. Note
- // that the function variable - if present - is not collected and should be
- // handled separately.
- void CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
- ZoneList<Variable*>* context_locals,
- ZoneList<Variable*>* context_globals);
-
// Result of variable allocation.
int num_stack_slots() const { return num_stack_slots_; }
int num_heap_slots() const { return num_heap_slots_; }
- int num_global_slots() const { return num_global_slots_; }
int StackLocalCount() const;
int ContextLocalCount() const;
- int ContextGlobalCount() const;
- // Determine if we can parse a function literal in this scope lazily.
- bool AllowsLazyParsing() const;
+ // Determine if we can parse a function literal in this scope lazily without
+ // caring about the unresolved variables within.
+ bool AllowsLazyParsingWithoutUnresolvedVariables() const;
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope) const;
@@ -398,10 +379,13 @@ class Scope: public ZoneObject {
// 'this' is bound, and what determines the function kind.
DeclarationScope* GetReceiverScope();
- // Creates a scope info if it doesn't already exist.
- Handle<ScopeInfo> GetScopeInfo(Isolate* isolate);
+ // Find the module scope, assuming there is one.
+ ModuleScope* GetModuleScope();
- // GetScopeInfo() must have been called once to create the ScopeInfo.
+ // Find the innermost outer scope that needs a context.
+ Scope* GetOuterScopeWithContext();
+
+ // Analyze() must have been called once to create the ScopeInfo.
Handle<ScopeInfo> scope_info() {
DCHECK(!scope_info_.is_null());
return scope_info_;
@@ -436,9 +420,11 @@ class Scope: public ZoneObject {
// Retrieve `IsSimpleParameterList` of current or outer function.
bool HasSimpleParameters();
void set_is_debug_evaluate_scope() { is_debug_evaluate_scope_ = true; }
+ bool is_debug_evaluate_scope() const { return is_debug_evaluate_scope_; }
+
+ bool is_lazily_parsed() const { return is_lazily_parsed_; }
protected:
- // Creates a script scope.
explicit Scope(Zone* zone);
void set_language_mode(LanguageMode language_mode) {
@@ -447,16 +433,32 @@ class Scope: public ZoneObject {
private:
Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
- VariableMode mode, Variable::Kind kind,
+ VariableMode mode, VariableKind kind,
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned) {
bool added;
Variable* var =
variables_.Declare(zone, scope, name, mode, kind, initialization_flag,
maybe_assigned_flag, &added);
- if (added) ordered_variables_.Add(var, zone);
+ if (added) locals_.Add(var, zone);
return var;
}
+
+ // This method should only be invoked on scopes created during parsing (i.e.,
+ // not deserialized from a context). Also, since NeedsContext() is only
+ // returning a valid result after variables are resolved, NeedsScopeInfo()
+ // should also be invoked after resolution.
+ bool NeedsScopeInfo() const {
+ DCHECK(!already_resolved_);
+ // A lazily parsed scope doesn't contain enough information to create a
+ // ScopeInfo from it.
+ if (is_lazily_parsed_) return false;
+ // The debugger expects all functions to have scope infos.
+ // TODO(jochen|yangguo): Remove this requirement.
+ if (is_function_scope()) return true;
+ return NeedsContext();
+ }
+
Zone* zone_;
// Scope tree.
@@ -473,9 +475,7 @@ class Scope: public ZoneObject {
// In case of non-scopeinfo-backed scopes, this contains the variables of the
// map above in order of addition.
// TODO(verwaest): Thread through Variable.
- ZoneList<Variable*> ordered_variables_;
- // Variables that must be looked up dynamically.
- DynamicScopePart* dynamics_;
+ ZoneList<Variable*> locals_;
// Unresolved variables referred to from this scope. The proxies themselves
// form a linked list of all unresolved proxies.
VariableProxy* unresolved_;
@@ -490,7 +490,10 @@ class Scope: public ZoneObject {
// True if it doesn't need scope resolution (e.g., if the scope was
// constructed based on a serialized scope info or a catch context).
- bool already_resolved_ : 1;
+ bool already_resolved_;
+ // True if this scope may contain objects from a temp zone that needs to be
+ // fixed up.
+ bool needs_migration_;
#endif
// Source positions.
@@ -500,7 +503,6 @@ class Scope: public ZoneObject {
// Computed via AllocateVariables.
int num_stack_slots_;
int num_heap_slots_;
- int num_global_slots_;
// The scope type.
const ScopeType scope_type_;
@@ -525,79 +527,30 @@ class Scope: public ZoneObject {
// True if it holds 'var' declarations.
bool is_declaration_scope_ : 1;
+ bool is_lazily_parsed_ : 1;
+
// Create a non-local variable with a given name.
// These variables are looked up dynamically at runtime.
Variable* NonLocal(const AstRawString* name, VariableMode mode);
// Variable resolution.
- // Possible results of a recursive variable lookup telling if and how a
- // variable is bound. These are returned in the output parameter *binding_kind
- // of the LookupRecursive function.
- enum BindingKind {
- // The variable reference could be statically resolved to a variable binding
- // which is returned. There is no 'with' statement between the reference and
- // the binding and no scope between the reference scope (inclusive) and
- // binding scope (exclusive) makes a sloppy 'eval' call.
- BOUND,
-
- // The variable reference could be statically resolved to a variable binding
- // which is returned. There is no 'with' statement between the reference and
- // the binding, but some scope between the reference scope (inclusive) and
- // binding scope (exclusive) makes a sloppy 'eval' call, that might
- // possibly introduce variable bindings shadowing the found one. Thus the
- // found variable binding is just a guess.
- BOUND_EVAL_SHADOWED,
-
- // The variable reference could not be statically resolved to any binding
- // and thus should be considered referencing a global variable. NULL is
- // returned. The variable reference is not inside any 'with' statement and
- // no scope between the reference scope (inclusive) and script scope
- // (exclusive) makes a sloppy 'eval' call.
- UNBOUND,
-
- // The variable reference could not be statically resolved to any binding
- // NULL is returned. The variable reference is not inside any 'with'
- // statement, but some scope between the reference scope (inclusive) and
- // script scope (exclusive) makes a sloppy 'eval' call, that might
- // possibly introduce a variable binding. Thus the reference should be
- // considered referencing a global variable unless it is shadowed by an
- // 'eval' introduced binding.
- UNBOUND_EVAL_SHADOWED,
-
- // The variable could not be statically resolved and needs to be looked up
- // dynamically. NULL is returned. There are two possible reasons:
- // * A 'with' statement has been encountered and there is no variable
- // binding for the name between the variable reference and the 'with'.
- // The variable potentially references a property of the 'with' object.
- // * The code is being executed as part of a call to 'eval' and the calling
- // context chain contains either a variable binding for the name or it
- // contains a 'with' context.
- DYNAMIC_LOOKUP
- };
-
// Lookup a variable reference given by name recursively starting with this
// scope, and stopping when reaching the outer_scope_end scope. If the code is
// executed because of a call to 'eval', the context parameter should be set
// to the calling context of 'eval'.
- Variable* LookupRecursive(VariableProxy* proxy, BindingKind* binding_kind,
- AstNodeFactory* factory,
- Scope* outer_scope_end = nullptr);
- void ResolveTo(ParseInfo* info, BindingKind binding_kind,
- VariableProxy* proxy, Variable* var);
- void ResolveVariable(ParseInfo* info, VariableProxy* proxy,
- AstNodeFactory* factory);
- void ResolveVariablesRecursively(ParseInfo* info, AstNodeFactory* factory);
+ Variable* LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end);
+ void ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var);
+ void ResolveVariable(ParseInfo* info, VariableProxy* proxy);
+ void ResolveVariablesRecursively(ParseInfo* info);
// Finds free variables of this scope. This mutates the unresolved variables
// list along the way, so full resolution cannot be done afterwards.
// If a ParseInfo* is passed, non-free variables will be resolved.
VariableProxy* FetchFreeVariables(DeclarationScope* max_outer_scope,
+ bool try_to_resolve = true,
ParseInfo* info = nullptr,
VariableProxy* stack = nullptr);
- // Scope analysis.
- void PropagateScopeInfo();
-
// Predicates.
bool MustAllocate(Variable* var);
bool MustAllocateInContext(Variable* var);
@@ -610,15 +563,18 @@ class Scope: public ZoneObject {
void AllocateNonParameterLocalsAndDeclaredGlobals();
void AllocateVariablesRecursively();
+ void AllocateScopeInfosRecursively(Isolate* isolate, AnalyzeMode mode,
+ MaybeHandle<ScopeInfo> outer_scope);
+
// Construct a scope based on the scope info.
- Scope(Zone* zone, Scope* inner_scope, ScopeType type,
- Handle<ScopeInfo> scope_info);
+ Scope(Zone* zone, ScopeType type, Handle<ScopeInfo> scope_info);
// Construct a catch scope with a binding for the name.
- Scope(Zone* zone, Scope* inner_scope,
- const AstRawString* catch_variable_name);
+ Scope(Zone* zone, const AstRawString* catch_variable_name,
+ Handle<ScopeInfo> scope_info);
void AddInnerScope(Scope* inner_scope) {
+ DCHECK_EQ(!needs_migration_, inner_scope->zone() == zone());
inner_scope->sibling_ = inner_scope_;
inner_scope_ = inner_scope;
inner_scope->outer_scope_ = this;
@@ -641,9 +597,6 @@ class Scope: public ZoneObject {
void SetDefaults();
- void DeserializeScopeInfo(Isolate* isolate,
- AstValueFactory* ast_value_factory);
-
friend class DeclarationScope;
};
@@ -651,10 +604,10 @@ class DeclarationScope : public Scope {
public:
DeclarationScope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
FunctionKind function_kind = kNormalFunction);
- DeclarationScope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
+ DeclarationScope(Zone* zone, ScopeType scope_type,
Handle<ScopeInfo> scope_info);
// Creates a script scope.
- explicit DeclarationScope(Zone* zone);
+ DeclarationScope(Zone* zone, AstValueFactory* ast_value_factory);
bool IsDeclaredParameter(const AstRawString* name) {
// If IsSimpleParameterList is false, duplicate parameters are not allowed,
@@ -681,23 +634,29 @@ class DeclarationScope : public Scope {
IsClassConstructor(function_kind())));
}
+ void SetScriptScopeInfo(Handle<ScopeInfo> scope_info) {
+ DCHECK(is_script_scope());
+ DCHECK(scope_info_.is_null());
+ scope_info_ = scope_info;
+ }
+
bool asm_module() const { return asm_module_; }
- void set_asm_module() { asm_module_ = true; }
+ void set_asm_module();
bool asm_function() const { return asm_function_; }
void set_asm_function() { asm_module_ = true; }
void DeclareThis(AstValueFactory* ast_value_factory);
+ void DeclareArguments(AstValueFactory* ast_value_factory);
void DeclareDefaultFunctionVariables(AstValueFactory* ast_value_factory);
- // This lookup corresponds to a lookup in the "intermediate" scope sitting
- // between this scope and the outer scope. (ECMA-262, 3rd., requires that
- // the name of named function literal is kept in an intermediate scope
- // in between this scope and the next outer scope.)
- Variable* LookupFunctionVar(const AstRawString* name);
-
// Declare the function variable for a function literal. This variable
// is in an intermediate scope between this function scope and the the
// outer scope. Only possible for function scopes; at most one variable.
+ //
+ // This function needs to be called after all other variables have been
+ // declared in the scope. It will add a variable for {name} to {variables_};
+ // either the function variable itself, or a non-local in case the function
+ // calls sloppy eval.
Variable* DeclareFunctionVar(const AstRawString* name);
// Declare a parameter in this scope. When there are duplicated
@@ -712,7 +671,7 @@ class DeclarationScope : public Scope {
// scope) by a reference to an unresolved variable with no intervening
// with statements or eval calls.
Variable* DeclareDynamicGlobal(const AstRawString* name,
- Variable::Kind variable_kind);
+ VariableKind variable_kind);
// The variable corresponding to the 'this' value.
Variable* receiver() {
@@ -739,43 +698,36 @@ class DeclarationScope : public Scope {
}
// Parameters. The left-most parameter has index 0.
- // Only valid for function scopes.
+ // Only valid for function and module scopes.
Variable* parameter(int index) const {
- DCHECK(is_function_scope());
+ DCHECK(is_function_scope() || is_module_scope());
return params_[index];
}
// Returns the default function arity excluding default or rest parameters.
- int default_function_length() const { return arity_; }
-
- // Returns the number of formal parameters, up to but not including the
- // rest parameter index (if the function has rest parameters), i.e. it
- // says 2 for
- //
- // function foo(a, b) { ... }
- //
- // and
- //
- // function foo(a, b, ...c) { ... }
- //
- // but for
- //
- // function foo(a, b, c = 1) { ... }
- //
- // we return 3 here.
+ // This will be used to set the length of the function, by default.
+ // Class field initializers use this property to indicate the number of
+ // fields being initialized.
+ int arity() const { return arity_; }
+
+ // Normal code should not need to call this. Class field initializers use this
+ // property to indicate the number of fields being initialized.
+ void set_arity(int arity) { arity_ = arity; }
+
+ // Returns the number of formal parameters, excluding a possible rest
+ // parameter. Examples:
+ // function foo(a, b) {} ==> 2
+ // function foo(a, b, ...c) {} ==> 2
+ // function foo(a, b, c = 1) {} ==> 3
int num_parameters() const {
- return has_rest_parameter() ? params_.length() - 1 : params_.length();
+ return has_rest_ ? params_.length() - 1 : params_.length();
}
- // A function can have at most one rest parameter. Returns Variable* or NULL.
- Variable* rest_parameter(int* index) const {
- *index = rest_index_;
- if (rest_index_ < 0) return nullptr;
- return params_[rest_index_];
+ // The function's rest parameter (nullptr if there is none).
+ Variable* rest_parameter() const {
+ return has_rest_ ? params_[params_.length() - 1] : nullptr;
}
- bool has_rest_parameter() const { return rest_index_ >= 0; }
-
bool has_simple_parameters() const { return has_simple_parameters_; }
// TODO(caitp): manage this state in a better way. PreParser must be able to
@@ -803,44 +755,40 @@ class DeclarationScope : public Scope {
return this_function_;
}
- // Adds a temporary variable in this scope's TemporaryScope. This is for
- // adjusting the scope of temporaries used when desugaring parameter
+ // Adds a local variable in this scope's locals list. This is for adjusting
+ // the scope of temporaries and do-expression vars when desugaring parameter
// initializers.
- void AddTemporary(Variable* var) {
+ void AddLocal(Variable* var) {
DCHECK(!already_resolved_);
// Temporaries are only placed in ClosureScopes.
DCHECK_EQ(GetClosureScope(), this);
- temps_.Add(var, zone());
+ locals_.Add(var, zone());
}
- ZoneList<Variable*>* temps() { return &temps_; }
-
void DeclareSloppyBlockFunction(const AstRawString* name,
SloppyBlockFunctionStatement* statement) {
sloppy_block_function_map_.Declare(zone(), name, statement);
}
+ // Go through sloppy_block_function_map_ and hoist those (into this scope)
+ // which should be hoisted.
+ void HoistSloppyBlockFunctions(AstNodeFactory* factory);
+
SloppyBlockFunctionMap* sloppy_block_function_map() {
return &sloppy_block_function_map_;
}
- // Resolve and fill in the allocation information for all variables
- // in this scopes. Must be called *after* all scopes have been
- // processed (parsed) to ensure that unresolved variables can be
- // resolved properly.
- //
- // In the case of code compiled and run using 'eval', the context
- // parameter is the context in which eval was called. In all other
- // cases the context parameter is an empty handle.
- void AllocateVariables(ParseInfo* info, AstNodeFactory* factory);
+ // Compute top scope and allocate variables. For lazy compilation the top
+ // scope only contains the single lazily compiled function, so this
+ // doesn't re-allocate variables repeatedly.
+ static void Analyze(ParseInfo* info, AnalyzeMode mode);
// To be called during parsing. Do just enough scope analysis that we can
// discard the Scope for lazily compiled functions. In particular, this
// records variables which cannot be resolved inside the Scope (we don't yet
// know what they will resolve to since the outer Scopes are incomplete) and
// migrates them into migrate_to.
- void AnalyzePartially(DeclarationScope* migrate_to,
- AstNodeFactory* ast_node_factory);
+ void AnalyzePartially(AstNodeFactory* ast_node_factory);
Handle<StringSet> CollectNonLocals(ParseInfo* info,
Handle<StringSet> non_locals);
@@ -868,9 +816,21 @@ class DeclarationScope : public Scope {
void AllocateParameterLocals();
void AllocateReceiver();
+ void ResetAfterPreparsing(AstValueFactory* ast_value_factory, bool aborted);
+
private:
void AllocateParameter(Variable* var, int index);
+ // Resolve and fill in the allocation information for all variables
+ // in this scopes. Must be called *after* all scopes have been
+ // processed (parsed) to ensure that unresolved variables can be
+ // resolved properly.
+ //
+ // In the case of code compiled and run using 'eval', the context
+ // parameter is the context in which eval was called. In all other
+ // cases the context parameter is an empty handle.
+ void AllocateVariables(ParseInfo* info, AnalyzeMode mode);
+
void SetDefaults();
// If the scope is a function scope, this is the function kind.
@@ -882,6 +842,8 @@ class DeclarationScope : public Scope {
// This scope's outer context is an asm module.
bool asm_function_ : 1;
bool force_eager_compilation_ : 1;
+ // This function scope has a rest parameter.
+ bool has_rest_ : 1;
// This scope has a parameter called "arguments".
bool has_arguments_parameter_ : 1;
// This scope uses "super" property ('super.foo').
@@ -889,9 +851,6 @@ class DeclarationScope : public Scope {
// Info about the parameter list of a function.
int arity_;
- int rest_index_;
- // Compiler-allocated (user-invisible) temporaries.
- ZoneList<Variable*> temps_;
// Parameter list in source order.
ZoneList<Variable*> params_;
// Map of function names to lists of functions defined in sloppy blocks
@@ -910,7 +869,14 @@ class DeclarationScope : public Scope {
class ModuleScope final : public DeclarationScope {
public:
- ModuleScope(Zone* zone, DeclarationScope* script_scope,
+ ModuleScope(DeclarationScope* script_scope,
+ AstValueFactory* ast_value_factory);
+
+ // Deserialization.
+ // The generated ModuleDescriptor does not preserve all information. In
+ // particular, its module_requests map will be empty because we no longer need
+ // the map after parsing.
+ ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
AstValueFactory* ast_value_factory);
ModuleDescriptor* module() const {
diff --git a/deps/v8/src/ast/variables.cc b/deps/v8/src/ast/variables.cc
index 0541f942f0..cc269cd0c7 100644
--- a/deps/v8/src/ast/variables.cc
+++ b/deps/v8/src/ast/variables.cc
@@ -13,36 +13,20 @@ namespace internal {
// ----------------------------------------------------------------------------
// Implementation Variable.
-const char* Variable::Mode2String(VariableMode mode) {
- switch (mode) {
- case VAR: return "VAR";
- case CONST_LEGACY: return "CONST_LEGACY";
- case LET: return "LET";
- case CONST: return "CONST";
- case DYNAMIC: return "DYNAMIC";
- case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
- case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
- case TEMPORARY: return "TEMPORARY";
- }
- UNREACHABLE();
- return NULL;
-}
-
Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
- Kind kind, InitializationFlag initialization_flag,
+ VariableKind kind, InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag)
: scope_(scope),
name_(name),
- mode_(mode),
- kind_(kind),
- location_(VariableLocation::UNALLOCATED),
+ local_if_not_shadowed_(nullptr),
index_(-1),
initializer_position_(kNoSourcePosition),
- local_if_not_shadowed_(NULL),
- force_context_allocation_(false),
- is_used_(false),
- initialization_flag_(initialization_flag),
- maybe_assigned_(maybe_assigned_flag) {
+ bit_field_(MaybeAssignedFlagField::encode(maybe_assigned_flag) |
+ InitializationFlagField::encode(initialization_flag) |
+ VariableModeField::encode(mode) | IsUsedField::encode(false) |
+ ForceContextAllocationField::encode(false) |
+ LocationField::encode(VariableLocation::UNALLOCATED) |
+ VariableKindField::encode(kind)) {
// Var declared variables never need initialization.
DCHECK(!(mode == VAR && initialization_flag == kNeedsInitialization));
}
@@ -51,8 +35,8 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
bool Variable::IsGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
// activation frame.
- return (IsDynamicVariableMode(mode_) ||
- (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_))) &&
+ return (IsDynamicVariableMode(mode()) ||
+ (IsDeclaredVariableMode(mode()) && !IsLexicalVariableMode(mode()))) &&
scope_ != NULL && scope_->is_script_scope();
}
@@ -60,17 +44,10 @@ bool Variable::IsGlobalObjectProperty() const {
bool Variable::IsStaticGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
// activation frame.
- return (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_)) &&
+ return (IsDeclaredVariableMode(mode()) && !IsLexicalVariableMode(mode())) &&
scope_ != NULL && scope_->is_script_scope();
}
-int Variable::CompareIndex(Variable* const* v, Variable* const* w) {
- int x = (*v)->index();
- int y = (*w)->index();
- // Consider sorting them according to type as well?
- return x - y;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index f1f63b8a14..5bc7869646 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -6,7 +6,8 @@
#define V8_AST_VARIABLES_H_
#include "src/ast/ast-value-factory.h"
-#include "src/zone.h"
+#include "src/globals.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -17,15 +18,10 @@ namespace internal {
// after binding and variable allocation.
class Variable final : public ZoneObject {
public:
- enum Kind { NORMAL, FUNCTION, THIS, ARGUMENTS };
-
- Variable(Scope* scope, const AstRawString* name, VariableMode mode, Kind kind,
- InitializationFlag initialization_flag,
+ Variable(Scope* scope, const AstRawString* name, VariableMode mode,
+ VariableKind kind, InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
- // Printing support
- static const char* Mode2String(VariableMode mode);
-
// The source code for an eval() call may refer to a variable that is
// in an outer scope about which we don't know anything (it may not
// be the script scope). scope() is NULL in that case. Currently the
@@ -38,51 +34,56 @@ class Variable final : public ZoneObject {
Handle<String> name() const { return name_->string(); }
const AstRawString* raw_name() const { return name_; }
- VariableMode mode() const { return mode_; }
+ VariableMode mode() const { return VariableModeField::decode(bit_field_); }
bool has_forced_context_allocation() const {
- return force_context_allocation_;
+ return ForceContextAllocationField::decode(bit_field_);
}
void ForceContextAllocation() {
- DCHECK(IsUnallocated() || IsContextSlot());
- force_context_allocation_ = true;
+ DCHECK(IsUnallocated() || IsContextSlot() ||
+ location() == VariableLocation::MODULE);
+ bit_field_ = ForceContextAllocationField::update(bit_field_, true);
+ }
+ bool is_used() { return IsUsedField::decode(bit_field_); }
+ void set_is_used() { bit_field_ = IsUsedField::update(bit_field_, true); }
+ MaybeAssignedFlag maybe_assigned() const {
+ return MaybeAssignedFlagField::decode(bit_field_);
+ }
+ void set_maybe_assigned() {
+ bit_field_ = MaybeAssignedFlagField::update(bit_field_, kMaybeAssigned);
}
- bool is_used() { return is_used_; }
- void set_is_used() { is_used_ = true; }
- MaybeAssignedFlag maybe_assigned() const { return maybe_assigned_; }
- void set_maybe_assigned() { maybe_assigned_ = kMaybeAssigned; }
int initializer_position() { return initializer_position_; }
void set_initializer_position(int pos) { initializer_position_ = pos; }
bool IsUnallocated() const {
- return location_ == VariableLocation::UNALLOCATED;
+ return location() == VariableLocation::UNALLOCATED;
}
- bool IsParameter() const { return location_ == VariableLocation::PARAMETER; }
- bool IsStackLocal() const { return location_ == VariableLocation::LOCAL; }
+ bool IsParameter() const { return location() == VariableLocation::PARAMETER; }
+ bool IsStackLocal() const { return location() == VariableLocation::LOCAL; }
bool IsStackAllocated() const { return IsParameter() || IsStackLocal(); }
- bool IsContextSlot() const { return location_ == VariableLocation::CONTEXT; }
- bool IsGlobalSlot() const { return location_ == VariableLocation::GLOBAL; }
- bool IsUnallocatedOrGlobalSlot() const {
- return IsUnallocated() || IsGlobalSlot();
- }
- bool IsLookupSlot() const { return location_ == VariableLocation::LOOKUP; }
+ bool IsContextSlot() const { return location() == VariableLocation::CONTEXT; }
+ bool IsLookupSlot() const { return location() == VariableLocation::LOOKUP; }
bool IsGlobalObjectProperty() const;
bool IsStaticGlobalObjectProperty() const;
- bool is_dynamic() const { return IsDynamicVariableMode(mode_); }
- bool is_const_mode() const { return IsImmutableVariableMode(mode_); }
+ bool is_dynamic() const { return IsDynamicVariableMode(mode()); }
bool binding_needs_init() const {
- DCHECK(initialization_flag_ != kNeedsInitialization ||
- IsLexicalVariableMode(mode_));
- return initialization_flag_ == kNeedsInitialization;
+ DCHECK(initialization_flag() != kNeedsInitialization ||
+ IsLexicalVariableMode(mode()));
+ return initialization_flag() == kNeedsInitialization;
+ }
+ bool throw_on_const_assignment(LanguageMode language_mode) const {
+ return kind() != SLOPPY_FUNCTION_NAME_VARIABLE || is_strict(language_mode);
}
- bool is_function() const { return kind_ == FUNCTION; }
- bool is_this() const { return kind_ == THIS; }
- bool is_arguments() const { return kind_ == ARGUMENTS; }
+ bool is_function() const { return kind() == FUNCTION_VARIABLE; }
+ bool is_this() const { return kind() == THIS_VARIABLE; }
+ bool is_sloppy_function_name() const {
+ return kind() == SLOPPY_FUNCTION_NAME_VARIABLE;
+ }
Variable* local_if_not_shadowed() const {
- DCHECK(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
+ DCHECK(mode() == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
return local_if_not_shadowed_;
}
@@ -90,40 +91,61 @@ class Variable final : public ZoneObject {
local_if_not_shadowed_ = local;
}
- VariableLocation location() const { return location_; }
- int index() const { return index_; }
+ VariableLocation location() const {
+ return LocationField::decode(bit_field_);
+ }
+ VariableKind kind() const { return VariableKindField::decode(bit_field_); }
InitializationFlag initialization_flag() const {
- return initialization_flag_;
+ return InitializationFlagField::decode(bit_field_);
+ }
+
+ int index() const { return index_; }
+
+ bool IsExport() const {
+ DCHECK(location() == VariableLocation::MODULE);
+ return index() == 0;
}
void AllocateTo(VariableLocation location, int index) {
- DCHECK(IsUnallocated() || (location_ == location && index_ == index));
- location_ = location;
+ DCHECK(IsUnallocated() ||
+ (this->location() == location && this->index() == index));
+ bit_field_ = LocationField::update(bit_field_, location);
+ DCHECK_EQ(location, this->location());
index_ = index;
}
- static int CompareIndex(Variable* const* v, Variable* const* w);
+ static InitializationFlag DefaultInitializationFlag(VariableMode mode) {
+ DCHECK(IsDeclaredVariableMode(mode));
+ return mode == VAR ? kCreatedInitialized : kNeedsInitialization;
+ }
private:
Scope* scope_;
const AstRawString* name_;
- VariableMode mode_;
- Kind kind_;
- VariableLocation location_;
- int index_;
- int initializer_position_;
// If this field is set, this variable references the stored locally bound
// variable, but it might be shadowed by variable bindings introduced by
// sloppy 'eval' calls between the reference scope (inclusive) and the
// binding scope (exclusive).
Variable* local_if_not_shadowed_;
-
- // Usage info.
- bool force_context_allocation_; // set by variable resolver
- bool is_used_;
- InitializationFlag initialization_flag_;
- MaybeAssignedFlag maybe_assigned_;
+ int index_;
+ int initializer_position_;
+ uint16_t bit_field_;
+
+ class VariableModeField : public BitField16<VariableMode, 0, 3> {};
+ class VariableKindField
+ : public BitField16<VariableKind, VariableModeField::kNext, 3> {};
+ class LocationField
+ : public BitField16<VariableLocation, VariableKindField::kNext, 3> {};
+ class ForceContextAllocationField
+ : public BitField16<bool, LocationField::kNext, 1> {};
+ class IsUsedField
+ : public BitField16<bool, ForceContextAllocationField::kNext, 1> {};
+ class InitializationFlagField
+ : public BitField16<InitializationFlag, IsUsedField::kNext, 2> {};
+ class MaybeAssignedFlagField
+ : public BitField16<MaybeAssignedFlag, InitializationFlagField::kNext,
+ 2> {};
};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/background-parsing-task.cc b/deps/v8/src/background-parsing-task.cc
index 5df46c82b9..83075c1eec 100644
--- a/deps/v8/src/background-parsing-task.cc
+++ b/deps/v8/src/background-parsing-task.cc
@@ -3,11 +3,19 @@
// found in the LICENSE file.
#include "src/background-parsing-task.h"
+
#include "src/debug/debug.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
+void StreamedSource::Release() {
+ parser.reset();
+ info.reset();
+ zone.reset();
+}
+
BackgroundParsingTask::BackgroundParsingTask(
StreamedSource* source, ScriptCompiler::CompileOptions options,
int stack_size, Isolate* isolate)
@@ -42,9 +50,8 @@ BackgroundParsingTask::BackgroundParsingTask(
// Parser needs to stay alive for finalizing the parsing on the main
// thread.
source_->parser.reset(new Parser(source_->info.get()));
- source_->parser->DeserializeScopeChain(
- source_->info.get(), Handle<Context>::null(),
- Scope::DeserializationMode::kDeserializeOffHeap);
+ source_->parser->DeserializeScopeChain(source_->info.get(),
+ MaybeHandle<ScopeInfo>());
}
@@ -55,8 +62,7 @@ void BackgroundParsingTask::Run() {
// Reset the stack limit of the parser to reflect correctly that we're on a
// background thread.
- uintptr_t stack_limit =
- reinterpret_cast<uintptr_t>(&stack_limit) - stack_size_ * KB;
+ uintptr_t stack_limit = GetCurrentStackPosition() - stack_size_ * KB;
source_->parser->set_stack_limit(stack_limit);
// Nullify the Isolate temporarily so that the background parser doesn't
diff --git a/deps/v8/src/background-parsing-task.h b/deps/v8/src/background-parsing-task.h
index 1bf9d74121..d7fe6ba8db 100644
--- a/deps/v8/src/background-parsing-task.h
+++ b/deps/v8/src/background-parsing-task.h
@@ -7,15 +7,16 @@
#include <memory>
+#include "include/v8.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
-#include "src/compiler.h"
#include "src/parsing/parse-info.h"
-#include "src/parsing/parser.h"
+#include "src/unicode-cache.h"
namespace v8 {
namespace internal {
+class Parser;
class ScriptData;
// Internal representation of v8::ScriptCompiler::StreamedSource. Contains all
@@ -26,6 +27,8 @@ struct StreamedSource {
ScriptCompiler::StreamedSource::Encoding encoding)
: source_stream(source_stream), encoding(encoding) {}
+ void Release();
+
// Internal implementation of v8::ScriptCompiler::StreamedSource.
std::unique_ptr<ScriptCompiler::ExternalSourceStream> source_stream;
ScriptCompiler::StreamedSource::Encoding encoding;
@@ -39,10 +42,9 @@ struct StreamedSource {
std::unique_ptr<ParseInfo> info;
std::unique_ptr<Parser> parser;
- private:
- // Prevent copying. Not implemented.
- StreamedSource(const StreamedSource&);
- StreamedSource& operator=(const StreamedSource&);
+ // Prevent copying.
+ StreamedSource(const StreamedSource&) = delete;
+ StreamedSource& operator=(const StreamedSource&) = delete;
};
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index df47eb82b7..6b7da16ad5 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -20,7 +20,6 @@ namespace internal {
V(kArgumentsObjectValueInATestContext, \
"Arguments object value in a test context") \
V(kArrayIndexConstantValueTooBig, "Array index constant value too big") \
- V(kAssignmentToArguments, "Assignment to arguments") \
V(kAssignmentToLetVariableBeforeInitialization, \
"Assignment to let variable before initialization") \
V(kAssignmentToLOOKUPVariable, "Assignment to LOOKUP variable") \
@@ -64,6 +63,8 @@ namespace internal {
V(kEval, "eval") \
V(kExpectedAllocationSite, "Expected allocation site") \
V(kExpectedBooleanValue, "Expected boolean value") \
+ V(kExpectedFixedDoubleArrayMap, \
+ "Expected a fixed double array map in fast shallow clone array literal") \
V(kExpectedFunctionObject, "Expected function object in register") \
V(kExpectedHeapNumber, "Expected HeapNumber") \
V(kExpectedJSReceiver, "Expected object to have receiver type") \
@@ -242,10 +243,6 @@ namespace internal {
V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
"Unexpected type for RegExp data, FixedArray expected") \
V(kUnexpectedValue, "Unexpected value") \
- V(kUnsupportedConstCompoundAssignment, \
- "Unsupported const compound assignment") \
- V(kUnsupportedCountOperationWithConst, \
- "Unsupported count operation with const") \
V(kUnsupportedDoubleImmediate, "Unsupported double immediate") \
V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment") \
V(kUnsupportedLookupSlotInDeclaration, \
@@ -268,9 +265,7 @@ namespace internal {
V(kWrongArgumentCountForInvokeIntrinsic, \
"Wrong number of arguments for intrinsic") \
V(kShouldNotDirectlyEnterOsrFunction, \
- "Should not directly enter OSR-compiled function") \
- V(kConversionFromImpossibleValue, \
- "Reached conversion from value with empty type (i.e., impossible type)")
+ "Should not directly enter OSR-compiled function")
#define ERROR_MESSAGES_CONSTANTS(C, T) C,
enum BailoutReason {
diff --git a/deps/v8/src/base.isolate b/deps/v8/src/base.isolate
index a9cfc89218..c457f001fe 100644
--- a/deps/v8/src/base.isolate
+++ b/deps/v8/src/base.isolate
@@ -4,7 +4,6 @@
{
'includes': [
'../third_party/icu/icu.isolate',
- '../gypfiles/config/win/msvs_dependencies.isolate',
],
'conditions': [
['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
@@ -15,13 +14,6 @@
],
},
}],
- ['OS=="mac" and asan==1', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib',
- ],
- },
- }],
['tsan==1', {
'variables': {
'files': [
diff --git a/deps/v8/src/base/accounting-allocator.cc b/deps/v8/src/base/accounting-allocator.cc
deleted file mode 100644
index c56f037c04..0000000000
--- a/deps/v8/src/base/accounting-allocator.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/base/accounting-allocator.h"
-
-#include <cstdlib>
-
-#if V8_LIBC_BIONIC
-#include <malloc.h> // NOLINT
-#endif
-
-namespace v8 {
-namespace base {
-
-void* AccountingAllocator::Allocate(size_t bytes) {
- void* memory = malloc(bytes);
- if (memory) {
- AtomicWord current =
- NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
- AtomicWord max = NoBarrier_Load(&max_memory_usage_);
- while (current > max) {
- max = NoBarrier_CompareAndSwap(&max_memory_usage_, max, current);
- }
- }
- return memory;
-}
-
-void AccountingAllocator::Free(void* memory, size_t bytes) {
- free(memory);
- NoBarrier_AtomicIncrement(&current_memory_usage_,
- -static_cast<AtomicWord>(bytes));
-}
-
-size_t AccountingAllocator::GetCurrentMemoryUsage() const {
- return NoBarrier_Load(&current_memory_usage_);
-}
-
-size_t AccountingAllocator::GetMaxMemoryUsage() const {
- return NoBarrier_Load(&max_memory_usage_);
-}
-
-} // namespace base
-} // namespace v8
diff --git a/deps/v8/src/base/accounting-allocator.h b/deps/v8/src/base/accounting-allocator.h
deleted file mode 100644
index 4e1baf18d4..0000000000
--- a/deps/v8/src/base/accounting-allocator.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_BASE_ACCOUNTING_ALLOCATOR_H_
-#define V8_BASE_ACCOUNTING_ALLOCATOR_H_
-
-#include "src/base/atomicops.h"
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace base {
-
-class AccountingAllocator {
- public:
- AccountingAllocator() = default;
- virtual ~AccountingAllocator() = default;
-
- // Returns nullptr on failed allocation.
- virtual void* Allocate(size_t bytes);
- virtual void Free(void* memory, size_t bytes);
-
- size_t GetCurrentMemoryUsage() const;
- size_t GetMaxMemoryUsage() const;
-
- private:
- AtomicWord current_memory_usage_ = 0;
- AtomicWord max_memory_usage_ = 0;
-
- DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
-};
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ACCOUNTING_ALLOCATOR_H_
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index e19385dcb1..31db603bf9 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -72,6 +72,22 @@ class AtomicValue {
cast_helper<T>::to_storage_type(old_value);
}
+ V8_INLINE void SetBits(T bits, T mask) {
+ DCHECK_EQ(bits & ~mask, 0);
+ T old_value;
+ T new_value;
+ do {
+ old_value = Value();
+ new_value = (old_value & ~mask) | bits;
+ } while (!TrySetValue(old_value, new_value));
+ }
+
+ V8_INLINE void SetBit(int bit) {
+ SetBits(static_cast<T>(1) << bit, static_cast<T>(1) << bit);
+ }
+
+ V8_INLINE void ClearBit(int bit) { SetBits(0, 1 << bit); }
+
V8_INLINE void SetValue(T new_value) {
base::Release_Store(&value_, cast_helper<T>::to_storage_type(new_value));
}
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index e033134feb..d113c2a0fc 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -55,13 +55,21 @@
defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7__)
# define CAN_USE_ARMV7_INSTRUCTIONS 1
+#ifdef __ARM_ARCH_EXT_IDIV__
+#define CAN_USE_SUDIV 1
+#endif
# ifndef CAN_USE_VFP3_INSTRUCTIONS
-# define CAN_USE_VFP3_INSTRUCTIONS
+#define CAN_USE_VFP3_INSTRUCTIONS 1
# endif
#endif
#if defined(__ARM_ARCH_8A__)
+#define CAN_USE_ARMV7_INSTRUCTIONS 1
+#define CAN_USE_SUDIV 1
# define CAN_USE_ARMV8_INSTRUCTIONS 1
+#ifndef CAN_USE_VFP3_INSTRUCTIONS
+#define CAN_USE_VFP3_INSTRUCTIONS 1
+#endif
#endif
@@ -196,11 +204,6 @@
// Number of bits to represent the page size for paged spaces. The value of 20
// gives 1Mb bytes per page.
-#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
-// Bump up for Power Linux due to larger (64K) page size.
-const int kPageSizeBits = 22;
-#else
-const int kPageSizeBits = 20;
-#endif
+const int kPageSizeBits = 19;
#endif // V8_BASE_BUILD_CONFIG_H_
diff --git a/deps/v8/src/base/hashmap-entry.h b/deps/v8/src/base/hashmap-entry.h
new file mode 100644
index 0000000000..629e734088
--- /dev/null
+++ b/deps/v8/src/base/hashmap-entry.h
@@ -0,0 +1,54 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_HASHMAP_ENTRY_H_
+#define V8_BASE_HASHMAP_ENTRY_H_
+
+#include <cstdint>
+
+namespace v8 {
+namespace base {
+
+// HashMap entries are (key, value, hash) triplets, with a boolean indicating if
+// they are an empty entry. Some clients may not need to use the value slot
+// (e.g. implementers of sets, where the key is the value).
+template <typename Key, typename Value>
+struct TemplateHashMapEntry {
+ Key key;
+ Value value;
+ uint32_t hash; // The full hash value for key
+
+ TemplateHashMapEntry(Key key, Value value, uint32_t hash)
+ : key(key), value(value), hash(hash), exists_(true) {}
+
+ bool exists() const { return exists_; }
+
+ void clear() { exists_ = false; }
+
+ private:
+ bool exists_;
+};
+
+// Specialization for pointer-valued keys
+template <typename Key, typename Value>
+struct TemplateHashMapEntry<Key*, Value> {
+ Key* key;
+ Value value;
+ uint32_t hash; // The full hash value for key
+
+ TemplateHashMapEntry(Key* key, Value value, uint32_t hash)
+ : key(key), value(value), hash(hash) {}
+
+ bool exists() const { return key != nullptr; }
+
+ void clear() { key = nullptr; }
+};
+
+// TODO(leszeks): There could be a specialisation for void values (e.g. for
+// sets), which omits the value field
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_HASHMAP_ENTRY_H_
diff --git a/deps/v8/src/base/hashmap.h b/deps/v8/src/base/hashmap.h
index e3c47de6d7..54038c5ef3 100644
--- a/deps/v8/src/base/hashmap.h
+++ b/deps/v8/src/base/hashmap.h
@@ -12,6 +12,7 @@
#include <stdlib.h>
#include "src/base/bits.h"
+#include "src/base/hashmap-entry.h"
#include "src/base/logging.h"
namespace v8 {
@@ -23,10 +24,10 @@ class DefaultAllocationPolicy {
V8_INLINE static void Delete(void* p) { free(p); }
};
-template <class AllocationPolicy>
+template <typename Key, typename Value, class MatchFun, class AllocationPolicy>
class TemplateHashMapImpl {
public:
- typedef bool (*MatchFun)(void* key1, void* key2);
+ typedef TemplateHashMapEntry<Key, Value> Entry;
// The default capacity. This is used by the call sites which want
// to pass in a non-default AllocationPolicy but want to use the
@@ -35,38 +36,36 @@ class TemplateHashMapImpl {
// initial_capacity is the size of the initial hash map;
// it must be a power of 2 (and thus must not be 0).
- TemplateHashMapImpl(MatchFun match,
- uint32_t capacity = kDefaultHashMapCapacity,
+ TemplateHashMapImpl(uint32_t capacity = kDefaultHashMapCapacity,
+ MatchFun match = MatchFun(),
AllocationPolicy allocator = AllocationPolicy());
~TemplateHashMapImpl();
- // HashMap entries are (key, value, hash) triplets.
- // Some clients may not need to use the value slot
- // (e.g. implementers of sets, where the key is the value).
- struct Entry {
- void* key;
- void* value;
- uint32_t hash; // The full hash value for key
- };
+ // If an entry with matching key is found, returns that entry.
+ // Otherwise, nullptr is returned.
+ Entry* Lookup(const Key& key, uint32_t hash) const;
// If an entry with matching key is found, returns that entry.
- // Otherwise, NULL is returned.
- Entry* Lookup(void* key, uint32_t hash) const;
+ // If no matching entry is found, a new entry is inserted with
+ // corresponding key, key hash, and default initialized value.
+ Entry* LookupOrInsert(const Key& key, uint32_t hash,
+ AllocationPolicy allocator = AllocationPolicy());
// If an entry with matching key is found, returns that entry.
// If no matching entry is found, a new entry is inserted with
- // corresponding key, key hash, and NULL value.
- Entry* LookupOrInsert(void* key, uint32_t hash,
+ // corresponding key, key hash, and value created by func.
+ template <typename Func>
+ Entry* LookupOrInsert(const Key& key, uint32_t hash, const Func& value_func,
AllocationPolicy allocator = AllocationPolicy());
- Entry* InsertNew(void* key, uint32_t hash,
+ Entry* InsertNew(const Key& key, uint32_t hash,
AllocationPolicy allocator = AllocationPolicy());
// Removes the entry with matching key.
// It returns the value of the deleted entry
// or null if there is no value for such key.
- void* Remove(void* key, uint32_t hash);
+ Value Remove(const Key& key, uint32_t hash);
// Empties the hash map (occupancy() == 0).
void Clear();
@@ -81,97 +80,101 @@ class TemplateHashMapImpl {
// Iteration
//
- // for (Entry* p = map.Start(); p != NULL; p = map.Next(p)) {
+ // for (Entry* p = map.Start(); p != nullptr; p = map.Next(p)) {
// ...
// }
//
// If entries are inserted during iteration, the effect of
// calling Next() is undefined.
Entry* Start() const;
- Entry* Next(Entry* p) const;
-
- // Some match functions defined for convenience.
- static bool PointersMatch(void* key1, void* key2) { return key1 == key2; }
+ Entry* Next(Entry* entry) const;
private:
- MatchFun match_;
Entry* map_;
uint32_t capacity_;
uint32_t occupancy_;
+ // TODO(leszeks): This takes up space even if it has no state, maybe replace
+ // with something that does the empty base optimisation e.g. std::tuple
+ MatchFun match_;
Entry* map_end() const { return map_ + capacity_; }
- Entry* Probe(void* key, uint32_t hash) const;
+ Entry* Probe(const Key& key, uint32_t hash) const;
+ Entry* FillEmptyEntry(Entry* entry, const Key& key, const Value& value,
+ uint32_t hash,
+ AllocationPolicy allocator = AllocationPolicy());
void Initialize(uint32_t capacity, AllocationPolicy allocator);
void Resize(AllocationPolicy allocator);
};
-
-typedef TemplateHashMapImpl<DefaultAllocationPolicy> HashMap;
-
-template <class AllocationPolicy>
-TemplateHashMapImpl<AllocationPolicy>::TemplateHashMapImpl(
- MatchFun match, uint32_t initial_capacity, AllocationPolicy allocator) {
- match_ = match;
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::
+ TemplateHashMapImpl(uint32_t initial_capacity, MatchFun match,
+ AllocationPolicy allocator)
+ : match_(match) {
Initialize(initial_capacity, allocator);
}
-template <class AllocationPolicy>
-TemplateHashMapImpl<AllocationPolicy>::~TemplateHashMapImpl() {
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+TemplateHashMapImpl<Key, Value, MatchFun,
+ AllocationPolicy>::~TemplateHashMapImpl() {
AllocationPolicy::Delete(map_);
}
-template <class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::Lookup(void* key, uint32_t hash) const {
- Entry* p = Probe(key, hash);
- return p->key != NULL ? p : NULL;
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Lookup(
+ const Key& key, uint32_t hash) const {
+ Entry* entry = Probe(key, hash);
+ return entry->exists() ? entry : nullptr;
}
-template <class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::LookupOrInsert(
- void* key, uint32_t hash, AllocationPolicy allocator) {
- // Find a matching entry.
- Entry* p = Probe(key, hash);
- if (p->key != NULL) {
- return p;
- }
-
- return InsertNew(key, hash, allocator);
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::LookupOrInsert(
+ const Key& key, uint32_t hash, AllocationPolicy allocator) {
+ return LookupOrInsert(key, hash, []() { return Value(); }, allocator);
}
-template <class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::InsertNew(void* key, uint32_t hash,
- AllocationPolicy allocator) {
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+template <typename Func>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::LookupOrInsert(
+ const Key& key, uint32_t hash, const Func& value_func,
+ AllocationPolicy allocator) {
// Find a matching entry.
- Entry* p = Probe(key, hash);
- DCHECK(p->key == NULL);
-
- // No entry found; insert one.
- p->key = key;
- p->value = NULL;
- p->hash = hash;
- occupancy_++;
-
- // Grow the map if we reached >= 80% occupancy.
- if (occupancy_ + occupancy_ / 4 >= capacity_) {
- Resize(allocator);
- p = Probe(key, hash);
+ Entry* entry = Probe(key, hash);
+ if (entry->exists()) {
+ return entry;
}
- return p;
+ return FillEmptyEntry(entry, key, value_func(), hash, allocator);
}
-template <class AllocationPolicy>
-void* TemplateHashMapImpl<AllocationPolicy>::Remove(void* key, uint32_t hash) {
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::InsertNew(
+ const Key& key, uint32_t hash, AllocationPolicy allocator) {
+ Entry* entry = Probe(key, hash);
+ return FillEmptyEntry(entry, key, Value(), hash, allocator);
+}
+
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+Value TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Remove(
+ const Key& key, uint32_t hash) {
// Lookup the entry for the key to remove.
Entry* p = Probe(key, hash);
- if (p->key == NULL) {
+ if (!p->exists()) {
// Key not found nothing to remove.
- return NULL;
+ return nullptr;
}
- void* value = p->value;
+ Value value = p->value;
// To remove an entry we need to ensure that it does not create an empty
// entry that will cause the search for another entry to stop too soon. If all
// the entries between the entry to remove and the next empty slot have their
@@ -200,7 +203,7 @@ void* TemplateHashMapImpl<AllocationPolicy>::Remove(void* key, uint32_t hash) {
// All entries between p and q have their initial position between p and q
// and the entry p can be cleared without breaking the search for these
// entries.
- if (q->key == NULL) {
+ if (!q->exists()) {
break;
}
@@ -217,67 +220,92 @@ void* TemplateHashMapImpl<AllocationPolicy>::Remove(void* key, uint32_t hash) {
}
// Clear the entry which is allowed to en emptied.
- p->key = NULL;
+ p->clear();
occupancy_--;
return value;
}
-template <class AllocationPolicy>
-void TemplateHashMapImpl<AllocationPolicy>::Clear() {
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+void TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Clear() {
// Mark all entries as empty.
const Entry* end = map_end();
- for (Entry* p = map_; p < end; p++) {
- p->key = NULL;
+ for (Entry* entry = map_; entry < end; entry++) {
+ entry->clear();
}
occupancy_ = 0;
}
-template <class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::Start() const {
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Start() const {
return Next(map_ - 1);
}
-template <class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::Next(Entry* p) const {
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Next(
+ Entry* entry) const {
const Entry* end = map_end();
- DCHECK(map_ - 1 <= p && p < end);
- for (p++; p < end; p++) {
- if (p->key != NULL) {
- return p;
+ DCHECK(map_ - 1 <= entry && entry < end);
+ for (entry++; entry < end; entry++) {
+ if (entry->exists()) {
+ return entry;
}
}
- return NULL;
+ return nullptr;
}
-template <class AllocationPolicy>
-typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) const {
- DCHECK(key != NULL);
-
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Probe(
+ const Key& key, uint32_t hash) const {
DCHECK(base::bits::IsPowerOfTwo32(capacity_));
- Entry* p = map_ + (hash & (capacity_ - 1));
+ Entry* entry = map_ + (hash & (capacity_ - 1));
const Entry* end = map_end();
- DCHECK(map_ <= p && p < end);
+ DCHECK(map_ <= entry && entry < end);
DCHECK(occupancy_ < capacity_); // Guarantees loop termination.
- while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
- p++;
- if (p >= end) {
- p = map_;
+ while (entry->exists() && !match_(hash, entry->hash, key, entry->key)) {
+ entry++;
+ if (entry >= end) {
+ entry = map_;
}
}
- return p;
+ return entry;
}
-template <class AllocationPolicy>
-void TemplateHashMapImpl<AllocationPolicy>::Initialize(
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::FillEmptyEntry(
+ Entry* entry, const Key& key, const Value& value, uint32_t hash,
+ AllocationPolicy allocator) {
+ DCHECK(!entry->exists());
+
+ new (entry) Entry(key, value, hash);
+ occupancy_++;
+
+ // Grow the map if we reached >= 80% occupancy.
+ if (occupancy_ + occupancy_ / 4 >= capacity_) {
+ Resize(allocator);
+ entry = Probe(key, hash);
+ }
+
+ return entry;
+}
+
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+void TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Initialize(
uint32_t capacity, AllocationPolicy allocator) {
DCHECK(base::bits::IsPowerOfTwo32(capacity));
map_ = reinterpret_cast<Entry*>(allocator.New(capacity * sizeof(Entry)));
- if (map_ == NULL) {
+ if (map_ == nullptr) {
FATAL("Out of memory: HashMap::Initialize");
return;
}
@@ -285,8 +313,10 @@ void TemplateHashMapImpl<AllocationPolicy>::Initialize(
Clear();
}
-template <class AllocationPolicy>
-void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+void TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Resize(
+ AllocationPolicy allocator) {
Entry* map = map_;
uint32_t n = occupancy_;
@@ -294,10 +324,11 @@ void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
Initialize(capacity_ * 2, allocator);
// Rehash all current entries.
- for (Entry* p = map; n > 0; p++) {
- if (p->key != NULL) {
- Entry* entry = LookupOrInsert(p->key, p->hash, allocator);
- entry->value = p->value;
+ for (Entry* entry = map; n > 0; entry++) {
+ if (entry->exists()) {
+ Entry* new_entry = Probe(entry->key, entry->hash);
+ new_entry = FillEmptyEntry(new_entry, entry->key, entry->value,
+ entry->hash, allocator);
n--;
}
}
@@ -306,9 +337,83 @@ void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
AllocationPolicy::Delete(map);
}
+// Match function which compares hashes before executing a (potentially
+// expensive) key comparison.
+template <typename Key, typename MatchFun>
+struct HashEqualityThenKeyMatcher {
+ explicit HashEqualityThenKeyMatcher(MatchFun match) : match_(match) {}
+
+ bool operator()(uint32_t hash1, uint32_t hash2, const Key& key1,
+ const Key& key2) const {
+ return hash1 == hash2 && match_(key1, key2);
+ }
+
+ private:
+ MatchFun match_;
+};
+
+// Hashmap<void*, void*> which takes a custom key comparison function pointer.
+template <typename AllocationPolicy>
+class CustomMatcherTemplateHashMapImpl
+ : public TemplateHashMapImpl<
+ void*, void*,
+ HashEqualityThenKeyMatcher<void*, bool (*)(void*, void*)>,
+ AllocationPolicy> {
+ typedef TemplateHashMapImpl<
+ void*, void*, HashEqualityThenKeyMatcher<void*, bool (*)(void*, void*)>,
+ AllocationPolicy>
+ Base;
+
+ public:
+ typedef bool (*MatchFun)(void*, void*);
+
+ CustomMatcherTemplateHashMapImpl(
+ MatchFun match, uint32_t capacity = Base::kDefaultHashMapCapacity,
+ AllocationPolicy allocator = AllocationPolicy())
+ : Base(capacity, HashEqualityThenKeyMatcher<void*, MatchFun>(match),
+ allocator) {}
+};
+
+typedef CustomMatcherTemplateHashMapImpl<DefaultAllocationPolicy>
+ CustomMatcherHashMap;
+
+// Match function which compares keys directly by equality.
+template <typename Key>
+struct KeyEqualityMatcher {
+ bool operator()(uint32_t hash1, uint32_t hash2, const Key& key1,
+ const Key& key2) const {
+ return key1 == key2;
+ }
+};
+
+// Hashmap<void*, void*> which compares the key pointers directly.
+template <typename AllocationPolicy>
+class PointerTemplateHashMapImpl
+ : public TemplateHashMapImpl<void*, void*, KeyEqualityMatcher<void*>,
+ AllocationPolicy> {
+ typedef TemplateHashMapImpl<void*, void*, KeyEqualityMatcher<void*>,
+ AllocationPolicy>
+ Base;
+
+ public:
+ PointerTemplateHashMapImpl(uint32_t capacity = Base::kDefaultHashMapCapacity,
+ AllocationPolicy allocator = AllocationPolicy())
+ : Base(capacity, KeyEqualityMatcher<void*>(), allocator) {}
+};
+
+typedef PointerTemplateHashMapImpl<DefaultAllocationPolicy> HashMap;
+
// A hash map for pointer keys and values with an STL-like interface.
-template <class Key, class Value, class AllocationPolicy>
-class TemplateHashMap : private TemplateHashMapImpl<AllocationPolicy> {
+template <class Key, class Value, class MatchFun, class AllocationPolicy>
+class TemplateHashMap
+ : private TemplateHashMapImpl<void*, void*,
+ HashEqualityThenKeyMatcher<void*, MatchFun>,
+ AllocationPolicy> {
+ typedef TemplateHashMapImpl<void*, void*,
+ HashEqualityThenKeyMatcher<void*, MatchFun>,
+ AllocationPolicy>
+ Base;
+
public:
STATIC_ASSERT(sizeof(Key*) == sizeof(void*)); // NOLINT
STATIC_ASSERT(sizeof(Value*) == sizeof(void*)); // NOLINT
@@ -328,26 +433,22 @@ class TemplateHashMap : private TemplateHashMapImpl<AllocationPolicy> {
bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
private:
- Iterator(const TemplateHashMapImpl<AllocationPolicy>* map,
- typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry)
+ Iterator(const Base* map, typename Base::Entry* entry)
: map_(map), entry_(entry) {}
- const TemplateHashMapImpl<AllocationPolicy>* map_;
- typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry_;
+ const Base* map_;
+ typename Base::Entry* entry_;
friend class TemplateHashMap;
};
- TemplateHashMap(
- typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match,
- AllocationPolicy allocator = AllocationPolicy())
- : TemplateHashMapImpl<AllocationPolicy>(
- match,
- TemplateHashMapImpl<AllocationPolicy>::kDefaultHashMapCapacity,
- allocator) {}
+ TemplateHashMap(MatchFun match,
+ AllocationPolicy allocator = AllocationPolicy())
+ : Base(Base::kDefaultHashMapCapacity,
+ HashEqualityThenKeyMatcher<void*, MatchFun>(match), allocator) {}
Iterator begin() const { return Iterator(this, this->Start()); }
- Iterator end() const { return Iterator(this, NULL); }
+ Iterator end() const { return Iterator(this, nullptr); }
Iterator find(Key* key, bool insert = false,
AllocationPolicy allocator = AllocationPolicy()) {
if (insert) {
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 822c88704a..e3866173be 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -21,12 +21,6 @@
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
// a pointer by mistake, you will get a compile-time error.
-//
-// One caveat is that arraysize() doesn't accept any array of an
-// anonymous type or a type defined inside a function. In these rare
-// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below. This is
-// due to a limitation in C++'s template system. The limitation might
-// eventually be removed, but it hasn't happened yet.
#define arraysize(array) (sizeof(ArraySizeHelper(array)))
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index b75bc47e31..69c1816777 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -250,10 +250,7 @@ bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
}
-
-bool VirtualMemory::HasLazyCommits() {
- return false;
-}
+bool VirtualMemory::HasLazyCommits() { return true; }
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/basic-block-profiler.h b/deps/v8/src/basic-block-profiler.h
index 2e7ac9c804..c3c8b649dc 100644
--- a/deps/v8/src/basic-block-profiler.h
+++ b/deps/v8/src/basic-block-profiler.h
@@ -11,6 +11,7 @@
#include <vector>
#include "src/base/macros.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -58,15 +59,16 @@ class BasicBlockProfiler {
const DataList* data_list() { return &data_list_; }
private:
- friend std::ostream& operator<<(std::ostream& os,
- const BasicBlockProfiler& s);
+ friend V8_EXPORT_PRIVATE std::ostream& operator<<(
+ std::ostream& os, const BasicBlockProfiler& s);
DataList data_list_;
DISALLOW_COPY_AND_ASSIGN(BasicBlockProfiler);
};
-std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler& s);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const BasicBlockProfiler& s);
std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler::Data& s);
} // namespace internal
diff --git a/deps/v8/src/bit-vector.h b/deps/v8/src/bit-vector.h
index 3703f28d91..13f9e97c30 100644
--- a/deps/v8/src/bit-vector.h
+++ b/deps/v8/src/bit-vector.h
@@ -6,7 +6,7 @@
#define V8_DATAFLOW_H_
#include "src/allocation.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index cfea3208c9..62cebfb732 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -210,7 +210,6 @@ class Genesis BASE_EMBEDDED {
HARMONY_INPROGRESS(DECLARE_FEATURE_INITIALIZATION)
HARMONY_STAGED(DECLARE_FEATURE_INITIALIZATION)
HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
- DECLARE_FEATURE_INITIALIZATION(intl_extra, "")
#undef DECLARE_FEATURE_INITIALIZATION
Handle<JSFunction> InstallArrayBuffer(Handle<JSObject> target,
@@ -661,6 +660,16 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
// Create iterator-related meta-objects.
Handle<JSObject> iterator_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
+
+ Handle<JSFunction> iterator_prototype_iterator = SimpleCreateFunction(
+ isolate(), factory()->NewStringFromAsciiChecked("[Symbol.iterator]"),
+ Builtins::kIteratorPrototypeIterator, 0, true);
+ iterator_prototype_iterator->shared()->set_native(true);
+
+ JSObject::AddProperty(iterator_prototype, factory()->iterator_symbol(),
+ iterator_prototype_iterator, DONT_ENUM);
+ native_context()->set_initial_iterator_prototype(*iterator_prototype);
+
Handle<JSObject> generator_object_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
native_context()->set_initial_generator_prototype(
@@ -694,6 +703,12 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
SimpleInstallFunction(generator_object_prototype, "throw",
Builtins::kGeneratorPrototypeThrow, 1, true);
+ // Internal version of generator_prototype_next, flagged as non-native.
+ Handle<JSFunction> generator_next_internal =
+ SimpleCreateFunction(isolate(), factory()->next_string(),
+ Builtins::kGeneratorPrototypeNext, 1, true);
+ native_context()->set_generator_next_internal(*generator_next_internal);
+
// Create maps for generator functions and their prototypes. Store those
// maps in the native context. The "prototype" property descriptor is
// writable, non-enumerable, and non-configurable (as per ES6 draft
@@ -991,13 +1006,10 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
error_fun->shared()->set_construct_stub(
*isolate->builtins()->ErrorConstructor());
error_fun->shared()->set_length(1);
- error_fun->shared()->set_native(true);
if (context_index == Context::ERROR_FUNCTION_INDEX) {
- Handle<JSFunction> capture_stack_trace_fun =
- SimpleInstallFunction(error_fun, "captureStackTrace",
- Builtins::kErrorCaptureStackTrace, 2, false);
- capture_stack_trace_fun->shared()->set_native(true);
+ SimpleInstallFunction(error_fun, "captureStackTrace",
+ Builtins::kErrorCaptureStackTrace, 2, false);
}
InstallWithIntrinsicDefaultProto(isolate, error_fun, context_index);
@@ -1016,7 +1028,6 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
Handle<JSFunction> to_string_fun =
SimpleInstallFunction(prototype, factory->toString_string(),
Builtins::kErrorPrototypeToString, 0, true);
- to_string_fun->shared()->set_native(true);
isolate->native_context()->set_error_to_string(*to_string_fun);
} else {
DCHECK(context_index != Context::ERROR_FUNCTION_INDEX);
@@ -1206,6 +1217,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSObject::kHeaderSize, MaybeHandle<JSObject>(),
Builtins::kFunctionPrototypeHasInstance,
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY));
+ has_instance->shared()->set_builtin_function_id(kFunctionHasInstance);
native_context()->set_function_has_instance(*has_instance);
// Set the expected parameters for @@hasInstance to 1; required by builtin.
@@ -1304,6 +1316,15 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Install i18n fallback functions.
SimpleInstallFunction(prototype, "toLocaleString",
Builtins::kNumberPrototypeToLocaleString, 0, false);
+
+ // Install the Number functions.
+ SimpleInstallFunction(number_fun, "isFinite", Builtins::kNumberIsFinite, 1,
+ true);
+ SimpleInstallFunction(number_fun, "isInteger", Builtins::kNumberIsInteger,
+ 1, true);
+ SimpleInstallFunction(number_fun, "isNaN", Builtins::kNumberIsNaN, 1, true);
+ SimpleInstallFunction(number_fun, "isSafeInteger",
+ Builtins::kNumberIsSafeInteger, 1, true);
}
{ // --- B o o l e a n ---
@@ -1385,6 +1406,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
1, true);
SimpleInstallFunction(prototype, "charCodeAt",
Builtins::kStringPrototypeCharCodeAt, 1, true);
+ SimpleInstallFunction(prototype, "lastIndexOf",
+ Builtins::kStringPrototypeLastIndexOf, 1, false);
+ SimpleInstallFunction(prototype, "localeCompare",
+ Builtins::kStringPrototypeLocaleCompare, 1, true);
+ SimpleInstallFunction(prototype, "normalize",
+ Builtins::kStringPrototypeNormalize, 0, false);
+ SimpleInstallFunction(prototype, "substr", Builtins::kStringPrototypeSubstr,
+ 2, true);
+ SimpleInstallFunction(prototype, "substring",
+ Builtins::kStringPrototypeSubstring, 2, true);
SimpleInstallFunction(prototype, "toString",
Builtins::kStringPrototypeToString, 0, true);
SimpleInstallFunction(prototype, "trim", Builtins::kStringPrototypeTrim, 0,
@@ -1395,6 +1426,47 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kStringPrototypeTrimRight, 0, false);
SimpleInstallFunction(prototype, "valueOf",
Builtins::kStringPrototypeValueOf, 0, true);
+
+ Handle<JSFunction> iterator = SimpleCreateFunction(
+ isolate, factory->NewStringFromAsciiChecked("[Symbol.iterator]"),
+ Builtins::kStringPrototypeIterator, 0, true);
+ iterator->shared()->set_native(true);
+ JSObject::AddProperty(prototype, factory->iterator_symbol(), iterator,
+ static_cast<PropertyAttributes>(DONT_ENUM));
+ }
+
+ { // --- S t r i n g I t e r a t o r ---
+ Handle<JSObject> iterator_prototype(
+ native_context()->initial_iterator_prototype());
+
+ Handle<JSObject> string_iterator_prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ JSObject::ForceSetPrototype(string_iterator_prototype, iterator_prototype);
+
+ JSObject::AddProperty(
+ string_iterator_prototype, factory->to_string_tag_symbol(),
+ factory->NewStringFromAsciiChecked("String Iterator"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ Handle<JSFunction> next =
+ InstallFunction(string_iterator_prototype, "next", JS_OBJECT_TYPE,
+ JSObject::kHeaderSize, MaybeHandle<JSObject>(),
+ Builtins::kStringIteratorPrototypeNext);
+ next->shared()->set_builtin_function_id(kStringIteratorNext);
+
+ // Set the expected parameters for %StringIteratorPrototype%.next to 0 (not
+ // including the receiver), as required by the builtin.
+ next->shared()->set_internal_formal_parameter_count(0);
+
+ // Set the length for the function to satisfy ECMA-262.
+ next->shared()->set_length(0);
+
+ Handle<JSFunction> string_iterator_function = CreateFunction(
+ isolate, factory->NewStringFromAsciiChecked("StringIterator"),
+ JS_STRING_ITERATOR_TYPE, JSStringIterator::kSize,
+ string_iterator_prototype, Builtins::kIllegal);
+ native_context()->set_string_iterator_map(
+ string_iterator_function->initial_map());
}
{
@@ -1576,14 +1648,28 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- R e g E x p
// Builtin functions for RegExp.prototype.
+ Handle<JSObject> prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
Handle<JSFunction> regexp_fun =
InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal);
+ prototype, Builtins::kRegExpConstructor);
InstallWithIntrinsicDefaultProto(isolate, regexp_fun,
Context::REGEXP_FUNCTION_INDEX);
- regexp_fun->shared()->SetConstructStub(
- *isolate->builtins()->JSBuiltinsConstructStub());
+
+ Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate);
+ shared->SetConstructStub(*isolate->builtins()->RegExpConstructor());
+ shared->set_instance_class_name(isolate->heap()->RegExp_string());
+ shared->DontAdaptArguments();
+ shared->set_length(2);
+
+ // RegExp.prototype setup.
+
+ // Install the "constructor" property on the {prototype}.
+ JSObject::AddProperty(prototype, factory->constructor_string(), regexp_fun,
+ DONT_ENUM);
+
+ SimpleInstallFunction(prototype, "exec", Builtins::kRegExpPrototypeExec, 1,
+ true, DONT_ENUM);
DCHECK(regexp_fun->has_initial_map());
Handle<Map> initial_map(regexp_fun->initial_map());
@@ -1841,6 +1927,39 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallGetter(prototype, factory->byte_offset_string(),
Builtins::kDataViewPrototypeGetByteOffset, false,
kDataViewByteOffset);
+
+ SimpleInstallFunction(prototype, "getInt8",
+ Builtins::kDataViewPrototypeGetInt8, 1, false);
+ SimpleInstallFunction(prototype, "setInt8",
+ Builtins::kDataViewPrototypeSetInt8, 2, false);
+ SimpleInstallFunction(prototype, "getUint8",
+ Builtins::kDataViewPrototypeGetUint8, 1, false);
+ SimpleInstallFunction(prototype, "setUint8",
+ Builtins::kDataViewPrototypeSetUint8, 2, false);
+ SimpleInstallFunction(prototype, "getInt16",
+ Builtins::kDataViewPrototypeGetInt16, 1, false);
+ SimpleInstallFunction(prototype, "setInt16",
+ Builtins::kDataViewPrototypeSetInt16, 2, false);
+ SimpleInstallFunction(prototype, "getUint16",
+ Builtins::kDataViewPrototypeGetUint16, 1, false);
+ SimpleInstallFunction(prototype, "setUint16",
+ Builtins::kDataViewPrototypeSetUint16, 2, false);
+ SimpleInstallFunction(prototype, "getInt32",
+ Builtins::kDataViewPrototypeGetInt32, 1, false);
+ SimpleInstallFunction(prototype, "setInt32",
+ Builtins::kDataViewPrototypeSetInt32, 2, false);
+ SimpleInstallFunction(prototype, "getUint32",
+ Builtins::kDataViewPrototypeGetUint32, 1, false);
+ SimpleInstallFunction(prototype, "setUint32",
+ Builtins::kDataViewPrototypeSetUint32, 2, false);
+ SimpleInstallFunction(prototype, "getFloat32",
+ Builtins::kDataViewPrototypeGetFloat32, 1, false);
+ SimpleInstallFunction(prototype, "setFloat32",
+ Builtins::kDataViewPrototypeSetFloat32, 2, false);
+ SimpleInstallFunction(prototype, "getFloat64",
+ Builtins::kDataViewPrototypeGetFloat64, 1, false);
+ SimpleInstallFunction(prototype, "setFloat64",
+ Builtins::kDataViewPrototypeSetFloat64, 2, false);
}
{ // -- M a p
@@ -2178,7 +2297,6 @@ void Genesis::InitializeExperimentalGlobal() {
HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
- FEATURE_INITIALIZE_GLOBAL(intl_extra, "")
#undef FEATURE_INITIALIZE_GLOBAL
}
@@ -2424,17 +2542,12 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
native_context->set_object_to_string(*to_string);
}
- Handle<JSObject> iterator_prototype;
+ Handle<JSObject> iterator_prototype(
+ native_context->initial_iterator_prototype());
- {
- PrototypeIterator iter(native_context->generator_object_prototype_map());
- iter.Advance(); // Advance to the prototype of generator_object_prototype.
- iterator_prototype = Handle<JSObject>(iter.GetCurrent<JSObject>());
-
- JSObject::AddProperty(container,
- factory->InternalizeUtf8String("IteratorPrototype"),
- iterator_prototype, NONE);
- }
+ JSObject::AddProperty(container,
+ factory->InternalizeUtf8String("IteratorPrototype"),
+ iterator_prototype, NONE);
{
PrototypeIterator iter(native_context->sloppy_generator_function_map());
@@ -2687,8 +2800,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
container, "CallSite", JS_OBJECT_TYPE, JSObject::kHeaderSize,
isolate->initial_object_prototype(), Builtins::kUnsupportedThrower);
callsite_fun->shared()->DontAdaptArguments();
- callsite_fun->shared()->set_native(true);
-
isolate->native_context()->set_callsite_function(*callsite_fun);
{
@@ -2726,8 +2837,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSFunction> fun;
for (const FunctionInfo& info : infos) {
- fun = SimpleInstallFunction(proto, info.name, info.id, 0, true, attrs);
- fun->shared()->set_native(true);
+ SimpleInstallFunction(proto, info.name, info.id, 0, true, attrs);
}
Accessors::FunctionSetPrototype(callsite_fun, proto).Assert();
@@ -2740,6 +2850,7 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
Handle<JSObject> container) {
HandleScope scope(isolate);
+#ifdef V8_I18N_SUPPORT
#define INITIALIZE_FLAG(FLAG) \
{ \
Handle<String> name = \
@@ -2748,9 +2859,8 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
isolate->factory()->ToBoolean(FLAG), NONE); \
}
- INITIALIZE_FLAG(FLAG_intl_extra)
-
#undef INITIALIZE_FLAG
+#endif
}
@@ -2763,17 +2873,17 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(intl_extra)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_explicit_tailcalls)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_declarations)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_string_padding)
#ifdef V8_I18N_SUPPORT
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(datetime_format_to_parts)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(icu_case_mapping)
#endif
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_async_await)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_generators)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_trailing_commas)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@@ -3107,6 +3217,14 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
native_context()->set_global_eval_fun(*eval);
}
+ // Install Global.isFinite
+ SimpleInstallFunction(global_object, "isFinite", Builtins::kGlobalIsFinite, 1,
+ true, kGlobalIsFinite);
+
+ // Install Global.isNaN
+ SimpleInstallFunction(global_object, "isNaN", Builtins::kGlobalIsNaN, 1, true,
+ kGlobalIsNaN);
+
// Install Array.prototype.concat
{
Handle<JSFunction> array_constructor(native_context()->array_function());
@@ -3337,7 +3455,6 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
bool Genesis::InstallExperimentalNatives() {
- static const char* harmony_explicit_tailcalls_natives[] = {nullptr};
static const char* harmony_tailcalls_natives[] = {nullptr};
static const char* harmony_sharedarraybuffer_natives[] = {
"native harmony-atomics.js", NULL};
@@ -3350,7 +3467,6 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_regexp_named_captures_natives[] = {nullptr};
static const char* harmony_regexp_property_natives[] = {nullptr};
static const char* harmony_function_sent_natives[] = {nullptr};
- static const char* intl_extra_natives[] = {"native intl-extra.js", nullptr};
static const char* harmony_object_values_entries_natives[] = {nullptr};
static const char* harmony_object_own_property_descriptors_natives[] = {
nullptr};
@@ -3360,11 +3476,13 @@ bool Genesis::InstallExperimentalNatives() {
#ifdef V8_I18N_SUPPORT
static const char* icu_case_mapping_natives[] = {"native icu-case-mapping.js",
nullptr};
+ static const char* datetime_format_to_parts_natives[] = {
+ "native datetime-format-to-parts.js", nullptr};
#endif
- static const char* harmony_async_await_natives[] = {
- "native harmony-async-await.js", nullptr};
+ static const char* harmony_async_await_natives[] = {nullptr};
static const char* harmony_restrictive_generators_natives[] = {nullptr};
static const char* harmony_trailing_commas_natives[] = {nullptr};
+ static const char* harmony_class_fields_natives[] = {nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -3383,7 +3501,6 @@ bool Genesis::InstallExperimentalNatives() {
HARMONY_INPROGRESS(INSTALL_EXPERIMENTAL_NATIVES);
HARMONY_STAGED(INSTALL_EXPERIMENTAL_NATIVES);
HARMONY_SHIPPING(INSTALL_EXPERIMENTAL_NATIVES);
- INSTALL_EXPERIMENTAL_NATIVES(intl_extra, "");
#undef INSTALL_EXPERIMENTAL_NATIVES
}
@@ -3548,8 +3665,7 @@ static uint32_t Hash(RegisteredExtension* extension) {
return v8::internal::ComputePointerHash(extension);
}
-Genesis::ExtensionStates::ExtensionStates()
- : map_(base::HashMap::PointersMatch, 8) {}
+Genesis::ExtensionStates::ExtensionStates() : map_(8) {}
Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state(
RegisteredExtension* extension) {
@@ -4004,9 +4120,7 @@ Genesis::Genesis(Isolate* isolate,
// Check that the script context table is empty except for the 'this' binding.
// We do not need script contexts for native scripts.
- if (!FLAG_global_var_shortcuts) {
- DCHECK_EQ(1, native_context()->script_context_table()->used());
- }
+ DCHECK_EQ(1, native_context()->script_context_table()->used());
result_ = native_context();
}
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 1b643d437b..2c0bef2556 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -387,10 +387,9 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&to_string);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(r2);
__ EnterBuiltinFrame(cp, r1, r2);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ LeaveBuiltinFrame(cp, r1, r2);
__ SmiUntag(r2);
}
@@ -449,12 +448,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(r6);
__ EnterBuiltinFrame(cp, r1, r6);
__ Push(r3);
__ Move(r0, r2);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ Move(r2, r0);
__ Pop(r3);
__ LeaveBuiltinFrame(cp, r1, r6);
@@ -1060,6 +1058,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r0, Operand(masm->CodeObject())); // Self-reference to this code.
__ b(ne, &switch_to_different_code_kind);
+ // Increment invocation count for the function.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+ __ ldr(r2, FieldMemOperand(r2, LiteralsArray::kFeedbackVectorOffset));
+ __ ldr(r9, FieldMemOperand(
+ r2, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ __ add(r9, r9, Operand(Smi::FromInt(1)));
+ __ str(r9, FieldMemOperand(
+ r2, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ SmiTst(kInterpreterBytecodeArrayRegister);
@@ -1162,8 +1171,33 @@ void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
__ Jump(lr);
}
-static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
- Register limit, Register scratch) {
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ __ sub(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
+ __ b(le, stack_overflow); // Signed comparison.
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ Register num_args, Register index,
+ Register limit, Register scratch,
+ Label* stack_overflow) {
+ // Add a stack check before pushing arguments.
+ Generate_StackOverflowCheck(masm, num_args, scratch, stack_overflow);
+
+ // Find the address of the last argument.
+ __ mov(limit, num_args);
+ __ mov(limit, Operand(limit, LSL, kPointerSizeLog2));
+ __ sub(limit, index, limit);
+
Label loop_header, loop_check;
__ b(al, &loop_check);
__ bind(&loop_header);
@@ -1185,14 +1219,12 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// they are to be pushed onto the stack.
// -- r1 : the target to call (can be any Object).
// -----------------------------------
+ Label stack_overflow;
- // Find the address of the last argument.
__ add(r3, r0, Operand(1)); // Add one for receiver.
- __ mov(r3, Operand(r3, LSL, kPointerSizeLog2));
- __ sub(r3, r2, r3);
- // Push the arguments.
- Generate_InterpreterPushArgs(masm, r2, r3, r4);
+ // Push the arguments. r2, r4, r5 will be modified.
+ Generate_InterpreterPushArgs(masm, r3, r2, r4, r5, &stack_overflow);
// Call the target.
if (function_type == CallableType::kJSFunction) {
@@ -1205,30 +1237,88 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
tail_call_mode),
RelocInfo::CODE_TARGET);
}
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ bkpt(0);
+ }
}
// static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+ MacroAssembler* masm, CallableType construct_type) {
// ----------- S t a t e -------------
// -- r0 : argument count (not including receiver)
// -- r3 : new target
// -- r1 : constructor to call
- // -- r2 : address of the first argument
+ // -- r2 : allocation site feedback if available, undefined otherwise.
+ // -- r4 : address of the first argument
// -----------------------------------
-
- // Find the address of the last argument.
- __ mov(r4, Operand(r0, LSL, kPointerSizeLog2));
- __ sub(r4, r2, r4);
+ Label stack_overflow;
// Push a slot for the receiver to be constructed.
__ mov(ip, Operand::Zero());
__ push(ip);
- // Push the arguments.
- Generate_InterpreterPushArgs(masm, r2, r4, r5);
+ // Push the arguments. r5, r4, r6 will be modified.
+ Generate_InterpreterPushArgs(masm, r0, r4, r5, r6, &stack_overflow);
+
+ __ AssertUndefinedOrAllocationSite(r2, r5);
+ if (construct_type == CallableType::kJSFunction) {
+ __ AssertFunction(r1);
- // Call the constructor with r0, r1, and r3 unmodified.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
+ // Jump to the construct function.
+ __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ } else {
+ DCHECK_EQ(construct_type, CallableType::kAny);
+ // Call the constructor with r0, r1, and r3 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ bkpt(0);
+ }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argument count (not including receiver)
+ // -- r1 : target to call verified to be Array function
+ // -- r2 : allocation site feedback if available, undefined otherwise.
+ // -- r3 : address of the first argument
+ // -----------------------------------
+ Label stack_overflow;
+
+ __ add(r4, r0, Operand(1)); // Add one for receiver.
+
+ // TODO(mythria): Add a stack check before pushing arguments.
+ // Push the arguments. r3, r5, r6 will be modified.
+ Generate_InterpreterPushArgs(masm, r4, r3, r5, r6, &stack_overflow);
+
+ // Array constructor expects constructor in r3. It is same as r1 here.
+ __ mov(r3, r1);
+
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ bkpt(0);
+ }
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1816,61 +1906,6 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
- int field_index) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : function
- // -- cp : context
- // -- lr : return address
- // -- sp[0] : receiver
- // -----------------------------------
-
- // 1. Pop receiver into r0 and check that it's actually a JSDate object.
- Label receiver_not_date;
- {
- __ Pop(r0);
- __ JumpIfSmi(r0, &receiver_not_date);
- __ CompareObjectType(r0, r2, r3, JS_DATE_TYPE);
- __ b(ne, &receiver_not_date);
- }
-
- // 2. Load the specified date field, falling back to the runtime as necessary.
- if (field_index == JSDate::kDateValue) {
- __ ldr(r0, FieldMemOperand(r0, JSDate::kValueOffset));
- } else {
- if (field_index < JSDate::kFirstUncachedField) {
- Label stamp_mismatch;
- __ mov(r1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
- __ ldr(r1, MemOperand(r1));
- __ ldr(ip, FieldMemOperand(r0, JSDate::kCacheStampOffset));
- __ cmp(r1, ip);
- __ b(ne, &stamp_mismatch);
- __ ldr(r0, FieldMemOperand(
- r0, JSDate::kValueOffset + field_index * kPointerSize));
- __ Ret();
- __ bind(&stamp_mismatch);
- }
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ PrepareCallCFunction(2, r1);
- __ mov(r1, Operand(Smi::FromInt(field_index)));
- __ CallCFunction(
- ExternalReference::get_date_field_function(masm->isolate()), 2);
- }
- __ Ret();
-
- // 3. Raise a TypeError if the receiver is not a date.
- __ bind(&receiver_not_date);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(r0);
- __ Move(r0, Smi::FromInt(0));
- __ EnterBuiltinFrame(cp, r1, r0);
- __ CallRuntime(Runtime::kThrowNotDateError);
- }
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
@@ -2101,26 +2136,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
- Label* stack_overflow) {
- // ----------- S t a t e -------------
- // -- r0 : actual number of arguments
- // -- r1 : function (passed through to callee)
- // -- r2 : expected number of arguments
- // -- r3 : new target (passed through to callee)
- // -----------------------------------
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
- // Make r5 the space we have left. The stack might already be overflowed
- // here which will cause r5 to become negative.
- __ sub(r5, sp, r5);
- // Check if the arguments will overflow the stack.
- __ cmp(r5, Operand(r2, LSL, kPointerSizeLog2));
- __ b(le, stack_overflow); // Signed comparison.
-}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -2786,21 +2801,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in r0.
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
- __ Ret(eq);
-
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
- // r0: receiver
- // r1: receiver instance type
- __ Ret(eq);
-
- __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
- RelocInfo::CODE_TARGET);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : actual number of arguments
@@ -2820,7 +2820,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: actual >= expected
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
// Calculate copy start address into r0 and copy end address into r4.
// r0: actual number of arguments as a smi
@@ -2853,7 +2853,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
// Calculate copy start address into r0 and copy end address is fp.
// r0: actual number of arguments as a smi
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 57395d835b..48551dea00 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -379,10 +379,9 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ Bind(&to_string);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(x2);
__ EnterBuiltinFrame(cp, x1, x2);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ LeaveBuiltinFrame(cp, x1, x2);
__ SmiUntag(x2);
}
@@ -442,12 +441,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ Bind(&convert);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(x6);
__ EnterBuiltinFrame(cp, x1, x6);
__ Push(x3);
__ Move(x0, x2);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ Move(x2, x0);
__ Pop(x3);
__ LeaveBuiltinFrame(cp, x1, x6);
@@ -1065,6 +1063,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Cmp(x0, Operand(masm->CodeObject())); // Self-reference to this code.
__ B(ne, &switch_to_different_code_kind);
+ // Increment invocation count for the function.
+ __ Ldr(x11, FieldMemOperand(x1, JSFunction::kLiteralsOffset));
+ __ Ldr(x11, FieldMemOperand(x11, LiteralsArray::kFeedbackVectorOffset));
+ __ Ldr(x10, FieldMemOperand(x11, TypeFeedbackVector::kInvocationCountIndex *
+ kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ __ Add(x10, x10, Operand(Smi::FromInt(1)));
+ __ Str(x10, FieldMemOperand(x11, TypeFeedbackVector::kInvocationCountIndex *
+ kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister,
@@ -1171,6 +1180,50 @@ void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
__ Ret();
}
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch,
+ Label* stack_overflow) {
+ // Check the stack for overflow.
+ // We are not trying to catch interruptions (e.g. debug break and
+ // preemption) here, so the "real stack limit" is checked.
+ Label enough_stack_space;
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ __ Sub(scratch, jssp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
+ __ B(le, stack_overflow);
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ Register num_args, Register index,
+ Register last_arg, Register stack_addr,
+ Register scratch,
+ Label* stack_overflow) {
+ // Add a stack check before pushing arguments.
+ Generate_StackOverflowCheck(masm, num_args, scratch, stack_overflow);
+
+ __ Mov(scratch, num_args);
+ __ lsl(scratch, scratch, kPointerSizeLog2);
+ __ sub(last_arg, index, scratch);
+
+ // Set stack pointer and where to stop.
+ __ Mov(stack_addr, jssp);
+ __ Claim(scratch, 1);
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ B(&loop_check);
+ __ Bind(&loop_header);
+ // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
+ __ Ldr(scratch, MemOperand(index, -kPointerSize, PostIndex));
+ __ Str(scratch, MemOperand(stack_addr, -kPointerSize, PreIndex));
+ __ Bind(&loop_check);
+ __ Cmp(index, last_arg);
+ __ B(gt, &loop_header);
+}
+
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
@@ -1182,24 +1235,13 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// they are to be pushed onto the stack.
// -- x1 : the target to call (can be any Object).
// -----------------------------------
+ Label stack_overflow;
- // Find the address of the last argument.
- __ add(x3, x0, Operand(1)); // Add one for receiver.
- __ lsl(x3, x3, kPointerSizeLog2);
- __ sub(x4, x2, x3);
+ // Add one for the receiver.
+ __ add(x3, x0, Operand(1));
- // Push the arguments.
- Label loop_header, loop_check;
- __ Mov(x5, jssp);
- __ Claim(x3, 1);
- __ B(&loop_check);
- __ Bind(&loop_header);
- // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
- __ Ldr(x3, MemOperand(x2, -kPointerSize, PostIndex));
- __ Str(x3, MemOperand(x5, -kPointerSize, PreIndex));
- __ Bind(&loop_check);
- __ Cmp(x2, x4);
- __ B(gt, &loop_header);
+ // Push the arguments. x2, x4, x5, x6 will be modified.
+ Generate_InterpreterPushArgs(masm, x3, x2, x4, x5, x6, &stack_overflow);
// Call the target.
if (function_type == CallableType::kJSFunction) {
@@ -1212,42 +1254,82 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
tail_call_mode),
RelocInfo::CODE_TARGET);
}
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ Unreachable();
+ }
}
// static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+ MacroAssembler* masm, CallableType construct_type) {
// ----------- S t a t e -------------
// -- x0 : argument count (not including receiver)
// -- x3 : new target
// -- x1 : constructor to call
- // -- x2 : address of the first argument
+ // -- x2 : allocation site feedback if available, undefined otherwise
+ // -- x4 : address of the first argument
// -----------------------------------
+ Label stack_overflow;
- // Find the address of the last argument.
- __ add(x5, x0, Operand(1)); // Add one for receiver (to be constructed).
- __ lsl(x5, x5, kPointerSizeLog2);
+ // Push a slot for the receiver.
+ __ Push(xzr);
- // Set stack pointer and where to stop.
- __ Mov(x6, jssp);
- __ Claim(x5, 1);
- __ sub(x4, x6, x5);
+ // Push the arguments. x5, x4, x6, x7 will be modified.
+ Generate_InterpreterPushArgs(masm, x0, x4, x5, x6, x7, &stack_overflow);
- // Push a slot for the receiver.
- __ Str(xzr, MemOperand(x6, -kPointerSize, PreIndex));
+ __ AssertUndefinedOrAllocationSite(x2, x6);
+ if (construct_type == CallableType::kJSFunction) {
+ __ AssertFunction(x1);
- Label loop_header, loop_check;
- // Push the arguments.
- __ B(&loop_check);
- __ Bind(&loop_header);
- // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
- __ Ldr(x5, MemOperand(x2, -kPointerSize, PostIndex));
- __ Str(x5, MemOperand(x6, -kPointerSize, PreIndex));
- __ Bind(&loop_check);
- __ Cmp(x6, x4);
- __ B(gt, &loop_header);
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
+ __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x4);
+ } else {
+ DCHECK_EQ(construct_type, CallableType::kAny);
+ // Call the constructor with x0, x1, and x3 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ Unreachable();
+ }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argument count (not including receiver)
+ // -- x1 : target to call verified to be Array function
+ // -- x2 : allocation site feedback if available, undefined otherwise.
+ // -- x3 : address of the first argument
+ // -----------------------------------
+ Label stack_overflow;
+
+ __ add(x4, x0, Operand(1)); // Add one for the receiver.
+
+ // Push the arguments. x3, x5, x6, x7 will be modified.
+ Generate_InterpreterPushArgs(masm, x4, x3, x5, x6, x7, &stack_overflow);
+
+ // Array constructor expects constructor in x3. It is same as call target.
+ __ mov(x3, x1);
+
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
- // Call the constructor with x0, x1, and x3 unmodified.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ Unreachable();
+ }
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1820,60 +1902,6 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
- int field_index) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments
- // -- x1 : function
- // -- cp : context
- // -- lr : return address
- // -- jssp[0] : receiver
- // -----------------------------------
- ASM_LOCATION("Builtins::Generate_DatePrototype_GetField");
-
- // 1. Pop receiver into x0 and check that it's actually a JSDate object.
- Label receiver_not_date;
- {
- __ Pop(x0);
- __ JumpIfSmi(x0, &receiver_not_date);
- __ JumpIfNotObjectType(x0, x2, x3, JS_DATE_TYPE, &receiver_not_date);
- }
-
- // 2. Load the specified date field, falling back to the runtime as necessary.
- if (field_index == JSDate::kDateValue) {
- __ Ldr(x0, FieldMemOperand(x0, JSDate::kValueOffset));
- } else {
- if (field_index < JSDate::kFirstUncachedField) {
- Label stamp_mismatch;
- __ Mov(x1, ExternalReference::date_cache_stamp(masm->isolate()));
- __ Ldr(x1, MemOperand(x1));
- __ Ldr(x2, FieldMemOperand(x0, JSDate::kCacheStampOffset));
- __ Cmp(x1, x2);
- __ B(ne, &stamp_mismatch);
- __ Ldr(x0, FieldMemOperand(
- x0, JSDate::kValueOffset + field_index * kPointerSize));
- __ Ret();
- __ Bind(&stamp_mismatch);
- }
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Mov(x1, Smi::FromInt(field_index));
- __ CallCFunction(
- ExternalReference::get_date_field_function(masm->isolate()), 2);
- }
- __ Ret();
-
- // 3. Raise a TypeError if the receiver is not a date.
- __ Bind(&receiver_not_date);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(x0);
- __ Mov(x0, Smi::FromInt(0));
- __ EnterBuiltinFrame(cp, x1, x0);
- __ CallRuntime(Runtime::kThrowNotDateError);
- }
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
@@ -2162,27 +2190,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
- Label* stack_overflow) {
- // ----------- S t a t e -------------
- // -- x0 : actual number of arguments
- // -- x1 : function (passed through to callee)
- // -- x2 : expected number of arguments
- // -- x3 : new target (passed through to callee)
- // -----------------------------------
- // Check the stack for overflow.
- // We are not trying to catch interruptions (e.g. debug break and
- // preemption) here, so the "real stack limit" is checked.
- Label enough_stack_space;
- __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
- // Make x10 the space we have left. The stack might already be overflowed
- // here which will cause x10 to become negative.
- __ Sub(x10, jssp, x10);
- // Check if the arguments will overflow the stack.
- __ Cmp(x10, Operand(x2, LSL, kPointerSizeLog2));
- __ B(le, stack_overflow);
-}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(x10, x0);
__ Mov(x11, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
@@ -2451,11 +2458,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAnySet(
- w3, (1 << SharedFunctionInfo::kIsDefaultConstructor) |
- (1 << SharedFunctionInfo::kIsSubclassConstructor) |
- (1 << SharedFunctionInfo::kIsBaseConstructor),
- &class_constructor);
+ __ TestAndBranchIfAnySet(w3, FunctionKind::kClassConstructor
+ << SharedFunctionInfo::kFunctionKindShift,
+ &class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2873,26 +2878,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in x0.
- Label not_smi;
- __ JumpIfNotSmi(x0, &not_smi);
- __ Ret();
- __ Bind(&not_smi);
-
- Label not_heap_number;
- __ CompareObjectType(x0, x1, x1, HEAP_NUMBER_TYPE);
- // x0: receiver
- // x1: receiver instance type
- __ B(ne, &not_heap_number);
- __ Ret();
- __ Bind(&not_heap_number);
-
- __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
- RelocInfo::CODE_TARGET);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
// ----------- S t a t e -------------
@@ -2917,7 +2902,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: actual >= expected
EnterArgumentsAdaptorFrame(masm);
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Generate_StackOverflowCheck(masm, x2, x10, &stack_overflow);
Register copy_start = x10;
Register copy_end = x11;
@@ -2964,7 +2949,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register scratch1 = x13, scratch2 = x14;
EnterArgumentsAdaptorFrame(masm);
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Generate_StackOverflowCheck(masm, x2, x10, &stack_overflow);
__ Lsl(scratch2, argc_expected, kPointerSizeLog2);
__ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 09ee4cc2e2..b4969f1e57 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -1269,24 +1269,24 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
Node* start_from = assembler->Parameter(2);
Node* context = assembler->Parameter(3 + 2);
- Node* int32_zero = assembler->Int32Constant(0);
- Node* int32_one = assembler->Int32Constant(1);
+ Node* intptr_zero = assembler->IntPtrConstant(0);
+ Node* intptr_one = assembler->IntPtrConstant(1);
Node* the_hole = assembler->TheHoleConstant();
Node* undefined = assembler->UndefinedConstant();
Node* heap_number_map = assembler->HeapNumberMapConstant();
- Variable len_var(assembler, MachineRepresentation::kWord32),
- index_var(assembler, MachineRepresentation::kWord32),
- start_from_var(assembler, MachineRepresentation::kWord32);
+ Variable len_var(assembler, MachineType::PointerRepresentation()),
+ index_var(assembler, MachineType::PointerRepresentation()),
+ start_from_var(assembler, MachineType::PointerRepresentation());
Label init_k(assembler), return_true(assembler), return_false(assembler),
call_runtime(assembler);
Label init_len(assembler);
- index_var.Bind(int32_zero);
- len_var.Bind(int32_zero);
+ index_var.Bind(intptr_zero);
+ len_var.Bind(intptr_zero);
// Take slow path if not a JSArray, if retrieving elements requires
// traversing prototype, or if access checks are required.
@@ -1299,7 +1299,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
assembler->GotoUnless(assembler->WordIsSmi(len), &call_runtime);
len_var.Bind(assembler->SmiToWord(len));
- assembler->Branch(assembler->Word32Equal(len_var.value(), int32_zero),
+ assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
&return_false, &init_k);
}
@@ -1307,31 +1307,32 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
{
Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
init_k_zero(assembler), init_k_n(assembler);
- Callable call_to_integer = CodeFactory::ToInteger(assembler->isolate());
- Node* tagged_n = assembler->CallStub(call_to_integer, context, start_from);
+ Node* tagged_n = assembler->ToInteger(context, start_from);
assembler->Branch(assembler->WordIsSmi(tagged_n), &init_k_smi,
&init_k_heap_num);
assembler->Bind(&init_k_smi);
{
- start_from_var.Bind(assembler->SmiToWord32(tagged_n));
+ start_from_var.Bind(assembler->SmiUntag(tagged_n));
assembler->Goto(&init_k_n);
}
assembler->Bind(&init_k_heap_num);
{
Label do_return_false(assembler);
- Node* fp_len = assembler->ChangeInt32ToFloat64(len_var.value());
+ // This round is lossless for all valid lengths.
+ Node* fp_len = assembler->RoundIntPtrToFloat64(len_var.value());
Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
&do_return_false);
- start_from_var.Bind(assembler->TruncateFloat64ToWord32(fp_n));
+ start_from_var.Bind(assembler->ChangeInt32ToIntPtr(
+ assembler->TruncateFloat64ToWord32(fp_n)));
assembler->Goto(&init_k_n);
assembler->Bind(&do_return_false);
{
- index_var.Bind(int32_zero);
+ index_var.Bind(intptr_zero);
assembler->Goto(&return_false);
}
}
@@ -1340,7 +1341,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
{
Label if_positive(assembler), if_negative(assembler), done(assembler);
assembler->Branch(
- assembler->Int32LessThan(start_from_var.value(), int32_zero),
+ assembler->IntPtrLessThan(start_from_var.value(), intptr_zero),
&if_negative, &if_positive);
assembler->Bind(&if_positive);
@@ -1352,15 +1353,15 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
assembler->Bind(&if_negative);
{
index_var.Bind(
- assembler->Int32Add(len_var.value(), start_from_var.value()));
+ assembler->IntPtrAdd(len_var.value(), start_from_var.value()));
assembler->Branch(
- assembler->Int32LessThan(index_var.value(), int32_zero),
+ assembler->IntPtrLessThan(index_var.value(), intptr_zero),
&init_k_zero, &done);
}
assembler->Bind(&init_k_zero);
{
- index_var.Bind(int32_zero);
+ index_var.Bind(intptr_zero);
assembler->Goto(&done);
}
@@ -1380,9 +1381,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
&if_packed_doubles, &if_holey_doubles};
Node* map = assembler->LoadMap(array);
- Node* bit_field2 = assembler->LoadMapBitField2(map);
- Node* elements_kind =
- assembler->BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+ Node* elements_kind = assembler->LoadMapElementsKind(map);
Node* elements = assembler->LoadElements(array);
assembler->Switch(elements_kind, &return_false, kElementsKind,
element_kind_handlers, arraysize(kElementsKind));
@@ -1411,43 +1410,41 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
assembler->Bind(&not_heap_num);
Node* search_type = assembler->LoadMapInstanceType(map);
+ assembler->GotoIf(assembler->IsStringInstanceType(search_type),
+ &string_loop);
assembler->GotoIf(
- assembler->Int32LessThan(
- search_type, assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
- &string_loop);
- assembler->GotoIf(
- assembler->WordEqual(search_type,
- assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+ assembler->Word32Equal(search_type,
+ assembler->Int32Constant(SIMD128_VALUE_TYPE)),
&simd_loop);
assembler->Goto(&ident_loop);
assembler->Bind(&ident_loop);
{
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k =
- assembler->LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = assembler->LoadFixedArrayElement(
+ elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
assembler->GotoIf(assembler->WordEqual(element_k, search_element),
&return_true);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&ident_loop);
}
assembler->Bind(&undef_loop);
{
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k =
- assembler->LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = assembler->LoadFixedArrayElement(
+ elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
assembler->GotoIf(assembler->WordEqual(element_k, undefined),
&return_true);
assembler->GotoIf(assembler->WordEqual(element_k, the_hole),
&return_true);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&undef_loop);
}
@@ -1462,10 +1459,11 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
{
Label continue_loop(assembler), not_smi(assembler);
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k =
- assembler->LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = assembler->LoadFixedArrayElement(
+ elements, index_var.value(), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS);
assembler->GotoUnless(assembler->WordIsSmi(element_k), &not_smi);
assembler->Branch(
assembler->Float64Equal(search_num.value(),
@@ -1481,7 +1479,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
&return_true, &continue_loop);
assembler->Bind(&continue_loop);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
}
@@ -1489,10 +1487,11 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
{
Label continue_loop(assembler);
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k =
- assembler->LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = assembler->LoadFixedArrayElement(
+ elements, index_var.value(), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS);
assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
heap_number_map),
@@ -1502,7 +1501,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
&continue_loop);
assembler->Bind(&continue_loop);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&nan_loop);
}
}
@@ -1511,14 +1510,13 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
{
Label continue_loop(assembler);
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k =
- assembler->LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = assembler->LoadFixedArrayElement(
+ elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
- assembler->GotoUnless(assembler->Int32LessThan(
- assembler->LoadInstanceType(element_k),
- assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ assembler->GotoUnless(assembler->IsStringInstanceType(
+ assembler->LoadInstanceType(element_k)),
&continue_loop);
// TODO(bmeurer): Consider inlining the StringEqual logic here.
@@ -1530,7 +1528,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
&return_true, &continue_loop);
assembler->Bind(&continue_loop);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&string_loop);
}
@@ -1543,11 +1541,11 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
assembler->Goto(&loop_body);
assembler->Bind(&loop_body);
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k =
- assembler->LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = assembler->LoadFixedArrayElement(
+ elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
Node* map_k = assembler->LoadMap(element_k);
@@ -1555,7 +1553,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
&return_true, &continue_loop);
assembler->Bind(&continue_loop);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&loop_body);
}
}
@@ -1585,14 +1583,15 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
{
Label continue_loop(assembler);
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
Node* element_k = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64());
+ elements, index_var.value(), MachineType::Float64(), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS);
assembler->BranchIfFloat64Equal(element_k, search_num.value(),
&return_true, &continue_loop);
assembler->Bind(&continue_loop);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
}
@@ -1601,13 +1600,14 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
{
Label continue_loop(assembler);
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
Node* element_k = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64());
+ elements, index_var.value(), MachineType::Float64(), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS);
assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
assembler->Bind(&continue_loop);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&nan_loop);
}
}
@@ -1639,31 +1639,18 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
{
Label continue_loop(assembler);
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- if (kPointerSize == kDoubleSize) {
- Node* element = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Uint64());
- Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
- assembler->GotoIf(assembler->Word64Equal(element, the_hole),
- &continue_loop);
- } else {
- Node* element_upper = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Uint32(),
- kIeeeDoubleExponentWordOffset);
- assembler->GotoIf(
- assembler->Word32Equal(element_upper,
- assembler->Int32Constant(kHoleNanUpper32)),
- &continue_loop);
- }
-
+ // Load double value or continue if it contains a double hole.
Node* element_k = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64());
+ elements, index_var.value(), MachineType::Float64(), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
+
assembler->BranchIfFloat64Equal(element_k, search_num.value(),
&return_true, &continue_loop);
assembler->Bind(&continue_loop);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
}
@@ -1672,30 +1659,17 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
{
Label continue_loop(assembler);
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- if (kPointerSize == kDoubleSize) {
- Node* element = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Uint64());
- Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
- assembler->GotoIf(assembler->Word64Equal(element, the_hole),
- &continue_loop);
- } else {
- Node* element_upper = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Uint32(),
- kIeeeDoubleExponentWordOffset);
- assembler->GotoIf(
- assembler->Word32Equal(element_upper,
- assembler->Int32Constant(kHoleNanUpper32)),
- &continue_loop);
- }
-
+ // Load double value or continue if it contains a double hole.
Node* element_k = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64());
+ elements, index_var.value(), MachineType::Float64(), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
+
assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
assembler->Bind(&continue_loop);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&nan_loop);
}
@@ -1703,26 +1677,15 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
assembler->Bind(&hole_loop);
{
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- if (kPointerSize == kDoubleSize) {
- Node* element = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Uint64());
- Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
- assembler->GotoIf(assembler->Word64Equal(element, the_hole),
- &return_true);
- } else {
- Node* element_upper = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Uint32(),
- kIeeeDoubleExponentWordOffset);
- assembler->GotoIf(
- assembler->Word32Equal(element_upper,
- assembler->Int32Constant(kHoleNanUpper32)),
- &return_true);
- }
+ // Check if the element is a double hole, but don't load it.
+ assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::None(), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS, &return_true);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&hole_loop);
}
}
@@ -1749,23 +1712,23 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
Node* start_from = assembler->Parameter(2);
Node* context = assembler->Parameter(3 + 2);
- Node* int32_zero = assembler->Int32Constant(0);
- Node* int32_one = assembler->Int32Constant(1);
+ Node* intptr_zero = assembler->IntPtrConstant(0);
+ Node* intptr_one = assembler->IntPtrConstant(1);
Node* undefined = assembler->UndefinedConstant();
Node* heap_number_map = assembler->HeapNumberMapConstant();
- Variable len_var(assembler, MachineRepresentation::kWord32),
- index_var(assembler, MachineRepresentation::kWord32),
- start_from_var(assembler, MachineRepresentation::kWord32);
+ Variable len_var(assembler, MachineType::PointerRepresentation()),
+ index_var(assembler, MachineType::PointerRepresentation()),
+ start_from_var(assembler, MachineType::PointerRepresentation());
Label init_k(assembler), return_found(assembler), return_not_found(assembler),
call_runtime(assembler);
Label init_len(assembler);
- index_var.Bind(int32_zero);
- len_var.Bind(int32_zero);
+ index_var.Bind(intptr_zero);
+ len_var.Bind(intptr_zero);
// Take slow path if not a JSArray, if retrieving elements requires
// traversing prototype, or if access checks are required.
@@ -1778,7 +1741,7 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
assembler->GotoUnless(assembler->WordIsSmi(len), &call_runtime);
len_var.Bind(assembler->SmiToWord(len));
- assembler->Branch(assembler->Word32Equal(len_var.value(), int32_zero),
+ assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
&return_not_found, &init_k);
}
@@ -1786,31 +1749,32 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
{
Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
init_k_zero(assembler), init_k_n(assembler);
- Callable call_to_integer = CodeFactory::ToInteger(assembler->isolate());
- Node* tagged_n = assembler->CallStub(call_to_integer, context, start_from);
+ Node* tagged_n = assembler->ToInteger(context, start_from);
assembler->Branch(assembler->WordIsSmi(tagged_n), &init_k_smi,
&init_k_heap_num);
assembler->Bind(&init_k_smi);
{
- start_from_var.Bind(assembler->SmiToWord32(tagged_n));
+ start_from_var.Bind(assembler->SmiUntag(tagged_n));
assembler->Goto(&init_k_n);
}
assembler->Bind(&init_k_heap_num);
{
Label do_return_not_found(assembler);
- Node* fp_len = assembler->ChangeInt32ToFloat64(len_var.value());
+ // This round is lossless for all valid lengths.
+ Node* fp_len = assembler->RoundIntPtrToFloat64(len_var.value());
Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
&do_return_not_found);
- start_from_var.Bind(assembler->TruncateFloat64ToWord32(fp_n));
+ start_from_var.Bind(assembler->ChangeInt32ToIntPtr(
+ assembler->TruncateFloat64ToWord32(fp_n)));
assembler->Goto(&init_k_n);
assembler->Bind(&do_return_not_found);
{
- index_var.Bind(int32_zero);
+ index_var.Bind(intptr_zero);
assembler->Goto(&return_not_found);
}
}
@@ -1819,7 +1783,7 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
{
Label if_positive(assembler), if_negative(assembler), done(assembler);
assembler->Branch(
- assembler->Int32LessThan(start_from_var.value(), int32_zero),
+ assembler->IntPtrLessThan(start_from_var.value(), intptr_zero),
&if_negative, &if_positive);
assembler->Bind(&if_positive);
@@ -1831,15 +1795,15 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
assembler->Bind(&if_negative);
{
index_var.Bind(
- assembler->Int32Add(len_var.value(), start_from_var.value()));
+ assembler->IntPtrAdd(len_var.value(), start_from_var.value()));
assembler->Branch(
- assembler->Int32LessThan(index_var.value(), int32_zero),
+ assembler->IntPtrLessThan(index_var.value(), intptr_zero),
&init_k_zero, &done);
}
assembler->Bind(&init_k_zero);
{
- index_var.Bind(int32_zero);
+ index_var.Bind(intptr_zero);
assembler->Goto(&done);
}
@@ -1859,9 +1823,7 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
&if_packed_doubles, &if_holey_doubles};
Node* map = assembler->LoadMap(array);
- Node* bit_field2 = assembler->LoadMapBitField2(map);
- Node* elements_kind =
- assembler->BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+ Node* elements_kind = assembler->LoadMapElementsKind(map);
Node* elements = assembler->LoadElements(array);
assembler->Switch(elements_kind, &return_not_found, kElementsKind,
element_kind_handlers, arraysize(kElementsKind));
@@ -1890,41 +1852,39 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
assembler->Bind(&not_heap_num);
Node* search_type = assembler->LoadMapInstanceType(map);
+ assembler->GotoIf(assembler->IsStringInstanceType(search_type),
+ &string_loop);
assembler->GotoIf(
- assembler->Int32LessThan(
- search_type, assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
- &string_loop);
- assembler->GotoIf(
- assembler->WordEqual(search_type,
- assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+ assembler->Word32Equal(search_type,
+ assembler->Int32Constant(SIMD128_VALUE_TYPE)),
&simd_loop);
assembler->Goto(&ident_loop);
assembler->Bind(&ident_loop);
{
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k =
- assembler->LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = assembler->LoadFixedArrayElement(
+ elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
assembler->GotoIf(assembler->WordEqual(element_k, search_element),
&return_found);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&ident_loop);
}
assembler->Bind(&undef_loop);
{
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k =
- assembler->LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = assembler->LoadFixedArrayElement(
+ elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
assembler->GotoIf(assembler->WordEqual(element_k, undefined),
&return_found);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&undef_loop);
}
@@ -1938,10 +1898,11 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
{
Label continue_loop(assembler), not_smi(assembler);
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k =
- assembler->LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = assembler->LoadFixedArrayElement(
+ elements, index_var.value(), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS);
assembler->GotoUnless(assembler->WordIsSmi(element_k), &not_smi);
assembler->Branch(
assembler->Float64Equal(search_num.value(),
@@ -1957,7 +1918,7 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
&return_found, &continue_loop);
assembler->Bind(&continue_loop);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
}
}
@@ -1966,14 +1927,13 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
{
Label continue_loop(assembler);
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k =
- assembler->LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = assembler->LoadFixedArrayElement(
+ elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
- assembler->GotoUnless(assembler->Int32LessThan(
- assembler->LoadInstanceType(element_k),
- assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ assembler->GotoUnless(assembler->IsStringInstanceType(
+ assembler->LoadInstanceType(element_k)),
&continue_loop);
// TODO(bmeurer): Consider inlining the StringEqual logic here.
@@ -1985,7 +1945,7 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
&return_found, &continue_loop);
assembler->Bind(&continue_loop);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&string_loop);
}
@@ -1998,11 +1958,11 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
assembler->Goto(&loop_body);
assembler->Bind(&loop_body);
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k =
- assembler->LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = assembler->LoadFixedArrayElement(
+ elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
Node* map_k = assembler->LoadMap(element_k);
@@ -2010,7 +1970,7 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
&return_found, &continue_loop);
assembler->Bind(&continue_loop);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&loop_body);
}
}
@@ -2039,14 +1999,15 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
{
Label continue_loop(assembler);
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
Node* element_k = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64());
+ elements, index_var.value(), MachineType::Float64(), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS);
assembler->BranchIfFloat64Equal(element_k, search_num.value(),
&return_found, &continue_loop);
assembler->Bind(&continue_loop);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
}
}
@@ -2075,31 +2036,18 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
{
Label continue_loop(assembler);
assembler->GotoUnless(
- assembler->Int32LessThan(index_var.value(), len_var.value()),
+ assembler->UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- if (kPointerSize == kDoubleSize) {
- Node* element = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Uint64());
- Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
- assembler->GotoIf(assembler->Word64Equal(element, the_hole),
- &continue_loop);
- } else {
- Node* element_upper = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Uint32(),
- kIeeeDoubleExponentWordOffset);
- assembler->GotoIf(
- assembler->Word32Equal(element_upper,
- assembler->Int32Constant(kHoleNanUpper32)),
- &continue_loop);
- }
-
+ // Load double value or continue if it contains a double hole.
Node* element_k = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64());
+ elements, index_var.value(), MachineType::Float64(), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
+
assembler->BranchIfFloat64Equal(element_k, search_num.value(),
&return_found, &continue_loop);
assembler->Bind(&continue_loop);
- index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
}
}
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index 7fc2f98716..ae9c76dc05 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -14,7 +14,7 @@ namespace internal {
#define CHECK_CALLSITE(recv, method) \
CHECK_RECEIVER(JSObject, recv, method); \
if (!JSReceiver::HasOwnProperty( \
- recv, isolate->factory()->call_site_position_symbol()) \
+ recv, isolate->factory()->call_site_frame_array_symbol()) \
.FromMaybe(false)) { \
THROW_NEW_ERROR_RETURN_FAILURE( \
isolate, \
@@ -29,172 +29,152 @@ Object* PositiveNumberOrNull(int value, Isolate* isolate) {
return isolate->heap()->null_value();
}
+Handle<FrameArray> GetFrameArray(Isolate* isolate, Handle<JSObject> object) {
+ Handle<Object> frame_array_obj = JSObject::GetDataProperty(
+ object, isolate->factory()->call_site_frame_array_symbol());
+ return Handle<FrameArray>::cast(frame_array_obj);
+}
+
+int GetFrameIndex(Isolate* isolate, Handle<JSObject> object) {
+ Handle<Object> frame_index_obj = JSObject::GetDataProperty(
+ object, isolate->factory()->call_site_frame_index_symbol());
+ return Smi::cast(*frame_index_obj)->value();
+}
+
} // namespace
BUILTIN(CallSitePrototypeGetColumnNumber) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getColumnNumber");
-
- CallSite call_site(isolate, recv);
- CHECK(call_site.IsJavaScript() || call_site.IsWasm());
- return PositiveNumberOrNull(call_site.GetColumnNumber(), isolate);
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return PositiveNumberOrNull(it.Frame()->GetColumnNumber(), isolate);
}
BUILTIN(CallSitePrototypeGetEvalOrigin) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getEvalOrigin");
-
- CallSite call_site(isolate, recv);
- CHECK(call_site.IsJavaScript() || call_site.IsWasm());
- return *call_site.GetEvalOrigin();
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return *it.Frame()->GetEvalOrigin();
}
BUILTIN(CallSitePrototypeGetFileName) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getFileName");
-
- CallSite call_site(isolate, recv);
- CHECK(call_site.IsJavaScript() || call_site.IsWasm());
- return *call_site.GetFileName();
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return *it.Frame()->GetFileName();
}
-namespace {
-
-bool CallSiteIsStrict(Isolate* isolate, Handle<JSObject> receiver) {
- Handle<Object> strict;
- Handle<Symbol> symbol = isolate->factory()->call_site_strict_symbol();
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, strict,
- JSObject::GetProperty(receiver, symbol));
- return strict->BooleanValue();
-}
-
-} // namespace
-
BUILTIN(CallSitePrototypeGetFunction) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getFunction");
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
- if (CallSiteIsStrict(isolate, recv))
- return *isolate->factory()->undefined_value();
-
- Handle<Symbol> symbol = isolate->factory()->call_site_function_symbol();
- RETURN_RESULT_OR_FAILURE(isolate, JSObject::GetProperty(recv, symbol));
+ StackFrameBase* frame = it.Frame();
+ if (frame->IsStrict()) return isolate->heap()->undefined_value();
+ return *frame->GetFunction();
}
BUILTIN(CallSitePrototypeGetFunctionName) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getFunctionName");
-
- CallSite call_site(isolate, recv);
- CHECK(call_site.IsJavaScript() || call_site.IsWasm());
- return *call_site.GetFunctionName();
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return *it.Frame()->GetFunctionName();
}
BUILTIN(CallSitePrototypeGetLineNumber) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getLineNumber");
-
- CallSite call_site(isolate, recv);
- CHECK(call_site.IsJavaScript() || call_site.IsWasm());
-
- int line_number = call_site.IsWasm() ? call_site.wasm_func_index()
- : call_site.GetLineNumber();
- return PositiveNumberOrNull(line_number, isolate);
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return PositiveNumberOrNull(it.Frame()->GetLineNumber(), isolate);
}
BUILTIN(CallSitePrototypeGetMethodName) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getMethodName");
-
- CallSite call_site(isolate, recv);
- CHECK(call_site.IsJavaScript() || call_site.IsWasm());
- return *call_site.GetMethodName();
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return *it.Frame()->GetMethodName();
}
BUILTIN(CallSitePrototypeGetPosition) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getPosition");
-
- Handle<Symbol> symbol = isolate->factory()->call_site_position_symbol();
- RETURN_RESULT_OR_FAILURE(isolate, JSObject::GetProperty(recv, symbol));
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return Smi::FromInt(it.Frame()->GetPosition());
}
BUILTIN(CallSitePrototypeGetScriptNameOrSourceURL) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getScriptNameOrSourceUrl");
-
- CallSite call_site(isolate, recv);
- CHECK(call_site.IsJavaScript() || call_site.IsWasm());
- return *call_site.GetScriptNameOrSourceUrl();
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return *it.Frame()->GetScriptNameOrSourceUrl();
}
BUILTIN(CallSitePrototypeGetThis) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getThis");
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
- if (CallSiteIsStrict(isolate, recv))
- return *isolate->factory()->undefined_value();
-
- Handle<Object> receiver;
- Handle<Symbol> symbol = isolate->factory()->call_site_receiver_symbol();
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- JSObject::GetProperty(recv, symbol));
-
- if (*receiver == isolate->heap()->call_site_constructor_symbol())
- return *isolate->factory()->undefined_value();
-
- return *receiver;
+ StackFrameBase* frame = it.Frame();
+ if (frame->IsStrict()) return isolate->heap()->undefined_value();
+ return *frame->GetReceiver();
}
BUILTIN(CallSitePrototypeGetTypeName) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getTypeName");
-
- CallSite call_site(isolate, recv);
- CHECK(call_site.IsJavaScript() || call_site.IsWasm());
- return *call_site.GetTypeName();
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return *it.Frame()->GetTypeName();
}
BUILTIN(CallSitePrototypeIsConstructor) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "isConstructor");
-
- CallSite call_site(isolate, recv);
- CHECK(call_site.IsJavaScript() || call_site.IsWasm());
- return isolate->heap()->ToBoolean(call_site.IsConstructor());
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return isolate->heap()->ToBoolean(it.Frame()->IsConstructor());
}
BUILTIN(CallSitePrototypeIsEval) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "isEval");
-
- CallSite call_site(isolate, recv);
- CHECK(call_site.IsJavaScript() || call_site.IsWasm());
- return isolate->heap()->ToBoolean(call_site.IsEval());
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return isolate->heap()->ToBoolean(it.Frame()->IsEval());
}
BUILTIN(CallSitePrototypeIsNative) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "isNative");
-
- CallSite call_site(isolate, recv);
- CHECK(call_site.IsJavaScript() || call_site.IsWasm());
- return isolate->heap()->ToBoolean(call_site.IsNative());
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return isolate->heap()->ToBoolean(it.Frame()->IsNative());
}
BUILTIN(CallSitePrototypeIsToplevel) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "isToplevel");
-
- CallSite call_site(isolate, recv);
- CHECK(call_site.IsJavaScript() || call_site.IsWasm());
- return isolate->heap()->ToBoolean(call_site.IsToplevel());
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ return isolate->heap()->ToBoolean(it.Frame()->IsToplevel());
}
BUILTIN(CallSitePrototypeToString) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "toString");
- RETURN_RESULT_OR_FAILURE(isolate, CallSiteUtils::ToString(isolate, recv));
+ FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
+ GetFrameIndex(isolate, recv));
+ RETURN_RESULT_OR_FAILURE(isolate, it.Frame()->ToString());
}
#undef CHECK_CALLSITE
diff --git a/deps/v8/src/builtins/builtins-conversion.cc b/deps/v8/src/builtins/builtins-conversion.cc
index 0d04a02e24..7fbe4f859e 100644
--- a/deps/v8/src/builtins/builtins-conversion.cc
+++ b/deps/v8/src/builtins/builtins-conversion.cc
@@ -110,133 +110,99 @@ void Builtins::Generate_NonPrimitiveToPrimitive_String(
}
void Builtins::Generate_StringToNumber(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef TypeConversionDescriptor Descriptor;
Node* input = assembler->Parameter(Descriptor::kArgument);
Node* context = assembler->Parameter(Descriptor::kContext);
- Label runtime(assembler);
+ assembler->Return(assembler->StringToNumber(context, input));
+}
- // Check if string has a cached array index.
- Node* hash = assembler->LoadNameHashField(input);
- Node* bit = assembler->Word32And(
- hash, assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
- assembler->GotoIf(assembler->Word32NotEqual(bit, assembler->Int32Constant(0)),
- &runtime);
+void Builtins::Generate_ToName(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef TypeConversionDescriptor Descriptor;
- assembler->Return(assembler->SmiTag(
- assembler->BitFieldDecode<String::ArrayIndexValueBits>(hash)));
+ Node* input = assembler->Parameter(Descriptor::kArgument);
+ Node* context = assembler->Parameter(Descriptor::kContext);
- assembler->Bind(&runtime);
- {
- // Note: We cannot tail call to the runtime here, as js-to-wasm
- // trampolines also use this code currently, and they declare all
- // outgoing parameters as untagged, while we would push a tagged
- // object here.
- Node* result =
- assembler->CallRuntime(Runtime::kStringToNumber, context, input);
- assembler->Return(result);
- }
+ assembler->Return(assembler->ToName(context, input));
}
-// ES6 section 7.1.3 ToNumber ( argument )
+// static
void Builtins::Generate_NonNumberToNumber(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef TypeConversionDescriptor Descriptor;
+
+ Node* input = assembler->Parameter(Descriptor::kArgument);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->Return(assembler->NonNumberToNumber(context, input));
+}
+
+// ES6 section 7.1.3 ToNumber ( argument )
+void Builtins::Generate_ToNumber(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef TypeConversionDescriptor Descriptor;
+
+ Node* input = assembler->Parameter(Descriptor::kArgument);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->Return(assembler->ToNumber(context, input));
+}
+
+void Builtins::Generate_ToString(CodeStubAssembler* assembler) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
typedef TypeConversionDescriptor Descriptor;
Node* input = assembler->Parameter(Descriptor::kArgument);
Node* context = assembler->Parameter(Descriptor::kContext);
- // We might need to loop once here due to ToPrimitive conversions.
- Variable var_input(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_input);
- var_input.Bind(input);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Label is_number(assembler);
+ Label runtime(assembler);
+
+ assembler->GotoIf(assembler->WordIsSmi(input), &is_number);
+
+ Node* input_map = assembler->LoadMap(input);
+ Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
+
+ Label not_string(assembler);
+ assembler->GotoUnless(assembler->IsStringInstanceType(input_instance_type),
+ &not_string);
+ assembler->Return(input);
+
+ Label not_heap_number(assembler);
+
+ assembler->Bind(&not_string);
{
- // Load the current {input} value (known to be a HeapObject).
- Node* input = var_input.value();
-
- // Dispatch on the {input} instance type.
- Node* input_instance_type = assembler->LoadInstanceType(input);
- Label if_inputisstring(assembler), if_inputisoddball(assembler),
- if_inputisreceiver(assembler, Label::kDeferred),
- if_inputisother(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->Int32LessThan(
- input_instance_type,
- assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
- &if_inputisstring);
- assembler->GotoIf(
- assembler->Word32Equal(input_instance_type,
- assembler->Int32Constant(ODDBALL_TYPE)),
- &if_inputisoddball);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- assembler->Branch(assembler->Int32GreaterThanOrEqual(
- input_instance_type,
- assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE)),
- &if_inputisreceiver, &if_inputisother);
-
- assembler->Bind(&if_inputisstring);
- {
- // The {input} is a String, use the fast stub to convert it to a Number.
- // TODO(bmeurer): Consider inlining the StringToNumber logic here.
- Callable callable = CodeFactory::StringToNumber(assembler->isolate());
- assembler->TailCallStub(callable, context, input);
- }
+ assembler->GotoUnless(
+ assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
+ &not_heap_number);
+ assembler->Goto(&is_number);
+ }
- assembler->Bind(&if_inputisoddball);
- {
- // The {input} is an Oddball, we just need to the Number value of it.
- Node* result =
- assembler->LoadObjectField(input, Oddball::kToNumberOffset);
- assembler->Return(result);
- }
+ assembler->Bind(&is_number);
+ {
+ // TODO(tebbi): inline as soon as NumberToString is in the CodeStubAssembler
+ Callable callable = CodeFactory::NumberToString(assembler->isolate());
+ assembler->Return(assembler->CallStub(callable, context, input));
+ }
- assembler->Bind(&if_inputisreceiver);
- {
- // The {input} is a JSReceiver, we need to convert it to a Primitive first
- // using the ToPrimitive type conversion, preferably yielding a Number.
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(
- assembler->isolate(), ToPrimitiveHint::kNumber);
- Node* result = assembler->CallStub(callable, context, input);
-
- // Check if the {result} is already a Number.
- Label if_resultisnumber(assembler), if_resultisnotnumber(assembler);
- assembler->GotoIf(assembler->WordIsSmi(result), &if_resultisnumber);
- Node* result_map = assembler->LoadMap(result);
- assembler->Branch(
- assembler->WordEqual(result_map, assembler->HeapNumberMapConstant()),
- &if_resultisnumber, &if_resultisnotnumber);
-
- assembler->Bind(&if_resultisnumber);
- {
- // The ToPrimitive conversion already gave us a Number, so we're done.
- assembler->Return(result);
- }
-
- assembler->Bind(&if_resultisnotnumber);
- {
- // We now have a Primitive {result}, but it's not yet a Number.
- var_input.Bind(result);
- assembler->Goto(&loop);
- }
- }
+ assembler->Bind(&not_heap_number);
+ {
+ assembler->GotoIf(
+ assembler->Word32NotEqual(input_instance_type,
+ assembler->Int32Constant(ODDBALL_TYPE)),
+ &runtime);
+ assembler->Return(
+ assembler->LoadObjectField(input, Oddball::kToStringOffset));
+ }
- assembler->Bind(&if_inputisother);
- {
- // The {input} is something else (i.e. Symbol or Simd128Value), let the
- // runtime figure out the correct exception.
- // Note: We cannot tail call to the runtime here, as js-to-wasm
- // trampolines also use this code currently, and they declare all
- // outgoing parameters as untagged, while we would push a tagged
- // object here.
- Node* result = assembler->CallRuntime(Runtime::kToNumber, context, input);
- assembler->Return(result);
- }
+ assembler->Bind(&runtime);
+ {
+ assembler->Return(
+ assembler->CallRuntime(Runtime::kToString, context, input));
}
}
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 32c5a83d2f..3d14e31d3a 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -129,5 +129,209 @@ BUILTIN(DataViewPrototypeGetByteOffset) {
return data_view->byte_offset();
}
+namespace {
+
+bool NeedToFlipBytes(bool is_little_endian) {
+#ifdef V8_TARGET_LITTLE_ENDIAN
+ return !is_little_endian;
+#else
+ return is_little_endian;
+#endif
+}
+
+template <size_t n>
+void CopyBytes(uint8_t* target, uint8_t const* source) {
+ for (size_t i = 0; i < n; i++) {
+ *(target++) = *(source++);
+ }
+}
+
+template <size_t n>
+void FlipBytes(uint8_t* target, uint8_t const* source) {
+ source = source + (n - 1);
+ for (size_t i = 0; i < n; i++) {
+ *(target++) = *(source--);
+ }
+}
+
+// ES6 section 24.2.1.1 GetViewValue (view, requestIndex, isLittleEndian, type)
+template <typename T>
+MaybeHandle<Object> GetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
+ Handle<Object> request_index,
+ bool is_little_endian) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, request_index,
+ Object::ToIndex(isolate, request_index,
+ MessageTemplate::kInvalidDataViewAccessorOffset),
+ Object);
+ size_t get_index = 0;
+ if (!TryNumberToSize(*request_index, &get_index)) {
+ THROW_NEW_ERROR(
+ isolate, NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset),
+ Object);
+ }
+ Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()),
+ isolate);
+ size_t const data_view_byte_offset = NumberToSize(data_view->byte_offset());
+ size_t const data_view_byte_length = NumberToSize(data_view->byte_length());
+ if (get_index + sizeof(T) > data_view_byte_length ||
+ get_index + sizeof(T) < get_index) { // overflow
+ THROW_NEW_ERROR(
+ isolate, NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset),
+ Object);
+ }
+ union {
+ T data;
+ uint8_t bytes[sizeof(T)];
+ } v;
+ size_t const buffer_offset = data_view_byte_offset + get_index;
+ DCHECK_GE(NumberToSize(buffer->byte_length()), buffer_offset + sizeof(T));
+ uint8_t const* const source =
+ static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
+ if (NeedToFlipBytes(is_little_endian)) {
+ FlipBytes<sizeof(T)>(v.bytes, source);
+ } else {
+ CopyBytes<sizeof(T)>(v.bytes, source);
+ }
+ return isolate->factory()->NewNumber(v.data);
+}
+
+template <typename T>
+T DataViewConvertValue(double value);
+
+template <>
+int8_t DataViewConvertValue<int8_t>(double value) {
+ return static_cast<int8_t>(DoubleToInt32(value));
+}
+
+template <>
+int16_t DataViewConvertValue<int16_t>(double value) {
+ return static_cast<int16_t>(DoubleToInt32(value));
+}
+
+template <>
+int32_t DataViewConvertValue<int32_t>(double value) {
+ return DoubleToInt32(value);
+}
+
+template <>
+uint8_t DataViewConvertValue<uint8_t>(double value) {
+ return static_cast<uint8_t>(DoubleToUint32(value));
+}
+
+template <>
+uint16_t DataViewConvertValue<uint16_t>(double value) {
+ return static_cast<uint16_t>(DoubleToUint32(value));
+}
+
+template <>
+uint32_t DataViewConvertValue<uint32_t>(double value) {
+ return DoubleToUint32(value);
+}
+
+template <>
+float DataViewConvertValue<float>(double value) {
+ return static_cast<float>(value);
+}
+
+template <>
+double DataViewConvertValue<double>(double value) {
+ return value;
+}
+
+// ES6 section 24.2.1.2 SetViewValue (view, requestIndex, isLittleEndian, type,
+// value)
+template <typename T>
+MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
+ Handle<Object> request_index,
+ bool is_little_endian, Handle<Object> value) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, request_index,
+ Object::ToIndex(isolate, request_index,
+ MessageTemplate::kInvalidDataViewAccessorOffset),
+ Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, value, Object::ToNumber(value), Object);
+ size_t get_index = 0;
+ if (!TryNumberToSize(*request_index, &get_index)) {
+ THROW_NEW_ERROR(
+ isolate, NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset),
+ Object);
+ }
+ Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()),
+ isolate);
+ size_t const data_view_byte_offset = NumberToSize(data_view->byte_offset());
+ size_t const data_view_byte_length = NumberToSize(data_view->byte_length());
+ if (get_index + sizeof(T) > data_view_byte_length ||
+ get_index + sizeof(T) < get_index) { // overflow
+ THROW_NEW_ERROR(
+ isolate, NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset),
+ Object);
+ }
+ union {
+ T data;
+ uint8_t bytes[sizeof(T)];
+ } v;
+ v.data = DataViewConvertValue<T>(value->Number());
+ size_t const buffer_offset = data_view_byte_offset + get_index;
+ DCHECK(NumberToSize(buffer->byte_length()) >= buffer_offset + sizeof(T));
+ uint8_t* const target =
+ static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
+ if (NeedToFlipBytes(is_little_endian)) {
+ FlipBytes<sizeof(T)>(target, v.bytes);
+ } else {
+ CopyBytes<sizeof(T)>(target, v.bytes);
+ }
+ return isolate->factory()->undefined_value();
+}
+
+} // namespace
+
+#define DATA_VIEW_PROTOTYPE_GET(Type, type) \
+ BUILTIN(DataViewPrototypeGet##Type) { \
+ HandleScope scope(isolate); \
+ CHECK_RECEIVER(JSDataView, data_view, "DataView.prototype.get" #Type); \
+ Handle<Object> byte_offset = args.atOrUndefined(isolate, 1); \
+ Handle<Object> is_little_endian = args.atOrUndefined(isolate, 2); \
+ Handle<Object> result; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
+ isolate, result, \
+ GetViewValue<type>(isolate, data_view, byte_offset, \
+ is_little_endian->BooleanValue())); \
+ return *result; \
+ }
+DATA_VIEW_PROTOTYPE_GET(Int8, int8_t)
+DATA_VIEW_PROTOTYPE_GET(Uint8, uint8_t)
+DATA_VIEW_PROTOTYPE_GET(Int16, int16_t)
+DATA_VIEW_PROTOTYPE_GET(Uint16, uint16_t)
+DATA_VIEW_PROTOTYPE_GET(Int32, int32_t)
+DATA_VIEW_PROTOTYPE_GET(Uint32, uint32_t)
+DATA_VIEW_PROTOTYPE_GET(Float32, float)
+DATA_VIEW_PROTOTYPE_GET(Float64, double)
+#undef DATA_VIEW_PROTOTYPE_GET
+
+#define DATA_VIEW_PROTOTYPE_SET(Type, type) \
+ BUILTIN(DataViewPrototypeSet##Type) { \
+ HandleScope scope(isolate); \
+ CHECK_RECEIVER(JSDataView, data_view, "DataView.prototype.set" #Type); \
+ Handle<Object> byte_offset = args.atOrUndefined(isolate, 1); \
+ Handle<Object> value = args.atOrUndefined(isolate, 2); \
+ Handle<Object> is_little_endian = args.atOrUndefined(isolate, 3); \
+ Handle<Object> result; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
+ isolate, result, \
+ SetViewValue<type>(isolate, data_view, byte_offset, \
+ is_little_endian->BooleanValue(), value)); \
+ return *result; \
+ }
+DATA_VIEW_PROTOTYPE_SET(Int8, int8_t)
+DATA_VIEW_PROTOTYPE_SET(Uint8, uint8_t)
+DATA_VIEW_PROTOTYPE_SET(Int16, int16_t)
+DATA_VIEW_PROTOTYPE_SET(Uint16, uint16_t)
+DATA_VIEW_PROTOTYPE_SET(Int32, int32_t)
+DATA_VIEW_PROTOTYPE_SET(Uint32, uint32_t)
+DATA_VIEW_PROTOTYPE_SET(Float32, float)
+DATA_VIEW_PROTOTYPE_SET(Float64, double)
+#undef DATA_VIEW_PROTOTYPE_SET
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index d5c34761f5..205c8c971f 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -909,93 +909,156 @@ BUILTIN(DatePrototypeToJson) {
}
// static
-void Builtins::Generate_DatePrototypeGetDate(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kDay);
+void Builtins::Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
+ int field_index) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ Label receiver_not_date(assembler, Label::kDeferred);
+
+ assembler->GotoIf(assembler->WordIsSmi(receiver), &receiver_not_date);
+ Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
+ assembler->GotoIf(
+ assembler->Word32NotEqual(receiver_instance_type,
+ assembler->Int32Constant(JS_DATE_TYPE)),
+ &receiver_not_date);
+
+ // Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ assembler->Return(
+ assembler->LoadObjectField(receiver, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch(assembler, Label::kDeferred);
+ Node* date_cache_stamp = assembler->Load(
+ MachineType::AnyTagged(),
+ assembler->ExternalConstant(
+ ExternalReference::date_cache_stamp(assembler->isolate())));
+
+ Node* cache_stamp =
+ assembler->LoadObjectField(receiver, JSDate::kCacheStampOffset);
+ assembler->GotoIf(assembler->WordNotEqual(date_cache_stamp, cache_stamp),
+ &stamp_mismatch);
+ assembler->Return(assembler->LoadObjectField(
+ receiver, JSDate::kValueOffset + field_index * kPointerSize));
+
+ assembler->Bind(&stamp_mismatch);
+ }
+
+ Node* field_index_smi = assembler->SmiConstant(Smi::FromInt(field_index));
+ Node* function = assembler->ExternalConstant(
+ ExternalReference::get_date_field_function(assembler->isolate()));
+ Node* result = assembler->CallCFunction2(
+ MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::AnyTagged(), function, receiver, field_index_smi);
+ assembler->Return(result);
+ }
+
+ // Raise a TypeError if the receiver is not a date.
+ assembler->Bind(&receiver_not_date);
+ {
+ Node* result = assembler->CallRuntime(Runtime::kThrowNotDateError, context);
+ assembler->Return(result);
+ }
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetDate(CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kDay);
}
// static
-void Builtins::Generate_DatePrototypeGetDay(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kWeekday);
+void Builtins::Generate_DatePrototypeGetDay(CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kWeekday);
}
// static
-void Builtins::Generate_DatePrototypeGetFullYear(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kYear);
+void Builtins::Generate_DatePrototypeGetFullYear(CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kYear);
}
// static
-void Builtins::Generate_DatePrototypeGetHours(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kHour);
+void Builtins::Generate_DatePrototypeGetHours(CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kHour);
}
// static
-void Builtins::Generate_DatePrototypeGetMilliseconds(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kMillisecond);
+void Builtins::Generate_DatePrototypeGetMilliseconds(
+ CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kMillisecond);
}
// static
-void Builtins::Generate_DatePrototypeGetMinutes(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kMinute);
+void Builtins::Generate_DatePrototypeGetMinutes(CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kMinute);
}
// static
-void Builtins::Generate_DatePrototypeGetMonth(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kMonth);
+void Builtins::Generate_DatePrototypeGetMonth(CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kMonth);
}
// static
-void Builtins::Generate_DatePrototypeGetSeconds(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kSecond);
+void Builtins::Generate_DatePrototypeGetSeconds(CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kSecond);
}
// static
-void Builtins::Generate_DatePrototypeGetTime(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kDateValue);
+void Builtins::Generate_DatePrototypeGetTime(CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kDateValue);
}
// static
-void Builtins::Generate_DatePrototypeGetTimezoneOffset(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kTimezoneOffset);
+void Builtins::Generate_DatePrototypeGetTimezoneOffset(
+ CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kTimezoneOffset);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCDate(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kDayUTC);
+void Builtins::Generate_DatePrototypeGetUTCDate(CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kDayUTC);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCDay(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kWeekdayUTC);
+void Builtins::Generate_DatePrototypeGetUTCDay(CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kWeekdayUTC);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCFullYear(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kYearUTC);
+void Builtins::Generate_DatePrototypeGetUTCFullYear(
+ CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kYearUTC);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCHours(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kHourUTC);
+void Builtins::Generate_DatePrototypeGetUTCHours(CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kHourUTC);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCMilliseconds(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kMillisecondUTC);
+void Builtins::Generate_DatePrototypeGetUTCMilliseconds(
+ CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kMillisecondUTC);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCMinutes(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kMinuteUTC);
+void Builtins::Generate_DatePrototypeGetUTCMinutes(
+ CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kMinuteUTC);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCMonth(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kMonthUTC);
+void Builtins::Generate_DatePrototypeGetUTCMonth(CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kMonthUTC);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kSecondUTC);
+void Builtins::Generate_DatePrototypeGetUTCSeconds(
+ CodeStubAssembler* assembler) {
+ Generate_DatePrototype_GetField(assembler, JSDate::kSecondUTC);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-global.cc b/deps/v8/src/builtins/builtins-global.cc
index d99a553d1e..2205788cfc 100644
--- a/deps/v8/src/builtins/builtins-global.cc
+++ b/deps/v8/src/builtins/builtins-global.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/code-factory.h"
#include "src/compiler.h"
#include "src/uri.h"
@@ -99,5 +100,113 @@ BUILTIN(GlobalEval) {
Execution::Call(isolate, function, target_global_proxy, 0, nullptr));
}
+// ES6 section 18.2.2 isFinite ( number )
+void Builtins::Generate_GlobalIsFinite(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(4);
+
+ Label return_true(assembler), return_false(assembler);
+
+ // We might need to loop once for ToNumber conversion.
+ Variable var_num(assembler, MachineRepresentation::kTagged);
+ Label loop(assembler, &var_num);
+ var_num.Bind(assembler->Parameter(1));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {num} value.
+ Node* num = var_num.value();
+
+ // Check if {num} is a Smi or a HeapObject.
+ assembler->GotoIf(assembler->WordIsSmi(num), &return_true);
+
+ // Check if {num} is a HeapNumber.
+ Label if_numisheapnumber(assembler),
+ if_numisnotheapnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(assembler->LoadMap(num),
+ assembler->HeapNumberMapConstant()),
+ &if_numisheapnumber, &if_numisnotheapnumber);
+
+ assembler->Bind(&if_numisheapnumber);
+ {
+ // Check if {num} contains a finite, non-NaN value.
+ Node* num_value = assembler->LoadHeapNumberValue(num);
+ assembler->BranchIfFloat64IsNaN(
+ assembler->Float64Sub(num_value, num_value), &return_false,
+ &return_true);
+ }
+
+ assembler->Bind(&if_numisnotheapnumber);
+ {
+ // Need to convert {num} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_num.Bind(assembler->CallStub(callable, context, num));
+ assembler->Goto(&loop);
+ }
+ }
+
+ assembler->Bind(&return_true);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&return_false);
+ assembler->Return(assembler->BooleanConstant(false));
+}
+
+// ES6 section 18.2.3 isNaN ( number )
+void Builtins::Generate_GlobalIsNaN(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(4);
+
+ Label return_true(assembler), return_false(assembler);
+
+ // We might need to loop once for ToNumber conversion.
+ Variable var_num(assembler, MachineRepresentation::kTagged);
+ Label loop(assembler, &var_num);
+ var_num.Bind(assembler->Parameter(1));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {num} value.
+ Node* num = var_num.value();
+
+ // Check if {num} is a Smi or a HeapObject.
+ assembler->GotoIf(assembler->WordIsSmi(num), &return_false);
+
+ // Check if {num} is a HeapNumber.
+ Label if_numisheapnumber(assembler),
+ if_numisnotheapnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(assembler->LoadMap(num),
+ assembler->HeapNumberMapConstant()),
+ &if_numisheapnumber, &if_numisnotheapnumber);
+
+ assembler->Bind(&if_numisheapnumber);
+ {
+ // Check if {num} contains a NaN.
+ Node* num_value = assembler->LoadHeapNumberValue(num);
+ assembler->BranchIfFloat64IsNaN(num_value, &return_true, &return_false);
+ }
+
+ assembler->Bind(&if_numisnotheapnumber);
+ {
+ // Need to convert {num} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_num.Bind(assembler->CallStub(callable, context, num));
+ assembler->Goto(&loop);
+ }
+ }
+
+ assembler->Bind(&return_true);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&return_false);
+ assembler->Return(assembler->BooleanConstant(false));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-handler.cc b/deps/v8/src/builtins/builtins-handler.cc
index 8b3df7927f..ebbc9784a1 100644
--- a/deps/v8/src/builtins/builtins-handler.cc
+++ b/deps/v8/src/builtins/builtins-handler.cc
@@ -14,6 +14,21 @@ void Builtins::Generate_KeyedLoadIC_Megamorphic(MacroAssembler* masm) {
KeyedLoadIC::GenerateMegamorphic(masm);
}
+void Builtins::Generate_KeyedLoadIC_Megamorphic_TF(
+ CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+ assembler->KeyedLoadICGeneric(&p);
+}
+
void Builtins::Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
KeyedLoadIC::GenerateMiss(masm);
}
@@ -34,7 +49,7 @@ void Builtins::Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
}
void Builtins::Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
- ElementHandlerCompiler::GenerateStoreSlow(masm);
+ KeyedStoreIC::GenerateSlow(masm);
}
void Builtins::Generate_LoadGlobalIC_Miss(CodeStubAssembler* assembler) {
@@ -105,8 +120,8 @@ void Builtins::Generate_StoreIC_Miss(CodeStubAssembler* assembler) {
Node* vector = assembler->Parameter(Descriptor::kVector);
Node* context = assembler->Parameter(Descriptor::kContext);
- assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, receiver, name,
- value, slot, vector);
+ assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+ vector, receiver, name);
}
void Builtins::Generate_StoreIC_Normal(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/builtins-internal.cc b/deps/v8/src/builtins/builtins-internal.cc
index 87c5dd549c..bec6ff3645 100644
--- a/deps/v8/src/builtins/builtins-internal.cc
+++ b/deps/v8/src/builtins/builtins-internal.cc
@@ -64,12 +64,9 @@ void Builtins::Generate_CopyFastSmiOrObjectElements(
// Load the {object}s elements.
Node* source = assembler->LoadObjectField(object, JSObject::kElementsOffset);
- CodeStubAssembler::ParameterMode mode =
- assembler->Is64() ? CodeStubAssembler::INTEGER_PARAMETERS
- : CodeStubAssembler::SMI_PARAMETERS;
- Node* length = (mode == CodeStubAssembler::INTEGER_PARAMETERS)
- ? assembler->LoadAndUntagFixedArrayBaseLength(source)
- : assembler->LoadFixedArrayBaseLength(source);
+ CodeStubAssembler::ParameterMode mode = assembler->OptimalParameterMode();
+ Node* length = assembler->UntagParameter(
+ assembler->LoadFixedArrayBaseLength(source), mode);
// Check if we can allocate in new space.
ElementsKind kind = FAST_ELEMENTS;
@@ -111,9 +108,8 @@ void Builtins::Generate_GrowFastDoubleElements(CodeStubAssembler* assembler) {
Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
Node* elements = assembler->LoadElements(object);
- elements = assembler->CheckAndGrowElementsCapacity(
- context, elements, FAST_DOUBLE_ELEMENTS, key, &runtime);
- assembler->StoreObjectField(object, JSObject::kElementsOffset, elements);
+ elements = assembler->TryGrowElementsCapacity(
+ object, elements, FAST_DOUBLE_ELEMENTS, key, &runtime);
assembler->Return(elements);
assembler->Bind(&runtime);
@@ -132,9 +128,8 @@ void Builtins::Generate_GrowFastSmiOrObjectElements(
Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
Node* elements = assembler->LoadElements(object);
- elements = assembler->CheckAndGrowElementsCapacity(
- context, elements, FAST_ELEMENTS, key, &runtime);
- assembler->StoreObjectField(object, JSObject::kElementsOffset, elements);
+ elements = assembler->TryGrowElementsCapacity(object, elements, FAST_ELEMENTS,
+ key, &runtime);
assembler->Return(elements);
assembler->Bind(&runtime);
diff --git a/deps/v8/src/builtins/builtins-interpreter.cc b/deps/v8/src/builtins/builtins-interpreter.cc
index 900172fd48..16091848c5 100644
--- a/deps/v8/src/builtins/builtins-interpreter.cc
+++ b/deps/v8/src/builtins/builtins-interpreter.cc
@@ -50,5 +50,27 @@ void Builtins::Generate_InterpreterPushArgsAndTailCallFunction(
CallableType::kJSFunction);
}
+Handle<Code> Builtins::InterpreterPushArgsAndConstruct(
+ CallableType function_type) {
+ switch (function_type) {
+ case CallableType::kJSFunction:
+ return InterpreterPushArgsAndConstructFunction();
+ case CallableType::kAny:
+ return InterpreterPushArgsAndConstruct();
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
+
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ return Generate_InterpreterPushArgsAndConstructImpl(masm, CallableType::kAny);
+}
+
+void Builtins::Generate_InterpreterPushArgsAndConstructFunction(
+ MacroAssembler* masm) {
+ return Generate_InterpreterPushArgsAndConstructImpl(
+ masm, CallableType::kJSFunction);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-iterator.cc b/deps/v8/src/builtins/builtins-iterator.cc
new file mode 100644
index 0000000000..7b91e364eb
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-iterator.cc
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+void Builtins::Generate_IteratorPrototypeIterator(
+ CodeStubAssembler* assembler) {
+ assembler->Return(assembler->Parameter(0));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index c2af0fdecf..17628445d1 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -11,6 +11,144 @@ namespace internal {
// -----------------------------------------------------------------------------
// ES6 section 20.1 Number Objects
+// ES6 section 20.1.2.2 Number.isFinite ( number )
+void Builtins::Generate_NumberIsFinite(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* number = assembler->Parameter(1);
+
+ Label return_true(assembler), return_false(assembler);
+
+ // Check if {number} is a Smi.
+ assembler->GotoIf(assembler->WordIsSmi(number), &return_true);
+
+ // Check if {number} is a HeapNumber.
+ assembler->GotoUnless(
+ assembler->WordEqual(assembler->LoadMap(number),
+ assembler->HeapNumberMapConstant()),
+ &return_false);
+
+ // Check if {number} contains a finite, non-NaN value.
+ Node* number_value = assembler->LoadHeapNumberValue(number);
+ assembler->BranchIfFloat64IsNaN(
+ assembler->Float64Sub(number_value, number_value), &return_false,
+ &return_true);
+
+ assembler->Bind(&return_true);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&return_false);
+ assembler->Return(assembler->BooleanConstant(false));
+}
+
+// ES6 section 20.1.2.3 Number.isInteger ( number )
+void Builtins::Generate_NumberIsInteger(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* number = assembler->Parameter(1);
+
+ Label return_true(assembler), return_false(assembler);
+
+ // Check if {number} is a Smi.
+ assembler->GotoIf(assembler->WordIsSmi(number), &return_true);
+
+ // Check if {number} is a HeapNumber.
+ assembler->GotoUnless(
+ assembler->WordEqual(assembler->LoadMap(number),
+ assembler->HeapNumberMapConstant()),
+ &return_false);
+
+ // Load the actual value of {number}.
+ Node* number_value = assembler->LoadHeapNumberValue(number);
+
+ // Truncate the value of {number} to an integer (or an infinity).
+ Node* integer = assembler->Float64Trunc(number_value);
+
+ // Check if {number}s value matches the integer (ruling out the infinities).
+ assembler->BranchIfFloat64Equal(assembler->Float64Sub(number_value, integer),
+ assembler->Float64Constant(0.0), &return_true,
+ &return_false);
+
+ assembler->Bind(&return_true);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&return_false);
+ assembler->Return(assembler->BooleanConstant(false));
+}
+
+// ES6 section 20.1.2.4 Number.isNaN ( number )
+void Builtins::Generate_NumberIsNaN(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* number = assembler->Parameter(1);
+
+ Label return_true(assembler), return_false(assembler);
+
+ // Check if {number} is a Smi.
+ assembler->GotoIf(assembler->WordIsSmi(number), &return_false);
+
+ // Check if {number} is a HeapNumber.
+ assembler->GotoUnless(
+ assembler->WordEqual(assembler->LoadMap(number),
+ assembler->HeapNumberMapConstant()),
+ &return_false);
+
+ // Check if {number} contains a NaN value.
+ Node* number_value = assembler->LoadHeapNumberValue(number);
+ assembler->BranchIfFloat64IsNaN(number_value, &return_true, &return_false);
+
+ assembler->Bind(&return_true);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&return_false);
+ assembler->Return(assembler->BooleanConstant(false));
+}
+
+// ES6 section 20.1.2.5 Number.isSafeInteger ( number )
+void Builtins::Generate_NumberIsSafeInteger(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* number = assembler->Parameter(1);
+
+ Label return_true(assembler), return_false(assembler);
+
+ // Check if {number} is a Smi.
+ assembler->GotoIf(assembler->WordIsSmi(number), &return_true);
+
+ // Check if {number} is a HeapNumber.
+ assembler->GotoUnless(
+ assembler->WordEqual(assembler->LoadMap(number),
+ assembler->HeapNumberMapConstant()),
+ &return_false);
+
+ // Load the actual value of {number}.
+ Node* number_value = assembler->LoadHeapNumberValue(number);
+
+ // Truncate the value of {number} to an integer (or an infinity).
+ Node* integer = assembler->Float64Trunc(number_value);
+
+ // Check if {number}s value matches the integer (ruling out the infinities).
+ assembler->GotoUnless(
+ assembler->Float64Equal(assembler->Float64Sub(number_value, integer),
+ assembler->Float64Constant(0.0)),
+ &return_false);
+
+ // Check if the {integer} value is in safe integer range.
+ assembler->BranchIfFloat64LessThanOrEqual(
+ assembler->Float64Abs(integer),
+ assembler->Float64Constant(kMaxSafeInteger), &return_true, &return_false);
+
+ assembler->Bind(&return_true);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&return_false);
+ assembler->Return(assembler->BooleanConstant(false));
+}
+
// ES6 section 20.1.3.2 Number.prototype.toExponential ( fractionDigits )
BUILTIN(NumberPrototypeToExponential) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index c422145a51..671397d9ea 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -35,7 +35,7 @@ void Builtins::Generate_ObjectHasOwnProperty(CodeStubAssembler* assembler) {
Node* map = assembler->LoadMap(object);
Node* instance_type = assembler->LoadMapInstanceType(map);
- Variable var_index(assembler, MachineRepresentation::kWord32);
+ Variable var_index(assembler, MachineType::PointerRepresentation());
Label keyisindex(assembler), if_iskeyunique(assembler);
assembler->TryToName(key, &keyisindex, &var_index, &if_iskeyunique,
@@ -46,6 +46,10 @@ void Builtins::Generate_ObjectHasOwnProperty(CodeStubAssembler* assembler) {
&return_false, &call_runtime);
assembler->Bind(&keyisindex);
+ // Handle negative keys in the runtime.
+ assembler->GotoIf(assembler->IntPtrLessThan(var_index.value(),
+ assembler->IntPtrConstant(0)),
+ &call_runtime);
assembler->TryLookupElement(object, map, instance_type, var_index.value(),
&return_true, &return_false, &call_runtime);
@@ -230,10 +234,8 @@ void IsString(CodeStubAssembler* assembler, compiler::Node* object,
{
Node* instance_type = assembler->LoadInstanceType(object);
- assembler->Branch(
- assembler->Int32LessThan(
- instance_type, assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
- if_string, if_notstring);
+ assembler->Branch(assembler->IsStringInstanceType(instance_type), if_string,
+ if_notstring);
}
}
@@ -259,10 +261,8 @@ void ReturnIfPrimitive(CodeStubAssembler* assembler,
CodeStubAssembler::Label* return_string,
CodeStubAssembler::Label* return_boolean,
CodeStubAssembler::Label* return_number) {
- assembler->GotoIf(
- assembler->Int32LessThan(instance_type,
- assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
- return_string);
+ assembler->GotoIf(assembler->IsStringInstanceType(instance_type),
+ return_string);
assembler->GotoIf(assembler->Word32Equal(
instance_type, assembler->Int32Constant(ODDBALL_TYPE)),
@@ -910,5 +910,18 @@ BUILTIN(ObjectSeal) {
return *object;
}
+// ES6 section 7.3.19 OrdinaryHasInstance ( C, O )
+void Builtins::Generate_OrdinaryHasInstance(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef CompareDescriptor Descriptor;
+
+ Node* constructor = assembler->Parameter(Descriptor::kLeft);
+ Node* object = assembler->Parameter(Descriptor::kRight);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->Return(
+ assembler->OrdinaryHasInstance(context, constructor, object));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-regexp.cc b/deps/v8/src/builtins/builtins-regexp.cc
new file mode 100644
index 0000000000..371221fa70
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-regexp.cc
@@ -0,0 +1,441 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+
+#include "src/code-factory.h"
+#include "src/regexp/jsregexp.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 21.2 RegExp Objects
+
+namespace {
+
+// ES#sec-isregexp IsRegExp ( argument )
+Maybe<bool> IsRegExp(Isolate* isolate, Handle<Object> object) {
+ if (!object->IsJSReceiver()) return Just(false);
+
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+
+ if (isolate->regexp_function()->initial_map() == receiver->map()) {
+ // Fast-path for unmodified JSRegExp instances.
+ return Just(true);
+ }
+
+ Handle<Object> match;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, match,
+ JSObject::GetProperty(receiver, isolate->factory()->match_symbol()),
+ Nothing<bool>());
+
+ if (!match->IsUndefined(isolate)) return Just(match->BooleanValue());
+ return Just(object->IsJSRegExp());
+}
+
+Handle<String> PatternFlags(Isolate* isolate, Handle<JSRegExp> regexp) {
+ static const int kMaxFlagsLength = 5 + 1; // 5 flags and '\0';
+ char flags_string[kMaxFlagsLength];
+ int i = 0;
+
+ const JSRegExp::Flags flags = regexp->GetFlags();
+
+ if ((flags & JSRegExp::kGlobal) != 0) flags_string[i++] = 'g';
+ if ((flags & JSRegExp::kIgnoreCase) != 0) flags_string[i++] = 'i';
+ if ((flags & JSRegExp::kMultiline) != 0) flags_string[i++] = 'm';
+ if ((flags & JSRegExp::kUnicode) != 0) flags_string[i++] = 'u';
+ if ((flags & JSRegExp::kSticky) != 0) flags_string[i++] = 'y';
+
+ DCHECK_LT(i, kMaxFlagsLength);
+ memset(&flags_string[i], '\0', kMaxFlagsLength - i);
+
+ return isolate->factory()->NewStringFromAsciiChecked(flags_string);
+}
+
+// ES#sec-regexpinitialize
+// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
+MaybeHandle<JSRegExp> RegExpInitialize(Isolate* isolate,
+ Handle<JSRegExp> regexp,
+ Handle<Object> pattern,
+ Handle<Object> flags) {
+ Handle<String> pattern_string;
+ if (pattern->IsUndefined(isolate)) {
+ pattern_string = isolate->factory()->empty_string();
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, pattern_string,
+ Object::ToString(isolate, pattern), JSRegExp);
+ }
+
+ Handle<String> flags_string;
+ if (flags->IsUndefined(isolate)) {
+ flags_string = isolate->factory()->empty_string();
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, flags_string,
+ Object::ToString(isolate, flags), JSRegExp);
+ }
+
+ // TODO(jgruber): We could avoid the flags back and forth conversions.
+ RETURN_RESULT(isolate,
+ JSRegExp::Initialize(regexp, pattern_string, flags_string),
+ JSRegExp);
+}
+
+} // namespace
+
+// ES#sec-regexp-pattern-flags
+// RegExp ( pattern, flags )
+BUILTIN(RegExpConstructor) {
+ HandleScope scope(isolate);
+
+ Handle<HeapObject> new_target = args.new_target();
+ Handle<Object> pattern = args.atOrUndefined(isolate, 1);
+ Handle<Object> flags = args.atOrUndefined(isolate, 2);
+
+ Handle<JSFunction> target = isolate->regexp_function();
+
+ bool pattern_is_regexp;
+ {
+ Maybe<bool> maybe_pattern_is_regexp = IsRegExp(isolate, pattern);
+ if (maybe_pattern_is_regexp.IsNothing()) {
+ DCHECK(isolate->has_pending_exception());
+ return isolate->heap()->exception();
+ }
+ pattern_is_regexp = maybe_pattern_is_regexp.FromJust();
+ }
+
+ if (new_target->IsUndefined(isolate)) {
+ new_target = target;
+
+ // ES6 section 21.2.3.1 step 3.b
+ if (pattern_is_regexp && flags->IsUndefined(isolate)) {
+ Handle<Object> pattern_constructor;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, pattern_constructor,
+ Object::GetProperty(pattern,
+ isolate->factory()->constructor_string()));
+
+ if (pattern_constructor.is_identical_to(new_target)) {
+ return *pattern;
+ }
+ }
+ }
+
+ if (pattern->IsJSRegExp()) {
+ Handle<JSRegExp> regexp_pattern = Handle<JSRegExp>::cast(pattern);
+
+ if (flags->IsUndefined(isolate)) {
+ flags = PatternFlags(isolate, regexp_pattern);
+ }
+ pattern = handle(regexp_pattern->source(), isolate);
+ } else if (pattern_is_regexp) {
+ Handle<Object> pattern_source;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, pattern_source,
+ Object::GetProperty(pattern, isolate->factory()->source_string()));
+
+ if (flags->IsUndefined(isolate)) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, flags,
+ Object::GetProperty(pattern, isolate->factory()->flags_string()));
+ }
+ pattern = pattern_source;
+ }
+
+ Handle<JSReceiver> new_target_receiver = Handle<JSReceiver>::cast(new_target);
+
+ // TODO(jgruber): Fast-path for target == new_target == unmodified JSRegExp.
+
+ Handle<JSObject> object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, object, JSObject::New(target, new_target_receiver));
+ Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(object);
+
+ RETURN_RESULT_OR_FAILURE(isolate,
+ RegExpInitialize(isolate, regexp, pattern, flags));
+}
+
+namespace {
+
+compiler::Node* LoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
+ compiler::Node* has_initialmap,
+ compiler::Node* regexp) {
+ typedef CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Variable var_value(a, MachineRepresentation::kTagged);
+
+ Label out(a), if_unmodified(a), if_modified(a, Label::kDeferred);
+ a->Branch(has_initialmap, &if_unmodified, &if_modified);
+
+ a->Bind(&if_unmodified);
+ {
+ // Load the in-object field.
+ static const int field_offset =
+ JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
+ var_value.Bind(a->LoadObjectField(regexp, field_offset));
+ a->Goto(&out);
+ }
+
+ a->Bind(&if_modified);
+ {
+ // Load through the GetProperty stub.
+ Node* const name =
+ a->HeapConstant(a->isolate()->factory()->last_index_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
+ var_value.Bind(a->CallStub(getproperty_callable, context, regexp, name));
+ a->Goto(&out);
+ }
+
+ a->Bind(&out);
+ return var_value.value();
+}
+
+void StoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
+ compiler::Node* has_initialmap, compiler::Node* regexp,
+ compiler::Node* value) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Label out(a), if_unmodified(a), if_modified(a, Label::kDeferred);
+ a->Branch(has_initialmap, &if_unmodified, &if_modified);
+
+ a->Bind(&if_unmodified);
+ {
+ // Store the in-object field.
+ static const int field_offset =
+ JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
+ a->StoreObjectField(regexp, field_offset, value);
+ a->Goto(&out);
+ }
+
+ a->Bind(&if_modified);
+ {
+ // Store through runtime.
+ // TODO(ishell): Use SetPropertyStub here once available.
+ Node* const name =
+ a->HeapConstant(a->isolate()->factory()->last_index_string());
+ Node* const language_mode = a->SmiConstant(Smi::FromInt(STRICT));
+ a->CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
+ language_mode);
+ a->Goto(&out);
+ }
+
+ a->Bind(&out);
+}
+
+compiler::Node* ConstructNewResultFromMatchInfo(Isolate* isolate,
+ CodeStubAssembler* a,
+ compiler::Node* context,
+ compiler::Node* match_elements,
+ compiler::Node* string) {
+ typedef CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Label out(a);
+
+ CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
+ Node* const num_indices = a->SmiUntag(a->LoadFixedArrayElement(
+ match_elements, a->IntPtrConstant(RegExpImpl::kLastCaptureCount), 0,
+ mode));
+ Node* const num_results = a->SmiTag(a->WordShr(num_indices, 1));
+ Node* const start = a->LoadFixedArrayElement(
+ match_elements, a->IntPtrConstant(RegExpImpl::kFirstCapture), 0, mode);
+ Node* const end = a->LoadFixedArrayElement(
+ match_elements, a->IntPtrConstant(RegExpImpl::kFirstCapture + 1), 0,
+ mode);
+
+ // Calculate the substring of the first match before creating the result array
+ // to avoid an unnecessary write barrier storing the first result.
+ Node* const first = a->SubString(context, string, start, end);
+
+ Node* const result =
+ a->AllocateRegExpResult(context, num_results, start, string);
+ Node* const result_elements = a->LoadElements(result);
+
+ a->StoreFixedArrayElement(result_elements, a->IntPtrConstant(0), first,
+ SKIP_WRITE_BARRIER);
+
+ a->GotoIf(a->SmiEqual(num_results, a->SmiConstant(Smi::FromInt(1))), &out);
+
+ // Store all remaining captures.
+ Node* const limit =
+ a->IntPtrAdd(a->IntPtrConstant(RegExpImpl::kFirstCapture), num_indices);
+
+ Variable var_from_cursor(a, MachineType::PointerRepresentation());
+ Variable var_to_cursor(a, MachineType::PointerRepresentation());
+
+ var_from_cursor.Bind(a->IntPtrConstant(RegExpImpl::kFirstCapture + 2));
+ var_to_cursor.Bind(a->IntPtrConstant(1));
+
+ Variable* vars[] = {&var_from_cursor, &var_to_cursor};
+ Label loop(a, 2, vars);
+
+ a->Goto(&loop);
+ a->Bind(&loop);
+ {
+ Node* const from_cursor = var_from_cursor.value();
+ Node* const to_cursor = var_to_cursor.value();
+ Node* const start = a->LoadFixedArrayElement(match_elements, from_cursor);
+
+ Label next_iter(a);
+ a->GotoIf(a->SmiEqual(start, a->SmiConstant(Smi::FromInt(-1))), &next_iter);
+
+ Node* const from_cursor_plus1 =
+ a->IntPtrAdd(from_cursor, a->IntPtrConstant(1));
+ Node* const end =
+ a->LoadFixedArrayElement(match_elements, from_cursor_plus1);
+
+ Node* const capture = a->SubString(context, string, start, end);
+ a->StoreFixedArrayElement(result_elements, to_cursor, capture);
+ a->Goto(&next_iter);
+
+ a->Bind(&next_iter);
+ var_from_cursor.Bind(a->IntPtrAdd(from_cursor, a->IntPtrConstant(2)));
+ var_to_cursor.Bind(a->IntPtrAdd(to_cursor, a->IntPtrConstant(1)));
+ a->Branch(a->UintPtrLessThan(var_from_cursor.value(), limit), &loop, &out);
+ }
+
+ a->Bind(&out);
+ return result;
+}
+
+} // namespace
+
+// ES#sec-regexp.prototype.exec
+// RegExp.prototype.exec ( string )
+void Builtins::Generate_RegExpPrototypeExec(CodeStubAssembler* a) {
+ typedef CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Isolate* const isolate = a->isolate();
+
+ Node* const receiver = a->Parameter(0);
+ Node* const maybe_string = a->Parameter(1);
+ Node* const context = a->Parameter(4);
+
+ Node* const null = a->NullConstant();
+ Node* const int_zero = a->IntPtrConstant(0);
+ Node* const smi_zero = a->SmiConstant(Smi::FromInt(0));
+
+ // Ensure {receiver} is a JSRegExp.
+ Node* const regexp_map = a->ThrowIfNotInstanceType(
+ context, receiver, JS_REGEXP_TYPE, "RegExp.prototype.exec");
+ Node* const regexp = receiver;
+
+ // Check whether the regexp instance is unmodified.
+ Node* const native_context = a->LoadNativeContext(context);
+ Node* const regexp_fun =
+ a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ Node* const initial_map =
+ a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const has_initialmap = a->WordEqual(regexp_map, initial_map);
+
+ // Convert {maybe_string} to a string.
+ Callable tostring_callable = CodeFactory::ToString(isolate);
+ Node* const string = a->CallStub(tostring_callable, context, maybe_string);
+ Node* const string_length = a->LoadStringLength(string);
+
+ // Check whether the regexp is global or sticky, which determines whether we
+ // update last index later on.
+ Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+ Node* const is_global_or_sticky =
+ a->WordAnd(a->SmiUntag(flags),
+ a->IntPtrConstant(JSRegExp::kGlobal | JSRegExp::kSticky));
+ Node* const should_update_last_index =
+ a->WordNotEqual(is_global_or_sticky, int_zero);
+
+ // Grab and possibly update last index.
+ Label run_exec(a);
+ Variable var_lastindex(a, MachineRepresentation::kTagged);
+ {
+ Label if_doupdate(a), if_dontupdate(a);
+ a->Branch(should_update_last_index, &if_doupdate, &if_dontupdate);
+
+ a->Bind(&if_doupdate);
+ {
+ Node* const regexp_lastindex =
+ LoadLastIndex(a, context, has_initialmap, regexp);
+
+ Callable tolength_callable = CodeFactory::ToLength(isolate);
+ Node* const lastindex =
+ a->CallStub(tolength_callable, context, regexp_lastindex);
+ var_lastindex.Bind(lastindex);
+
+ Label if_isoob(a, Label::kDeferred);
+ a->GotoUnless(a->WordIsSmi(lastindex), &if_isoob);
+ a->GotoUnless(a->SmiLessThanOrEqual(lastindex, string_length), &if_isoob);
+ a->Goto(&run_exec);
+
+ a->Bind(&if_isoob);
+ {
+ StoreLastIndex(a, context, has_initialmap, regexp, smi_zero);
+ a->Return(null);
+ }
+ }
+
+ a->Bind(&if_dontupdate);
+ {
+ var_lastindex.Bind(smi_zero);
+ a->Goto(&run_exec);
+ }
+ }
+
+ Node* match_indices;
+ Label successful_match(a);
+ a->Bind(&run_exec);
+ {
+ // Get last match info from the context.
+ Node* const last_match_info = a->LoadContextElement(
+ native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+
+ // Call the exec stub.
+ Callable exec_callable = CodeFactory::RegExpExec(isolate);
+ match_indices = a->CallStub(exec_callable, context, regexp, string,
+ var_lastindex.value(), last_match_info);
+
+ // {match_indices} is either null or the RegExpLastMatchInfo array.
+ // Return early if exec failed, possibly updating last index.
+ a->GotoUnless(a->WordEqual(match_indices, null), &successful_match);
+
+ Label return_null(a);
+ a->GotoUnless(should_update_last_index, &return_null);
+
+ StoreLastIndex(a, context, has_initialmap, regexp, smi_zero);
+ a->Goto(&return_null);
+
+ a->Bind(&return_null);
+ a->Return(null);
+ }
+
+ Label construct_result(a);
+ a->Bind(&successful_match);
+ {
+ Node* const match_elements = a->LoadElements(match_indices);
+
+ a->GotoUnless(should_update_last_index, &construct_result);
+
+ // Update the new last index from {match_indices}.
+ Node* const new_lastindex = a->LoadFixedArrayElement(
+ match_elements, a->IntPtrConstant(RegExpImpl::kFirstCapture + 1));
+
+ StoreLastIndex(a, context, has_initialmap, regexp, new_lastindex);
+ a->Goto(&construct_result);
+
+ a->Bind(&construct_result);
+ {
+ Node* result = ConstructNewResultFromMatchInfo(isolate, a, context,
+ match_elements, string);
+ a->Return(result);
+ }
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index 23d4f43af2..6aad4daeef 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -141,6 +141,7 @@ void ValidateAtomicIndex(CodeStubAssembler* a, compiler::Node* index_word,
using namespace compiler;
// Check if the index is in bounds. If not, throw RangeError.
CodeStubAssembler::Label if_inbounds(a), if_notinbounds(a);
+ // TODO(jkummerow): Use unsigned comparison instead of "i<0 || i>length".
a->Branch(
a->WordOr(a->Int32LessThan(index_word, a->Int32Constant(0)),
a->Int32GreaterThanOrEqual(index_word, array_length_word)),
@@ -227,8 +228,7 @@ void Builtins::Generate_AtomicsStore(CodeStubAssembler* a) {
ValidateAtomicIndex(a, index_word32, array_length_word32, context);
Node* index_word = a->ChangeUint32ToWord(index_word32);
- Callable to_integer = CodeFactory::ToInteger(a->isolate());
- Node* value_integer = a->CallStub(to_integer, context, value);
+ Node* value_integer = a->ToInteger(context, value);
Node* value_word32 = a->TruncateTaggedToWord32(context, value_integer);
CodeStubAssembler::Label u8(a), u16(a), u32(a), other(a);
@@ -248,8 +248,8 @@ void Builtins::Generate_AtomicsStore(CodeStubAssembler* a) {
a->Return(value_integer);
a->Bind(&u16);
- a->SmiTag(a->AtomicStore(MachineRepresentation::kWord16, backing_store,
- a->WordShl(index_word, 1), value_word32));
+ a->AtomicStore(MachineRepresentation::kWord16, backing_store,
+ a->WordShl(index_word, 1), value_word32);
a->Return(value_integer);
a->Bind(&u32);
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index d38f6b069d..68d2bd0c97 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -10,6 +10,408 @@
namespace v8 {
namespace internal {
+namespace {
+
+enum ResultMode { kDontNegateResult, kNegateResult };
+
+void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
+ // Here's pseudo-code for the algorithm below in case of kDontNegateResult
+ // mode; for kNegateResult mode we properly negate the result.
+ //
+ // if (lhs == rhs) return true;
+ // if (lhs->length() != rhs->length()) return false;
+ // if (lhs->IsInternalizedString() && rhs->IsInternalizedString()) {
+ // return false;
+ // }
+ // if (lhs->IsSeqOneByteString() && rhs->IsSeqOneByteString()) {
+ // for (i = 0; i != lhs->length(); ++i) {
+ // if (lhs[i] != rhs[i]) return false;
+ // }
+ // return true;
+ // }
+ // return %StringEqual(lhs, rhs);
+
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* lhs = assembler->Parameter(0);
+ Node* rhs = assembler->Parameter(1);
+ Node* context = assembler->Parameter(2);
+
+ Label if_equal(assembler), if_notequal(assembler);
+
+ // Fast check to see if {lhs} and {rhs} refer to the same String object.
+ Label if_same(assembler), if_notsame(assembler);
+ assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+ assembler->Bind(&if_same);
+ assembler->Goto(&if_equal);
+
+ assembler->Bind(&if_notsame);
+ {
+ // The {lhs} and {rhs} don't refer to the exact same String object.
+
+ // Load the length of {lhs} and {rhs}.
+ Node* lhs_length = assembler->LoadStringLength(lhs);
+ Node* rhs_length = assembler->LoadStringLength(rhs);
+
+ // Check if the lengths of {lhs} and {rhs} are equal.
+ Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
+ assembler->Branch(assembler->WordEqual(lhs_length, rhs_length),
+ &if_lengthisequal, &if_lengthisnotequal);
+
+ assembler->Bind(&if_lengthisequal);
+ {
+ // Load instance types of {lhs} and {rhs}.
+ Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+ Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+ // Combine the instance types into a single 16-bit value, so we can check
+ // both of them at once.
+ Node* both_instance_types = assembler->Word32Or(
+ lhs_instance_type,
+ assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
+
+ // Check if both {lhs} and {rhs} are internalized.
+ int const kBothInternalizedMask =
+ kIsNotInternalizedMask | (kIsNotInternalizedMask << 8);
+ int const kBothInternalizedTag =
+ kInternalizedTag | (kInternalizedTag << 8);
+ Label if_bothinternalized(assembler), if_notbothinternalized(assembler);
+ assembler->Branch(assembler->Word32Equal(
+ assembler->Word32And(both_instance_types,
+ assembler->Int32Constant(
+ kBothInternalizedMask)),
+ assembler->Int32Constant(kBothInternalizedTag)),
+ &if_bothinternalized, &if_notbothinternalized);
+
+ assembler->Bind(&if_bothinternalized);
+ {
+ // Fast negative check for internalized-to-internalized equality.
+ assembler->Goto(&if_notequal);
+ }
+
+ assembler->Bind(&if_notbothinternalized);
+ {
+ // Check that both {lhs} and {rhs} are flat one-byte strings.
+ int const kBothSeqOneByteStringMask =
+ kStringEncodingMask | kStringRepresentationMask |
+ ((kStringEncodingMask | kStringRepresentationMask) << 8);
+ int const kBothSeqOneByteStringTag =
+ kOneByteStringTag | kSeqStringTag |
+ ((kOneByteStringTag | kSeqStringTag) << 8);
+ Label if_bothonebyteseqstrings(assembler),
+ if_notbothonebyteseqstrings(assembler);
+ assembler->Branch(
+ assembler->Word32Equal(
+ assembler->Word32And(
+ both_instance_types,
+ assembler->Int32Constant(kBothSeqOneByteStringMask)),
+ assembler->Int32Constant(kBothSeqOneByteStringTag)),
+ &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+
+ assembler->Bind(&if_bothonebyteseqstrings);
+ {
+ // Compute the effective offset of the first character.
+ Node* begin = assembler->IntPtrConstant(
+ SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ // Compute the first offset after the string from the length.
+ Node* end =
+ assembler->IntPtrAdd(begin, assembler->SmiUntag(lhs_length));
+
+ // Loop over the {lhs} and {rhs} strings to see if they are equal.
+ Variable var_offset(assembler, MachineType::PointerRepresentation());
+ Label loop(assembler, &var_offset);
+ var_offset.Bind(begin);
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Check if {offset} equals {end}.
+ Node* offset = var_offset.value();
+ Label if_done(assembler), if_notdone(assembler);
+ assembler->Branch(assembler->WordEqual(offset, end), &if_done,
+ &if_notdone);
+
+ assembler->Bind(&if_notdone);
+ {
+ // Load the next characters from {lhs} and {rhs}.
+ Node* lhs_value =
+ assembler->Load(MachineType::Uint8(), lhs, offset);
+ Node* rhs_value =
+ assembler->Load(MachineType::Uint8(), rhs, offset);
+
+ // Check if the characters match.
+ Label if_valueissame(assembler), if_valueisnotsame(assembler);
+ assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
+ &if_valueissame, &if_valueisnotsame);
+
+ assembler->Bind(&if_valueissame);
+ {
+ // Advance to next character.
+ var_offset.Bind(
+ assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
+ }
+ assembler->Goto(&loop);
+
+ assembler->Bind(&if_valueisnotsame);
+ assembler->Goto(&if_notequal);
+ }
+
+ assembler->Bind(&if_done);
+ assembler->Goto(&if_equal);
+ }
+ }
+
+ assembler->Bind(&if_notbothonebyteseqstrings);
+ {
+ // TODO(bmeurer): Add fast case support for flattened cons strings;
+ // also add support for two byte string equality checks.
+ Runtime::FunctionId function_id = (mode == kDontNegateResult)
+ ? Runtime::kStringEqual
+ : Runtime::kStringNotEqual;
+ assembler->TailCallRuntime(function_id, context, lhs, rhs);
+ }
+ }
+ }
+
+ assembler->Bind(&if_lengthisnotequal);
+ {
+ // Mismatch in length of {lhs} and {rhs}, cannot be equal.
+ assembler->Goto(&if_notequal);
+ }
+ }
+
+ assembler->Bind(&if_equal);
+ assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+
+ assembler->Bind(&if_notequal);
+ assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+}
+
+enum RelationalComparisonMode {
+ kLessThan,
+ kLessThanOrEqual,
+ kGreaterThan,
+ kGreaterThanOrEqual
+};
+
+void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
+ RelationalComparisonMode mode) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* lhs = assembler->Parameter(0);
+ Node* rhs = assembler->Parameter(1);
+ Node* context = assembler->Parameter(2);
+
+ Label if_less(assembler), if_equal(assembler), if_greater(assembler);
+
+ // Fast check to see if {lhs} and {rhs} refer to the same String object.
+ Label if_same(assembler), if_notsame(assembler);
+ assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+ assembler->Bind(&if_same);
+ assembler->Goto(&if_equal);
+
+ assembler->Bind(&if_notsame);
+ {
+ // Load instance types of {lhs} and {rhs}.
+ Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+ Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+ // Combine the instance types into a single 16-bit value, so we can check
+ // both of them at once.
+ Node* both_instance_types = assembler->Word32Or(
+ lhs_instance_type,
+ assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
+
+ // Check that both {lhs} and {rhs} are flat one-byte strings.
+ int const kBothSeqOneByteStringMask =
+ kStringEncodingMask | kStringRepresentationMask |
+ ((kStringEncodingMask | kStringRepresentationMask) << 8);
+ int const kBothSeqOneByteStringTag =
+ kOneByteStringTag | kSeqStringTag |
+ ((kOneByteStringTag | kSeqStringTag) << 8);
+ Label if_bothonebyteseqstrings(assembler),
+ if_notbothonebyteseqstrings(assembler);
+ assembler->Branch(assembler->Word32Equal(
+ assembler->Word32And(both_instance_types,
+ assembler->Int32Constant(
+ kBothSeqOneByteStringMask)),
+ assembler->Int32Constant(kBothSeqOneByteStringTag)),
+ &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+
+ assembler->Bind(&if_bothonebyteseqstrings);
+ {
+ // Load the length of {lhs} and {rhs}.
+ Node* lhs_length = assembler->LoadStringLength(lhs);
+ Node* rhs_length = assembler->LoadStringLength(rhs);
+
+ // Determine the minimum length.
+ Node* length = assembler->SmiMin(lhs_length, rhs_length);
+
+ // Compute the effective offset of the first character.
+ Node* begin = assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
+ kHeapObjectTag);
+
+ // Compute the first offset after the string from the length.
+ Node* end = assembler->IntPtrAdd(begin, assembler->SmiUntag(length));
+
+ // Loop over the {lhs} and {rhs} strings to see if they are equal.
+ Variable var_offset(assembler, MachineType::PointerRepresentation());
+ Label loop(assembler, &var_offset);
+ var_offset.Bind(begin);
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Check if {offset} equals {end}.
+ Node* offset = var_offset.value();
+ Label if_done(assembler), if_notdone(assembler);
+ assembler->Branch(assembler->WordEqual(offset, end), &if_done,
+ &if_notdone);
+
+ assembler->Bind(&if_notdone);
+ {
+ // Load the next characters from {lhs} and {rhs}.
+ Node* lhs_value = assembler->Load(MachineType::Uint8(), lhs, offset);
+ Node* rhs_value = assembler->Load(MachineType::Uint8(), rhs, offset);
+
+ // Check if the characters match.
+ Label if_valueissame(assembler), if_valueisnotsame(assembler);
+ assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
+ &if_valueissame, &if_valueisnotsame);
+
+ assembler->Bind(&if_valueissame);
+ {
+ // Advance to next character.
+ var_offset.Bind(
+ assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
+ }
+ assembler->Goto(&loop);
+
+ assembler->Bind(&if_valueisnotsame);
+ assembler->BranchIf(assembler->Uint32LessThan(lhs_value, rhs_value),
+ &if_less, &if_greater);
+ }
+
+ assembler->Bind(&if_done);
+ {
+ // All characters up to the min length are equal, decide based on
+ // string length.
+ Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
+ assembler->Branch(assembler->SmiEqual(lhs_length, rhs_length),
+ &if_lengthisequal, &if_lengthisnotequal);
+
+ assembler->Bind(&if_lengthisequal);
+ assembler->Goto(&if_equal);
+
+ assembler->Bind(&if_lengthisnotequal);
+ assembler->BranchIfSmiLessThan(lhs_length, rhs_length, &if_less,
+ &if_greater);
+ }
+ }
+ }
+
+ assembler->Bind(&if_notbothonebyteseqstrings);
+ {
+ // TODO(bmeurer): Add fast case support for flattened cons strings;
+ // also add support for two byte string relational comparisons.
+ switch (mode) {
+ case kLessThan:
+ assembler->TailCallRuntime(Runtime::kStringLessThan, context, lhs,
+ rhs);
+ break;
+ case kLessThanOrEqual:
+ assembler->TailCallRuntime(Runtime::kStringLessThanOrEqual, context,
+ lhs, rhs);
+ break;
+ case kGreaterThan:
+ assembler->TailCallRuntime(Runtime::kStringGreaterThan, context, lhs,
+ rhs);
+ break;
+ case kGreaterThanOrEqual:
+ assembler->TailCallRuntime(Runtime::kStringGreaterThanOrEqual,
+ context, lhs, rhs);
+ break;
+ }
+ }
+ }
+
+ assembler->Bind(&if_less);
+ switch (mode) {
+ case kLessThan:
+ case kLessThanOrEqual:
+ assembler->Return(assembler->BooleanConstant(true));
+ break;
+
+ case kGreaterThan:
+ case kGreaterThanOrEqual:
+ assembler->Return(assembler->BooleanConstant(false));
+ break;
+ }
+
+ assembler->Bind(&if_equal);
+ switch (mode) {
+ case kLessThan:
+ case kGreaterThan:
+ assembler->Return(assembler->BooleanConstant(false));
+ break;
+
+ case kLessThanOrEqual:
+ case kGreaterThanOrEqual:
+ assembler->Return(assembler->BooleanConstant(true));
+ break;
+ }
+
+ assembler->Bind(&if_greater);
+ switch (mode) {
+ case kLessThan:
+ case kLessThanOrEqual:
+ assembler->Return(assembler->BooleanConstant(false));
+ break;
+
+ case kGreaterThan:
+ case kGreaterThanOrEqual:
+ assembler->Return(assembler->BooleanConstant(true));
+ break;
+ }
+}
+
+} // namespace
+
+// static
+void Builtins::Generate_StringEqual(CodeStubAssembler* assembler) {
+ GenerateStringEqual(assembler, kDontNegateResult);
+}
+
+// static
+void Builtins::Generate_StringNotEqual(CodeStubAssembler* assembler) {
+ GenerateStringEqual(assembler, kNegateResult);
+}
+
+// static
+void Builtins::Generate_StringLessThan(CodeStubAssembler* assembler) {
+ GenerateStringRelationalComparison(assembler, kLessThan);
+}
+
+// static
+void Builtins::Generate_StringLessThanOrEqual(CodeStubAssembler* assembler) {
+ GenerateStringRelationalComparison(assembler, kLessThanOrEqual);
+}
+
+// static
+void Builtins::Generate_StringGreaterThan(CodeStubAssembler* assembler) {
+ GenerateStringRelationalComparison(assembler, kGreaterThan);
+}
+
+// static
+void Builtins::Generate_StringGreaterThanOrEqual(CodeStubAssembler* assembler) {
+ GenerateStringRelationalComparison(assembler, kGreaterThanOrEqual);
+}
+
// -----------------------------------------------------------------------------
// ES6 section 21.1 String Objects
@@ -294,7 +696,6 @@ BUILTIN(StringFromCodePoint) {
void Builtins::Generate_StringPrototypeCharAt(CodeStubAssembler* assembler) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
Node* receiver = assembler->Parameter(0);
Node* position = assembler->Parameter(1);
@@ -306,73 +707,24 @@ void Builtins::Generate_StringPrototypeCharAt(CodeStubAssembler* assembler) {
// Convert the {position} to a Smi and check that it's in bounds of the
// {receiver}.
- // TODO(bmeurer): Find an abstraction for this!
{
- // Check if the {position} is already a Smi.
- Variable var_position(assembler, MachineRepresentation::kTagged);
- var_position.Bind(position);
- Label if_positionissmi(assembler),
- if_positionisnotsmi(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordIsSmi(position), &if_positionissmi,
- &if_positionisnotsmi);
- assembler->Bind(&if_positionisnotsmi);
- {
- // Convert the {position} to an Integer via the ToIntegerStub.
- Callable callable = CodeFactory::ToInteger(assembler->isolate());
- Node* index = assembler->CallStub(callable, context, position);
-
- // Check if the resulting {index} is now a Smi.
- Label if_indexissmi(assembler, Label::kDeferred),
- if_indexisnotsmi(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordIsSmi(index), &if_indexissmi,
- &if_indexisnotsmi);
-
- assembler->Bind(&if_indexissmi);
- {
- var_position.Bind(index);
- assembler->Goto(&if_positionissmi);
- }
-
- assembler->Bind(&if_indexisnotsmi);
- {
- // The ToIntegerStub canonicalizes everything in Smi range to Smi
- // representation, so any HeapNumber returned is not in Smi range.
- // The only exception here is -0.0, which we treat as 0.
- Node* index_value = assembler->LoadHeapNumberValue(index);
- Label if_indexiszero(assembler, Label::kDeferred),
- if_indexisnotzero(assembler, Label::kDeferred);
- assembler->Branch(assembler->Float64Equal(
- index_value, assembler->Float64Constant(0.0)),
- &if_indexiszero, &if_indexisnotzero);
-
- assembler->Bind(&if_indexiszero);
- {
- var_position.Bind(assembler->SmiConstant(Smi::FromInt(0)));
- assembler->Goto(&if_positionissmi);
- }
-
- assembler->Bind(&if_indexisnotzero);
- {
- // The {index} is some other integral Number, that is definitely
- // neither -0.0 nor in Smi range.
- assembler->Return(assembler->EmptyStringConstant());
- }
- }
- }
- assembler->Bind(&if_positionissmi);
- position = var_position.value();
+ Label return_emptystring(assembler, Label::kDeferred);
+ position = assembler->ToInteger(context, position,
+ CodeStubAssembler::kTruncateMinusZero);
+ assembler->GotoUnless(assembler->WordIsSmi(position), &return_emptystring);
// Determine the actual length of the {receiver} String.
Node* receiver_length =
assembler->LoadObjectField(receiver, String::kLengthOffset);
// Return "" if the Smi {position} is outside the bounds of the {receiver}.
- Label if_positioninbounds(assembler),
- if_positionnotinbounds(assembler, Label::kDeferred);
+ Label if_positioninbounds(assembler);
assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
- &if_positionnotinbounds, &if_positioninbounds);
- assembler->Bind(&if_positionnotinbounds);
+ &return_emptystring, &if_positioninbounds);
+
+ assembler->Bind(&return_emptystring);
assembler->Return(assembler->EmptyStringConstant());
+
assembler->Bind(&if_positioninbounds);
}
@@ -389,7 +741,6 @@ void Builtins::Generate_StringPrototypeCharCodeAt(
CodeStubAssembler* assembler) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
Node* receiver = assembler->Parameter(0);
Node* position = assembler->Parameter(1);
@@ -401,73 +752,24 @@ void Builtins::Generate_StringPrototypeCharCodeAt(
// Convert the {position} to a Smi and check that it's in bounds of the
// {receiver}.
- // TODO(bmeurer): Find an abstraction for this!
{
- // Check if the {position} is already a Smi.
- Variable var_position(assembler, MachineRepresentation::kTagged);
- var_position.Bind(position);
- Label if_positionissmi(assembler),
- if_positionisnotsmi(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordIsSmi(position), &if_positionissmi,
- &if_positionisnotsmi);
- assembler->Bind(&if_positionisnotsmi);
- {
- // Convert the {position} to an Integer via the ToIntegerStub.
- Callable callable = CodeFactory::ToInteger(assembler->isolate());
- Node* index = assembler->CallStub(callable, context, position);
-
- // Check if the resulting {index} is now a Smi.
- Label if_indexissmi(assembler, Label::kDeferred),
- if_indexisnotsmi(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordIsSmi(index), &if_indexissmi,
- &if_indexisnotsmi);
-
- assembler->Bind(&if_indexissmi);
- {
- var_position.Bind(index);
- assembler->Goto(&if_positionissmi);
- }
-
- assembler->Bind(&if_indexisnotsmi);
- {
- // The ToIntegerStub canonicalizes everything in Smi range to Smi
- // representation, so any HeapNumber returned is not in Smi range.
- // The only exception here is -0.0, which we treat as 0.
- Node* index_value = assembler->LoadHeapNumberValue(index);
- Label if_indexiszero(assembler, Label::kDeferred),
- if_indexisnotzero(assembler, Label::kDeferred);
- assembler->Branch(assembler->Float64Equal(
- index_value, assembler->Float64Constant(0.0)),
- &if_indexiszero, &if_indexisnotzero);
-
- assembler->Bind(&if_indexiszero);
- {
- var_position.Bind(assembler->SmiConstant(Smi::FromInt(0)));
- assembler->Goto(&if_positionissmi);
- }
-
- assembler->Bind(&if_indexisnotzero);
- {
- // The {index} is some other integral Number, that is definitely
- // neither -0.0 nor in Smi range.
- assembler->Return(assembler->NaNConstant());
- }
- }
- }
- assembler->Bind(&if_positionissmi);
- position = var_position.value();
+ Label return_nan(assembler, Label::kDeferred);
+ position = assembler->ToInteger(context, position,
+ CodeStubAssembler::kTruncateMinusZero);
+ assembler->GotoUnless(assembler->WordIsSmi(position), &return_nan);
// Determine the actual length of the {receiver} String.
Node* receiver_length =
assembler->LoadObjectField(receiver, String::kLengthOffset);
// Return NaN if the Smi {position} is outside the bounds of the {receiver}.
- Label if_positioninbounds(assembler),
- if_positionnotinbounds(assembler, Label::kDeferred);
+ Label if_positioninbounds(assembler);
assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
- &if_positionnotinbounds, &if_positioninbounds);
- assembler->Bind(&if_positionnotinbounds);
+ &return_nan, &if_positioninbounds);
+
+ assembler->Bind(&return_nan);
assembler->Return(assembler->NaNConstant());
+
assembler->Bind(&if_positioninbounds);
}
@@ -477,6 +779,333 @@ void Builtins::Generate_StringPrototypeCharCodeAt(
assembler->Return(result);
}
+// ES6 section 21.1.3.9
+// String.prototype.lastIndexOf ( searchString [ , position ] )
+BUILTIN(StringPrototypeLastIndexOf) {
+ HandleScope handle_scope(isolate);
+ return String::LastIndexOf(isolate, args.receiver(),
+ args.atOrUndefined(isolate, 1),
+ args.atOrUndefined(isolate, 2));
+}
+
+// ES6 section 21.1.3.10 String.prototype.localeCompare ( that )
+//
+// This function is implementation specific. For now, we do not
+// do anything locale specific.
+// If internationalization is enabled, then i18n.js will override this function
+// and provide the proper functionality, so this is just a fallback.
+BUILTIN(StringPrototypeLocaleCompare) {
+ HandleScope handle_scope(isolate);
+ DCHECK_EQ(2, args.length());
+
+ TO_THIS_STRING(str1, "String.prototype.localeCompare");
+ Handle<String> str2;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, str2, Object::ToString(isolate, args.at<Object>(1)));
+
+ if (str1.is_identical_to(str2)) return Smi::FromInt(0); // Equal.
+ int str1_length = str1->length();
+ int str2_length = str2->length();
+
+ // Decide trivial cases without flattening.
+ if (str1_length == 0) {
+ if (str2_length == 0) return Smi::FromInt(0); // Equal.
+ return Smi::FromInt(-str2_length);
+ } else {
+ if (str2_length == 0) return Smi::FromInt(str1_length);
+ }
+
+ int end = str1_length < str2_length ? str1_length : str2_length;
+
+ // No need to flatten if we are going to find the answer on the first
+ // character. At this point we know there is at least one character
+ // in each string, due to the trivial case handling above.
+ int d = str1->Get(0) - str2->Get(0);
+ if (d != 0) return Smi::FromInt(d);
+
+ str1 = String::Flatten(str1);
+ str2 = String::Flatten(str2);
+
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat1 = str1->GetFlatContent();
+ String::FlatContent flat2 = str2->GetFlatContent();
+
+ for (int i = 0; i < end; i++) {
+ if (flat1.Get(i) != flat2.Get(i)) {
+ return Smi::FromInt(flat1.Get(i) - flat2.Get(i));
+ }
+ }
+
+ return Smi::FromInt(str1_length - str2_length);
+}
+
+// ES6 section 21.1.3.12 String.prototype.normalize ( [form] )
+//
+// Simply checks the argument is valid and returns the string itself.
+// If internationalization is enabled, then i18n.js will override this function
+// and provide the proper functionality, so this is just a fallback.
+BUILTIN(StringPrototypeNormalize) {
+ HandleScope handle_scope(isolate);
+ TO_THIS_STRING(string, "String.prototype.normalize");
+
+ Handle<Object> form_input = args.atOrUndefined(isolate, 1);
+ if (form_input->IsUndefined(isolate)) return *string;
+
+ Handle<String> form;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, form,
+ Object::ToString(isolate, form_input));
+
+ if (!(String::Equals(form,
+ isolate->factory()->NewStringFromStaticChars("NFC")) ||
+ String::Equals(form,
+ isolate->factory()->NewStringFromStaticChars("NFD")) ||
+ String::Equals(form,
+ isolate->factory()->NewStringFromStaticChars("NFKC")) ||
+ String::Equals(form,
+ isolate->factory()->NewStringFromStaticChars("NFKD")))) {
+ Handle<String> valid_forms =
+ isolate->factory()->NewStringFromStaticChars("NFC, NFD, NFKC, NFKD");
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewRangeError(MessageTemplate::kNormalizationForm, valid_forms));
+ }
+
+ return *string;
+}
+
+// ES6 section B.2.3.1 String.prototype.substr ( start, length )
+void Builtins::Generate_StringPrototypeSubstr(CodeStubAssembler* a) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label out(a), handle_length(a);
+
+ Variable var_start(a, MachineRepresentation::kTagged);
+ Variable var_length(a, MachineRepresentation::kTagged);
+
+ Node* const receiver = a->Parameter(0);
+ Node* const start = a->Parameter(1);
+ Node* const length = a->Parameter(2);
+ Node* const context = a->Parameter(5);
+
+ Node* const zero = a->SmiConstant(Smi::FromInt(0));
+
+ // Check that {receiver} is coercible to Object and convert it to a String.
+ Node* const string =
+ a->ToThisString(context, receiver, "String.prototype.substr");
+
+ Node* const string_length = a->LoadStringLength(string);
+
+ // Conversions and bounds-checks for {start}.
+ {
+ Node* const start_int =
+ a->ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
+
+ Label if_issmi(a), if_isheapnumber(a, Label::kDeferred);
+ a->Branch(a->WordIsSmi(start_int), &if_issmi, &if_isheapnumber);
+
+ a->Bind(&if_issmi);
+ {
+ Node* const length_plus_start = a->SmiAdd(string_length, start_int);
+ var_start.Bind(a->Select(a->SmiLessThan(start_int, zero),
+ a->SmiMax(length_plus_start, zero), start_int));
+ a->Goto(&handle_length);
+ }
+
+ a->Bind(&if_isheapnumber);
+ {
+ // If {start} is a heap number, it is definitely out of bounds. If it is
+ // negative, {start} = max({string_length} + {start}),0) = 0'. If it is
+ // positive, set {start} to {string_length} which ultimately results in
+ // returning an empty string.
+ Node* const float_zero = a->Float64Constant(0.);
+ Node* const start_float = a->LoadHeapNumberValue(start_int);
+ var_start.Bind(a->Select(a->Float64LessThan(start_float, float_zero),
+ zero, string_length));
+ a->Goto(&handle_length);
+ }
+ }
+
+ // Conversions and bounds-checks for {length}.
+ a->Bind(&handle_length);
+ {
+ Label if_issmi(a), if_isheapnumber(a, Label::kDeferred);
+
+ // Default to {string_length} if {length} is undefined.
+ {
+ Label if_isundefined(a, Label::kDeferred), if_isnotundefined(a);
+ a->Branch(a->WordEqual(length, a->UndefinedConstant()), &if_isundefined,
+ &if_isnotundefined);
+
+ a->Bind(&if_isundefined);
+ var_length.Bind(string_length);
+ a->Goto(&if_issmi);
+
+ a->Bind(&if_isnotundefined);
+ var_length.Bind(
+ a->ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero));
+ }
+
+ a->Branch(a->WordIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
+
+ // Set {length} to min(max({length}, 0), {string_length} - {start}
+ a->Bind(&if_issmi);
+ {
+ Node* const positive_length = a->SmiMax(var_length.value(), zero);
+
+ Node* const minimal_length = a->SmiSub(string_length, var_start.value());
+ var_length.Bind(a->SmiMin(positive_length, minimal_length));
+
+ a->GotoUnless(a->SmiLessThanOrEqual(var_length.value(), zero), &out);
+ a->Return(a->EmptyStringConstant());
+ }
+
+ a->Bind(&if_isheapnumber);
+ {
+ // If {length} is a heap number, it is definitely out of bounds. There are
+ // two cases according to the spec: if it is negative, "" is returned; if
+ // it is positive, then length is set to {string_length} - {start}.
+
+ a->Assert(a->WordEqual(a->LoadMap(var_length.value()),
+ a->HeapNumberMapConstant()));
+
+ Label if_isnegative(a), if_ispositive(a);
+ Node* const float_zero = a->Float64Constant(0.);
+ Node* const length_float = a->LoadHeapNumberValue(var_length.value());
+ a->Branch(a->Float64LessThan(length_float, float_zero), &if_isnegative,
+ &if_ispositive);
+
+ a->Bind(&if_isnegative);
+ a->Return(a->EmptyStringConstant());
+
+ a->Bind(&if_ispositive);
+ {
+ var_length.Bind(a->SmiSub(string_length, var_start.value()));
+ a->GotoUnless(a->SmiLessThanOrEqual(var_length.value(), zero), &out);
+ a->Return(a->EmptyStringConstant());
+ }
+ }
+ }
+
+ a->Bind(&out);
+ {
+ Node* const end = a->SmiAdd(var_start.value(), var_length.value());
+ Node* const result = a->SubString(context, string, var_start.value(), end);
+ a->Return(result);
+ }
+}
+
+namespace {
+
+compiler::Node* ToSmiBetweenZeroAnd(CodeStubAssembler* a,
+ compiler::Node* context,
+ compiler::Node* value,
+ compiler::Node* limit) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label out(a);
+ Variable var_result(a, MachineRepresentation::kTagged);
+
+ Node* const value_int =
+ a->ToInteger(context, value, CodeStubAssembler::kTruncateMinusZero);
+
+ Label if_issmi(a), if_isnotsmi(a, Label::kDeferred);
+ a->Branch(a->WordIsSmi(value_int), &if_issmi, &if_isnotsmi);
+
+ a->Bind(&if_issmi);
+ {
+ Label if_isinbounds(a), if_isoutofbounds(a, Label::kDeferred);
+ a->Branch(a->SmiAbove(value_int, limit), &if_isoutofbounds, &if_isinbounds);
+
+ a->Bind(&if_isinbounds);
+ {
+ var_result.Bind(value_int);
+ a->Goto(&out);
+ }
+
+ a->Bind(&if_isoutofbounds);
+ {
+ Node* const zero = a->SmiConstant(Smi::FromInt(0));
+ var_result.Bind(a->Select(a->SmiLessThan(value_int, zero), zero, limit));
+ a->Goto(&out);
+ }
+ }
+
+ a->Bind(&if_isnotsmi);
+ {
+ // {value} is a heap number - in this case, it is definitely out of bounds.
+ a->Assert(a->WordEqual(a->LoadMap(value_int), a->HeapNumberMapConstant()));
+
+ Node* const float_zero = a->Float64Constant(0.);
+ Node* const smi_zero = a->SmiConstant(Smi::FromInt(0));
+ Node* const value_float = a->LoadHeapNumberValue(value_int);
+ var_result.Bind(a->Select(a->Float64LessThan(value_float, float_zero),
+ smi_zero, limit));
+ a->Goto(&out);
+ }
+
+ a->Bind(&out);
+ return var_result.value();
+}
+
+} // namespace
+
+// ES6 section 21.1.3.19 String.prototype.substring ( start, end )
+void Builtins::Generate_StringPrototypeSubstring(CodeStubAssembler* a) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label out(a);
+
+ Variable var_start(a, MachineRepresentation::kTagged);
+ Variable var_end(a, MachineRepresentation::kTagged);
+
+ Node* const receiver = a->Parameter(0);
+ Node* const start = a->Parameter(1);
+ Node* const end = a->Parameter(2);
+ Node* const context = a->Parameter(5);
+
+ // Check that {receiver} is coercible to Object and convert it to a String.
+ Node* const string =
+ a->ToThisString(context, receiver, "String.prototype.substring");
+
+ Node* const length = a->LoadStringLength(string);
+
+ // Conversion and bounds-checks for {start}.
+ var_start.Bind(ToSmiBetweenZeroAnd(a, context, start, length));
+
+ // Conversion and bounds-checks for {end}.
+ {
+ var_end.Bind(length);
+ a->GotoIf(a->WordEqual(end, a->UndefinedConstant()), &out);
+
+ var_end.Bind(ToSmiBetweenZeroAnd(a, context, end, length));
+
+ Label if_endislessthanstart(a);
+ a->Branch(a->SmiLessThan(var_end.value(), var_start.value()),
+ &if_endislessthanstart, &out);
+
+ a->Bind(&if_endislessthanstart);
+ {
+ Node* const tmp = var_end.value();
+ var_end.Bind(var_start.value());
+ var_start.Bind(tmp);
+ a->Goto(&out);
+ }
+ }
+
+ a->Bind(&out);
+ {
+ Node* result =
+ a->SubString(context, string, var_start.value(), var_end.value());
+ a->Return(result);
+ }
+}
+
// ES6 section 21.1.3.25 String.prototype.toString ()
void Builtins::Generate_StringPrototypeToString(CodeStubAssembler* assembler) {
typedef compiler::Node Node;
@@ -522,5 +1151,203 @@ void Builtins::Generate_StringPrototypeValueOf(CodeStubAssembler* assembler) {
assembler->Return(result);
}
+void Builtins::Generate_StringPrototypeIterator(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ Node* string = assembler->ToThisString(context, receiver,
+ "String.prototype[Symbol.iterator]");
+
+ Node* native_context = assembler->LoadNativeContext(context);
+ Node* map = assembler->LoadFixedArrayElement(
+ native_context,
+ assembler->IntPtrConstant(Context::STRING_ITERATOR_MAP_INDEX), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS);
+ Node* iterator = assembler->Allocate(JSStringIterator::kSize);
+ assembler->StoreMapNoWriteBarrier(iterator, map);
+ assembler->StoreObjectFieldRoot(iterator, JSValue::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler->StoreObjectFieldRoot(iterator, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler->StoreObjectFieldNoWriteBarrier(
+ iterator, JSStringIterator::kStringOffset, string);
+ Node* index = assembler->SmiConstant(Smi::FromInt(0));
+ assembler->StoreObjectFieldNoWriteBarrier(
+ iterator, JSStringIterator::kNextIndexOffset, index);
+ assembler->Return(iterator);
+}
+
+namespace {
+
+// Return the |word32| codepoint at {index}. Supports SeqStrings and
+// ExternalStrings.
+compiler::Node* LoadSurrogatePairInternal(CodeStubAssembler* assembler,
+ compiler::Node* string,
+ compiler::Node* length,
+ compiler::Node* index,
+ UnicodeEncoding encoding) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+ Label handle_surrogate_pair(assembler), return_result(assembler);
+ Variable var_result(assembler, MachineRepresentation::kWord32);
+ Variable var_trail(assembler, MachineRepresentation::kWord16);
+ var_result.Bind(assembler->StringCharCodeAt(string, index));
+ var_trail.Bind(assembler->Int32Constant(0));
+
+ assembler->GotoIf(assembler->Word32NotEqual(
+ assembler->Word32And(var_result.value(),
+ assembler->Int32Constant(0xFC00)),
+ assembler->Int32Constant(0xD800)),
+ &return_result);
+ Node* next_index =
+ assembler->SmiAdd(index, assembler->SmiConstant(Smi::FromInt(1)));
+
+ assembler->GotoUnless(assembler->SmiLessThan(next_index, length),
+ &return_result);
+ var_trail.Bind(assembler->StringCharCodeAt(string, next_index));
+ assembler->Branch(assembler->Word32Equal(
+ assembler->Word32And(var_trail.value(),
+ assembler->Int32Constant(0xFC00)),
+ assembler->Int32Constant(0xDC00)),
+ &handle_surrogate_pair, &return_result);
+
+ assembler->Bind(&handle_surrogate_pair);
+ {
+ Node* lead = var_result.value();
+ Node* trail = var_trail.value();
+#ifdef ENABLE_SLOW_DCHECKS
+ // Check that this path is only taken if a surrogate pair is found
+ assembler->Assert(assembler->Uint32GreaterThanOrEqual(
+ lead, assembler->Int32Constant(0xD800)));
+ assembler->Assert(
+ assembler->Uint32LessThan(lead, assembler->Int32Constant(0xDC00)));
+ assembler->Assert(assembler->Uint32GreaterThanOrEqual(
+ trail, assembler->Int32Constant(0xDC00)));
+ assembler->Assert(
+ assembler->Uint32LessThan(trail, assembler->Int32Constant(0xE000)));
+#endif
+
+ switch (encoding) {
+ case UnicodeEncoding::UTF16:
+ var_result.Bind(assembler->WordOr(
+// Need to swap the order for big-endian platforms
+#if V8_TARGET_BIG_ENDIAN
+ assembler->WordShl(lead, assembler->Int32Constant(16)), trail));
+#else
+ assembler->WordShl(trail, assembler->Int32Constant(16)), lead));
+#endif
+ break;
+
+ case UnicodeEncoding::UTF32: {
+ // Convert UTF16 surrogate pair into |word32| code point, encoded as
+ // UTF32.
+ Node* surrogate_offset =
+ assembler->Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
+
+ // (lead << 10) + trail + SURROGATE_OFFSET
+ var_result.Bind(assembler->Int32Add(
+ assembler->WordShl(lead, assembler->Int32Constant(10)),
+ assembler->Int32Add(trail, surrogate_offset)));
+ break;
+ }
+ }
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&return_result);
+ return var_result.value();
+}
+
+compiler::Node* LoadSurrogatePairAt(CodeStubAssembler* assembler,
+ compiler::Node* string,
+ compiler::Node* length,
+ compiler::Node* index) {
+ return LoadSurrogatePairInternal(assembler, string, length, index,
+ UnicodeEncoding::UTF16);
+}
+
+} // namespace
+
+void Builtins::Generate_StringIteratorPrototypeNext(
+ CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Variable var_value(assembler, MachineRepresentation::kTagged);
+ Variable var_done(assembler, MachineRepresentation::kTagged);
+
+ var_value.Bind(assembler->UndefinedConstant());
+ var_done.Bind(assembler->BooleanConstant(true));
+
+ Label throw_bad_receiver(assembler), next_codepoint(assembler),
+ return_result(assembler);
+
+ Node* iterator = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ assembler->GotoIf(assembler->WordIsSmi(iterator), &throw_bad_receiver);
+ assembler->GotoUnless(
+ assembler->WordEqual(assembler->LoadInstanceType(iterator),
+ assembler->Int32Constant(JS_STRING_ITERATOR_TYPE)),
+ &throw_bad_receiver);
+
+ Node* string =
+ assembler->LoadObjectField(iterator, JSStringIterator::kStringOffset);
+ Node* position =
+ assembler->LoadObjectField(iterator, JSStringIterator::kNextIndexOffset);
+ Node* length = assembler->LoadObjectField(string, String::kLengthOffset);
+
+ assembler->Branch(assembler->SmiLessThan(position, length), &next_codepoint,
+ &return_result);
+
+ assembler->Bind(&next_codepoint);
+ {
+ Node* ch = LoadSurrogatePairAt(assembler, string, length, position);
+ Node* value = assembler->StringFromCodePoint(ch, UnicodeEncoding::UTF16);
+ var_value.Bind(value);
+ Node* length = assembler->LoadObjectField(value, String::kLengthOffset);
+ assembler->StoreObjectFieldNoWriteBarrier(
+ iterator, JSStringIterator::kNextIndexOffset,
+ assembler->SmiAdd(position, length));
+ var_done.Bind(assembler->BooleanConstant(false));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&return_result);
+ {
+ Node* native_context = assembler->LoadNativeContext(context);
+ Node* map = assembler->LoadFixedArrayElement(
+ native_context,
+ assembler->IntPtrConstant(Context::ITERATOR_RESULT_MAP_INDEX), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS);
+ Node* result = assembler->Allocate(JSIteratorResult::kSize);
+ assembler->StoreMapNoWriteBarrier(result, map);
+ assembler->StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler->StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler->StoreObjectFieldNoWriteBarrier(
+ result, JSIteratorResult::kValueOffset, var_value.value());
+ assembler->StoreObjectFieldNoWriteBarrier(
+ result, JSIteratorResult::kDoneOffset, var_done.value());
+ assembler->Return(result);
+ }
+
+ assembler->Bind(&throw_bad_receiver);
+ {
+ // The {receiver} is not a valid JSGeneratorObject.
+ Node* result = assembler->CallRuntime(
+ Runtime::kThrowIncompatibleMethodReceiver, context,
+ assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+ "String Iterator.prototype.next", TENURED)),
+ iterator);
+ assembler->Return(result); // Never reached.
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 90b58c79cd..ca1786c4fa 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -76,32 +76,31 @@ class BuiltinArguments : public Arguments {
// through the BuiltinArguments object args.
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
-// TODO(cbruni): Convert the IsContext CHECK back to a DCHECK.
-#define BUILTIN(name) \
- MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args, \
- Isolate* isolate); \
- \
- V8_NOINLINE static Object* Builtin_Impl_Stats_##name( \
- int args_length, Object** args_object, Isolate* isolate) { \
- BuiltinArguments args(args_length, args_object); \
- RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Builtin_##name); \
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED( \
- isolate, &tracing::TraceEventStatsTable::Builtin_##name); \
- return Builtin_Impl_##name(args, isolate); \
- } \
- \
- MUST_USE_RESULT Object* Builtin_##name( \
- int args_length, Object** args_object, Isolate* isolate) { \
- CHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
- if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() || \
- FLAG_runtime_call_stats)) { \
- return Builtin_Impl_Stats_##name(args_length, args_object, isolate); \
- } \
- BuiltinArguments args(args_length, args_object); \
- return Builtin_Impl_##name(args, isolate); \
- } \
- \
- MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args, \
+#define BUILTIN(name) \
+ MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args, \
+ Isolate* isolate); \
+ \
+ V8_NOINLINE static Object* Builtin_Impl_Stats_##name( \
+ int args_length, Object** args_object, Isolate* isolate) { \
+ BuiltinArguments args(args_length, args_object); \
+ RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Builtin_##name); \
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
+ "V8.Builtin_" #name); \
+ return Builtin_Impl_##name(args, isolate); \
+ } \
+ \
+ MUST_USE_RESULT Object* Builtin_##name( \
+ int args_length, Object** args_object, Isolate* isolate) { \
+ DCHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
+ if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() || \
+ FLAG_runtime_call_stats)) { \
+ return Builtin_Impl_Stats_##name(args_length, args_object, isolate); \
+ } \
+ BuiltinArguments args(args_length, args_object); \
+ return Builtin_Impl_##name(args, isolate); \
+ } \
+ \
+ MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args, \
Isolate* isolate)
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index f8ce2e699f..3579f3c18a 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -49,27 +49,6 @@ namespace internal {
// Args: name
#define BUILTIN_LIST(CPP, API, TFJ, TFS, ASM, ASH, DBG) \
ASM(Abort) \
- /* Handlers */ \
- ASH(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, kNoExtraICState) \
- ASM(KeyedLoadIC_Miss) \
- ASH(KeyedLoadIC_Slow, HANDLER, Code::KEYED_LOAD_IC) \
- ASH(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, kNoExtraICState) \
- ASH(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, \
- StoreICState::kStrictModeState) \
- ASM(KeyedStoreIC_Miss) \
- ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC) \
- TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector) \
- TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
- ASH(LoadIC_Getter_ForDeopt, LOAD_IC, kNoExtraICState) \
- TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector) \
- ASH(LoadIC_Normal, HANDLER, Code::LOAD_IC) \
- TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector) \
- TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector) \
- ASH(StoreIC_Normal, HANDLER, Code::STORE_IC) \
- ASH(StoreIC_Setter_ForDeopt, STORE_IC, StoreICState::kStrictModeState) \
- TFS(StoreIC_SlowSloppy, HANDLER, Code::STORE_IC, StoreWithVector) \
- TFS(StoreIC_SlowStrict, HANDLER, Code::STORE_IC, StoreWithVector) \
- \
/* Code aging */ \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM) \
\
@@ -118,14 +97,24 @@ namespace internal {
ASM(InterruptCheck) \
ASM(StackCheck) \
\
+ /* String helpers */ \
+ TFS(StringEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringNotEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringLessThan, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringLessThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringGreaterThan, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringGreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
+ \
/* Interpreter */ \
ASM(InterpreterEntryTrampoline) \
ASM(InterpreterMarkBaselineOnReturn) \
ASM(InterpreterPushArgsAndCall) \
ASM(InterpreterPushArgsAndCallFunction) \
- ASM(InterpreterPushArgsAndConstruct) \
ASM(InterpreterPushArgsAndTailCall) \
ASM(InterpreterPushArgsAndTailCallFunction) \
+ ASM(InterpreterPushArgsAndConstruct) \
+ ASM(InterpreterPushArgsAndConstructFunction) \
+ ASM(InterpreterPushArgsAndConstructArray) \
ASM(InterpreterEnterBytecodeDispatch) \
ASM(InterpreterOnStackReplacement) \
\
@@ -162,6 +151,7 @@ namespace internal {
TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements) \
TFS(GrowFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
GrowArrayElements) \
+ TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare) \
\
/* Debugger */ \
DBG(FrameDropper_LiveEdit) \
@@ -179,8 +169,33 @@ namespace internal {
TFS(NonPrimitiveToPrimitive_String, BUILTIN, kNoExtraICState, \
TypeConversion) \
TFS(StringToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToName, BUILTIN, kNoExtraICState, TypeConversion) \
TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
- ASM(ToNumber) \
+ TFS(ToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToString, BUILTIN, kNoExtraICState, TypeConversion) \
+ \
+ /* Handlers */ \
+ ASH(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, kNoExtraICState) \
+ TFS(KeyedLoadIC_Megamorphic_TF, KEYED_LOAD_IC, kNoExtraICState, \
+ LoadWithVector) \
+ ASM(KeyedLoadIC_Miss) \
+ ASH(KeyedLoadIC_Slow, HANDLER, Code::KEYED_LOAD_IC) \
+ ASH(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, kNoExtraICState) \
+ ASH(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, \
+ StoreICState::kStrictModeState) \
+ ASM(KeyedStoreIC_Miss) \
+ ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC) \
+ TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector) \
+ TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
+ ASH(LoadIC_Getter_ForDeopt, LOAD_IC, kNoExtraICState) \
+ TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector) \
+ ASH(LoadIC_Normal, HANDLER, Code::LOAD_IC) \
+ TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector) \
+ TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector) \
+ ASH(StoreIC_Normal, HANDLER, Code::STORE_IC) \
+ ASH(StoreIC_Setter_ForDeopt, STORE_IC, StoreICState::kStrictModeState) \
+ TFS(StoreIC_SlowSloppy, HANDLER, Code::STORE_IC, StoreWithVector) \
+ TFS(StoreIC_SlowStrict, HANDLER, Code::STORE_IC, StoreWithVector) \
\
/* Built-in functions for Javascript */ \
/* Special internal builtins */ \
@@ -244,46 +259,62 @@ namespace internal {
CPP(DataViewPrototypeGetBuffer) \
CPP(DataViewPrototypeGetByteLength) \
CPP(DataViewPrototypeGetByteOffset) \
+ CPP(DataViewPrototypeGetInt8) \
+ CPP(DataViewPrototypeSetInt8) \
+ CPP(DataViewPrototypeGetUint8) \
+ CPP(DataViewPrototypeSetUint8) \
+ CPP(DataViewPrototypeGetInt16) \
+ CPP(DataViewPrototypeSetInt16) \
+ CPP(DataViewPrototypeGetUint16) \
+ CPP(DataViewPrototypeSetUint16) \
+ CPP(DataViewPrototypeGetInt32) \
+ CPP(DataViewPrototypeSetInt32) \
+ CPP(DataViewPrototypeGetUint32) \
+ CPP(DataViewPrototypeSetUint32) \
+ CPP(DataViewPrototypeGetFloat32) \
+ CPP(DataViewPrototypeSetFloat32) \
+ CPP(DataViewPrototypeGetFloat64) \
+ CPP(DataViewPrototypeSetFloat64) \
\
/* Date */ \
CPP(DateConstructor) \
CPP(DateConstructor_ConstructStub) \
/* ES6 section 20.3.4.2 Date.prototype.getDate ( ) */ \
- ASM(DatePrototypeGetDate) \
+ TFJ(DatePrototypeGetDate, 1) \
/* ES6 section 20.3.4.3 Date.prototype.getDay ( ) */ \
- ASM(DatePrototypeGetDay) \
+ TFJ(DatePrototypeGetDay, 1) \
/* ES6 section 20.3.4.4 Date.prototype.getFullYear ( ) */ \
- ASM(DatePrototypeGetFullYear) \
+ TFJ(DatePrototypeGetFullYear, 1) \
/* ES6 section 20.3.4.5 Date.prototype.getHours ( ) */ \
- ASM(DatePrototypeGetHours) \
+ TFJ(DatePrototypeGetHours, 1) \
/* ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( ) */ \
- ASM(DatePrototypeGetMilliseconds) \
+ TFJ(DatePrototypeGetMilliseconds, 1) \
/* ES6 section 20.3.4.7 Date.prototype.getMinutes ( ) */ \
- ASM(DatePrototypeGetMinutes) \
+ TFJ(DatePrototypeGetMinutes, 1) \
/* ES6 section 20.3.4.8 Date.prototype.getMonth */ \
- ASM(DatePrototypeGetMonth) \
+ TFJ(DatePrototypeGetMonth, 1) \
/* ES6 section 20.3.4.9 Date.prototype.getSeconds ( ) */ \
- ASM(DatePrototypeGetSeconds) \
+ TFJ(DatePrototypeGetSeconds, 1) \
/* ES6 section 20.3.4.10 Date.prototype.getTime ( ) */ \
- ASM(DatePrototypeGetTime) \
+ TFJ(DatePrototypeGetTime, 1) \
/* ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( ) */ \
- ASM(DatePrototypeGetTimezoneOffset) \
+ TFJ(DatePrototypeGetTimezoneOffset, 1) \
/* ES6 section 20.3.4.12 Date.prototype.getUTCDate ( ) */ \
- ASM(DatePrototypeGetUTCDate) \
+ TFJ(DatePrototypeGetUTCDate, 1) \
/* ES6 section 20.3.4.13 Date.prototype.getUTCDay ( ) */ \
- ASM(DatePrototypeGetUTCDay) \
+ TFJ(DatePrototypeGetUTCDay, 1) \
/* ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( ) */ \
- ASM(DatePrototypeGetUTCFullYear) \
+ TFJ(DatePrototypeGetUTCFullYear, 1) \
/* ES6 section 20.3.4.15 Date.prototype.getUTCHours ( ) */ \
- ASM(DatePrototypeGetUTCHours) \
+ TFJ(DatePrototypeGetUTCHours, 1) \
/* ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( ) */ \
- ASM(DatePrototypeGetUTCMilliseconds) \
+ TFJ(DatePrototypeGetUTCMilliseconds, 1) \
/* ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( ) */ \
- ASM(DatePrototypeGetUTCMinutes) \
+ TFJ(DatePrototypeGetUTCMinutes, 1) \
/* ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( ) */ \
- ASM(DatePrototypeGetUTCMonth) \
+ TFJ(DatePrototypeGetUTCMonth, 1) \
/* ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( ) */ \
- ASM(DatePrototypeGetUTCSeconds) \
+ TFJ(DatePrototypeGetUTCSeconds, 1) \
CPP(DatePrototypeGetYear) \
CPP(DatePrototypeSetYear) \
CPP(DateNow) \
@@ -342,16 +373,21 @@ namespace internal {
TFJ(GeneratorPrototypeThrow, 2) \
CPP(AsyncFunctionConstructor) \
\
- /* Encode and decode */ \
+ /* Global object */ \
CPP(GlobalDecodeURI) \
CPP(GlobalDecodeURIComponent) \
CPP(GlobalEncodeURI) \
CPP(GlobalEncodeURIComponent) \
CPP(GlobalEscape) \
CPP(GlobalUnescape) \
- \
- /* Eval */ \
CPP(GlobalEval) \
+ /* ES6 section 18.2.2 isFinite ( number ) */ \
+ TFJ(GlobalIsFinite, 2) \
+ /* ES6 section 18.2.3 isNaN ( number ) */ \
+ TFJ(GlobalIsNaN, 2) \
+ \
+ /* ES6 #sec-%iteratorprototype%-@@iterator */ \
+ TFJ(IteratorPrototypeIterator, 1) \
\
/* JSON */ \
CPP(JsonParse) \
@@ -432,6 +468,14 @@ namespace internal {
ASM(NumberConstructor) \
/* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */ \
ASM(NumberConstructor_ConstructStub) \
+ /* ES6 section 20.1.2.2 Number.isFinite ( number ) */ \
+ TFJ(NumberIsFinite, 2) \
+ /* ES6 section 20.1.2.3 Number.isInteger ( number ) */ \
+ TFJ(NumberIsInteger, 2) \
+ /* ES6 section 20.1.2.4 Number.isNaN ( number ) */ \
+ TFJ(NumberIsNaN, 2) \
+ /* ES6 section 20.1.2.5 Number.isSafeInteger ( number ) */ \
+ TFJ(NumberIsSafeInteger, 2) \
CPP(NumberPrototypeToExponential) \
CPP(NumberPrototypeToFixed) \
CPP(NumberPrototypeToLocaleString) \
@@ -489,6 +533,10 @@ namespace internal {
CPP(ReflectSet) \
CPP(ReflectSetPrototypeOf) \
\
+ /* RegExp */ \
+ CPP(RegExpConstructor) \
+ TFJ(RegExpPrototypeExec, 2) \
+ \
/* SharedArrayBuffer */ \
CPP(SharedArrayBufferPrototypeGetByteLength) \
TFJ(AtomicsLoad, 3) \
@@ -504,6 +552,17 @@ namespace internal {
TFJ(StringPrototypeCharAt, 2) \
/* ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos ) */ \
TFJ(StringPrototypeCharCodeAt, 2) \
+ /* ES6 section 21.1.3.9 */ \
+ /* String.prototype.lastIndexOf ( searchString [ , position ] ) */ \
+ CPP(StringPrototypeLastIndexOf) \
+ /* ES6 section 21.1.3.10 String.prototype.localeCompare ( that ) */ \
+ CPP(StringPrototypeLocaleCompare) \
+ /* ES6 section 21.1.3.12 String.prototype.normalize ( [form] ) */ \
+ CPP(StringPrototypeNormalize) \
+ /* ES6 section B.2.3.1 String.prototype.substr ( start, length ) */ \
+ TFJ(StringPrototypeSubstr, 3) \
+ /* ES6 section 21.1.3.19 String.prototype.substring ( start, end ) */ \
+ TFJ(StringPrototypeSubstring, 3) \
/* ES6 section 21.1.3.25 String.prototype.toString () */ \
TFJ(StringPrototypeToString, 1) \
CPP(StringPrototypeTrim) \
@@ -511,6 +570,11 @@ namespace internal {
CPP(StringPrototypeTrimRight) \
/* ES6 section 21.1.3.28 String.prototype.valueOf () */ \
TFJ(StringPrototypeValueOf, 1) \
+ /* ES6 #sec-string.prototype-@@iterator */ \
+ TFJ(StringPrototypeIterator, 1) \
+ \
+ /* StringIterator */ \
+ TFJ(StringIteratorPrototypeNext, 1) \
\
/* Symbol */ \
CPP(SymbolConstructor) \
@@ -590,6 +654,7 @@ class Builtins {
Handle<Code> InterpreterPushArgsAndCall(
TailCallMode tail_call_mode,
CallableType function_type = CallableType::kAny);
+ Handle<Code> InterpreterPushArgsAndConstruct(CallableType function_type);
Code* builtin(Name name) {
// Code::cast cannot be used here since we access builtins
@@ -643,7 +708,10 @@ class Builtins {
MacroAssembler* masm, TailCallMode tail_call_mode,
CallableType function_type);
- static void Generate_DatePrototype_GetField(MacroAssembler* masm,
+ static void Generate_InterpreterPushArgsAndConstructImpl(
+ MacroAssembler* masm, CallableType function_type);
+
+ static void Generate_DatePrototype_GetField(CodeStubAssembler* masm,
int field_index);
enum class MathMaxMinKind { kMax, kMin };
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index f31ba6fcf1..9dd621fca8 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -590,6 +590,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(ecx, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
__ j(not_equal, &switch_to_different_code_kind);
+ // Increment invocation count for the function.
+ __ EmitLoadTypeFeedbackVector(ecx);
+ __ add(FieldOperand(ecx,
+ TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize),
+ Immediate(Smi::FromInt(1)));
+
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
@@ -703,20 +710,47 @@ void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
__ ret(0);
}
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch1, Register scratch2,
+ Label* stack_overflow,
+ bool include_receiver = false) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(scratch1, Operand::StaticVariable(real_stack_limit));
+ // Make scratch2 the space we have left. The stack might already be overflowed
+ // here which will cause scratch2 to become negative.
+ __ mov(scratch2, esp);
+ __ sub(scratch2, scratch1);
+ // Make scratch1 the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(scratch1, num_args);
+ if (include_receiver) {
+ __ add(scratch1, Immediate(1));
+ }
+ __ shl(scratch1, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(scratch2, scratch1);
+ __ j(less_equal, stack_overflow); // Signed comparison.
+}
+
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
- Register array_limit) {
+ Register array_limit,
+ Register start_address) {
// ----------- S t a t e -------------
- // -- ebx : Pointer to the last argument in the args array.
+ // -- start_address : Pointer to the last argument in the args array.
// -- array_limit : Pointer to one before the first argument in the
// args array.
// -----------------------------------
Label loop_header, loop_check;
__ jmp(&loop_check);
__ bind(&loop_header);
- __ Push(Operand(ebx, 0));
- __ sub(ebx, Immediate(kPointerSize));
+ __ Push(Operand(start_address, 0));
+ __ sub(start_address, Immediate(kPointerSize));
__ bind(&loop_check);
- __ cmp(ebx, array_limit);
+ __ cmp(start_address, array_limit);
__ j(greater, &loop_header, Label::kNear);
}
@@ -731,18 +765,26 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// they are to be pushed onto the stack.
// -- edi : the target to call (can be any Object).
// -----------------------------------
+ Label stack_overflow;
+ // Compute the expected number of arguments.
+ __ mov(ecx, eax);
+ __ add(ecx, Immediate(1)); // Add one for receiver.
+
+ // Add a stack check before pushing the arguments. We need an extra register
+ // to perform a stack check. So push it onto the stack temporarily. This
+ // might cause stack overflow, but it will be detected by the check.
+ __ Push(edi);
+ Generate_StackOverflowCheck(masm, ecx, edx, edi, &stack_overflow);
+ __ Pop(edi);
// Pop return address to allow tail-call after pushing arguments.
__ Pop(edx);
// Find the address of the last argument.
- __ mov(ecx, eax);
- __ add(ecx, Immediate(1)); // Add one for receiver.
__ shl(ecx, kPointerSizeLog2);
__ neg(ecx);
__ add(ecx, ebx);
-
- Generate_InterpreterPushArgs(masm, ecx);
+ Generate_InterpreterPushArgs(masm, ecx, ebx);
// Call the target.
__ Push(edx); // Re-push return address.
@@ -757,43 +799,210 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
tail_call_mode),
RelocInfo::CODE_TARGET);
}
+
+ __ bind(&stack_overflow);
+ {
+ // Pop the temporary registers, so that return address is on top of stack.
+ __ Pop(edi);
+
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+
+ // This should be unreachable.
+ __ int3();
+ }
+}
+
+namespace {
+
+// This function modified start_addr, and only reads the contents of num_args
+// register. scratch1 and scratch2 are used as temporary registers. Their
+// original values are restored after the use.
+void Generate_InterpreterPushArgsAndReturnAddress(
+ MacroAssembler* masm, Register num_args, Register start_addr,
+ Register scratch1, Register scratch2, bool receiver_in_args,
+ int num_slots_above_ret_addr, Label* stack_overflow) {
+ // We have to move return address and the temporary registers above it
+ // before we can copy arguments onto the stack. To achieve this:
+ // Step 1: Increment the stack pointer by num_args + 1 (for receiver).
+ // Step 2: Move the return address and values above it to the top of stack.
+ // Step 3: Copy the arguments into the correct locations.
+ // current stack =====> required stack layout
+ // | | | scratch1 | (2) <-- esp(1)
+ // | | | .... | (2)
+ // | | | scratch-n | (2)
+ // | | | return addr | (2)
+ // | | | arg N | (3)
+ // | scratch1 | <-- esp | .... |
+ // | .... | | arg 0 |
+ // | scratch-n | | arg 0 |
+ // | return addr | | receiver slot |
+
+ // Check for stack overflow before we increment the stack pointer.
+ Generate_StackOverflowCheck(masm, num_args, scratch1, scratch2,
+ stack_overflow, true);
+
+// Step 1 - Update the stack pointer. scratch1 already contains the required
+// increment to the stack. i.e. num_args + 1 stack slots. This is computed in
+// the Generate_StackOverflowCheck.
+
+#ifdef _MSC_VER
+ // TODO(mythria): Move it to macro assembler.
+ // In windows, we cannot increment the stack size by more than one page
+ // (mimimum page size is 4KB) without accessing at least one byte on the
+ // page. Check this:
+ // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
+ const int page_size = 4 * 1024;
+ Label check_offset, update_stack_pointer;
+ __ bind(&check_offset);
+ __ cmp(scratch1, page_size);
+ __ j(less, &update_stack_pointer);
+ __ sub(esp, Immediate(page_size));
+ // Just to touch the page, before we increment further.
+ __ mov(Operand(esp, 0), Immediate(0));
+ __ sub(scratch1, Immediate(page_size));
+ __ jmp(&check_offset);
+ __ bind(&update_stack_pointer);
+#endif
+
+ __ sub(esp, scratch1);
+
+ // Step 2 move return_address and slots above it to the correct locations.
+ // Move from top to bottom, otherwise we may overwrite when num_args = 0 or 1,
+ // basically when the source and destination overlap. We at least need one
+ // extra slot for receiver, so no extra checks are required to avoid copy.
+ for (int i = 0; i < num_slots_above_ret_addr + 1; i++) {
+ __ mov(scratch1,
+ Operand(esp, num_args, times_pointer_size, (i + 1) * kPointerSize));
+ __ mov(Operand(esp, i * kPointerSize), scratch1);
+ }
+
+ // Step 3 copy arguments to correct locations.
+ if (receiver_in_args) {
+ __ mov(scratch1, num_args);
+ __ add(scratch1, Immediate(1));
+ } else {
+ // Slot meant for receiver contains return address. Reset it so that
+ // we will not incorrectly interpret return address as an object.
+ __ mov(Operand(esp, num_args, times_pointer_size,
+ (num_slots_above_ret_addr + 1) * kPointerSize),
+ Immediate(0));
+ __ mov(scratch1, num_args);
+ }
+
+ Label loop_header, loop_check;
+ __ jmp(&loop_check);
+ __ bind(&loop_header);
+ __ mov(scratch2, Operand(start_addr, 0));
+ __ mov(Operand(esp, scratch1, times_pointer_size,
+ num_slots_above_ret_addr * kPointerSize),
+ scratch2);
+ __ sub(start_addr, Immediate(kPointerSize));
+ __ sub(scratch1, Immediate(1));
+ __ bind(&loop_check);
+ __ cmp(scratch1, Immediate(0));
+ __ j(greater, &loop_header, Label::kNear);
}
+} // end anonymous namespace
+
// static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+ MacroAssembler* masm, CallableType construct_type) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edx : the new target
// -- edi : the constructor
- // -- ebx : the address of the first argument to be pushed. Subsequent
+ // -- ebx : allocation site feedback (if available or undefined)
+ // -- ecx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -----------------------------------
+ Label stack_overflow;
+ // We need two scratch registers. Push edi and edx onto stack.
+ __ Push(edi);
+ __ Push(edx);
- // Pop return address to allow tail-call after pushing arguments.
- __ Pop(ecx);
+ // Push arguments and move return address to the top of stack.
+ // The eax register is readonly. The ecx register will be modified. The edx
+ // and edi registers will be modified but restored to their original values.
+ Generate_InterpreterPushArgsAndReturnAddress(masm, eax, ecx, edx, edi, false,
+ 2, &stack_overflow);
- // Push edi in the slot meant for receiver. We need an extra register
- // so store edi temporarily on stack.
- __ Push(edi);
+ // Restore edi and edx
+ __ Pop(edx);
+ __ Pop(edi);
+
+ __ AssertUndefinedOrAllocationSite(ebx);
+ if (construct_type == CallableType::kJSFunction) {
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ AssertFunction(edi);
+
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
+ } else {
+ DCHECK_EQ(construct_type, CallableType::kAny);
- // Find the address of the last argument.
- __ mov(edi, eax);
- __ neg(edi);
- __ shl(edi, kPointerSizeLog2);
- __ add(edi, ebx);
+ // Call the constructor with unmodified eax, edi, edx values.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
- Generate_InterpreterPushArgs(masm, edi);
+ __ bind(&stack_overflow);
+ {
+ // Pop the temporary registers, so that return address is on top of stack.
+ __ Pop(edx);
+ __ Pop(edi);
- // Restore the constructor from slot on stack. It was pushed at the slot
- // meant for receiver.
- __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
- // Re-push return address.
- __ Push(ecx);
+ // This should be unreachable.
+ __ int3();
+ }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the target to call checked to be Array function.
+ // -- ebx : the allocation site feedback
+ // -- ecx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+ Label stack_overflow;
+ // We need two scratch registers. Register edi is available, push edx onto
+ // stack.
+ __ Push(edx);
- // Call the constructor with unmodified eax, edi, ebi values.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ // Push arguments and move return address to the top of stack.
+ // The eax register is readonly. The ecx register will be modified. The edx
+ // and edi registers will be modified but restored to their original values.
+ Generate_InterpreterPushArgsAndReturnAddress(masm, eax, ecx, edx, edi, true,
+ 1, &stack_overflow);
+
+ // Restore edx.
+ __ Pop(edx);
+
+ // Array constructor expects constructor in edi. It is same as edx here.
+ __ Move(edi, edx);
+
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&stack_overflow);
+ {
+ // Pop the temporary registers, so that return address is on top of stack.
+ __ Pop(edx);
+
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+
+ // This should be unreachable.
+ __ int3();
+ }
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1222,61 +1431,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
- int field_index) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : function
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[4] : receiver
- // -----------------------------------
-
- // 1. Load receiver into eax and check that it's actually a JSDate object.
- Label receiver_not_date;
- {
- __ mov(eax, Operand(esp, kPointerSize));
- __ JumpIfSmi(eax, &receiver_not_date);
- __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
- __ j(not_equal, &receiver_not_date);
- }
-
- // 2. Load the specified date field, falling back to the runtime as necessary.
- if (field_index == JSDate::kDateValue) {
- __ mov(eax, FieldOperand(eax, JSDate::kValueOffset));
- } else {
- if (field_index < JSDate::kFirstUncachedField) {
- Label stamp_mismatch;
- __ mov(edx, Operand::StaticVariable(
- ExternalReference::date_cache_stamp(masm->isolate())));
- __ cmp(edx, FieldOperand(eax, JSDate::kCacheStampOffset));
- __ j(not_equal, &stamp_mismatch, Label::kNear);
- __ mov(eax, FieldOperand(
- eax, JSDate::kValueOffset + field_index * kPointerSize));
- __ ret(1 * kPointerSize);
- __ bind(&stamp_mismatch);
- }
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ PrepareCallCFunction(2, ebx);
- __ mov(Operand(esp, 0), eax);
- __ mov(Operand(esp, 1 * kPointerSize),
- Immediate(Smi::FromInt(field_index)));
- __ CallCFunction(
- ExternalReference::get_date_field_function(masm->isolate()), 2);
- }
- __ ret(1 * kPointerSize);
-
- // 3. Raise a TypeError if the receiver is not a date.
- __ bind(&receiver_not_date);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Move(ebx, Immediate(0));
- __ EnterBuiltinFrame(esi, edi, ebx);
- __ CallRuntime(Runtime::kThrowNotDateError);
- }
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1887,10 +2041,9 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&to_string);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(ebx);
__ EnterBuiltinFrame(esi, edi, ebx);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ LeaveBuiltinFrame(esi, edi, ebx);
__ SmiUntag(ebx);
}
@@ -1954,11 +2107,10 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(ebx);
__ EnterBuiltinFrame(esi, edi, ebx);
__ Push(edx);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ Pop(edx);
__ LeaveBuiltinFrame(esi, edi, ebx);
__ SmiUntag(ebx);
@@ -2009,32 +2161,6 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
}
}
-static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
- Label* stack_overflow) {
- // ----------- S t a t e -------------
- // -- eax : actual number of arguments
- // -- ebx : expected number of arguments
- // -- edx : new target (passed through to callee)
- // -----------------------------------
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edi, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ mov(ecx, esp);
- __ sub(ecx, edi);
- // Make edi the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edi, ebx);
- __ shl(edi, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, edi);
- __ j(less_equal, stack_overflow); // Signed comparison.
-}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
__ mov(ebp, esp);
@@ -2743,24 +2869,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label not_smi;
- __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
- __ Ret();
- __ bind(&not_smi);
-
- Label not_heap_number;
- __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ Ret();
- __ bind(&not_heap_number);
-
- __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
- RelocInfo::CODE_TARGET);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
@@ -2781,7 +2889,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: Actual >= expected.
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+ // edi is used as a scratch register. It should be restored from the frame
+ // when needed.
+ Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -2802,7 +2912,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
- ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+ // edi is used as a scratch register. It should be restored from the frame
+ // when needed.
+ Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
// Remember expected arguments in ecx.
__ mov(ecx, ebx);
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 003eeb22e0..a2b6bea626 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -395,10 +395,9 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&to_string);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ LeaveBuiltinFrame(cp, a1, t0);
__ SmiUntag(t0);
}
@@ -459,11 +458,10 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a3);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ Move(a0, v0);
__ Pop(a3);
__ LeaveBuiltinFrame(cp, a1, t0);
@@ -1051,6 +1049,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&switch_to_different_code_kind, ne, a0,
Operand(masm->CodeObject())); // Self-reference to this code.
+ // Increment invocation count for the function.
+ __ lw(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
+ __ lw(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+ __ lw(t0, FieldMemOperand(
+ a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ __ Addu(t0, t0, Operand(Smi::FromInt(1)));
+ __ sw(t0, FieldMemOperand(
+ a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ SmiTst(kInterpreterBytecodeArrayRegister, t0);
@@ -1160,6 +1169,45 @@ void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
__ Jump(ra);
}
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch1, Register scratch2,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch1, Heap::kRealStackLimitRootIndex);
+ // Make scratch1 the space we have left. The stack might already be overflowed
+ // here which will cause scratch1 to become negative.
+ __ subu(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ __ sll(scratch2, num_args, kPointerSizeLog2);
+ // Signed comparison.
+ __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ Register num_args, Register index,
+ Register scratch, Register scratch2,
+ Label* stack_overflow) {
+ Generate_StackOverflowCheck(masm, num_args, scratch, scratch2,
+ stack_overflow);
+
+ // Find the address of the last argument.
+ __ mov(scratch2, num_args);
+ __ sll(scratch2, scratch2, kPointerSizeLog2);
+ __ Subu(scratch2, index, Operand(scratch2));
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ __ lw(scratch, MemOperand(index));
+ __ Addu(index, index, Operand(-kPointerSize));
+ __ push(scratch);
+ __ bind(&loop_check);
+ __ Branch(&loop_header, gt, index, Operand(scratch2));
+}
+
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
@@ -1171,21 +1219,12 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// they are to be pushed onto the stack.
// -- a1 : the target to call (can be any Object).
// -----------------------------------
+ Label stack_overflow;
- // Find the address of the last argument.
- __ Addu(a3, a0, Operand(1)); // Add one for receiver.
- __ sll(a3, a3, kPointerSizeLog2);
- __ Subu(a3, a2, Operand(a3));
+ __ Addu(t0, a0, Operand(1)); // Add one for receiver.
- // Push the arguments.
- Label loop_header, loop_check;
- __ Branch(&loop_check);
- __ bind(&loop_header);
- __ lw(t0, MemOperand(a2));
- __ Addu(a2, a2, Operand(-kPointerSize));
- __ push(t0);
- __ bind(&loop_check);
- __ Branch(&loop_header, gt, a2, Operand(a3));
+ // This function modifies a2, t4 and t1.
+ Generate_InterpreterPushArgs(masm, t0, a2, t4, t1, &stack_overflow);
// Call the target.
if (function_type == CallableType::kJSFunction) {
@@ -1198,36 +1237,87 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
tail_call_mode),
RelocInfo::CODE_TARGET);
}
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
}
// static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+ MacroAssembler* masm, CallableType construct_type) {
// ----------- S t a t e -------------
// -- a0 : argument count (not including receiver)
// -- a3 : new target
// -- a1 : constructor to call
- // -- a2 : address of the first argument
+ // -- a2 : allocation site feedback if available, undefined otherwise.
+ // -- t4 : address of the first argument
// -----------------------------------
-
- // Find the address of the last argument.
- __ sll(t0, a0, kPointerSizeLog2);
- __ Subu(t0, a2, Operand(t0));
+ Label stack_overflow;
// Push a slot for the receiver.
__ push(zero_reg);
- // Push the arguments.
- Label loop_header, loop_check;
- __ Branch(&loop_check);
- __ bind(&loop_header);
- __ lw(t1, MemOperand(a2));
- __ Addu(a2, a2, Operand(-kPointerSize));
- __ push(t1);
- __ bind(&loop_check);
- __ Branch(&loop_header, gt, a2, Operand(t0));
+ // This function modified t4, t1 and t0.
+ Generate_InterpreterPushArgs(masm, a0, t4, t1, t0, &stack_overflow);
+
+ __ AssertUndefinedOrAllocationSite(a2, t0);
+ if (construct_type == CallableType::kJSFunction) {
+ __ AssertFunction(a1);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
+ __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+ } else {
+ DCHECK_EQ(construct_type, CallableType::kAny);
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the target to call checked to be Array function.
+ // -- a2 : allocation site feedback.
+ // -- a3 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+ Label stack_overflow;
+
+ __ Addu(t0, a0, Operand(1)); // Add one for receiver.
+
+ // This function modifies a3, t4, and t1.
+ Generate_InterpreterPushArgs(masm, t0, a3, t1, t4, &stack_overflow);
- // Call the constructor with a0, a1, and a3 unmodified.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ // ArrayConstructor stub expects constructor in a3. Set it here.
+ __ mov(a3, a1);
+
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1805,61 +1895,6 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
- int field_index) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : function
- // -- cp : context
- // -- sp[0] : receiver
- // -----------------------------------
-
- // 1. Pop receiver into a0 and check that it's actually a JSDate object.
- Label receiver_not_date;
- {
- __ Pop(a0);
- __ JumpIfSmi(a0, &receiver_not_date);
- __ GetObjectType(a0, t0, t0);
- __ Branch(&receiver_not_date, ne, t0, Operand(JS_DATE_TYPE));
- }
-
- // 2. Load the specified date field, falling back to the runtime as necessary.
- if (field_index == JSDate::kDateValue) {
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(a0, JSDate::kValueOffset)); // In delay slot.
- } else {
- if (field_index < JSDate::kFirstUncachedField) {
- Label stamp_mismatch;
- __ li(a1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
- __ lw(a1, MemOperand(a1));
- __ lw(t0, FieldMemOperand(a0, JSDate::kCacheStampOffset));
- __ Branch(&stamp_mismatch, ne, t0, Operand(a1));
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(
- a0, JSDate::kValueOffset +
- field_index * kPointerSize)); // In delay slot.
- __ bind(&stamp_mismatch);
- }
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ PrepareCallCFunction(2, t0);
- __ li(a1, Operand(Smi::FromInt(field_index)));
- __ CallCFunction(
- ExternalReference::get_date_field_function(masm->isolate()), 2);
- }
- __ Ret();
-
- // 3. Raise a TypeError if the receiver is not a date.
- __ bind(&receiver_not_date);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(a0);
- __ Move(a0, Smi::FromInt(0));
- __ EnterBuiltinFrame(cp, a1, a0);
- __ CallRuntime(Runtime::kThrowNotDateError);
- }
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
@@ -2115,27 +2150,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
- Label* stack_overflow) {
- // ----------- S t a t e -------------
- // -- a0 : actual number of arguments
- // -- a1 : function (passed through to callee)
- // -- a2 : expected number of arguments
- // -- a3 : new target (passed through to callee)
- // -----------------------------------
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(t1, Heap::kRealStackLimitRootIndex);
- // Make t1 the space we have left. The stack might already be overflowed
- // here which will cause t1 to become negative.
- __ subu(t1, sp, t1);
- // Check if the arguments will overflow the stack.
- __ sll(at, a2, kPointerSizeLog2);
- // Signed comparison.
- __ Branch(stack_overflow, le, t1, Operand(at));
-}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ sll(a0, a0, kSmiTagSize);
__ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -2854,28 +2868,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in a0.
- Label not_smi;
- __ JumpIfNotSmi(a0, &not_smi);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_smi);
-
- Label not_heap_number;
- __ GetObjectType(a0, a1, a1);
- // a0: receiver
- // a1: receiver instance type
- __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_heap_number);
-
- __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
- RelocInfo::CODE_TARGET);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
// ----------- S t a t e -------------
@@ -2900,7 +2892,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a3: new target (passed through to callee)
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Generate_StackOverflowCheck(masm, a2, t1, at, &stack_overflow);
// Calculate copy start address into a0 and copy end address into t1.
__ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
@@ -2930,7 +2922,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Generate_StackOverflowCheck(masm, a2, t1, at, &stack_overflow);
// Calculate copy start address into a0 and copy end address into t3.
// a0: actual number of arguments as a smi
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index cbdb5c3250..f7225f01cd 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -394,10 +394,9 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&to_string);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ LeaveBuiltinFrame(cp, a1, t0);
__ SmiUntag(t0);
}
@@ -458,11 +457,10 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a3);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ Move(a0, v0);
__ Pop(a3);
__ LeaveBuiltinFrame(cp, a1, t0);
@@ -1043,6 +1041,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&switch_to_different_code_kind, ne, a0,
Operand(masm->CodeObject())); // Self-reference to this code.
+ // Increment invocation count for the function.
+ __ ld(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
+ __ ld(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+ __ ld(a4, FieldMemOperand(
+ a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
+ __ sd(a4, FieldMemOperand(
+ a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ SmiTst(kInterpreterBytecodeArrayRegister, a4);
@@ -1152,6 +1161,45 @@ void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
__ Jump(ra);
}
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch1, Register scratch2,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch1, Heap::kRealStackLimitRootIndex);
+ // Make scratch1 the space we have left. The stack might already be overflowed
+ // here which will cause scratch1 to become negative.
+ __ dsubu(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ __ dsll(scratch2, num_args, kPointerSizeLog2);
+ // Signed comparison.
+ __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ Register num_args, Register index,
+ Register scratch, Register scratch2,
+ Label* stack_overflow) {
+ // Generate_StackOverflowCheck(masm, num_args, scratch, scratch2,
+ // stack_overflow);
+
+ // Find the address of the last argument.
+ __ mov(scratch2, num_args);
+ __ dsll(scratch2, scratch2, kPointerSizeLog2);
+ __ Dsubu(scratch2, index, Operand(scratch2));
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ __ ld(scratch, MemOperand(index));
+ __ Daddu(index, index, Operand(-kPointerSize));
+ __ push(scratch);
+ __ bind(&loop_check);
+ __ Branch(&loop_header, gt, index, Operand(scratch2));
+}
+
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
@@ -1163,21 +1211,12 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// they are to be pushed onto the stack.
// -- a1 : the target to call (can be any Object).
// -----------------------------------
+ Label stack_overflow;
- // Find the address of the last argument.
__ Daddu(a3, a0, Operand(1)); // Add one for receiver.
- __ dsll(a3, a3, kPointerSizeLog2);
- __ Dsubu(a3, a2, Operand(a3));
- // Push the arguments.
- Label loop_header, loop_check;
- __ Branch(&loop_check);
- __ bind(&loop_header);
- __ ld(t0, MemOperand(a2));
- __ Daddu(a2, a2, Operand(-kPointerSize));
- __ push(t0);
- __ bind(&loop_check);
- __ Branch(&loop_header, gt, a2, Operand(a3));
+ // This function modifies a2, t0 and a4.
+ Generate_InterpreterPushArgs(masm, a3, a2, a4, t0, &stack_overflow);
// Call the target.
if (function_type == CallableType::kJSFunction) {
@@ -1190,36 +1229,87 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
tail_call_mode),
RelocInfo::CODE_TARGET);
}
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
}
// static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+ MacroAssembler* masm, CallableType construct_type) {
// ----------- S t a t e -------------
// -- a0 : argument count (not including receiver)
// -- a3 : new target
// -- a1 : constructor to call
- // -- a2 : address of the first argument
+ // -- a2 : allocation site feedback if available, undefined otherwise.
+ // -- a4 : address of the first argument
// -----------------------------------
-
- // Find the address of the last argument.
- __ dsll(t0, a0, kPointerSizeLog2);
- __ Dsubu(t0, a2, Operand(t0));
+ Label stack_overflow;
// Push a slot for the receiver.
__ push(zero_reg);
- // Push the arguments.
- Label loop_header, loop_check;
- __ Branch(&loop_check);
- __ bind(&loop_header);
- __ ld(t1, MemOperand(a2));
- __ Daddu(a2, a2, Operand(-kPointerSize));
- __ push(t1);
- __ bind(&loop_check);
- __ Branch(&loop_header, gt, a2, Operand(t0));
+ // This function modifies t0, a4 and a5.
+ Generate_InterpreterPushArgs(masm, a0, a4, a5, t0, &stack_overflow);
+
+ __ AssertUndefinedOrAllocationSite(a2, t0);
+ if (construct_type == CallableType::kJSFunction) {
+ __ AssertFunction(a1);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
+ __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+ } else {
+ DCHECK_EQ(construct_type, CallableType::kAny);
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the target to call checked to be Array function.
+ // -- a2 : allocation site feedback.
+ // -- a3 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+ Label stack_overflow;
+
+ __ Daddu(a4, a0, Operand(1)); // Add one for receiver.
+
+ // This function modifies a3, a5 and a6.
+ Generate_InterpreterPushArgs(masm, a4, a3, a5, a6, &stack_overflow);
- // Call the constructor with a0, a1, and a3 unmodified.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ // ArrayConstructor stub expects constructor in a3. Set it here.
+ __ mov(a3, a1);
+
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1799,61 +1889,6 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
- int field_index) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : function
- // -- cp : context
- // -- sp[0] : receiver
- // -----------------------------------
-
- // 1. Pop receiver into a0 and check that it's actually a JSDate object.
- Label receiver_not_date;
- {
- __ Pop(a0);
- __ JumpIfSmi(a0, &receiver_not_date);
- __ GetObjectType(a0, t0, t0);
- __ Branch(&receiver_not_date, ne, t0, Operand(JS_DATE_TYPE));
- }
-
- // 2. Load the specified date field, falling back to the runtime as necessary.
- if (field_index == JSDate::kDateValue) {
- __ Ret(USE_DELAY_SLOT);
- __ ld(v0, FieldMemOperand(a0, JSDate::kValueOffset)); // In delay slot.
- } else {
- if (field_index < JSDate::kFirstUncachedField) {
- Label stamp_mismatch;
- __ li(a1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
- __ ld(a1, MemOperand(a1));
- __ ld(t0, FieldMemOperand(a0, JSDate::kCacheStampOffset));
- __ Branch(&stamp_mismatch, ne, t0, Operand(a1));
- __ Ret(USE_DELAY_SLOT);
- __ ld(v0, FieldMemOperand(
- a0, JSDate::kValueOffset +
- field_index * kPointerSize)); // In delay slot.
- __ bind(&stamp_mismatch);
- }
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ PrepareCallCFunction(2, t0);
- __ li(a1, Operand(Smi::FromInt(field_index)));
- __ CallCFunction(
- ExternalReference::get_date_field_function(masm->isolate()), 2);
- }
- __ Ret();
-
- // 3. Raise a TypeError if the receiver is not a date.
- __ bind(&receiver_not_date);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(a0);
- __ Move(a0, Smi::FromInt(0));
- __ EnterBuiltinFrame(cp, a1, a0);
- __ CallRuntime(Runtime::kThrowNotDateError);
- }
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
@@ -2109,27 +2144,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
- Label* stack_overflow) {
- // ----------- S t a t e -------------
- // -- a0 : actual number of arguments
- // -- a1 : function (passed through to callee)
- // -- a2 : expected number of arguments
- // -- a3 : new target (passed through to callee)
- // -----------------------------------
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(a5, Heap::kRealStackLimitRootIndex);
- // Make a5 the space we have left. The stack might already be overflowed
- // here which will cause a5 to become negative.
- __ dsubu(a5, sp, a5);
- // Check if the arguments will overflow the stack.
- __ dsll(at, a2, kPointerSizeLog2);
- // Signed comparison.
- __ Branch(stack_overflow, le, a5, Operand(at));
-}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// __ sll(a0, a0, kSmiTagSize);
__ dsll32(a0, a0, 0);
@@ -2847,28 +2861,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in a0.
- Label not_smi;
- __ JumpIfNotSmi(a0, &not_smi);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_smi);
-
- Label not_heap_number;
- __ GetObjectType(a0, a1, a1);
- // a0: receiver
- // a1: receiver instance type
- __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_heap_number);
-
- __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
- RelocInfo::CODE_TARGET);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
// ----------- S t a t e -------------
@@ -2893,7 +2885,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a3: new target (passed through to callee)
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Generate_StackOverflowCheck(masm, a2, a5, at, &stack_overflow);
// Calculate copy start address into a0 and copy end address into a4.
__ SmiScale(a0, a0, kPointerSizeLog2);
@@ -2924,7 +2916,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Generate_StackOverflowCheck(masm, a2, a5, at, &stack_overflow);
// Calculate copy start address into a0 and copy end address into a7.
// a0: actual number of arguments as a smi
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index dfea83f2b4..7e2b82c9a3 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -398,10 +398,9 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&to_string);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(r5);
__ EnterBuiltinFrame(cp, r4, r5);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ LeaveBuiltinFrame(cp, r4, r5);
__ SmiUntag(r5);
}
@@ -462,12 +461,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(r9);
__ EnterBuiltinFrame(cp, r4, r9);
__ Push(r6);
__ mr(r3, r5);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ mr(r5, r3);
__ Pop(r6);
__ LeaveBuiltinFrame(cp, r4, r9);
@@ -1084,6 +1082,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r3, ip);
__ bne(&switch_to_different_code_kind);
+ // Increment invocation count for the function.
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
+ __ LoadP(r7, FieldMemOperand(r7, LiteralsArray::kFeedbackVectorOffset));
+ __ LoadP(r8, FieldMemOperand(r7, TypeFeedbackVector::kInvocationCountIndex *
+ kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ __ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
+ __ StoreP(r8, FieldMemOperand(r7, TypeFeedbackVector::kInvocationCountIndex *
+ kPointerSize +
+ TypeFeedbackVector::kHeaderSize),
+ r0);
+
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1187,8 +1197,29 @@ void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
__ blr();
}
-static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
- Register count, Register scratch) {
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ __ sub(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftImm(r0, num_args, Operand(kPointerSizeLog2));
+ __ cmp(scratch, r0);
+ __ ble(stack_overflow); // Signed comparison.
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ Register num_args, Register index,
+ Register count, Register scratch,
+ Label* stack_overflow) {
+ // A stack check before pushing arguments.
+ Generate_StackOverflowCheck(masm, num_args, scratch, stack_overflow);
+
Label loop;
__ addi(index, index, Operand(kPointerSize)); // Bias up for LoadPU
__ mtctr(count);
@@ -1209,12 +1240,13 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// they are to be pushed onto the stack.
// -- r4 : the target to call (can be any Object).
// -----------------------------------
+ Label stack_overflow;
// Calculate number of arguments (add one for receiver).
__ addi(r6, r3, Operand(1));
- // Push the arguments.
- Generate_InterpreterPushArgs(masm, r5, r6, r7);
+ // Push the arguments. r5, r6, r7 will be modified.
+ Generate_InterpreterPushArgs(masm, r6, r5, r6, r7, &stack_overflow);
// Call the target.
if (function_type == CallableType::kJSFunction) {
@@ -1227,16 +1259,26 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
tail_call_mode),
RelocInfo::CODE_TARGET);
}
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable Code.
+ __ bkpt(0);
+ }
}
// static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+ MacroAssembler* masm, CallableType construct_type) {
// ----------- S t a t e -------------
// -- r3 : argument count (not including receiver)
// -- r6 : new target
// -- r4 : constructor to call
- // -- r5 : address of the first argument
+ // -- r5 : allocation site feedback if available, undefined otherwise.
+ // -- r7 : address of the first argument
// -----------------------------------
+ Label stack_overflow;
// Push a slot for the receiver to be constructed.
__ li(r0, Operand::Zero());
@@ -1246,11 +1288,64 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
Label skip;
__ cmpi(r3, Operand::Zero());
__ beq(&skip);
- Generate_InterpreterPushArgs(masm, r5, r3, r7);
+ // Push the arguments. r8, r7, r9 will be modified.
+ Generate_InterpreterPushArgs(masm, r3, r7, r3, r8, &stack_overflow);
__ bind(&skip);
- // Call the constructor with r3, r4, and r6 unmodified.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ __ AssertUndefinedOrAllocationSite(r5, r8);
+ if (construct_type == CallableType::kJSFunction) {
+ __ AssertFunction(r4);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
+ // Jump to the construct function.
+ __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+
+ } else {
+ DCHECK_EQ(construct_type, CallableType::kAny);
+ // Call the constructor with r3, r4, and r6 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable Code.
+ __ bkpt(0);
+ }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argument count (not including receiver)
+ // -- r4 : target to call verified to be Array function
+ // -- r5 : allocation site feedback if available, undefined otherwise.
+ // -- r6 : address of the first argument
+ // -----------------------------------
+ Label stack_overflow;
+
+ __ addi(r7, r3, Operand(1)); // Add one for receiver.
+
+ // Push the arguments. r6, r8, r3 will be modified.
+ Generate_InterpreterPushArgs(masm, r7, r6, r7, r8, &stack_overflow);
+
+ // Array constructor expects constructor in r6. It is same as r4 here.
+ __ mr(r6, r4);
+
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ bkpt(0);
+ }
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1842,61 +1937,6 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
- int field_index) {
- // ----------- S t a t e -------------
- // -- r3 : number of arguments
- // -- r4 : function
- // -- cp : context
- // -- lr : return address
- // -- sp[0] : receiver
- // -----------------------------------
-
- // 1. Pop receiver into r3 and check that it's actually a JSDate object.
- Label receiver_not_date;
- {
- __ Pop(r3);
- __ JumpIfSmi(r3, &receiver_not_date);
- __ CompareObjectType(r3, r5, r6, JS_DATE_TYPE);
- __ bne(&receiver_not_date);
- }
-
- // 2. Load the specified date field, falling back to the runtime as necessary.
- if (field_index == JSDate::kDateValue) {
- __ LoadP(r3, FieldMemOperand(r3, JSDate::kValueOffset));
- } else {
- if (field_index < JSDate::kFirstUncachedField) {
- Label stamp_mismatch;
- __ mov(r4, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
- __ LoadP(r4, MemOperand(r4));
- __ LoadP(ip, FieldMemOperand(r3, JSDate::kCacheStampOffset));
- __ cmp(r4, ip);
- __ bne(&stamp_mismatch);
- __ LoadP(r3, FieldMemOperand(
- r3, JSDate::kValueOffset + field_index * kPointerSize));
- __ Ret();
- __ bind(&stamp_mismatch);
- }
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ PrepareCallCFunction(2, r4);
- __ LoadSmiLiteral(r4, Smi::FromInt(field_index));
- __ CallCFunction(
- ExternalReference::get_date_field_function(masm->isolate()), 2);
- }
- __ Ret();
-
- // 3. Raise a TypeError if the receiver is not a date.
- __ bind(&receiver_not_date);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ push(r3);
- __ LoadSmiLiteral(r3, Smi::FromInt(0));
- __ EnterBuiltinFrame(cp, r4, r3);
- __ CallRuntime(Runtime::kThrowNotDateError);
- }
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
@@ -2151,27 +2191,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
- Label* stack_overflow) {
- // ----------- S t a t e -------------
- // -- r3 : actual number of arguments
- // -- r4 : function (passed through to callee)
- // -- r5 : expected number of arguments
- // -- r6 : new target (passed through to callee)
- // -----------------------------------
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(r8, Heap::kRealStackLimitRootIndex);
- // Make r8 the space we have left. The stack might already be overflowed
- // here which will cause r8 to become negative.
- __ sub(r8, sp, r8);
- // Check if the arguments will overflow the stack.
- __ ShiftLeftImm(r0, r5, Operand(kPointerSizeLog2));
- __ cmp(r8, r0);
- __ ble(stack_overflow); // Signed comparison.
-}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r3);
__ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
@@ -2433,7 +2452,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorBits, r0);
+ __ TestBitMask(r6, FunctionKind::kClassConstructor
+ << SharedFunctionInfo::kFunctionKindShift,
+ r0);
__ bne(&class_constructor, cr0);
// Enter the context of the function; ToObject has to run in the function
@@ -2861,22 +2882,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in r3.
- STATIC_ASSERT(kSmiTag == 0);
- __ TestIfSmi(r3, r0);
- __ Ret(eq, cr0);
-
- __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
- // r3: receiver
- // r4: receiver instance type
- __ Ret(eq);
-
- __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
- RelocInfo::CODE_TARGET);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : actual number of arguments
@@ -2897,7 +2902,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: actual >= expected
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r5, r8, &stack_overflow);
// Calculate copy start address into r3 and copy end address into r7.
// r3: actual number of arguments as a smi
@@ -2935,7 +2940,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r5, r8, &stack_overflow);
// Calculate copy start address into r0 and copy end address is fp.
// r3: actual number of arguments as a smi
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index c68fcc3e97..91ae2c006b 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -396,10 +396,9 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&to_string);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(r4);
__ EnterBuiltinFrame(cp, r3, r4);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ LeaveBuiltinFrame(cp, r3, r4);
__ SmiUntag(r4);
}
@@ -459,12 +458,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(r8);
__ EnterBuiltinFrame(cp, r3, r8);
__ Push(r5);
__ LoadRR(r2, r4);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ LoadRR(r4, r2);
__ Pop(r5);
__ LeaveBuiltinFrame(cp, r3, r8);
@@ -1087,6 +1085,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpP(r2, Operand(masm->CodeObject())); // Self-reference to this code.
__ bne(&switch_to_different_code_kind);
+ // Increment invocation count for the function.
+ __ LoadP(r6, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+ __ LoadP(r6, FieldMemOperand(r6, LiteralsArray::kFeedbackVectorOffset));
+ __ LoadP(r1, FieldMemOperand(r6, TypeFeedbackVector::kInvocationCountIndex *
+ kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
+ __ StoreP(r1, FieldMemOperand(r6, TypeFeedbackVector::kInvocationCountIndex *
+ kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ TestIfSmi(kInterpreterBytecodeArrayRegister);
@@ -1191,8 +1200,29 @@ void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
__ Ret();
}
-static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
- Register count, Register scratch) {
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ __ SubP(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftP(r0, num_args, Operand(kPointerSizeLog2));
+ __ CmpP(scratch, r0);
+ __ ble(stack_overflow); // Signed comparison.
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ Register num_args, Register index,
+ Register count, Register scratch,
+ Label* stack_overflow) {
+ // Add a stack check before pushing arguments.
+ Generate_StackOverflowCheck(masm, num_args, scratch, stack_overflow);
+
Label loop;
__ AddP(index, index, Operand(kPointerSize)); // Bias up for LoadPU
__ LoadRR(r0, count);
@@ -1215,12 +1245,13 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// they are to be pushed onto the stack.
// -- r3 : the target to call (can be any Object).
// -----------------------------------
+ Label stack_overflow;
// Calculate number of arguments (AddP one for receiver).
__ AddP(r5, r2, Operand(1));
// Push the arguments.
- Generate_InterpreterPushArgs(masm, r4, r5, r6);
+ Generate_InterpreterPushArgs(masm, r5, r4, r5, r6, &stack_overflow);
// Call the target.
if (function_type == CallableType::kJSFunction) {
@@ -1233,16 +1264,26 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
tail_call_mode),
RelocInfo::CODE_TARGET);
}
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable Code.
+ __ bkpt(0);
+ }
}
// static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+ MacroAssembler* masm, CallableType construct_type) {
// ----------- S t a t e -------------
// -- r2 : argument count (not including receiver)
// -- r5 : new target
// -- r3 : constructor to call
- // -- r4 : address of the first argument
+ // -- r4 : allocation site feedback if available, undefined otherwise.
+ // -- r6 : address of the first argument
// -----------------------------------
+ Label stack_overflow;
// Push a slot for the receiver to be constructed.
__ LoadImmP(r0, Operand::Zero());
@@ -1252,11 +1293,63 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
Label skip;
__ CmpP(r2, Operand::Zero());
__ beq(&skip);
- Generate_InterpreterPushArgs(masm, r4, r2, r6);
+ Generate_InterpreterPushArgs(masm, r2, r6, r2, r7, &stack_overflow);
__ bind(&skip);
- // Call the constructor with r2, r3, and r5 unmodified.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ __ AssertUndefinedOrAllocationSite(r4, r7);
+ if (construct_type == CallableType::kJSFunction) {
+ __ AssertFunction(r3);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
+ // Jump to the construct function.
+ __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+
+ } else {
+ DCHECK_EQ(construct_type, CallableType::kAny);
+ // Call the constructor with r2, r3, and r5 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable Code.
+ __ bkpt(0);
+ }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argument count (not including receiver)
+ // -- r3 : target to call verified to be Array function
+ // -- r4 : allocation site feedback if available, undefined otherwise.
+ // -- r5 : address of the first argument
+ // -----------------------------------
+ Label stack_overflow;
+
+ __ AddP(r6, r2, Operand(1)); // Add one for receiver.
+
+ // Push the arguments. r6, r8, r3 will be modified.
+ Generate_InterpreterPushArgs(masm, r6, r5, r6, r7, &stack_overflow);
+
+ // Array constructor expects constructor in r5. It is same as r3 here.
+ __ LoadRR(r5, r3);
+
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable Code.
+ __ bkpt(0);
+ }
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1844,62 +1937,6 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
- int field_index) {
- // ----------- S t a t e -------------
- // -- r2 : number of arguments
- // -- r3 : function
- // -- cp : context
-
- // -- lr : return address
- // -- sp[0] : receiver
- // -----------------------------------
-
- // 1. Pop receiver into r2 and check that it's actually a JSDate object.
- Label receiver_not_date;
- {
- __ Pop(r2);
- __ JumpIfSmi(r2, &receiver_not_date);
- __ CompareObjectType(r2, r4, r5, JS_DATE_TYPE);
- __ bne(&receiver_not_date);
- }
-
- // 2. Load the specified date field, falling back to the runtime as necessary.
- if (field_index == JSDate::kDateValue) {
- __ LoadP(r2, FieldMemOperand(r2, JSDate::kValueOffset));
- } else {
- if (field_index < JSDate::kFirstUncachedField) {
- Label stamp_mismatch;
- __ mov(r3, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
- __ LoadP(r3, MemOperand(r3));
- __ LoadP(ip, FieldMemOperand(r2, JSDate::kCacheStampOffset));
- __ CmpP(r3, ip);
- __ bne(&stamp_mismatch);
- __ LoadP(r2, FieldMemOperand(
- r2, JSDate::kValueOffset + field_index * kPointerSize));
- __ Ret();
- __ bind(&stamp_mismatch);
- }
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ PrepareCallCFunction(2, r3);
- __ LoadSmiLiteral(r3, Smi::FromInt(field_index));
- __ CallCFunction(
- ExternalReference::get_date_field_function(masm->isolate()), 2);
- }
- __ Ret();
-
- // 3. Raise a TypeError if the receiver is not a date.
- __ bind(&receiver_not_date);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ push(r2);
- __ LoadSmiLiteral(r2, Smi::FromInt(0));
- __ EnterBuiltinFrame(cp, r3, r2);
- __ CallRuntime(Runtime::kThrowNotDateError);
- }
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argc
@@ -2154,27 +2191,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
- Label* stack_overflow) {
- // ----------- S t a t e -------------
- // -- r2 : actual number of arguments
- // -- r3 : function (passed through to callee)
- // -- r4 : expected number of arguments
- // -- r5 : new target (passed through to callee)
- // -----------------------------------
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(r7, Heap::kRealStackLimitRootIndex);
- // Make r7 the space we have left. The stack might already be overflowed
- // here which will cause r7 to become negative.
- __ SubP(r7, sp, r7);
- // Check if the arguments will overflow the stack.
- __ ShiftLeftP(r0, r4, Operand(kPointerSizeLog2));
- __ CmpP(r7, r0);
- __ ble(stack_overflow); // Signed comparison.
-}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r2);
__ LoadSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
@@ -2445,7 +2461,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r5, SharedFunctionInfo::kClassConstructorBits, r0);
+ __ TestBitMask(r5, FunctionKind::kClassConstructor
+ << SharedFunctionInfo::kFunctionKindShift,
+ r0);
__ bne(&class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2875,22 +2893,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in r2.
- STATIC_ASSERT(kSmiTag == 0);
- __ TestIfSmi(r2);
- __ Ret(eq);
-
- __ CompareObjectType(r2, r3, r3, HEAP_NUMBER_TYPE);
- // r2: receiver
- // r3: receiver instance type
- __ Ret(eq);
-
- __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
- RelocInfo::CODE_TARGET);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : actual number of arguments
@@ -2911,7 +2913,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: actual >= expected
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r4, r7, &stack_overflow);
// Calculate copy start address into r2 and copy end address into r6.
// r2: actual number of arguments as a smi
@@ -2949,7 +2951,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r4, r7, &stack_overflow);
// Calculate copy start address into r0 and copy end address is fp.
// r2: actual number of arguments as a smi
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 153660407e..beae2d29c3 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -672,6 +672,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmpp(rcx, FieldOperand(rax, SharedFunctionInfo::kCodeOffset));
__ j(not_equal, &switch_to_different_code_kind);
+ // Increment invocation count for the function.
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rcx, FieldOperand(rcx, LiteralsArray::kFeedbackVectorOffset));
+ __ SmiAddConstant(
+ FieldOperand(rcx,
+ TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize),
+ Smi::FromInt(1));
+
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
@@ -782,33 +791,44 @@ void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
__ ret(0);
}
-static void Generate_InterpreterPushArgs(MacroAssembler* masm,
- bool push_receiver) {
- // ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
- // -- rbx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -----------------------------------
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch1, Register scratch2,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch1, Heap::kRealStackLimitRootIndex);
+ __ movp(scratch2, rsp);
+ // Make scratch2 the space we have left. The stack might already be overflowed
+ // here which will cause scratch2 to become negative.
+ __ subp(scratch2, scratch1);
+ // Make scratch1 the space we need for the array when it is unrolled onto the
+ // stack.
+ __ movp(scratch1, num_args);
+ __ shlp(scratch1, Immediate(kPointerSizeLog2));
+ // Check if the arguments will overflow the stack.
+ __ cmpp(scratch2, scratch1);
+ __ j(less_equal, stack_overflow); // Signed comparison.
+}
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ Register num_args,
+ Register start_address,
+ Register scratch) {
// Find the address of the last argument.
- __ movp(rcx, rax);
- if (push_receiver) {
- __ addp(rcx, Immediate(1)); // Add one for receiver.
- }
-
- __ shlp(rcx, Immediate(kPointerSizeLog2));
- __ negp(rcx);
- __ addp(rcx, rbx);
+ __ Move(scratch, num_args);
+ __ shlp(scratch, Immediate(kPointerSizeLog2));
+ __ negp(scratch);
+ __ addp(scratch, start_address);
// Push the arguments.
Label loop_header, loop_check;
__ j(always, &loop_check);
__ bind(&loop_header);
- __ Push(Operand(rbx, 0));
- __ subp(rbx, Immediate(kPointerSize));
+ __ Push(Operand(start_address, 0));
+ __ subp(start_address, Immediate(kPointerSize));
__ bind(&loop_check);
- __ cmpp(rbx, rcx);
+ __ cmpp(start_address, scratch);
__ j(greater, &loop_header, Label::kNear);
}
@@ -823,11 +843,20 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// they are to be pushed onto the stack.
// -- rdi : the target to call (can be any Object).
// -----------------------------------
+ Label stack_overflow;
+
+ // Number of values to be pushed.
+ __ Move(rcx, rax);
+ __ addp(rcx, Immediate(1)); // Add one for receiver.
+
+ // Add a stack check before pushing arguments.
+ Generate_StackOverflowCheck(masm, rcx, rdx, r8, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
- Generate_InterpreterPushArgs(masm, true);
+ // rbx and rdx will be modified.
+ Generate_InterpreterPushArgs(masm, rcx, rbx, rdx);
// Call the target.
__ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
@@ -842,19 +871,33 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
tail_call_mode),
RelocInfo::CODE_TARGET);
}
+
+ // Throw stack overflow exception.
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // This should be unreachable.
+ __ int3();
+ }
}
// static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+ MacroAssembler* masm, CallableType construct_type) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
- // -- rbx : the address of the first argument to be pushed. Subsequent
+ // -- rbx : the allocation site feedback if available, undefined otherwise
+ // -- rcx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -----------------------------------
+ Label stack_overflow;
+
+ // Add a stack check before pushing arguments.
+ Generate_StackOverflowCheck(masm, rax, r8, r9, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
@@ -862,13 +905,80 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// Push slot for the receiver to be constructed.
__ Push(Immediate(0));
- Generate_InterpreterPushArgs(masm, false);
+ // rcx and r8 will be modified.
+ Generate_InterpreterPushArgs(masm, rax, rcx, r8);
// Push return address in preparation for the tail-call.
__ PushReturnAddressFrom(kScratchRegister);
- // Call the constructor (rax, rdx, rdi passed on).
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ __ AssertUndefinedOrAllocationSite(rbx);
+ if (construct_type == CallableType::kJSFunction) {
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ AssertFunction(rdi);
+
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kConstructStubOffset));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ // Jump to the constructor function (rax, rbx, rdx passed on).
+ __ jmp(rcx);
+ } else {
+ DCHECK_EQ(construct_type, CallableType::kAny);
+ // Call the constructor (rax, rdx, rdi passed on).
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+
+ // Throw stack overflow exception.
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // This should be unreachable.
+ __ int3();
+ }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the target to call checked to be Array function.
+ // -- rbx : the allocation site feedback
+ // -- rcx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+ Label stack_overflow;
+
+ // Number of values to be pushed.
+ __ Move(r8, rax);
+ __ addp(r8, Immediate(1)); // Add one for receiver.
+
+ // Add a stack check before pushing arguments.
+ Generate_StackOverflowCheck(masm, r8, rdi, r9, &stack_overflow);
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ PopReturnAddressTo(kScratchRegister);
+
+ // rcx and rdi will be modified.
+ Generate_InterpreterPushArgs(masm, r8, rcx, rdi);
+
+ // Push return address in preparation for the tail-call.
+ __ PushReturnAddressFrom(kScratchRegister);
+
+ // Array constructor expects constructor in rdi. It is same as rdx here.
+ __ Move(rdi, rdx);
+
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // Throw stack overflow exception.
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // This should be unreachable.
+ __ int3();
+ }
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1275,60 +1385,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
- int field_index) {
- // ----------- S t a t e -------------
- // -- rax : number of arguments
- // -- rdi : function
- // -- rsi : context
- // -- rsp[0] : return address
- // -- rsp[8] : receiver
- // -----------------------------------
-
- // 1. Load receiver into rax and check that it's actually a JSDate object.
- Label receiver_not_date;
- {
- StackArgumentsAccessor args(rsp, 0);
- __ movp(rax, args.GetReceiverOperand());
- __ JumpIfSmi(rax, &receiver_not_date);
- __ CmpObjectType(rax, JS_DATE_TYPE, rbx);
- __ j(not_equal, &receiver_not_date);
- }
-
- // 2. Load the specified date field, falling back to the runtime as necessary.
- if (field_index == JSDate::kDateValue) {
- __ movp(rax, FieldOperand(rax, JSDate::kValueOffset));
- } else {
- if (field_index < JSDate::kFirstUncachedField) {
- Label stamp_mismatch;
- __ Load(rdx, ExternalReference::date_cache_stamp(masm->isolate()));
- __ cmpp(rdx, FieldOperand(rax, JSDate::kCacheStampOffset));
- __ j(not_equal, &stamp_mismatch, Label::kNear);
- __ movp(rax, FieldOperand(
- rax, JSDate::kValueOffset + field_index * kPointerSize));
- __ ret(1 * kPointerSize);
- __ bind(&stamp_mismatch);
- }
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ PrepareCallCFunction(2);
- __ Move(arg_reg_1, rax);
- __ Move(arg_reg_2, Smi::FromInt(field_index));
- __ CallCFunction(
- ExternalReference::get_date_field_function(masm->isolate()), 2);
- }
- __ ret(1 * kPointerSize);
-
- // 3. Raise a TypeError if the receiver is not a date.
- __ bind(&receiver_not_date);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Move(rbx, Smi::FromInt(0));
- __ EnterBuiltinFrame(rsi, rdi, rbx);
- __ CallRuntime(Runtime::kThrowNotDateError);
- }
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
@@ -1948,9 +2004,8 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&to_string);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ EnterBuiltinFrame(rsi, rdi, r8);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ LeaveBuiltinFrame(rsi, rdi, r8);
}
__ jmp(&drop_frame_and_ret, Label::kNear);
@@ -2017,11 +2072,10 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ EnterBuiltinFrame(rsi, rdi, r8);
__ Push(rdx);
__ Move(rax, rbx);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ Move(rbx, rax);
__ Pop(rdx);
__ LeaveBuiltinFrame(rsi, rdi, r8);
@@ -2061,32 +2115,6 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
}
}
-static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
- Label* stack_overflow) {
- // ----------- S t a t e -------------
- // -- rax : actual number of arguments
- // -- rbx : expected number of arguments
- // -- rdx : new target (passed through to callee)
- // -- rdi : function (passed through to callee)
- // -----------------------------------
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(r8, Heap::kRealStackLimitRootIndex);
- __ movp(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subp(rcx, r8);
- // Make r8 the space we need for the array when it is unrolled onto the
- // stack.
- __ movp(r8, rbx);
- __ shlp(r8, Immediate(kPointerSizeLog2));
- // Check if the arguments will overflow the stack.
- __ cmpp(rcx, r8);
- __ j(less_equal, stack_overflow); // Signed comparison.
-}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ pushq(rbp);
__ movp(rbp, rsp);
@@ -2161,25 +2189,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in rax.
- Label not_smi;
- __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
- __ Ret();
- __ bind(&not_smi);
-
- Label not_heap_number;
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ Ret();
- __ bind(&not_heap_number);
-
- __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
- RelocInfo::CODE_TARGET);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : actual number of arguments
@@ -2201,7 +2210,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: Actual >= expected.
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+ // The registers rcx and r8 will be modified. The register rbx is only read.
+ Generate_StackOverflowCheck(masm, rbx, rcx, r8, &stack_overflow);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -2222,7 +2232,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
- ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+ // The registers rcx and r8 will be modified. The register rbx is only read.
+ Generate_StackOverflowCheck(masm, rbx, rcx, r8, &stack_overflow);
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
diff --git a/deps/v8/src/builtins/x87/builtins-x87.cc b/deps/v8/src/builtins/x87/builtins-x87.cc
index 9c46f20ff6..8e096a3d0b 100644
--- a/deps/v8/src/builtins/x87/builtins-x87.cc
+++ b/deps/v8/src/builtins/x87/builtins-x87.cc
@@ -591,6 +591,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(ecx, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
__ j(not_equal, &switch_to_different_code_kind);
+ // Increment invocation count for the function.
+ __ EmitLoadTypeFeedbackVector(ecx);
+ __ add(FieldOperand(ecx,
+ TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize),
+ Immediate(Smi::FromInt(1)));
+
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
@@ -704,20 +711,47 @@ void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
__ ret(0);
}
+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch1, Register scratch2,
+ Label* stack_overflow,
+ bool include_receiver = false) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(scratch1, Operand::StaticVariable(real_stack_limit));
+ // Make scratch2 the space we have left. The stack might already be overflowed
+ // here which will cause scratch2 to become negative.
+ __ mov(scratch2, esp);
+ __ sub(scratch2, scratch1);
+ // Make scratch1 the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(scratch1, num_args);
+ if (include_receiver) {
+ __ add(scratch1, Immediate(1));
+ }
+ __ shl(scratch1, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(scratch2, scratch1);
+ __ j(less_equal, stack_overflow); // Signed comparison.
+}
+
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
- Register array_limit) {
+ Register array_limit,
+ Register start_address) {
// ----------- S t a t e -------------
- // -- ebx : Pointer to the last argument in the args array.
+ // -- start_address : Pointer to the last argument in the args array.
// -- array_limit : Pointer to one before the first argument in the
// args array.
// -----------------------------------
Label loop_header, loop_check;
__ jmp(&loop_check);
__ bind(&loop_header);
- __ Push(Operand(ebx, 0));
- __ sub(ebx, Immediate(kPointerSize));
+ __ Push(Operand(start_address, 0));
+ __ sub(start_address, Immediate(kPointerSize));
__ bind(&loop_check);
- __ cmp(ebx, array_limit);
+ __ cmp(start_address, array_limit);
__ j(greater, &loop_header, Label::kNear);
}
@@ -732,18 +766,26 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// they are to be pushed onto the stack.
// -- edi : the target to call (can be any Object).
// -----------------------------------
+ Label stack_overflow;
+ // Compute the expected number of arguments.
+ __ mov(ecx, eax);
+ __ add(ecx, Immediate(1)); // Add one for receiver.
+
+ // Add a stack check before pushing the arguments. We need an extra register
+ // to perform a stack check. So push it onto the stack temporarily. This
+ // might cause stack overflow, but it will be detected by the check.
+ __ Push(edi);
+ Generate_StackOverflowCheck(masm, ecx, edx, edi, &stack_overflow);
+ __ Pop(edi);
// Pop return address to allow tail-call after pushing arguments.
__ Pop(edx);
// Find the address of the last argument.
- __ mov(ecx, eax);
- __ add(ecx, Immediate(1)); // Add one for receiver.
__ shl(ecx, kPointerSizeLog2);
__ neg(ecx);
__ add(ecx, ebx);
-
- Generate_InterpreterPushArgs(masm, ecx);
+ Generate_InterpreterPushArgs(masm, ecx, ebx);
// Call the target.
__ Push(edx); // Re-push return address.
@@ -758,43 +800,210 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
tail_call_mode),
RelocInfo::CODE_TARGET);
}
+
+ __ bind(&stack_overflow);
+ {
+ // Pop the temporary registers, so that return address is on top of stack.
+ __ Pop(edi);
+
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+
+ // This should be unreachable.
+ __ int3();
+ }
}
+namespace {
+
+// This function modified start_addr, and only reads the contents of num_args
+// register. scratch1 and scratch2 are used as temporary registers. Their
+// original values are restored after the use.
+void Generate_InterpreterPushArgsAndReturnAddress(
+ MacroAssembler* masm, Register num_args, Register start_addr,
+ Register scratch1, Register scratch2, bool receiver_in_args,
+ int num_slots_above_ret_addr, Label* stack_overflow) {
+ // We have to move return address and the temporary registers above it
+ // before we can copy arguments onto the stack. To achieve this:
+ // Step 1: Increment the stack pointer by num_args + 1 (for receiver).
+ // Step 2: Move the return address and values above it to the top of stack.
+ // Step 3: Copy the arguments into the correct locations.
+ // current stack =====> required stack layout
+ // | | | scratch1 | (2) <-- esp(1)
+ // | | | .... | (2)
+ // | | | scratch-n | (2)
+ // | | | return addr | (2)
+ // | | | arg N | (3)
+ // | scratch1 | <-- esp | .... |
+ // | .... | | arg 0 |
+ // | scratch-n | | arg 0 |
+ // | return addr | | receiver slot |
+
+ // Check for stack overflow before we increment the stack pointer.
+ Generate_StackOverflowCheck(masm, num_args, scratch1, scratch2,
+ stack_overflow, true);
+
+// Step 1 - Update the stack pointer. scratch1 already contains the required
+// increment to the stack. i.e. num_args + 1 stack slots. This is computed in
+// the Generate_StackOverflowCheck.
+
+#ifdef _MSC_VER
+ // TODO(mythria): Move it to macro assembler.
+ // In windows, we cannot increment the stack size by more than one page
+ // (mimimum page size is 4KB) without accessing at least one byte on the
+ // page. Check this:
+ // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
+ const int page_size = 4 * 1024;
+ Label check_offset, update_stack_pointer;
+ __ bind(&check_offset);
+ __ cmp(scratch1, page_size);
+ __ j(less, &update_stack_pointer);
+ __ sub(esp, Immediate(page_size));
+ // Just to touch the page, before we increment further.
+ __ mov(Operand(esp, 0), Immediate(0));
+ __ sub(scratch1, Immediate(page_size));
+ __ jmp(&check_offset);
+ __ bind(&update_stack_pointer);
+#endif
+
+ __ sub(esp, scratch1);
+
+ // Step 2 move return_address and slots above it to the correct locations.
+ // Move from top to bottom, otherwise we may overwrite when num_args = 0 or 1,
+ // basically when the source and destination overlap. We at least need one
+ // extra slot for receiver, so no extra checks are required to avoid copy.
+ for (int i = 0; i < num_slots_above_ret_addr + 1; i++) {
+ __ mov(scratch1,
+ Operand(esp, num_args, times_pointer_size, (i + 1) * kPointerSize));
+ __ mov(Operand(esp, i * kPointerSize), scratch1);
+ }
+
+ // Step 3 copy arguments to correct locations.
+ if (receiver_in_args) {
+ __ mov(scratch1, num_args);
+ __ add(scratch1, Immediate(1));
+ } else {
+ // Slot meant for receiver contains return address. Reset it so that
+ // we will not incorrectly interpret return address as an object.
+ __ mov(Operand(esp, num_args, times_pointer_size,
+ (num_slots_above_ret_addr + 1) * kPointerSize),
+ Immediate(0));
+ __ mov(scratch1, num_args);
+ }
+
+ Label loop_header, loop_check;
+ __ jmp(&loop_check);
+ __ bind(&loop_header);
+ __ mov(scratch2, Operand(start_addr, 0));
+ __ mov(Operand(esp, scratch1, times_pointer_size,
+ num_slots_above_ret_addr * kPointerSize),
+ scratch2);
+ __ sub(start_addr, Immediate(kPointerSize));
+ __ sub(scratch1, Immediate(1));
+ __ bind(&loop_check);
+ __ cmp(scratch1, Immediate(0));
+ __ j(greater, &loop_header, Label::kNear);
+}
+
+} // end anonymous namespace
+
// static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
+ MacroAssembler* masm, CallableType construct_type) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edx : the new target
// -- edi : the constructor
- // -- ebx : the address of the first argument to be pushed. Subsequent
+ // -- ebx : allocation site feedback (if available or undefined)
+ // -- ecx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -----------------------------------
+ Label stack_overflow;
+ // We need two scratch registers. Push edi and edx onto stack.
+ __ Push(edi);
+ __ Push(edx);
- // Pop return address to allow tail-call after pushing arguments.
- __ Pop(ecx);
+ // Push arguments and move return address to the top of stack.
+ // The eax register is readonly. The ecx register will be modified. The edx
+ // and edi registers will be modified but restored to their original values.
+ Generate_InterpreterPushArgsAndReturnAddress(masm, eax, ecx, edx, edi, false,
+ 2, &stack_overflow);
- // Push edi in the slot meant for receiver. We need an extra register
- // so store edi temporarily on stack.
- __ Push(edi);
+ // Restore edi and edx
+ __ Pop(edx);
+ __ Pop(edi);
+
+ __ AssertUndefinedOrAllocationSite(ebx);
+ if (construct_type == CallableType::kJSFunction) {
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ AssertFunction(edi);
+
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
+ } else {
+ DCHECK_EQ(construct_type, CallableType::kAny);
- // Find the address of the last argument.
- __ mov(edi, eax);
- __ neg(edi);
- __ shl(edi, kPointerSizeLog2);
- __ add(edi, ebx);
+ // Call the constructor with unmodified eax, edi, edx values.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ // Pop the temporary registers, so that return address is on top of stack.
+ __ Pop(edx);
+ __ Pop(edi);
+
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+
+ // This should be unreachable.
+ __ int3();
+ }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructArray(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the target to call checked to be Array function.
+ // -- ebx : the allocation site feedback
+ // -- ecx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+ Label stack_overflow;
+ // We need two scratch registers. Register edi is available, push edx onto
+ // stack.
+ __ Push(edx);
+
+ // Push arguments and move return address to the top of stack.
+ // The eax register is readonly. The ecx register will be modified. The edx
+ // and edi registers will be modified but restored to their original values.
+ Generate_InterpreterPushArgsAndReturnAddress(masm, eax, ecx, edx, edi, true,
+ 1, &stack_overflow);
+
+ // Restore edx.
+ __ Pop(edx);
- Generate_InterpreterPushArgs(masm, edi);
+ // Array constructor expects constructor in edi. It is same as edx here.
+ __ Move(edi, edx);
- // Restore the constructor from slot on stack. It was pushed at the slot
- // meant for receiver.
- __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ __ bind(&stack_overflow);
+ {
+ // Pop the temporary registers, so that return address is on top of stack.
+ __ Pop(edx);
- // Re-push return address.
- __ Push(ecx);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
- // Call the constructor with unmodified eax, edi, ebi values.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ // This should be unreachable.
+ __ int3();
+ }
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1223,61 +1432,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
- int field_index) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : function
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[4] : receiver
- // -----------------------------------
-
- // 1. Load receiver into eax and check that it's actually a JSDate object.
- Label receiver_not_date;
- {
- __ mov(eax, Operand(esp, kPointerSize));
- __ JumpIfSmi(eax, &receiver_not_date);
- __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
- __ j(not_equal, &receiver_not_date);
- }
-
- // 2. Load the specified date field, falling back to the runtime as necessary.
- if (field_index == JSDate::kDateValue) {
- __ mov(eax, FieldOperand(eax, JSDate::kValueOffset));
- } else {
- if (field_index < JSDate::kFirstUncachedField) {
- Label stamp_mismatch;
- __ mov(edx, Operand::StaticVariable(
- ExternalReference::date_cache_stamp(masm->isolate())));
- __ cmp(edx, FieldOperand(eax, JSDate::kCacheStampOffset));
- __ j(not_equal, &stamp_mismatch, Label::kNear);
- __ mov(eax, FieldOperand(
- eax, JSDate::kValueOffset + field_index * kPointerSize));
- __ ret(1 * kPointerSize);
- __ bind(&stamp_mismatch);
- }
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ PrepareCallCFunction(2, ebx);
- __ mov(Operand(esp, 0), eax);
- __ mov(Operand(esp, 1 * kPointerSize),
- Immediate(Smi::FromInt(field_index)));
- __ CallCFunction(
- ExternalReference::get_date_field_function(masm->isolate()), 2);
- }
- __ ret(1 * kPointerSize);
-
- // 3. Raise a TypeError if the receiver is not a date.
- __ bind(&receiver_not_date);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Move(ebx, Immediate(0));
- __ EnterBuiltinFrame(esi, edi, ebx);
- __ CallRuntime(Runtime::kThrowNotDateError);
- }
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1904,10 +2058,9 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&to_string);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(ebx);
__ EnterBuiltinFrame(esi, edi, ebx);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ LeaveBuiltinFrame(esi, edi, ebx);
__ SmiUntag(ebx);
}
@@ -1971,11 +2124,10 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::MANUAL);
- ToStringStub stub(masm->isolate());
__ SmiTag(ebx);
__ EnterBuiltinFrame(esi, edi, ebx);
__ Push(edx);
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
__ Pop(edx);
__ LeaveBuiltinFrame(esi, edi, ebx);
__ SmiUntag(ebx);
@@ -2026,32 +2178,6 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
}
}
-static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
- Label* stack_overflow) {
- // ----------- S t a t e -------------
- // -- eax : actual number of arguments
- // -- ebx : expected number of arguments
- // -- edx : new target (passed through to callee)
- // -----------------------------------
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edi, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ mov(ecx, esp);
- __ sub(ecx, edi);
- // Make edi the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edi, ebx);
- __ shl(edi, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, edi);
- __ j(less_equal, stack_overflow); // Signed comparison.
-}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
__ mov(ebp, esp);
@@ -2767,24 +2893,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kAbort);
}
-// static
-void Builtins::Generate_ToNumber(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label not_smi;
- __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
- __ Ret();
- __ bind(&not_smi);
-
- Label not_heap_number;
- __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ Ret();
- __ bind(&not_heap_number);
-
- __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
- RelocInfo::CODE_TARGET);
-}
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
@@ -2805,7 +2913,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: Actual >= expected.
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+ // edi is used as a scratch register. It should be restored from the frame
+ // when needed.
+ Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -2825,9 +2935,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
-
EnterArgumentsAdaptorFrame(masm);
- ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+ // edi is used as a scratch register. It should be restored from the frame
+ // when needed.
+ Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
// Remember expected arguments in ecx.
__ mov(ecx, ebx);
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index 80404e8d89..0d7eed3481 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -7,6 +7,7 @@
#include "include/v8.h"
#include "src/base/logging.h"
+#include "src/globals.h"
namespace v8 {
@@ -17,10 +18,10 @@ namespace internal {
#ifdef ENABLE_SLOW_DCHECKS
#define SLOW_DCHECK(condition) \
CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
-extern bool FLAG_enable_slow_asserts;
+V8_EXPORT_PRIVATE extern bool FLAG_enable_slow_asserts;
#else
#define SLOW_DCHECK(condition) ((void) 0)
-const bool FLAG_enable_slow_asserts = false;
+static const bool FLAG_enable_slow_asserts = false;
#endif
} // namespace internal
diff --git a/deps/v8/src/code-events.h b/deps/v8/src/code-events.h
index 9ae1caeb37..94f7dbdfc0 100644
--- a/deps/v8/src/code-events.h
+++ b/deps/v8/src/code-events.h
@@ -7,6 +7,7 @@
#include <unordered_set>
+#include "src/base/platform/mutex.h"
#include "src/globals.h"
namespace v8 {
@@ -114,13 +115,16 @@ class CodeEventDispatcher {
CodeEventDispatcher() {}
bool AddListener(CodeEventListener* listener) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
return listeners_.insert(listener).second;
}
void RemoveListener(CodeEventListener* listener) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
listeners_.erase(listener);
}
-#define CODE_EVENT_DISPATCH(code) \
+#define CODE_EVENT_DISPATCH(code) \
+ base::LockGuard<base::Mutex> guard(&mutex_); \
for (auto it = listeners_.begin(); it != listeners_.end(); ++it) (*it)->code
void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
@@ -173,6 +177,7 @@ class CodeEventDispatcher {
private:
std::unordered_set<CodeEventListener*> listeners_;
+ base::Mutex mutex_;
DISALLOW_COPY_AND_ASSIGN(CodeEventDispatcher);
};
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 018f21d447..7448591856 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -82,6 +82,10 @@ Callable CodeFactory::KeyedLoadICInOptimizedCode(Isolate* isolate) {
// static
Callable CodeFactory::KeyedLoadIC_Megamorphic(Isolate* isolate) {
+ if (FLAG_tf_load_ic_stub) {
+ return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic_TF(),
+ LoadWithVectorDescriptor(isolate));
+ }
return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic(),
LoadWithVectorDescriptor(isolate));
}
@@ -104,6 +108,10 @@ Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
// static
Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
+ if (FLAG_tf_store_ic_stub) {
+ StoreICTrampolineTFStub stub(isolate, StoreICState(language_mode));
+ return make_callable(stub);
+ }
StoreICTrampolineStub stub(isolate, StoreICState(language_mode));
return make_callable(stub);
}
@@ -111,6 +119,10 @@ Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
// static
Callable CodeFactory::StoreICInOptimizedCode(Isolate* isolate,
LanguageMode language_mode) {
+ if (FLAG_tf_store_ic_stub) {
+ StoreICTFStub stub(isolate, StoreICState(language_mode));
+ return make_callable(stub);
+ }
StoreICStub stub(isolate, StoreICState(language_mode));
return make_callable(stub);
}
@@ -179,14 +191,14 @@ Callable CodeFactory::StringToNumber(Isolate* isolate) {
// static
Callable CodeFactory::ToString(Isolate* isolate) {
- ToStringStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->ToString(),
+ TypeConversionDescriptor(isolate));
}
// static
Callable CodeFactory::ToName(Isolate* isolate) {
- ToNameStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->ToName(),
+ TypeConversionDescriptor(isolate));
}
// static
@@ -228,6 +240,12 @@ Callable CodeFactory::NumberToString(Isolate* isolate) {
}
// static
+Callable CodeFactory::OrdinaryHasInstance(Isolate* isolate) {
+ return Callable(isolate->builtins()->OrdinaryHasInstance(),
+ CompareDescriptor(isolate));
+}
+
+// static
Callable CodeFactory::RegExpConstructResult(Isolate* isolate) {
RegExpConstructResultStub stub(isolate);
return make_callable(stub);
@@ -398,38 +416,38 @@ Callable CodeFactory::StringCompare(Isolate* isolate, Token::Value token) {
// static
Callable CodeFactory::StringEqual(Isolate* isolate) {
- StringEqualStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->StringEqual(),
+ CompareDescriptor(isolate));
}
// static
Callable CodeFactory::StringNotEqual(Isolate* isolate) {
- StringNotEqualStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->StringNotEqual(),
+ CompareDescriptor(isolate));
}
// static
Callable CodeFactory::StringLessThan(Isolate* isolate) {
- StringLessThanStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->StringLessThan(),
+ CompareDescriptor(isolate));
}
// static
Callable CodeFactory::StringLessThanOrEqual(Isolate* isolate) {
- StringLessThanOrEqualStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->StringLessThanOrEqual(),
+ CompareDescriptor(isolate));
}
// static
Callable CodeFactory::StringGreaterThan(Isolate* isolate) {
- StringGreaterThanStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->StringGreaterThan(),
+ CompareDescriptor(isolate));
}
// static
Callable CodeFactory::StringGreaterThanOrEqual(Isolate* isolate) {
- StringGreaterThanOrEqualStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->StringGreaterThanOrEqual(),
+ CompareDescriptor(isolate));
}
// static
@@ -594,9 +612,17 @@ Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate,
}
// static
-Callable CodeFactory::InterpreterPushArgsAndConstruct(Isolate* isolate) {
- return Callable(isolate->builtins()->InterpreterPushArgsAndConstruct(),
- InterpreterPushArgsAndConstructDescriptor(isolate));
+Callable CodeFactory::InterpreterPushArgsAndConstruct(
+ Isolate* isolate, CallableType function_type) {
+ return Callable(
+ isolate->builtins()->InterpreterPushArgsAndConstruct(function_type),
+ InterpreterPushArgsAndConstructDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::InterpreterPushArgsAndConstructArray(Isolate* isolate) {
+ return Callable(isolate->builtins()->InterpreterPushArgsAndConstructArray(),
+ InterpreterPushArgsAndConstructArrayDescriptor(isolate));
}
// static
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 40b1ea447e..59f069e8bd 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -84,6 +84,8 @@ class CodeFactory final {
OrdinaryToPrimitiveHint hint);
static Callable NumberToString(Isolate* isolate);
+ static Callable OrdinaryHasInstance(Isolate* isolate);
+
static Callable RegExpConstructResult(Isolate* isolate);
static Callable RegExpExec(Isolate* isolate);
@@ -160,7 +162,9 @@ class CodeFactory final {
static Callable InterpreterPushArgsAndCall(
Isolate* isolate, TailCallMode tail_call_mode,
CallableType function_type = CallableType::kAny);
- static Callable InterpreterPushArgsAndConstruct(Isolate* isolate);
+ static Callable InterpreterPushArgsAndConstruct(
+ Isolate* isolate, CallableType function_type = CallableType::kAny);
+ static Callable InterpreterPushArgsAndConstructArray(Isolate* isolate);
static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
static Callable InterpreterOnStackReplacement(Isolate* isolate);
};
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index 06552bad26..1efa0ccb11 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -38,41 +38,23 @@ void CodeStubAssembler::Assert(Node* condition) {
#endif
}
-Node* CodeStubAssembler::BooleanMapConstant() {
- return HeapConstant(isolate()->factory()->boolean_map());
-}
-
-Node* CodeStubAssembler::EmptyStringConstant() {
- return LoadRoot(Heap::kempty_stringRootIndex);
-}
-
-Node* CodeStubAssembler::HeapNumberMapConstant() {
- return HeapConstant(isolate()->factory()->heap_number_map());
-}
-
Node* CodeStubAssembler::NoContextConstant() {
return SmiConstant(Smi::FromInt(0));
}
-Node* CodeStubAssembler::MinusZeroConstant() {
- return LoadRoot(Heap::kMinusZeroValueRootIndex);
-}
-
-Node* CodeStubAssembler::NanConstant() {
- return LoadRoot(Heap::kNanValueRootIndex);
-}
-
-Node* CodeStubAssembler::NullConstant() {
- return LoadRoot(Heap::kNullValueRootIndex);
-}
-
-Node* CodeStubAssembler::UndefinedConstant() {
- return LoadRoot(Heap::kUndefinedValueRootIndex);
-}
+#define HEAP_CONSTANT_ACCESSOR(rootName, name) \
+ Node* CodeStubAssembler::name##Constant() { \
+ return LoadRoot(Heap::k##rootName##RootIndex); \
+ }
+HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR);
+#undef HEAP_CONSTANT_ACCESSOR
-Node* CodeStubAssembler::TheHoleConstant() {
- return LoadRoot(Heap::kTheHoleValueRootIndex);
-}
+#define HEAP_CONSTANT_TEST(rootName, name) \
+ Node* CodeStubAssembler::Is##name(Node* value) { \
+ return WordEqual(value, name##Constant()); \
+ }
+HEAP_CONSTANT_LIST(HEAP_CONSTANT_TEST);
+#undef HEAP_CONSTANT_TEST
Node* CodeStubAssembler::HashSeed() {
return LoadAndUntagToWord32Root(Heap::kHashSeedRootIndex);
@@ -86,7 +68,7 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return SmiConstant(Smi::FromInt(value));
} else {
- DCHECK_EQ(INTEGER_PARAMETERS, mode);
+ DCHECK(mode == INTEGER_PARAMETERS || mode == INTPTR_PARAMETERS);
return IntPtrConstant(value);
}
}
@@ -216,6 +198,37 @@ Node* CodeStubAssembler::Float64Floor(Node* x) {
return var_x.value();
}
+Node* CodeStubAssembler::Float64RoundToEven(Node* x) {
+ if (IsFloat64RoundTiesEvenSupported()) {
+ return Float64RoundTiesEven(x);
+ }
+ // See ES#sec-touint8clamp for details.
+ Node* f = Float64Floor(x);
+ Node* f_and_half = Float64Add(f, Float64Constant(0.5));
+
+ Variable var_result(this, MachineRepresentation::kFloat64);
+ Label return_f(this), return_f_plus_one(this), done(this);
+
+ GotoIf(Float64LessThan(f_and_half, x), &return_f_plus_one);
+ GotoIf(Float64LessThan(x, f_and_half), &return_f);
+ {
+ Node* f_mod_2 = Float64Mod(f, Float64Constant(2.0));
+ Branch(Float64Equal(f_mod_2, Float64Constant(0.0)), &return_f,
+ &return_f_plus_one);
+ }
+
+ Bind(&return_f);
+ var_result.Bind(f);
+ Goto(&done);
+
+ Bind(&return_f_plus_one);
+ var_result.Bind(Float64Add(f, Float64Constant(1.0)));
+ Goto(&done);
+
+ Bind(&done);
+ return var_result.value();
+}
+
Node* CodeStubAssembler::Float64Trunc(Node* x) {
if (IsFloat64RoundTruncateSupported()) {
return Float64RoundTruncate(x);
@@ -284,7 +297,7 @@ Node* CodeStubAssembler::SmiShiftBitsConstant() {
Node* CodeStubAssembler::SmiFromWord32(Node* value) {
value = ChangeInt32ToIntPtr(value);
- return WordShl(value, SmiShiftBitsConstant());
+ return BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
}
Node* CodeStubAssembler::SmiTag(Node* value) {
@@ -292,15 +305,15 @@ Node* CodeStubAssembler::SmiTag(Node* value) {
if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) {
return SmiConstant(Smi::FromInt(constant_value));
}
- return WordShl(value, SmiShiftBitsConstant());
+ return BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
}
Node* CodeStubAssembler::SmiUntag(Node* value) {
- return WordSar(value, SmiShiftBitsConstant());
+ return WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant());
}
Node* CodeStubAssembler::SmiToWord32(Node* value) {
- Node* result = WordSar(value, SmiShiftBitsConstant());
+ Node* result = SmiUntag(value);
if (Is64()) {
result = TruncateInt64ToInt32(result);
}
@@ -325,10 +338,18 @@ Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
+Node* CodeStubAssembler::SmiAbove(Node* a, Node* b) {
+ return UintPtrGreaterThan(a, b);
+}
+
Node* CodeStubAssembler::SmiAboveOrEqual(Node* a, Node* b) {
return UintPtrGreaterThanOrEqual(a, b);
}
+Node* CodeStubAssembler::SmiBelow(Node* a, Node* b) {
+ return UintPtrLessThan(a, b);
+}
+
Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
return IntPtrLessThan(a, b);
}
@@ -337,19 +358,12 @@ Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
return IntPtrLessThanOrEqual(a, b);
}
+Node* CodeStubAssembler::SmiMax(Node* a, Node* b) {
+ return Select(SmiLessThan(a, b), b, a);
+}
+
Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
- // TODO(bmeurer): Consider using Select once available.
- Variable min(this, MachineRepresentation::kTagged);
- Label if_a(this), if_b(this), join(this);
- BranchIfSmiLessThan(a, b, &if_a, &if_b);
- Bind(&if_a);
- min.Bind(a);
- Goto(&join);
- Bind(&if_b);
- min.Bind(b);
- Goto(&join);
- Bind(&join);
- return min.value();
+ return Select(SmiLessThan(a, b), a, b);
}
Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
@@ -485,80 +499,6 @@ Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
IntPtrConstant(0));
}
-void CodeStubAssembler::BranchIfSameValueZero(Node* a, Node* b, Node* context,
- Label* if_true, Label* if_false) {
- Node* number_map = HeapNumberMapConstant();
- Label a_isnumber(this), a_isnotnumber(this), b_isnumber(this), a_isnan(this),
- float_not_equal(this);
- // If register A and register B are identical, goto `if_true`
- GotoIf(WordEqual(a, b), if_true);
- // If either register A or B are Smis, goto `if_false`
- GotoIf(Word32Or(WordIsSmi(a), WordIsSmi(b)), if_false);
- // GotoIf(WordIsSmi(b), if_false);
-
- Node* a_map = LoadMap(a);
- Node* b_map = LoadMap(b);
- Branch(WordEqual(a_map, number_map), &a_isnumber, &a_isnotnumber);
-
- // If both register A and B are HeapNumbers, return true if they are equal,
- // or if both are NaN
- Bind(&a_isnumber);
- {
- Branch(WordEqual(b_map, number_map), &b_isnumber, if_false);
-
- Bind(&b_isnumber);
- Node* a_value = LoadHeapNumberValue(a);
- Node* b_value = LoadHeapNumberValue(b);
- BranchIfFloat64Equal(a_value, b_value, if_true, &float_not_equal);
-
- Bind(&float_not_equal);
- BranchIfFloat64IsNaN(a_value, &a_isnan, if_false);
-
- Bind(&a_isnan);
- BranchIfFloat64IsNaN(a_value, if_true, if_false);
- }
-
- Bind(&a_isnotnumber);
- {
- Label a_isstring(this), a_isnotstring(this);
- Node* a_instance_type = LoadMapInstanceType(a_map);
-
- Branch(Int32LessThan(a_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
- &a_isstring, &a_isnotstring);
-
- Bind(&a_isstring);
- {
- Label b_isstring(this), b_isnotstring(this);
- Node* b_instance_type = LoadInstanceType(b_map);
-
- Branch(
- Int32LessThan(b_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
- &b_isstring, if_false);
-
- Bind(&b_isstring);
- {
- Callable callable = CodeFactory::StringEqual(isolate());
- Node* result = CallStub(callable, context, a, b);
- Branch(WordEqual(BooleanConstant(true), result), if_true, if_false);
- }
- }
-
- Bind(&a_isnotstring);
- {
- // Check if {lhs} is a Simd128Value.
- Label a_issimd128value(this);
- Branch(Word32Equal(a_instance_type, Int32Constant(SIMD128_VALUE_TYPE)),
- &a_issimd128value, if_false);
-
- Bind(&a_issimd128value);
- {
- // Load the map of {rhs}.
- BranchIfSimd128Equal(a, a_map, b, b_map, if_true, if_false);
- }
- }
- }
-}
-
void CodeStubAssembler::BranchIfSimd128Equal(Node* lhs, Node* lhs_map,
Node* rhs, Node* rhs_map,
Label* if_equal,
@@ -630,69 +570,61 @@ void CodeStubAssembler::BranchIfSimd128Equal(Node* lhs, Node* lhs_map,
Goto(if_notequal);
}
-void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
- Label* if_true, Label* if_false) {
- Node* int32_zero = Int32Constant(0);
- Node* int32_one = Int32Constant(1);
-
+void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
+ Node* receiver_map, Label* definitely_no_elements,
+ Label* possibly_elements) {
+ Variable var_map(this, MachineRepresentation::kTagged);
+ var_map.Bind(receiver_map);
+ Label loop_body(this, &var_map);
Node* empty_elements = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
+ Goto(&loop_body);
- Variable last_map(this, MachineRepresentation::kTagged);
- Label check_prototype(this);
+ Bind(&loop_body);
+ {
+ Node* map = var_map.value();
+ Node* prototype = LoadMapPrototype(map);
+ GotoIf(WordEqual(prototype, NullConstant()), definitely_no_elements);
+ Node* prototype_map = LoadMap(prototype);
+ // Pessimistically assume elements if a Proxy, Special API Object,
+ // or JSValue wrapper is found on the prototype chain. After this
+ // instance type check, it's not necessary to check for interceptors or
+ // access checks.
+ GotoIf(Int32LessThanOrEqual(LoadMapInstanceType(prototype_map),
+ Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+ possibly_elements);
+ GotoIf(WordNotEqual(LoadElements(prototype), empty_elements),
+ possibly_elements);
+ var_map.Bind(prototype_map);
+ Goto(&loop_body);
+ }
+}
- // Bailout if Smi
+void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
+ Label* if_true, Label* if_false) {
+ // Bailout if receiver is a Smi.
GotoIf(WordIsSmi(object), if_false);
Node* map = LoadMap(object);
- last_map.Bind(map);
- // Bailout if instance type is not JS_ARRAY_TYPE
+ // Bailout if instance type is not JS_ARRAY_TYPE.
GotoIf(WordNotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)),
if_false);
Node* bit_field2 = LoadMapBitField2(map);
Node* elements_kind = BitFieldDecode<Map::ElementsKindBits>(bit_field2);
- // Bailout if slow receiver elements
+ // Bailout if receiver has slow elements.
GotoIf(
Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
if_false);
+ // Check prototype chain if receiver does not have packed elements.
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == (FAST_SMI_ELEMENTS | 1));
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == (FAST_ELEMENTS | 1));
STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == (FAST_DOUBLE_ELEMENTS | 1));
-
- // Check prototype chain if receiver does not have packed elements
- Node* holey_elements = Word32And(elements_kind, int32_one);
- Branch(Word32Equal(holey_elements, int32_zero), if_true, &check_prototype);
-
- Bind(&check_prototype);
- {
- Label loop_body(this, &last_map);
- Goto(&loop_body);
- Bind(&loop_body);
- Node* current_map = last_map.value();
- Node* proto = LoadObjectField(current_map, Map::kPrototypeOffset);
-
- // End loop
- GotoIf(WordEqual(proto, NullConstant()), if_true);
-
- // ASSERT: proto->IsHeapObject()
- Node* proto_map = LoadMap(proto);
-
- // Bailout if a Proxy, API Object, or JSValue wrapper found in prototype
- // Because of this bailout, it's not necessary to check for interceptors or
- // access checks on the prototype chain.
- GotoIf(Int32LessThanOrEqual(LoadMapInstanceType(proto_map),
- Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
- if_false);
-
- // Bailout if prototype contains non-empty elements
- GotoUnless(WordEqual(LoadElements(proto), empty_elements), if_false);
-
- last_map.Bind(proto_map);
- Goto(&loop_body);
- }
+ Node* holey_elements = Word32And(elements_kind, Int32Constant(1));
+ GotoIf(Word32Equal(holey_elements, Int32Constant(0)), if_true);
+ BranchIfPrototypesHaveNoElements(map, if_true, if_false);
}
Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
@@ -859,9 +791,8 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
// types, the HeapNumber type and everything else.
GotoIf(Word32Equal(value_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
&if_valueisheapnumber);
- Branch(
- Int32LessThan(value_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
- &if_valueisstring, &if_valueisother);
+ Branch(IsStringInstanceType(value_instance_type), &if_valueisstring,
+ &if_valueisother);
Bind(&if_valueisstring);
{
@@ -1008,6 +939,10 @@ Node* CodeStubAssembler::LoadElements(Node* object) {
return LoadObjectField(object, JSObject::kElementsOffset);
}
+Node* CodeStubAssembler::LoadJSArrayLength(compiler::Node* array) {
+ return LoadObjectField(array, JSArray::kLengthOffset);
+}
+
Node* CodeStubAssembler::LoadFixedArrayBaseLength(compiler::Node* array) {
return LoadObjectField(array, FixedArrayBase::kLengthOffset);
}
@@ -1032,6 +967,11 @@ Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
return LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint8());
}
+Node* CodeStubAssembler::LoadMapElementsKind(Node* map) {
+ Node* bit_field2 = LoadMapBitField2(map);
+ return BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+}
+
Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
return LoadObjectField(map, Map::kDescriptorsOffset);
}
@@ -1041,7 +981,8 @@ Node* CodeStubAssembler::LoadMapPrototype(Node* map) {
}
Node* CodeStubAssembler::LoadMapInstanceSize(Node* map) {
- return LoadObjectField(map, Map::kInstanceSizeOffset, MachineType::Uint8());
+ return ChangeUint32ToWord(
+ LoadObjectField(map, Map::kInstanceSizeOffset, MachineType::Uint8()));
}
Node* CodeStubAssembler::LoadMapInobjectProperties(Node* map) {
@@ -1049,9 +990,19 @@ Node* CodeStubAssembler::LoadMapInobjectProperties(Node* map) {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
Assert(Int32GreaterThanOrEqual(LoadMapInstanceType(map),
Int32Constant(FIRST_JS_OBJECT_TYPE)));
- return LoadObjectField(
+ return ChangeUint32ToWord(LoadObjectField(
+ map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
+ MachineType::Uint8()));
+}
+
+Node* CodeStubAssembler::LoadMapConstructorFunctionIndex(Node* map) {
+ // See Map::GetConstructorFunctionIndex() for details.
+ STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
+ Assert(Int32LessThanOrEqual(LoadMapInstanceType(map),
+ Int32Constant(LAST_PRIMITIVE_TYPE)));
+ return ChangeUint32ToWord(LoadObjectField(
map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
- MachineType::Uint8());
+ MachineType::Uint8()));
}
Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
@@ -1081,7 +1032,7 @@ Node* CodeStubAssembler::LoadNameHashField(Node* name) {
Node* CodeStubAssembler::LoadNameHash(Node* name, Label* if_hash_not_computed) {
Node* hash_field = LoadNameHashField(name);
if (if_hash_not_computed != nullptr) {
- GotoIf(WordEqual(
+ GotoIf(Word32Equal(
Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
Int32Constant(0)),
if_hash_not_computed);
@@ -1105,19 +1056,6 @@ Node* CodeStubAssembler::LoadWeakCellValue(Node* weak_cell, Label* if_cleared) {
return value;
}
-Node* CodeStubAssembler::AllocateUninitializedFixedArray(Node* length) {
- Node* header_size = IntPtrConstant(FixedArray::kHeaderSize);
- Node* data_size = WordShl(length, IntPtrConstant(kPointerSizeLog2));
- Node* total_size = IntPtrAdd(data_size, header_size);
-
- Node* result = Allocate(total_size, kNone);
- StoreMapNoWriteBarrier(result, LoadRoot(Heap::kFixedArrayMapRootIndex));
- StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
- SmiTag(length));
-
- return result;
-}
-
Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
int additional_offset,
ParameterMode parameter_mode) {
@@ -1149,29 +1087,57 @@ Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
Node* CodeStubAssembler::LoadFixedDoubleArrayElement(
Node* object, Node* index_node, MachineType machine_type,
- int additional_offset, ParameterMode parameter_mode) {
+ int additional_offset, ParameterMode parameter_mode, Label* if_hole) {
int32_t header_size =
FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_DOUBLE_ELEMENTS,
parameter_mode, header_size);
- return Load(machine_type, object, offset);
+ return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type);
+}
+
+Node* CodeStubAssembler::LoadDoubleWithHoleCheck(Node* base, Node* offset,
+ Label* if_hole,
+ MachineType machine_type) {
+ if (if_hole) {
+ // TODO(ishell): Compare only the upper part for the hole once the
+ // compiler is able to fold addition of already complex |offset| with
+ // |kIeeeDoubleExponentWordOffset| into one addressing mode.
+ if (Is64()) {
+ Node* element = Load(MachineType::Uint64(), base, offset);
+ GotoIf(Word64Equal(element, Int64Constant(kHoleNanInt64)), if_hole);
+ } else {
+ Node* element_upper = Load(
+ MachineType::Uint32(), base,
+ IntPtrAdd(offset, IntPtrConstant(kIeeeDoubleExponentWordOffset)));
+ GotoIf(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
+ if_hole);
+ }
+ }
+ if (machine_type.IsNone()) {
+ // This means the actual value is not needed.
+ return nullptr;
+ }
+ return Load(machine_type, base, offset);
+}
+
+Node* CodeStubAssembler::LoadContextElement(Node* context, int slot_index) {
+ int offset = Context::SlotOffset(slot_index);
+ return Load(MachineType::AnyTagged(), context, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadNativeContext(Node* context) {
- return LoadFixedArrayElement(context,
- Int32Constant(Context::NATIVE_CONTEXT_INDEX));
+ return LoadContextElement(context, Context::NATIVE_CONTEXT_INDEX);
}
Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind,
Node* native_context) {
return LoadFixedArrayElement(native_context,
- Int32Constant(Context::ArrayMapIndex(kind)));
+ IntPtrConstant(Context::ArrayMapIndex(kind)));
}
Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
- return StoreNoWriteBarrier(
- MachineRepresentation::kFloat64, object,
- IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag), value);
+ return StoreObjectFieldNoWriteBarrier(object, HeapNumber::kValueOffset, value,
+ MachineRepresentation::kFloat64);
}
Node* CodeStubAssembler::StoreObjectField(
@@ -1180,12 +1146,32 @@ Node* CodeStubAssembler::StoreObjectField(
IntPtrConstant(offset - kHeapObjectTag), value);
}
+Node* CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
+ Node* value) {
+ int const_offset;
+ if (ToInt32Constant(offset, const_offset)) {
+ return StoreObjectField(object, const_offset, value);
+ }
+ return Store(MachineRepresentation::kTagged, object,
+ IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
+}
+
Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
Node* object, int offset, Node* value, MachineRepresentation rep) {
return StoreNoWriteBarrier(rep, object,
IntPtrConstant(offset - kHeapObjectTag), value);
}
+Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
+ Node* object, Node* offset, Node* value, MachineRepresentation rep) {
+ int const_offset;
+ if (ToInt32Constant(offset, const_offset)) {
+ return StoreObjectFieldNoWriteBarrier(object, const_offset, value, rep);
+ }
+ return StoreNoWriteBarrier(
+ rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
+}
+
Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
return StoreNoWriteBarrier(
MachineRepresentation::kTagged, object,
@@ -1227,14 +1213,19 @@ Node* CodeStubAssembler::StoreFixedDoubleArrayElement(
return StoreNoWriteBarrier(rep, object, offset, value);
}
-Node* CodeStubAssembler::AllocateHeapNumber() {
+Node* CodeStubAssembler::AllocateHeapNumber(MutableMode mode) {
Node* result = Allocate(HeapNumber::kSize, kNone);
- StoreMapNoWriteBarrier(result, HeapNumberMapConstant());
+ Heap::RootListIndex heap_map_index =
+ mode == IMMUTABLE ? Heap::kHeapNumberMapRootIndex
+ : Heap::kMutableHeapNumberMapRootIndex;
+ Node* map = LoadRoot(heap_map_index);
+ StoreMapNoWriteBarrier(result, map);
return result;
}
-Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value) {
- Node* result = AllocateHeapNumber();
+Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value,
+ MutableMode mode) {
+ Node* result = AllocateHeapNumber(mode);
StoreHeapNumberValue(result, value);
return result;
}
@@ -1261,8 +1252,7 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length) {
IntPtrAdd(length, IntPtrConstant(SeqOneByteString::kHeaderSize)),
IntPtrConstant(kObjectAlignmentMask)),
IntPtrConstant(~kObjectAlignmentMask));
- Branch(IntPtrLessThanOrEqual(size,
- IntPtrConstant(Page::kMaxRegularHeapObjectSize)),
+ Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
Bind(&if_sizeissmall);
@@ -1314,8 +1304,7 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length) {
IntPtrConstant(SeqTwoByteString::kHeaderSize)),
IntPtrConstant(kObjectAlignmentMask)),
IntPtrConstant(~kObjectAlignmentMask));
- Branch(IntPtrLessThanOrEqual(size,
- IntPtrConstant(Page::kMaxRegularHeapObjectSize)),
+ Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
Bind(&if_sizeissmall);
@@ -1345,51 +1334,166 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length) {
return var_result.value();
}
-Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
- Node* capacity_node, Node* length_node,
- compiler::Node* allocation_site,
- ParameterMode mode) {
- bool is_double = IsFastDoubleElementsKind(kind);
- int base_size = JSArray::kSize + FixedArray::kHeaderSize;
- int elements_offset = JSArray::kSize;
+Node* CodeStubAssembler::AllocateSlicedOneByteString(Node* length, Node* parent,
+ Node* offset) {
+ Node* result = Allocate(SlicedString::kSize);
+ Node* map = LoadRoot(Heap::kSlicedOneByteStringMapRootIndex);
+ StoreMapNoWriteBarrier(result, map);
+ StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
+ MachineRepresentation::kTagged);
+ StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldOffset,
+ Int32Constant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
+ StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent,
+ MachineRepresentation::kTagged);
+ StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
+ MachineRepresentation::kTagged);
+ return result;
+}
+
+Node* CodeStubAssembler::AllocateSlicedTwoByteString(Node* length, Node* parent,
+ Node* offset) {
+ Node* result = Allocate(SlicedString::kSize);
+ Node* map = LoadRoot(Heap::kSlicedStringMapRootIndex);
+ StoreMapNoWriteBarrier(result, map);
+ StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
+ MachineRepresentation::kTagged);
+ StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldOffset,
+ Int32Constant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
+ StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent,
+ MachineRepresentation::kTagged);
+ StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
+ MachineRepresentation::kTagged);
+ return result;
+}
+
+Node* CodeStubAssembler::AllocateRegExpResult(Node* context, Node* length,
+ Node* index, Node* input) {
+ Node* const max_length =
+ SmiConstant(Smi::FromInt(JSArray::kInitialMaxFastElementArray));
+ Assert(SmiLessThanOrEqual(length, max_length));
+
+ // Allocate the JSRegExpResult.
+ // TODO(jgruber): Fold JSArray and FixedArray allocations, then remove
+ // unneeded store of elements.
+ Node* const result = Allocate(JSRegExpResult::kSize);
+
+ // TODO(jgruber): Store map as Heap constant?
+ Node* const native_context = LoadNativeContext(context);
+ Node* const map =
+ LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
+ StoreMapNoWriteBarrier(result, map);
+
+ // Initialize the header before allocating the elements.
+ Node* const empty_array = EmptyFixedArrayConstant();
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kEmptyFixedArrayRootIndex));
+ StoreObjectFieldNoWriteBarrier(result, JSArray::kPropertiesOffset,
+ empty_array);
+ StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, empty_array);
+ StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset, length);
- Comment("begin allocation of JSArray");
+ StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kIndexOffset, index);
+ StoreObjectField(result, JSRegExpResult::kInputOffset, input);
+ Node* const zero = IntPtrConstant(0);
+ Node* const length_intptr = SmiUntag(length);
+ const ElementsKind elements_kind = FAST_ELEMENTS;
+ const ParameterMode parameter_mode = INTPTR_PARAMETERS;
+
+ Node* const elements =
+ AllocateFixedArray(elements_kind, length_intptr, parameter_mode);
+ StoreObjectField(result, JSArray::kElementsOffset, elements);
+
+ // Fill in the elements with undefined.
+ FillFixedArrayWithValue(elements_kind, elements, zero, length_intptr,
+ Heap::kUndefinedValueRootIndex, parameter_mode);
+
+ return result;
+}
+
+Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
+ ElementsKind kind, Node* array_map, Node* length, Node* allocation_site) {
+ Comment("begin allocation of JSArray without elements");
+ int base_size = JSArray::kSize;
if (allocation_site != nullptr) {
base_size += AllocationMemento::kSize;
- elements_offset += AllocationMemento::kSize;
}
- Node* total_size =
- ElementOffsetFromIndex(capacity_node, kind, mode, base_size);
+ Node* size = IntPtrConstant(base_size);
+ Node* array = AllocateUninitializedJSArray(kind, array_map, length,
+ allocation_site, size);
+ return array;
+}
- // Allocate both array and elements object, and initialize the JSArray.
- Heap* heap = isolate()->heap();
- Node* array = Allocate(total_size);
+std::pair<Node*, Node*>
+CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
+ ElementsKind kind, Node* array_map, Node* length, Node* allocation_site,
+ Node* capacity, ParameterMode capacity_mode) {
+ Comment("begin allocation of JSArray with elements");
+ int base_size = JSArray::kSize;
+
+ if (allocation_site != nullptr) {
+ base_size += AllocationMemento::kSize;
+ }
+
+ int elements_offset = base_size;
+
+ // Compute space for elements
+ base_size += FixedArray::kHeaderSize;
+ Node* size = ElementOffsetFromIndex(capacity, kind, capacity_mode, base_size);
+
+ Node* array = AllocateUninitializedJSArray(kind, array_map, length,
+ allocation_site, size);
+
+ Node* elements = InnerAllocate(array, elements_offset);
+ StoreObjectField(array, JSObject::kElementsOffset, elements);
+
+ return {array, elements};
+}
+
+Node* CodeStubAssembler::AllocateUninitializedJSArray(ElementsKind kind,
+ Node* array_map,
+ Node* length,
+ Node* allocation_site,
+ Node* size_in_bytes) {
+ Node* array = Allocate(size_in_bytes);
+
+ Comment("write JSArray headers");
StoreMapNoWriteBarrier(array, array_map);
- Node* empty_properties = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldNoWriteBarrier(array, JSArray::kPropertiesOffset,
- empty_properties);
- StoreObjectFieldNoWriteBarrier(
- array, JSArray::kLengthOffset,
- mode == SMI_PARAMETERS ? length_node : SmiTag(length_node));
+
+ StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
+
+ StoreObjectFieldRoot(array, JSArray::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
if (allocation_site != nullptr) {
InitializeAllocationMemento(array, JSArray::kSize, allocation_site);
}
+ return array;
+}
+Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
+ Node* capacity, Node* length,
+ Node* allocation_site,
+ ParameterMode capacity_mode) {
+ bool is_double = IsFastDoubleElementsKind(kind);
+
+ // Allocate both array and elements object, and initialize the JSArray.
+ Node *array, *elements;
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ kind, array_map, length, allocation_site, capacity, capacity_mode);
// Setup elements object.
- Node* elements = InnerAllocate(array, elements_offset);
- StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset, elements);
+ Heap* heap = isolate()->heap();
Handle<Map> elements_map(is_double ? heap->fixed_double_array_map()
: heap->fixed_array_map());
StoreMapNoWriteBarrier(elements, HeapConstant(elements_map));
- StoreObjectFieldNoWriteBarrier(
- elements, FixedArray::kLengthOffset,
- mode == SMI_PARAMETERS ? capacity_node : SmiTag(capacity_node));
+ StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
+ TagParameter(capacity, capacity_mode));
- FillFixedArrayWithHole(kind, elements, IntPtrConstant(0), capacity_node,
- mode);
+ // Fill in the elements with holes.
+ FillFixedArrayWithValue(kind, elements, IntPtrConstant(0), capacity,
+ Heap::kTheHoleValueRootIndex, capacity_mode);
return array;
}
@@ -1398,7 +1502,7 @@ Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
Node* capacity_node,
ParameterMode mode,
AllocationFlags flags) {
- Node* total_size = GetFixedAarrayAllocationSize(capacity_node, kind, mode);
+ Node* total_size = GetFixedArrayAllocationSize(capacity_node, kind, mode);
// Allocate both array and elements object, and initialize the JSArray.
Node* array = Allocate(total_size, flags);
@@ -1411,24 +1515,24 @@ Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
} else {
StoreMapNoWriteBarrier(array, HeapConstant(map));
}
- StoreObjectFieldNoWriteBarrier(
- array, FixedArray::kLengthOffset,
- mode == INTEGER_PARAMETERS ? SmiTag(capacity_node) : capacity_node);
+ StoreObjectFieldNoWriteBarrier(array, FixedArray::kLengthOffset,
+ TagParameter(capacity_node, mode));
return array;
}
-void CodeStubAssembler::FillFixedArrayWithHole(ElementsKind kind,
- compiler::Node* array,
- compiler::Node* from_node,
- compiler::Node* to_node,
- ParameterMode mode) {
- int const first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
- Heap* heap = isolate()->heap();
- Node* hole = HeapConstant(Handle<HeapObject>(heap->the_hole_value()));
+void CodeStubAssembler::FillFixedArrayWithValue(
+ ElementsKind kind, Node* array, Node* from_node, Node* to_node,
+ Heap::RootListIndex value_root_index, ParameterMode mode) {
+ bool is_double = IsFastDoubleElementsKind(kind);
+ DCHECK(value_root_index == Heap::kTheHoleValueRootIndex ||
+ value_root_index == Heap::kUndefinedValueRootIndex);
+ DCHECK_IMPLIES(is_double, value_root_index == Heap::kTheHoleValueRootIndex);
+ STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
Node* double_hole =
Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
- DCHECK_EQ(kHoleNanLower32, kHoleNanUpper32);
- bool is_double = IsFastDoubleElementsKind(kind);
+ Node* value = LoadRoot(value_root_index);
+
+ const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
int32_t to;
bool constant_to = ToInt32Constant(to_node, to);
int32_t from;
@@ -1436,8 +1540,9 @@ void CodeStubAssembler::FillFixedArrayWithHole(ElementsKind kind,
if (constant_to && constant_from &&
(to - from) <= kElementLoopUnrollThreshold) {
for (int i = from; i < to; ++i) {
+ Node* index = IntPtrConstant(i);
if (is_double) {
- Node* offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
+ Node* offset = ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
first_element_offset);
// Don't use doubles to store the hole double, since manipulating the
// signaling NaN used for the hole in C++, e.g. with bit_cast, will
@@ -1453,14 +1558,14 @@ void CodeStubAssembler::FillFixedArrayWithHole(ElementsKind kind,
} else {
StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
double_hole);
- offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
+ offset = ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
first_element_offset + kPointerSize);
StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
double_hole);
}
} else {
- StoreFixedArrayElement(array, Int32Constant(i), hole,
- SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER,
+ INTPTR_PARAMETERS);
}
}
} else {
@@ -1477,8 +1582,8 @@ void CodeStubAssembler::FillFixedArrayWithHole(ElementsKind kind,
Bind(&decrement);
current.Bind(IntPtrSub(
current.value(),
- Int32Constant(IsFastDoubleElementsKind(kind) ? kDoubleSize
- : kPointerSize)));
+ IntPtrConstant(IsFastDoubleElementsKind(kind) ? kDoubleSize
+ : kPointerSize)));
if (is_double) {
// Don't use doubles to store the hole double, since manipulating the
// signaling NaN used for the hole in C++, e.g. with bit_cast, will
@@ -1494,15 +1599,13 @@ void CodeStubAssembler::FillFixedArrayWithHole(ElementsKind kind,
} else {
StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
Int32Constant(first_element_offset), double_hole);
- StoreNoWriteBarrier(
- MachineRepresentation::kWord32,
- IntPtrAdd(current.value(),
- Int32Constant(kPointerSize + first_element_offset)),
- double_hole);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
+ Int32Constant(kPointerSize + first_element_offset),
+ double_hole);
}
} else {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, current.value(),
- IntPtrConstant(first_element_offset), hole);
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), current.value(),
+ IntPtrConstant(first_element_offset), value);
}
Node* compare = WordNotEqual(current.value(), limit);
Branch(compare, &decrement, &done);
@@ -1511,50 +1614,236 @@ void CodeStubAssembler::FillFixedArrayWithHole(ElementsKind kind,
}
}
-void CodeStubAssembler::CopyFixedArrayElements(ElementsKind kind,
- compiler::Node* from_array,
- compiler::Node* to_array,
- compiler::Node* element_count,
- WriteBarrierMode barrier_mode,
- ParameterMode mode) {
- Label test(this);
+void CodeStubAssembler::CopyFixedArrayElements(
+ ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
+ Node* to_array, Node* element_count, Node* capacity,
+ WriteBarrierMode barrier_mode, ParameterMode mode) {
+ STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
+ const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
+ Comment("[ CopyFixedArrayElements");
+
+ // Typed array elements are not supported.
+ DCHECK(!IsFixedTypedArrayElementsKind(from_kind));
+ DCHECK(!IsFixedTypedArrayElementsKind(to_kind));
+
Label done(this);
- bool double_elements = IsFastDoubleElementsKind(kind);
+ bool from_double_elements = IsFastDoubleElementsKind(from_kind);
+ bool to_double_elements = IsFastDoubleElementsKind(to_kind);
+ bool element_size_matches =
+ Is64() ||
+ IsFastDoubleElementsKind(from_kind) == IsFastDoubleElementsKind(to_kind);
+ bool doubles_to_objects_conversion =
+ IsFastDoubleElementsKind(from_kind) && IsFastObjectElementsKind(to_kind);
bool needs_write_barrier =
- barrier_mode == UPDATE_WRITE_BARRIER && IsFastObjectElementsKind(kind);
+ doubles_to_objects_conversion || (barrier_mode == UPDATE_WRITE_BARRIER &&
+ IsFastObjectElementsKind(to_kind));
+ Node* double_hole =
+ Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
+
+ if (doubles_to_objects_conversion) {
+ // If the copy might trigger a GC, make sure that the FixedArray is
+ // pre-initialized with holes to make sure that it's always in a
+ // consistent state.
+ FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant(0, mode),
+ capacity, Heap::kTheHoleValueRootIndex, mode);
+ } else if (element_count != capacity) {
+ FillFixedArrayWithValue(to_kind, to_array, element_count, capacity,
+ Heap::kTheHoleValueRootIndex, mode);
+ }
+
Node* limit_offset = ElementOffsetFromIndex(
- IntPtrConstant(0), kind, mode, FixedArray::kHeaderSize - kHeapObjectTag);
- Variable current_offset(this, MachineType::PointerRepresentation());
- current_offset.Bind(ElementOffsetFromIndex(
- element_count, kind, mode, FixedArray::kHeaderSize - kHeapObjectTag));
- Label decrement(this, &current_offset);
+ IntPtrOrSmiConstant(0, mode), from_kind, mode, first_element_offset);
+ Variable var_from_offset(this, MachineType::PointerRepresentation());
+ var_from_offset.Bind(ElementOffsetFromIndex(element_count, from_kind, mode,
+ first_element_offset));
+ // This second variable is used only when the element sizes of source and
+ // destination arrays do not match.
+ Variable var_to_offset(this, MachineType::PointerRepresentation());
+ if (element_size_matches) {
+ var_to_offset.Bind(var_from_offset.value());
+ } else {
+ var_to_offset.Bind(ElementOffsetFromIndex(element_count, to_kind, mode,
+ first_element_offset));
+ }
- Branch(WordEqual(current_offset.value(), limit_offset), &done, &decrement);
+ Variable* vars[] = {&var_from_offset, &var_to_offset};
+ Label decrement(this, 2, vars);
+
+ Branch(WordEqual(var_from_offset.value(), limit_offset), &done, &decrement);
Bind(&decrement);
{
- current_offset.Bind(IntPtrSub(
- current_offset.value(),
- IntPtrConstant(double_elements ? kDoubleSize : kPointerSize)));
+ Node* from_offset = IntPtrSub(
+ var_from_offset.value(),
+ IntPtrConstant(from_double_elements ? kDoubleSize : kPointerSize));
+ var_from_offset.Bind(from_offset);
+
+ Node* to_offset;
+ if (element_size_matches) {
+ to_offset = from_offset;
+ } else {
+ to_offset = IntPtrSub(
+ var_to_offset.value(),
+ IntPtrConstant(to_double_elements ? kDoubleSize : kPointerSize));
+ var_to_offset.Bind(to_offset);
+ }
+
+ Label next_iter(this), store_double_hole(this);
+ Label* if_hole;
+ if (doubles_to_objects_conversion) {
+ // The target elements array is already preinitialized with holes, so we
+ // can just proceed with the next iteration.
+ if_hole = &next_iter;
+ } else if (IsFastDoubleElementsKind(to_kind)) {
+ if_hole = &store_double_hole;
+ } else {
+ // In all the other cases don't check for holes and copy the data as is.
+ if_hole = nullptr;
+ }
+
+ Node* value = LoadElementAndPrepareForStore(
+ from_array, var_from_offset.value(), from_kind, to_kind, if_hole);
- Node* value =
- Load(double_elements ? MachineType::Float64() : MachineType::Pointer(),
- from_array, current_offset.value());
if (needs_write_barrier) {
- Store(MachineRepresentation::kTagged, to_array,
- current_offset.value(), value);
- } else if (double_elements) {
- StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array,
- current_offset.value(), value);
+ Store(MachineRepresentation::kTagged, to_array, to_offset, value);
+ } else if (to_double_elements) {
+ StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array, to_offset,
+ value);
} else {
StoreNoWriteBarrier(MachineType::PointerRepresentation(), to_array,
- current_offset.value(), value);
+ to_offset, value);
+ }
+ Goto(&next_iter);
+
+ if (if_hole == &store_double_hole) {
+ Bind(&store_double_hole);
+ // Don't use doubles to store the hole double, since manipulating the
+ // signaling NaN used for the hole in C++, e.g. with bit_cast, will
+ // change its value on ia32 (the x87 stack is used to return values
+ // and stores to the stack silently clear the signalling bit).
+ //
+ // TODO(danno): When we have a Float32/Float64 wrapper class that
+ // preserves double bits during manipulation, remove this code/change
+ // this to an indexed Float64 store.
+ if (Is64()) {
+ StoreNoWriteBarrier(MachineRepresentation::kWord64, to_array, to_offset,
+ double_hole);
+ } else {
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array, to_offset,
+ double_hole);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array,
+ IntPtrAdd(to_offset, IntPtrConstant(kPointerSize)),
+ double_hole);
+ }
+ Goto(&next_iter);
}
- Node* compare = WordNotEqual(current_offset.value(), limit_offset);
+
+ Bind(&next_iter);
+ Node* compare = WordNotEqual(from_offset, limit_offset);
Branch(compare, &decrement, &done);
}
Bind(&done);
+ IncrementCounter(isolate()->counters()->inlined_copied_elements(), 1);
+ Comment("] CopyFixedArrayElements");
+}
+
+void CodeStubAssembler::CopyStringCharacters(compiler::Node* from_string,
+ compiler::Node* to_string,
+ compiler::Node* from_index,
+ compiler::Node* character_count,
+ String::Encoding encoding) {
+ Label out(this);
+
+ // Nothing to do for zero characters.
+
+ GotoIf(SmiLessThanOrEqual(character_count, SmiConstant(Smi::FromInt(0))),
+ &out);
+
+ // Calculate offsets into the strings.
+
+ Node* from_offset;
+ Node* limit_offset;
+ Node* to_offset;
+
+ {
+ Node* byte_count = SmiUntag(character_count);
+ Node* from_byte_index = SmiUntag(from_index);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ const int offset = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+ from_offset = IntPtrAdd(IntPtrConstant(offset), from_byte_index);
+ limit_offset = IntPtrAdd(from_offset, byte_count);
+ to_offset = IntPtrConstant(offset);
+ } else {
+ STATIC_ASSERT(2 == sizeof(uc16));
+ byte_count = WordShl(byte_count, 1);
+ from_byte_index = WordShl(from_byte_index, 1);
+
+ const int offset = SeqTwoByteString::kHeaderSize - kHeapObjectTag;
+ from_offset = IntPtrAdd(IntPtrConstant(offset), from_byte_index);
+ limit_offset = IntPtrAdd(from_offset, byte_count);
+ to_offset = IntPtrConstant(offset);
+ }
+ }
+
+ Variable var_from_offset(this, MachineType::PointerRepresentation());
+ Variable var_to_offset(this, MachineType::PointerRepresentation());
+
+ var_from_offset.Bind(from_offset);
+ var_to_offset.Bind(to_offset);
+
+ Variable* vars[] = {&var_from_offset, &var_to_offset};
+ Label decrement(this, 2, vars);
+
+ Label loop(this, 2, vars);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ from_offset = var_from_offset.value();
+ to_offset = var_to_offset.value();
+
+ // TODO(jgruber): We could make this faster through larger copy unit sizes.
+ Node* value = Load(MachineType::Uint8(), from_string, from_offset);
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, to_string, to_offset,
+ value);
+
+ Node* new_from_offset = IntPtrAdd(from_offset, IntPtrConstant(1));
+ var_from_offset.Bind(new_from_offset);
+ var_to_offset.Bind(IntPtrAdd(to_offset, IntPtrConstant(1)));
+
+ Branch(WordNotEqual(new_from_offset, limit_offset), &loop, &out);
+ }
+
+ Bind(&out);
+}
+
+Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
+ Node* offset,
+ ElementsKind from_kind,
+ ElementsKind to_kind,
+ Label* if_hole) {
+ if (IsFastDoubleElementsKind(from_kind)) {
+ Node* value =
+ LoadDoubleWithHoleCheck(array, offset, if_hole, MachineType::Float64());
+ if (!IsFastDoubleElementsKind(to_kind)) {
+ value = AllocateHeapNumberWithValue(value);
+ }
+ return value;
+
+ } else {
+ Node* value = Load(MachineType::Pointer(), array, offset);
+ if (if_hole) {
+ GotoIf(WordEqual(value, TheHoleConstant()), if_hole);
+ }
+ if (IsFastDoubleElementsKind(to_kind)) {
+ if (IsFastSmiElementsKind(from_kind)) {
+ value = SmiToFloat64(value);
+ } else {
+ value = LoadHeapNumberValue(value);
+ }
+ }
+ return value;
+ }
}
Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
@@ -1563,7 +1852,7 @@ Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
Node* new_capacity = IntPtrAdd(half_old_capacity, old_capacity);
Node* unconditioned_result =
IntPtrAdd(new_capacity, IntPtrOrSmiConstant(16, mode));
- if (mode == INTEGER_PARAMETERS) {
+ if (mode == INTEGER_PARAMETERS || mode == INTPTR_PARAMETERS) {
return unconditioned_result;
} else {
int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
@@ -1572,50 +1861,64 @@ Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
}
}
-Node* CodeStubAssembler::CheckAndGrowElementsCapacity(Node* context,
- Node* elements,
- ElementsKind kind,
- Node* key, Label* fail) {
+Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
+ ElementsKind kind, Node* key,
+ Label* bailout) {
Node* capacity = LoadFixedArrayBaseLength(elements);
- // On 32-bit platforms, there is a slight performance advantage to doing all
- // of the arithmetic for the new backing store with SMIs, since it's possible
- // to save a few tag/untag operations without paying an extra expense when
- // calculating array offset (the smi math can be folded away) and there are
- // fewer live ranges. Thus only convert |capacity| and |key| to untagged value
- // on 64-bit platforms.
- ParameterMode mode = Is64() ? INTEGER_PARAMETERS : SMI_PARAMETERS;
- if (mode == INTEGER_PARAMETERS) {
- capacity = SmiUntag(capacity);
- key = SmiUntag(key);
- }
+ ParameterMode mode = OptimalParameterMode();
+ capacity = UntagParameter(capacity, mode);
+ key = UntagParameter(key, mode);
+
+ return TryGrowElementsCapacity(object, elements, kind, key, capacity, mode,
+ bailout);
+}
+
+Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
+ ElementsKind kind, Node* key,
+ Node* capacity,
+ ParameterMode mode,
+ Label* bailout) {
+ Comment("TryGrowElementsCapacity");
// If the gap growth is too big, fall back to the runtime.
Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode);
Node* max_capacity = IntPtrAdd(capacity, max_gap);
- GotoIf(UintPtrGreaterThanOrEqual(key, max_capacity), fail);
+ GotoIf(UintPtrGreaterThanOrEqual(key, max_capacity), bailout);
- // Calculate the capacity of the new backing tore
+ // Calculate the capacity of the new backing store.
Node* new_capacity = CalculateNewElementsCapacity(
IntPtrAdd(key, IntPtrOrSmiConstant(1, mode)), mode);
+ return GrowElementsCapacity(object, elements, kind, kind, capacity,
+ new_capacity, mode, bailout);
+}
+Node* CodeStubAssembler::GrowElementsCapacity(
+ Node* object, Node* elements, ElementsKind from_kind, ElementsKind to_kind,
+ Node* capacity, Node* new_capacity, ParameterMode mode, Label* bailout) {
+ Comment("[ GrowElementsCapacity");
// If size of the allocation for the new capacity doesn't fit in a page
- // that we can bump-pointer allocate from, fall back to the runtime,
- int max_size = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind);
+ // that we can bump-pointer allocate from, fall back to the runtime.
+ int max_size = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(to_kind);
GotoIf(UintPtrGreaterThanOrEqual(new_capacity,
IntPtrOrSmiConstant(max_size, mode)),
- fail);
+ bailout);
// Allocate the new backing store.
- Node* new_elements = AllocateFixedArray(kind, new_capacity, mode);
+ Node* new_elements = AllocateFixedArray(to_kind, new_capacity, mode);
// Fill in the added capacity in the new store with holes.
- FillFixedArrayWithHole(kind, new_elements, capacity, new_capacity, mode);
+ FillFixedArrayWithValue(to_kind, new_elements, capacity, new_capacity,
+ Heap::kTheHoleValueRootIndex, mode);
// Copy the elements from the old elements store to the new.
- CopyFixedArrayElements(kind, elements, new_elements, capacity,
- SKIP_WRITE_BARRIER, mode);
+ // The size-check above guarantees that the |new_elements| is allocated
+ // in new space so we can skip the write barrier.
+ CopyFixedArrayElements(from_kind, elements, to_kind, new_elements, capacity,
+ new_capacity, SKIP_WRITE_BARRIER, mode);
+ StoreObjectField(object, JSObject::kElementsOffset, new_elements);
+ Comment("] GrowElementsCapacity");
return new_elements;
}
@@ -1874,9 +2177,8 @@ Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
// Check if the {value} is already String.
Label if_valueisnotstring(this, Label::kDeferred);
- Branch(
- Int32LessThan(value_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
- &if_valueisstring, &if_valueisnotstring);
+ Branch(IsStringInstanceType(value_instance_type), &if_valueisstring,
+ &if_valueisnotstring);
Bind(&if_valueisnotstring);
{
// Check if the {value} is null.
@@ -1969,9 +2271,7 @@ Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
&done_loop);
break;
case PrimitiveType::kString:
- GotoIf(Int32LessThan(value_instance_type,
- Int32Constant(FIRST_NONSTRING_TYPE)),
- &done_loop);
+ GotoIf(IsStringInstanceType(value_instance_type), &done_loop);
break;
case PrimitiveType::kSymbol:
GotoIf(Word32Equal(value_instance_type, Int32Constant(SYMBOL_TYPE)),
@@ -1995,6 +2295,45 @@ Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
return var_value.value();
}
+Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
+ InstanceType instance_type,
+ char const* method_name) {
+ Label out(this), throw_exception(this, Label::kDeferred);
+ Variable var_value_map(this, MachineRepresentation::kTagged);
+
+ GotoIf(WordIsSmi(value), &throw_exception);
+
+ // Load the instance type of the {value}.
+ var_value_map.Bind(LoadMap(value));
+ Node* const value_instance_type = LoadMapInstanceType(var_value_map.value());
+
+ Branch(Word32Equal(value_instance_type, Int32Constant(instance_type)), &out,
+ &throw_exception);
+
+ // The {value} is not a compatible receiver for this method.
+ Bind(&throw_exception);
+ CallRuntime(
+ Runtime::kThrowIncompatibleMethodReceiver, context,
+ HeapConstant(factory()->NewStringFromAsciiChecked(method_name, TENURED)),
+ value);
+ var_value_map.Bind(UndefinedConstant());
+ Goto(&out); // Never reached.
+
+ Bind(&out);
+ return var_value_map.value();
+}
+
+Node* CodeStubAssembler::IsStringInstanceType(Node* instance_type) {
+ STATIC_ASSERT(INTERNALIZED_STRING_TYPE == FIRST_TYPE);
+ return Int32LessThan(instance_type, Int32Constant(FIRST_NONSTRING_TYPE));
+}
+
+Node* CodeStubAssembler::IsJSReceiverInstanceType(Node* instance_type) {
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ return Int32GreaterThanOrEqual(instance_type,
+ Int32Constant(FIRST_JS_RECEIVER_TYPE));
+}
+
Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
// Translate the {index} into a Word.
index = SmiToWord(index);
@@ -2102,14 +2441,14 @@ Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
Bind(&if_stringisexternal);
{
// Check if the {string} is a short external string.
- Label if_stringisshort(this),
- if_stringisnotshort(this, Label::kDeferred);
+ Label if_stringisnotshort(this),
+ if_stringisshort(this, Label::kDeferred);
Branch(Word32Equal(Word32And(string_instance_type,
Int32Constant(kShortExternalStringMask)),
Int32Constant(0)),
- &if_stringisshort, &if_stringisnotshort);
+ &if_stringisnotshort, &if_stringisshort);
- Bind(&if_stringisshort);
+ Bind(&if_stringisnotshort);
{
// Load the actual resource data from the {string}.
Node* string_resource_data =
@@ -2139,7 +2478,7 @@ Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
}
}
- Bind(&if_stringisnotshort);
+ Bind(&if_stringisshort);
{
// The {string} might be compressed, call the runtime.
var_result.Bind(SmiToWord32(
@@ -2224,6 +2563,586 @@ Node* CodeStubAssembler::StringFromCharCode(Node* code) {
return var_result.value();
}
+namespace {
+
+// A wrapper around CopyStringCharacters which determines the correct string
+// encoding, allocates a corresponding sequential string, and then copies the
+// given character range using CopyStringCharacters.
+// |from_string| must be a sequential string. |from_index| and
+// |character_count| must be Smis s.t.
+// 0 <= |from_index| <= |from_index| + |character_count| < from_string.length.
+Node* AllocAndCopyStringCharacters(CodeStubAssembler* a, Node* context,
+ Node* from, Node* from_instance_type,
+ Node* from_index, Node* character_count) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label end(a), two_byte_sequential(a);
+ Variable var_result(a, MachineRepresentation::kTagged);
+
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ a->GotoIf(a->Word32Equal(a->Word32And(from_instance_type,
+ a->Int32Constant(kStringEncodingMask)),
+ a->Int32Constant(0)),
+ &two_byte_sequential);
+
+ // The subject string is a sequential one-byte string.
+ {
+ Node* result =
+ a->AllocateSeqOneByteString(context, a->SmiToWord(character_count));
+ a->CopyStringCharacters(from, result, from_index, character_count,
+ String::ONE_BYTE_ENCODING);
+ var_result.Bind(result);
+
+ a->Goto(&end);
+ }
+
+ // The subject string is a sequential two-byte string.
+ a->Bind(&two_byte_sequential);
+ {
+ Node* result =
+ a->AllocateSeqTwoByteString(context, a->SmiToWord(character_count));
+ a->CopyStringCharacters(from, result, from_index, character_count,
+ String::TWO_BYTE_ENCODING);
+ var_result.Bind(result);
+
+ a->Goto(&end);
+ }
+
+ a->Bind(&end);
+ return var_result.value();
+}
+
+} // namespace
+
+Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
+ Node* to) {
+ Label end(this);
+ Label runtime(this);
+
+ Variable var_instance_type(this, MachineRepresentation::kWord8); // Int32.
+ Variable var_result(this, MachineRepresentation::kTagged); // String.
+ Variable var_from(this, MachineRepresentation::kTagged); // Smi.
+ Variable var_string(this, MachineRepresentation::kTagged); // String.
+
+ var_instance_type.Bind(Int32Constant(0));
+ var_string.Bind(string);
+ var_from.Bind(from);
+
+ // Make sure first argument is a string.
+
+ // Bailout if receiver is a Smi.
+ GotoIf(WordIsSmi(string), &runtime);
+
+ // Load the instance type of the {string}.
+ Node* const instance_type = LoadInstanceType(string);
+ var_instance_type.Bind(instance_type);
+
+ // Check if {string} is a String.
+ GotoUnless(IsStringInstanceType(instance_type), &runtime);
+
+ // Make sure that both from and to are non-negative smis.
+
+ GotoUnless(WordIsPositiveSmi(from), &runtime);
+ GotoUnless(WordIsPositiveSmi(to), &runtime);
+
+ Node* const substr_length = SmiSub(to, from);
+ Node* const string_length = LoadStringLength(string);
+
+ // Begin dispatching based on substring length.
+
+ Label original_string_or_invalid_length(this);
+ GotoIf(SmiAboveOrEqual(substr_length, string_length),
+ &original_string_or_invalid_length);
+
+ // A real substring (substr_length < string_length).
+
+ Label single_char(this);
+ GotoIf(SmiEqual(substr_length, SmiConstant(Smi::FromInt(1))), &single_char);
+
+ // TODO(jgruber): Add an additional case for substring of length == 0?
+
+ // Deal with different string types: update the index if necessary
+ // and put the underlying string into var_string.
+
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ Label underlying_unpacked(this);
+ GotoIf(Word32Equal(
+ Word32And(instance_type, Int32Constant(kIsIndirectStringMask)),
+ Int32Constant(0)),
+ &underlying_unpacked);
+
+ // The subject string is either a sliced or cons string.
+
+ Label sliced_string(this);
+ GotoIf(Word32NotEqual(
+ Word32And(instance_type, Int32Constant(kSlicedNotConsMask)),
+ Int32Constant(0)),
+ &sliced_string);
+
+ // Cons string. Check whether it is flat, then fetch first part.
+ // Flat cons strings have an empty second part.
+ {
+ GotoIf(WordNotEqual(LoadObjectField(string, ConsString::kSecondOffset),
+ EmptyStringConstant()),
+ &runtime);
+
+ Node* first_string_part = LoadObjectField(string, ConsString::kFirstOffset);
+ var_string.Bind(first_string_part);
+ var_instance_type.Bind(LoadInstanceType(first_string_part));
+
+ Goto(&underlying_unpacked);
+ }
+
+ Bind(&sliced_string);
+ {
+ // Fetch parent and correct start index by offset.
+ Node* sliced_offset = LoadObjectField(string, SlicedString::kOffsetOffset);
+ var_from.Bind(SmiAdd(from, sliced_offset));
+
+ Node* slice_parent = LoadObjectField(string, SlicedString::kParentOffset);
+ var_string.Bind(slice_parent);
+
+ Node* slice_parent_instance_type = LoadInstanceType(slice_parent);
+ var_instance_type.Bind(slice_parent_instance_type);
+
+ Goto(&underlying_unpacked);
+ }
+
+ // The subject string can only be external or sequential string of either
+ // encoding at this point.
+ Label external_string(this);
+ Bind(&underlying_unpacked);
+ {
+ if (FLAG_string_slices) {
+ Label copy_routine(this);
+
+ // Short slice. Copy instead of slicing.
+ GotoIf(SmiLessThan(substr_length,
+ SmiConstant(Smi::FromInt(SlicedString::kMinLength))),
+ &copy_routine);
+
+ // Allocate new sliced string.
+
+ Label two_byte_slice(this);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+
+ Counters* counters = isolate()->counters();
+ IncrementCounter(counters->sub_string_native(), 1);
+
+ GotoIf(Word32Equal(Word32And(var_instance_type.value(),
+ Int32Constant(kStringEncodingMask)),
+ Int32Constant(0)),
+ &two_byte_slice);
+
+ var_result.Bind(AllocateSlicedOneByteString(
+ substr_length, var_string.value(), var_from.value()));
+ Goto(&end);
+
+ Bind(&two_byte_slice);
+
+ var_result.Bind(AllocateSlicedTwoByteString(
+ substr_length, var_string.value(), var_from.value()));
+ Goto(&end);
+
+ Bind(&copy_routine);
+ }
+
+ // The subject string can only be external or sequential string of either
+ // encoding at this point.
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ GotoUnless(Word32Equal(Word32And(var_instance_type.value(),
+ Int32Constant(kExternalStringTag)),
+ Int32Constant(0)),
+ &external_string);
+
+ var_result.Bind(AllocAndCopyStringCharacters(
+ this, context, var_string.value(), var_instance_type.value(),
+ var_from.value(), substr_length));
+
+ Counters* counters = isolate()->counters();
+ IncrementCounter(counters->sub_string_native(), 1);
+
+ Goto(&end);
+ }
+
+ // Handle external string.
+ Bind(&external_string);
+ {
+ // Rule out short external strings.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ GotoIf(Word32NotEqual(Word32And(var_instance_type.value(),
+ Int32Constant(kShortExternalStringMask)),
+ Int32Constant(0)),
+ &runtime);
+
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize ==
+ SeqOneByteString::kHeaderSize);
+
+ Node* resource_data = LoadObjectField(var_string.value(),
+ ExternalString::kResourceDataOffset);
+ Node* const fake_sequential_string = IntPtrSub(
+ resource_data,
+ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ var_result.Bind(AllocAndCopyStringCharacters(
+ this, context, fake_sequential_string, var_instance_type.value(),
+ var_from.value(), substr_length));
+
+ Counters* counters = isolate()->counters();
+ IncrementCounter(counters->sub_string_native(), 1);
+
+ Goto(&end);
+ }
+
+ // Substrings of length 1 are generated through CharCodeAt and FromCharCode.
+ Bind(&single_char);
+ {
+ Node* char_code = StringCharCodeAt(var_string.value(), var_from.value());
+ var_result.Bind(StringFromCharCode(char_code));
+ Goto(&end);
+ }
+
+ Bind(&original_string_or_invalid_length);
+ {
+ // Longer than original string's length or negative: unsafe arguments.
+ GotoIf(SmiAbove(substr_length, string_length), &runtime);
+
+ // Equal length - check if {from, to} == {0, str.length}.
+ GotoIf(SmiAbove(from, SmiConstant(Smi::FromInt(0))), &runtime);
+
+ // Return the original string (substr_length == string_length).
+
+ Counters* counters = isolate()->counters();
+ IncrementCounter(counters->sub_string_native(), 1);
+
+ var_result.Bind(string);
+ Goto(&end);
+ }
+
+ // Fall back to a runtime call.
+ Bind(&runtime);
+ {
+ var_result.Bind(
+ CallRuntime(Runtime::kSubString, context, string, from, to));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::StringFromCodePoint(compiler::Node* codepoint,
+ UnicodeEncoding encoding) {
+ Variable var_result(this, MachineRepresentation::kTagged);
+ var_result.Bind(EmptyStringConstant());
+
+ Label if_isword16(this), if_isword32(this), return_result(this);
+
+ Branch(Uint32LessThan(codepoint, Int32Constant(0x10000)), &if_isword16,
+ &if_isword32);
+
+ Bind(&if_isword16);
+ {
+ var_result.Bind(StringFromCharCode(codepoint));
+ Goto(&return_result);
+ }
+
+ Bind(&if_isword32);
+ {
+ switch (encoding) {
+ case UnicodeEncoding::UTF16:
+ break;
+ case UnicodeEncoding::UTF32: {
+ // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
+ Node* lead_offset = Int32Constant(0xD800 - (0x10000 >> 10));
+
+ // lead = (codepoint >> 10) + LEAD_OFFSET
+ Node* lead =
+ Int32Add(WordShr(codepoint, Int32Constant(10)), lead_offset);
+
+ // trail = (codepoint & 0x3FF) + 0xDC00;
+ Node* trail = Int32Add(Word32And(codepoint, Int32Constant(0x3FF)),
+ Int32Constant(0xDC00));
+
+ // codpoint = (trail << 16) | lead;
+ codepoint = Word32Or(WordShl(trail, Int32Constant(16)), lead);
+ break;
+ }
+ }
+
+ Node* value = AllocateSeqTwoByteString(2);
+ StoreNoWriteBarrier(
+ MachineRepresentation::kWord32, value,
+ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
+ codepoint);
+ var_result.Bind(value);
+ Goto(&return_result);
+ }
+
+ Bind(&return_result);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::StringToNumber(Node* context, Node* input) {
+ Label runtime(this, Label::kDeferred);
+ Label end(this);
+
+ Variable var_result(this, MachineRepresentation::kTagged);
+
+ // Check if string has a cached array index.
+ Node* hash = LoadNameHashField(input);
+ Node* bit =
+ Word32And(hash, Int32Constant(String::kContainsCachedArrayIndexMask));
+ GotoIf(Word32NotEqual(bit, Int32Constant(0)), &runtime);
+
+ var_result.Bind(SmiTag(BitFieldDecode<String::ArrayIndexValueBits>(hash)));
+ Goto(&end);
+
+ Bind(&runtime);
+ {
+ var_result.Bind(CallRuntime(Runtime::kStringToNumber, context, input));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::ToName(Node* context, Node* value) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label end(this);
+ Variable var_result(this, MachineRepresentation::kTagged);
+
+ Label is_number(this);
+ GotoIf(WordIsSmi(value), &is_number);
+
+ Label not_name(this);
+ Node* value_instance_type = LoadInstanceType(value);
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ GotoIf(Int32GreaterThan(value_instance_type, Int32Constant(LAST_NAME_TYPE)),
+ &not_name);
+
+ var_result.Bind(value);
+ Goto(&end);
+
+ Bind(&is_number);
+ {
+ Callable callable = CodeFactory::NumberToString(isolate());
+ var_result.Bind(CallStub(callable, context, value));
+ Goto(&end);
+ }
+
+ Bind(&not_name);
+ {
+ GotoIf(Word32Equal(value_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+ &is_number);
+
+ Label not_oddball(this);
+ GotoIf(Word32NotEqual(value_instance_type, Int32Constant(ODDBALL_TYPE)),
+ &not_oddball);
+
+ var_result.Bind(LoadObjectField(value, Oddball::kToStringOffset));
+ Goto(&end);
+
+ Bind(&not_oddball);
+ {
+ var_result.Bind(CallRuntime(Runtime::kToName, context, value));
+ Goto(&end);
+ }
+ }
+
+ Bind(&end);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
+ // Assert input is a HeapObject (not smi or heap number)
+ Assert(Word32BinaryNot(WordIsSmi(input)));
+ Assert(Word32NotEqual(LoadMap(input), HeapNumberMapConstant()));
+
+ // We might need to loop once here due to ToPrimitive conversions.
+ Variable var_input(this, MachineRepresentation::kTagged);
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label loop(this, &var_input);
+ Label end(this);
+ var_input.Bind(input);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Load the current {input} value (known to be a HeapObject).
+ Node* input = var_input.value();
+
+ // Dispatch on the {input} instance type.
+ Node* input_instance_type = LoadInstanceType(input);
+ Label if_inputisstring(this), if_inputisoddball(this),
+ if_inputisreceiver(this, Label::kDeferred),
+ if_inputisother(this, Label::kDeferred);
+ GotoIf(IsStringInstanceType(input_instance_type), &if_inputisstring);
+ GotoIf(Word32Equal(input_instance_type, Int32Constant(ODDBALL_TYPE)),
+ &if_inputisoddball);
+ Branch(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver,
+ &if_inputisother);
+
+ Bind(&if_inputisstring);
+ {
+ // The {input} is a String, use the fast stub to convert it to a Number.
+ var_result.Bind(StringToNumber(context, input));
+ Goto(&end);
+ }
+
+ Bind(&if_inputisoddball);
+ {
+ // The {input} is an Oddball, we just need to load the Number value of it.
+ var_result.Bind(LoadObjectField(input, Oddball::kToNumberOffset));
+ Goto(&end);
+ }
+
+ Bind(&if_inputisreceiver);
+ {
+ // The {input} is a JSReceiver, we need to convert it to a Primitive first
+ // using the ToPrimitive type conversion, preferably yielding a Number.
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ isolate(), ToPrimitiveHint::kNumber);
+ Node* result = CallStub(callable, context, input);
+
+ // Check if the {result} is already a Number.
+ Label if_resultisnumber(this), if_resultisnotnumber(this);
+ GotoIf(WordIsSmi(result), &if_resultisnumber);
+ Node* result_map = LoadMap(result);
+ Branch(WordEqual(result_map, HeapNumberMapConstant()), &if_resultisnumber,
+ &if_resultisnotnumber);
+
+ Bind(&if_resultisnumber);
+ {
+ // The ToPrimitive conversion already gave us a Number, so we're done.
+ var_result.Bind(result);
+ Goto(&end);
+ }
+
+ Bind(&if_resultisnotnumber);
+ {
+ // We now have a Primitive {result}, but it's not yet a Number.
+ var_input.Bind(result);
+ Goto(&loop);
+ }
+ }
+
+ Bind(&if_inputisother);
+ {
+ // The {input} is something else (i.e. Symbol or Simd128Value), let the
+ // runtime figure out the correct exception.
+ // Note: We cannot tail call to the runtime here, as js-to-wasm
+ // trampolines also use this code currently, and they declare all
+ // outgoing parameters as untagged, while we would push a tagged
+ // object here.
+ var_result.Bind(CallRuntime(Runtime::kToNumber, context, input));
+ Goto(&end);
+ }
+ }
+
+ Bind(&end);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::ToNumber(Node* context, Node* input) {
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label end(this);
+
+ Label not_smi(this, Label::kDeferred);
+ GotoUnless(WordIsSmi(input), &not_smi);
+ var_result.Bind(input);
+ Goto(&end);
+
+ Bind(&not_smi);
+ {
+ Label not_heap_number(this, Label::kDeferred);
+ Node* input_map = LoadMap(input);
+ GotoIf(Word32NotEqual(input_map, HeapNumberMapConstant()),
+ &not_heap_number);
+
+ var_result.Bind(input);
+ Goto(&end);
+
+ Bind(&not_heap_number);
+ {
+ var_result.Bind(NonNumberToNumber(context, input));
+ Goto(&end);
+ }
+ }
+
+ Bind(&end);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
+ ToIntegerTruncationMode mode) {
+ // We might need to loop once for ToNumber conversion.
+ Variable var_arg(this, MachineRepresentation::kTagged);
+ Label loop(this, &var_arg), out(this);
+ var_arg.Bind(input);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Shared entry points.
+ Label return_zero(this, Label::kDeferred);
+
+ // Load the current {arg} value.
+ Node* arg = var_arg.value();
+
+ // Check if {arg} is a Smi.
+ GotoIf(WordIsSmi(arg), &out);
+
+ // Check if {arg} is a HeapNumber.
+ Label if_argisheapnumber(this),
+ if_argisnotheapnumber(this, Label::kDeferred);
+ Branch(WordEqual(LoadMap(arg), HeapNumberMapConstant()),
+ &if_argisheapnumber, &if_argisnotheapnumber);
+
+ Bind(&if_argisheapnumber);
+ {
+ // Load the floating-point value of {arg}.
+ Node* arg_value = LoadHeapNumberValue(arg);
+
+ // Check if {arg} is NaN.
+ GotoUnless(Float64Equal(arg_value, arg_value), &return_zero);
+
+ // Truncate {arg} towards zero.
+ Node* value = Float64Trunc(arg_value);
+
+ if (mode == kTruncateMinusZero) {
+ // Truncate -0.0 to 0.
+ GotoIf(Float64Equal(value, Float64Constant(0.0)), &return_zero);
+ }
+
+ var_arg.Bind(ChangeFloat64ToTagged(value));
+ Goto(&out);
+ }
+
+ Bind(&if_argisnotheapnumber);
+ {
+ // Need to convert {arg} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_arg.Bind(CallStub(callable, context, arg));
+ Goto(&loop);
+ }
+
+ Bind(&return_zero);
+ var_arg.Bind(SmiConstant(Smi::FromInt(0)));
+ Goto(&out);
+ }
+
+ Bind(&out);
+ return var_arg.value();
+}
+
Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
uint32_t mask) {
return Word32Shr(Word32And(word32, Int32Constant(mask)),
@@ -2265,54 +3184,49 @@ void CodeStubAssembler::Use(Label* label) {
void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
Variable* var_index, Label* if_keyisunique,
Label* if_bailout) {
- DCHECK_EQ(MachineRepresentation::kWord32, var_index->rep());
+ DCHECK_EQ(MachineType::PointerRepresentation(), var_index->rep());
Comment("TryToName");
- Label if_keyissmi(this), if_keyisnotsmi(this);
- Branch(WordIsSmi(key), &if_keyissmi, &if_keyisnotsmi);
- Bind(&if_keyissmi);
- {
- // Negative smi keys are named properties. Handle in the runtime.
- GotoUnless(WordIsPositiveSmi(key), if_bailout);
-
- var_index->Bind(SmiToWord32(key));
- Goto(if_keyisindex);
- }
-
- Bind(&if_keyisnotsmi);
+ Label if_hascachedindex(this), if_keyisnotindex(this);
+ // Handle Smi and HeapNumber keys.
+ var_index->Bind(TryToIntptr(key, &if_keyisnotindex));
+ Goto(if_keyisindex);
+ Bind(&if_keyisnotindex);
Node* key_instance_type = LoadInstanceType(key);
// Symbols are unique.
GotoIf(Word32Equal(key_instance_type, Int32Constant(SYMBOL_TYPE)),
if_keyisunique);
-
- Label if_keyisinternalized(this);
- Node* bits =
- WordAnd(key_instance_type,
- Int32Constant(kIsNotStringMask | kIsNotInternalizedMask));
- Branch(Word32Equal(bits, Int32Constant(kStringTag | kInternalizedTag)),
- &if_keyisinternalized, if_bailout);
- Bind(&if_keyisinternalized);
-
- // Check whether the key is an array index passed in as string. Handle
- // uniform with smi keys if so.
- // TODO(verwaest): Also support non-internalized strings.
+ // Miss if |key| is not a String.
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ GotoUnless(IsStringInstanceType(key_instance_type), if_bailout);
+ // |key| is a String. Check if it has a cached array index.
Node* hash = LoadNameHashField(key);
- Node* bit = Word32And(hash, Int32Constant(Name::kIsNotArrayIndexMask));
- GotoIf(Word32NotEqual(bit, Int32Constant(0)), if_keyisunique);
- // Key is an index. Check if it is small enough to be encoded in the
- // hash_field. Handle too big array index in runtime.
- bit = Word32And(hash, Int32Constant(Name::kContainsCachedArrayIndexMask));
- GotoIf(Word32NotEqual(bit, Int32Constant(0)), if_bailout);
+ Node* contains_index =
+ Word32And(hash, Int32Constant(Name::kContainsCachedArrayIndexMask));
+ GotoIf(Word32Equal(contains_index, Int32Constant(0)), &if_hascachedindex);
+ // No cached array index. If the string knows that it contains an index,
+ // then it must be an uncacheable index. Handle this case in the runtime.
+ Node* not_an_index =
+ Word32And(hash, Int32Constant(Name::kIsNotArrayIndexMask));
+ GotoIf(Word32Equal(not_an_index, Int32Constant(0)), if_bailout);
+ // Finally, check if |key| is internalized.
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ Node* not_internalized =
+ Word32And(key_instance_type, Int32Constant(kIsNotInternalizedMask));
+ GotoIf(Word32NotEqual(not_internalized, Int32Constant(0)), if_bailout);
+ Goto(if_keyisunique);
+
+ Bind(&if_hascachedindex);
var_index->Bind(BitFieldDecode<Name::ArrayIndexValueBits>(hash));
Goto(if_keyisindex);
}
template <typename Dictionary>
Node* CodeStubAssembler::EntryToIndex(Node* entry, int field_index) {
- Node* entry_index = Int32Mul(entry, Int32Constant(Dictionary::kEntrySize));
- return Int32Add(entry_index,
- Int32Constant(Dictionary::kElementsStartIndex + field_index));
+ Node* entry_index = IntPtrMul(entry, IntPtrConstant(Dictionary::kEntrySize));
+ return IntPtrAdd(entry_index, IntPtrConstant(Dictionary::kElementsStartIndex +
+ field_index));
}
template <typename Dictionary>
@@ -2321,34 +3235,36 @@ void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
Variable* var_name_index,
Label* if_not_found,
int inlined_probes) {
- DCHECK_EQ(MachineRepresentation::kWord32, var_name_index->rep());
+ DCHECK_EQ(MachineType::PointerRepresentation(), var_name_index->rep());
Comment("NameDictionaryLookup");
- Node* capacity = LoadAndUntagToWord32FixedArrayElement(
- dictionary, Int32Constant(Dictionary::kCapacityIndex));
- Node* mask = Int32Sub(capacity, Int32Constant(1));
- Node* hash = LoadNameHash(unique_name);
+ Node* capacity = SmiUntag(LoadFixedArrayElement(
+ dictionary, IntPtrConstant(Dictionary::kCapacityIndex), 0,
+ INTPTR_PARAMETERS));
+ Node* mask = IntPtrSub(capacity, IntPtrConstant(1));
+ Node* hash = ChangeUint32ToWord(LoadNameHash(unique_name));
// See Dictionary::FirstProbe().
- Node* count = Int32Constant(0);
- Node* entry = Word32And(hash, mask);
+ Node* count = IntPtrConstant(0);
+ Node* entry = WordAnd(hash, mask);
for (int i = 0; i < inlined_probes; i++) {
Node* index = EntryToIndex<Dictionary>(entry);
var_name_index->Bind(index);
- Node* current = LoadFixedArrayElement(dictionary, index);
+ Node* current =
+ LoadFixedArrayElement(dictionary, index, 0, INTPTR_PARAMETERS);
GotoIf(WordEqual(current, unique_name), if_found);
// See Dictionary::NextProbe().
- count = Int32Constant(i + 1);
- entry = Word32And(Int32Add(entry, count), mask);
+ count = IntPtrConstant(i + 1);
+ entry = WordAnd(IntPtrAdd(entry, count), mask);
}
Node* undefined = UndefinedConstant();
- Variable var_count(this, MachineRepresentation::kWord32);
- Variable var_entry(this, MachineRepresentation::kWord32);
+ Variable var_count(this, MachineType::PointerRepresentation());
+ Variable var_entry(this, MachineType::PointerRepresentation());
Variable* loop_vars[] = {&var_count, &var_entry, var_name_index};
Label loop(this, 3, loop_vars);
var_count.Bind(count);
@@ -2362,13 +3278,14 @@ void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
Node* index = EntryToIndex<Dictionary>(entry);
var_name_index->Bind(index);
- Node* current = LoadFixedArrayElement(dictionary, index);
+ Node* current =
+ LoadFixedArrayElement(dictionary, index, 0, INTPTR_PARAMETERS);
GotoIf(WordEqual(current, undefined), if_not_found);
GotoIf(WordEqual(current, unique_name), if_found);
// See Dictionary::NextProbe().
- count = Int32Add(count, Int32Constant(1));
- entry = Word32And(Int32Add(entry, count), mask);
+ count = IntPtrAdd(count, IntPtrConstant(1));
+ entry = WordAnd(IntPtrAdd(entry, count), mask);
var_count.Bind(count);
var_entry.Bind(entry);
@@ -2397,34 +3314,36 @@ Node* CodeStubAssembler::ComputeIntegerHash(Node* key, Node* seed) {
}
template <typename Dictionary>
-void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary, Node* key,
+void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
+ Node* intptr_index,
Label* if_found,
Variable* var_entry,
Label* if_not_found) {
- DCHECK_EQ(MachineRepresentation::kWord32, var_entry->rep());
+ DCHECK_EQ(MachineType::PointerRepresentation(), var_entry->rep());
Comment("NumberDictionaryLookup");
- Node* capacity = LoadAndUntagToWord32FixedArrayElement(
- dictionary, Int32Constant(Dictionary::kCapacityIndex));
- Node* mask = Int32Sub(capacity, Int32Constant(1));
+ Node* capacity = SmiUntag(LoadFixedArrayElement(
+ dictionary, IntPtrConstant(Dictionary::kCapacityIndex), 0,
+ INTPTR_PARAMETERS));
+ Node* mask = IntPtrSub(capacity, IntPtrConstant(1));
- Node* seed;
+ Node* int32_seed;
if (Dictionary::ShapeT::UsesSeed) {
- seed = HashSeed();
+ int32_seed = HashSeed();
} else {
- seed = Int32Constant(kZeroHashSeed);
+ int32_seed = Int32Constant(kZeroHashSeed);
}
- Node* hash = ComputeIntegerHash(key, seed);
- Node* key_as_float64 = ChangeUint32ToFloat64(key);
+ Node* hash = ChangeUint32ToWord(ComputeIntegerHash(intptr_index, int32_seed));
+ Node* key_as_float64 = RoundIntPtrToFloat64(intptr_index);
// See Dictionary::FirstProbe().
- Node* count = Int32Constant(0);
- Node* entry = Word32And(hash, mask);
+ Node* count = IntPtrConstant(0);
+ Node* entry = WordAnd(hash, mask);
Node* undefined = UndefinedConstant();
Node* the_hole = TheHoleConstant();
- Variable var_count(this, MachineRepresentation::kWord32);
+ Variable var_count(this, MachineType::PointerRepresentation());
Variable* loop_vars[] = {&var_count, var_entry};
Label loop(this, 2, loop_vars);
var_count.Bind(count);
@@ -2436,7 +3355,8 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary, Node* key,
Node* entry = var_entry->value();
Node* index = EntryToIndex<Dictionary>(entry);
- Node* current = LoadFixedArrayElement(dictionary, index);
+ Node* current =
+ LoadFixedArrayElement(dictionary, index, 0, INTPTR_PARAMETERS);
GotoIf(WordEqual(current, undefined), if_not_found);
Label next_probe(this);
{
@@ -2444,8 +3364,8 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary, Node* key,
Branch(WordIsSmi(current), &if_currentissmi, &if_currentisnotsmi);
Bind(&if_currentissmi);
{
- Node* current_value = SmiToWord32(current);
- Branch(Word32Equal(current_value, key), if_found, &next_probe);
+ Node* current_value = SmiUntag(current);
+ Branch(WordEqual(current_value, intptr_index), if_found, &next_probe);
}
Bind(&if_currentisnotsmi);
{
@@ -2459,8 +3379,8 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary, Node* key,
Bind(&next_probe);
// See Dictionary::NextProbe().
- count = Int32Add(count, Int32Constant(1));
- entry = Word32And(Int32Add(entry, count), mask);
+ count = IntPtrAdd(count, IntPtrConstant(1));
+ entry = WordAnd(IntPtrAdd(entry, count), mask);
var_count.Bind(count);
var_entry->Bind(entry);
@@ -2468,13 +3388,39 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary, Node* key,
}
}
+void CodeStubAssembler::DescriptorLookupLinear(Node* unique_name,
+ Node* descriptors, Node* nof,
+ Label* if_found,
+ Variable* var_name_index,
+ Label* if_not_found) {
+ Variable var_descriptor(this, MachineType::PointerRepresentation());
+ Label loop(this, &var_descriptor);
+ var_descriptor.Bind(IntPtrConstant(0));
+ Goto(&loop);
+
+ Bind(&loop);
+ {
+ Node* index = var_descriptor.value();
+ Node* name_offset = IntPtrConstant(DescriptorArray::ToKeyIndex(0));
+ Node* factor = IntPtrConstant(DescriptorArray::kDescriptorSize);
+ GotoIf(WordEqual(index, nof), if_not_found);
+ Node* name_index = IntPtrAdd(name_offset, IntPtrMul(index, factor));
+ Node* candidate_name =
+ LoadFixedArrayElement(descriptors, name_index, 0, INTPTR_PARAMETERS);
+ var_name_index->Bind(name_index);
+ GotoIf(WordEqual(candidate_name, unique_name), if_found);
+ var_descriptor.Bind(IntPtrAdd(index, IntPtrConstant(1)));
+ Goto(&loop);
+ }
+}
+
void CodeStubAssembler::TryLookupProperty(
Node* object, Node* map, Node* instance_type, Node* unique_name,
Label* if_found_fast, Label* if_found_dict, Label* if_found_global,
Variable* var_meta_storage, Variable* var_name_index, Label* if_not_found,
Label* if_bailout) {
DCHECK_EQ(MachineRepresentation::kTagged, var_meta_storage->rep());
- DCHECK_EQ(MachineRepresentation::kWord32, var_name_index->rep());
+ DCHECK_EQ(MachineType::PointerRepresentation(), var_name_index->rep());
Label if_objectisspecial(this);
STATIC_ASSERT(JS_GLOBAL_OBJECT_TYPE <= LAST_SPECIAL_RECEIVER_TYPE);
@@ -2494,36 +3440,18 @@ void CodeStubAssembler::TryLookupProperty(
Bind(&if_isfastmap);
{
Comment("DescriptorArrayLookup");
- Node* nof = BitFieldDecode<Map::NumberOfOwnDescriptorsBits>(bit_field3);
+ Node* nof = BitFieldDecodeWord<Map::NumberOfOwnDescriptorsBits>(bit_field3);
// Bail out to the runtime for large numbers of own descriptors. The stub
// only does linear search, which becomes too expensive in that case.
{
static const int32_t kMaxLinear = 210;
- GotoIf(Int32GreaterThan(nof, Int32Constant(kMaxLinear)), if_bailout);
+ GotoIf(UintPtrGreaterThan(nof, IntPtrConstant(kMaxLinear)), if_bailout);
}
Node* descriptors = LoadMapDescriptors(map);
var_meta_storage->Bind(descriptors);
- Variable var_descriptor(this, MachineRepresentation::kWord32);
- Label loop(this, &var_descriptor);
- var_descriptor.Bind(Int32Constant(0));
- Goto(&loop);
- Bind(&loop);
- {
- Node* index = var_descriptor.value();
- Node* name_offset = Int32Constant(DescriptorArray::ToKeyIndex(0));
- Node* factor = Int32Constant(DescriptorArray::kDescriptorSize);
- GotoIf(Word32Equal(index, nof), if_not_found);
-
- Node* name_index = Int32Add(name_offset, Int32Mul(index, factor));
- Node* name = LoadFixedArrayElement(descriptors, name_index);
-
- var_name_index->Bind(name_index);
- GotoIf(WordEqual(name, unique_name), if_found_fast);
-
- var_descriptor.Bind(Int32Add(index, Int32Constant(1)));
- Goto(&loop);
- }
+ DescriptorLookupLinear(unique_name, descriptors, nof, if_found_fast,
+ var_name_index, if_not_found);
}
Bind(&if_isslowmap);
{
@@ -2562,7 +3490,7 @@ void CodeStubAssembler::TryHasOwnProperty(compiler::Node* object,
Label* if_bailout) {
Comment("TryHasOwnProperty");
Variable var_meta_storage(this, MachineRepresentation::kTagged);
- Variable var_name_index(this, MachineRepresentation::kWord32);
+ Variable var_name_index(this, MachineType::PointerRepresentation());
Label if_found_global(this);
TryLookupProperty(object, map, instance_type, unique_name, if_found, if_found,
@@ -2608,7 +3536,7 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
Bind(&if_in_field);
{
Node* field_index =
- BitFieldDecode<PropertyDetails::FieldIndexField>(details);
+ BitFieldDecodeWord<PropertyDetails::FieldIndexField>(details);
Node* representation =
BitFieldDecode<PropertyDetails::RepresentationField>(details);
@@ -2617,15 +3545,15 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
Label if_inobject(this), if_backing_store(this);
Variable var_double_value(this, MachineRepresentation::kFloat64);
Label rebox_double(this, &var_double_value);
- BranchIfInt32LessThan(field_index, inobject_properties, &if_inobject,
- &if_backing_store);
+ BranchIfUintPtrLessThan(field_index, inobject_properties, &if_inobject,
+ &if_backing_store);
Bind(&if_inobject);
{
Comment("if_inobject");
- Node* field_offset = ChangeInt32ToIntPtr(
- Int32Mul(Int32Sub(LoadMapInstanceSize(map),
- Int32Sub(inobject_properties, field_index)),
- Int32Constant(kPointerSize)));
+ Node* field_offset =
+ IntPtrMul(IntPtrSub(LoadMapInstanceSize(map),
+ IntPtrSub(inobject_properties, field_index)),
+ IntPtrConstant(kPointerSize));
Label if_double(this), if_tagged(this);
BranchIfWord32NotEqual(representation,
@@ -2652,7 +3580,7 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
{
Comment("if_backing_store");
Node* properties = LoadProperties(object);
- field_index = Int32Sub(field_index, inobject_properties);
+ field_index = IntPtrSub(field_index, inobject_properties);
Node* value = LoadFixedArrayElement(properties, field_index);
Label if_double(this), if_tagged(this);
@@ -2739,6 +3667,52 @@ void CodeStubAssembler::LoadPropertyFromGlobalDictionary(Node* dictionary,
Comment("] LoadPropertyFromGlobalDictionary");
}
+// |value| is the property backing store's contents, which is either a value
+// or an accessor pair, as specified by |details|.
+// Returns either the original value, or the result of the getter call.
+Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
+ Node* context, Node* receiver,
+ Label* if_bailout) {
+ Variable var_value(this, MachineRepresentation::kTagged);
+ var_value.Bind(value);
+ Label done(this);
+
+ Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
+ GotoIf(Word32Equal(kind, Int32Constant(kData)), &done);
+
+ // Accessor case.
+ {
+ Node* accessor_pair = value;
+ GotoIf(Word32Equal(LoadInstanceType(accessor_pair),
+ Int32Constant(ACCESSOR_INFO_TYPE)),
+ if_bailout);
+ AssertInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE);
+ Node* getter = LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
+ Node* getter_map = LoadMap(getter);
+ Node* instance_type = LoadMapInstanceType(getter_map);
+ // FunctionTemplateInfo getters are not supported yet.
+ GotoIf(
+ Word32Equal(instance_type, Int32Constant(FUNCTION_TEMPLATE_INFO_TYPE)),
+ if_bailout);
+
+ // Return undefined if the {getter} is not callable.
+ var_value.Bind(UndefinedConstant());
+ GotoIf(Word32Equal(Word32And(LoadMapBitField(getter_map),
+ Int32Constant(1 << Map::kIsCallable)),
+ Int32Constant(0)),
+ &done);
+
+ // Call the accessor.
+ Callable callable = CodeFactory::Call(isolate());
+ Node* result = CallJS(callable, context, getter, receiver);
+ var_value.Bind(result);
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return var_value.value();
+}
+
void CodeStubAssembler::TryGetOwnProperty(
Node* context, Node* receiver, Node* object, Node* map, Node* instance_type,
Node* unique_name, Label* if_found_value, Variable* var_value,
@@ -2747,7 +3721,7 @@ void CodeStubAssembler::TryGetOwnProperty(
Comment("TryGetOwnProperty");
Variable var_meta_storage(this, MachineRepresentation::kTagged);
- Variable var_entry(this, MachineRepresentation::kWord32);
+ Variable var_entry(this, MachineType::PointerRepresentation());
Label if_found_fast(this), if_found_dict(this), if_found_global(this);
@@ -2786,59 +3760,28 @@ void CodeStubAssembler::TryGetOwnProperty(
// Here we have details and value which could be an accessor.
Bind(&if_found);
{
- Node* details = var_details.value();
- Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
-
- Label if_accessor(this);
- Branch(Word32Equal(kind, Int32Constant(kData)), if_found_value,
- &if_accessor);
- Bind(&if_accessor);
- {
- Node* accessor_pair = var_value->value();
- GotoIf(Word32Equal(LoadInstanceType(accessor_pair),
- Int32Constant(ACCESSOR_INFO_TYPE)),
- if_bailout);
- AssertInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE);
- Node* getter =
- LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
- Node* getter_map = LoadMap(getter);
- Node* instance_type = LoadMapInstanceType(getter_map);
- // FunctionTemplateInfo getters are not supported yet.
- GotoIf(Word32Equal(instance_type,
- Int32Constant(FUNCTION_TEMPLATE_INFO_TYPE)),
- if_bailout);
-
- // Return undefined if the {getter} is not callable.
- var_value->Bind(UndefinedConstant());
- GotoIf(Word32Equal(Word32And(LoadMapBitField(getter_map),
- Int32Constant(1 << Map::kIsCallable)),
- Int32Constant(0)),
- if_found_value);
-
- // Call the accessor.
- Callable callable = CodeFactory::Call(isolate());
- Node* result = CallJS(callable, context, getter, receiver);
- var_value->Bind(result);
- Goto(if_found_value);
- }
+ Node* value = CallGetterIfAccessor(var_value->value(), var_details.value(),
+ context, receiver, if_bailout);
+ var_value->Bind(value);
+ Goto(if_found_value);
}
}
void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
- Node* instance_type, Node* index,
- Label* if_found, Label* if_not_found,
+ Node* instance_type,
+ Node* intptr_index, Label* if_found,
+ Label* if_not_found,
Label* if_bailout) {
// Handle special objects in runtime.
GotoIf(Int32LessThanOrEqual(instance_type,
Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
if_bailout);
- Node* bit_field2 = LoadMapBitField2(map);
- Node* elements_kind = BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+ Node* elements_kind = LoadMapElementsKind(map);
// TODO(verwaest): Support other elements kinds as well.
Label if_isobjectorsmi(this), if_isdouble(this), if_isdictionary(this),
- if_isfaststringwrapper(this), if_isslowstringwrapper(this);
+ if_isfaststringwrapper(this), if_isslowstringwrapper(this), if_oob(this);
// clang-format off
int32_t values[] = {
// Handled by {if_isobjectorsmi}.
@@ -2873,9 +3816,10 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
Node* elements = LoadElements(object);
Node* length = LoadAndUntagFixedArrayBaseLength(elements);
- GotoUnless(Uint32LessThan(index, length), if_not_found);
+ GotoUnless(UintPtrLessThan(intptr_index, length), &if_oob);
- Node* element = LoadFixedArrayElement(elements, index);
+ Node* element =
+ LoadFixedArrayElement(elements, intptr_index, 0, INTPTR_PARAMETERS);
Node* the_hole = TheHoleConstant();
Branch(WordEqual(element, the_hole), if_not_found, if_found);
}
@@ -2884,48 +3828,48 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
Node* elements = LoadElements(object);
Node* length = LoadAndUntagFixedArrayBaseLength(elements);
- GotoUnless(Uint32LessThan(index, length), if_not_found);
+ GotoUnless(UintPtrLessThan(intptr_index, length), &if_oob);
- if (kPointerSize == kDoubleSize) {
- Node* element =
- LoadFixedDoubleArrayElement(elements, index, MachineType::Uint64());
- Node* the_hole = Int64Constant(kHoleNanInt64);
- Branch(Word64Equal(element, the_hole), if_not_found, if_found);
- } else {
- Node* element_upper =
- LoadFixedDoubleArrayElement(elements, index, MachineType::Uint32(),
- kIeeeDoubleExponentWordOffset);
- Branch(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
- if_not_found, if_found);
- }
+ // Check if the element is a double hole, but don't load it.
+ LoadFixedDoubleArrayElement(elements, intptr_index, MachineType::None(), 0,
+ INTPTR_PARAMETERS, if_not_found);
+ Goto(if_found);
}
Bind(&if_isdictionary);
{
- Variable var_entry(this, MachineRepresentation::kWord32);
+ // Negative keys must be converted to property names.
+ GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), if_bailout);
+
+ Variable var_entry(this, MachineType::PointerRepresentation());
Node* elements = LoadElements(object);
- NumberDictionaryLookup<SeededNumberDictionary>(elements, index, if_found,
- &var_entry, if_not_found);
+ NumberDictionaryLookup<SeededNumberDictionary>(
+ elements, intptr_index, if_found, &var_entry, if_not_found);
}
Bind(&if_isfaststringwrapper);
{
AssertInstanceType(object, JS_VALUE_TYPE);
Node* string = LoadJSValueValue(object);
- Assert(Int32LessThan(LoadInstanceType(string),
- Int32Constant(FIRST_NONSTRING_TYPE)));
+ Assert(IsStringInstanceType(LoadInstanceType(string)));
Node* length = LoadStringLength(string);
- GotoIf(Uint32LessThan(index, SmiToWord32(length)), if_found);
+ GotoIf(UintPtrLessThan(intptr_index, SmiUntag(length)), if_found);
Goto(&if_isobjectorsmi);
}
Bind(&if_isslowstringwrapper);
{
AssertInstanceType(object, JS_VALUE_TYPE);
Node* string = LoadJSValueValue(object);
- Assert(Int32LessThan(LoadInstanceType(string),
- Int32Constant(FIRST_NONSTRING_TYPE)));
+ Assert(IsStringInstanceType(LoadInstanceType(string)));
Node* length = LoadStringLength(string);
- GotoIf(Uint32LessThan(index, SmiToWord32(length)), if_found);
+ GotoIf(UintPtrLessThan(intptr_index, SmiUntag(length)), if_found);
Goto(&if_isdictionary);
}
+ Bind(&if_oob);
+ {
+ // Positive OOB indices mean "not found", negative indices must be
+ // converted to property names.
+ GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), if_bailout);
+ Goto(if_not_found);
+ }
}
// Instantiate template methods to workaround GCC compilation issue.
@@ -2955,7 +3899,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
Bind(&if_objectisreceiver);
}
- Variable var_index(this, MachineRepresentation::kWord32);
+ Variable var_index(this, MachineType::PointerRepresentation());
Label if_keyisindex(this), if_iskeyunique(this);
TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, if_bailout);
@@ -3183,19 +4127,22 @@ compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
ElementsKind kind,
ParameterMode mode,
int base_size) {
- bool is_double = IsFastDoubleElementsKind(kind);
- int element_size_shift = is_double ? kDoubleSizeLog2 : kPointerSizeLog2;
+ int element_size_shift = ElementsKindToShiftSize(kind);
int element_size = 1 << element_size_shift;
int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
- int32_t index = 0;
+ intptr_t index = 0;
bool constant_index = false;
if (mode == SMI_PARAMETERS) {
element_size_shift -= kSmiShiftBits;
- intptr_t temp = 0;
- constant_index = ToIntPtrConstant(index_node, temp);
- index = temp >> kSmiShiftBits;
+ constant_index = ToIntPtrConstant(index_node, index);
+ index = index >> kSmiShiftBits;
+ } else if (mode == INTEGER_PARAMETERS) {
+ int32_t temp = 0;
+ constant_index = ToInt32Constant(index_node, temp);
+ index = static_cast<intptr_t>(temp);
} else {
- constant_index = ToInt32Constant(index_node, index);
+ DCHECK(mode == INTPTR_PARAMETERS);
+ constant_index = ToIntPtrConstant(index_node, index);
}
if (constant_index) {
return IntPtrConstant(base_size + element_size * index);
@@ -3225,32 +4172,16 @@ compiler::Node* CodeStubAssembler::LoadTypeFeedbackVectorForStub() {
void CodeStubAssembler::UpdateFeedback(compiler::Node* feedback,
compiler::Node* type_feedback_vector,
compiler::Node* slot_id) {
- Label combine_feedback(this), record_feedback(this), end(this);
-
+ // This method is used for binary op and compare feedback. These
+ // vector nodes are initialized with a smi 0, so we can simply OR
+ // our new feedback in place.
+ // TODO(interpreter): Consider passing the feedback as Smi already to avoid
+ // the tagging completely.
Node* previous_feedback =
LoadFixedArrayElement(type_feedback_vector, slot_id);
- Node* is_uninitialized = WordEqual(
- previous_feedback,
- HeapConstant(TypeFeedbackVector::UninitializedSentinel(isolate())));
- BranchIf(is_uninitialized, &record_feedback, &combine_feedback);
-
- Bind(&record_feedback);
- {
- StoreFixedArrayElement(type_feedback_vector, slot_id, SmiTag(feedback),
- SKIP_WRITE_BARRIER);
- Goto(&end);
- }
-
- Bind(&combine_feedback);
- {
- Node* untagged_previous_feedback = SmiUntag(previous_feedback);
- Node* combined_feedback = Word32Or(untagged_previous_feedback, feedback);
- StoreFixedArrayElement(type_feedback_vector, slot_id,
- SmiTag(combined_feedback), SKIP_WRITE_BARRIER);
- Goto(&end);
- }
-
- Bind(&end);
+ Node* combined_feedback = SmiOr(previous_feedback, SmiFromWord32(feedback));
+ StoreFixedArrayElement(type_feedback_vector, slot_id, combined_feedback,
+ SKIP_WRITE_BARRIER);
}
compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
@@ -3275,23 +4206,23 @@ compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
}
compiler::Node* CodeStubAssembler::TryMonomorphicCase(
- const LoadICParameters* p, compiler::Node* receiver_map, Label* if_handler,
- Variable* var_handler, Label* if_miss) {
+ compiler::Node* slot, compiler::Node* vector, compiler::Node* receiver_map,
+ Label* if_handler, Variable* var_handler, Label* if_miss) {
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
// TODO(ishell): add helper class that hides offset computations for a series
// of loads.
int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
- Node* offset = ElementOffsetFromIndex(p->slot, FAST_HOLEY_ELEMENTS,
+ Node* offset = ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS,
SMI_PARAMETERS, header_size);
- Node* feedback = Load(MachineType::AnyTagged(), p->vector, offset);
+ Node* feedback = Load(MachineType::AnyTagged(), vector, offset);
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
GotoUnless(WordEqual(receiver_map, LoadWeakCellValue(feedback)), if_miss);
- Node* handler = Load(MachineType::AnyTagged(), p->vector,
+ Node* handler = Load(MachineType::AnyTagged(), vector,
IntPtrAdd(offset, IntPtrConstant(kPointerSize)));
var_handler->Bind(handler);
@@ -3300,9 +4231,8 @@ compiler::Node* CodeStubAssembler::TryMonomorphicCase(
}
void CodeStubAssembler::HandlePolymorphicCase(
- const LoadICParameters* p, compiler::Node* receiver_map,
- compiler::Node* feedback, Label* if_handler, Variable* var_handler,
- Label* if_miss, int unroll_count) {
+ compiler::Node* receiver_map, compiler::Node* feedback, Label* if_handler,
+ Variable* var_handler, Label* if_miss, int unroll_count) {
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
// Iterate {feedback} array.
@@ -3310,13 +4240,13 @@ void CodeStubAssembler::HandlePolymorphicCase(
for (int i = 0; i < unroll_count; i++) {
Label next_entry(this);
- Node* cached_map = LoadWeakCellValue(
- LoadFixedArrayElement(feedback, Int32Constant(i * kEntrySize)));
+ Node* cached_map = LoadWeakCellValue(LoadFixedArrayElement(
+ feedback, IntPtrConstant(i * kEntrySize), 0, INTPTR_PARAMETERS));
GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
// Found, now call handler.
- Node* handler =
- LoadFixedArrayElement(feedback, Int32Constant(i * kEntrySize + 1));
+ Node* handler = LoadFixedArrayElement(
+ feedback, IntPtrConstant(i * kEntrySize + 1), 0, INTPTR_PARAMETERS);
var_handler->Bind(handler);
Goto(if_handler);
@@ -3325,28 +4255,29 @@ void CodeStubAssembler::HandlePolymorphicCase(
Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
// Loop from {unroll_count}*kEntrySize to {length}.
- Variable var_index(this, MachineRepresentation::kWord32);
+ Variable var_index(this, MachineType::PointerRepresentation());
Label loop(this, &var_index);
- var_index.Bind(Int32Constant(unroll_count * kEntrySize));
+ var_index.Bind(IntPtrConstant(unroll_count * kEntrySize));
Goto(&loop);
Bind(&loop);
{
Node* index = var_index.value();
- GotoIf(Int32GreaterThanOrEqual(index, length), if_miss);
+ GotoIf(UintPtrGreaterThanOrEqual(index, length), if_miss);
- Node* cached_map =
- LoadWeakCellValue(LoadFixedArrayElement(feedback, index));
+ Node* cached_map = LoadWeakCellValue(
+ LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
Label next_entry(this);
GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
// Found, now call handler.
- Node* handler = LoadFixedArrayElement(feedback, index, kPointerSize);
+ Node* handler =
+ LoadFixedArrayElement(feedback, index, kPointerSize, INTPTR_PARAMETERS);
var_handler->Bind(handler);
Goto(if_handler);
Bind(&next_entry);
- var_index.Bind(Int32Add(index, Int32Constant(kEntrySize)));
+ var_index.Bind(IntPtrAdd(index, IntPtrConstant(kEntrySize)));
Goto(&loop);
}
}
@@ -3357,7 +4288,7 @@ compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
// Compute the hash of the name (use entire hash field).
Node* hash_field = LoadNameHashField(name);
- Assert(WordEqual(
+ Assert(Word32Equal(
Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
Int32Constant(0)));
@@ -3369,7 +4300,7 @@ compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
hash = Word32Xor(hash, Int32Constant(StubCache::kPrimaryMagic));
uint32_t mask = (StubCache::kPrimaryTableSize - 1)
<< StubCache::kCacheIndexShift;
- return Word32And(hash, Int32Constant(mask));
+ return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
}
compiler::Node* CodeStubAssembler::StubCacheSecondaryOffset(
@@ -3381,7 +4312,7 @@ compiler::Node* CodeStubAssembler::StubCacheSecondaryOffset(
hash = Int32Add(hash, Int32Constant(StubCache::kSecondaryMagic));
int32_t mask = (StubCache::kSecondaryTableSize - 1)
<< StubCache::kCacheIndexShift;
- return Word32And(hash, Int32Constant(mask));
+ return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
}
enum CodeStubAssembler::StubCacheTable : int {
@@ -3406,7 +4337,7 @@ void CodeStubAssembler::TryProbeStubCacheTable(
// The {table_offset} holds the entry offset times four (due to masking
// and shifting optimizations).
const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
- entry_offset = Int32Mul(entry_offset, Int32Constant(kMultiplier));
+ entry_offset = IntPtrMul(entry_offset, IntPtrConstant(kMultiplier));
// Check that the key in the entry matches the name.
Node* key_base =
@@ -3419,13 +4350,13 @@ void CodeStubAssembler::TryProbeStubCacheTable(
stub_cache->key_reference(table).address());
Node* entry_map =
Load(MachineType::Pointer(), key_base,
- Int32Add(entry_offset, Int32Constant(kPointerSize * 2)));
+ IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize * 2)));
GotoIf(WordNotEqual(map, entry_map), if_miss);
DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
stub_cache->key_reference(table).address());
Node* code = Load(MachineType::Pointer(), key_base,
- Int32Add(entry_offset, Int32Constant(kPointerSize)));
+ IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
// We found the handler.
var_handler->Bind(code);
@@ -3489,41 +4420,43 @@ Node* CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
return var_intptr_key.value();
}
-// |is_jsarray| should be non-zero for JSArrays.
-void CodeStubAssembler::EmitBoundsCheck(Node* object, Node* elements,
- Node* intptr_key, Node* is_jsarray,
- Label* miss) {
- Variable var_length(this, MachineRepresentation::kTagged);
+void CodeStubAssembler::EmitFastElementsBoundsCheck(Node* object,
+ Node* elements,
+ Node* intptr_index,
+ Node* is_jsarray_condition,
+ Label* miss) {
+ Variable var_length(this, MachineType::PointerRepresentation());
Label if_array(this), length_loaded(this, &var_length);
- GotoUnless(WordEqual(is_jsarray, IntPtrConstant(0)), &if_array);
+ GotoIf(is_jsarray_condition, &if_array);
{
var_length.Bind(SmiUntag(LoadFixedArrayBaseLength(elements)));
Goto(&length_loaded);
}
Bind(&if_array);
{
- var_length.Bind(SmiUntag(LoadObjectField(object, JSArray::kLengthOffset)));
+ var_length.Bind(SmiUntag(LoadJSArrayLength(object)));
Goto(&length_loaded);
}
Bind(&length_loaded);
- GotoUnless(UintPtrLessThan(intptr_key, var_length.value()), miss);
+ GotoUnless(UintPtrLessThan(intptr_index, var_length.value()), miss);
}
-// |key| should be untagged (int32).
void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
- Node* elements_kind, Node* key,
+ Node* elements_kind, Node* intptr_index,
+ Node* is_jsarray_condition,
Label* if_hole, Label* rebox_double,
Variable* var_double_value,
- Label* miss) {
+ Label* unimplemented_elements_kind,
+ Label* out_of_bounds, Label* miss) {
Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
- if_fast_double(this), if_fast_holey_double(this),
- unimplemented_elements_kind(this);
- STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+ if_fast_double(this), if_fast_holey_double(this), if_nonfast(this),
+ if_dictionary(this), unreachable(this);
GotoIf(
- IntPtrGreaterThanOrEqual(
- elements_kind, IntPtrConstant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
- &if_typed_array);
+ IntPtrGreaterThan(elements_kind, IntPtrConstant(LAST_FAST_ELEMENTS_KIND)),
+ &if_nonfast);
+ EmitFastElementsBoundsCheck(object, elements, intptr_index,
+ is_jsarray_condition, out_of_bounds);
int32_t kinds[] = {// Handled by if_fast_packed.
FAST_SMI_ELEMENTS, FAST_ELEMENTS,
// Handled by if_fast_holey.
@@ -3540,28 +4473,20 @@ void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
&if_fast_double,
// FAST_HOLEY_DOUBLE_ELEMENTS
&if_fast_holey_double};
- Switch(elements_kind, &unimplemented_elements_kind, kinds, labels,
+ Switch(elements_kind, unimplemented_elements_kind, kinds, labels,
arraysize(kinds));
- Bind(&unimplemented_elements_kind);
- {
- // Crash if we get here.
- DebugBreak();
- Goto(miss);
- }
Bind(&if_fast_packed);
{
Comment("fast packed elements");
- // TODO(jkummerow): The Load*Element helpers add movsxlq instructions
- // on x64 which we don't need here, because |key| is an IntPtr already.
- // Do something about that.
- Return(LoadFixedArrayElement(elements, key));
+ Return(LoadFixedArrayElement(elements, intptr_index, 0, INTPTR_PARAMETERS));
}
Bind(&if_fast_holey);
{
Comment("fast holey elements");
- Node* element = LoadFixedArrayElement(elements, key);
+ Node* element =
+ LoadFixedArrayElement(elements, intptr_index, 0, INTPTR_PARAMETERS);
GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
Return(element);
}
@@ -3569,30 +4494,56 @@ void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
Bind(&if_fast_double);
{
Comment("packed double elements");
- var_double_value->Bind(
- LoadFixedDoubleArrayElement(elements, key, MachineType::Float64()));
+ var_double_value->Bind(LoadFixedDoubleArrayElement(
+ elements, intptr_index, MachineType::Float64(), 0, INTPTR_PARAMETERS));
Goto(rebox_double);
}
Bind(&if_fast_holey_double);
{
Comment("holey double elements");
- if (kPointerSize == kDoubleSize) {
- Node* raw_element =
- LoadFixedDoubleArrayElement(elements, key, MachineType::Uint64());
- Node* the_hole = Int64Constant(kHoleNanInt64);
- GotoIf(Word64Equal(raw_element, the_hole), if_hole);
- } else {
- Node* element_upper = LoadFixedDoubleArrayElement(
- elements, key, MachineType::Uint32(), kIeeeDoubleExponentWordOffset);
- GotoIf(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
- if_hole);
- }
- var_double_value->Bind(
- LoadFixedDoubleArrayElement(elements, key, MachineType::Float64()));
+ Node* value = LoadFixedDoubleArrayElement(elements, intptr_index,
+ MachineType::Float64(), 0,
+ INTPTR_PARAMETERS, if_hole);
+ var_double_value->Bind(value);
Goto(rebox_double);
}
+ Bind(&if_nonfast);
+ {
+ STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+ GotoIf(IntPtrGreaterThanOrEqual(
+ elements_kind,
+ IntPtrConstant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
+ &if_typed_array);
+ GotoIf(IntPtrEqual(elements_kind, IntPtrConstant(DICTIONARY_ELEMENTS)),
+ &if_dictionary);
+ Goto(unimplemented_elements_kind);
+ }
+
+ Bind(&if_dictionary);
+ {
+ Comment("dictionary elements");
+ GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), out_of_bounds);
+ Variable var_entry(this, MachineType::PointerRepresentation());
+ Label if_found(this);
+ NumberDictionaryLookup<SeededNumberDictionary>(
+ elements, intptr_index, &if_found, &var_entry, if_hole);
+ Bind(&if_found);
+ // Check that the value is a data property.
+ Node* details_index = EntryToIndex<SeededNumberDictionary>(
+ var_entry.value(), SeededNumberDictionary::kEntryDetailsIndex);
+ Node* details = SmiToWord32(
+ LoadFixedArrayElement(elements, details_index, 0, INTPTR_PARAMETERS));
+ Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
+ // TODO(jkummerow): Support accessors without missing?
+ GotoUnless(Word32Equal(kind, Int32Constant(kData)), miss);
+ // Finally, load the value.
+ Node* value_index = EntryToIndex<SeededNumberDictionary>(
+ var_entry.value(), SeededNumberDictionary::kEntryValueIndex);
+ Return(LoadFixedArrayElement(elements, value_index, 0, INTPTR_PARAMETERS));
+ }
+
Bind(&if_typed_array);
{
Comment("typed elements");
@@ -3603,6 +4554,12 @@ void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
Node* neutered_bit =
Word32And(bitfield, Int32Constant(JSArrayBuffer::WasNeutered::kMask));
GotoUnless(Word32Equal(neutered_bit, Int32Constant(0)), miss);
+
+ // Bounds check.
+ Node* length =
+ SmiUntag(LoadObjectField(object, JSTypedArray::kLengthOffset));
+ GotoUnless(UintPtrLessThan(intptr_index, length), out_of_bounds);
+
// Backing store = external_pointer + base_pointer.
Node* external_pointer =
LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
@@ -3632,43 +4589,43 @@ void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
Bind(&uint8_elements);
{
Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
- Return(SmiTag(Load(MachineType::Uint8(), backing_store, key)));
+ Return(SmiTag(Load(MachineType::Uint8(), backing_store, intptr_index)));
}
Bind(&int8_elements);
{
Comment("INT8_ELEMENTS");
- Return(SmiTag(Load(MachineType::Int8(), backing_store, key)));
+ Return(SmiTag(Load(MachineType::Int8(), backing_store, intptr_index)));
}
Bind(&uint16_elements);
{
Comment("UINT16_ELEMENTS");
- Node* index = WordShl(key, IntPtrConstant(1));
+ Node* index = WordShl(intptr_index, IntPtrConstant(1));
Return(SmiTag(Load(MachineType::Uint16(), backing_store, index)));
}
Bind(&int16_elements);
{
Comment("INT16_ELEMENTS");
- Node* index = WordShl(key, IntPtrConstant(1));
+ Node* index = WordShl(intptr_index, IntPtrConstant(1));
Return(SmiTag(Load(MachineType::Int16(), backing_store, index)));
}
Bind(&uint32_elements);
{
Comment("UINT32_ELEMENTS");
- Node* index = WordShl(key, IntPtrConstant(2));
+ Node* index = WordShl(intptr_index, IntPtrConstant(2));
Node* element = Load(MachineType::Uint32(), backing_store, index);
Return(ChangeUint32ToTagged(element));
}
Bind(&int32_elements);
{
Comment("INT32_ELEMENTS");
- Node* index = WordShl(key, IntPtrConstant(2));
+ Node* index = WordShl(intptr_index, IntPtrConstant(2));
Node* element = Load(MachineType::Int32(), backing_store, index);
Return(ChangeInt32ToTagged(element));
}
Bind(&float32_elements);
{
Comment("FLOAT32_ELEMENTS");
- Node* index = WordShl(key, IntPtrConstant(2));
+ Node* index = WordShl(intptr_index, IntPtrConstant(2));
Node* element = Load(MachineType::Float32(), backing_store, index);
var_double_value->Bind(ChangeFloat32ToFloat64(element));
Goto(rebox_double);
@@ -3676,7 +4633,7 @@ void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
Bind(&float64_elements);
{
Comment("FLOAT64_ELEMENTS");
- Node* index = WordShl(key, IntPtrConstant(3));
+ Node* index = WordShl(intptr_index, IntPtrConstant(3));
Node* element = Load(MachineType::Float64(), backing_store, index);
var_double_value->Bind(element);
Goto(rebox_double);
@@ -3707,17 +4664,26 @@ void CodeStubAssembler::HandleLoadICHandlerCase(
&property);
Comment("element_load");
- Node* key = TryToIntptr(p->name, miss);
+ Node* intptr_index = TryToIntptr(p->name, miss);
Node* elements = LoadElements(p->receiver);
Node* is_jsarray =
WordAnd(handler_word, IntPtrConstant(KeyedLoadIsJsArray::kMask));
- EmitBoundsCheck(p->receiver, elements, key, is_jsarray, miss);
- Label if_hole(this);
-
+ Node* is_jsarray_condition = WordNotEqual(is_jsarray, IntPtrConstant(0));
Node* elements_kind = BitFieldDecode<KeyedLoadElementsKind>(handler_word);
-
- EmitElementLoad(p->receiver, elements, elements_kind, key, &if_hole,
- &rebox_double, &var_double_value, miss);
+ Label if_hole(this), unimplemented_elements_kind(this);
+ Label* out_of_bounds = miss;
+ EmitElementLoad(p->receiver, elements, elements_kind, intptr_index,
+ is_jsarray_condition, &if_hole, &rebox_double,
+ &var_double_value, &unimplemented_elements_kind,
+ out_of_bounds, miss);
+
+ Bind(&unimplemented_elements_kind);
+ {
+ // Smi handlers should only be installed for supported elements kinds.
+ // Crash if we get here.
+ DebugBreak();
+ Goto(miss);
+ }
Bind(&if_hole);
{
@@ -3799,8 +4765,9 @@ void CodeStubAssembler::LoadIC(const LoadICParameters* p) {
Node* receiver_map = LoadReceiverMap(p->receiver);
// Check monomorphic case.
- Node* feedback = TryMonomorphicCase(p, receiver_map, &if_handler,
- &var_handler, &try_polymorphic);
+ Node* feedback =
+ TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ &var_handler, &try_polymorphic);
Bind(&if_handler);
{
HandleLoadICHandlerCase(p, var_handler.value(), &miss);
@@ -3810,10 +4777,9 @@ void CodeStubAssembler::LoadIC(const LoadICParameters* p) {
{
// Check polymorphic case.
Comment("LoadIC_try_polymorphic");
- GotoUnless(
- WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
- &try_megamorphic);
- HandlePolymorphicCase(p, receiver_map, feedback, &if_handler, &var_handler,
+ GotoUnless(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
+ &try_megamorphic);
+ HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
&miss, 2);
}
@@ -3845,8 +4811,9 @@ void CodeStubAssembler::KeyedLoadIC(const LoadICParameters* p) {
Node* receiver_map = LoadReceiverMap(p->receiver);
// Check monomorphic case.
- Node* feedback = TryMonomorphicCase(p, receiver_map, &if_handler,
- &var_handler, &try_polymorphic);
+ Node* feedback =
+ TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ &var_handler, &try_polymorphic);
Bind(&if_handler);
{
HandleLoadICHandlerCase(p, var_handler.value(), &miss, kSupportElements);
@@ -3856,10 +4823,9 @@ void CodeStubAssembler::KeyedLoadIC(const LoadICParameters* p) {
{
// Check polymorphic case.
Comment("KeyedLoadIC_try_polymorphic");
- GotoUnless(
- WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
- &try_megamorphic);
- HandlePolymorphicCase(p, receiver_map, feedback, &if_handler, &var_handler,
+ GotoUnless(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
+ &try_megamorphic);
+ HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
&miss, 2);
}
@@ -3885,8 +4851,8 @@ void CodeStubAssembler::KeyedLoadIC(const LoadICParameters* p) {
p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
- HandlePolymorphicCase(p, receiver_map, array, &if_handler, &var_handler,
- &miss, 1);
+ HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
+ 1);
}
Bind(&miss);
{
@@ -3896,6 +4862,210 @@ void CodeStubAssembler::KeyedLoadIC(const LoadICParameters* p) {
}
}
+void CodeStubAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
+ Variable var_index(this, MachineType::PointerRepresentation());
+ Variable var_details(this, MachineRepresentation::kWord32);
+ Variable var_value(this, MachineRepresentation::kTagged);
+ Label if_index(this), if_unique_name(this), if_element_hole(this),
+ if_oob(this), slow(this), stub_cache_miss(this),
+ if_property_dictionary(this), if_found_on_receiver(this);
+
+ Node* receiver = p->receiver;
+ GotoIf(WordIsSmi(receiver), &slow);
+ Node* receiver_map = LoadMap(receiver);
+ Node* instance_type = LoadMapInstanceType(receiver_map);
+ // Receivers requiring non-standard element accesses (interceptors, access
+ // checks, strings and string wrappers, proxies) are handled in the runtime.
+ GotoIf(Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+ &slow);
+
+ Node* key = p->name;
+ TryToName(key, &if_index, &var_index, &if_unique_name, &slow);
+
+ Bind(&if_index);
+ {
+ Comment("integer index");
+ Node* index = var_index.value();
+ Node* elements = LoadElements(receiver);
+ Node* elements_kind = LoadMapElementsKind(receiver_map);
+ Node* is_jsarray_condition =
+ Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE));
+ Variable var_double_value(this, MachineRepresentation::kFloat64);
+ Label rebox_double(this, &var_double_value);
+
+ // Unimplemented elements kinds fall back to a runtime call.
+ Label* unimplemented_elements_kind = &slow;
+ IncrementCounter(isolate()->counters()->ic_keyed_load_generic_smi(), 1);
+ EmitElementLoad(receiver, elements, elements_kind, index,
+ is_jsarray_condition, &if_element_hole, &rebox_double,
+ &var_double_value, unimplemented_elements_kind, &if_oob,
+ &slow);
+
+ Bind(&rebox_double);
+ Return(AllocateHeapNumberWithValue(var_double_value.value()));
+ }
+
+ Bind(&if_oob);
+ {
+ Comment("out of bounds");
+ Node* index = var_index.value();
+ // Negative keys can't take the fast OOB path.
+ GotoIf(IntPtrLessThan(index, IntPtrConstant(0)), &slow);
+ // Positive OOB indices are effectively the same as hole loads.
+ Goto(&if_element_hole);
+ }
+
+ Bind(&if_element_hole);
+ {
+ Comment("found the hole");
+ Label return_undefined(this);
+ BranchIfPrototypesHaveNoElements(receiver_map, &return_undefined, &slow);
+
+ Bind(&return_undefined);
+ Return(UndefinedConstant());
+ }
+
+ Node* properties = nullptr;
+ Bind(&if_unique_name);
+ {
+ Comment("key is unique name");
+ // Check if the receiver has fast or slow properties.
+ properties = LoadProperties(receiver);
+ Node* properties_map = LoadMap(properties);
+ GotoIf(WordEqual(properties_map, LoadRoot(Heap::kHashTableMapRootIndex)),
+ &if_property_dictionary);
+
+ // Try looking up the property on the receiver; if unsuccessful, look
+ // for a handler in the stub cache.
+ Comment("DescriptorArray lookup");
+
+ // Skip linear search if there are too many descriptors.
+ // TODO(jkummerow): Consider implementing binary search.
+ // See also TryLookupProperty() which has the same limitation.
+ const int32_t kMaxLinear = 210;
+ Label stub_cache(this);
+ Node* bitfield3 = LoadMapBitField3(receiver_map);
+ Node* nof = BitFieldDecodeWord<Map::NumberOfOwnDescriptorsBits>(bitfield3);
+ GotoIf(UintPtrGreaterThan(nof, IntPtrConstant(kMaxLinear)), &stub_cache);
+ Node* descriptors = LoadMapDescriptors(receiver_map);
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ Label if_descriptor_found(this);
+ DescriptorLookupLinear(key, descriptors, nof, &if_descriptor_found,
+ &var_name_index, &stub_cache);
+
+ Bind(&if_descriptor_found);
+ {
+ LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
+ var_name_index.value(), &var_details,
+ &var_value);
+ Goto(&if_found_on_receiver);
+ }
+
+ Bind(&stub_cache);
+ {
+ Comment("stub cache probe for fast property load");
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ Label found_handler(this, &var_handler), stub_cache_miss(this);
+ TryProbeStubCache(isolate()->load_stub_cache(), receiver, key,
+ &found_handler, &var_handler, &stub_cache_miss);
+ Bind(&found_handler);
+ { HandleLoadICHandlerCase(p, var_handler.value(), &slow); }
+
+ Bind(&stub_cache_miss);
+ {
+ Comment("KeyedLoadGeneric_miss");
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
+ p->name, p->slot, p->vector);
+ }
+ }
+ }
+
+ Bind(&if_property_dictionary);
+ {
+ Comment("dictionary property load");
+ // We checked for LAST_CUSTOM_ELEMENTS_RECEIVER before, which rules out
+ // seeing global objects here (which would need special handling).
+
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ Label dictionary_found(this, &var_name_index);
+ NameDictionaryLookup<NameDictionary>(properties, key, &dictionary_found,
+ &var_name_index, &slow);
+ Bind(&dictionary_found);
+ {
+ LoadPropertyFromNameDictionary(properties, var_name_index.value(),
+ &var_details, &var_value);
+ Goto(&if_found_on_receiver);
+ }
+ }
+
+ Bind(&if_found_on_receiver);
+ {
+ Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
+ p->context, receiver, &slow);
+ IncrementCounter(isolate()->counters()->ic_keyed_load_generic_symbol(), 1);
+ Return(value);
+ }
+
+ Bind(&slow);
+ {
+ Comment("KeyedLoadGeneric_slow");
+ IncrementCounter(isolate()->counters()->ic_keyed_load_generic_slow(), 1);
+ // TODO(jkummerow): Should we use the GetProperty TF stub instead?
+ TailCallRuntime(Runtime::kKeyedGetProperty, p->context, p->receiver,
+ p->name);
+ }
+}
+
+void CodeStubAssembler::StoreIC(const StoreICParameters* p) {
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ // TODO(ishell): defer blocks when it works.
+ Label if_handler(this, &var_handler), try_polymorphic(this),
+ try_megamorphic(this /*, Label::kDeferred*/),
+ miss(this /*, Label::kDeferred*/);
+
+ Node* receiver_map = LoadReceiverMap(p->receiver);
+
+ // Check monomorphic case.
+ Node* feedback =
+ TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ &var_handler, &try_polymorphic);
+ Bind(&if_handler);
+ {
+ Comment("StoreIC_if_handler");
+ StoreWithVectorDescriptor descriptor(isolate());
+ TailCallStub(descriptor, var_handler.value(), p->context, p->receiver,
+ p->name, p->value, p->slot, p->vector);
+ }
+
+ Bind(&try_polymorphic);
+ {
+ // Check polymorphic case.
+ Comment("StoreIC_try_polymorphic");
+ GotoUnless(
+ WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+ &try_megamorphic);
+ HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
+ &miss, 2);
+ }
+
+ Bind(&try_megamorphic);
+ {
+ // Check megamorphic case.
+ GotoUnless(
+ WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &miss);
+
+ TryProbeStubCache(isolate()->store_stub_cache(), p->receiver, p->name,
+ &if_handler, &var_handler, &miss);
+ }
+ Bind(&miss);
+ {
+ TailCallRuntime(Runtime::kStoreIC_Miss, p->context, p->value, p->slot,
+ p->vector, p->receiver, p->name);
+ }
+}
+
void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
Label try_handler(this), miss(this);
Node* weak_cell =
@@ -3921,8 +5091,8 @@ void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
AssertInstanceType(handler, CODE_TYPE);
LoadWithVectorDescriptor descriptor(isolate());
Node* native_context = LoadNativeContext(p->context);
- Node* receiver = LoadFixedArrayElement(
- native_context, Int32Constant(Context::EXTENSION_INDEX));
+ Node* receiver =
+ LoadContextElement(native_context, Context::EXTENSION_INDEX);
Node* fake_name = IntPtrConstant(0);
TailCallStub(descriptor, handler, p->context, receiver, fake_name, p->slot,
p->vector);
@@ -3934,6 +5104,651 @@ void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
}
}
+void CodeStubAssembler::ExtendPropertiesBackingStore(compiler::Node* object) {
+ Node* properties = LoadProperties(object);
+ Node* length = LoadFixedArrayBaseLength(properties);
+
+ ParameterMode mode = OptimalParameterMode();
+ length = UntagParameter(length, mode);
+
+ Node* delta = IntPtrOrSmiConstant(JSObject::kFieldsAdded, mode);
+ Node* new_capacity = IntPtrAdd(length, delta);
+
+ // Grow properties array.
+ ElementsKind kind = FAST_ELEMENTS;
+ DCHECK(kMaxNumberOfDescriptors + JSObject::kFieldsAdded <
+ FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind));
+ // The size of a new properties backing store is guaranteed to be small
+ // enough that the new backing store will be allocated in new space.
+ Assert(UintPtrLessThan(new_capacity, IntPtrConstant(kMaxNumberOfDescriptors +
+ JSObject::kFieldsAdded)));
+
+ Node* new_properties = AllocateFixedArray(kind, new_capacity, mode);
+
+ FillFixedArrayWithValue(kind, new_properties, length, new_capacity,
+ Heap::kUndefinedValueRootIndex, mode);
+
+ // |new_properties| is guaranteed to be in new space, so we can skip
+ // the write barrier.
+ CopyFixedArrayElements(kind, properties, new_properties, length,
+ SKIP_WRITE_BARRIER, mode);
+
+ StoreObjectField(object, JSObject::kPropertiesOffset, new_properties);
+}
+
+Node* CodeStubAssembler::PrepareValueForWrite(Node* value,
+ Representation representation,
+ Label* bailout) {
+ if (representation.IsDouble()) {
+ Variable var_value(this, MachineRepresentation::kFloat64);
+ Label if_smi(this), if_heap_object(this), done(this);
+ Branch(WordIsSmi(value), &if_smi, &if_heap_object);
+ Bind(&if_smi);
+ {
+ var_value.Bind(SmiToFloat64(value));
+ Goto(&done);
+ }
+ Bind(&if_heap_object);
+ {
+ GotoUnless(
+ Word32Equal(LoadInstanceType(value), Int32Constant(HEAP_NUMBER_TYPE)),
+ bailout);
+ var_value.Bind(LoadHeapNumberValue(value));
+ Goto(&done);
+ }
+ Bind(&done);
+ value = var_value.value();
+ } else if (representation.IsHeapObject()) {
+ // Field type is checked by the handler, here we only check if the value
+ // is a heap object.
+ GotoIf(WordIsSmi(value), bailout);
+ } else if (representation.IsSmi()) {
+ GotoUnless(WordIsSmi(value), bailout);
+ } else {
+ DCHECK(representation.IsTagged());
+ }
+ return value;
+}
+
+void CodeStubAssembler::StoreNamedField(Node* object, FieldIndex index,
+ Representation representation,
+ Node* value, bool transition_to_field) {
+ DCHECK_EQ(index.is_double(), representation.IsDouble());
+
+ StoreNamedField(object, IntPtrConstant(index.offset()), index.is_inobject(),
+ representation, value, transition_to_field);
+}
+
+void CodeStubAssembler::StoreNamedField(Node* object, Node* offset,
+ bool is_inobject,
+ Representation representation,
+ Node* value, bool transition_to_field) {
+ bool store_value_as_double = representation.IsDouble();
+ Node* property_storage = object;
+ if (!is_inobject) {
+ property_storage = LoadProperties(object);
+ }
+
+ if (representation.IsDouble()) {
+ if (!FLAG_unbox_double_fields || !is_inobject) {
+ if (transition_to_field) {
+ Node* heap_number = AllocateHeapNumberWithValue(value, MUTABLE);
+ // Store the new mutable heap number into the object.
+ value = heap_number;
+ store_value_as_double = false;
+ } else {
+ // Load the heap number.
+ property_storage = LoadObjectField(property_storage, offset);
+ // Store the double value into it.
+ offset = IntPtrConstant(HeapNumber::kValueOffset);
+ }
+ }
+ }
+
+ if (store_value_as_double) {
+ StoreObjectFieldNoWriteBarrier(property_storage, offset, value,
+ MachineRepresentation::kFloat64);
+ } else if (representation.IsSmi()) {
+ StoreObjectFieldNoWriteBarrier(property_storage, offset, value);
+ } else {
+ StoreObjectField(property_storage, offset, value);
+ }
+}
+
+Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
+ Node* value, Label* bailout) {
+ // Mapped arguments are actual arguments. Unmapped arguments are values added
+ // to the arguments object after it was created for the call. Mapped arguments
+ // are stored in the context at indexes given by elements[key + 2]. Unmapped
+ // arguments are stored as regular indexed properties in the arguments array,
+ // held at elements[1]. See NewSloppyArguments() in runtime.cc for a detailed
+ // look at argument object construction.
+ //
+ // The sloppy arguments elements array has a special format:
+ //
+ // 0: context
+ // 1: unmapped arguments array
+ // 2: mapped_index0,
+ // 3: mapped_index1,
+ // ...
+ //
+ // length is 2 + min(number_of_actual_arguments, number_of_formal_arguments).
+ // If key + 2 >= elements.length then attempt to look in the unmapped
+ // arguments array (given by elements[1]) and return the value at key, missing
+ // to the runtime if the unmapped arguments array is not a fixed array or if
+ // key >= unmapped_arguments_array.length.
+ //
+ // Otherwise, t = elements[key + 2]. If t is the hole, then look up the value
+ // in the unmapped arguments array, as described above. Otherwise, t is a Smi
+ // index into the context array given at elements[0]. Return the value at
+ // context[t].
+
+ bool is_load = value == nullptr;
+
+ GotoUnless(WordIsSmi(key), bailout);
+ key = SmiUntag(key);
+ GotoIf(IntPtrLessThan(key, IntPtrConstant(0)), bailout);
+
+ Node* elements = LoadElements(receiver);
+ Node* elements_length = LoadAndUntagFixedArrayBaseLength(elements);
+
+ Variable var_result(this, MachineRepresentation::kTagged);
+ if (!is_load) {
+ var_result.Bind(value);
+ }
+ Label if_mapped(this), if_unmapped(this), end(this, &var_result);
+ Node* intptr_two = IntPtrConstant(2);
+ Node* adjusted_length = IntPtrSub(elements_length, intptr_two);
+
+ GotoIf(UintPtrGreaterThanOrEqual(key, adjusted_length), &if_unmapped);
+
+ Node* mapped_index = LoadFixedArrayElement(
+ elements, IntPtrAdd(key, intptr_two), 0, INTPTR_PARAMETERS);
+ Branch(WordEqual(mapped_index, TheHoleConstant()), &if_unmapped, &if_mapped);
+
+ Bind(&if_mapped);
+ {
+ Assert(WordIsSmi(mapped_index));
+ mapped_index = SmiUntag(mapped_index);
+ Node* the_context = LoadFixedArrayElement(elements, IntPtrConstant(0), 0,
+ INTPTR_PARAMETERS);
+ // Assert that we can use LoadFixedArrayElement/StoreFixedArrayElement
+ // methods for accessing Context.
+ STATIC_ASSERT(Context::kHeaderSize == FixedArray::kHeaderSize);
+ DCHECK_EQ(Context::SlotOffset(0) + kHeapObjectTag,
+ FixedArray::OffsetOfElementAt(0));
+ if (is_load) {
+ Node* result = LoadFixedArrayElement(the_context, mapped_index, 0,
+ INTPTR_PARAMETERS);
+ Assert(WordNotEqual(result, TheHoleConstant()));
+ var_result.Bind(result);
+ } else {
+ StoreFixedArrayElement(the_context, mapped_index, value,
+ UPDATE_WRITE_BARRIER, INTPTR_PARAMETERS);
+ }
+ Goto(&end);
+ }
+
+ Bind(&if_unmapped);
+ {
+ Node* backing_store = LoadFixedArrayElement(elements, IntPtrConstant(1), 0,
+ INTPTR_PARAMETERS);
+ GotoIf(WordNotEqual(LoadMap(backing_store), FixedArrayMapConstant()),
+ bailout);
+
+ Node* backing_store_length =
+ LoadAndUntagFixedArrayBaseLength(backing_store);
+ GotoIf(UintPtrGreaterThanOrEqual(key, backing_store_length), bailout);
+
+ // The key falls into unmapped range.
+ if (is_load) {
+ Node* result =
+ LoadFixedArrayElement(backing_store, key, 0, INTPTR_PARAMETERS);
+ GotoIf(WordEqual(result, TheHoleConstant()), bailout);
+ var_result.Bind(result);
+ } else {
+ StoreFixedArrayElement(backing_store, key, value, UPDATE_WRITE_BARRIER,
+ INTPTR_PARAMETERS);
+ }
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::LoadScriptContext(Node* context, int context_index) {
+ Node* native_context = LoadNativeContext(context);
+ Node* script_context_table =
+ LoadContextElement(native_context, Context::SCRIPT_CONTEXT_TABLE_INDEX);
+
+ int offset =
+ ScriptContextTable::GetContextOffset(context_index) - kHeapObjectTag;
+ return Load(MachineType::AnyTagged(), script_context_table,
+ IntPtrConstant(offset));
+}
+
+namespace {
+
+// Converts typed array elements kind to a machine representations.
+MachineRepresentation ElementsKindToMachineRepresentation(ElementsKind kind) {
+ switch (kind) {
+ case UINT8_CLAMPED_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ return MachineRepresentation::kWord8;
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ return MachineRepresentation::kWord16;
+ case UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ return MachineRepresentation::kWord32;
+ case FLOAT32_ELEMENTS:
+ return MachineRepresentation::kFloat32;
+ case FLOAT64_ELEMENTS:
+ return MachineRepresentation::kFloat64;
+ default:
+ UNREACHABLE();
+ return MachineRepresentation::kNone;
+ }
+}
+
+} // namespace
+
+void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
+ Node* index, Node* value,
+ ParameterMode mode) {
+ if (IsFixedTypedArrayElementsKind(kind)) {
+ if (kind == UINT8_CLAMPED_ELEMENTS) {
+#ifdef DEBUG
+ Assert(Word32Equal(value, Word32And(Int32Constant(0xff), value)));
+#endif
+ }
+ Node* offset = ElementOffsetFromIndex(index, kind, mode, 0);
+ MachineRepresentation rep = ElementsKindToMachineRepresentation(kind);
+ StoreNoWriteBarrier(rep, elements, offset, value);
+ return;
+ }
+
+ WriteBarrierMode barrier_mode =
+ IsFastSmiElementsKind(kind) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ if (IsFastDoubleElementsKind(kind)) {
+ // Make sure we do not store signalling NaNs into double arrays.
+ value = Float64SilenceNaN(value);
+ StoreFixedDoubleArrayElement(elements, index, value, mode);
+ } else {
+ StoreFixedArrayElement(elements, index, value, barrier_mode, mode);
+ }
+}
+
+Node* CodeStubAssembler::Int32ToUint8Clamped(Node* int32_value) {
+ Label done(this);
+ Node* int32_zero = Int32Constant(0);
+ Node* int32_255 = Int32Constant(255);
+ Variable var_value(this, MachineRepresentation::kWord32);
+ var_value.Bind(int32_value);
+ GotoIf(Uint32LessThanOrEqual(int32_value, int32_255), &done);
+ var_value.Bind(int32_zero);
+ GotoIf(Int32LessThan(int32_value, int32_zero), &done);
+ var_value.Bind(int32_255);
+ Goto(&done);
+ Bind(&done);
+ return var_value.value();
+}
+
+Node* CodeStubAssembler::Float64ToUint8Clamped(Node* float64_value) {
+ Label done(this);
+ Variable var_value(this, MachineRepresentation::kWord32);
+ var_value.Bind(Int32Constant(0));
+ GotoIf(Float64LessThanOrEqual(float64_value, Float64Constant(0.0)), &done);
+ var_value.Bind(Int32Constant(255));
+ GotoIf(Float64LessThanOrEqual(Float64Constant(255.0), float64_value), &done);
+ {
+ Node* rounded_value = Float64RoundToEven(float64_value);
+ var_value.Bind(TruncateFloat64ToWord32(rounded_value));
+ Goto(&done);
+ }
+ Bind(&done);
+ return var_value.value();
+}
+
+Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
+ Node* input, ElementsKind elements_kind, Label* bailout) {
+ DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
+
+ MachineRepresentation rep;
+ switch (elements_kind) {
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ rep = MachineRepresentation::kWord32;
+ break;
+ case FLOAT32_ELEMENTS:
+ rep = MachineRepresentation::kFloat32;
+ break;
+ case FLOAT64_ELEMENTS:
+ rep = MachineRepresentation::kFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+
+ Variable var_result(this, rep);
+ Label done(this, &var_result), if_smi(this);
+ GotoIf(WordIsSmi(input), &if_smi);
+ // Try to convert a heap number to a Smi.
+ GotoUnless(IsHeapNumberMap(LoadMap(input)), bailout);
+ {
+ Node* value = LoadHeapNumberValue(input);
+ if (rep == MachineRepresentation::kWord32) {
+ if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
+ value = Float64ToUint8Clamped(value);
+ } else {
+ value = TruncateFloat64ToWord32(value);
+ }
+ } else if (rep == MachineRepresentation::kFloat32) {
+ value = TruncateFloat64ToFloat32(value);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat64, rep);
+ }
+ var_result.Bind(value);
+ Goto(&done);
+ }
+
+ Bind(&if_smi);
+ {
+ Node* value = SmiToWord32(input);
+ if (rep == MachineRepresentation::kFloat32) {
+ value = RoundInt32ToFloat32(value);
+ } else if (rep == MachineRepresentation::kFloat64) {
+ value = ChangeInt32ToFloat64(value);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kWord32, rep);
+ if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
+ value = Int32ToUint8Clamped(value);
+ }
+ }
+ var_result.Bind(value);
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return var_result.value();
+}
+
+void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
+ bool is_jsarray,
+ ElementsKind elements_kind,
+ KeyedAccessStoreMode store_mode,
+ Label* bailout) {
+ Node* elements = LoadElements(object);
+ if (IsFastSmiOrObjectElementsKind(elements_kind) &&
+ store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
+ // Bailout in case of COW elements.
+ GotoIf(WordNotEqual(LoadMap(elements),
+ LoadRoot(Heap::kFixedArrayMapRootIndex)),
+ bailout);
+ }
+ // TODO(ishell): introduce TryToIntPtrOrSmi() and use OptimalParameterMode().
+ ParameterMode parameter_mode = INTPTR_PARAMETERS;
+ key = TryToIntptr(key, bailout);
+
+ if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ Label done(this);
+ // TODO(ishell): call ToNumber() on value and don't bailout but be careful
+ // to call it only once if we decide to bailout because of bounds checks.
+
+ value = PrepareValueForWriteToTypedArray(value, elements_kind, bailout);
+
+ // There must be no allocations between the buffer load and
+ // and the actual store to backing store, because GC may decide that
+ // the buffer is not alive or move the elements.
+ // TODO(ishell): introduce DisallowHeapAllocationCode scope here.
+
+ // Check if buffer has been neutered.
+ Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+ Node* bitfield = LoadObjectField(buffer, JSArrayBuffer::kBitFieldOffset,
+ MachineType::Uint32());
+ Node* neutered_bit =
+ Word32And(bitfield, Int32Constant(JSArrayBuffer::WasNeutered::kMask));
+ GotoUnless(Word32Equal(neutered_bit, Int32Constant(0)), bailout);
+
+ // Bounds check.
+ Node* length = UntagParameter(
+ LoadObjectField(object, JSTypedArray::kLengthOffset), parameter_mode);
+
+ if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+ // Skip the store if we write beyond the length.
+ GotoUnless(IntPtrLessThan(key, length), &done);
+ // ... but bailout if the key is negative.
+ } else {
+ DCHECK_EQ(STANDARD_STORE, store_mode);
+ }
+ GotoUnless(UintPtrLessThan(key, length), bailout);
+
+ // Backing store = external_pointer + base_pointer.
+ Node* external_pointer =
+ LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
+ MachineType::Pointer());
+ Node* base_pointer =
+ LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
+ Node* backing_store = IntPtrAdd(external_pointer, base_pointer);
+ StoreElement(backing_store, elements_kind, key, value, parameter_mode);
+ Goto(&done);
+
+ Bind(&done);
+ return;
+ }
+ DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) ||
+ IsFastDoubleElementsKind(elements_kind));
+
+ Node* length = is_jsarray ? LoadObjectField(object, JSArray::kLengthOffset)
+ : LoadFixedArrayBaseLength(elements);
+ length = UntagParameter(length, parameter_mode);
+
+ // In case value is stored into a fast smi array, assure that the value is
+ // a smi before manipulating the backing store. Otherwise the backing store
+ // may be left in an invalid state.
+ if (IsFastSmiElementsKind(elements_kind)) {
+ GotoUnless(WordIsSmi(value), bailout);
+ } else if (IsFastDoubleElementsKind(elements_kind)) {
+ value = PrepareValueForWrite(value, Representation::Double(), bailout);
+ }
+
+ if (IsGrowStoreMode(store_mode)) {
+ elements = CheckForCapacityGrow(object, elements, elements_kind, length,
+ key, parameter_mode, is_jsarray, bailout);
+ } else {
+ GotoUnless(UintPtrLessThan(key, length), bailout);
+
+ if ((store_mode == STORE_NO_TRANSITION_HANDLE_COW) &&
+ IsFastSmiOrObjectElementsKind(elements_kind)) {
+ elements = CopyElementsOnWrite(object, elements, elements_kind, length,
+ parameter_mode, bailout);
+ }
+ }
+ StoreElement(elements, elements_kind, key, value, parameter_mode);
+}
+
+Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
+ ElementsKind kind, Node* length,
+ Node* key, ParameterMode mode,
+ bool is_js_array,
+ Label* bailout) {
+ Variable checked_elements(this, MachineRepresentation::kTagged);
+ Label grow_case(this), no_grow_case(this), done(this);
+
+ Node* condition;
+ if (IsHoleyElementsKind(kind)) {
+ condition = UintPtrGreaterThanOrEqual(key, length);
+ } else {
+ condition = WordEqual(key, length);
+ }
+ Branch(condition, &grow_case, &no_grow_case);
+
+ Bind(&grow_case);
+ {
+ Node* current_capacity =
+ UntagParameter(LoadFixedArrayBaseLength(elements), mode);
+
+ checked_elements.Bind(elements);
+
+ Label fits_capacity(this);
+ GotoIf(UintPtrLessThan(key, current_capacity), &fits_capacity);
+ {
+ Node* new_elements = TryGrowElementsCapacity(
+ object, elements, kind, key, current_capacity, mode, bailout);
+
+ checked_elements.Bind(new_elements);
+ Goto(&fits_capacity);
+ }
+ Bind(&fits_capacity);
+
+ if (is_js_array) {
+ Node* new_length = IntPtrAdd(key, IntPtrOrSmiConstant(1, mode));
+ StoreObjectFieldNoWriteBarrier(object, JSArray::kLengthOffset,
+ TagParameter(new_length, mode));
+ }
+ Goto(&done);
+ }
+
+ Bind(&no_grow_case);
+ {
+ GotoUnless(UintPtrLessThan(key, length), bailout);
+ checked_elements.Bind(elements);
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return checked_elements.value();
+}
+
+Node* CodeStubAssembler::CopyElementsOnWrite(Node* object, Node* elements,
+ ElementsKind kind, Node* length,
+ ParameterMode mode,
+ Label* bailout) {
+ Variable new_elements_var(this, MachineRepresentation::kTagged);
+ Label done(this);
+
+ new_elements_var.Bind(elements);
+ GotoUnless(
+ WordEqual(LoadMap(elements), LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
+ &done);
+ {
+ Node* capacity = UntagParameter(LoadFixedArrayBaseLength(elements), mode);
+ Node* new_elements = GrowElementsCapacity(object, elements, kind, kind,
+ length, capacity, mode, bailout);
+
+ new_elements_var.Bind(new_elements);
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return new_elements_var.value();
+}
+
+void CodeStubAssembler::TransitionElementsKind(
+ compiler::Node* object, compiler::Node* map, ElementsKind from_kind,
+ ElementsKind to_kind, bool is_jsarray, Label* bailout) {
+ DCHECK(!IsFastHoleyElementsKind(from_kind) ||
+ IsFastHoleyElementsKind(to_kind));
+ if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ TrapAllocationMemento(object, bailout);
+ }
+
+ if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Comment("Non-simple map transition");
+ Node* elements = LoadElements(object);
+
+ Node* empty_fixed_array =
+ HeapConstant(isolate()->factory()->empty_fixed_array());
+
+ Label done(this);
+ GotoIf(WordEqual(elements, empty_fixed_array), &done);
+
+ // TODO(ishell): Use OptimalParameterMode().
+ ParameterMode mode = INTPTR_PARAMETERS;
+ Node* elements_length = SmiUntag(LoadFixedArrayBaseLength(elements));
+ Node* array_length =
+ is_jsarray ? SmiUntag(LoadObjectField(object, JSArray::kLengthOffset))
+ : elements_length;
+
+ GrowElementsCapacity(object, elements, from_kind, to_kind, array_length,
+ elements_length, mode, bailout);
+ Goto(&done);
+ Bind(&done);
+ }
+
+ StoreObjectField(object, JSObject::kMapOffset, map);
+}
+
+void CodeStubAssembler::TrapAllocationMemento(Node* object,
+ Label* memento_found) {
+ Comment("[ TrapAllocationMemento");
+ Label no_memento_found(this);
+ Label top_check(this), map_check(this);
+
+ Node* new_space_top_address = ExternalConstant(
+ ExternalReference::new_space_allocation_top_address(isolate()));
+ const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+ const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+ // Bail out if the object is not in new space.
+ Node* object_page = PageFromAddress(object);
+ {
+ const int mask =
+ (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+ Node* page_flags = Load(MachineType::IntPtr(), object_page);
+ GotoIf(
+ WordEqual(WordAnd(page_flags, IntPtrConstant(mask)), IntPtrConstant(0)),
+ &no_memento_found);
+ }
+
+ Node* memento_end = IntPtrAdd(object, IntPtrConstant(kMementoEndOffset));
+ Node* memento_end_page = PageFromAddress(memento_end);
+
+ Node* new_space_top = Load(MachineType::Pointer(), new_space_top_address);
+ Node* new_space_top_page = PageFromAddress(new_space_top);
+
+ // If the object is in new space, we need to check whether it is and
+ // respective potential memento object on the same page as the current top.
+ GotoIf(WordEqual(memento_end_page, new_space_top_page), &top_check);
+
+ // The object is on a different page than allocation top. Bail out if the
+ // object sits on the page boundary as no memento can follow and we cannot
+ // touch the memory following it.
+ Branch(WordEqual(object_page, memento_end_page), &map_check,
+ &no_memento_found);
+
+ // If top is on the same page as the current object, we need to check whether
+ // we are below top.
+ Bind(&top_check);
+ {
+ Branch(UintPtrGreaterThan(memento_end, new_space_top), &no_memento_found,
+ &map_check);
+ }
+
+ // Memento map check.
+ Bind(&map_check);
+ {
+ Node* memento_map = LoadObjectField(object, kMementoMapOffset);
+ Branch(
+ WordEqual(memento_map, LoadRoot(Heap::kAllocationMementoMapRootIndex)),
+ memento_found, &no_memento_found);
+ }
+ Bind(&no_memento_found);
+ Comment("] TrapAllocationMemento");
+}
+
+Node* CodeStubAssembler::PageFromAddress(Node* address) {
+ return WordAnd(address, IntPtrConstant(~Page::kPageAlignmentMask));
+}
+
Node* CodeStubAssembler::EnumLength(Node* map) {
Node* bitfield_3 = LoadMapBitField3(map);
Node* enum_length = BitFieldDecode<Map::EnumLengthBits>(bitfield_3);
@@ -4001,6 +5816,52 @@ void CodeStubAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
}
}
+Node* CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
+ Node* feedback_vector, Node* slot) {
+ Node* size = IntPtrConstant(AllocationSite::kSize);
+ Node* site = Allocate(size, CodeStubAssembler::kPretenured);
+
+ // Store the map
+ StoreObjectFieldRoot(site, AllocationSite::kMapOffset,
+ Heap::kAllocationSiteMapRootIndex);
+ Node* kind = SmiConstant(Smi::FromInt(GetInitialFastElementsKind()));
+ StoreObjectFieldNoWriteBarrier(site, AllocationSite::kTransitionInfoOffset,
+ kind);
+
+ // Unlike literals, constructed arrays don't have nested sites
+ Node* zero = IntPtrConstant(0);
+ StoreObjectFieldNoWriteBarrier(site, AllocationSite::kNestedSiteOffset, zero);
+
+ // Pretenuring calculation field.
+ StoreObjectFieldNoWriteBarrier(site, AllocationSite::kPretenureDataOffset,
+ zero);
+
+ // Pretenuring memento creation count field.
+ StoreObjectFieldNoWriteBarrier(
+ site, AllocationSite::kPretenureCreateCountOffset, zero);
+
+ // Store an empty fixed array for the code dependency.
+ StoreObjectFieldRoot(site, AllocationSite::kDependentCodeOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+
+ // Link the object to the allocation site list
+ Node* site_list = ExternalConstant(
+ ExternalReference::allocation_sites_list_address(isolate()));
+ Node* next_site = LoadBufferObject(site_list, 0);
+
+ // TODO(mvstanton): This is a store to a weak pointer, which we may want to
+ // mark as such in order to skip the write barrier, once we have a unified
+ // system for weakness. For now we decided to keep it like this because having
+ // an initial write barrier backed store makes this pointer strong until the
+ // next GC, and allocation sites are designed to survive several GCs anyway.
+ StoreObjectField(site, AllocationSite::kWeakNextOffset, next_site);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, site_list, site);
+
+ StoreFixedArrayElement(feedback_vector, slot, site, UPDATE_WRITE_BARRIER,
+ CodeStubAssembler::SMI_PARAMETERS);
+ return site;
+}
+
Node* CodeStubAssembler::CreateWeakCellInFeedbackVector(Node* feedback_vector,
Node* slot,
Node* value) {
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 4bad541129..25c7d5a8c8 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -19,6 +19,20 @@ class StubCache;
enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
+#define HEAP_CONSTANT_LIST(V) \
+ V(BooleanMap, BooleanMap) \
+ V(empty_string, EmptyString) \
+ V(EmptyFixedArray, EmptyFixedArray) \
+ V(FixedArrayMap, FixedArrayMap) \
+ V(FixedCOWArrayMap, FixedCOWArrayMap) \
+ V(FixedDoubleArrayMap, FixedDoubleArrayMap) \
+ V(HeapNumberMap, HeapNumberMap) \
+ V(MinusZeroValue, MinusZero) \
+ V(NanValue, Nan) \
+ V(NullValue, Null) \
+ V(TheHoleValue, TheHole) \
+ V(UndefinedValue, Undefined)
+
// Provides JavaScript-specific "macro-assembler" functionality on top of the
// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
// it's possible to add JavaScript-specific useful CodeAssembler "macros"
@@ -46,17 +60,40 @@ class CodeStubAssembler : public compiler::CodeAssembler {
typedef base::Flags<AllocationFlag> AllocationFlags;
- enum ParameterMode { INTEGER_PARAMETERS, SMI_PARAMETERS };
+ // TODO(ishell): Fix all loads/stores from arrays by int32 offsets/indices
+ // and eventually remove INTEGER_PARAMETERS in favour of INTPTR_PARAMETERS.
+ enum ParameterMode { INTEGER_PARAMETERS, SMI_PARAMETERS, INTPTR_PARAMETERS };
+
+ // On 32-bit platforms, there is a slight performance advantage to doing all
+ // of the array offset/index arithmetic with SMIs, since it's possible
+ // to save a few tag/untag operations without paying an extra expense when
+ // calculating array offset (the smi math can be folded away) and there are
+ // fewer live ranges. Thus only convert indices to untagged value on 64-bit
+ // platforms.
+ ParameterMode OptimalParameterMode() const {
+ return Is64() ? INTPTR_PARAMETERS : SMI_PARAMETERS;
+ }
+
+ compiler::Node* UntagParameter(compiler::Node* value, ParameterMode mode) {
+ if (mode != SMI_PARAMETERS) value = SmiUntag(value);
+ return value;
+ }
+
+ compiler::Node* TagParameter(compiler::Node* value, ParameterMode mode) {
+ if (mode != SMI_PARAMETERS) value = SmiTag(value);
+ return value;
+ }
- compiler::Node* BooleanMapConstant();
- compiler::Node* EmptyStringConstant();
- compiler::Node* HeapNumberMapConstant();
compiler::Node* NoContextConstant();
- compiler::Node* NanConstant();
- compiler::Node* NullConstant();
- compiler::Node* MinusZeroConstant();
- compiler::Node* UndefinedConstant();
- compiler::Node* TheHoleConstant();
+#define HEAP_CONSTANT_ACCESSOR(rootName, name) compiler::Node* name##Constant();
+ HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR)
+#undef HEAP_CONSTANT_ACCESSOR
+
+#define HEAP_CONSTANT_TEST(rootName, name) \
+ compiler::Node* Is##name(compiler::Node* value);
+ HEAP_CONSTANT_LIST(HEAP_CONSTANT_TEST)
+#undef HEAP_CONSTANT_TEST
+
compiler::Node* HashSeed();
compiler::Node* StaleRegisterConstant();
@@ -66,6 +103,7 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* Float64Ceil(compiler::Node* x);
compiler::Node* Float64Floor(compiler::Node* x);
compiler::Node* Float64Round(compiler::Node* x);
+ compiler::Node* Float64RoundToEven(compiler::Node* x);
compiler::Node* Float64Trunc(compiler::Node* x);
// Tag a Word as a Smi value.
@@ -86,14 +124,20 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* SmiSub(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiSubWithOverflow(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiEqual(compiler::Node* a, compiler::Node* b);
+ compiler::Node* SmiAbove(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiAboveOrEqual(compiler::Node* a, compiler::Node* b);
+ compiler::Node* SmiBelow(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiLessThan(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiLessThanOrEqual(compiler::Node* a, compiler::Node* b);
+ compiler::Node* SmiMax(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiMin(compiler::Node* a, compiler::Node* b);
// Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
compiler::Node* SmiMod(compiler::Node* a, compiler::Node* b);
// Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
compiler::Node* SmiMul(compiler::Node* a, compiler::Node* b);
+ compiler::Node* SmiOr(compiler::Node* a, compiler::Node* b) {
+ return WordOr(a, b);
+ }
// Allocate an object of the given size.
compiler::Node* Allocate(compiler::Node* size, AllocationFlags flags = kNone);
@@ -106,7 +150,7 @@ class CodeStubAssembler : public compiler::CodeAssembler {
// Check a value for smi-ness
compiler::Node* WordIsSmi(compiler::Node* a);
- // Check that the value is a positive smi.
+ // Check that the value is a non-negative smi.
compiler::Node* WordIsPositiveSmi(compiler::Node* a);
void BranchIfSmiEqual(compiler::Node* a, compiler::Node* b, Label* if_true,
@@ -143,10 +187,6 @@ class CodeStubAssembler : public compiler::CodeAssembler {
if_notequal);
}
- void BranchIfSameValueZero(compiler::Node* a, compiler::Node* b,
- compiler::Node* context, Label* if_true,
- Label* if_false);
-
void BranchIfFastJSArray(compiler::Node* object, compiler::Node* context,
Label* if_true, Label* if_false);
@@ -188,6 +228,8 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* LoadProperties(compiler::Node* object);
// Load the elements backing store of a JSObject.
compiler::Node* LoadElements(compiler::Node* object);
+ // Load the length of a JSArray instance.
+ compiler::Node* LoadJSArrayLength(compiler::Node* array);
// Load the length of a fixed array base instance.
compiler::Node* LoadFixedArrayBaseLength(compiler::Node* array);
// Load the length of a fixed array base instance.
@@ -200,6 +242,8 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* LoadMapBitField3(compiler::Node* map);
// Load the instance type of a map.
compiler::Node* LoadMapInstanceType(compiler::Node* map);
+ // Load the ElementsKind of a map.
+ compiler::Node* LoadMapElementsKind(compiler::Node* map);
// Load the instance descriptors of a map.
compiler::Node* LoadMapDescriptors(compiler::Node* map);
// Load the prototype of a map.
@@ -208,13 +252,16 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* LoadMapInstanceSize(compiler::Node* map);
// Load the inobject properties count of a Map (valid only for JSObjects).
compiler::Node* LoadMapInobjectProperties(compiler::Node* map);
+ // Load the constructor function index of a Map (only for primitive maps).
+ compiler::Node* LoadMapConstructorFunctionIndex(compiler::Node* map);
// Load the constructor of a Map (equivalent to Map::GetConstructor()).
compiler::Node* LoadMapConstructor(compiler::Node* map);
- // Load the hash field of a name.
+ // Load the hash field of a name as an uint32 value.
compiler::Node* LoadNameHashField(compiler::Node* name);
- // Load the hash value of a name. If {if_hash_not_computed} label
- // is specified then it also checks if hash is actually computed.
+ // Load the hash value of a name as an uint32 value.
+ // If {if_hash_not_computed} label is specified then it also checks if
+ // hash is actually computed.
compiler::Node* LoadNameHash(compiler::Node* name,
Label* if_hash_not_computed = nullptr);
@@ -226,25 +273,30 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* LoadWeakCellValue(compiler::Node* weak_cell,
Label* if_cleared = nullptr);
- compiler::Node* AllocateUninitializedFixedArray(compiler::Node* length);
-
// Load an array element from a FixedArray.
compiler::Node* LoadFixedArrayElement(
- compiler::Node* object, compiler::Node* int32_index,
- int additional_offset = 0,
+ compiler::Node* object, compiler::Node* index, int additional_offset = 0,
ParameterMode parameter_mode = INTEGER_PARAMETERS);
// Load an array element from a FixedArray, untag it and return it as Word32.
compiler::Node* LoadAndUntagToWord32FixedArrayElement(
- compiler::Node* object, compiler::Node* int32_index,
- int additional_offset = 0,
+ compiler::Node* object, compiler::Node* index, int additional_offset = 0,
ParameterMode parameter_mode = INTEGER_PARAMETERS);
// Load an array element from a FixedDoubleArray.
compiler::Node* LoadFixedDoubleArrayElement(
- compiler::Node* object, compiler::Node* int32_index,
- MachineType machine_type, int additional_offset = 0,
- ParameterMode parameter_mode = INTEGER_PARAMETERS);
+ compiler::Node* object, compiler::Node* index, MachineType machine_type,
+ int additional_offset = 0,
+ ParameterMode parameter_mode = INTEGER_PARAMETERS,
+ Label* if_hole = nullptr);
+
+ // Load Float64 value by |base| + |offset| address. If the value is a double
+ // hole then jump to |if_hole|. If |machine_type| is None then only the hole
+ // check is generated.
+ compiler::Node* LoadDoubleWithHoleCheck(
+ compiler::Node* base, compiler::Node* offset, Label* if_hole,
+ MachineType machine_type = MachineType::Float64());
// Context manipulation
+ compiler::Node* LoadContextElement(compiler::Node* context, int slot_index);
compiler::Node* LoadNativeContext(compiler::Node* context);
compiler::Node* LoadJSArrayElementsMap(ElementsKind kind,
@@ -256,9 +308,15 @@ class CodeStubAssembler : public compiler::CodeAssembler {
// Store a field to an object on the heap.
compiler::Node* StoreObjectField(
compiler::Node* object, int offset, compiler::Node* value);
+ compiler::Node* StoreObjectField(compiler::Node* object,
+ compiler::Node* offset,
+ compiler::Node* value);
compiler::Node* StoreObjectFieldNoWriteBarrier(
compiler::Node* object, int offset, compiler::Node* value,
MachineRepresentation rep = MachineRepresentation::kTagged);
+ compiler::Node* StoreObjectFieldNoWriteBarrier(
+ compiler::Node* object, compiler::Node* offset, compiler::Node* value,
+ MachineRepresentation rep = MachineRepresentation::kTagged);
// Store the Map of an HeapObject.
compiler::Node* StoreMapNoWriteBarrier(compiler::Node* object,
compiler::Node* map);
@@ -275,9 +333,10 @@ class CodeStubAssembler : public compiler::CodeAssembler {
ParameterMode parameter_mode = INTEGER_PARAMETERS);
// Allocate a HeapNumber without initializing its value.
- compiler::Node* AllocateHeapNumber();
+ compiler::Node* AllocateHeapNumber(MutableMode mode = IMMUTABLE);
// Allocate a HeapNumber with a specific value.
- compiler::Node* AllocateHeapNumberWithValue(compiler::Node* value);
+ compiler::Node* AllocateHeapNumberWithValue(compiler::Node* value,
+ MutableMode mode = IMMUTABLE);
// Allocate a SeqOneByteString with the given length.
compiler::Node* AllocateSeqOneByteString(int length);
compiler::Node* AllocateSeqOneByteString(compiler::Node* context,
@@ -286,37 +345,122 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* AllocateSeqTwoByteString(int length);
compiler::Node* AllocateSeqTwoByteString(compiler::Node* context,
compiler::Node* length);
- // Allocated an JSArray
- compiler::Node* AllocateJSArray(ElementsKind kind, compiler::Node* array_map,
- compiler::Node* capacity,
- compiler::Node* length,
- compiler::Node* allocation_site = nullptr,
- ParameterMode mode = INTEGER_PARAMETERS);
+
+ // Allocate a SlicedOneByteString with the given length, parent and offset.
+ // |length| and |offset| are expected to be tagged.
+ compiler::Node* AllocateSlicedOneByteString(compiler::Node* length,
+ compiler::Node* parent,
+ compiler::Node* offset);
+ // Allocate a SlicedTwoByteString with the given length, parent and offset.
+ // |length| and |offset| are expected to be tagged.
+ compiler::Node* AllocateSlicedTwoByteString(compiler::Node* length,
+ compiler::Node* parent,
+ compiler::Node* offset);
+
+ // Allocate a RegExpResult with the given length (the number of captures,
+ // including the match itself), index (the index where the match starts),
+ // and input string. |length| and |index| are expected to be tagged, and
+ // |input| must be a string.
+ compiler::Node* AllocateRegExpResult(compiler::Node* context,
+ compiler::Node* length,
+ compiler::Node* index,
+ compiler::Node* input);
+
+ // Allocate a JSArray without elements and initialize the header fields.
+ compiler::Node* AllocateUninitializedJSArrayWithoutElements(
+ ElementsKind kind, compiler::Node* array_map, compiler::Node* length,
+ compiler::Node* allocation_site);
+ // Allocate and return a JSArray with initialized header fields and its
+ // uninitialized elements.
+ // The ParameterMode argument is only used for the capacity parameter.
+ std::pair<compiler::Node*, compiler::Node*>
+ AllocateUninitializedJSArrayWithElements(
+ ElementsKind kind, compiler::Node* array_map, compiler::Node* length,
+ compiler::Node* allocation_site, compiler::Node* capacity,
+ ParameterMode capacity_mode = INTEGER_PARAMETERS);
+ // Allocate a JSArray and fill elements with the hole.
+ // The ParameterMode argument is only used for the capacity parameter.
+ compiler::Node* AllocateJSArray(
+ ElementsKind kind, compiler::Node* array_map, compiler::Node* capacity,
+ compiler::Node* length, compiler::Node* allocation_site = nullptr,
+ ParameterMode capacity_mode = INTEGER_PARAMETERS);
compiler::Node* AllocateFixedArray(ElementsKind kind,
compiler::Node* capacity,
ParameterMode mode = INTEGER_PARAMETERS,
AllocationFlags flags = kNone);
- void FillFixedArrayWithHole(ElementsKind kind, compiler::Node* array,
- compiler::Node* from_index,
- compiler::Node* to_index,
- ParameterMode mode = INTEGER_PARAMETERS);
+ void FillFixedArrayWithValue(ElementsKind kind, compiler::Node* array,
+ compiler::Node* from_index,
+ compiler::Node* to_index,
+ Heap::RootListIndex value_root_index,
+ ParameterMode mode = INTEGER_PARAMETERS);
+ // Copies all elements from |from_array| of |length| size to
+ // |to_array| of the same size respecting the elements kind.
void CopyFixedArrayElements(
ElementsKind kind, compiler::Node* from_array, compiler::Node* to_array,
- compiler::Node* element_count,
+ compiler::Node* length,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+ ParameterMode mode = INTEGER_PARAMETERS) {
+ CopyFixedArrayElements(kind, from_array, kind, to_array, length, length,
+ barrier_mode, mode);
+ }
+
+ // Copies |element_count| elements from |from_array| to |to_array| of
+ // |capacity| size respecting both array's elements kinds.
+ void CopyFixedArrayElements(
+ ElementsKind from_kind, compiler::Node* from_array, ElementsKind to_kind,
+ compiler::Node* to_array, compiler::Node* element_count,
+ compiler::Node* capacity,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
ParameterMode mode = INTEGER_PARAMETERS);
+ // Copies |character_count| elements from |from_string| to |to_string|
+ // starting at the |from_index|'th character. |from_index| and
+ // |character_count| must be Smis s.t.
+ // 0 <= |from_index| <= |from_index| + |character_count| < from_string.length.
+ void CopyStringCharacters(compiler::Node* from_string,
+ compiler::Node* to_string,
+ compiler::Node* from_index,
+ compiler::Node* character_count,
+ String::Encoding encoding);
+
+ // Loads an element from |array| of |from_kind| elements by given |offset|
+ // (NOTE: not index!), does a hole check if |if_hole| is provided and
+ // converts the value so that it becomes ready for storing to array of
+ // |to_kind| elements.
+ compiler::Node* LoadElementAndPrepareForStore(compiler::Node* array,
+ compiler::Node* offset,
+ ElementsKind from_kind,
+ ElementsKind to_kind,
+ Label* if_hole);
+
compiler::Node* CalculateNewElementsCapacity(
compiler::Node* old_capacity, ParameterMode mode = INTEGER_PARAMETERS);
- compiler::Node* CheckAndGrowElementsCapacity(compiler::Node* context,
- compiler::Node* elements,
- ElementsKind kind,
- compiler::Node* key,
- Label* fail);
+ // Tries to grow the |elements| array of given |object| to store the |key|
+ // or bails out if the growing gap is too big. Returns new elements.
+ compiler::Node* TryGrowElementsCapacity(compiler::Node* object,
+ compiler::Node* elements,
+ ElementsKind kind,
+ compiler::Node* key, Label* bailout);
+
+ // Tries to grow the |capacity|-length |elements| array of given |object|
+ // to store the |key| or bails out if the growing gap is too big. Returns
+ // new elements.
+ compiler::Node* TryGrowElementsCapacity(compiler::Node* object,
+ compiler::Node* elements,
+ ElementsKind kind,
+ compiler::Node* key,
+ compiler::Node* capacity,
+ ParameterMode mode, Label* bailout);
+
+ // Grows elements capacity of given object. Returns new elements.
+ compiler::Node* GrowElementsCapacity(
+ compiler::Node* object, compiler::Node* elements, ElementsKind from_kind,
+ ElementsKind to_kind, compiler::Node* capacity,
+ compiler::Node* new_capacity, ParameterMode mode, Label* bailout);
// Allocation site manipulation
void InitializeAllocationMemento(compiler::Node* base_allocation,
@@ -347,19 +491,67 @@ class CodeStubAssembler : public compiler::CodeAssembler {
PrimitiveType primitive_type,
char const* method_name);
+ // Throws a TypeError for {method_name} if {value} is not of the given
+ // instance type. Returns {value}'s map.
+ compiler::Node* ThrowIfNotInstanceType(compiler::Node* context,
+ compiler::Node* value,
+ InstanceType instance_type,
+ char const* method_name);
+
+ // Type checks.
+ compiler::Node* IsStringInstanceType(compiler::Node* instance_type);
+ compiler::Node* IsJSReceiverInstanceType(compiler::Node* instance_type);
+
// String helpers.
// Load a character from a String (might flatten a ConsString).
compiler::Node* StringCharCodeAt(compiler::Node* string,
compiler::Node* smi_index);
// Return the single character string with only {code}.
compiler::Node* StringFromCharCode(compiler::Node* code);
+ // Return a new string object which holds a substring containing the range
+ // [from,to[ of string. |from| and |to| are expected to be tagged.
+ compiler::Node* SubString(compiler::Node* context, compiler::Node* string,
+ compiler::Node* from, compiler::Node* to);
+
+ compiler::Node* StringFromCodePoint(compiler::Node* codepoint,
+ UnicodeEncoding encoding);
+
+ // Type conversion helpers.
+ // Convert a String to a Number.
+ compiler::Node* StringToNumber(compiler::Node* context,
+ compiler::Node* input);
+ // Convert an object to a name.
+ compiler::Node* ToName(compiler::Node* context, compiler::Node* input);
+ // Convert a Non-Number object to a Number.
+ compiler::Node* NonNumberToNumber(compiler::Node* context,
+ compiler::Node* input);
+ // Convert any object to a Number.
+ compiler::Node* ToNumber(compiler::Node* context, compiler::Node* input);
+
+ enum ToIntegerTruncationMode {
+ kNoTruncation,
+ kTruncateMinusZero,
+ };
+
+ // Convert any object to an Integer.
+ compiler::Node* ToInteger(compiler::Node* context, compiler::Node* input,
+ ToIntegerTruncationMode mode = kNoTruncation);
- // Returns a node that is true if the given bit is set in |word32|.
+ // Returns a node that contains a decoded (unsigned!) value of a bit
+ // field |T| in |word32|. Returns result as an uint32 node.
template <typename T>
compiler::Node* BitFieldDecode(compiler::Node* word32) {
return BitFieldDecode(word32, T::kShift, T::kMask);
}
+ // Returns a node that contains a decoded (unsigned!) value of a bit
+ // field |T| in |word32|. Returns result as a word-size node.
+ template <typename T>
+ compiler::Node* BitFieldDecodeWord(compiler::Node* word32) {
+ return ChangeUint32ToWord(BitFieldDecode<T>(word32));
+ }
+
+ // Decodes an unsigned (!) value from |word32| to an uint32 node.
compiler::Node* BitFieldDecode(compiler::Node* word32, uint32_t shift,
uint32_t mask);
@@ -399,9 +591,9 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* ComputeIntegerHash(compiler::Node* key, compiler::Node* seed);
template <typename Dictionary>
- void NumberDictionaryLookup(compiler::Node* dictionary, compiler::Node* key,
- Label* if_found, Variable* var_entry,
- Label* if_not_found);
+ void NumberDictionaryLookup(compiler::Node* dictionary,
+ compiler::Node* intptr_index, Label* if_found,
+ Variable* var_entry, Label* if_not_found);
// Tries to check if {object} has own {unique_name} property.
void TryHasOwnProperty(compiler::Node* object, compiler::Node* map,
@@ -454,9 +646,9 @@ class CodeStubAssembler : public compiler::CodeAssembler {
Label* if_not_found, Label* if_bailout);
void TryLookupElement(compiler::Node* object, compiler::Node* map,
- compiler::Node* instance_type, compiler::Node* index,
- Label* if_found, Label* if_not_found,
- Label* if_bailout);
+ compiler::Node* instance_type,
+ compiler::Node* intptr_index, Label* if_found,
+ Label* if_not_found, Label* if_bailout);
// This is a type of a lookup in holder generator function. In case of a
// property lookup the {key} is guaranteed to be a unique name and in case of
@@ -484,7 +676,7 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* callable,
compiler::Node* object);
- // LoadIC helpers.
+ // Load/StoreIC helpers.
struct LoadICParameters {
LoadICParameters(compiler::Node* context, compiler::Node* receiver,
compiler::Node* name, compiler::Node* slot,
@@ -502,6 +694,15 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* vector;
};
+ struct StoreICParameters : public LoadICParameters {
+ StoreICParameters(compiler::Node* context, compiler::Node* receiver,
+ compiler::Node* name, compiler::Node* value,
+ compiler::Node* slot, compiler::Node* vector)
+ : LoadICParameters(context, receiver, name, slot, vector),
+ value(value) {}
+ compiler::Node* value;
+ };
+
// Load type feedback vector from the stub caller's frame.
compiler::Node* LoadTypeFeedbackVectorForStub();
@@ -513,12 +714,12 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* LoadReceiverMap(compiler::Node* receiver);
// Checks monomorphic case. Returns {feedback} entry of the vector.
- compiler::Node* TryMonomorphicCase(const LoadICParameters* p,
+ compiler::Node* TryMonomorphicCase(compiler::Node* slot,
+ compiler::Node* vector,
compiler::Node* receiver_map,
Label* if_handler, Variable* var_handler,
Label* if_miss);
- void HandlePolymorphicCase(const LoadICParameters* p,
- compiler::Node* receiver_map,
+ void HandlePolymorphicCase(compiler::Node* receiver_map,
compiler::Node* feedback, Label* if_handler,
Variable* var_handler, Label* if_miss,
int unroll_count);
@@ -543,9 +744,80 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* name, Label* if_handler,
Variable* var_handler, Label* if_miss);
+ // Extends properties backing store by JSObject::kFieldsAdded elements.
+ void ExtendPropertiesBackingStore(compiler::Node* object);
+
+ compiler::Node* PrepareValueForWrite(compiler::Node* value,
+ Representation representation,
+ Label* bailout);
+
+ void StoreNamedField(compiler::Node* object, FieldIndex index,
+ Representation representation, compiler::Node* value,
+ bool transition_to_field);
+
+ void StoreNamedField(compiler::Node* object, compiler::Node* offset,
+ bool is_inobject, Representation representation,
+ compiler::Node* value, bool transition_to_field);
+
+ // Emits keyed sloppy arguments load. Returns either the loaded value.
+ compiler::Node* LoadKeyedSloppyArguments(compiler::Node* receiver,
+ compiler::Node* key,
+ Label* bailout) {
+ return EmitKeyedSloppyArguments(receiver, key, nullptr, bailout);
+ }
+
+ // Emits keyed sloppy arguments store.
+ void StoreKeyedSloppyArguments(compiler::Node* receiver, compiler::Node* key,
+ compiler::Node* value, Label* bailout) {
+ DCHECK_NOT_NULL(value);
+ EmitKeyedSloppyArguments(receiver, key, value, bailout);
+ }
+
+ // Loads script context from the script context table.
+ compiler::Node* LoadScriptContext(compiler::Node* context, int context_index);
+
+ compiler::Node* Int32ToUint8Clamped(compiler::Node* int32_value);
+ compiler::Node* Float64ToUint8Clamped(compiler::Node* float64_value);
+
+ compiler::Node* PrepareValueForWriteToTypedArray(compiler::Node* key,
+ ElementsKind elements_kind,
+ Label* bailout);
+
+ // Store value to an elements array with given elements kind.
+ void StoreElement(compiler::Node* elements, ElementsKind kind,
+ compiler::Node* index, compiler::Node* value,
+ ParameterMode mode);
+
+ void EmitElementStore(compiler::Node* object, compiler::Node* key,
+ compiler::Node* value, bool is_jsarray,
+ ElementsKind elements_kind,
+ KeyedAccessStoreMode store_mode, Label* bailout);
+
+ compiler::Node* CheckForCapacityGrow(compiler::Node* object,
+ compiler::Node* elements,
+ ElementsKind kind,
+ compiler::Node* length,
+ compiler::Node* key, ParameterMode mode,
+ bool is_js_array, Label* bailout);
+
+ compiler::Node* CopyElementsOnWrite(compiler::Node* object,
+ compiler::Node* elements,
+ ElementsKind kind, compiler::Node* length,
+ ParameterMode mode, Label* bailout);
+
void LoadIC(const LoadICParameters* p);
void LoadGlobalIC(const LoadICParameters* p);
void KeyedLoadIC(const LoadICParameters* p);
+ void KeyedLoadICGeneric(const LoadICParameters* p);
+ void StoreIC(const StoreICParameters* p);
+
+ void TransitionElementsKind(compiler::Node* object, compiler::Node* map,
+ ElementsKind from_kind, ElementsKind to_kind,
+ bool is_jsarray, Label* bailout);
+
+ void TrapAllocationMemento(compiler::Node* object, Label* memento_found);
+
+ compiler::Node* PageFromAddress(compiler::Node* address);
// Get the enumerable length from |map| and return the result as a Smi.
compiler::Node* EnumLength(compiler::Node* map);
@@ -562,9 +834,13 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* feedback_vector, compiler::Node* slot,
compiler::Node* value);
- compiler::Node* GetFixedAarrayAllocationSize(compiler::Node* element_count,
- ElementsKind kind,
- ParameterMode mode) {
+ // Create a new AllocationSite and install it into a feedback vector.
+ compiler::Node* CreateAllocationSiteInFeedbackVector(
+ compiler::Node* feedback_vector, compiler::Node* slot);
+
+ compiler::Node* GetFixedArrayAllocationSize(compiler::Node* element_count,
+ ElementsKind kind,
+ ParameterMode mode) {
return ElementOffsetFromIndex(element_count, kind, mode,
FixedArray::kHeaderSize);
}
@@ -572,17 +848,34 @@ class CodeStubAssembler : public compiler::CodeAssembler {
private:
enum ElementSupport { kOnlyProperties, kSupportElements };
+ void DescriptorLookupLinear(compiler::Node* unique_name,
+ compiler::Node* descriptors, compiler::Node* nof,
+ Label* if_found, Variable* var_name_index,
+ Label* if_not_found);
+ compiler::Node* CallGetterIfAccessor(compiler::Node* value,
+ compiler::Node* details,
+ compiler::Node* context,
+ compiler::Node* receiver,
+ Label* if_bailout);
+
void HandleLoadICHandlerCase(
const LoadICParameters* p, compiler::Node* handler, Label* miss,
ElementSupport support_elements = kOnlyProperties);
compiler::Node* TryToIntptr(compiler::Node* key, Label* miss);
- void EmitBoundsCheck(compiler::Node* object, compiler::Node* elements,
- compiler::Node* intptr_key, compiler::Node* is_jsarray,
- Label* miss);
+ void EmitFastElementsBoundsCheck(compiler::Node* object,
+ compiler::Node* elements,
+ compiler::Node* intptr_index,
+ compiler::Node* is_jsarray_condition,
+ Label* miss);
void EmitElementLoad(compiler::Node* object, compiler::Node* elements,
compiler::Node* elements_kind, compiler::Node* key,
- Label* if_hole, Label* rebox_double,
- Variable* var_double_value, Label* miss);
+ compiler::Node* is_jsarray_condition, Label* if_hole,
+ Label* rebox_double, Variable* var_double_value,
+ Label* unimplemented_elements_kind, Label* out_of_bounds,
+ Label* miss);
+ void BranchIfPrototypesHaveNoElements(compiler::Node* receiver_map,
+ Label* definitely_no_elements,
+ Label* possibly_elements);
compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
ElementsKind kind, ParameterMode mode,
@@ -596,9 +889,23 @@ class CodeStubAssembler : public compiler::CodeAssembler {
AllocationFlags flags,
compiler::Node* top_adddress,
compiler::Node* limit_address);
+ // Allocate and return a JSArray of given total size in bytes with header
+ // fields initialized.
+ compiler::Node* AllocateUninitializedJSArray(ElementsKind kind,
+ compiler::Node* array_map,
+ compiler::Node* length,
+ compiler::Node* allocation_site,
+ compiler::Node* size_in_bytes);
compiler::Node* SmiShiftBitsConstant();
+ // Emits keyed sloppy arguments load if the |value| is nullptr or store
+ // otherwise. Returns either the loaded value or |value|.
+ compiler::Node* EmitKeyedSloppyArguments(compiler::Node* receiver,
+ compiler::Node* key,
+ compiler::Node* value,
+ Label* bailout);
+
static const int kElementLoopUnrollThreshold = 8;
};
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index fa7a49ebc4..a294d56c7a 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -7,6 +7,7 @@
#include <memory>
#include "src/bailout-reason.h"
+#include "src/code-factory.h"
#include "src/crankshaft/hydrogen.h"
#include "src/crankshaft/lithium.h"
#include "src/field-index.h"
@@ -37,7 +38,7 @@ static LChunk* OptimizeGraph(HGraph* graph) {
class CodeStubGraphBuilderBase : public HGraphBuilder {
public:
explicit CodeStubGraphBuilderBase(CompilationInfo* info, CodeStub* code_stub)
- : HGraphBuilder(info, code_stub->GetCallInterfaceDescriptor()),
+ : HGraphBuilder(info, code_stub->GetCallInterfaceDescriptor(), false),
arguments_length_(NULL),
info_(info),
code_stub_(code_stub),
@@ -59,7 +60,8 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
return parameters_[parameter];
}
Representation GetParameterRepresentation(int parameter) {
- return RepresentationFromType(descriptor_.GetParameterType(parameter));
+ return RepresentationFromMachineType(
+ descriptor_.GetParameterType(parameter));
}
bool IsParameterCountRegister(int index) const {
return descriptor_.GetRegisterParameter(index)
@@ -83,10 +85,6 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* BuildPushElement(HValue* object, HValue* argc,
HValue* argument_elements, ElementsKind kind);
- HValue* UnmappedCase(HValue* elements, HValue* key, HValue* value);
- HValue* EmitKeyedSloppyArguments(HValue* receiver, HValue* key,
- HValue* value);
-
HValue* BuildToString(HValue* input, bool convert);
HValue* BuildToPrimitive(HValue* input, HValue* input_map);
@@ -129,8 +127,8 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
HParameter::STACK_PARAMETER, r);
} else {
param = Add<HParameter>(i, HParameter::REGISTER_PARAMETER, r);
- start_environment->Bind(i, param);
}
+ start_environment->Bind(i, param);
parameters_[i] = param;
if (i < register_param_count && IsParameterCountRegister(i)) {
param->set_type(HType::Smi());
@@ -334,7 +332,7 @@ template <>
HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
info()->MarkAsSavesCallerDoubles();
HValue* number = GetParameter(Descriptor::kArgument);
- return BuildNumberToString(number, Type::Number());
+ return BuildNumberToString(number, AstType::Number());
}
@@ -342,119 +340,6 @@ Handle<Code> NumberToStringStub::GenerateCode() {
return DoGenerateCode(this);
}
-
-template <>
-HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
- Factory* factory = isolate()->factory();
- HValue* undefined = graph()->GetConstantUndefined();
- AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode();
- HValue* closure = GetParameter(Descriptor::kClosure);
- HValue* literal_index = GetParameter(Descriptor::kLiteralIndex);
-
- // TODO(turbofan): This codestub has regressed to need a frame on ia32 at some
- // point and wasn't caught since it wasn't built in the snapshot. We should
- // probably just replace with a TurboFan stub rather than fixing it.
-#if !(V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87)
- // This stub is very performance sensitive, the generated code must be tuned
- // so that it doesn't build and eager frame.
- info()->MarkMustNotHaveEagerFrame();
-#endif
-
- HValue* literals_array = Add<HLoadNamedField>(
- closure, nullptr, HObjectAccess::ForLiteralsPointer());
-
- HInstruction* allocation_site = Add<HLoadKeyed>(
- literals_array, literal_index, nullptr, nullptr, FAST_ELEMENTS,
- NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
- IfBuilder checker(this);
- checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
- undefined);
- checker.Then();
-
- HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kTransitionInfoOffset);
- HInstruction* boilerplate =
- Add<HLoadNamedField>(allocation_site, nullptr, access);
- HValue* elements = AddLoadElements(boilerplate);
- HValue* capacity = AddLoadFixedArrayLength(elements);
- IfBuilder zero_capacity(this);
- zero_capacity.If<HCompareNumericAndBranch>(capacity, graph()->GetConstant0(),
- Token::EQ);
- zero_capacity.Then();
- Push(BuildCloneShallowArrayEmpty(boilerplate,
- allocation_site,
- alloc_site_mode));
- zero_capacity.Else();
- IfBuilder if_fixed_cow(this);
- if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
- if_fixed_cow.Then();
- Push(BuildCloneShallowArrayCow(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_ELEMENTS));
- if_fixed_cow.Else();
- IfBuilder if_fixed(this);
- if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
- if_fixed.Then();
- Push(BuildCloneShallowArrayNonEmpty(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_ELEMENTS));
-
- if_fixed.Else();
- Push(BuildCloneShallowArrayNonEmpty(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_DOUBLE_ELEMENTS));
- if_fixed.End();
- if_fixed_cow.End();
- zero_capacity.End();
-
- checker.ElseDeopt(DeoptimizeReason::kUninitializedBoilerplateLiterals);
- checker.End();
-
- return environment()->Pop();
-}
-
-
-Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-template <>
-HValue* CodeStubGraphBuilder<LoadScriptContextFieldStub>::BuildCodeStub() {
- int context_index = casted_stub()->context_index();
- int slot_index = casted_stub()->slot_index();
-
- HValue* script_context = BuildGetScriptContext(context_index);
- return Add<HLoadNamedField>(script_context, nullptr,
- HObjectAccess::ForContextSlot(slot_index));
-}
-
-
-Handle<Code> LoadScriptContextFieldStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<StoreScriptContextFieldStub>::BuildCodeStub() {
- int context_index = casted_stub()->context_index();
- int slot_index = casted_stub()->slot_index();
-
- HValue* script_context = BuildGetScriptContext(context_index);
- Add<HStoreNamedField>(script_context,
- HObjectAccess::ForContextSlot(slot_index),
- GetParameter(2), STORE_TO_INITIALIZED_ENTRY);
- // TODO(ishell): Remove this unused stub.
- return GetParameter(2);
-}
-
-
-Handle<Code> StoreScriptContextFieldStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
HValue* CodeStubGraphBuilderBase::BuildPushElement(HValue* object, HValue* argc,
HValue* argument_elements,
ElementsKind kind) {
@@ -505,6 +390,7 @@ template <>
HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
// TODO(verwaest): Fix deoptimizer messages.
HValue* argc = GetArgumentsLength();
+
HInstruction* argument_elements = Add<HArgumentsElements>(false, false);
HInstruction* object = Add<HAccessArgumentsAt>(argument_elements, argc,
graph()->GetConstantMinus1());
@@ -904,155 +790,6 @@ HValue* CodeStubGraphBuilder<LoadConstantStub>::BuildCodeStub() {
Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); }
-HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key,
- HValue* value) {
- HValue* result = NULL;
- HInstruction* backing_store =
- Add<HLoadKeyed>(elements, graph()->GetConstant1(), nullptr, nullptr,
- FAST_ELEMENTS, ALLOW_RETURN_HOLE);
- Add<HCheckMaps>(backing_store, isolate()->factory()->fixed_array_map());
- HValue* backing_store_length = Add<HLoadNamedField>(
- backing_store, nullptr, HObjectAccess::ForFixedArrayLength());
- IfBuilder in_unmapped_range(this);
- in_unmapped_range.If<HCompareNumericAndBranch>(key, backing_store_length,
- Token::LT);
- in_unmapped_range.Then();
- {
- if (value == NULL) {
- result = Add<HLoadKeyed>(backing_store, key, nullptr, nullptr,
- FAST_HOLEY_ELEMENTS, NEVER_RETURN_HOLE);
- } else {
- Add<HStoreKeyed>(backing_store, key, value, nullptr, FAST_HOLEY_ELEMENTS);
- }
- }
- in_unmapped_range.ElseDeopt(DeoptimizeReason::kOutsideOfRange);
- in_unmapped_range.End();
- return result;
-}
-
-
-HValue* CodeStubGraphBuilderBase::EmitKeyedSloppyArguments(HValue* receiver,
- HValue* key,
- HValue* value) {
- // Mapped arguments are actual arguments. Unmapped arguments are values added
- // to the arguments object after it was created for the call. Mapped arguments
- // are stored in the context at indexes given by elements[key + 2]. Unmapped
- // arguments are stored as regular indexed properties in the arguments array,
- // held at elements[1]. See NewSloppyArguments() in runtime.cc for a detailed
- // look at argument object construction.
- //
- // The sloppy arguments elements array has a special format:
- //
- // 0: context
- // 1: unmapped arguments array
- // 2: mapped_index0,
- // 3: mapped_index1,
- // ...
- //
- // length is 2 + min(number_of_actual_arguments, number_of_formal_arguments).
- // If key + 2 >= elements.length then attempt to look in the unmapped
- // arguments array (given by elements[1]) and return the value at key, missing
- // to the runtime if the unmapped arguments array is not a fixed array or if
- // key >= unmapped_arguments_array.length.
- //
- // Otherwise, t = elements[key + 2]. If t is the hole, then look up the value
- // in the unmapped arguments array, as described above. Otherwise, t is a Smi
- // index into the context array given at elements[0]. Return the value at
- // context[t].
-
- bool is_load = value == NULL;
-
- key = AddUncasted<HForceRepresentation>(key, Representation::Smi());
- IfBuilder positive_smi(this);
- positive_smi.If<HCompareNumericAndBranch>(key, graph()->GetConstant0(),
- Token::LT);
- positive_smi.ThenDeopt(DeoptimizeReason::kKeyIsNegative);
- positive_smi.End();
-
- HValue* constant_two = Add<HConstant>(2);
- HValue* elements = AddLoadElements(receiver, nullptr);
- HValue* elements_length = Add<HLoadNamedField>(
- elements, nullptr, HObjectAccess::ForFixedArrayLength());
- HValue* adjusted_length = AddUncasted<HSub>(elements_length, constant_two);
- IfBuilder in_range(this);
- in_range.If<HCompareNumericAndBranch>(key, adjusted_length, Token::LT);
- in_range.Then();
- {
- HValue* index = AddUncasted<HAdd>(key, constant_two);
- HInstruction* mapped_index =
- Add<HLoadKeyed>(elements, index, nullptr, nullptr, FAST_HOLEY_ELEMENTS,
- ALLOW_RETURN_HOLE);
-
- IfBuilder is_valid(this);
- is_valid.IfNot<HCompareObjectEqAndBranch>(mapped_index,
- graph()->GetConstantHole());
- is_valid.Then();
- {
- // TODO(mvstanton): I'd like to assert from this point, that if the
- // mapped_index is not the hole that it is indeed, a smi. An unnecessary
- // smi check is being emitted.
- HValue* the_context = Add<HLoadKeyed>(elements, graph()->GetConstant0(),
- nullptr, nullptr, FAST_ELEMENTS);
- STATIC_ASSERT(Context::kHeaderSize == FixedArray::kHeaderSize);
- if (is_load) {
- HValue* result =
- Add<HLoadKeyed>(the_context, mapped_index, nullptr, nullptr,
- FAST_ELEMENTS, ALLOW_RETURN_HOLE);
- environment()->Push(result);
- } else {
- DCHECK(value != NULL);
- Add<HStoreKeyed>(the_context, mapped_index, value, nullptr,
- FAST_ELEMENTS);
- environment()->Push(value);
- }
- }
- is_valid.Else();
- {
- HValue* result = UnmappedCase(elements, key, value);
- environment()->Push(is_load ? result : value);
- }
- is_valid.End();
- }
- in_range.Else();
- {
- HValue* result = UnmappedCase(elements, key, value);
- environment()->Push(is_load ? result : value);
- }
- in_range.End();
-
- return environment()->Pop();
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(Descriptor::kReceiver);
- HValue* key = GetParameter(Descriptor::kName);
-
- return EmitKeyedSloppyArguments(receiver, key, NULL);
-}
-
-
-Handle<Code> KeyedLoadSloppyArgumentsStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<KeyedStoreSloppyArgumentsStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(Descriptor::kReceiver);
- HValue* key = GetParameter(Descriptor::kName);
- HValue* value = GetParameter(Descriptor::kValue);
-
- return EmitKeyedSloppyArguments(receiver, key, value);
-}
-
-
-Handle<Code> KeyedStoreSloppyArgumentsStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
void CodeStubGraphBuilderBase::BuildStoreNamedField(
HValue* object, HValue* value, FieldIndex index,
Representation representation, bool transition_to_field) {
@@ -1099,99 +836,6 @@ void CodeStubGraphBuilderBase::BuildStoreNamedField(
template <>
-HValue* CodeStubGraphBuilder<StoreFieldStub>::BuildCodeStub() {
- BuildStoreNamedField(GetParameter(Descriptor::kReceiver),
- GetParameter(Descriptor::kValue), casted_stub()->index(),
- casted_stub()->representation(), false);
- return GetParameter(Descriptor::kValue);
-}
-
-
-Handle<Code> StoreFieldStub::GenerateCode() { return DoGenerateCode(this); }
-
-
-template <>
-HValue* CodeStubGraphBuilder<StoreTransitionStub>::BuildCodeStub() {
- HValue* object = GetParameter(StoreTransitionHelper::ReceiverIndex());
- HValue* value = GetParameter(StoreTransitionHelper::ValueIndex());
- StoreTransitionStub::StoreMode store_mode = casted_stub()->store_mode();
-
- if (store_mode != StoreTransitionStub::StoreMapOnly) {
- value = GetParameter(StoreTransitionHelper::ValueIndex());
- Representation representation = casted_stub()->representation();
- if (representation.IsDouble()) {
- // In case we are storing a double, assure that the value is a double
- // before manipulating the properties backing store. Otherwise the actual
- // store may deopt, leaving the backing store in an overallocated state.
- value = AddUncasted<HForceRepresentation>(value, representation);
- }
- }
-
- switch (store_mode) {
- case StoreTransitionStub::ExtendStorageAndStoreMapAndValue: {
- HValue* properties = Add<HLoadNamedField>(
- object, nullptr, HObjectAccess::ForPropertiesPointer());
- HValue* length = AddLoadFixedArrayLength(properties);
- HValue* delta =
- Add<HConstant>(static_cast<int32_t>(JSObject::kFieldsAdded));
- HValue* new_capacity = AddUncasted<HAdd>(length, delta);
-
- // Grow properties array.
- ElementsKind kind = FAST_ELEMENTS;
- Add<HBoundsCheck>(new_capacity,
- Add<HConstant>((Page::kMaxRegularHeapObjectSize -
- FixedArray::kHeaderSize) >>
- ElementsKindToShiftSize(kind)));
-
- // Reuse this code for properties backing store allocation.
- HValue* new_properties =
- BuildAllocateAndInitializeArray(kind, new_capacity);
-
- BuildCopyProperties(properties, new_properties, length, new_capacity);
-
- Add<HStoreNamedField>(object, HObjectAccess::ForPropertiesPointer(),
- new_properties);
- }
- // Fall through.
- case StoreTransitionStub::StoreMapAndValue:
- // Store the new value into the "extended" object.
- BuildStoreNamedField(object, value, casted_stub()->index(),
- casted_stub()->representation(), true);
- // Fall through.
-
- case StoreTransitionStub::StoreMapOnly:
- // And finally update the map.
- Add<HStoreNamedField>(object, HObjectAccess::ForMap(),
- GetParameter(StoreTransitionHelper::MapIndex()));
- break;
- }
- return value;
-}
-
-
-Handle<Code> StoreTransitionStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<StoreFastElementStub>::BuildCodeStub() {
- BuildUncheckedMonomorphicElementAccess(
- GetParameter(Descriptor::kReceiver), GetParameter(Descriptor::kName),
- GetParameter(Descriptor::kValue), casted_stub()->is_js_array(),
- casted_stub()->elements_kind(), STORE, NEVER_RETURN_HOLE,
- casted_stub()->store_mode());
-
- return GetParameter(Descriptor::kValue);
-}
-
-
-Handle<Code> StoreFastElementStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
ElementsKind const from_kind = casted_stub()->from_kind();
ElementsKind const to_kind = casted_stub()->to_kind();
@@ -1262,26 +906,26 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
HValue* left = GetParameter(Descriptor::kLeft);
HValue* right = GetParameter(Descriptor::kRight);
- Type* left_type = state.GetLeftType();
- Type* right_type = state.GetRightType();
- Type* result_type = state.GetResultType();
+ AstType* left_type = state.GetLeftType();
+ AstType* right_type = state.GetRightType();
+ AstType* result_type = state.GetResultType();
- DCHECK(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
- (state.HasSideEffects() || !result_type->Is(Type::None())));
+ DCHECK(!left_type->Is(AstType::None()) && !right_type->Is(AstType::None()) &&
+ (state.HasSideEffects() || !result_type->Is(AstType::None())));
HValue* result = NULL;
HAllocationMode allocation_mode(NOT_TENURED);
- if (state.op() == Token::ADD &&
- (left_type->Maybe(Type::String()) || right_type->Maybe(Type::String())) &&
- !left_type->Is(Type::String()) && !right_type->Is(Type::String())) {
+ if (state.op() == Token::ADD && (left_type->Maybe(AstType::String()) ||
+ right_type->Maybe(AstType::String())) &&
+ !left_type->Is(AstType::String()) && !right_type->Is(AstType::String())) {
// For the generic add stub a fast case for string addition is performance
// critical.
- if (left_type->Maybe(Type::String())) {
+ if (left_type->Maybe(AstType::String())) {
IfBuilder if_leftisstring(this);
if_leftisstring.If<HIsStringAndBranch>(left);
if_leftisstring.Then();
{
- Push(BuildBinaryOperation(state.op(), left, right, Type::String(),
+ Push(BuildBinaryOperation(state.op(), left, right, AstType::String(),
right_type, result_type,
state.fixed_right_arg(), allocation_mode));
}
@@ -1299,7 +943,7 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
if_rightisstring.Then();
{
Push(BuildBinaryOperation(state.op(), left, right, left_type,
- Type::String(), result_type,
+ AstType::String(), result_type,
state.fixed_right_arg(), allocation_mode));
}
if_rightisstring.Else();
@@ -1340,9 +984,9 @@ HValue* CodeStubGraphBuilder<BinaryOpWithAllocationSiteStub>::BuildCodeStub() {
HValue* left = GetParameter(Descriptor::kLeft);
HValue* right = GetParameter(Descriptor::kRight);
- Type* left_type = state.GetLeftType();
- Type* right_type = state.GetRightType();
- Type* result_type = state.GetResultType();
+ AstType* left_type = state.GetLeftType();
+ AstType* right_type = state.GetRightType();
+ AstType* result_type = state.GetResultType();
HAllocationMode allocation_mode(allocation_site);
return BuildBinaryOperation(state.op(), left, right, left_type, right_type,
@@ -1363,7 +1007,7 @@ HValue* CodeStubGraphBuilderBase::BuildToString(HValue* input, bool convert) {
if_inputissmi.Then();
{
// Convert the input smi to a string.
- Push(BuildNumberToString(input, Type::SignedSmall()));
+ Push(BuildNumberToString(input, AstType::SignedSmall()));
}
if_inputissmi.Else();
{
@@ -1399,10 +1043,10 @@ HValue* CodeStubGraphBuilderBase::BuildToString(HValue* input, bool convert) {
}
if_inputisprimitive.End();
// Convert the primitive to a string value.
- ToStringStub stub(isolate());
HValue* values[] = {context(), Pop()};
- Push(AddUncasted<HCallWithDescriptor>(Add<HConstant>(stub.GetCode()), 0,
- stub.GetCallInterfaceDescriptor(),
+ Callable toString = CodeFactory::ToString(isolate());
+ Push(AddUncasted<HCallWithDescriptor>(Add<HConstant>(toString.code()), 0,
+ toString.descriptor(),
ArrayVector(values)));
}
if_inputisstring.End();
@@ -1531,134 +1175,6 @@ HValue* CodeStubGraphBuilder<ToBooleanICStub>::BuildCodeInitializedStub() {
Handle<Code> ToBooleanICStub::GenerateCode() { return DoGenerateCode(this); }
template <>
-HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
- StoreGlobalStub* stub = casted_stub();
- HParameter* value = GetParameter(Descriptor::kValue);
- if (stub->check_global()) {
- // Check that the map of the global has not changed: use a placeholder map
- // that will be replaced later with the global object's map.
- HParameter* proxy = GetParameter(Descriptor::kReceiver);
- HValue* proxy_map =
- Add<HLoadNamedField>(proxy, nullptr, HObjectAccess::ForMap());
- HValue* global =
- Add<HLoadNamedField>(proxy_map, nullptr, HObjectAccess::ForPrototype());
- HValue* map_cell = Add<HConstant>(isolate()->factory()->NewWeakCell(
- StoreGlobalStub::global_map_placeholder(isolate())));
- HValue* expected_map = Add<HLoadNamedField>(
- map_cell, nullptr, HObjectAccess::ForWeakCellValue());
- HValue* map =
- Add<HLoadNamedField>(global, nullptr, HObjectAccess::ForMap());
- IfBuilder map_check(this);
- map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
- map_check.ThenDeopt(DeoptimizeReason::kUnknownMap);
- map_check.End();
- }
-
- HValue* weak_cell = Add<HConstant>(isolate()->factory()->NewWeakCell(
- StoreGlobalStub::property_cell_placeholder(isolate())));
- HValue* cell = Add<HLoadNamedField>(weak_cell, nullptr,
- HObjectAccess::ForWeakCellValue());
- Add<HCheckHeapObject>(cell);
- HObjectAccess access = HObjectAccess::ForPropertyCellValue();
- // Load the payload of the global parameter cell. A hole indicates that the
- // cell has been invalidated and that the store must be handled by the
- // runtime.
- HValue* cell_contents = Add<HLoadNamedField>(cell, nullptr, access);
-
- auto cell_type = stub->cell_type();
- if (cell_type == PropertyCellType::kConstant ||
- cell_type == PropertyCellType::kUndefined) {
- // This is always valid for all states a cell can be in.
- IfBuilder builder(this);
- builder.If<HCompareObjectEqAndBranch>(cell_contents, value);
- builder.Then();
- builder.ElseDeopt(
- DeoptimizeReason::kUnexpectedCellContentsInConstantGlobalStore);
- builder.End();
- } else {
- IfBuilder builder(this);
- HValue* hole_value = graph()->GetConstantHole();
- builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
- builder.Then();
- builder.Deopt(DeoptimizeReason::kUnexpectedCellContentsInGlobalStore);
- builder.Else();
- // When dealing with constant types, the type may be allowed to change, as
- // long as optimized code remains valid.
- if (cell_type == PropertyCellType::kConstantType) {
- switch (stub->constant_type()) {
- case PropertyCellConstantType::kSmi:
- access = access.WithRepresentation(Representation::Smi());
- break;
- case PropertyCellConstantType::kStableMap: {
- // It is sufficient here to check that the value and cell contents
- // have identical maps, no matter if they are stable or not or if they
- // are the maps that were originally in the cell or not. If optimized
- // code will deopt when a cell has a unstable map and if it has a
- // dependency on a stable map, it will deopt if the map destabilizes.
- Add<HCheckHeapObject>(value);
- Add<HCheckHeapObject>(cell_contents);
- HValue* expected_map = Add<HLoadNamedField>(cell_contents, nullptr,
- HObjectAccess::ForMap());
- HValue* map =
- Add<HLoadNamedField>(value, nullptr, HObjectAccess::ForMap());
- IfBuilder map_check(this);
- map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
- map_check.ThenDeopt(DeoptimizeReason::kUnknownMap);
- map_check.End();
- access = access.WithRepresentation(Representation::HeapObject());
- break;
- }
- }
- }
- Add<HStoreNamedField>(cell, access, value);
- builder.End();
- }
-
- return value;
-}
-
-
-Handle<Code> StoreGlobalStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
- HValue* object = GetParameter(StoreTransitionHelper::ReceiverIndex());
- HValue* key = GetParameter(StoreTransitionHelper::NameIndex());
- HValue* value = GetParameter(StoreTransitionHelper::ValueIndex());
- HValue* map = GetParameter(StoreTransitionHelper::MapIndex());
-
- if (FLAG_trace_elements_transitions) {
- // Tracing elements transitions is the job of the runtime.
- Add<HDeoptimize>(DeoptimizeReason::kTracingElementsTransitions,
- Deoptimizer::EAGER);
- } else {
- info()->MarkAsSavesCallerDoubles();
-
- BuildTransitionElementsKind(object, map,
- casted_stub()->from_kind(),
- casted_stub()->to_kind(),
- casted_stub()->is_jsarray());
-
- BuildUncheckedMonomorphicElementAccess(object, key, value,
- casted_stub()->is_jsarray(),
- casted_stub()->to_kind(),
- STORE, ALLOW_RETURN_HOLE,
- casted_stub()->store_mode());
- }
-
- return value;
-}
-
-
-Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
HValue* CodeStubGraphBuilder<LoadDictionaryElementStub>::BuildCodeStub() {
HValue* receiver = GetParameter(Descriptor::kReceiver);
HValue* key = GetParameter(Descriptor::kName);
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 2b71716dc3..b899943e98 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -6,6 +6,7 @@
#include <sstream>
+#include "src/ast/ast.h"
#include "src/bootstrapper.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
@@ -14,7 +15,6 @@
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
-#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -498,6 +498,140 @@ void KeyedLoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
assembler->KeyedLoadIC(&p);
}
+void StoreICTrampolineTFStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* vector = assembler->LoadTypeFeedbackVectorForStub();
+
+ CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
+ vector);
+ assembler->StoreIC(&p);
+}
+
+void StoreICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
+ vector);
+ assembler->StoreIC(&p);
+}
+
+void StoreMapStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* map = assembler->Parameter(Descriptor::kMap);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+
+ assembler->StoreObjectField(receiver, JSObject::kMapOffset, map);
+ assembler->Return(value);
+}
+
+void StoreTransitionStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* offset =
+ assembler->SmiUntag(assembler->Parameter(Descriptor::kFieldOffset));
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* map = assembler->Parameter(Descriptor::kMap);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ Label miss(assembler);
+
+ Representation representation = this->representation();
+ assembler->Comment("StoreTransitionStub: is_inobject: %d: representation: %s",
+ is_inobject(), representation.Mnemonic());
+
+ Node* prepared_value =
+ assembler->PrepareValueForWrite(value, representation, &miss);
+
+ if (store_mode() == StoreTransitionStub::ExtendStorageAndStoreMapAndValue) {
+ assembler->Comment("Extend storage");
+ assembler->ExtendPropertiesBackingStore(receiver);
+ } else {
+ DCHECK(store_mode() == StoreTransitionStub::StoreMapAndValue);
+ }
+
+ // Store the new value into the "extended" object.
+ assembler->Comment("Store value");
+ assembler->StoreNamedField(receiver, offset, is_inobject(), representation,
+ prepared_value, true);
+
+ // And finally update the map.
+ assembler->Comment("Store map");
+ assembler->StoreObjectField(receiver, JSObject::kMapOffset, map);
+ assembler->Return(value);
+
+ // Only store to tagged field never bails out.
+ if (!representation.IsTagged()) {
+ assembler->Bind(&miss);
+ {
+ assembler->Comment("Miss");
+ assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+ vector, receiver, name);
+ }
+ }
+}
+
+void ElementsTransitionAndStoreStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* key = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* map = assembler->Parameter(Descriptor::kMap);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->Comment(
+ "ElementsTransitionAndStoreStub: from_kind=%s, to_kind=%s,"
+ " is_jsarray=%d, store_mode=%d",
+ ElementsKindToString(from_kind()), ElementsKindToString(to_kind()),
+ is_jsarray(), store_mode());
+
+ Label miss(assembler);
+
+ if (FLAG_trace_elements_transitions) {
+ // Tracing elements transitions is the job of the runtime.
+ assembler->Goto(&miss);
+ } else {
+ assembler->TransitionElementsKind(receiver, map, from_kind(), to_kind(),
+ is_jsarray(), &miss);
+ assembler->EmitElementStore(receiver, key, value, is_jsarray(), to_kind(),
+ store_mode(), &miss);
+ assembler->Return(value);
+ }
+
+ assembler->Bind(&miss);
+ {
+ assembler->Comment("Miss");
+ assembler->TailCallRuntime(Runtime::kElementsTransitionAndStoreIC_Miss,
+ context, receiver, key, value, map, slot,
+ vector);
+ }
+}
+
void AllocateHeapNumberStub::GenerateAssembly(
CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
@@ -599,9 +733,8 @@ compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
// Check if the {rhs} is a HeapNumber.
Label if_rhsisnumber(assembler),
if_rhsisnotnumber(assembler, Label::kDeferred);
- Node* number_map = assembler->HeapNumberMapConstant();
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &if_rhsisnumber, &if_rhsisnotnumber);
+ assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
+ &if_rhsisnotnumber);
assembler->Bind(&if_rhsisnumber);
{
@@ -618,9 +751,7 @@ compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
// Check if the {rhs} is a String.
Label if_rhsisstring(assembler, Label::kDeferred),
if_rhsisnotstring(assembler, Label::kDeferred);
- assembler->Branch(assembler->Int32LessThan(
- rhs_instance_type,
- assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
&if_rhsisstring, &if_rhsisnotstring);
assembler->Bind(&if_rhsisstring);
@@ -636,9 +767,7 @@ compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
Label if_rhsisreceiver(assembler, Label::kDeferred),
if_rhsisnotreceiver(assembler, Label::kDeferred);
assembler->Branch(
- assembler->Int32LessThanOrEqual(
- assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
- rhs_instance_type),
+ assembler->IsJSReceiverInstanceType(rhs_instance_type),
&if_rhsisreceiver, &if_rhsisnotreceiver);
assembler->Bind(&if_rhsisreceiver);
@@ -670,9 +799,7 @@ compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
// Check if {lhs} is a String.
Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
- assembler->Branch(assembler->Int32LessThan(
- lhs_instance_type,
- assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
&if_lhsisstring, &if_lhsisnotstring);
assembler->Bind(&if_lhsisstring);
@@ -714,9 +841,7 @@ compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
Label if_lhsisreceiver(assembler, Label::kDeferred),
if_lhsisnotreceiver(assembler, Label::kDeferred);
assembler->Branch(
- assembler->Int32LessThanOrEqual(
- assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
- lhs_instance_type),
+ assembler->IsJSReceiverInstanceType(lhs_instance_type),
&if_lhsisreceiver, &if_lhsisnotreceiver);
assembler->Bind(&if_lhsisreceiver);
@@ -746,9 +871,7 @@ compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
// Check if {rhs} is a String.
Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
- assembler->Branch(assembler->Int32LessThan(
- rhs_instance_type,
- assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
&if_rhsisstring, &if_rhsisnotstring);
assembler->Bind(&if_rhsisstring);
@@ -791,9 +914,7 @@ compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
Label if_rhsisreceiver(assembler, Label::kDeferred),
if_rhsisnotreceiver(assembler, Label::kDeferred);
assembler->Branch(
- assembler->Int32LessThanOrEqual(
- assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
- rhs_instance_type),
+ assembler->IsJSReceiverInstanceType(rhs_instance_type),
&if_rhsisreceiver, &if_rhsisnotreceiver);
assembler->Bind(&if_rhsisreceiver);
@@ -822,9 +943,7 @@ compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
Label if_lhsisreceiver(assembler, Label::kDeferred),
if_lhsisnotreceiver(assembler);
assembler->Branch(
- assembler->Int32LessThanOrEqual(
- assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
- lhs_instance_type),
+ assembler->IsJSReceiverInstanceType(lhs_instance_type),
&if_lhsisreceiver, &if_lhsisnotreceiver);
assembler->Bind(&if_lhsisreceiver);
@@ -842,9 +961,7 @@ compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
Label if_rhsisreceiver(assembler, Label::kDeferred),
if_rhsisnotreceiver(assembler, Label::kDeferred);
assembler->Branch(
- assembler->Int32LessThanOrEqual(
- assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
- rhs_instance_type),
+ assembler->IsJSReceiverInstanceType(rhs_instance_type),
&if_rhsisreceiver, &if_rhsisnotreceiver);
assembler->Bind(&if_rhsisreceiver);
@@ -917,7 +1034,7 @@ compiler::Node* AddWithFeedbackStub::Generate(
// Shared entry for floating point addition.
Label do_fadd(assembler), end(assembler),
- call_add_stub(assembler, Label::kDeferred);
+ do_add_any(assembler, Label::kDeferred), call_add_stub(assembler);
Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
var_fadd_rhs(assembler, MachineRepresentation::kFloat64),
var_type_feedback(assembler, MachineRepresentation::kWord32),
@@ -965,9 +1082,7 @@ compiler::Node* AddWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
- assembler->GotoUnless(
- assembler->WordEqual(rhs_map, assembler->HeapNumberMapConstant()),
- &call_add_stub);
+ assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map), &do_add_any);
var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
@@ -977,14 +1092,14 @@ compiler::Node* AddWithFeedbackStub::Generate(
assembler->Bind(&if_lhsisnotsmi);
{
+ Label check_string(assembler);
+
// Load the map of {lhs}.
Node* lhs_map = assembler->LoadMap(lhs);
// Check if {lhs} is a HeapNumber.
Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
- assembler->GotoUnless(
- assembler->WordEqual(lhs_map, assembler->HeapNumberMapConstant()),
- &call_add_stub);
+ assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map), &check_string);
// Check if the {rhs} is Smi.
Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
@@ -1003,14 +1118,34 @@ compiler::Node* AddWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
- Node* number_map = assembler->HeapNumberMapConstant();
- assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
- &call_add_stub);
+ assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map), &do_add_any);
var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
assembler->Goto(&do_fadd);
}
+
+ assembler->Bind(&check_string);
+ {
+ // Check if the {rhs} is a smi, and exit the string check early if it is.
+ assembler->GotoIf(assembler->WordIsSmi(rhs), &do_add_any);
+
+ Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
+
+ // Exit unless {lhs} is a string
+ assembler->GotoUnless(assembler->IsStringInstanceType(lhs_instance_type),
+ &do_add_any);
+
+ Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+ // Exit unless {rhs} is a string
+ assembler->GotoUnless(assembler->IsStringInstanceType(rhs_instance_type),
+ &do_add_any);
+
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kString));
+ assembler->Goto(&call_add_stub);
+ }
}
assembler->Bind(&do_fadd);
@@ -1024,10 +1159,15 @@ compiler::Node* AddWithFeedbackStub::Generate(
assembler->Goto(&end);
}
- assembler->Bind(&call_add_stub);
+ assembler->Bind(&do_add_any);
{
var_type_feedback.Bind(
assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->Goto(&call_add_stub);
+ }
+
+ assembler->Bind(&call_add_stub);
+ {
Callable callable = CodeFactory::Add(assembler->isolate());
var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
assembler->Goto(&end);
@@ -1111,9 +1251,8 @@ compiler::Node* SubtractStub::Generate(CodeStubAssembler* assembler,
// Check if {rhs} is a HeapNumber.
Label if_rhsisnumber(assembler),
if_rhsisnotnumber(assembler, Label::kDeferred);
- Node* number_map = assembler->HeapNumberMapConstant();
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &if_rhsisnumber, &if_rhsisnotnumber);
+ assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
+ &if_rhsisnotnumber);
assembler->Bind(&if_rhsisnumber);
{
@@ -1274,9 +1413,8 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- assembler->GotoUnless(
- assembler->WordEqual(rhs_map, assembler->HeapNumberMapConstant()),
- &call_subtract_stub);
+ assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
+ &call_subtract_stub);
// Perform a floating point subtraction.
var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
@@ -1291,9 +1429,8 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
Node* lhs_map = assembler->LoadMap(lhs);
// Check if the {lhs} is a HeapNumber.
- assembler->GotoUnless(
- assembler->WordEqual(lhs_map, assembler->HeapNumberMapConstant()),
- &call_subtract_stub);
+ assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
+ &call_subtract_stub);
// Check if the {rhs} is a Smi.
Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
@@ -1313,9 +1450,8 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
- assembler->GotoUnless(
- assembler->WordEqual(rhs_map, assembler->HeapNumberMapConstant()),
- &call_subtract_stub);
+ assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
+ &call_subtract_stub);
// Perform a floating point subtraction.
var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
@@ -1713,7 +1849,7 @@ compiler::Node* DivideStub::Generate(CodeStubAssembler* assembler,
Node* untagged_result =
assembler->Int32Div(untagged_dividend, untagged_divisor);
Node* truncated =
- assembler->IntPtrMul(untagged_result, untagged_divisor);
+ assembler->Int32Mul(untagged_result, untagged_divisor);
// Do floating point division if the remainder is not 0.
assembler->GotoIf(
assembler->Word32NotEqual(untagged_dividend, truncated), &bailout);
@@ -1916,7 +2052,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Node* untagged_result =
assembler->Int32Div(untagged_dividend, untagged_divisor);
- Node* truncated = assembler->IntPtrMul(untagged_result, untagged_divisor);
+ Node* truncated = assembler->Int32Mul(untagged_result, untagged_divisor);
// Do floating point division if the remainder is not 0.
assembler->GotoIf(assembler->Word32NotEqual(untagged_dividend, truncated),
&bailout);
@@ -2441,8 +2577,7 @@ compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
Label if_valueisnumber(assembler),
if_valuenotnumber(assembler, Label::kDeferred);
Node* value_map = assembler->LoadMap(value);
- Node* number_map = assembler->HeapNumberMapConstant();
- assembler->Branch(assembler->WordEqual(value_map, number_map),
+ assembler->Branch(assembler->IsHeapNumberMap(value_map),
&if_valueisnumber, &if_valuenotnumber);
assembler->Bind(&if_valueisnumber);
@@ -2545,8 +2680,7 @@ compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
Label if_valueisnumber(assembler),
if_valuenotnumber(assembler, Label::kDeferred);
Node* value_map = assembler->LoadMap(value);
- Node* number_map = assembler->HeapNumberMapConstant();
- assembler->Branch(assembler->WordEqual(value_map, number_map),
+ assembler->Branch(assembler->IsHeapNumberMap(value_map),
&if_valueisnumber, &if_valuenotnumber);
assembler->Bind(&if_valueisnumber);
@@ -2587,6 +2721,15 @@ compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
return result_var.value();
}
+// ES6 section 21.1.3.19 String.prototype.substring ( start, end )
+compiler::Node* SubStringStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* string,
+ compiler::Node* from,
+ compiler::Node* to,
+ compiler::Node* context) {
+ return assembler->SubString(context, string, from, to);
+}
+
// ES6 section 7.1.13 ToObject (argument)
void ToObjectStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
@@ -2601,43 +2744,38 @@ void ToObjectStub::GenerateAssembly(CodeStubAssembler* assembler) const {
Node* context = assembler->Parameter(Descriptor::kContext);
Variable constructor_function_index_var(assembler,
- MachineRepresentation::kWord32);
+ MachineType::PointerRepresentation());
assembler->Branch(assembler->WordIsSmi(object), &if_number, &if_notsmi);
assembler->Bind(&if_notsmi);
Node* map = assembler->LoadMap(object);
- assembler->GotoIf(
- assembler->WordEqual(map, assembler->HeapNumberMapConstant()),
- &if_number);
+ assembler->GotoIf(assembler->IsHeapNumberMap(map), &if_number);
Node* instance_type = assembler->LoadMapInstanceType(map);
- assembler->GotoIf(
- assembler->Int32GreaterThanOrEqual(
- instance_type, assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE)),
- &if_jsreceiver);
-
- Node* constructor_function_index = assembler->LoadObjectField(
- map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
- MachineType::Uint8());
- assembler->GotoIf(
- assembler->Word32Equal(
- constructor_function_index,
- assembler->Int32Constant(Map::kNoConstructorFunctionIndex)),
- &if_noconstructor);
+ assembler->GotoIf(assembler->IsJSReceiverInstanceType(instance_type),
+ &if_jsreceiver);
+
+ Node* constructor_function_index =
+ assembler->LoadMapConstructorFunctionIndex(map);
+ assembler->GotoIf(assembler->WordEqual(constructor_function_index,
+ assembler->IntPtrConstant(
+ Map::kNoConstructorFunctionIndex)),
+ &if_noconstructor);
constructor_function_index_var.Bind(constructor_function_index);
assembler->Goto(&if_wrapjsvalue);
assembler->Bind(&if_number);
constructor_function_index_var.Bind(
- assembler->Int32Constant(Context::NUMBER_FUNCTION_INDEX));
+ assembler->IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
assembler->Goto(&if_wrapjsvalue);
assembler->Bind(&if_wrapjsvalue);
Node* native_context = assembler->LoadNativeContext(context);
Node* constructor = assembler->LoadFixedArrayElement(
- native_context, constructor_function_index_var.value());
+ native_context, constructor_function_index_var.value(), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS);
Node* initial_map = assembler->LoadObjectField(
constructor, JSFunction::kPrototypeOrInitialMapOffset);
Node* js_value = assembler->Allocate(JSValue::kSize);
@@ -2679,9 +2817,7 @@ compiler::Node* TypeofStub::Generate(CodeStubAssembler* assembler,
Node* map = assembler->LoadMap(value);
- assembler->GotoIf(
- assembler->WordEqual(map, assembler->HeapNumberMapConstant()),
- &return_number);
+ assembler->GotoIf(assembler->IsHeapNumberMap(map), &return_number);
Node* instance_type = assembler->LoadMapInstanceType(map);
@@ -2703,15 +2839,11 @@ compiler::Node* TypeofStub::Generate(CodeStubAssembler* assembler,
assembler->Int32Constant(0)),
&return_undefined);
- assembler->GotoIf(
- assembler->Int32GreaterThanOrEqual(
- instance_type, assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE)),
- &return_object);
+ assembler->GotoIf(assembler->IsJSReceiverInstanceType(instance_type),
+ &return_object);
- assembler->GotoIf(
- assembler->Int32LessThan(instance_type,
- assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
- &return_string);
+ assembler->GotoIf(assembler->IsStringInstanceType(instance_type),
+ &return_string);
#define SIMD128_BRANCH(TYPE, Type, type, lane_count, lane_type) \
Label return_##type(assembler); \
@@ -2908,11 +3040,10 @@ compiler::Node* GenerateAbstractRelationalComparison(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
- Node* number_map = assembler->HeapNumberMapConstant();
Label if_rhsisnumber(assembler),
if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &if_rhsisnumber, &if_rhsisnotnumber);
+ assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
+ &if_rhsisnotnumber);
assembler->Bind(&if_rhsisnumber);
{
@@ -3028,9 +3159,7 @@ compiler::Node* GenerateAbstractRelationalComparison(
// Check if {lhs} is a String.
Label if_lhsisstring(assembler),
if_lhsisnotstring(assembler, Label::kDeferred);
- assembler->Branch(assembler->Int32LessThan(
- lhs_instance_type,
- assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
&if_lhsisstring, &if_lhsisnotstring);
assembler->Bind(&if_lhsisstring);
@@ -3041,10 +3170,9 @@ compiler::Node* GenerateAbstractRelationalComparison(
// Check if {rhs} is also a String.
Label if_rhsisstring(assembler, Label::kDeferred),
if_rhsisnotstring(assembler, Label::kDeferred);
- assembler->Branch(assembler->Int32LessThan(
- rhs_instance_type, assembler->Int32Constant(
- FIRST_NONSTRING_TYPE)),
- &if_rhsisstring, &if_rhsisnotstring);
+ assembler->Branch(
+ assembler->IsStringInstanceType(rhs_instance_type),
+ &if_rhsisstring, &if_rhsisnotstring);
assembler->Bind(&if_rhsisstring);
{
@@ -3088,9 +3216,7 @@ compiler::Node* GenerateAbstractRelationalComparison(
Label if_rhsisreceiver(assembler, Label::kDeferred),
if_rhsisnotreceiver(assembler, Label::kDeferred);
assembler->Branch(
- assembler->Int32LessThanOrEqual(
- assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
- rhs_instance_type),
+ assembler->IsJSReceiverInstanceType(rhs_instance_type),
&if_rhsisreceiver, &if_rhsisnotreceiver);
assembler->Bind(&if_rhsisreceiver);
@@ -3122,9 +3248,7 @@ compiler::Node* GenerateAbstractRelationalComparison(
Label if_lhsisreceiver(assembler, Label::kDeferred),
if_lhsisnotreceiver(assembler, Label::kDeferred);
assembler->Branch(
- assembler->Int32LessThanOrEqual(
- assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
- lhs_instance_type),
+ assembler->IsJSReceiverInstanceType(lhs_instance_type),
&if_lhsisreceiver, &if_lhsisnotreceiver);
assembler->Bind(&if_lhsisreceiver);
@@ -3218,10 +3342,9 @@ void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
Node* value_map = assembler->LoadMap(value);
// Check if {value} (and therefore {rhs}) is a HeapNumber.
- Node* number_map = assembler->HeapNumberMapConstant();
Label if_valueisnumber(assembler), if_valueisnotnumber(assembler);
- assembler->Branch(assembler->WordEqual(value_map, number_map),
- &if_valueisnumber, &if_valueisnotnumber);
+ assembler->Branch(assembler->IsHeapNumberMap(value_map), &if_valueisnumber,
+ &if_valueisnotnumber);
assembler->Bind(&if_valueisnumber);
{
@@ -3342,10 +3465,9 @@ compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
// Check if the {rhs} is a String.
Label if_rhsisstring(assembler, Label::kDeferred),
if_rhsisnotstring(assembler);
- assembler->Branch(assembler->Int32LessThan(
- rhs_instance_type, assembler->Int32Constant(
- FIRST_NONSTRING_TYPE)),
- &if_rhsisstring, &if_rhsisnotstring);
+ assembler->Branch(
+ assembler->IsStringInstanceType(rhs_instance_type),
+ &if_rhsisstring, &if_rhsisnotstring);
assembler->Bind(&if_rhsisstring);
{
@@ -3358,9 +3480,8 @@ compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
assembler->Bind(&if_rhsisnotstring);
{
// Check if the {rhs} is a Boolean.
- Node* boolean_map = assembler->BooleanMapConstant();
Label if_rhsisboolean(assembler), if_rhsisnotboolean(assembler);
- assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
+ assembler->Branch(assembler->IsBooleanMap(rhs_map),
&if_rhsisboolean, &if_rhsisnotboolean);
assembler->Bind(&if_rhsisboolean);
@@ -3378,9 +3499,7 @@ compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
Label if_rhsisreceiver(assembler, Label::kDeferred),
if_rhsisnotreceiver(assembler);
assembler->Branch(
- assembler->Int32LessThanOrEqual(
- assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
- rhs_instance_type),
+ assembler->IsJSReceiverInstanceType(rhs_instance_type),
&if_rhsisreceiver, &if_rhsisnotreceiver);
assembler->Bind(&if_rhsisreceiver);
@@ -3462,10 +3581,9 @@ compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
// Check if {rhs} is also a String.
Label if_rhsisstring(assembler, Label::kDeferred),
if_rhsisnotstring(assembler);
- assembler->Branch(assembler->Int32LessThan(
- rhs_instance_type, assembler->Int32Constant(
- FIRST_NONSTRING_TYPE)),
- &if_rhsisstring, &if_rhsisnotstring);
+ assembler->Branch(
+ assembler->IsStringInstanceType(rhs_instance_type),
+ &if_rhsisstring, &if_rhsisnotstring);
assembler->Bind(&if_rhsisstring);
{
@@ -3514,9 +3632,7 @@ compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
Label if_rhsisstring(assembler, Label::kDeferred),
if_rhsisnotstring(assembler);
assembler->Branch(
- assembler->Int32LessThan(
- rhs_instance_type,
- assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ assembler->IsStringInstanceType(rhs_instance_type),
&if_rhsisstring, &if_rhsisnotstring);
assembler->Bind(&if_rhsisstring);
@@ -3534,9 +3650,7 @@ compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
if_rhsisnotreceiver(assembler);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
assembler->Branch(
- assembler->Int32LessThanOrEqual(
- assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
- rhs_instance_type),
+ assembler->IsJSReceiverInstanceType(rhs_instance_type),
&if_rhsisreceiver, &if_rhsisnotreceiver);
assembler->Bind(&if_rhsisreceiver);
@@ -3556,8 +3670,7 @@ compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
// Check if {rhs} is a Boolean.
Label if_rhsisboolean(assembler),
if_rhsisnotboolean(assembler);
- Node* boolean_map = assembler->BooleanMapConstant();
- assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
+ assembler->Branch(assembler->IsBooleanMap(rhs_map),
&if_rhsisboolean, &if_rhsisnotboolean);
assembler->Bind(&if_rhsisboolean);
@@ -3625,9 +3738,7 @@ compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
assembler->Branch(
- assembler->Int32LessThanOrEqual(
- assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
- rhs_instance_type),
+ assembler->IsJSReceiverInstanceType(rhs_instance_type),
&if_rhsisreceiver, &if_rhsisnotreceiver);
assembler->Bind(&if_rhsisreceiver);
@@ -3672,9 +3783,7 @@ compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
assembler->Branch(
- assembler->Int32LessThanOrEqual(
- assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
- rhs_instance_type),
+ assembler->IsJSReceiverInstanceType(rhs_instance_type),
&if_rhsisreceiver, &if_rhsisnotreceiver);
assembler->Bind(&if_rhsisreceiver);
@@ -3702,9 +3811,7 @@ compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
assembler->Branch(
- assembler->Int32LessThanOrEqual(
- assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
- rhs_instance_type),
+ assembler->IsJSReceiverInstanceType(rhs_instance_type),
&if_rhsisreceiver, &if_rhsisnotreceiver);
assembler->Bind(&if_rhsisreceiver);
@@ -3940,9 +4047,7 @@ compiler::Node* GenerateStrictEqual(CodeStubAssembler* assembler,
// Check if {lhs} is a String.
Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
- assembler->Branch(assembler->Int32LessThan(
- lhs_instance_type,
- assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
&if_lhsisstring, &if_lhsisnotstring);
assembler->Bind(&if_lhsisstring);
@@ -3953,10 +4058,9 @@ compiler::Node* GenerateStrictEqual(CodeStubAssembler* assembler,
// Check if {rhs} is also a String.
Label if_rhsisstring(assembler, Label::kDeferred),
if_rhsisnotstring(assembler);
- assembler->Branch(assembler->Int32LessThan(
- rhs_instance_type, assembler->Int32Constant(
- FIRST_NONSTRING_TYPE)),
- &if_rhsisstring, &if_rhsisnotstring);
+ assembler->Branch(
+ assembler->IsStringInstanceType(rhs_instance_type),
+ &if_rhsisstring, &if_rhsisnotstring);
assembler->Bind(&if_rhsisstring);
{
@@ -4057,381 +4161,235 @@ compiler::Node* GenerateStrictEqual(CodeStubAssembler* assembler,
return result.value();
}
-void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
- RelationalComparisonMode mode) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+} // namespace
- Label if_less(assembler), if_equal(assembler), if_greater(assembler);
+void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ // For now we only support receiver_is_holder.
+ DCHECK(receiver_is_holder());
+ Node* holder = receiver;
+ Node* map = assembler->LoadMap(receiver);
+ Node* descriptors = assembler->LoadMapDescriptors(map);
+ Node* value_index =
+ assembler->IntPtrConstant(DescriptorArray::ToValueIndex(index()));
+ Node* callback = assembler->LoadFixedArrayElement(
+ descriptors, value_index, 0, CodeStubAssembler::INTPTR_PARAMETERS);
+ assembler->TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
+ holder, callback);
+}
- // Fast check to see if {lhs} and {rhs} refer to the same String object.
- Label if_same(assembler), if_notsame(assembler);
- assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+void StoreFieldStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
- assembler->Bind(&if_same);
- assembler->Goto(&if_equal);
+ FieldIndex index = this->index();
+ Representation representation = this->representation();
- assembler->Bind(&if_notsame);
- {
- // Load instance types of {lhs} and {rhs}.
- Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
- Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
-
- // Combine the instance types into a single 16-bit value, so we can check
- // both of them at once.
- Node* both_instance_types = assembler->Word32Or(
- lhs_instance_type,
- assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
-
- // Check that both {lhs} and {rhs} are flat one-byte strings.
- int const kBothSeqOneByteStringMask =
- kStringEncodingMask | kStringRepresentationMask |
- ((kStringEncodingMask | kStringRepresentationMask) << 8);
- int const kBothSeqOneByteStringTag =
- kOneByteStringTag | kSeqStringTag |
- ((kOneByteStringTag | kSeqStringTag) << 8);
- Label if_bothonebyteseqstrings(assembler),
- if_notbothonebyteseqstrings(assembler);
- assembler->Branch(assembler->Word32Equal(
- assembler->Word32And(both_instance_types,
- assembler->Int32Constant(
- kBothSeqOneByteStringMask)),
- assembler->Int32Constant(kBothSeqOneByteStringTag)),
- &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
-
- assembler->Bind(&if_bothonebyteseqstrings);
- {
- // Load the length of {lhs} and {rhs}.
- Node* lhs_length = assembler->LoadStringLength(lhs);
- Node* rhs_length = assembler->LoadStringLength(rhs);
+ assembler->Comment("StoreFieldStub: inobject=%d, offset=%d, rep=%s",
+ index.is_inobject(), index.offset(),
+ representation.Mnemonic());
- // Determine the minimum length.
- Node* length = assembler->SmiMin(lhs_length, rhs_length);
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
- // Compute the effective offset of the first character.
- Node* begin = assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
- kHeapObjectTag);
+ Label miss(assembler);
- // Compute the first offset after the string from the length.
- Node* end = assembler->IntPtrAdd(begin, assembler->SmiUntag(length));
+ Node* prepared_value =
+ assembler->PrepareValueForWrite(value, representation, &miss);
+ assembler->StoreNamedField(receiver, index, representation, prepared_value,
+ false);
+ assembler->Return(value);
- // Loop over the {lhs} and {rhs} strings to see if they are equal.
- Variable var_offset(assembler, MachineType::PointerRepresentation());
- Label loop(assembler, &var_offset);
- var_offset.Bind(begin);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- // Check if {offset} equals {end}.
- Node* offset = var_offset.value();
- Label if_done(assembler), if_notdone(assembler);
- assembler->Branch(assembler->WordEqual(offset, end), &if_done,
- &if_notdone);
+ // Only stores to tagged field can't bailout.
+ if (!representation.IsTagged()) {
+ assembler->Bind(&miss);
+ {
+ assembler->Comment("Miss");
+ assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+ vector, receiver, name);
+ }
+ }
+}
- assembler->Bind(&if_notdone);
- {
- // Load the next characters from {lhs} and {rhs}.
- Node* lhs_value = assembler->Load(MachineType::Uint8(), lhs, offset);
- Node* rhs_value = assembler->Load(MachineType::Uint8(), rhs, offset);
+void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
- // Check if the characters match.
- Label if_valueissame(assembler), if_valueisnotsame(assembler);
- assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
- &if_valueissame, &if_valueisnotsame);
+ assembler->Comment(
+ "StoreGlobalStub: cell_type=%d, constant_type=%d, check_global=%d",
+ cell_type(), PropertyCellType::kConstantType == cell_type()
+ ? static_cast<int>(constant_type())
+ : -1,
+ check_global());
- assembler->Bind(&if_valueissame);
- {
- // Advance to next character.
- var_offset.Bind(
- assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
- }
- assembler->Goto(&loop);
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
- assembler->Bind(&if_valueisnotsame);
- assembler->BranchIf(assembler->Uint32LessThan(lhs_value, rhs_value),
- &if_less, &if_greater);
- }
+ Label miss(assembler);
+
+ if (check_global()) {
+ // Check that the map of the global has not changed: use a placeholder map
+ // that will be replaced later with the global object's map.
+ Node* proxy_map = assembler->LoadMap(receiver);
+ Node* global = assembler->LoadObjectField(proxy_map, Map::kPrototypeOffset);
+ Node* map_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
+ StoreGlobalStub::global_map_placeholder(isolate())));
+ Node* expected_map = assembler->LoadWeakCellValue(map_cell);
+ Node* map = assembler->LoadMap(global);
+ assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
+ }
- assembler->Bind(&if_done);
- {
- // All characters up to the min length are equal, decide based on
- // string length.
- Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
- assembler->Branch(assembler->SmiEqual(lhs_length, rhs_length),
- &if_lengthisequal, &if_lengthisnotequal);
-
- assembler->Bind(&if_lengthisequal);
- assembler->Goto(&if_equal);
-
- assembler->Bind(&if_lengthisnotequal);
- assembler->BranchIfSmiLessThan(lhs_length, rhs_length, &if_less,
- &if_greater);
- }
- }
- }
+ Node* weak_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
+ StoreGlobalStub::property_cell_placeholder(isolate())));
+ Node* cell = assembler->LoadWeakCellValue(weak_cell);
+ assembler->GotoIf(assembler->WordIsSmi(cell), &miss);
- assembler->Bind(&if_notbothonebyteseqstrings);
- {
- // TODO(bmeurer): Add fast case support for flattened cons strings;
- // also add support for two byte string relational comparisons.
- switch (mode) {
- case kLessThan:
- assembler->TailCallRuntime(Runtime::kStringLessThan, context, lhs,
- rhs);
- break;
- case kLessThanOrEqual:
- assembler->TailCallRuntime(Runtime::kStringLessThanOrEqual, context,
- lhs, rhs);
- break;
- case kGreaterThan:
- assembler->TailCallRuntime(Runtime::kStringGreaterThan, context, lhs,
- rhs);
+ // Load the payload of the global parameter cell. A hole indicates that the
+ // cell has been invalidated and that the store must be handled by the
+ // runtime.
+ Node* cell_contents =
+ assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
+
+ PropertyCellType cell_type = this->cell_type();
+ if (cell_type == PropertyCellType::kConstant ||
+ cell_type == PropertyCellType::kUndefined) {
+ // This is always valid for all states a cell can be in.
+ assembler->GotoIf(assembler->WordNotEqual(cell_contents, value), &miss);
+ } else {
+ assembler->GotoIf(assembler->IsTheHole(cell_contents), &miss);
+
+ // When dealing with constant types, the type may be allowed to change, as
+ // long as optimized code remains valid.
+ bool value_is_smi = false;
+ if (cell_type == PropertyCellType::kConstantType) {
+ switch (constant_type()) {
+ case PropertyCellConstantType::kSmi:
+ assembler->GotoUnless(assembler->WordIsSmi(value), &miss);
+ value_is_smi = true;
break;
- case kGreaterThanOrEqual:
- assembler->TailCallRuntime(Runtime::kStringGreaterThanOrEqual,
- context, lhs, rhs);
+ case PropertyCellConstantType::kStableMap: {
+ // It is sufficient here to check that the value and cell contents
+ // have identical maps, no matter if they are stable or not or if they
+ // are the maps that were originally in the cell or not. If optimized
+ // code will deopt when a cell has a unstable map and if it has a
+ // dependency on a stable map, it will deopt if the map destabilizes.
+ assembler->GotoIf(assembler->WordIsSmi(value), &miss);
+ assembler->GotoIf(assembler->WordIsSmi(cell_contents), &miss);
+ Node* expected_map = assembler->LoadMap(cell_contents);
+ Node* map = assembler->LoadMap(value);
+ assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
break;
+ }
}
}
+ if (value_is_smi) {
+ assembler->StoreObjectFieldNoWriteBarrier(
+ cell, PropertyCell::kValueOffset, value);
+ } else {
+ assembler->StoreObjectField(cell, PropertyCell::kValueOffset, value);
+ }
}
- assembler->Bind(&if_less);
- switch (mode) {
- case kLessThan:
- case kLessThanOrEqual:
- assembler->Return(assembler->BooleanConstant(true));
- break;
-
- case kGreaterThan:
- case kGreaterThanOrEqual:
- assembler->Return(assembler->BooleanConstant(false));
- break;
- }
-
- assembler->Bind(&if_equal);
- switch (mode) {
- case kLessThan:
- case kGreaterThan:
- assembler->Return(assembler->BooleanConstant(false));
- break;
-
- case kLessThanOrEqual:
- case kGreaterThanOrEqual:
- assembler->Return(assembler->BooleanConstant(true));
- break;
- }
-
- assembler->Bind(&if_greater);
- switch (mode) {
- case kLessThan:
- case kLessThanOrEqual:
- assembler->Return(assembler->BooleanConstant(false));
- break;
+ assembler->Return(value);
- case kGreaterThan:
- case kGreaterThanOrEqual:
- assembler->Return(assembler->BooleanConstant(true));
- break;
+ assembler->Bind(&miss);
+ {
+ assembler->Comment("Miss");
+ assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+ vector, receiver, name);
}
}
-void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
- // Here's pseudo-code for the algorithm below in case of kDontNegateResult
- // mode; for kNegateResult mode we properly negate the result.
- //
- // if (lhs == rhs) return true;
- // if (lhs->length() != rhs->length()) return false;
- // if (lhs->IsInternalizedString() && rhs->IsInternalizedString()) {
- // return false;
- // }
- // if (lhs->IsSeqOneByteString() && rhs->IsSeqOneByteString()) {
- // for (i = 0; i != lhs->length(); ++i) {
- // if (lhs[i] != rhs[i]) return false;
- // }
- // return true;
- // }
- // return %StringEqual(lhs, rhs);
-
+void KeyedLoadSloppyArgumentsStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* key = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
- Label if_equal(assembler), if_notequal(assembler);
+ Label miss(assembler);
- // Fast check to see if {lhs} and {rhs} refer to the same String object.
- Label if_same(assembler), if_notsame(assembler);
- assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
-
- assembler->Bind(&if_same);
- assembler->Goto(&if_equal);
+ Node* result = assembler->LoadKeyedSloppyArguments(receiver, key, &miss);
+ assembler->Return(result);
- assembler->Bind(&if_notsame);
+ assembler->Bind(&miss);
{
- // The {lhs} and {rhs} don't refer to the exact same String object.
-
- // Load the length of {lhs} and {rhs}.
- Node* lhs_length = assembler->LoadStringLength(lhs);
- Node* rhs_length = assembler->LoadStringLength(rhs);
-
- // Check if the lengths of {lhs} and {rhs} are equal.
- Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
- assembler->Branch(assembler->WordEqual(lhs_length, rhs_length),
- &if_lengthisequal, &if_lengthisnotequal);
-
- assembler->Bind(&if_lengthisequal);
- {
- // Load instance types of {lhs} and {rhs}.
- Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
- Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
-
- // Combine the instance types into a single 16-bit value, so we can check
- // both of them at once.
- Node* both_instance_types = assembler->Word32Or(
- lhs_instance_type,
- assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
-
- // Check if both {lhs} and {rhs} are internalized.
- int const kBothInternalizedMask =
- kIsNotInternalizedMask | (kIsNotInternalizedMask << 8);
- int const kBothInternalizedTag =
- kInternalizedTag | (kInternalizedTag << 8);
- Label if_bothinternalized(assembler), if_notbothinternalized(assembler);
- assembler->Branch(assembler->Word32Equal(
- assembler->Word32And(both_instance_types,
- assembler->Int32Constant(
- kBothInternalizedMask)),
- assembler->Int32Constant(kBothInternalizedTag)),
- &if_bothinternalized, &if_notbothinternalized);
-
- assembler->Bind(&if_bothinternalized);
- {
- // Fast negative check for internalized-to-internalized equality.
- assembler->Goto(&if_notequal);
- }
+ assembler->Comment("Miss");
+ assembler->TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver,
+ key, slot, vector);
+ }
+}
- assembler->Bind(&if_notbothinternalized);
- {
- // Check that both {lhs} and {rhs} are flat one-byte strings.
- int const kBothSeqOneByteStringMask =
- kStringEncodingMask | kStringRepresentationMask |
- ((kStringEncodingMask | kStringRepresentationMask) << 8);
- int const kBothSeqOneByteStringTag =
- kOneByteStringTag | kSeqStringTag |
- ((kOneByteStringTag | kSeqStringTag) << 8);
- Label if_bothonebyteseqstrings(assembler),
- if_notbothonebyteseqstrings(assembler);
- assembler->Branch(
- assembler->Word32Equal(
- assembler->Word32And(
- both_instance_types,
- assembler->Int32Constant(kBothSeqOneByteStringMask)),
- assembler->Int32Constant(kBothSeqOneByteStringTag)),
- &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+void KeyedStoreSloppyArgumentsStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
- assembler->Bind(&if_bothonebyteseqstrings);
- {
- // Compute the effective offset of the first character.
- Node* begin = assembler->IntPtrConstant(
- SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
- // Compute the first offset after the string from the length.
- Node* end =
- assembler->IntPtrAdd(begin, assembler->SmiUntag(lhs_length));
-
- // Loop over the {lhs} and {rhs} strings to see if they are equal.
- Variable var_offset(assembler, MachineType::PointerRepresentation());
- Label loop(assembler, &var_offset);
- var_offset.Bind(begin);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- // Check if {offset} equals {end}.
- Node* offset = var_offset.value();
- Label if_done(assembler), if_notdone(assembler);
- assembler->Branch(assembler->WordEqual(offset, end), &if_done,
- &if_notdone);
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* key = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
- assembler->Bind(&if_notdone);
- {
- // Load the next characters from {lhs} and {rhs}.
- Node* lhs_value =
- assembler->Load(MachineType::Uint8(), lhs, offset);
- Node* rhs_value =
- assembler->Load(MachineType::Uint8(), rhs, offset);
-
- // Check if the characters match.
- Label if_valueissame(assembler), if_valueisnotsame(assembler);
- assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
- &if_valueissame, &if_valueisnotsame);
-
- assembler->Bind(&if_valueissame);
- {
- // Advance to next character.
- var_offset.Bind(
- assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
- }
- assembler->Goto(&loop);
+ Label miss(assembler);
- assembler->Bind(&if_valueisnotsame);
- assembler->Goto(&if_notequal);
- }
+ assembler->StoreKeyedSloppyArguments(receiver, key, value, &miss);
+ assembler->Return(value);
- assembler->Bind(&if_done);
- assembler->Goto(&if_equal);
- }
- }
+ assembler->Bind(&miss);
+ {
+ assembler->Comment("Miss");
+ assembler->TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value,
+ slot, vector, receiver, key);
+ }
+}
- assembler->Bind(&if_notbothonebyteseqstrings);
- {
- // TODO(bmeurer): Add fast case support for flattened cons strings;
- // also add support for two byte string equality checks.
- Runtime::FunctionId function_id = (mode == kDontNegateResult)
- ? Runtime::kStringEqual
- : Runtime::kStringNotEqual;
- assembler->TailCallRuntime(function_id, context, lhs, rhs);
- }
- }
- }
+void LoadScriptContextFieldStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
- assembler->Bind(&if_lengthisnotequal);
- {
- // Mismatch in length of {lhs} and {rhs}, cannot be equal.
- assembler->Goto(&if_notequal);
- }
- }
+ assembler->Comment("LoadScriptContextFieldStub: context_index=%d, slot=%d",
+ context_index(), slot_index());
- assembler->Bind(&if_equal);
- assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+ Node* context = assembler->Parameter(Descriptor::kContext);
- assembler->Bind(&if_notequal);
- assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+ Node* script_context = assembler->LoadScriptContext(context, context_index());
+ Node* result = assembler->LoadFixedArrayElement(
+ script_context, assembler->IntPtrConstant(slot_index()), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS);
+ assembler->Return(result);
}
-} // namespace
-
-void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void StoreScriptContextFieldStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
+
+ assembler->Comment("StoreScriptContextFieldStub: context_index=%d, slot=%d",
+ context_index(), slot_index());
+
+ Node* value = assembler->Parameter(Descriptor::kValue);
Node* context = assembler->Parameter(Descriptor::kContext);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- // For now we only support receiver_is_holder.
- DCHECK(receiver_is_holder());
- Node* holder = receiver;
- Node* map = assembler->LoadMap(receiver);
- Node* descriptors = assembler->LoadMapDescriptors(map);
- Node* offset =
- assembler->Int32Constant(DescriptorArray::ToValueIndex(index()));
- Node* callback = assembler->LoadFixedArrayElement(descriptors, offset);
- assembler->TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
- holder, callback);
+
+ Node* script_context = assembler->LoadScriptContext(context, context_index());
+ assembler->StoreFixedArrayElement(
+ script_context, assembler->IntPtrConstant(slot_index()), value,
+ UPDATE_WRITE_BARRIER, CodeStubAssembler::INTPTR_PARAMETERS);
+ assembler->Return(value);
}
// static
@@ -4499,33 +4457,6 @@ compiler::Node* StrictNotEqualStub::Generate(CodeStubAssembler* assembler,
return GenerateStrictEqual(assembler, kNegateResult, lhs, rhs, context);
}
-void StringEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- GenerateStringEqual(assembler, kDontNegateResult);
-}
-
-void StringNotEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- GenerateStringEqual(assembler, kNegateResult);
-}
-
-void StringLessThanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- GenerateStringRelationalComparison(assembler, kLessThan);
-}
-
-void StringLessThanOrEqualStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- GenerateStringRelationalComparison(assembler, kLessThanOrEqual);
-}
-
-void StringGreaterThanStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- GenerateStringRelationalComparison(assembler, kGreaterThan);
-}
-
-void StringGreaterThanOrEqualStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- GenerateStringRelationalComparison(assembler, kGreaterThanOrEqual);
-}
-
void ToLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
@@ -4557,8 +4488,7 @@ void ToLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
// Check if {len} is a HeapNumber.
Label if_lenisheapnumber(assembler),
if_lenisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(assembler->LoadMap(len),
- assembler->HeapNumberMapConstant()),
+ assembler->Branch(assembler->IsHeapNumberMap(assembler->LoadMap(len)),
&if_lenisheapnumber, &if_lenisnotheapnumber);
assembler->Bind(&if_lenisheapnumber);
@@ -4603,64 +4533,12 @@ void ToLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
}
void ToIntegerStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(1);
-
- // We might need to loop once for ToNumber conversion.
- Variable var_arg(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_arg);
- var_arg.Bind(assembler->Parameter(0));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- // Shared entry points.
- Label return_arg(assembler), return_zero(assembler, Label::kDeferred);
-
- // Load the current {arg} value.
- Node* arg = var_arg.value();
-
- // Check if {arg} is a Smi.
- assembler->GotoIf(assembler->WordIsSmi(arg), &return_arg);
-
- // Check if {arg} is a HeapNumber.
- Label if_argisheapnumber(assembler),
- if_argisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(assembler->LoadMap(arg),
- assembler->HeapNumberMapConstant()),
- &if_argisheapnumber, &if_argisnotheapnumber);
-
- assembler->Bind(&if_argisheapnumber);
- {
- // Load the floating-point value of {arg}.
- Node* arg_value = assembler->LoadHeapNumberValue(arg);
- // Check if {arg} is NaN.
- assembler->GotoUnless(assembler->Float64Equal(arg_value, arg_value),
- &return_zero);
-
- // Truncate {arg} towards zero.
- Node* value = assembler->Float64Trunc(arg_value);
- var_arg.Bind(assembler->ChangeFloat64ToTagged(value));
- assembler->Goto(&return_arg);
- }
-
- assembler->Bind(&if_argisnotheapnumber);
- {
- // Need to convert {arg} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
- var_arg.Bind(assembler->CallStub(callable, context, arg));
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&return_arg);
- assembler->Return(var_arg.value());
+ Node* input = assembler->Parameter(Descriptor::kArgument);
+ Node* context = assembler->Parameter(Descriptor::kContext);
- assembler->Bind(&return_zero);
- assembler->Return(assembler->SmiConstant(Smi::FromInt(0)));
- }
+ assembler->Return(assembler->ToInteger(context, input));
}
void StoreInterceptorStub::GenerateAssembly(
@@ -4727,15 +4605,13 @@ compiler::Node* FastCloneShallowObjectStub::GenerateFastPath(
typedef compiler::CodeAssembler::Label Label;
typedef compiler::CodeAssembler::Variable Variable;
- Node* undefined = assembler->UndefinedConstant();
Node* literals_array =
assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
Node* allocation_site = assembler->LoadFixedArrayElement(
literals_array, literals_index,
LiteralsArray::kFirstLiteralIndex * kPointerSize,
CodeStubAssembler::SMI_PARAMETERS);
- assembler->GotoIf(assembler->WordEqual(allocation_site, undefined),
- call_runtime);
+ assembler->GotoIf(assembler->IsUndefined(allocation_site), call_runtime);
// Calculate the object and allocation size based on the properties count.
Node* object_size = assembler->IntPtrAdd(
@@ -4886,14 +4762,10 @@ void KeyedLoadGenericStub::InitializeDescriptor(
void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- if (kind() == Code::STORE_IC) {
- descriptor->Initialize(FUNCTION_ADDR(Runtime_StoreIC_MissFromStubFailure));
- } else if (kind() == Code::KEYED_LOAD_IC) {
+ DCHECK(kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC);
+ if (kind() == Code::KEYED_LOAD_IC) {
descriptor->Initialize(
FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
- } else if (kind() == Code::KEYED_STORE_IC) {
- descriptor->Initialize(
- FUNCTION_ADDR(Runtime_KeyedStoreIC_MissFromStubFailure));
}
}
@@ -4908,39 +4780,12 @@ CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() const {
}
-void StoreFastElementStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- FUNCTION_ADDR(Runtime_KeyedStoreIC_MissFromStubFailure));
-}
-
-
-void ElementsTransitionAndStoreStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- FUNCTION_ADDR(Runtime_ElementsTransitionAndStoreIC_Miss));
-}
-
-void StoreTransitionStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- FUNCTION_ADDR(Runtime_TransitionStoreIC_MissFromStubFailure));
-}
-
void NumberToStringStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(
Runtime::FunctionForId(Runtime::kNumberToString)->entry);
descriptor->SetMissHandler(Runtime::kNumberToString);
}
-
-void FastCloneShallowArrayStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- FastCloneShallowArrayDescriptor call_descriptor(isolate());
- descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry);
- descriptor->SetMissHandler(Runtime::kCreateArrayLiteralStubBailout);
-}
-
void RegExpConstructResultStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize(
@@ -5088,9 +4933,7 @@ compiler::Node* ForInFilterStub::Generate(CodeStubAssembler* assembler,
assembler->Bind(&return_to_name);
{
- // TODO(cbruni): inline ToName here.
- Callable callable = CodeFactory::ToName(assembler->isolate());
- var_result.Bind(assembler->CallStub(callable, context, key));
+ var_result.Bind(assembler->ToName(context, key));
assembler->Goto(&end);
}
@@ -5193,43 +5036,49 @@ compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
Label if_normal(assembler), if_generator(assembler), if_async(assembler),
if_class_constructor(assembler), if_function_without_prototype(assembler),
load_map(assembler);
- Variable map_index(assembler, MachineRepresentation::kTagged);
+ Variable map_index(assembler, MachineType::PointerRepresentation());
+ STATIC_ASSERT(FunctionKind::kNormalFunction == 0);
Node* is_not_normal = assembler->Word32And(
compiler_hints,
- assembler->Int32Constant(SharedFunctionInfo::kFunctionKindMaskBits));
+ assembler->Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
assembler->GotoUnless(is_not_normal, &if_normal);
Node* is_generator = assembler->Word32And(
compiler_hints,
- assembler->Int32Constant(1 << SharedFunctionInfo::kIsGeneratorBit));
+ assembler->Int32Constant(FunctionKind::kGeneratorFunction
+ << SharedFunctionInfo::kFunctionKindShift));
assembler->GotoIf(is_generator, &if_generator);
Node* is_async = assembler->Word32And(
compiler_hints,
- assembler->Int32Constant(1 << SharedFunctionInfo::kIsAsyncFunctionBit));
+ assembler->Int32Constant(FunctionKind::kAsyncFunction
+ << SharedFunctionInfo::kFunctionKindShift));
assembler->GotoIf(is_async, &if_async);
Node* is_class_constructor = assembler->Word32And(
compiler_hints,
- assembler->Int32Constant(SharedFunctionInfo::kClassConstructorBits));
+ assembler->Int32Constant(FunctionKind::kClassConstructor
+ << SharedFunctionInfo::kFunctionKindShift));
assembler->GotoIf(is_class_constructor, &if_class_constructor);
if (FLAG_debug_code) {
// Function must be a function without a prototype.
assembler->Assert(assembler->Word32And(
- compiler_hints, assembler->Int32Constant(
- SharedFunctionInfo::kAccessorFunctionBits |
- (1 << SharedFunctionInfo::kIsArrowBit) |
- (1 << SharedFunctionInfo::kIsConciseMethodBit))));
+ compiler_hints,
+ assembler->Int32Constant((FunctionKind::kAccessorFunction |
+ FunctionKind::kArrowFunction |
+ FunctionKind::kConciseMethod)
+ << SharedFunctionInfo::kFunctionKindShift)));
}
assembler->Goto(&if_function_without_prototype);
assembler->Bind(&if_normal);
{
map_index.Bind(assembler->Select(
- is_strict, assembler->Int32Constant(Context::STRICT_FUNCTION_MAP_INDEX),
- assembler->Int32Constant(Context::SLOPPY_FUNCTION_MAP_INDEX)));
+ is_strict,
+ assembler->IntPtrConstant(Context::STRICT_FUNCTION_MAP_INDEX),
+ assembler->IntPtrConstant(Context::SLOPPY_FUNCTION_MAP_INDEX)));
assembler->Goto(&load_map);
}
@@ -5237,8 +5086,8 @@ compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
{
map_index.Bind(assembler->Select(
is_strict,
- assembler->Int32Constant(Context::STRICT_GENERATOR_FUNCTION_MAP_INDEX),
- assembler->Int32Constant(
+ assembler->IntPtrConstant(Context::STRICT_GENERATOR_FUNCTION_MAP_INDEX),
+ assembler->IntPtrConstant(
Context::SLOPPY_GENERATOR_FUNCTION_MAP_INDEX)));
assembler->Goto(&load_map);
}
@@ -5247,21 +5096,21 @@ compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
{
map_index.Bind(assembler->Select(
is_strict,
- assembler->Int32Constant(Context::STRICT_ASYNC_FUNCTION_MAP_INDEX),
- assembler->Int32Constant(Context::SLOPPY_ASYNC_FUNCTION_MAP_INDEX)));
+ assembler->IntPtrConstant(Context::STRICT_ASYNC_FUNCTION_MAP_INDEX),
+ assembler->IntPtrConstant(Context::SLOPPY_ASYNC_FUNCTION_MAP_INDEX)));
assembler->Goto(&load_map);
}
assembler->Bind(&if_class_constructor);
{
map_index.Bind(
- assembler->Int32Constant(Context::STRICT_FUNCTION_MAP_INDEX));
+ assembler->IntPtrConstant(Context::STRICT_FUNCTION_MAP_INDEX));
assembler->Goto(&load_map);
}
assembler->Bind(&if_function_without_prototype);
{
- map_index.Bind(assembler->Int32Constant(
+ map_index.Bind(assembler->IntPtrConstant(
Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
assembler->Goto(&load_map);
}
@@ -5272,7 +5121,8 @@ compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
// as the map of the allocated object.
Node* native_context = assembler->LoadNativeContext(context);
Node* map_slot_value =
- assembler->LoadFixedArrayElement(native_context, map_index.value());
+ assembler->LoadFixedArrayElement(native_context, map_index.value(), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS);
assembler->StoreMapNoWriteBarrier(result, map_slot_value);
// Initialize the rest of the function.
@@ -5405,15 +5255,13 @@ compiler::Node* FastCloneRegExpStub::Generate(CodeStubAssembler* assembler,
Variable result(assembler, MachineRepresentation::kTagged);
- Node* undefined = assembler->UndefinedConstant();
Node* literals_array =
assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
Node* boilerplate = assembler->LoadFixedArrayElement(
literals_array, literal_index,
LiteralsArray::kFirstLiteralIndex * kPointerSize,
CodeStubAssembler::SMI_PARAMETERS);
- assembler->GotoIf(assembler->WordEqual(boilerplate, undefined),
- &call_runtime);
+ assembler->GotoIf(assembler->IsUndefined(boilerplate), &call_runtime);
{
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
@@ -5449,6 +5297,191 @@ void FastCloneRegExpStub::GenerateAssembly(CodeStubAssembler* assembler) const {
Generate(assembler, closure, literal_index, pattern, flags, context));
}
+namespace {
+
+compiler::Node* NonEmptyShallowClone(CodeStubAssembler* assembler,
+ compiler::Node* boilerplate,
+ compiler::Node* boilerplate_map,
+ compiler::Node* boilerplate_elements,
+ compiler::Node* allocation_site,
+ compiler::Node* capacity,
+ ElementsKind kind) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::ParameterMode ParameterMode;
+
+ ParameterMode param_mode = CodeStubAssembler::SMI_PARAMETERS;
+
+ Node* length = assembler->LoadJSArrayLength(boilerplate);
+
+ if (assembler->Is64()) {
+ capacity = assembler->SmiUntag(capacity);
+ param_mode = CodeStubAssembler::INTEGER_PARAMETERS;
+ }
+
+ Node *array, *elements;
+ std::tie(array, elements) =
+ assembler->AllocateUninitializedJSArrayWithElements(
+ kind, boilerplate_map, length, allocation_site, capacity, param_mode);
+
+ assembler->Comment("copy elements header");
+ for (int offset = 0; offset < FixedArrayBase::kHeaderSize;
+ offset += kPointerSize) {
+ Node* value = assembler->LoadObjectField(boilerplate_elements, offset);
+ assembler->StoreObjectField(elements, offset, value);
+ }
+
+ if (assembler->Is64()) {
+ length = assembler->SmiUntag(length);
+ }
+
+ assembler->Comment("copy boilerplate elements");
+ assembler->CopyFixedArrayElements(kind, boilerplate_elements, elements,
+ length, SKIP_WRITE_BARRIER, param_mode);
+ assembler->IncrementCounter(
+ assembler->isolate()->counters()->inlined_copied_elements(), 1);
+
+ return array;
+}
+
+} // namespace
+
+// static
+compiler::Node* FastCloneShallowArrayStub::Generate(
+ CodeStubAssembler* assembler, compiler::Node* closure,
+ compiler::Node* literal_index, compiler::Node* context,
+ CodeStubAssembler::Label* call_runtime,
+ AllocationSiteMode allocation_site_mode) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ typedef compiler::Node Node;
+
+ Label zero_capacity(assembler), cow_elements(assembler),
+ fast_elements(assembler), return_result(assembler);
+ Variable result(assembler, MachineRepresentation::kTagged);
+
+ Node* literals_array =
+ assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
+ Node* allocation_site = assembler->LoadFixedArrayElement(
+ literals_array, literal_index,
+ LiteralsArray::kFirstLiteralIndex * kPointerSize,
+ CodeStubAssembler::SMI_PARAMETERS);
+
+ assembler->GotoIf(assembler->IsUndefined(allocation_site), call_runtime);
+ allocation_site = assembler->LoadFixedArrayElement(
+ literals_array, literal_index,
+ LiteralsArray::kFirstLiteralIndex * kPointerSize,
+ CodeStubAssembler::SMI_PARAMETERS);
+
+ Node* boilerplate = assembler->LoadObjectField(
+ allocation_site, AllocationSite::kTransitionInfoOffset);
+ Node* boilerplate_map = assembler->LoadMap(boilerplate);
+ Node* boilerplate_elements = assembler->LoadElements(boilerplate);
+ Node* capacity = assembler->LoadFixedArrayBaseLength(boilerplate_elements);
+ allocation_site =
+ allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
+
+ Node* zero = assembler->SmiConstant(Smi::FromInt(0));
+ assembler->GotoIf(assembler->SmiEqual(capacity, zero), &zero_capacity);
+
+ Node* elements_map = assembler->LoadMap(boilerplate_elements);
+ assembler->GotoIf(assembler->IsFixedCOWArrayMap(elements_map), &cow_elements);
+
+ assembler->GotoIf(assembler->IsFixedArrayMap(elements_map), &fast_elements);
+ {
+ assembler->Comment("fast double elements path");
+ if (FLAG_debug_code) {
+ Label correct_elements_map(assembler), abort(assembler, Label::kDeferred);
+ assembler->BranchIf(assembler->IsFixedDoubleArrayMap(elements_map),
+ &correct_elements_map, &abort);
+
+ assembler->Bind(&abort);
+ {
+ Node* abort_id = assembler->SmiConstant(
+ Smi::FromInt(BailoutReason::kExpectedFixedDoubleArrayMap));
+ assembler->TailCallRuntime(Runtime::kAbort, context, abort_id);
+ }
+ assembler->Bind(&correct_elements_map);
+ }
+
+ Node* array = NonEmptyShallowClone(assembler, boilerplate, boilerplate_map,
+ boilerplate_elements, allocation_site,
+ capacity, FAST_DOUBLE_ELEMENTS);
+ result.Bind(array);
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&fast_elements);
+ {
+ assembler->Comment("fast elements path");
+ Node* array = NonEmptyShallowClone(assembler, boilerplate, boilerplate_map,
+ boilerplate_elements, allocation_site,
+ capacity, FAST_ELEMENTS);
+ result.Bind(array);
+ assembler->Goto(&return_result);
+ }
+
+ Variable length(assembler, MachineRepresentation::kTagged),
+ elements(assembler, MachineRepresentation::kTagged);
+ Label allocate_without_elements(assembler);
+
+ assembler->Bind(&cow_elements);
+ {
+ assembler->Comment("fixed cow path");
+ length.Bind(assembler->LoadJSArrayLength(boilerplate));
+ elements.Bind(boilerplate_elements);
+
+ assembler->Goto(&allocate_without_elements);
+ }
+
+ assembler->Bind(&zero_capacity);
+ {
+ assembler->Comment("zero capacity path");
+ length.Bind(zero);
+ elements.Bind(assembler->LoadRoot(Heap::kEmptyFixedArrayRootIndex));
+
+ assembler->Goto(&allocate_without_elements);
+ }
+
+ assembler->Bind(&allocate_without_elements);
+ {
+ Node* array = assembler->AllocateUninitializedJSArrayWithoutElements(
+ FAST_ELEMENTS, boilerplate_map, length.value(), allocation_site);
+ assembler->StoreObjectField(array, JSObject::kElementsOffset,
+ elements.value());
+ result.Bind(array);
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&return_result);
+ return result.value();
+}
+
+void FastCloneShallowArrayStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ Node* closure = assembler->Parameter(Descriptor::kClosure);
+ Node* literal_index = assembler->Parameter(Descriptor::kLiteralIndex);
+ Node* constant_elements = assembler->Parameter(Descriptor::kConstantElements);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Label call_runtime(assembler, Label::kDeferred);
+ assembler->Return(Generate(assembler, closure, literal_index, context,
+ &call_runtime, allocation_site_mode()));
+
+ assembler->Bind(&call_runtime);
+ {
+ assembler->Comment("call runtime");
+ Node* flags = assembler->SmiConstant(
+ Smi::FromInt(ArrayLiteral::kShallowElements |
+ (allocation_site_mode() == TRACK_ALLOCATION_SITE
+ ? 0
+ : ArrayLiteral::kDisableMementos)));
+ assembler->Return(assembler->CallRuntime(Runtime::kCreateArrayLiteral,
+ context, closure, literal_index,
+ constant_elements, flags));
+ }
+}
+
void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
CreateAllocationSiteStub stub(isolate);
stub.GetCode();
@@ -5463,9 +5496,38 @@ void CreateWeakCellStub::GenerateAheadOfTime(Isolate* isolate) {
void StoreElementStub::Generate(MacroAssembler* masm) {
DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind());
- ElementHandlerCompiler::GenerateStoreSlow(masm);
+ KeyedStoreIC::GenerateSlow(masm);
}
+void StoreFastElementStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ assembler->Comment(
+ "StoreFastElementStub: js_array=%d, elements_kind=%s, store_mode=%d",
+ is_js_array(), ElementsKindToString(elements_kind()), store_mode());
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* key = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ Label miss(assembler);
+
+ assembler->EmitElementStore(receiver, key, value, is_js_array(),
+ elements_kind(), store_mode(), &miss);
+ assembler->Return(value);
+
+ assembler->Bind(&miss);
+ {
+ assembler->Comment("Miss");
+ assembler->TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value,
+ slot, vector, receiver, key);
+ }
+}
// static
void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
@@ -5597,58 +5659,9 @@ void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
void CreateAllocationSiteStub::GenerateAssembly(
CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
- Node* size = assembler->IntPtrConstant(AllocationSite::kSize);
- Node* site = assembler->Allocate(size, CodeStubAssembler::kPretenured);
-
- // Store the map
- assembler->StoreObjectFieldRoot(site, AllocationSite::kMapOffset,
- Heap::kAllocationSiteMapRootIndex);
-
- Node* kind =
- assembler->SmiConstant(Smi::FromInt(GetInitialFastElementsKind()));
- assembler->StoreObjectFieldNoWriteBarrier(
- site, AllocationSite::kTransitionInfoOffset, kind);
-
- // Unlike literals, constructed arrays don't have nested sites
- Node* zero = assembler->IntPtrConstant(0);
- assembler->StoreObjectFieldNoWriteBarrier(
- site, AllocationSite::kNestedSiteOffset, zero);
-
- // Pretenuring calculation field.
- assembler->StoreObjectFieldNoWriteBarrier(
- site, AllocationSite::kPretenureDataOffset, zero);
-
- // Pretenuring memento creation count field.
- assembler->StoreObjectFieldNoWriteBarrier(
- site, AllocationSite::kPretenureCreateCountOffset, zero);
-
- // Store an empty fixed array for the code dependency.
- assembler->StoreObjectFieldRoot(site, AllocationSite::kDependentCodeOffset,
- Heap::kEmptyFixedArrayRootIndex);
-
- // Link the object to the allocation site list
- Node* site_list = assembler->ExternalConstant(
- ExternalReference::allocation_sites_list_address(isolate()));
- Node* next_site = assembler->LoadBufferObject(site_list, 0);
-
- // TODO(mvstanton): This is a store to a weak pointer, which we may want to
- // mark as such in order to skip the write barrier, once we have a unified
- // system for weakness. For now we decided to keep it like this because having
- // an initial write barrier backed store makes this pointer strong until the
- // next GC, and allocation sites are designed to survive several GCs anyway.
- assembler->StoreObjectField(site, AllocationSite::kWeakNextOffset, next_site);
- assembler->StoreNoWriteBarrier(MachineRepresentation::kTagged, site_list,
- site);
-
- Node* feedback_vector = assembler->Parameter(Descriptor::kVector);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
-
- assembler->StoreFixedArrayElement(feedback_vector, slot, site,
- UPDATE_WRITE_BARRIER,
- CodeStubAssembler::SMI_PARAMETERS);
-
- assembler->Return(site);
+ assembler->Return(assembler->CreateAllocationSiteInFeedbackVector(
+ assembler->Parameter(Descriptor::kVector),
+ assembler->Parameter(Descriptor::kSlot)));
}
void CreateWeakCellStub::GenerateAssembly(CodeStubAssembler* assembler) const {
@@ -5674,7 +5687,7 @@ void ArrayNoArgumentConstructorStub::GenerateAssembly(
Node* array = assembler->AllocateJSArray(
elements_kind(), array_map,
assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
- assembler->IntPtrConstant(0), allocation_site);
+ assembler->SmiConstant(Smi::FromInt(0)), allocation_site);
assembler->Return(array);
}
@@ -5687,7 +5700,7 @@ void InternalArrayNoArgumentConstructorStub::GenerateAssembly(
Node* array = assembler->AllocateJSArray(
elements_kind(), array_map,
assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
- assembler->IntPtrConstant(0), nullptr);
+ assembler->SmiConstant(Smi::FromInt(0)), nullptr);
assembler->Return(array);
}
@@ -5727,8 +5740,8 @@ void SingleArgumentConstructorCommon(CodeStubAssembler* assembler,
int element_size =
IsFastDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
int max_fast_elements =
- (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize -
- JSArray::kSize - AllocationMemento::kSize) /
+ (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
+ AllocationMemento::kSize) /
element_size;
assembler->Branch(
assembler->SmiAboveOrEqual(
@@ -5796,9 +5809,8 @@ void GrowArrayElementsStub::GenerateAssembly(
ElementsKind kind = elements_kind();
Node* elements = assembler->LoadElements(object);
- Node* new_elements = assembler->CheckAndGrowElementsCapacity(
- context, elements, kind, key, &runtime);
- assembler->StoreObjectField(object, JSObject::kElementsOffset, new_elements);
+ Node* new_elements =
+ assembler->TryGrowElementsCapacity(object, elements, kind, key, &runtime);
assembler->Return(new_elements);
assembler->Bind(&runtime);
@@ -5837,20 +5849,19 @@ ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
InternalArrayConstructorStub::InternalArrayConstructorStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
-Representation RepresentationFromType(Type* type) {
- if (type->Is(Type::UntaggedIntegral())) {
+Representation RepresentationFromMachineType(MachineType type) {
+ if (type == MachineType::Int32()) {
return Representation::Integer32();
}
- if (type->Is(Type::TaggedSigned())) {
+ if (type == MachineType::TaggedSigned()) {
return Representation::Smi();
}
- if (type->Is(Type::UntaggedPointer())) {
+ if (type == MachineType::Pointer()) {
return Representation::External();
}
- DCHECK(!type->Is(Type::Untagged()));
return Representation::Tagged();
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 4793d74f96..5c83fdebb0 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+class ObjectLiteral;
+
// List of code stubs used on all platforms.
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
/* --- PlatformCodeStubs --- */ \
@@ -41,8 +43,6 @@ namespace internal {
V(StoreBufferOverflow) \
V(StoreElement) \
V(SubString) \
- V(ToString) \
- V(ToName) \
V(StoreIC) \
V(KeyedStoreIC) \
V(KeyedLoadIC) \
@@ -66,12 +66,8 @@ namespace internal {
V(KeyedStoreICTrampoline) \
V(StoreICTrampoline) \
/* --- HydrogenCodeStubs --- */ \
- V(ElementsTransitionAndStore) \
- V(FastCloneShallowArray) \
V(NumberToString) \
V(StringAdd) \
- V(ToObject) \
- V(Typeof) \
/* These builtins w/ JS linkage are */ \
/* just fast-cases of C++ builtins. They */ \
/* require varg support from TF */ \
@@ -81,18 +77,10 @@ namespace internal {
/* as part of the new IC system, ask */ \
/* ishell before doing anything */ \
V(KeyedLoadGeneric) \
- V(KeyedLoadSloppyArguments) \
- V(KeyedStoreSloppyArguments) \
V(LoadConstant) \
V(LoadDictionaryElement) \
V(LoadFastElement) \
V(LoadField) \
- V(LoadScriptContextField) \
- V(StoreFastElement) \
- V(StoreField) \
- V(StoreGlobal) \
- V(StoreScriptContextField) \
- V(StoreTransition) \
/* These should never be ported to TF */ \
/* because they are either used only by */ \
/* FCG/Crankshaft or are deprecated */ \
@@ -140,8 +128,10 @@ namespace internal {
V(InternalArrayNoArgumentConstructor) \
V(InternalArraySingleArgumentConstructor) \
V(Dec) \
- V(FastCloneShallowObject) \
+ V(ElementsTransitionAndStore) \
V(FastCloneRegExp) \
+ V(FastCloneShallowArray) \
+ V(FastCloneShallowObject) \
V(FastNewClosure) \
V(FastNewFunctionContext) \
V(InstanceOf) \
@@ -151,14 +141,12 @@ namespace internal {
V(GreaterThanOrEqual) \
V(Equal) \
V(NotEqual) \
+ V(KeyedLoadSloppyArguments) \
+ V(KeyedStoreSloppyArguments) \
+ V(LoadScriptContextField) \
+ V(StoreScriptContextField) \
V(StrictEqual) \
V(StrictNotEqual) \
- V(StringEqual) \
- V(StringNotEqual) \
- V(StringLessThan) \
- V(StringLessThanOrEqual) \
- V(StringGreaterThan) \
- V(StringGreaterThanOrEqual) \
V(ToInteger) \
V(ToLength) \
V(HasProperty) \
@@ -166,16 +154,25 @@ namespace internal {
V(GetProperty) \
V(LoadICTF) \
V(KeyedLoadICTF) \
+ V(StoreFastElement) \
+ V(StoreField) \
+ V(StoreGlobal) \
+ V(StoreICTF) \
V(StoreInterceptor) \
+ V(StoreMap) \
+ V(StoreTransition) \
V(LoadApiGetter) \
V(LoadIndexedInterceptor) \
V(GrowArrayElements) \
+ V(ToObject) \
+ V(Typeof) \
/* These are only called from FGC and */ \
/* can be removed when we use ignition */ \
/* only */ \
V(LoadICTrampolineTF) \
V(LoadGlobalICTrampoline) \
- V(KeyedLoadICTrampolineTF)
+ V(KeyedLoadICTrampolineTF) \
+ V(StoreICTrampolineTF)
// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
@@ -487,12 +484,6 @@ class CodeStub BASE_EMBEDDED {
return Descriptor(isolate()); \
}
-#define DEFINE_ON_STACK_CALL_INTERFACE_DESCRIPTOR(PARAMETER_COUNT) \
- public: \
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
- return OnStackArgsDescriptorBase::ForArgs(isolate(), PARAMETER_COUNT); \
- }
-
// There are some code stubs we just can't describe right now with a
// CallInterfaceDescriptor. Isolate behavior for those cases with this macro.
// An attempt to retrieve a descriptor will fail.
@@ -564,7 +555,7 @@ class CodeStubDescriptor {
return call_descriptor().GetRegisterParameter(index);
}
- Type* GetParameterType(int index) const {
+ MachineType GetParameterType(int index) const {
return call_descriptor().GetParameterType(index);
}
@@ -993,57 +984,6 @@ class StrictNotEqualStub final : public TurboFanCodeStub {
DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(StrictNotEqual, TurboFanCodeStub);
};
-class StringEqualStub final : public TurboFanCodeStub {
- public:
- explicit StringEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(StringEqual, TurboFanCodeStub);
-};
-
-class StringNotEqualStub final : public TurboFanCodeStub {
- public:
- explicit StringNotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(StringNotEqual, TurboFanCodeStub);
-};
-
-class StringLessThanStub final : public TurboFanCodeStub {
- public:
- explicit StringLessThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(StringLessThan, TurboFanCodeStub);
-};
-
-class StringLessThanOrEqualStub final : public TurboFanCodeStub {
- public:
- explicit StringLessThanOrEqualStub(Isolate* isolate)
- : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(StringLessThanOrEqual, TurboFanCodeStub);
-};
-
-class StringGreaterThanStub final : public TurboFanCodeStub {
- public:
- explicit StringGreaterThanStub(Isolate* isolate)
- : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(StringGreaterThan, TurboFanCodeStub);
-};
-
-class StringGreaterThanOrEqualStub final : public TurboFanCodeStub {
- public:
- explicit StringGreaterThanOrEqualStub(Isolate* isolate)
- : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(StringGreaterThanOrEqual, TurboFanCodeStub);
-};
-
class ToIntegerStub final : public TurboFanCodeStub {
public:
explicit ToIntegerStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
@@ -1174,7 +1114,7 @@ class FastNewFunctionContextStub final : public TurboFanCodeStub {
// FastNewFunctionContextStub can only allocate closures which fit in the
// new space.
STATIC_ASSERT(((kMaximumSlots + Context::MIN_CONTEXT_SLOTS) * kPointerSize +
- FixedArray::kHeaderSize) < Page::kMaxRegularHeapObjectSize);
+ FixedArray::kHeaderSize) < kMaxRegularHeapObjectSize);
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewFunctionContext);
DEFINE_TURBOFAN_CODE_STUB(FastNewFunctionContext, TurboFanCodeStub);
@@ -1269,24 +1209,30 @@ class FastCloneRegExpStub final : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(FastCloneRegExp, TurboFanCodeStub);
};
-
-class FastCloneShallowArrayStub : public HydrogenCodeStub {
+class FastCloneShallowArrayStub : public TurboFanCodeStub {
public:
FastCloneShallowArrayStub(Isolate* isolate,
AllocationSiteMode allocation_site_mode)
- : HydrogenCodeStub(isolate) {
- set_sub_minor_key(AllocationSiteModeBits::encode(allocation_site_mode));
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = AllocationSiteModeBits::encode(allocation_site_mode);
}
+ static compiler::Node* Generate(CodeStubAssembler* assembler,
+ compiler::Node* closure,
+ compiler::Node* literal_index,
+ compiler::Node* context,
+ CodeStubAssembler::Label* call_runtime,
+ AllocationSiteMode allocation_site_mode);
+
AllocationSiteMode allocation_site_mode() const {
- return AllocationSiteModeBits::decode(sub_minor_key());
+ return AllocationSiteModeBits::decode(minor_key_);
}
private:
class AllocationSiteModeBits: public BitField<AllocationSiteMode, 0, 1> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneShallowArray);
- DEFINE_HYDROGEN_CODE_STUB(FastCloneShallowArray, HydrogenCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(FastCloneShallowArray, TurboFanCodeStub);
};
class FastCloneShallowObjectStub : public TurboFanCodeStub {
@@ -1556,35 +1502,36 @@ class LoadFieldStub: public HandlerStub {
DEFINE_HANDLER_CODE_STUB(LoadField, HandlerStub);
};
-
-class KeyedLoadSloppyArgumentsStub : public HandlerStub {
+class KeyedLoadSloppyArgumentsStub : public TurboFanCodeStub {
public:
explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
- : HandlerStub(isolate) {}
+ : TurboFanCodeStub(isolate) {}
- protected:
- Code::Kind kind() const override { return Code::KEYED_LOAD_IC; }
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
+ protected:
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_HANDLER_CODE_STUB(KeyedLoadSloppyArguments, HandlerStub);
+ DEFINE_TURBOFAN_CODE_STUB(KeyedLoadSloppyArguments, TurboFanCodeStub);
};
class CommonStoreModeBits : public BitField<KeyedAccessStoreMode, 0, 3> {};
-class KeyedStoreSloppyArgumentsStub : public HandlerStub {
+class KeyedStoreSloppyArgumentsStub : public TurboFanCodeStub {
public:
explicit KeyedStoreSloppyArgumentsStub(Isolate* isolate,
KeyedAccessStoreMode mode)
- : HandlerStub(isolate) {
- set_sub_minor_key(CommonStoreModeBits::encode(mode));
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = CommonStoreModeBits::encode(mode);
}
- protected:
- Code::Kind kind() const override { return Code::KEYED_STORE_IC; }
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
+ protected:
DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_HANDLER_CODE_STUB(KeyedStoreSloppyArguments, HandlerStub);
+ DEFINE_TURBOFAN_CODE_STUB(KeyedStoreSloppyArguments, TurboFanCodeStub);
};
@@ -1637,161 +1584,107 @@ class LoadApiGetterStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(LoadApiGetter, TurboFanCodeStub);
};
-class StoreFieldStub : public HandlerStub {
+class StoreFieldStub : public TurboFanCodeStub {
public:
StoreFieldStub(Isolate* isolate, FieldIndex index,
Representation representation)
- : HandlerStub(isolate) {
+ : TurboFanCodeStub(isolate) {
int property_index_key = index.GetFieldAccessStubKey();
- uint8_t repr = PropertyDetails::EncodeRepresentation(representation);
- set_sub_minor_key(StoreFieldByIndexBits::encode(property_index_key) |
- RepresentationBits::encode(repr));
+ minor_key_ = StoreFieldByIndexBits::encode(property_index_key) |
+ RepresentationBits::encode(representation.kind());
}
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
+
FieldIndex index() const {
- int property_index_key = StoreFieldByIndexBits::decode(sub_minor_key());
+ int property_index_key = StoreFieldByIndexBits::decode(minor_key_);
return FieldIndex::FromFieldAccessStubKey(property_index_key);
}
- Representation representation() {
- uint8_t repr = RepresentationBits::decode(sub_minor_key());
- return PropertyDetails::DecodeRepresentation(repr);
+ Representation representation() const {
+ return Representation::FromKind(RepresentationBits::decode(minor_key_));
}
- protected:
- Code::Kind kind() const override { return Code::STORE_IC; }
-
private:
class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
- class RepresentationBits : public BitField<uint8_t, 13, 4> {};
+ class RepresentationBits
+ : public BitField<Representation::Kind, StoreFieldByIndexBits::kNext, 4> {
+ };
+ STATIC_ASSERT(Representation::kNumRepresentations - 1 <
+ RepresentationBits::kMax);
- // TODO(ishell): The stub uses only kReceiver and kValue parameters.
DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_HANDLER_CODE_STUB(StoreField, HandlerStub);
+ DEFINE_TURBOFAN_CODE_STUB(StoreField, TurboFanCodeStub);
};
-
-// Register and parameter access methods are specified here instead of in
-// the CallInterfaceDescriptor because the stub uses a different descriptor
-// if FLAG_vector_stores is on.
-class StoreTransitionHelper {
+class StoreMapStub : public TurboFanCodeStub {
public:
- static Register ReceiverRegister() {
- return StoreTransitionDescriptor::ReceiverRegister();
- }
-
- static Register NameRegister() {
- return StoreTransitionDescriptor::NameRegister();
- }
-
- static Register ValueRegister() {
- return StoreTransitionDescriptor::ValueRegister();
- }
-
- static Register SlotRegister() {
- return VectorStoreTransitionDescriptor::SlotRegister();
- }
-
- static Register VectorRegister() {
- return VectorStoreTransitionDescriptor::VectorRegister();
- }
-
- static Register MapRegister() {
- return VectorStoreTransitionDescriptor::MapRegister();
- }
-
- static int ReceiverIndex() { return StoreTransitionDescriptor::kReceiver; }
-
- static int NameIndex() { return StoreTransitionDescriptor::kReceiver; }
-
- static int ValueIndex() { return StoreTransitionDescriptor::kValue; }
-
- static int MapIndex() {
- DCHECK(static_cast<int>(VectorStoreTransitionDescriptor::kMap) ==
- static_cast<int>(StoreTransitionDescriptor::kMap));
- return StoreTransitionDescriptor::kMap;
- }
+ explicit StoreMapStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
- static int VectorIndex() {
- if (HasVirtualSlotArg()) {
- return VectorStoreTransitionDescriptor::kVirtualSlotVector;
- }
- return VectorStoreTransitionDescriptor::kVector;
- }
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
- // Some platforms don't have a slot arg.
- static bool HasVirtualSlotArg() {
- return SlotRegister().is(no_reg);
- }
+ private:
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreTransition);
+ DEFINE_TURBOFAN_CODE_STUB(StoreMap, TurboFanCodeStub);
};
-
-class StoreTransitionStub : public HandlerStub {
+class StoreTransitionStub : public TurboFanCodeStub {
public:
enum StoreMode {
- StoreMapOnly,
StoreMapAndValue,
ExtendStorageAndStoreMapAndValue
};
- explicit StoreTransitionStub(Isolate* isolate) : HandlerStub(isolate) {
- set_sub_minor_key(StoreModeBits::encode(StoreMapOnly));
- }
-
- StoreTransitionStub(Isolate* isolate, FieldIndex index,
+ StoreTransitionStub(Isolate* isolate, bool is_inobject,
Representation representation, StoreMode store_mode)
- : HandlerStub(isolate) {
- DCHECK(store_mode != StoreMapOnly);
- int property_index_key = index.GetFieldAccessStubKey();
- uint8_t repr = PropertyDetails::EncodeRepresentation(representation);
- set_sub_minor_key(StoreFieldByIndexBits::encode(property_index_key) |
- RepresentationBits::encode(repr) |
- StoreModeBits::encode(store_mode));
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = IsInobjectBits::encode(is_inobject) |
+ RepresentationBits::encode(representation.kind()) |
+ StoreModeBits::encode(store_mode);
}
- FieldIndex index() const {
- DCHECK(store_mode() != StoreMapOnly);
- int property_index_key = StoreFieldByIndexBits::decode(sub_minor_key());
- return FieldIndex::FromFieldAccessStubKey(property_index_key);
- }
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
- Representation representation() {
- DCHECK(store_mode() != StoreMapOnly);
- uint8_t repr = RepresentationBits::decode(sub_minor_key());
- return PropertyDetails::DecodeRepresentation(repr);
- }
+ bool is_inobject() const { return IsInobjectBits::decode(minor_key_); }
- StoreMode store_mode() const {
- return StoreModeBits::decode(sub_minor_key());
+ Representation representation() const {
+ return Representation::FromKind(RepresentationBits::decode(minor_key_));
}
- protected:
- Code::Kind kind() const override { return Code::STORE_IC; }
- void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
+ StoreMode store_mode() const { return StoreModeBits::decode(minor_key_); }
private:
- class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
- class RepresentationBits : public BitField<uint8_t, 13, 4> {};
- class StoreModeBits : public BitField<StoreMode, 17, 2> {};
+ class IsInobjectBits : public BitField<bool, 0, 1> {};
+ class RepresentationBits
+ : public BitField<Representation::Kind, IsInobjectBits::kNext, 4> {};
+ STATIC_ASSERT(Representation::kNumRepresentations - 1 <
+ RepresentationBits::kMax);
+ class StoreModeBits
+ : public BitField<StoreMode, RepresentationBits::kNext, 1> {};
- DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorStoreTransition);
- DEFINE_HANDLER_CODE_STUB(StoreTransition, HandlerStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreNamedTransition);
+ DEFINE_TURBOFAN_CODE_STUB(StoreTransition, TurboFanCodeStub);
};
-
-class StoreGlobalStub : public HandlerStub {
+class StoreGlobalStub : public TurboFanCodeStub {
public:
StoreGlobalStub(Isolate* isolate, PropertyCellType type,
Maybe<PropertyCellConstantType> constant_type,
bool check_global)
- : HandlerStub(isolate) {
+ : TurboFanCodeStub(isolate) {
PropertyCellConstantType encoded_constant_type =
constant_type.FromMaybe(PropertyCellConstantType::kSmi);
- set_sub_minor_key(CellTypeBits::encode(type) |
- ConstantTypeBits::encode(encoded_constant_type) |
- CheckGlobalBits::encode(check_global));
+ minor_key_ = CellTypeBits::encode(type) |
+ ConstantTypeBits::encode(encoded_constant_type) |
+ CheckGlobalBits::encode(check_global);
}
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
+
static Handle<HeapObject> property_cell_placeholder(Isolate* isolate) {
return isolate->factory()->uninitialized_value();
}
@@ -1812,37 +1705,25 @@ class StoreGlobalStub : public HandlerStub {
return CodeStub::GetCodeCopy(pattern);
}
- Code::Kind kind() const override { return Code::STORE_IC; }
-
PropertyCellType cell_type() const {
- return CellTypeBits::decode(sub_minor_key());
+ return CellTypeBits::decode(minor_key_);
}
PropertyCellConstantType constant_type() const {
DCHECK(PropertyCellType::kConstantType == cell_type());
- return ConstantTypeBits::decode(sub_minor_key());
+ return ConstantTypeBits::decode(minor_key_);
}
- bool check_global() const { return CheckGlobalBits::decode(sub_minor_key()); }
-
- Representation representation() {
- return Representation::FromKind(
- RepresentationBits::decode(sub_minor_key()));
- }
-
- void set_representation(Representation r) {
- set_sub_minor_key(RepresentationBits::update(sub_minor_key(), r.kind()));
- }
+ bool check_global() const { return CheckGlobalBits::decode(minor_key_); }
private:
class CellTypeBits : public BitField<PropertyCellType, 0, 2> {};
- class ConstantTypeBits : public BitField<PropertyCellConstantType, 2, 2> {};
- class RepresentationBits : public BitField<Representation::Kind, 4, 8> {};
- class CheckGlobalBits : public BitField<bool, 12, 1> {};
+ class ConstantTypeBits
+ : public BitField<PropertyCellConstantType, CellTypeBits::kNext, 2> {};
+ class CheckGlobalBits : public BitField<bool, ConstantTypeBits::kNext, 1> {};
- // TODO(ishell): The stub uses only kValue parameter.
DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_HANDLER_CODE_STUB(StoreGlobal, HandlerStub);
+ DEFINE_TURBOFAN_CODE_STUB(StoreGlobal, TurboFanCodeStub);
};
// TODO(ishell): remove, once StoreGlobalIC is implemented.
@@ -1889,10 +1770,6 @@ class CallApiCallbackStub : public PlatformCodeStub {
: CallApiCallbackStub(isolate, argc, false, call_data_undefined,
is_lazy) {}
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- return ApiCallbackDescriptorBase::ForArgs(isolate(), argc());
- }
-
private:
CallApiCallbackStub(Isolate* isolate, int argc, bool is_store,
bool call_data_undefined, bool is_lazy)
@@ -1916,6 +1793,7 @@ class CallApiCallbackStub : public PlatformCodeStub {
class ArgumentBits : public BitField<int, 2, kArgBits> {};
class IsLazyAccessorBits : public BitField<bool, 3 + kArgBits, 1> {};
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiCallback);
DEFINE_PLATFORM_CODE_STUB(CallApiCallback, PlatformCodeStub);
};
@@ -2195,11 +2073,11 @@ class RegExpExecStub: public PlatformCodeStub {
public:
explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
- DEFINE_ON_STACK_CALL_INTERFACE_DESCRIPTOR(4);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(RegExpExec);
DEFINE_PLATFORM_CODE_STUB(RegExpExec, PlatformCodeStub);
};
-
+// TODO(jgruber): Remove this once all uses in regexp.js have been removed.
class RegExpConstructResultStub final : public HydrogenCodeStub {
public:
explicit RegExpConstructResultStub(Isolate* isolate)
@@ -2490,15 +2368,34 @@ class StoreICTrampolineStub : public PlatformCodeStub {
}
protected:
- StoreICState state() const {
- return StoreICState(static_cast<ExtraICState>(minor_key_));
- }
+ StoreICState state() const { return StoreICState(GetExtraICState()); }
private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
DEFINE_PLATFORM_CODE_STUB(StoreICTrampoline, PlatformCodeStub);
};
+class StoreICTrampolineTFStub : public TurboFanCodeStub {
+ public:
+ StoreICTrampolineTFStub(Isolate* isolate, const StoreICState& state)
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = state.GetExtraICState();
+ }
+
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+ Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
+ ExtraICState GetExtraICState() const final {
+ return static_cast<ExtraICState>(minor_key_);
+ }
+
+ protected:
+ StoreICState state() const { return StoreICState(GetExtraICState()); }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
+ DEFINE_CODE_STUB(StoreICTrampolineTF, TurboFanCodeStub);
+};
+
class KeyedStoreICTrampolineStub : public StoreICTrampolineStub {
public:
KeyedStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
@@ -2627,6 +2524,24 @@ class StoreICStub : public PlatformCodeStub {
void GenerateImpl(MacroAssembler* masm, bool in_frame);
};
+class StoreICTFStub : public TurboFanCodeStub {
+ public:
+ StoreICTFStub(Isolate* isolate, const StoreICState& state)
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = state.GetExtraICState();
+ }
+
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+ Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
+ ExtraICState GetExtraICState() const final {
+ return static_cast<ExtraICState>(minor_key_);
+ }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
+ DEFINE_CODE_STUB(StoreICTF, TurboFanCodeStub);
+};
+
class KeyedStoreICStub : public PlatformCodeStub {
public:
KeyedStoreICStub(Isolate* isolate, const StoreICState& state)
@@ -2696,23 +2611,21 @@ class DoubleToIStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(DoubleToI, PlatformCodeStub);
};
-
-class ScriptContextFieldStub : public HandlerStub {
+class ScriptContextFieldStub : public TurboFanCodeStub {
public:
ScriptContextFieldStub(Isolate* isolate,
const ScriptContextTable::LookupResult* lookup_result)
- : HandlerStub(isolate) {
+ : TurboFanCodeStub(isolate) {
DCHECK(Accepted(lookup_result));
- STATIC_ASSERT(kContextIndexBits + kSlotIndexBits <= kSubMinorKeyBits);
- set_sub_minor_key(ContextIndexBits::encode(lookup_result->context_index) |
- SlotIndexBits::encode(lookup_result->slot_index));
+ minor_key_ = ContextIndexBits::encode(lookup_result->context_index) |
+ SlotIndexBits::encode(lookup_result->slot_index);
}
- int context_index() const {
- return ContextIndexBits::decode(sub_minor_key());
- }
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+
+ int context_index() const { return ContextIndexBits::decode(minor_key_); }
- int slot_index() const { return SlotIndexBits::decode(sub_minor_key()); }
+ int slot_index() const { return SlotIndexBits::decode(minor_key_); }
static bool Accepted(const ScriptContextTable::LookupResult* lookup_result) {
return ContextIndexBits::is_valid(lookup_result->context_index) &&
@@ -2726,7 +2639,7 @@ class ScriptContextFieldStub : public HandlerStub {
class SlotIndexBits
: public BitField<int, kContextIndexBits, kSlotIndexBits> {};
- DEFINE_CODE_STUB_BASE(ScriptContextFieldStub, HandlerStub);
+ DEFINE_CODE_STUB_BASE(ScriptContextFieldStub, TurboFanCodeStub);
};
@@ -2736,11 +2649,11 @@ class LoadScriptContextFieldStub : public ScriptContextFieldStub {
Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
: ScriptContextFieldStub(isolate, lookup_result) {}
- private:
- Code::Kind kind() const override { return Code::LOAD_IC; }
+ ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
+ private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_HANDLER_CODE_STUB(LoadScriptContextField, ScriptContextFieldStub);
+ DEFINE_TURBOFAN_CODE_STUB(LoadScriptContextField, ScriptContextFieldStub);
};
@@ -2750,11 +2663,11 @@ class StoreScriptContextFieldStub : public ScriptContextFieldStub {
Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
: ScriptContextFieldStub(isolate, lookup_result) {}
- private:
- Code::Kind kind() const override { return Code::STORE_IC; }
+ ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
+ private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_HANDLER_CODE_STUB(StoreScriptContextField, ScriptContextFieldStub);
+ DEFINE_TURBOFAN_CODE_STUB(StoreScriptContextField, ScriptContextFieldStub);
};
@@ -2790,38 +2703,38 @@ class LoadFastElementStub : public HandlerStub {
DEFINE_HANDLER_CODE_STUB(LoadFastElement, HandlerStub);
};
-
-class StoreFastElementStub : public HydrogenCodeStub {
+class StoreFastElementStub : public TurboFanCodeStub {
public:
StoreFastElementStub(Isolate* isolate, bool is_js_array,
ElementsKind elements_kind, KeyedAccessStoreMode mode)
- : HydrogenCodeStub(isolate) {
- set_sub_minor_key(CommonStoreModeBits::encode(mode) |
- ElementsKindBits::encode(elements_kind) |
- IsJSArrayBits::encode(is_js_array));
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = CommonStoreModeBits::encode(mode) |
+ ElementsKindBits::encode(elements_kind) |
+ IsJSArrayBits::encode(is_js_array);
}
static void GenerateAheadOfTime(Isolate* isolate);
- bool is_js_array() const { return IsJSArrayBits::decode(sub_minor_key()); }
+ bool is_js_array() const { return IsJSArrayBits::decode(minor_key_); }
ElementsKind elements_kind() const {
- return ElementsKindBits::decode(sub_minor_key());
+ return ElementsKindBits::decode(minor_key_);
}
KeyedAccessStoreMode store_mode() const {
- return CommonStoreModeBits::decode(sub_minor_key());
+ return CommonStoreModeBits::decode(minor_key_);
}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
private:
- class ElementsKindBits : public BitField<ElementsKind, 3, 8> {};
- class IsJSArrayBits : public BitField<bool, 11, 1> {};
+ class ElementsKindBits
+ : public BitField<ElementsKind, CommonStoreModeBits::kNext, 8> {};
+ class IsJSArrayBits : public BitField<bool, ElementsKindBits::kNext, 1> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_HYDROGEN_CODE_STUB(StoreFastElement, HydrogenCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(StoreFastElement, TurboFanCodeStub);
};
@@ -3008,10 +2921,6 @@ class StoreElementStub : public PlatformCodeStub {
CommonStoreModeBits::encode(mode);
}
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- return StoreWithVectorDescriptor(isolate());
- }
-
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
@@ -3020,8 +2929,10 @@ class StoreElementStub : public PlatformCodeStub {
return ElementsKindBits::decode(minor_key_);
}
- class ElementsKindBits : public BitField<ElementsKind, 3, 8> {};
+ class ElementsKindBits
+ : public BitField<ElementsKind, CommonStoreModeBits::kNext, 8> {};
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
DEFINE_PLATFORM_CODE_STUB(StoreElement, PlatformCodeStub);
};
@@ -3098,34 +3009,35 @@ class ToBooleanICStub : public HydrogenCodeStub {
std::ostream& operator<<(std::ostream& os, const ToBooleanICStub::Types& t);
-class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
+class ElementsTransitionAndStoreStub : public TurboFanCodeStub {
public:
ElementsTransitionAndStoreStub(Isolate* isolate, ElementsKind from_kind,
ElementsKind to_kind, bool is_jsarray,
KeyedAccessStoreMode store_mode)
- : HydrogenCodeStub(isolate) {
- set_sub_minor_key(CommonStoreModeBits::encode(store_mode) |
- FromBits::encode(from_kind) | ToBits::encode(to_kind) |
- IsJSArrayBits::encode(is_jsarray));
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = CommonStoreModeBits::encode(store_mode) |
+ FromBits::encode(from_kind) | ToBits::encode(to_kind) |
+ IsJSArrayBits::encode(is_jsarray);
}
- ElementsKind from_kind() const { return FromBits::decode(sub_minor_key()); }
- ElementsKind to_kind() const { return ToBits::decode(sub_minor_key()); }
- bool is_jsarray() const { return IsJSArrayBits::decode(sub_minor_key()); }
+ ElementsKind from_kind() const { return FromBits::decode(minor_key_); }
+ ElementsKind to_kind() const { return ToBits::decode(minor_key_); }
+ bool is_jsarray() const { return IsJSArrayBits::decode(minor_key_); }
KeyedAccessStoreMode store_mode() const {
- return CommonStoreModeBits::decode(sub_minor_key());
+ return CommonStoreModeBits::decode(minor_key_);
}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
private:
- class FromBits : public BitField<ElementsKind, 3, 8> {};
+ class FromBits
+ : public BitField<ElementsKind, CommonStoreModeBits::kNext, 8> {};
class ToBits : public BitField<ElementsKind, 11, 8> {};
class IsJSArrayBits : public BitField<bool, 19, 1> {};
- DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorStoreTransition);
- DEFINE_HYDROGEN_CODE_STUB(ElementsTransitionAndStore, HydrogenCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreTransition);
+ DEFINE_TURBOFAN_CODE_STUB(ElementsTransitionAndStore, TurboFanCodeStub);
};
@@ -3191,29 +3103,24 @@ class StoreBufferOverflowStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(StoreBufferOverflow, PlatformCodeStub);
};
-
-class SubStringStub : public PlatformCodeStub {
- public:
- explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- DEFINE_ON_STACK_CALL_INTERFACE_DESCRIPTOR(3);
- DEFINE_PLATFORM_CODE_STUB(SubString, PlatformCodeStub);
-};
-
-class ToStringStub final : public PlatformCodeStub {
+class SubStringStub : public TurboFanCodeStub {
public:
- explicit ToStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+ explicit SubStringStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
- DEFINE_PLATFORM_CODE_STUB(ToString, PlatformCodeStub);
-};
+ static compiler::Node* Generate(CodeStubAssembler* assembler,
+ compiler::Node* string, compiler::Node* from,
+ compiler::Node* to, compiler::Node* context);
-class ToNameStub final : public PlatformCodeStub {
- public:
- explicit ToNameStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+ void GenerateAssembly(CodeStubAssembler* assembler) const override {
+ assembler->Return(Generate(assembler,
+ assembler->Parameter(Descriptor::kString),
+ assembler->Parameter(Descriptor::kFrom),
+ assembler->Parameter(Descriptor::kTo),
+ assembler->Parameter(Descriptor::kContext)));
+ }
- DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
- DEFINE_PLATFORM_CODE_STUB(ToName, PlatformCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(SubString);
+ DEFINE_CODE_STUB(SubString, TurboFanCodeStub);
};
class ToObjectStub final : public TurboFanCodeStub {
@@ -3231,7 +3138,7 @@ class ToObjectStub final : public TurboFanCodeStub {
#undef DEFINE_CODE_STUB
#undef DEFINE_CODE_STUB_BASE
-extern Representation RepresentationFromType(Type* type);
+extern Representation RepresentationFromMachineType(MachineType type);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index e47db10f70..afd8a6f592 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -12,10 +12,9 @@
#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
-#include "src/compiler.h"
+#include "src/compilation-info.h"
#include "src/debug/debug.h"
#include "src/eh-frame.h"
-#include "src/parsing/parser.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -147,7 +146,8 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
isolate->bootstrapper()->IsActive()
? FLAG_print_builtin_code
: (FLAG_print_code || (info->IsStub() && FLAG_print_code_stubs) ||
- (info->IsOptimizing() && FLAG_print_opt_code));
+ (info->IsOptimizing() && FLAG_print_opt_code &&
+ info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)));
if (print_code) {
std::unique_ptr<char[]> debug_name = info->GetDebugName();
CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
diff --git a/deps/v8/src/collector.h b/deps/v8/src/collector.h
index 8454aae19d..abb2fbb25b 100644
--- a/deps/v8/src/collector.h
+++ b/deps/v8/src/collector.h
@@ -6,7 +6,7 @@
#define V8_COLLECTOR_H_
#include "src/checks.h"
-#include "src/list.h"
+#include "src/list-inl.h"
#include "src/vector.h"
namespace v8 {
diff --git a/deps/v8/src/compilation-dependencies.cc b/deps/v8/src/compilation-dependencies.cc
index 96b3859e9a..dfd7cfe57e 100644
--- a/deps/v8/src/compilation-dependencies.cc
+++ b/deps/v8/src/compilation-dependencies.cc
@@ -8,7 +8,7 @@
#include "src/handles-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compilation-info.cc b/deps/v8/src/compilation-info.cc
new file mode 100644
index 0000000000..2e0934a2cd
--- /dev/null
+++ b/deps/v8/src/compilation-info.cc
@@ -0,0 +1,214 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compilation-info.h"
+
+#include "src/api.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/isolate.h"
+#include "src/parsing/parse-info.h"
+
+namespace v8 {
+namespace internal {
+
+#define PARSE_INFO_GETTER(type, name) \
+ type CompilationInfo::name() const { \
+ CHECK(parse_info()); \
+ return parse_info()->name(); \
+ }
+
+#define PARSE_INFO_GETTER_WITH_DEFAULT(type, name, def) \
+ type CompilationInfo::name() const { \
+ return parse_info() ? parse_info()->name() : def; \
+ }
+
+PARSE_INFO_GETTER(Handle<Script>, script)
+PARSE_INFO_GETTER(FunctionLiteral*, literal)
+PARSE_INFO_GETTER_WITH_DEFAULT(DeclarationScope*, scope, nullptr)
+PARSE_INFO_GETTER(Handle<SharedFunctionInfo>, shared_info)
+
+#undef PARSE_INFO_GETTER
+#undef PARSE_INFO_GETTER_WITH_DEFAULT
+
+bool CompilationInfo::has_shared_info() const {
+ return parse_info_ && !parse_info_->shared_info().is_null();
+}
+
+CompilationInfo::CompilationInfo(ParseInfo* parse_info,
+ Handle<JSFunction> closure)
+ : CompilationInfo(parse_info, {}, Code::ComputeFlags(Code::FUNCTION), BASE,
+ parse_info->isolate(), parse_info->zone()) {
+ closure_ = closure;
+
+ // Compiling for the snapshot typically results in different code than
+ // compiling later on. This means that code recompiled with deoptimization
+ // support won't be "equivalent" (as defined by SharedFunctionInfo::
+ // EnableDeoptimizationSupport), so it will replace the old code and all
+ // its type feedback. To avoid this, always compile functions in the snapshot
+ // with deoptimization support.
+ if (isolate_->serializer_enabled()) EnableDeoptimizationSupport();
+
+ if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
+ if (FLAG_turbo_source_positions) MarkAsSourcePositionsEnabled();
+ if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
+}
+
+CompilationInfo::CompilationInfo(Vector<const char> debug_name,
+ Isolate* isolate, Zone* zone,
+ Code::Flags code_flags)
+ : CompilationInfo(nullptr, debug_name, code_flags, STUB, isolate, zone) {}
+
+CompilationInfo::CompilationInfo(ParseInfo* parse_info,
+ Vector<const char> debug_name,
+ Code::Flags code_flags, Mode mode,
+ Isolate* isolate, Zone* zone)
+ : parse_info_(parse_info),
+ isolate_(isolate),
+ flags_(0),
+ code_flags_(code_flags),
+ mode_(mode),
+ osr_ast_id_(BailoutId::None()),
+ zone_(zone),
+ deferred_handles_(nullptr),
+ dependencies_(isolate, zone),
+ bailout_reason_(kNoReason),
+ prologue_offset_(Code::kPrologueOffsetNotSet),
+ parameter_count_(0),
+ optimization_id_(-1),
+ osr_expr_stack_height_(-1),
+ debug_name_(debug_name) {}
+
+CompilationInfo::~CompilationInfo() {
+ if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
+ shared_info()->DisableOptimization(bailout_reason());
+ }
+ dependencies()->Rollback();
+ delete deferred_handles_;
+}
+
+int CompilationInfo::num_parameters() const {
+ return !IsStub() ? scope()->num_parameters() : parameter_count_;
+}
+
+int CompilationInfo::num_parameters_including_this() const {
+ return num_parameters() + (is_this_defined() ? 1 : 0);
+}
+
+bool CompilationInfo::is_this_defined() const { return !IsStub(); }
+
+// Primitive functions are unlikely to be picked up by the stack-walking
+// profiler, so they trigger their own optimization when they're called
+// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
+bool CompilationInfo::ShouldSelfOptimize() {
+ return FLAG_crankshaft &&
+ !(literal()->flags() & AstProperties::kDontSelfOptimize) &&
+ !literal()->dont_optimize() &&
+ literal()->scope()->AllowsLazyCompilation() &&
+ !shared_info()->optimization_disabled();
+}
+
+void CompilationInfo::ReopenHandlesInNewHandleScope() {
+ closure_ = Handle<JSFunction>(*closure_);
+}
+
+bool CompilationInfo::has_simple_parameters() {
+ return scope()->has_simple_parameters();
+}
+
+std::unique_ptr<char[]> CompilationInfo::GetDebugName() const {
+ if (parse_info() && parse_info()->literal()) {
+ AllowHandleDereference allow_deref;
+ return parse_info()->literal()->debug_name()->ToCString();
+ }
+ if (parse_info() && !parse_info()->shared_info().is_null()) {
+ return parse_info()->shared_info()->DebugName()->ToCString();
+ }
+ Vector<const char> name_vec = debug_name_;
+ if (name_vec.is_empty()) name_vec = ArrayVector("unknown");
+ std::unique_ptr<char[]> name(new char[name_vec.length() + 1]);
+ memcpy(name.get(), name_vec.start(), name_vec.length());
+ name[name_vec.length()] = '\0';
+ return name;
+}
+
+StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
+ switch (output_code_kind()) {
+ case Code::STUB:
+ case Code::BYTECODE_HANDLER:
+ case Code::HANDLER:
+ case Code::BUILTIN:
+#define CASE_KIND(kind) case Code::kind:
+ IC_KIND_LIST(CASE_KIND)
+#undef CASE_KIND
+ return StackFrame::STUB;
+ case Code::WASM_FUNCTION:
+ return StackFrame::WASM;
+ case Code::JS_TO_WASM_FUNCTION:
+ return StackFrame::JS_TO_WASM;
+ case Code::WASM_TO_JS_FUNCTION:
+ return StackFrame::WASM_TO_JS;
+ default:
+ UNIMPLEMENTED();
+ return StackFrame::NONE;
+ }
+}
+
+int CompilationInfo::GetDeclareGlobalsFlags() const {
+ DCHECK(DeclareGlobalsLanguageMode::is_valid(parse_info()->language_mode()));
+ return DeclareGlobalsEvalFlag::encode(parse_info()->is_eval()) |
+ DeclareGlobalsNativeFlag::encode(parse_info()->is_native()) |
+ DeclareGlobalsLanguageMode::encode(parse_info()->language_mode());
+}
+
+SourcePositionTableBuilder::RecordingMode
+CompilationInfo::SourcePositionRecordingMode() const {
+ return parse_info() && parse_info()->is_native()
+ ? SourcePositionTableBuilder::OMIT_SOURCE_POSITIONS
+ : SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS;
+}
+
+bool CompilationInfo::ExpectsJSReceiverAsReceiver() {
+ return is_sloppy(parse_info()->language_mode()) && !parse_info()->is_native();
+}
+
+bool CompilationInfo::has_context() const { return !closure().is_null(); }
+
+Context* CompilationInfo::context() const {
+ return has_context() ? closure()->context() : nullptr;
+}
+
+bool CompilationInfo::has_native_context() const {
+ return !closure().is_null() && (closure()->native_context() != nullptr);
+}
+
+Context* CompilationInfo::native_context() const {
+ return has_native_context() ? closure()->native_context() : nullptr;
+}
+
+bool CompilationInfo::has_global_object() const { return has_native_context(); }
+
+JSGlobalObject* CompilationInfo::global_object() const {
+ return has_global_object() ? native_context()->global_object() : nullptr;
+}
+
+void CompilationInfo::SetOptimizing() {
+ DCHECK(has_shared_info());
+ SetMode(OPTIMIZE);
+ optimization_id_ = isolate()->NextOptimizationId();
+ code_flags_ = Code::KindField::update(code_flags_, Code::OPTIMIZED_FUNCTION);
+}
+
+void CompilationInfo::AddInlinedFunction(
+ Handle<SharedFunctionInfo> inlined_function) {
+ inlined_functions_.push_back(InlinedFunctionHolder(
+ inlined_function, handle(inlined_function->code())));
+}
+
+Code::Kind CompilationInfo::output_code_kind() const {
+ return Code::ExtractKindFromFlags(code_flags_);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compilation-info.h b/deps/v8/src/compilation-info.h
new file mode 100644
index 0000000000..88477ae75e
--- /dev/null
+++ b/deps/v8/src/compilation-info.h
@@ -0,0 +1,400 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILATION_INFO_H_
+#define V8_COMPILATION_INFO_H_
+
+#include <memory>
+
+#include "src/compilation-dependencies.h"
+#include "src/frames.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/source-position-table.h"
+#include "src/utils.h"
+#include "src/vector.h"
+
+namespace v8 {
+namespace internal {
+
+class DeclarationScope;
+class DeferredHandles;
+class FunctionLiteral;
+class JavaScriptFrame;
+class ParseInfo;
+class Isolate;
+class Zone;
+
+// CompilationInfo encapsulates some information known at compile time. It
+// is constructed based on the resources available at compile-time.
+class CompilationInfo final {
+ public:
+ // Various configuration flags for a compilation, as well as some properties
+ // of the compiled code produced by a compilation.
+ enum Flag {
+ kDeferredCalling = 1 << 0,
+ kNonDeferredCalling = 1 << 1,
+ kSavesCallerDoubles = 1 << 2,
+ kRequiresFrame = 1 << 3,
+ kMustNotHaveEagerFrame = 1 << 4,
+ kDeoptimizationSupport = 1 << 5,
+ kDebug = 1 << 6,
+ kSerializing = 1 << 7,
+ kFunctionContextSpecializing = 1 << 8,
+ kFrameSpecializing = 1 << 9,
+ kNativeContextSpecializing = 1 << 10,
+ kInliningEnabled = 1 << 11,
+ kDisableFutureOptimization = 1 << 12,
+ kSplittingEnabled = 1 << 13,
+ kDeoptimizationEnabled = 1 << 14,
+ kSourcePositionsEnabled = 1 << 15,
+ kBailoutOnUninitialized = 1 << 16,
+ kOptimizeFromBytecode = 1 << 17,
+ kTypeFeedbackEnabled = 1 << 18,
+ kAccessorInliningEnabled = 1 << 19,
+ };
+
+ CompilationInfo(ParseInfo* parse_info, Handle<JSFunction> closure);
+ CompilationInfo(Vector<const char> debug_name, Isolate* isolate, Zone* zone,
+ Code::Flags code_flags);
+ ~CompilationInfo();
+
+ ParseInfo* parse_info() const { return parse_info_; }
+
+ // -----------------------------------------------------------
+ // TODO(titzer): inline and delete accessors of ParseInfo
+ // -----------------------------------------------------------
+ Handle<Script> script() const;
+ FunctionLiteral* literal() const;
+ DeclarationScope* scope() const;
+ Handle<SharedFunctionInfo> shared_info() const;
+ bool has_shared_info() const;
+ // -----------------------------------------------------------
+
+ Isolate* isolate() const { return isolate_; }
+ Zone* zone() { return zone_; }
+ bool is_osr() const { return !osr_ast_id_.IsNone(); }
+ Handle<JSFunction> closure() const { return closure_; }
+ Handle<Code> code() const { return code_; }
+ Code::Flags code_flags() const { return code_flags_; }
+ BailoutId osr_ast_id() const { return osr_ast_id_; }
+ JavaScriptFrame* osr_frame() const { return osr_frame_; }
+ int num_parameters() const;
+ int num_parameters_including_this() const;
+ bool is_this_defined() const;
+
+ void set_parameter_count(int parameter_count) {
+ DCHECK(IsStub());
+ parameter_count_ = parameter_count;
+ }
+
+ bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
+ Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
+ bool is_calling() const {
+ return GetFlag(kDeferredCalling) || GetFlag(kNonDeferredCalling);
+ }
+
+ void MarkAsDeferredCalling() { SetFlag(kDeferredCalling); }
+
+ bool is_deferred_calling() const { return GetFlag(kDeferredCalling); }
+
+ void MarkAsNonDeferredCalling() { SetFlag(kNonDeferredCalling); }
+
+ bool is_non_deferred_calling() const { return GetFlag(kNonDeferredCalling); }
+
+ void MarkAsSavesCallerDoubles() { SetFlag(kSavesCallerDoubles); }
+
+ bool saves_caller_doubles() const { return GetFlag(kSavesCallerDoubles); }
+
+ void MarkAsRequiresFrame() { SetFlag(kRequiresFrame); }
+
+ bool requires_frame() const { return GetFlag(kRequiresFrame); }
+
+ void MarkMustNotHaveEagerFrame() { SetFlag(kMustNotHaveEagerFrame); }
+
+ bool GetMustNotHaveEagerFrame() const {
+ return GetFlag(kMustNotHaveEagerFrame);
+ }
+
+ // Compiles marked as debug produce unoptimized code with debug break slots.
+ // Inner functions that cannot be compiled w/o context are compiled eagerly.
+ // Always include deoptimization support to avoid having to recompile again.
+ void MarkAsDebug() {
+ SetFlag(kDebug);
+ SetFlag(kDeoptimizationSupport);
+ }
+
+ bool is_debug() const { return GetFlag(kDebug); }
+
+ void PrepareForSerializing() { SetFlag(kSerializing); }
+
+ bool will_serialize() const { return GetFlag(kSerializing); }
+
+ void MarkAsFunctionContextSpecializing() {
+ SetFlag(kFunctionContextSpecializing);
+ }
+
+ bool is_function_context_specializing() const {
+ return GetFlag(kFunctionContextSpecializing);
+ }
+
+ void MarkAsFrameSpecializing() { SetFlag(kFrameSpecializing); }
+
+ bool is_frame_specializing() const { return GetFlag(kFrameSpecializing); }
+
+ void MarkAsNativeContextSpecializing() {
+ SetFlag(kNativeContextSpecializing);
+ }
+
+ bool is_native_context_specializing() const {
+ return GetFlag(kNativeContextSpecializing);
+ }
+
+ void MarkAsDeoptimizationEnabled() { SetFlag(kDeoptimizationEnabled); }
+
+ bool is_deoptimization_enabled() const {
+ return GetFlag(kDeoptimizationEnabled);
+ }
+
+ void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
+
+ bool is_type_feedback_enabled() const {
+ return GetFlag(kTypeFeedbackEnabled);
+ }
+
+ void MarkAsAccessorInliningEnabled() { SetFlag(kAccessorInliningEnabled); }
+
+ bool is_accessor_inlining_enabled() const {
+ return GetFlag(kAccessorInliningEnabled);
+ }
+
+ void MarkAsSourcePositionsEnabled() { SetFlag(kSourcePositionsEnabled); }
+
+ bool is_source_positions_enabled() const {
+ return GetFlag(kSourcePositionsEnabled);
+ }
+
+ void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
+
+ bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
+
+ void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
+
+ bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
+
+ void MarkAsBailoutOnUninitialized() { SetFlag(kBailoutOnUninitialized); }
+
+ bool is_bailout_on_uninitialized() const {
+ return GetFlag(kBailoutOnUninitialized);
+ }
+
+ void MarkAsOptimizeFromBytecode() { SetFlag(kOptimizeFromBytecode); }
+
+ bool is_optimizing_from_bytecode() const {
+ return GetFlag(kOptimizeFromBytecode);
+ }
+
+ bool GeneratePreagedPrologue() const {
+ // Generate a pre-aged prologue if we are optimizing for size, which
+ // will make code flushing more aggressive. Only apply to Code::FUNCTION,
+ // since StaticMarkingVisitor::IsFlushable only flushes proper functions.
+ return FLAG_optimize_for_size && FLAG_age_code && !is_debug() &&
+ output_code_kind() == Code::FUNCTION;
+ }
+
+ void SetCode(Handle<Code> code) { code_ = code; }
+
+ void SetBytecodeArray(Handle<BytecodeArray> bytecode_array) {
+ bytecode_array_ = bytecode_array;
+ }
+
+ bool ShouldTrapOnDeopt() const {
+ return (FLAG_trap_on_deopt && IsOptimizing()) ||
+ (FLAG_trap_on_stub_deopt && IsStub());
+ }
+
+ bool has_context() const;
+ Context* context() const;
+
+ bool has_native_context() const;
+ Context* native_context() const;
+
+ bool has_global_object() const;
+ JSGlobalObject* global_object() const;
+
+ // Accessors for the different compilation modes.
+ bool IsOptimizing() const { return mode_ == OPTIMIZE; }
+ bool IsStub() const { return mode_ == STUB; }
+ void SetOptimizing();
+ void SetOptimizingForOsr(BailoutId osr_ast_id, JavaScriptFrame* osr_frame) {
+ SetOptimizing();
+ osr_ast_id_ = osr_ast_id;
+ osr_frame_ = osr_frame;
+ }
+
+ // Deoptimization support.
+ bool HasDeoptimizationSupport() const {
+ return GetFlag(kDeoptimizationSupport);
+ }
+ void EnableDeoptimizationSupport() {
+ DCHECK_EQ(BASE, mode_);
+ SetFlag(kDeoptimizationSupport);
+ }
+ bool ShouldEnsureSpaceForLazyDeopt() { return !IsStub(); }
+
+ bool ExpectsJSReceiverAsReceiver();
+
+ // Determines whether or not to insert a self-optimization header.
+ bool ShouldSelfOptimize();
+
+ void set_deferred_handles(DeferredHandles* deferred_handles) {
+ DCHECK(deferred_handles_ == NULL);
+ deferred_handles_ = deferred_handles;
+ }
+
+ void ReopenHandlesInNewHandleScope();
+
+ void AbortOptimization(BailoutReason reason) {
+ DCHECK(reason != kNoReason);
+ if (bailout_reason_ == kNoReason) bailout_reason_ = reason;
+ SetFlag(kDisableFutureOptimization);
+ }
+
+ void RetryOptimization(BailoutReason reason) {
+ DCHECK(reason != kNoReason);
+ if (GetFlag(kDisableFutureOptimization)) return;
+ bailout_reason_ = reason;
+ }
+
+ BailoutReason bailout_reason() const { return bailout_reason_; }
+
+ int prologue_offset() const {
+ DCHECK_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
+ return prologue_offset_;
+ }
+
+ void set_prologue_offset(int prologue_offset) {
+ DCHECK_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
+ prologue_offset_ = prologue_offset;
+ }
+
+ CompilationDependencies* dependencies() { return &dependencies_; }
+
+ int optimization_id() const { return optimization_id_; }
+
+ int osr_expr_stack_height() { return osr_expr_stack_height_; }
+ void set_osr_expr_stack_height(int height) {
+ DCHECK(height >= 0);
+ osr_expr_stack_height_ = height;
+ }
+
+ bool has_simple_parameters();
+
+ struct InlinedFunctionHolder {
+ Handle<SharedFunctionInfo> shared_info;
+
+ // Root that holds the unoptimized code of the inlined function alive
+ // (and out of reach of code flushing) until we finish compilation.
+ // Do not remove.
+ Handle<Code> inlined_code_object_root;
+
+ InlinedFunctionHolder(Handle<SharedFunctionInfo> inlined_shared_info,
+ Handle<Code> inlined_code_object_root)
+ : shared_info(inlined_shared_info),
+ inlined_code_object_root(inlined_code_object_root) {}
+ };
+
+ typedef std::vector<InlinedFunctionHolder> InlinedFunctionList;
+ InlinedFunctionList const& inlined_functions() const {
+ return inlined_functions_;
+ }
+
+ void AddInlinedFunction(Handle<SharedFunctionInfo> inlined_function);
+
+ std::unique_ptr<char[]> GetDebugName() const;
+
+ Code::Kind output_code_kind() const;
+
+ StackFrame::Type GetOutputStackFrameType() const;
+
+ int GetDeclareGlobalsFlags() const;
+
+ SourcePositionTableBuilder::RecordingMode SourcePositionRecordingMode() const;
+
+ private:
+ // Compilation mode.
+ // BASE is generated by the full codegen, optionally prepared for bailouts.
+ // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
+ enum Mode { BASE, OPTIMIZE, STUB };
+
+ CompilationInfo(ParseInfo* parse_info, Vector<const char> debug_name,
+ Code::Flags code_flags, Mode mode, Isolate* isolate,
+ Zone* zone);
+
+ ParseInfo* parse_info_;
+ Isolate* isolate_;
+
+ void SetMode(Mode mode) { mode_ = mode; }
+
+ void SetFlag(Flag flag) { flags_ |= flag; }
+
+ void SetFlag(Flag flag, bool value) {
+ flags_ = value ? flags_ | flag : flags_ & ~flag;
+ }
+
+ bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
+
+ unsigned flags_;
+
+ Code::Flags code_flags_;
+
+ Handle<JSFunction> closure_;
+
+ // The compiled code.
+ Handle<Code> code_;
+
+ // Compilation mode flag and whether deoptimization is allowed.
+ Mode mode_;
+ BailoutId osr_ast_id_;
+
+ // Holds the bytecode array generated by the interpreter.
+ // TODO(rmcilroy/mstarzinger): Temporary work-around until compiler.cc is
+ // refactored to avoid us needing to carry the BytcodeArray around.
+ Handle<BytecodeArray> bytecode_array_;
+
+ // The zone from which the compilation pipeline working on this
+ // CompilationInfo allocates.
+ Zone* zone_;
+
+ DeferredHandles* deferred_handles_;
+
+ // Dependencies for this compilation, e.g. stable maps.
+ CompilationDependencies dependencies_;
+
+ BailoutReason bailout_reason_;
+
+ int prologue_offset_;
+
+ InlinedFunctionList inlined_functions_;
+
+ // Number of parameters used for compilation of stubs that require arguments.
+ int parameter_count_;
+
+ int optimization_id_;
+
+ int osr_expr_stack_height_;
+
+ // The current OSR frame for specialization or {nullptr}.
+ JavaScriptFrame* osr_frame_ = nullptr;
+
+ Vector<const char> debug_name_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILATION_INFO_H_
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
index 923793665a..96956aec97 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
@@ -5,6 +5,8 @@
#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
#include "src/assert-scope.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
#include "src/global-handles.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -12,21 +14,22 @@
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/unicode-cache.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
CompilerDispatcherJob::CompilerDispatcherJob(Isolate* isolate,
- Handle<JSFunction> function,
+ Handle<SharedFunctionInfo> shared,
size_t max_stack_size)
: isolate_(isolate),
- function_(Handle<JSFunction>::cast(
- isolate_->global_handles()->Create(*function))),
- max_stack_size_(max_stack_size) {
+ shared_(Handle<SharedFunctionInfo>::cast(
+ isolate_->global_handles()->Create(*shared))),
+ max_stack_size_(max_stack_size),
+ can_compile_on_background_thread_(false) {
HandleScope scope(isolate_);
- Handle<SharedFunctionInfo> shared(function_->shared(), isolate_);
- Handle<Script> script(Script::cast(shared->script()), isolate_);
+ DCHECK(!shared_->outer_scope_info()->IsTheHole(isolate_));
+ Handle<Script> script(Script::cast(shared_->script()), isolate_);
Handle<String> source(String::cast(script->source()), isolate_);
can_parse_on_background_thread_ =
source->IsExternalTwoByteString() || source->IsExternalOneByteString();
@@ -36,7 +39,7 @@ CompilerDispatcherJob::~CompilerDispatcherJob() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status_ == CompileJobStatus::kInitial ||
status_ == CompileJobStatus::kDone);
- i::GlobalHandles::Destroy(Handle<Object>::cast(function_).location());
+ i::GlobalHandles::Destroy(Handle<Object>::cast(shared_).location());
}
void CompilerDispatcherJob::PrepareToParseOnMainThread() {
@@ -45,46 +48,42 @@ void CompilerDispatcherJob::PrepareToParseOnMainThread() {
HandleScope scope(isolate_);
unicode_cache_.reset(new UnicodeCache());
zone_.reset(new Zone(isolate_->allocator()));
- Handle<SharedFunctionInfo> shared(function_->shared(), isolate_);
- Handle<Script> script(Script::cast(shared->script()), isolate_);
+ Handle<Script> script(Script::cast(shared_->script()), isolate_);
DCHECK(script->type() != Script::TYPE_NATIVE);
Handle<String> source(String::cast(script->source()), isolate_);
- if (source->IsExternalTwoByteString()) {
- character_stream_.reset(new ExternalTwoByteStringUtf16CharacterStream(
- Handle<ExternalTwoByteString>::cast(source), shared->start_position(),
- shared->end_position()));
- } else if (source->IsExternalOneByteString()) {
- character_stream_.reset(new ExternalOneByteStringUtf16CharacterStream(
- Handle<ExternalOneByteString>::cast(source), shared->start_position(),
- shared->end_position()));
+ if (source->IsExternalTwoByteString() || source->IsExternalOneByteString()) {
+ character_stream_.reset(ScannerStream::For(
+ source, shared_->start_position(), shared_->end_position()));
} else {
source = String::Flatten(source);
// Have to globalize the reference here, so it survives between function
// calls.
source_ = Handle<String>::cast(isolate_->global_handles()->Create(*source));
- character_stream_.reset(new GenericStringUtf16CharacterStream(
- source_, shared->start_position(), shared->end_position()));
+ character_stream_.reset(ScannerStream::For(
+ source_, shared_->start_position(), shared_->end_position()));
}
parse_info_.reset(new ParseInfo(zone_.get()));
parse_info_->set_isolate(isolate_);
parse_info_->set_character_stream(character_stream_.get());
parse_info_->set_lazy();
parse_info_->set_hash_seed(isolate_->heap()->HashSeed());
- parse_info_->set_is_named_expression(shared->is_named_expression());
- parse_info_->set_calls_eval(shared->scope_info()->CallsEval());
- parse_info_->set_compiler_hints(shared->compiler_hints());
- parse_info_->set_start_position(shared->start_position());
- parse_info_->set_end_position(shared->end_position());
+ parse_info_->set_is_named_expression(shared_->is_named_expression());
+ parse_info_->set_compiler_hints(shared_->compiler_hints());
+ parse_info_->set_start_position(shared_->start_position());
+ parse_info_->set_end_position(shared_->end_position());
parse_info_->set_unicode_cache(unicode_cache_.get());
- parse_info_->set_language_mode(shared->language_mode());
+ parse_info_->set_language_mode(shared_->language_mode());
parser_.reset(new Parser(parse_info_.get()));
- parser_->DeserializeScopeChain(
- parse_info_.get(), handle(function_->context(), isolate_),
- Scope::DeserializationMode::kDeserializeOffHeap);
+ Handle<ScopeInfo> outer_scope_info(
+ handle(ScopeInfo::cast(shared_->outer_scope_info())));
+ parser_->DeserializeScopeChain(parse_info_.get(),
+ outer_scope_info->length() > 0
+ ? MaybeHandle<ScopeInfo>(outer_scope_info)
+ : MaybeHandle<ScopeInfo>());
- Handle<String> name(String::cast(shared->name()));
+ Handle<String> name(String::cast(shared_->name()));
parse_info_->set_function_name(
parse_info_->ast_value_factory()->GetString(name));
status_ = CompileJobStatus::kReadyToParse;
@@ -108,8 +107,7 @@ void CompilerDispatcherJob::Parse() {
// use it.
parse_info_->set_isolate(nullptr);
- uintptr_t stack_limit =
- reinterpret_cast<uintptr_t>(&stack_limit) - max_stack_size_ * KB;
+ uintptr_t stack_limit = GetCurrentStackPosition() - max_stack_size_ * KB;
parser_->set_stack_limit(stack_limit);
parser_->ParseOnBackground(parse_info_.get());
@@ -131,25 +129,32 @@ bool CompilerDispatcherJob::FinalizeParsingOnMainThread() {
if (parse_info_->literal() == nullptr) {
status_ = CompileJobStatus::kFailed;
} else {
- status_ = CompileJobStatus::kReadyToCompile;
+ status_ = CompileJobStatus::kReadyToAnalyse;
}
DeferredHandleScope scope(isolate_);
{
- // Create a canonical handle scope before internalizing parsed values if
- // compiling bytecode. This is required for off-thread bytecode generation.
- std::unique_ptr<CanonicalHandleScope> canonical;
- if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(isolate_));
-
- Handle<SharedFunctionInfo> shared(function_->shared(), isolate_);
- Handle<Script> script(Script::cast(shared->script()), isolate_);
+ Handle<Script> script(Script::cast(shared_->script()), isolate_);
parse_info_->set_script(script);
- parse_info_->set_context(handle(function_->context(), isolate_));
+ Handle<ScopeInfo> outer_scope_info(
+ handle(ScopeInfo::cast(shared_->outer_scope_info())));
+ if (outer_scope_info->length() > 0) {
+ parse_info_->set_outer_scope_info(outer_scope_info);
+ }
+ parse_info_->set_shared_info(shared_);
+
+ {
+ // Create a canonical handle scope if compiling ignition bytecode. This is
+ // required by the constant array builder to de-duplicate objects without
+ // dereferencing handles.
+ std::unique_ptr<CanonicalHandleScope> canonical;
+ if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(isolate_));
- // Do the parsing tasks which need to be done on the main thread. This will
- // also handle parse errors.
- parser_->Internalize(isolate_, script, parse_info_->literal() == nullptr);
+ // Do the parsing tasks which need to be done on the main thread. This
+ // will also handle parse errors.
+ parser_->Internalize(isolate_, script, parse_info_->literal() == nullptr);
+ }
parser_->HandleSourceURLComments(isolate_, script);
parse_info_->set_character_stream(nullptr);
@@ -163,6 +168,72 @@ bool CompilerDispatcherJob::FinalizeParsingOnMainThread() {
return status_ != CompileJobStatus::kFailed;
}
+bool CompilerDispatcherJob::PrepareToCompileOnMainThread() {
+ DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+ DCHECK(status() == CompileJobStatus::kReadyToAnalyse);
+
+ compile_info_.reset(
+ new CompilationInfo(parse_info_.get(), Handle<JSFunction>::null()));
+
+ DeferredHandleScope scope(isolate_);
+ if (Compiler::Analyze(parse_info_.get())) {
+ compile_job_.reset(
+ Compiler::PrepareUnoptimizedCompilationJob(compile_info_.get()));
+ }
+ compile_info_->set_deferred_handles(scope.Detach());
+
+ if (!compile_job_.get()) {
+ if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
+ status_ = CompileJobStatus::kFailed;
+ return false;
+ }
+
+ can_compile_on_background_thread_ =
+ compile_job_->can_execute_on_background_thread();
+ status_ = CompileJobStatus::kReadyToCompile;
+ return true;
+}
+
+void CompilerDispatcherJob::Compile() {
+ DCHECK(status() == CompileJobStatus::kReadyToCompile);
+ DCHECK(can_compile_on_background_thread_ ||
+ ThreadId::Current().Equals(isolate_->thread_id()));
+
+ // Disallowing of handle dereference and heap access dealt with in
+ // CompilationJob::ExecuteJob.
+
+ uintptr_t stack_limit = GetCurrentStackPosition() - max_stack_size_ * KB;
+ compile_job_->set_stack_limit(stack_limit);
+
+ CompilationJob::Status status = compile_job_->ExecuteJob();
+ USE(status);
+
+ // Always transition to kCompiled - errors will be reported by
+ // FinalizeCompilingOnMainThread.
+ status_ = CompileJobStatus::kCompiled;
+}
+
+bool CompilerDispatcherJob::FinalizeCompilingOnMainThread() {
+ DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+ DCHECK(status() == CompileJobStatus::kCompiled);
+
+ if (compile_job_->state() == CompilationJob::State::kFailed ||
+ !Compiler::FinalizeCompilationJob(compile_job_.release())) {
+ if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
+ status_ = CompileJobStatus::kFailed;
+ return false;
+ }
+
+ zone_.reset();
+ parse_info_.reset();
+ compile_info_.reset();
+ compile_job_.reset();
+ handles_from_parsing_.reset();
+
+ status_ = CompileJobStatus::kDone;
+ return true;
+}
+
void CompilerDispatcherJob::ResetOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
@@ -172,6 +243,8 @@ void CompilerDispatcherJob::ResetOnMainThread() {
parse_info_.reset();
zone_.reset();
handles_from_parsing_.reset();
+ compile_info_.reset();
+ compile_job_.reset();
if (!source_.is_null()) {
i::GlobalHandles::Destroy(Handle<Object>::cast(source_).location());
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
index 50414af639..f3aaf939e0 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
@@ -15,10 +15,11 @@ namespace v8 {
namespace internal {
class CompilationInfo;
+class CompilationJob;
class Isolate;
-class JSFunction;
class ParseInfo;
class Parser;
+class SharedFunctionInfo;
class String;
class UnicodeCache;
class Utf16CharacterStream;
@@ -28,14 +29,16 @@ enum class CompileJobStatus {
kInitial,
kReadyToParse,
kParsed,
+ kReadyToAnalyse,
kReadyToCompile,
+ kCompiled,
kFailed,
kDone,
};
class CompilerDispatcherJob {
public:
- CompilerDispatcherJob(Isolate* isolate, Handle<JSFunction> function,
+ CompilerDispatcherJob(Isolate* isolate, Handle<SharedFunctionInfo> shared,
size_t max_stack_size);
~CompilerDispatcherJob();
@@ -43,6 +46,11 @@ class CompilerDispatcherJob {
bool can_parse_on_background_thread() const {
return can_parse_on_background_thread_;
}
+ // Should only be called after kReadyToCompile.
+ bool can_compile_on_background_thread() const {
+ DCHECK(compile_job_.get());
+ return can_compile_on_background_thread_;
+ }
// Transition from kInitial to kReadyToParse.
void PrepareToParseOnMainThread();
@@ -50,10 +58,21 @@ class CompilerDispatcherJob {
// Transition from kReadyToParse to kParsed.
void Parse();
- // Transition from kParsed to kReadyToCompile (or kFailed). Returns false
+ // Transition from kParsed to kReadyToAnalyse (or kFailed). Returns false
// when transitioning to kFailed. In that case, an exception is pending.
bool FinalizeParsingOnMainThread();
+ // Transition from kReadyToAnalyse to kReadyToCompile (or kFailed). Returns
+ // false when transitioning to kFailed. In that case, an exception is pending.
+ bool PrepareToCompileOnMainThread();
+
+ // Transition from kReadyToCompile to kCompiled.
+ void Compile();
+
+ // Transition from kCompiled to kDone (or kFailed). Returns false when
+ // transitioning to kFailed. In that case, an exception is pending.
+ bool FinalizeCompilingOnMainThread();
+
// Transition from any state to kInitial and free all resources.
void ResetOnMainThread();
@@ -62,7 +81,7 @@ class CompilerDispatcherJob {
CompileJobStatus status_ = CompileJobStatus::kInitial;
Isolate* isolate_;
- Handle<JSFunction> function_; // Global handle.
+ Handle<SharedFunctionInfo> shared_; // Global handle.
Handle<String> source_; // Global handle.
size_t max_stack_size_;
@@ -74,7 +93,12 @@ class CompilerDispatcherJob {
std::unique_ptr<Parser> parser_;
std::unique_ptr<DeferredHandles> handles_from_parsing_;
+ // Members required for compiling.
+ std::unique_ptr<CompilationInfo> compile_info_;
+ std::unique_ptr<CompilationJob> compile_job_;
+
bool can_parse_on_background_thread_;
+ bool can_compile_on_background_thread_;
DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherJob);
};
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index be81047976..75c50eec7d 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -5,6 +5,8 @@
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/base/atomicops.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
#include "src/full-codegen/full-codegen.h"
#include "src/isolate.h"
#include "src/tracing/trace-event.h"
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 9a5afe99da..ec402fa822 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -40,33 +40,11 @@ namespace v8 {
namespace internal {
-#define PARSE_INFO_GETTER(type, name) \
- type CompilationInfo::name() const { \
- CHECK(parse_info()); \
- return parse_info()->name(); \
- }
-
-
-#define PARSE_INFO_GETTER_WITH_DEFAULT(type, name, def) \
- type CompilationInfo::name() const { \
- return parse_info() ? parse_info()->name() : def; \
- }
-
-
-PARSE_INFO_GETTER(Handle<Script>, script)
-PARSE_INFO_GETTER(FunctionLiteral*, literal)
-PARSE_INFO_GETTER_WITH_DEFAULT(DeclarationScope*, scope, nullptr)
-PARSE_INFO_GETTER_WITH_DEFAULT(Handle<Context>, context,
- Handle<Context>::null())
-PARSE_INFO_GETTER(Handle<SharedFunctionInfo>, shared_info)
-
-#undef PARSE_INFO_GETTER
-#undef PARSE_INFO_GETTER_WITH_DEFAULT
// A wrapper around a CompilationInfo that detaches the Handles from
// the underlying DeferredHandleScope and stores them in info_ on
// destruction.
-class CompilationHandleScope BASE_EMBEDDED {
+class CompilationHandleScope final {
public:
explicit CompilationHandleScope(CompilationInfo* info)
: deferred_(info->isolate()), info_(info) {}
@@ -91,154 +69,6 @@ struct ScopedTimer {
};
// ----------------------------------------------------------------------------
-// Implementation of CompilationInfo
-
-bool CompilationInfo::has_shared_info() const {
- return parse_info_ && !parse_info_->shared_info().is_null();
-}
-
-CompilationInfo::CompilationInfo(ParseInfo* parse_info,
- Handle<JSFunction> closure)
- : CompilationInfo(parse_info, {}, Code::ComputeFlags(Code::FUNCTION), BASE,
- parse_info->isolate(), parse_info->zone()) {
- closure_ = closure;
-
- // Compiling for the snapshot typically results in different code than
- // compiling later on. This means that code recompiled with deoptimization
- // support won't be "equivalent" (as defined by SharedFunctionInfo::
- // EnableDeoptimizationSupport), so it will replace the old code and all
- // its type feedback. To avoid this, always compile functions in the snapshot
- // with deoptimization support.
- if (isolate_->serializer_enabled()) EnableDeoptimizationSupport();
-
- if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
- if (FLAG_turbo_inlining) MarkAsInliningEnabled();
- if (FLAG_turbo_source_positions) MarkAsSourcePositionsEnabled();
- if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
-}
-
-CompilationInfo::CompilationInfo(Vector<const char> debug_name,
- Isolate* isolate, Zone* zone,
- Code::Flags code_flags)
- : CompilationInfo(nullptr, debug_name, code_flags, STUB, isolate, zone) {}
-
-CompilationInfo::CompilationInfo(ParseInfo* parse_info,
- Vector<const char> debug_name,
- Code::Flags code_flags, Mode mode,
- Isolate* isolate, Zone* zone)
- : parse_info_(parse_info),
- isolate_(isolate),
- flags_(0),
- code_flags_(code_flags),
- mode_(mode),
- osr_ast_id_(BailoutId::None()),
- zone_(zone),
- deferred_handles_(nullptr),
- dependencies_(isolate, zone),
- bailout_reason_(kNoReason),
- prologue_offset_(Code::kPrologueOffsetNotSet),
- track_positions_(FLAG_hydrogen_track_positions ||
- isolate->is_profiling()),
- parameter_count_(0),
- optimization_id_(-1),
- osr_expr_stack_height_(0),
- debug_name_(debug_name) {}
-
-CompilationInfo::~CompilationInfo() {
- if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
- shared_info()->DisableOptimization(bailout_reason());
- }
- dependencies()->Rollback();
- delete deferred_handles_;
-}
-
-
-int CompilationInfo::num_parameters() const {
- return !IsStub() ? scope()->num_parameters() : parameter_count_;
-}
-
-
-int CompilationInfo::num_parameters_including_this() const {
- return num_parameters() + (is_this_defined() ? 1 : 0);
-}
-
-
-bool CompilationInfo::is_this_defined() const { return !IsStub(); }
-
-
-// Primitive functions are unlikely to be picked up by the stack-walking
-// profiler, so they trigger their own optimization when they're called
-// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
-bool CompilationInfo::ShouldSelfOptimize() {
- return FLAG_crankshaft &&
- !(literal()->flags() & AstProperties::kDontSelfOptimize) &&
- !literal()->dont_optimize() &&
- literal()->scope()->AllowsLazyCompilation() &&
- !shared_info()->optimization_disabled();
-}
-
-
-bool CompilationInfo::has_simple_parameters() {
- return scope()->has_simple_parameters();
-}
-
-std::unique_ptr<char[]> CompilationInfo::GetDebugName() const {
- if (parse_info() && parse_info()->literal()) {
- AllowHandleDereference allow_deref;
- return parse_info()->literal()->debug_name()->ToCString();
- }
- if (parse_info() && !parse_info()->shared_info().is_null()) {
- return parse_info()->shared_info()->DebugName()->ToCString();
- }
- Vector<const char> name_vec = debug_name_;
- if (name_vec.is_empty()) name_vec = ArrayVector("unknown");
- std::unique_ptr<char[]> name(new char[name_vec.length() + 1]);
- memcpy(name.get(), name_vec.start(), name_vec.length());
- name[name_vec.length()] = '\0';
- return name;
-}
-
-StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
- switch (output_code_kind()) {
- case Code::STUB:
- case Code::BYTECODE_HANDLER:
- case Code::HANDLER:
- case Code::BUILTIN:
-#define CASE_KIND(kind) case Code::kind:
- IC_KIND_LIST(CASE_KIND)
-#undef CASE_KIND
- return StackFrame::STUB;
- case Code::WASM_FUNCTION:
- return StackFrame::WASM;
- case Code::JS_TO_WASM_FUNCTION:
- return StackFrame::JS_TO_WASM;
- case Code::WASM_TO_JS_FUNCTION:
- return StackFrame::WASM_TO_JS;
- default:
- UNIMPLEMENTED();
- return StackFrame::NONE;
- }
-}
-
-int CompilationInfo::GetDeclareGlobalsFlags() const {
- DCHECK(DeclareGlobalsLanguageMode::is_valid(parse_info()->language_mode()));
- return DeclareGlobalsEvalFlag::encode(parse_info()->is_eval()) |
- DeclareGlobalsNativeFlag::encode(parse_info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(parse_info()->language_mode());
-}
-
-SourcePositionTableBuilder::RecordingMode
-CompilationInfo::SourcePositionRecordingMode() const {
- return parse_info() && parse_info()->is_native()
- ? SourcePositionTableBuilder::OMIT_SOURCE_POSITIONS
- : SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS;
-}
-
-bool CompilationInfo::ExpectsJSReceiverAsReceiver() {
- return is_sloppy(parse_info()->language_mode()) && !parse_info()->is_native();
-}
-
-// ----------------------------------------------------------------------------
// Implementation of CompilationJob
CompilationJob::Status CompilationJob::PrepareJob() {
@@ -260,10 +90,18 @@ CompilationJob::Status CompilationJob::PrepareJob() {
}
CompilationJob::Status CompilationJob::ExecuteJob() {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
- DisallowCodeDependencyChange no_dependency_change;
+ std::unique_ptr<DisallowHeapAllocation> no_allocation;
+ std::unique_ptr<DisallowHandleAllocation> no_handles;
+ std::unique_ptr<DisallowHandleDereference> no_deref;
+ std::unique_ptr<DisallowCodeDependencyChange> no_dependency_change;
+ if (can_execute_on_background_thread()) {
+ no_allocation.reset(new DisallowHeapAllocation());
+ no_handles.reset(new DisallowHandleAllocation());
+ no_deref.reset(new DisallowHandleDereference());
+ no_dependency_change.reset(new DisallowCodeDependencyChange());
+ } else {
+ DCHECK(ThreadId::Current().Equals(info()->isolate()->thread_id()));
+ }
// Delegate to the underlying implementation.
DCHECK(state() == State::kReadyToExecute);
@@ -283,6 +121,73 @@ CompilationJob::Status CompilationJob::FinalizeJob() {
return UpdateState(FinalizeJobImpl(), State::kSucceeded);
}
+CompilationJob::Status CompilationJob::RetryOptimization(BailoutReason reason) {
+ DCHECK(info_->IsOptimizing());
+ info_->RetryOptimization(reason);
+ state_ = State::kFailed;
+ return FAILED;
+}
+
+CompilationJob::Status CompilationJob::AbortOptimization(BailoutReason reason) {
+ DCHECK(info_->IsOptimizing());
+ info_->AbortOptimization(reason);
+ state_ = State::kFailed;
+ return FAILED;
+}
+
+void CompilationJob::RecordUnoptimizedCompilationStats() const {
+ int code_size;
+ if (info()->has_bytecode_array()) {
+ code_size = info()->bytecode_array()->SizeIncludingMetadata();
+ } else {
+ code_size = info()->code()->SizeIncludingMetadata();
+ }
+
+ Counters* counters = isolate()->counters();
+ // TODO(4280): Rename counters from "baseline" to "unoptimized" eventually.
+ counters->total_baseline_code_size()->Increment(code_size);
+ counters->total_baseline_compile_count()->Increment(1);
+
+ // TODO(5203): Add timers for each phase of compilation.
+}
+
+void CompilationJob::RecordOptimizedCompilationStats() const {
+ DCHECK(info()->IsOptimizing());
+ Handle<JSFunction> function = info()->closure();
+ if (!function->IsOptimized()) {
+ // Concurrent recompilation and OSR may race. Increment only once.
+ int opt_count = function->shared()->opt_count();
+ function->shared()->set_opt_count(opt_count + 1);
+ }
+ double ms_creategraph = time_taken_to_prepare_.InMillisecondsF();
+ double ms_optimize = time_taken_to_execute_.InMillisecondsF();
+ double ms_codegen = time_taken_to_finalize_.InMillisecondsF();
+ if (FLAG_trace_opt) {
+ PrintF("[optimizing ");
+ function->ShortPrint();
+ PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
+ ms_codegen);
+ }
+ if (FLAG_trace_opt_stats) {
+ static double compilation_time = 0.0;
+ static int compiled_functions = 0;
+ static int code_size = 0;
+
+ compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
+ compiled_functions++;
+ code_size += function->shared()->SourceSize();
+ PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
+ compiled_functions, code_size, compilation_time);
+ }
+ if (FLAG_hydrogen_stats) {
+ isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_prepare_,
+ time_taken_to_execute_,
+ time_taken_to_finalize_);
+ }
+}
+
+Isolate* CompilationJob::isolate() const { return info()->isolate(); }
+
namespace {
void AddWeakObjectToCodeDependency(Isolate* isolate, Handle<HeapObject> object,
@@ -341,41 +246,6 @@ void CompilationJob::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
code->set_can_have_weak_objects(true);
}
-void CompilationJob::RecordOptimizationStats() {
- DCHECK(info()->IsOptimizing());
- Handle<JSFunction> function = info()->closure();
- if (!function->IsOptimized()) {
- // Concurrent recompilation and OSR may race. Increment only once.
- int opt_count = function->shared()->opt_count();
- function->shared()->set_opt_count(opt_count + 1);
- }
- double ms_creategraph = time_taken_to_prepare_.InMillisecondsF();
- double ms_optimize = time_taken_to_execute_.InMillisecondsF();
- double ms_codegen = time_taken_to_finalize_.InMillisecondsF();
- if (FLAG_trace_opt) {
- PrintF("[optimizing ");
- function->ShortPrint();
- PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
- ms_codegen);
- }
- if (FLAG_trace_opt_stats) {
- static double compilation_time = 0.0;
- static int compiled_functions = 0;
- static int code_size = 0;
-
- compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
- compiled_functions++;
- code_size += function->shared()->SourceSize();
- PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
- compiled_functions, code_size, compilation_time);
- }
- if (FLAG_hydrogen_stats) {
- isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_prepare_,
- time_taken_to_execute_,
- time_taken_to_finalize_);
- }
-}
-
// ----------------------------------------------------------------------------
// Local helper methods that make up the compilation pipeline.
@@ -387,6 +257,16 @@ bool IsEvalToplevel(Handle<SharedFunctionInfo> shared) {
Script::COMPILATION_TYPE_EVAL;
}
+bool Parse(ParseInfo* info) {
+ // Create a canonical handle scope if compiling ignition bytecode. This is
+ // required by the constant array builder to de-duplicate objects without
+ // dereferencing handles.
+ std::unique_ptr<CanonicalHandleScope> canonical;
+ if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(info->isolate()));
+
+ return Parser::ParseStatic(info);
+}
+
void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
CompilationInfo* info) {
// Log the code generation. If source information is available include
@@ -466,18 +346,24 @@ bool ShouldUseIgnition(CompilationInfo* info) {
return info->shared_info()->PassesFilter(FLAG_ignition_filter);
}
-int CodeAndMetadataSize(CompilationInfo* info) {
- if (info->has_bytecode_array()) {
- return info->bytecode_array()->SizeIncludingMetadata();
+CompilationJob* GetUnoptimizedCompilationJob(CompilationInfo* info) {
+ // Function should have been parsed and analyzed before creating a compilation
+ // job.
+ DCHECK_NOT_NULL(info->literal());
+ DCHECK_NOT_NULL(info->scope());
+
+ EnsureFeedbackMetadata(info);
+ if (ShouldUseIgnition(info)) {
+ return interpreter::Interpreter::NewCompilationJob(info);
+ } else {
+ return FullCodeGenerator::NewCompilationJob(info);
}
- return info->code()->SizeIncludingMetadata();
}
bool GenerateUnoptimizedCode(CompilationInfo* info) {
- bool success;
- EnsureFeedbackMetadata(info);
if (FLAG_validate_asm && info->scope()->asm_module() &&
!info->shared_info()->is_asm_wasm_broken()) {
+ EnsureFeedbackMetadata(info);
MaybeHandle<FixedArray> wasm_data;
wasm_data = AsmJs::ConvertAsmToWasm(info->parse_info());
if (!wasm_data.is_null()) {
@@ -486,19 +372,13 @@ bool GenerateUnoptimizedCode(CompilationInfo* info) {
return true;
}
}
- if (ShouldUseIgnition(info)) {
- success = interpreter::Interpreter::MakeBytecode(info);
- } else {
- success = FullCodeGenerator::MakeCode(info);
- }
- if (success) {
- Isolate* isolate = info->isolate();
- Counters* counters = isolate->counters();
- // TODO(4280): Rename counters from "baseline" to "unoptimized" eventually.
- counters->total_baseline_code_size()->Increment(CodeAndMetadataSize(info));
- counters->total_baseline_compile_count()->Increment(1);
- }
- return success;
+
+ std::unique_ptr<CompilationJob> job(GetUnoptimizedCompilationJob(info));
+ if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
+ if (job->ExecuteJob() != CompilationJob::SUCCEEDED) return false;
+ if (job->FinalizeJob() != CompilationJob::SUCCEEDED) return false;
+ job->RecordUnoptimizedCompilationStats();
+ return true;
}
bool CompileUnoptimizedCode(CompilationInfo* info) {
@@ -514,8 +394,12 @@ bool CompileUnoptimizedCode(CompilationInfo* info) {
void InstallSharedScopeInfo(CompilationInfo* info,
Handle<SharedFunctionInfo> shared) {
- Handle<ScopeInfo> scope_info = info->scope()->GetScopeInfo(info->isolate());
+ Handle<ScopeInfo> scope_info = info->scope()->scope_info();
shared->set_scope_info(*scope_info);
+ Scope* outer_scope = info->scope()->GetOuterScopeWithContext();
+ if (outer_scope) {
+ shared->set_outer_scope_info(*outer_scope->scope_info());
+ }
}
void InstallSharedCompilationResult(CompilationInfo* info,
@@ -534,22 +418,8 @@ void InstallSharedCompilationResult(CompilationInfo* info,
}
}
-MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
- VMState<COMPILER> state(info->isolate());
- PostponeInterruptsScope postpone(info->isolate());
-
- // Create a canonical handle scope before internalizing parsed values if
- // compiling bytecode. This is required for off-thread bytecode generation.
- std::unique_ptr<CanonicalHandleScope> canonical;
- if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(info->isolate()));
-
- // Parse and update CompilationInfo with the results.
- if (!Parser::ParseStatic(info->parse_info())) return MaybeHandle<Code>();
+void InstallUnoptimizedCode(CompilationInfo* info) {
Handle<SharedFunctionInfo> shared = info->shared_info();
- DCHECK_EQ(shared->language_mode(), info->literal()->language_mode());
-
- // Compile either unoptimized code or bytecode for the interpreter.
- if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
// Update the shared function info with the scope info.
InstallSharedScopeInfo(info, shared);
@@ -559,10 +429,35 @@ MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
// Record the function compilation event.
RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
+}
+
+MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
+ VMState<COMPILER> state(info->isolate());
+ PostponeInterruptsScope postpone(info->isolate());
+
+ // Parse and update CompilationInfo with the results.
+ if (!Parse(info->parse_info())) return MaybeHandle<Code>();
+ DCHECK_EQ(info->shared_info()->language_mode(),
+ info->literal()->language_mode());
+
+ // Compile either unoptimized code or bytecode for the interpreter.
+ if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
+
+ InstallUnoptimizedCode(info);
return info->code();
}
+CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
+ CompilationJob::Status status = job->FinalizeJob();
+ if (status == CompilationJob::SUCCEEDED) {
+ DCHECK(!job->info()->shared_info()->is_compiled());
+ InstallUnoptimizedCode(job->info());
+ job->RecordUnoptimizedCompilationStats();
+ }
+ return status;
+}
+
MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
Handle<JSFunction> function, BailoutId osr_ast_id) {
Handle<SharedFunctionInfo> shared(function->shared());
@@ -615,6 +510,14 @@ void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
}
bool Renumber(ParseInfo* parse_info) {
+ // Create a canonical handle scope if compiling ignition bytecode. This is
+ // required by the constant array builder to de-duplicate objects without
+ // dereferencing handles.
+ std::unique_ptr<CanonicalHandleScope> canonical;
+ if (FLAG_ignition) {
+ canonical.reset(new CanonicalHandleScope(parse_info->isolate()));
+ }
+
if (!AstNumbering::Renumber(parse_info->isolate(), parse_info->zone(),
parse_info->literal())) {
return false;
@@ -669,8 +572,8 @@ bool GetOptimizedCodeNow(CompilationJob* job) {
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::RecompileSynchronous);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::RecompileSynchronous);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.RecompileSynchronous");
if (job->PrepareJob() != CompilationJob::SUCCEEDED ||
job->ExecuteJob() != CompilationJob::SUCCEEDED ||
@@ -684,7 +587,7 @@ bool GetOptimizedCodeNow(CompilationJob* job) {
}
// Success!
- job->RecordOptimizationStats();
+ job->RecordOptimizedCompilationStats();
DCHECK(!isolate->has_pending_exception());
InsertCodeIntoOptimizedCodeMap(info);
RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
@@ -713,10 +616,6 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
return false;
}
- // All handles below this point will be allocated in a deferred handle scope
- // that is detached and handed off to the background thread when we return.
- CompilationHandleScope handle_scope(info);
-
// Parsing is not required when optimizing from existing bytecode.
if (!info->is_optimizing_from_bytecode()) {
if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
@@ -725,15 +624,11 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
JSFunction::EnsureLiterals(info->closure());
- // Reopen handles in the new CompilationHandleScope.
- info->ReopenHandlesInNewHandleScope();
- info->parse_info()->ReopenHandlesInNewHandleScope();
-
TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
RuntimeCallTimerScope runtimeTimer(info->isolate(),
&RuntimeCallStats::RecompileSynchronous);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::RecompileSynchronous);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.RecompileSynchronous");
if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
isolate->optimizing_compile_dispatcher()->QueueForOptimization(job);
@@ -808,14 +703,13 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
- CanonicalHandleScope canonical(isolate);
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::OptimizeCode);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::OptimizeCode);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
// TurboFan can optimize directly from existing bytecode.
if (FLAG_turbo_from_bytecode && use_turbofan && ShouldUseIgnition(info)) {
+ if (info->is_osr() && !ignition_osr) return MaybeHandle<Code>();
if (!Compiler::EnsureBytecode(info)) {
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
return MaybeHandle<Code>();
@@ -831,6 +725,32 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
parse_info->set_lazy(false);
}
+ // Verify that OSR compilations are delegated to the correct graph builder.
+ // Depending on the underlying frame the semantics of the {BailoutId} differ
+ // and the various graph builders hard-code a certain semantic:
+ // - Interpreter : The BailoutId represents a bytecode offset.
+ // - FullCodegen : The BailoutId represents the id of an AST node.
+ DCHECK_IMPLIES(info->is_osr() && ignition_osr,
+ info->is_optimizing_from_bytecode());
+ DCHECK_IMPLIES(info->is_osr() && !ignition_osr,
+ !info->is_optimizing_from_bytecode());
+
+ // In case of concurrent recompilation, all handles below this point will be
+ // allocated in a deferred handle scope that is detached and handed off to
+ // the background thread when we return.
+ std::unique_ptr<CompilationHandleScope> compilation;
+ if (mode == Compiler::CONCURRENT) {
+ compilation.reset(new CompilationHandleScope(info));
+ }
+
+ // In case of TurboFan, all handles below will be canonicalized.
+ std::unique_ptr<CanonicalHandleScope> canonical;
+ if (use_turbofan) canonical.reset(new CanonicalHandleScope(info->isolate()));
+
+ // Reopen handles in the new CompilationHandleScope.
+ info->ReopenHandlesInNewHandleScope();
+ parse_info->ReopenHandlesInNewHandleScope();
+
if (mode == Compiler::CONCURRENT) {
if (GetOptimizedCodeLater(job.get())) {
job.release(); // The background recompile job owns this now.
@@ -844,6 +764,60 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
+CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
+ CompilationInfo* info = job->info();
+ Isolate* isolate = info->isolate();
+
+ TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ &RuntimeCallStats::RecompileSynchronous);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.RecompileSynchronous");
+
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ shared->code()->set_profiler_ticks(0);
+
+ DCHECK(!shared->HasDebugInfo());
+
+ // 1) Optimization on the concurrent thread may have failed.
+ // 2) The function may have already been optimized by OSR. Simply continue.
+ // Except when OSR already disabled optimization for some reason.
+ // 3) The code may have already been invalidated due to dependency change.
+ // 4) Code generation may have failed.
+ if (job->state() == CompilationJob::State::kReadyToFinalize) {
+ if (shared->optimization_disabled()) {
+ job->RetryOptimization(kOptimizationDisabled);
+ } else if (info->dependencies()->HasAborted()) {
+ job->RetryOptimization(kBailedOutDueToDependencyChange);
+ } else if (job->FinalizeJob() == CompilationJob::SUCCEEDED) {
+ job->RecordOptimizedCompilationStats();
+ RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
+ if (shared
+ ->SearchOptimizedCodeMap(info->context()->native_context(),
+ info->osr_ast_id())
+ .code == nullptr) {
+ InsertCodeIntoOptimizedCodeMap(info);
+ }
+ if (FLAG_trace_opt) {
+ PrintF("[completed optimizing ");
+ info->closure()->ShortPrint();
+ PrintF("]\n");
+ }
+ info->closure()->ReplaceCode(*info->code());
+ return CompilationJob::SUCCEEDED;
+ }
+ }
+
+ DCHECK(job->state() == CompilationJob::State::kFailed);
+ if (FLAG_trace_opt) {
+ PrintF("[aborted optimizing ");
+ info->closure()->ShortPrint();
+ PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
+ }
+ info->closure()->ReplaceCode(shared->code());
+ return CompilationJob::FAILED;
+}
+
class InterpreterActivationsFinder : public ThreadVisitor,
public OptimizedFunctionVisitor {
public:
@@ -942,7 +916,7 @@ MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
// baseline code because there might be suspended activations stored in
// generator objects on the heap. We could eventually go directly to
// TurboFan in this case.
- if (function->shared()->is_resumable()) {
+ if (IsResumableFunction(function->shared()->kind())) {
return MaybeHandle<Code>();
}
@@ -978,7 +952,7 @@ MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
}
// Parse and update CompilationInfo with the results.
- if (!Parser::ParseStatic(info.parse_info())) return MaybeHandle<Code>();
+ if (!Parse(info.parse_info())) return MaybeHandle<Code>();
Handle<SharedFunctionInfo> shared = info.shared_info();
DCHECK_EQ(shared->language_mode(), info.literal()->language_mode());
@@ -1014,22 +988,19 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::CompileCodeLazy);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::CompileCodeLazy);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
- if (FLAG_turbo_cache_shared_code) {
- Handle<Code> cached_code;
- if (GetCodeFromOptimizedCodeMap(function, BailoutId::None())
- .ToHandle(&cached_code)) {
- if (FLAG_trace_opt) {
- PrintF("[found optimized code for ");
- function->ShortPrint();
- PrintF(" during unoptimized compile]\n");
- }
- DCHECK(function->shared()->is_compiled());
- return cached_code;
+ Handle<Code> cached_code;
+ if (GetCodeFromOptimizedCodeMap(function, BailoutId::None())
+ .ToHandle(&cached_code)) {
+ if (FLAG_trace_opt) {
+ PrintF("[found optimized code for ");
+ function->ShortPrint();
+ PrintF(" during unoptimized compile]\n");
}
+ DCHECK(function->shared()->is_compiled());
+ return cached_code;
}
if (function->shared()->is_compiled()) {
@@ -1076,18 +1047,12 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Isolate* isolate = info->isolate();
TimerEventScope<TimerEventCompileCode> timer(isolate);
RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::CompileCode);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::CompileCode);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
ParseInfo* parse_info = info->parse_info();
Handle<Script> script = parse_info->script();
- // Create a canonical handle scope before internalizing parsed values if
- // compiling bytecode. This is required for off-thread bytecode generation.
- std::unique_ptr<CanonicalHandleScope> canonical;
- if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(isolate));
-
// TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
FixedArray* array = isolate->native_context()->embedder_data();
script->set_context_data(array->get(v8::Context::kDebugIdIndex));
@@ -1131,7 +1096,7 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
parse_info->set_compile_options(ScriptCompiler::kNoCompileOptions);
}
- if (!Parser::ParseStatic(parse_info)) {
+ if (!Parse(parse_info)) {
return Handle<SharedFunctionInfo>::null();
}
}
@@ -1150,10 +1115,8 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
? info->isolate()->counters()->compile_eval()
: info->isolate()->counters()->compile();
HistogramTimerScope timer(rate);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate,
- (parse_info->is_eval() ? &tracing::TraceEventStatsTable::CompileEval
- : &tracing::TraceEventStatsTable::Compile));
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ parse_info->is_eval() ? "V8.CompileEval" : "V8.Compile");
// Allocate a shared function info object.
DCHECK_EQ(kNoSourcePosition, lit->function_token_position());
@@ -1203,14 +1166,14 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
bool Compiler::Analyze(ParseInfo* info) {
DCHECK_NOT_NULL(info->literal());
if (!Rewriter::Rewrite(info)) return false;
- Scope::Analyze(info);
+ DeclarationScope::Analyze(info, AnalyzeMode::kRegular);
if (!Renumber(info)) return false;
DCHECK_NOT_NULL(info->scope());
return true;
}
bool Compiler::ParseAndAnalyze(ParseInfo* info) {
- if (!Parser::ParseStatic(info)) return false;
+ if (!Parse(info)) return false;
if (!Compiler::Analyze(info)) return false;
DCHECK_NOT_NULL(info->literal());
DCHECK_NOT_NULL(info->scope());
@@ -1390,10 +1353,18 @@ MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) {
}
bool Compiler::EnsureBytecode(CompilationInfo* info) {
- DCHECK(ShouldUseIgnition(info));
+ if (!ShouldUseIgnition(info)) return false;
if (!info->shared_info()->HasBytecodeArray()) {
- DCHECK(!info->shared_info()->is_compiled());
+ Handle<Code> original_code(info->shared_info()->code());
if (GetUnoptimizedCode(info).is_null()) return false;
+ if (info->shared_info()->HasAsmWasmData()) return false;
+ DCHECK(info->shared_info()->is_compiled());
+ if (original_code->kind() == Code::FUNCTION) {
+ // Generating bytecode will install the {InterpreterEntryTrampoline} as
+ // shared code on the function. To avoid an implicit tier down we restore
+ // original baseline code in case it existed beforehand.
+ info->shared_info()->ReplaceCode(*original_code);
+ }
}
DCHECK(info->shared_info()->HasBytecodeArray());
return true;
@@ -1414,7 +1385,7 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
// baseline code because there might be suspended activations stored in
// generator objects on the heap. We could eventually go directly to
// TurboFan in this case.
- if (shared->is_resumable()) return false;
+ if (IsResumableFunction(shared->kind())) return false;
// TODO(4280): For now we disable switching to baseline code in the presence
// of interpreter activations of the given function. The reasons is that the
@@ -1513,7 +1484,9 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
if (context->IsNativeContext()) parse_info.set_global();
parse_info.set_language_mode(language_mode);
parse_info.set_parse_restriction(restriction);
- parse_info.set_context(context);
+ if (!context->IsNativeContext()) {
+ parse_info.set_outer_scope_info(handle(context->scope_info()));
+ }
shared_info = CompileToplevel(&info);
@@ -1629,8 +1602,8 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
HistogramTimerScope timer(isolate->counters()->compile_deserialize());
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::CompileDeserialize);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::CompileDeserialize);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileDeserialize");
Handle<SharedFunctionInfo> result;
if (CodeSerializer::Deserialize(isolate, *cached_data, source)
.ToHandle(&result)) {
@@ -1686,7 +1659,9 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
}
parse_info.set_compile_options(compile_options);
parse_info.set_extension(extension);
- parse_info.set_context(context);
+ if (!context->IsNativeContext()) {
+ parse_info.set_outer_scope_info(handle(context->scope_info()));
+ }
if (FLAG_serialize_toplevel &&
compile_options == ScriptCompiler::kProduceCodeCache) {
info.PrepareForSerializing();
@@ -1703,8 +1678,8 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
isolate->counters()->compile_serialize());
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::CompileSerialize);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::CompileSerialize);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileSerialize");
*cached_data = CodeSerializer::Serialize(isolate, result, source);
if (FLAG_profile_deserialization) {
PrintF("[Compiling and serializing took %0.3f ms]\n",
@@ -1822,17 +1797,14 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
// Generate code
TimerEventScope<TimerEventCompileCode> timer(isolate);
RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::CompileCode);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::CompileCode);
-
- // Create a canonical handle scope if compiling ignition bytecode. This is
- // required by the constant array builder to de-duplicate common objects
- // without dereferencing handles.
- std::unique_ptr<CanonicalHandleScope> canonical;
- if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(info.isolate()));
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
if (lazy) {
info.SetCode(isolate->builtins()->CompileLazy());
+ Scope* outer_scope = literal->scope()->GetOuterScopeWithContext();
+ if (outer_scope) {
+ result->set_outer_scope_info(*outer_scope->scope_info());
+ }
} else if (Renumber(info.parse_info()) && GenerateUnoptimizedCode(&info)) {
// Code generation will ensure that the feedback vector is present and
// appropriately sized.
@@ -1876,6 +1848,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForNative(
Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
name, fun->shared()->num_literals(), FunctionKind::kNormalFunction, code,
Handle<ScopeInfo>(fun->shared()->scope_info()));
+ shared->set_outer_scope_info(fun->shared()->outer_scope_info());
shared->SetConstructStub(*construct_stub);
shared->set_feedback_metadata(fun->shared()->feedback_metadata());
@@ -1895,58 +1868,28 @@ MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
return GetOptimizedCode(function, NOT_CONCURRENT, osr_ast_id, osr_frame);
}
-void Compiler::FinalizeCompilationJob(CompilationJob* raw_job) {
+CompilationJob* Compiler::PrepareUnoptimizedCompilationJob(
+ CompilationInfo* info) {
+ VMState<COMPILER> state(info->isolate());
+ std::unique_ptr<CompilationJob> job(GetUnoptimizedCompilationJob(info));
+ if (job->PrepareJob() != CompilationJob::SUCCEEDED) {
+ return nullptr;
+ }
+ return job.release();
+}
+
+bool Compiler::FinalizeCompilationJob(CompilationJob* raw_job) {
// Take ownership of compilation job. Deleting job also tears down the zone.
std::unique_ptr<CompilationJob> job(raw_job);
- CompilationInfo* info = job->info();
- Isolate* isolate = info->isolate();
-
- VMState<COMPILER> state(isolate);
- TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
- RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::RecompileSynchronous);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::RecompileSynchronous);
-
- Handle<SharedFunctionInfo> shared = info->shared_info();
- shared->code()->set_profiler_ticks(0);
-
- DCHECK(!shared->HasDebugInfo());
- // 1) Optimization on the concurrent thread may have failed.
- // 2) The function may have already been optimized by OSR. Simply continue.
- // Except when OSR already disabled optimization for some reason.
- // 3) The code may have already been invalidated due to dependency change.
- // 4) Code generation may have failed.
- if (job->state() == CompilationJob::State::kReadyToFinalize) {
- if (shared->optimization_disabled()) {
- job->RetryOptimization(kOptimizationDisabled);
- } else if (info->dependencies()->HasAborted()) {
- job->RetryOptimization(kBailedOutDueToDependencyChange);
- } else if (job->FinalizeJob() == CompilationJob::SUCCEEDED) {
- job->RecordOptimizationStats();
- RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
- if (shared->SearchOptimizedCodeMap(info->context()->native_context(),
- info->osr_ast_id()).code == nullptr) {
- InsertCodeIntoOptimizedCodeMap(info);
- }
- if (FLAG_trace_opt) {
- PrintF("[completed optimizing ");
- info->closure()->ShortPrint();
- PrintF("]\n");
- }
- info->closure()->ReplaceCode(*info->code());
- return;
- }
- }
-
- DCHECK(job->state() == CompilationJob::State::kFailed);
- if (FLAG_trace_opt) {
- PrintF("[aborted optimizing ");
- info->closure()->ShortPrint();
- PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
+ VMState<COMPILER> state(job->info()->isolate());
+ if (job->info()->IsOptimizing()) {
+ return FinalizeOptimizedCompilationJob(job.get()) ==
+ CompilationJob::SUCCEEDED;
+ } else {
+ return FinalizeUnoptimizedCompilationJob(job.get()) ==
+ CompilationJob::SUCCEEDED;
}
- info->closure()->ReplaceCode(shared->code());
}
void Compiler::PostInstantiation(Handle<JSFunction> function,
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 55215733c1..bfeaa8e7c3 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -9,14 +9,9 @@
#include "src/allocation.h"
#include "src/bailout-reason.h"
-#include "src/compilation-dependencies.h"
#include "src/contexts.h"
-#include "src/frames.h"
#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/source-position-table.h"
-#include "src/source-position.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -57,8 +52,12 @@ class Compiler : public AllStatic {
static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
static MaybeHandle<JSArray> CompileForLiveEdit(Handle<Script> script);
+ // Prepare a compilation job for unoptimized code. Requires ParseAndAnalyse.
+ static CompilationJob* PrepareUnoptimizedCompilationJob(
+ CompilationInfo* info);
+
// Generate and install code from previously queued compilation job.
- static void FinalizeCompilationJob(CompilationJob* job);
+ static bool FinalizeCompilationJob(CompilationJob* job);
// Give the compiler a chance to perform low-latency initialization tasks of
// the given {function} on its instantiation. Note that only the runtime will
@@ -138,405 +137,6 @@ class Compiler : public AllStatic {
JavaScriptFrame* osr_frame);
};
-
-// CompilationInfo encapsulates some information known at compile time. It
-// is constructed based on the resources available at compile-time.
-class CompilationInfo final {
- public:
- // Various configuration flags for a compilation, as well as some properties
- // of the compiled code produced by a compilation.
- enum Flag {
- kDeferredCalling = 1 << 0,
- kNonDeferredCalling = 1 << 1,
- kSavesCallerDoubles = 1 << 2,
- kRequiresFrame = 1 << 3,
- kMustNotHaveEagerFrame = 1 << 4,
- kDeoptimizationSupport = 1 << 5,
- kDebug = 1 << 6,
- kSerializing = 1 << 7,
- kFunctionContextSpecializing = 1 << 8,
- kFrameSpecializing = 1 << 9,
- kNativeContextSpecializing = 1 << 10,
- kInliningEnabled = 1 << 11,
- kDisableFutureOptimization = 1 << 12,
- kSplittingEnabled = 1 << 13,
- kDeoptimizationEnabled = 1 << 14,
- kSourcePositionsEnabled = 1 << 15,
- kBailoutOnUninitialized = 1 << 16,
- kOptimizeFromBytecode = 1 << 17,
- kTypeFeedbackEnabled = 1 << 18,
- kAccessorInliningEnabled = 1 << 19,
- };
-
- CompilationInfo(ParseInfo* parse_info, Handle<JSFunction> closure);
- CompilationInfo(Vector<const char> debug_name, Isolate* isolate, Zone* zone,
- Code::Flags code_flags = Code::ComputeFlags(Code::STUB));
- ~CompilationInfo();
-
- ParseInfo* parse_info() const { return parse_info_; }
-
- // -----------------------------------------------------------
- // TODO(titzer): inline and delete accessors of ParseInfo
- // -----------------------------------------------------------
- Handle<Script> script() const;
- FunctionLiteral* literal() const;
- DeclarationScope* scope() const;
- Handle<Context> context() const;
- Handle<SharedFunctionInfo> shared_info() const;
- bool has_shared_info() const;
- // -----------------------------------------------------------
-
- Isolate* isolate() const {
- return isolate_;
- }
- Zone* zone() { return zone_; }
- bool is_osr() const { return !osr_ast_id_.IsNone(); }
- Handle<JSFunction> closure() const { return closure_; }
- Handle<Code> code() const { return code_; }
- Code::Flags code_flags() const { return code_flags_; }
- BailoutId osr_ast_id() const { return osr_ast_id_; }
- JavaScriptFrame* osr_frame() const { return osr_frame_; }
- int num_parameters() const;
- int num_parameters_including_this() const;
- bool is_this_defined() const;
-
- void set_parameter_count(int parameter_count) {
- DCHECK(IsStub());
- parameter_count_ = parameter_count;
- }
-
- bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
- Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
-
- bool is_tracking_positions() const { return track_positions_; }
-
- bool is_calling() const {
- return GetFlag(kDeferredCalling) || GetFlag(kNonDeferredCalling);
- }
-
- void MarkAsDeferredCalling() { SetFlag(kDeferredCalling); }
-
- bool is_deferred_calling() const { return GetFlag(kDeferredCalling); }
-
- void MarkAsNonDeferredCalling() { SetFlag(kNonDeferredCalling); }
-
- bool is_non_deferred_calling() const { return GetFlag(kNonDeferredCalling); }
-
- void MarkAsSavesCallerDoubles() { SetFlag(kSavesCallerDoubles); }
-
- bool saves_caller_doubles() const { return GetFlag(kSavesCallerDoubles); }
-
- void MarkAsRequiresFrame() { SetFlag(kRequiresFrame); }
-
- bool requires_frame() const { return GetFlag(kRequiresFrame); }
-
- void MarkMustNotHaveEagerFrame() { SetFlag(kMustNotHaveEagerFrame); }
-
- bool GetMustNotHaveEagerFrame() const {
- return GetFlag(kMustNotHaveEagerFrame);
- }
-
- // Compiles marked as debug produce unoptimized code with debug break slots.
- // Inner functions that cannot be compiled w/o context are compiled eagerly.
- // Always include deoptimization support to avoid having to recompile again.
- void MarkAsDebug() {
- SetFlag(kDebug);
- SetFlag(kDeoptimizationSupport);
- }
-
- bool is_debug() const { return GetFlag(kDebug); }
-
- void PrepareForSerializing() { SetFlag(kSerializing); }
-
- bool will_serialize() const { return GetFlag(kSerializing); }
-
- void MarkAsFunctionContextSpecializing() {
- SetFlag(kFunctionContextSpecializing);
- }
-
- bool is_function_context_specializing() const {
- return GetFlag(kFunctionContextSpecializing);
- }
-
- void MarkAsFrameSpecializing() { SetFlag(kFrameSpecializing); }
-
- bool is_frame_specializing() const { return GetFlag(kFrameSpecializing); }
-
- void MarkAsNativeContextSpecializing() {
- SetFlag(kNativeContextSpecializing);
- }
-
- bool is_native_context_specializing() const {
- return GetFlag(kNativeContextSpecializing);
- }
-
- void MarkAsDeoptimizationEnabled() { SetFlag(kDeoptimizationEnabled); }
-
- bool is_deoptimization_enabled() const {
- return GetFlag(kDeoptimizationEnabled);
- }
-
- void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
-
- bool is_type_feedback_enabled() const {
- return GetFlag(kTypeFeedbackEnabled);
- }
-
- void MarkAsAccessorInliningEnabled() { SetFlag(kAccessorInliningEnabled); }
-
- bool is_accessor_inlining_enabled() const {
- return GetFlag(kAccessorInliningEnabled);
- }
-
- void MarkAsSourcePositionsEnabled() { SetFlag(kSourcePositionsEnabled); }
-
- bool is_source_positions_enabled() const {
- return GetFlag(kSourcePositionsEnabled);
- }
-
- void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
-
- bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
-
- void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
-
- bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
-
- void MarkAsBailoutOnUninitialized() { SetFlag(kBailoutOnUninitialized); }
-
- bool is_bailout_on_uninitialized() const {
- return GetFlag(kBailoutOnUninitialized);
- }
-
- void MarkAsOptimizeFromBytecode() { SetFlag(kOptimizeFromBytecode); }
-
- bool is_optimizing_from_bytecode() const {
- return GetFlag(kOptimizeFromBytecode);
- }
-
- bool GeneratePreagedPrologue() const {
- // Generate a pre-aged prologue if we are optimizing for size, which
- // will make code flushing more aggressive. Only apply to Code::FUNCTION,
- // since StaticMarkingVisitor::IsFlushable only flushes proper functions.
- return FLAG_optimize_for_size && FLAG_age_code && !is_debug() &&
- output_code_kind() == Code::FUNCTION;
- }
-
- void SetCode(Handle<Code> code) { code_ = code; }
-
- void SetBytecodeArray(Handle<BytecodeArray> bytecode_array) {
- bytecode_array_ = bytecode_array;
- }
-
- bool ShouldTrapOnDeopt() const {
- return (FLAG_trap_on_deopt && IsOptimizing()) ||
- (FLAG_trap_on_stub_deopt && IsStub());
- }
-
- bool has_native_context() const {
- return !closure().is_null() && (closure()->native_context() != nullptr);
- }
-
- Context* native_context() const {
- return has_native_context() ? closure()->native_context() : nullptr;
- }
-
- bool has_global_object() const { return has_native_context(); }
-
- JSGlobalObject* global_object() const {
- return has_global_object() ? native_context()->global_object() : nullptr;
- }
-
- // Accessors for the different compilation modes.
- bool IsOptimizing() const { return mode_ == OPTIMIZE; }
- bool IsStub() const { return mode_ == STUB; }
- void SetOptimizing() {
- DCHECK(has_shared_info());
- SetMode(OPTIMIZE);
- optimization_id_ = isolate()->NextOptimizationId();
- code_flags_ =
- Code::KindField::update(code_flags_, Code::OPTIMIZED_FUNCTION);
- }
- void SetOptimizingForOsr(BailoutId osr_ast_id, JavaScriptFrame* osr_frame) {
- SetOptimizing();
- osr_ast_id_ = osr_ast_id;
- osr_frame_ = osr_frame;
- }
-
- // Deoptimization support.
- bool HasDeoptimizationSupport() const {
- return GetFlag(kDeoptimizationSupport);
- }
- void EnableDeoptimizationSupport() {
- DCHECK_EQ(BASE, mode_);
- SetFlag(kDeoptimizationSupport);
- }
- bool ShouldEnsureSpaceForLazyDeopt() { return !IsStub(); }
-
- bool ExpectsJSReceiverAsReceiver();
-
- // Determines whether or not to insert a self-optimization header.
- bool ShouldSelfOptimize();
-
- void set_deferred_handles(DeferredHandles* deferred_handles) {
- DCHECK(deferred_handles_ == NULL);
- deferred_handles_ = deferred_handles;
- }
-
- void ReopenHandlesInNewHandleScope() {
- closure_ = Handle<JSFunction>(*closure_);
- }
-
- void AbortOptimization(BailoutReason reason) {
- DCHECK(reason != kNoReason);
- if (bailout_reason_ == kNoReason) bailout_reason_ = reason;
- SetFlag(kDisableFutureOptimization);
- }
-
- void RetryOptimization(BailoutReason reason) {
- DCHECK(reason != kNoReason);
- if (GetFlag(kDisableFutureOptimization)) return;
- bailout_reason_ = reason;
- }
-
- BailoutReason bailout_reason() const { return bailout_reason_; }
-
- int prologue_offset() const {
- DCHECK_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
- return prologue_offset_;
- }
-
- void set_prologue_offset(int prologue_offset) {
- DCHECK_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
- prologue_offset_ = prologue_offset;
- }
-
- CompilationDependencies* dependencies() { return &dependencies_; }
-
- int optimization_id() const { return optimization_id_; }
-
- int osr_expr_stack_height() { return osr_expr_stack_height_; }
- void set_osr_expr_stack_height(int height) {
- DCHECK(height >= 0);
- osr_expr_stack_height_ = height;
- }
-
- bool has_simple_parameters();
-
- struct InlinedFunctionHolder {
- Handle<SharedFunctionInfo> shared_info;
-
- // Root that holds the unoptimized code of the inlined function alive
- // (and out of reach of code flushing) until we finish compilation.
- // Do not remove.
- Handle<Code> inlined_code_object_root;
-
- explicit InlinedFunctionHolder(
- Handle<SharedFunctionInfo> inlined_shared_info)
- : shared_info(inlined_shared_info),
- inlined_code_object_root(inlined_shared_info->code()) {}
- };
-
- typedef std::vector<InlinedFunctionHolder> InlinedFunctionList;
- InlinedFunctionList const& inlined_functions() const {
- return inlined_functions_;
- }
-
- void AddInlinedFunction(Handle<SharedFunctionInfo> inlined_function) {
- inlined_functions_.push_back(InlinedFunctionHolder(inlined_function));
- }
-
- std::unique_ptr<char[]> GetDebugName() const;
-
- Code::Kind output_code_kind() const {
- return Code::ExtractKindFromFlags(code_flags_);
- }
-
- StackFrame::Type GetOutputStackFrameType() const;
-
- int GetDeclareGlobalsFlags() const;
-
- SourcePositionTableBuilder::RecordingMode SourcePositionRecordingMode() const;
-
- private:
- // Compilation mode.
- // BASE is generated by the full codegen, optionally prepared for bailouts.
- // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
- enum Mode {
- BASE,
- OPTIMIZE,
- STUB
- };
-
- CompilationInfo(ParseInfo* parse_info, Vector<const char> debug_name,
- Code::Flags code_flags, Mode mode, Isolate* isolate,
- Zone* zone);
-
- ParseInfo* parse_info_;
- Isolate* isolate_;
-
- void SetMode(Mode mode) {
- mode_ = mode;
- }
-
- void SetFlag(Flag flag) { flags_ |= flag; }
-
- void SetFlag(Flag flag, bool value) {
- flags_ = value ? flags_ | flag : flags_ & ~flag;
- }
-
- bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
-
- unsigned flags_;
-
- Code::Flags code_flags_;
-
- Handle<JSFunction> closure_;
-
- // The compiled code.
- Handle<Code> code_;
-
- // Compilation mode flag and whether deoptimization is allowed.
- Mode mode_;
- BailoutId osr_ast_id_;
-
- // Holds the bytecode array generated by the interpreter.
- // TODO(rmcilroy/mstarzinger): Temporary work-around until compiler.cc is
- // refactored to avoid us needing to carry the BytcodeArray around.
- Handle<BytecodeArray> bytecode_array_;
-
- // The zone from which the compilation pipeline working on this
- // CompilationInfo allocates.
- Zone* zone_;
-
- DeferredHandles* deferred_handles_;
-
- // Dependencies for this compilation, e.g. stable maps.
- CompilationDependencies dependencies_;
-
- BailoutReason bailout_reason_;
-
- int prologue_offset_;
-
- bool track_positions_;
-
- InlinedFunctionList inlined_functions_;
-
- // Number of parameters used for compilation of stubs that require arguments.
- int parameter_count_;
-
- int optimization_id_;
-
- int osr_expr_stack_height_;
-
- // The current OSR frame for specialization or {nullptr}.
- JavaScriptFrame* osr_frame_ = nullptr;
-
- Vector<const char> debug_name_;
-
- DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
-};
-
// A base class for compilation jobs intended to run concurrent to the main
// thread. The job is split into three phases which are called in sequence on
// different threads and with different limitations:
@@ -557,15 +157,20 @@ class CompilationJob {
kFailed,
};
- explicit CompilationJob(CompilationInfo* info, const char* compiler_name,
- State initial_state = State::kReadyToPrepare)
- : info_(info), compiler_name_(compiler_name), state_(initial_state) {}
+ CompilationJob(Isolate* isolate, CompilationInfo* info,
+ const char* compiler_name,
+ State initial_state = State::kReadyToPrepare)
+ : info_(info),
+ compiler_name_(compiler_name),
+ state_(initial_state),
+ stack_limit_(isolate->stack_guard()->real_climit()) {}
virtual ~CompilationJob() {}
// Prepare the compile job. Must be called on the main thread.
MUST_USE_RESULT Status PrepareJob();
- // Executes the compile job. Can be called off the main thread.
+ // Executes the compile job. Can be called on a background thread if
+ // can_execute_on_background_thread() returns true.
MUST_USE_RESULT Status ExecuteJob();
// Finalizes the compile job. Must be called on the main thread.
@@ -573,27 +178,23 @@ class CompilationJob {
// Report a transient failure, try again next time. Should only be called on
// optimization compilation jobs.
- Status RetryOptimization(BailoutReason reason) {
- DCHECK(info_->IsOptimizing());
- info_->RetryOptimization(reason);
- state_ = State::kFailed;
- return FAILED;
- }
+ Status RetryOptimization(BailoutReason reason);
// Report a persistent failure, disable future optimization on the function.
// Should only be called on optimization compilation jobs.
- Status AbortOptimization(BailoutReason reason) {
- DCHECK(info_->IsOptimizing());
- info_->AbortOptimization(reason);
- state_ = State::kFailed;
- return FAILED;
- }
+ Status AbortOptimization(BailoutReason reason);
+
+ void RecordOptimizedCompilationStats() const;
+ void RecordUnoptimizedCompilationStats() const;
+
+ virtual bool can_execute_on_background_thread() const { return true; }
- void RecordOptimizationStats();
+ void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
+ uintptr_t stack_limit() const { return stack_limit_; }
State state() const { return state_; }
CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info()->isolate(); }
+ Isolate* isolate() const;
protected:
// Overridden by the actual implementation.
@@ -612,6 +213,7 @@ class CompilationJob {
base::TimeDelta time_taken_to_finalize_;
const char* compiler_name_;
State state_;
+ uintptr_t stack_limit_;
MUST_USE_RESULT Status UpdateState(Status status, State next_state) {
if (status == SUCCEEDED) {
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index c43a53fba7..530143440d 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -4,21 +4,29 @@
#include "src/compiler/access-builder.h"
+#include "src/compiler/type-cache.h"
#include "src/contexts.h"
#include "src/frames.h"
#include "src/handles-inl.h"
#include "src/heap/heap.h"
-#include "src/type-cache.h"
namespace v8 {
namespace internal {
namespace compiler {
// static
+FieldAccess AccessBuilder::ForExternalDoubleValue() {
+ FieldAccess access = {kUntaggedBase, 0,
+ MaybeHandle<Name>(), Type::Number(),
+ MachineType::Float64(), kNoWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForMap() {
FieldAccess access = {
- kTaggedBase, HeapObject::kMapOffset, MaybeHandle<Name>(),
- Type::OtherInternal(), MachineType::AnyTagged(), kMapWriteBarrier};
+ kTaggedBase, HeapObject::kMapOffset, MaybeHandle<Name>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(), kMapWriteBarrier};
return access;
}
@@ -38,8 +46,8 @@ FieldAccess AccessBuilder::ForHeapNumberValue() {
// static
FieldAccess AccessBuilder::ForJSObjectProperties() {
FieldAccess access = {
- kTaggedBase, JSObject::kPropertiesOffset, MaybeHandle<Name>(),
- Type::Internal(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ kTaggedBase, JSObject::kPropertiesOffset, MaybeHandle<Name>(),
+ Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
return access;
}
@@ -47,8 +55,8 @@ FieldAccess AccessBuilder::ForJSObjectProperties() {
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
FieldAccess access = {
- kTaggedBase, JSObject::kElementsOffset, MaybeHandle<Name>(),
- Type::Internal(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ kTaggedBase, JSObject::kElementsOffset, MaybeHandle<Name>(),
+ Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
return access;
}
@@ -60,7 +68,7 @@ FieldAccess AccessBuilder::ForJSObjectInObjectProperty(Handle<Map> map,
FieldAccess access = {kTaggedBase,
offset,
MaybeHandle<Name>(),
- Type::Tagged(),
+ Type::NonInternal(),
MachineType::AnyTagged(),
kFullWriteBarrier};
return access;
@@ -93,7 +101,7 @@ FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
JSFunction::kSharedFunctionInfoOffset,
Handle<Name>(),
Type::OtherInternal(),
- MachineType::AnyTagged(),
+ MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -101,19 +109,16 @@ FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
// static
FieldAccess AccessBuilder::ForJSFunctionLiterals() {
FieldAccess access = {
- kTaggedBase, JSFunction::kLiteralsOffset, Handle<Name>(),
- Type::Internal(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ kTaggedBase, JSFunction::kLiteralsOffset, Handle<Name>(),
+ Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionCodeEntry() {
- FieldAccess access = {kTaggedBase,
- JSFunction::kCodeEntryOffset,
- Handle<Name>(),
- Type::UntaggedPointer(),
- MachineType::Pointer(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kCodeEntryOffset, Handle<Name>(),
+ Type::OtherInternal(), MachineType::Pointer(), kNoWriteBarrier};
return access;
}
@@ -134,7 +139,7 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
JSGeneratorObject::kContextOffset,
Handle<Name>(),
Type::Internal(),
- MachineType::AnyTagged(),
+ MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -146,7 +151,7 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectContinuation() {
JSGeneratorObject::kContinuationOffset,
Handle<Name>(),
type_cache.kSmi,
- MachineType::AnyTagged(),
+ MachineType::TaggedSigned(),
kNoWriteBarrier};
return access;
}
@@ -176,12 +181,9 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectOperandStack() {
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
TypeCache const& type_cache = TypeCache::Get();
- FieldAccess access = {kTaggedBase,
- JSGeneratorObject::kResumeModeOffset,
- Handle<Name>(),
- type_cache.kSmi,
- MachineType::AnyTagged(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSGeneratorObject::kResumeModeOffset, Handle<Name>(),
+ type_cache.kSmi, MachineType::TaggedSigned(), kNoWriteBarrier};
return access;
}
@@ -192,7 +194,7 @@ FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
JSArray::kLengthOffset,
Handle<Name>(),
type_cache.kJSArrayLengthType,
- MachineType::AnyTagged(),
+ MachineType::TaggedSigned(),
kFullWriteBarrier};
if (IsFastDoubleElementsKind(elements_kind)) {
access.type = type_cache.kFixedDoubleArrayLengthType;
@@ -210,7 +212,7 @@ FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
FieldAccess access = {kTaggedBase,
JSArrayBuffer::kBackingStoreOffset,
MaybeHandle<Name>(),
- Type::UntaggedPointer(),
+ Type::OtherInternal(),
MachineType::Pointer(),
kNoWriteBarrier};
return access;
@@ -229,8 +231,8 @@ FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
FieldAccess access = {kTaggedBase,
JSArrayBufferView::kBufferOffset,
MaybeHandle<Name>(),
- Type::TaggedPointer(),
- MachineType::AnyTagged(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -263,12 +265,23 @@ FieldAccess AccessBuilder::ForJSTypedArrayLength() {
JSTypedArray::kLengthOffset,
MaybeHandle<Name>(),
TypeCache::Get().kJSTypedArrayLengthType,
- MachineType::AnyTagged(),
+ MachineType::TaggedSigned(),
kNoWriteBarrier};
return access;
}
// static
+FieldAccess AccessBuilder::ForJSDateValue() {
+ FieldAccess access = {kTaggedBase,
+ JSDate::kValueOffset,
+ MaybeHandle<Name>(),
+ TypeCache::Get().kJSDateValueType,
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
FieldAccess access = {kTaggedBase,
JSDate::kValueOffset + index * kPointerSize,
@@ -301,8 +314,8 @@ FieldAccess AccessBuilder::ForJSIteratorResultValue() {
// static
FieldAccess AccessBuilder::ForJSRegExpFlags() {
FieldAccess access = {
- kTaggedBase, JSRegExp::kFlagsOffset, MaybeHandle<Name>(),
- Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
+ kTaggedBase, JSRegExp::kFlagsOffset, MaybeHandle<Name>(),
+ Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
@@ -310,8 +323,8 @@ FieldAccess AccessBuilder::ForJSRegExpFlags() {
// static
FieldAccess AccessBuilder::ForJSRegExpSource() {
FieldAccess access = {
- kTaggedBase, JSRegExp::kSourceOffset, MaybeHandle<Name>(),
- Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
+ kTaggedBase, JSRegExp::kSourceOffset, MaybeHandle<Name>(),
+ Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
@@ -322,7 +335,7 @@ FieldAccess AccessBuilder::ForFixedArrayLength() {
FixedArray::kLengthOffset,
MaybeHandle<Name>(),
TypeCache::Get().kFixedArrayLengthType,
- MachineType::AnyTagged(),
+ MachineType::TaggedSigned(),
kNoWriteBarrier};
return access;
}
@@ -332,7 +345,7 @@ FieldAccess AccessBuilder::ForFixedTypedArrayBaseBasePointer() {
FieldAccess access = {kTaggedBase,
FixedTypedArrayBase::kBasePointerOffset,
MaybeHandle<Name>(),
- Type::Tagged(),
+ Type::OtherInternal(),
MachineType::AnyTagged(),
kPointerWriteBarrier};
return access;
@@ -343,7 +356,7 @@ FieldAccess AccessBuilder::ForFixedTypedArrayBaseExternalPointer() {
FieldAccess access = {kTaggedBase,
FixedTypedArrayBase::kExternalPointerOffset,
MaybeHandle<Name>(),
- Type::UntaggedPointer(),
+ Type::OtherInternal(),
MachineType::Pointer(),
kNoWriteBarrier};
return access;
@@ -354,8 +367,8 @@ FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
FieldAccess access = {kTaggedBase,
DescriptorArray::kEnumCacheOffset,
Handle<Name>(),
- Type::TaggedPointer(),
- MachineType::AnyTagged(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -366,8 +379,8 @@ FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
FieldAccess access = {kTaggedBase,
DescriptorArray::kEnumCacheBridgeCacheOffset,
Handle<Name>(),
- Type::TaggedPointer(),
- MachineType::AnyTagged(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -393,9 +406,12 @@ FieldAccess AccessBuilder::ForMapBitField3() {
// static
FieldAccess AccessBuilder::ForMapDescriptors() {
- FieldAccess access = {
- kTaggedBase, Map::kDescriptorsOffset, Handle<Name>(),
- Type::TaggedPointer(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase,
+ Map::kDescriptorsOffset,
+ Handle<Name>(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -412,8 +428,8 @@ FieldAccess AccessBuilder::ForMapInstanceType() {
// static
FieldAccess AccessBuilder::ForMapPrototype() {
FieldAccess access = {
- kTaggedBase, Map::kPrototypeOffset, Handle<Name>(),
- Type::TaggedPointer(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ kTaggedBase, Map::kPrototypeOffset, Handle<Name>(),
+ Type::Any(), MachineType::TaggedPointer(), kPointerWriteBarrier};
return access;
}
@@ -432,7 +448,7 @@ FieldAccess AccessBuilder::ForStringLength() {
String::kLengthOffset,
Handle<Name>(),
TypeCache::Get().kStringLengthType,
- MachineType::AnyTagged(),
+ MachineType::TaggedSigned(),
kNoWriteBarrier};
return access;
}
@@ -440,16 +456,16 @@ FieldAccess AccessBuilder::ForStringLength() {
// static
FieldAccess AccessBuilder::ForConsStringFirst() {
FieldAccess access = {
- kTaggedBase, ConsString::kFirstOffset, Handle<Name>(),
- Type::String(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ kTaggedBase, ConsString::kFirstOffset, Handle<Name>(),
+ Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForConsStringSecond() {
FieldAccess access = {
- kTaggedBase, ConsString::kSecondOffset, Handle<Name>(),
- Type::String(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ kTaggedBase, ConsString::kSecondOffset, Handle<Name>(),
+ Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
return access;
}
@@ -457,15 +473,15 @@ FieldAccess AccessBuilder::ForConsStringSecond() {
FieldAccess AccessBuilder::ForSlicedStringOffset() {
FieldAccess access = {
kTaggedBase, SlicedString::kOffsetOffset, Handle<Name>(),
- Type::SignedSmall(), MachineType::AnyTagged(), kNoWriteBarrier};
+ Type::SignedSmall(), MachineType::TaggedSigned(), kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForSlicedStringParent() {
FieldAccess access = {
- kTaggedBase, SlicedString::kParentOffset, Handle<Name>(),
- Type::String(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ kTaggedBase, SlicedString::kParentOffset, Handle<Name>(),
+ Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
return access;
}
@@ -474,7 +490,7 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
FieldAccess access = {kTaggedBase,
ExternalString::kResourceDataOffset,
Handle<Name>(),
- Type::UntaggedPointer(),
+ Type::OtherInternal(),
MachineType::Pointer(),
kNoWriteBarrier};
return access;
@@ -516,7 +532,7 @@ FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
JSGlobalObject::kGlobalProxyOffset,
Handle<Name>(),
Type::Receiver(),
- MachineType::AnyTagged(),
+ MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -527,11 +543,29 @@ FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
JSGlobalObject::kNativeContextOffset,
Handle<Name>(),
Type::Internal(),
- MachineType::AnyTagged(),
+ MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
+// static
+FieldAccess AccessBuilder::ForJSStringIteratorString() {
+ FieldAccess access = {
+ kTaggedBase, JSStringIterator::kStringOffset, Handle<Name>(),
+ Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
+ FieldAccess access = {kTaggedBase,
+ JSStringIterator::kNextIndexOffset,
+ Handle<Name>(),
+ TypeCache::Get().kStringLengthType,
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
// static
FieldAccess AccessBuilder::ForValue() {
@@ -590,24 +624,28 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
return access;
}
-
// static
-FieldAccess AccessBuilder::ForPropertyCellValue() {
- return ForPropertyCellValue(Type::Tagged());
+FieldAccess AccessBuilder::ForContextExtensionScopeInfo() {
+ FieldAccess access = {kTaggedBase,
+ ContextExtension::kScopeInfoOffset,
+ Handle<Name>(),
+ Type::OtherInternal(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
}
-
// static
-FieldAccess AccessBuilder::ForPropertyCellValue(Type* type) {
+FieldAccess AccessBuilder::ForContextExtensionExtension() {
FieldAccess access = {
- kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(),
- type, MachineType::AnyTagged(), kFullWriteBarrier};
+ kTaggedBase, ContextExtension::kExtensionOffset, Handle<Name>(),
+ Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
- ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Tagged(),
+ ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
@@ -619,6 +657,7 @@ ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) {
switch (kind) {
case FAST_SMI_ELEMENTS:
access.type = TypeCache::Get().kSmi;
+ access.machine_type = MachineType::TaggedSigned();
access.write_barrier_kind = kNoWriteBarrier;
break;
case FAST_HOLEY_SMI_ELEMENTS:
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index caaf8f8c06..96f3200ee2 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -18,6 +18,12 @@ namespace compiler {
class AccessBuilder final : public AllStatic {
public:
// ===========================================================================
+ // Access to external values (based on external references).
+
+ // Provides access to a double field identified by an external reference.
+ static FieldAccess ForExternalDoubleValue();
+
+ // ===========================================================================
// Access to heap object fields and elements (based on tagged pointer).
// Provides access to HeapObject::map() field.
@@ -89,6 +95,9 @@ class AccessBuilder final : public AllStatic {
// Provides access to JSTypedArray::length() field.
static FieldAccess ForJSTypedArrayLength();
+ // Provides access to JSDate::value() field.
+ static FieldAccess ForJSDateValue();
+
// Provides access to JSDate fields.
static FieldAccess ForJSDateField(JSDate::FieldIndex index);
@@ -173,6 +182,12 @@ class AccessBuilder final : public AllStatic {
// Provides access to JSGlobalObject::native_context() field.
static FieldAccess ForJSGlobalObjectNativeContext();
+ // Provides access to JSStringIterator::string() field.
+ static FieldAccess ForJSStringIteratorString();
+
+ // Provides access to JSStringIterator::index() field.
+ static FieldAccess ForJSStringIteratorIndex();
+
// Provides access to JSValue::value() field.
static FieldAccess ForValue();
@@ -186,9 +201,9 @@ class AccessBuilder final : public AllStatic {
// Provides access to Context slots.
static FieldAccess ForContextSlot(size_t index);
- // Provides access to PropertyCell::value() field.
- static FieldAccess ForPropertyCellValue();
- static FieldAccess ForPropertyCellValue(Type* type);
+ // Provides access to ContextExtension fields.
+ static FieldAccess ForContextExtensionScopeInfo();
+ static FieldAccess ForContextExtensionExtension();
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 97de25bd4c..329cb93fe5 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -7,10 +7,10 @@
#include "src/accessors.h"
#include "src/compilation-dependencies.h"
#include "src/compiler/access-info.h"
+#include "src/compiler/type-cache.h"
#include "src/field-index-inl.h"
#include "src/field-type.h"
#include "src/objects-inl.h"
-#include "src/type-cache.h"
namespace v8 {
namespace internal {
@@ -79,9 +79,12 @@ PropertyAccessInfo PropertyAccessInfo::DataConstant(
// static
PropertyAccessInfo PropertyAccessInfo::DataField(
- MapList const& receiver_maps, FieldIndex field_index, Type* field_type,
- MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
- return PropertyAccessInfo(holder, transition_map, field_index, field_type,
+ MapList const& receiver_maps, FieldIndex field_index,
+ MachineRepresentation field_representation, Type* field_type,
+ MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map) {
+ return PropertyAccessInfo(holder, transition_map, field_index,
+ field_representation, field_type, field_map,
receiver_maps);
}
@@ -93,13 +96,16 @@ PropertyAccessInfo PropertyAccessInfo::AccessorConstant(
}
PropertyAccessInfo::PropertyAccessInfo()
- : kind_(kInvalid), field_type_(Type::None()) {}
+ : kind_(kInvalid),
+ field_representation_(MachineRepresentation::kNone),
+ field_type_(Type::None()) {}
PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
MapList const& receiver_maps)
: kind_(kNotFound),
receiver_maps_(receiver_maps),
holder_(holder),
+ field_representation_(MachineRepresentation::kNone),
field_type_(Type::None()) {}
PropertyAccessInfo::PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
@@ -109,18 +115,21 @@ PropertyAccessInfo::PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
receiver_maps_(receiver_maps),
constant_(constant),
holder_(holder),
+ field_representation_(MachineRepresentation::kNone),
field_type_(Type::Any()) {}
-PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
- MaybeHandle<Map> transition_map,
- FieldIndex field_index, Type* field_type,
- MapList const& receiver_maps)
+PropertyAccessInfo::PropertyAccessInfo(
+ MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
+ FieldIndex field_index, MachineRepresentation field_representation,
+ Type* field_type, MaybeHandle<Map> field_map, MapList const& receiver_maps)
: kind_(kDataField),
receiver_maps_(receiver_maps),
transition_map_(transition_map),
holder_(holder),
field_index_(field_index),
- field_type_(field_type) {}
+ field_representation_(field_representation),
+ field_type_(field_type),
+ field_map_(field_map) {}
bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
if (this->kind_ != that->kind_) return false;
@@ -138,7 +147,8 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
if (this->transition_map_.address() == that->transition_map_.address() &&
this->field_index_ == that->field_index_ &&
this->field_type_->Is(that->field_type_) &&
- that->field_type_->Is(this->field_type_)) {
+ that->field_type_->Is(this->field_type_) &&
+ this->field_representation_ == that->field_representation_) {
this->receiver_maps_.insert(this->receiver_maps_.end(),
that->receiver_maps_.begin(),
that->receiver_maps_.end());
@@ -283,41 +293,45 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
}
case DATA: {
int index = descriptors->GetFieldIndex(number);
- Representation field_representation = details.representation();
+ Representation details_representation = details.representation();
FieldIndex field_index = FieldIndex::ForPropertyIndex(
- *map, index, field_representation.IsDouble());
- Type* field_type = Type::Tagged();
- if (field_representation.IsSmi()) {
+ *map, index, details_representation.IsDouble());
+ Type* field_type = Type::NonInternal();
+ MachineRepresentation field_representation =
+ MachineRepresentation::kTagged;
+ MaybeHandle<Map> field_map;
+ if (details_representation.IsSmi()) {
field_type = type_cache_.kSmi;
- } else if (field_representation.IsDouble()) {
+ field_representation = MachineRepresentation::kTaggedSigned;
+ } else if (details_representation.IsDouble()) {
field_type = type_cache_.kFloat64;
- } else if (field_representation.IsHeapObject()) {
+ field_representation = MachineRepresentation::kFloat64;
+ } else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
- field_type = Type::Intersect(
- descriptors->GetFieldType(number)->Convert(zone()),
- Type::TaggedPointer(), zone());
- if (field_type->Is(Type::None())) {
+ field_representation = MachineRepresentation::kTaggedPointer;
+ Handle<FieldType> descriptors_field_type(
+ descriptors->GetFieldType(number), isolate());
+ if (descriptors_field_type->IsNone()) {
// Store is not safe if the field type was cleared.
if (access_mode == AccessMode::kStore) return false;
// The field type was cleared by the GC, so we don't know anything
// about the contents now.
- // TODO(bmeurer): It would be awesome to make this saner in the
- // runtime/GC interaction.
- field_type = Type::TaggedPointer();
- } else if (!Type::Any()->Is(field_type)) {
+ } else if (descriptors_field_type->IsClass()) {
// Add proper code dependencies in case of stable field map(s).
Handle<Map> field_owner_map(map->FindFieldOwner(number),
isolate());
dependencies()->AssumeFieldType(field_owner_map);
- }
- if (access_mode == AccessMode::kLoad) {
- field_type = Type::Any();
+
+ // Remember the field map, and try to infer a useful type.
+ field_type = Type::For(descriptors_field_type->AsClass());
+ field_map = descriptors_field_type->AsClass();
}
}
*access_info = PropertyAccessInfo::DataField(
- MapList{receiver_map}, field_index, field_type, holder);
+ MapList{receiver_map}, field_index, field_representation,
+ field_type, field_map, holder);
return true;
}
case ACCESSOR_CONSTANT: {
@@ -423,12 +437,14 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
int offset;
if (Accessors::IsJSObjectFieldAccessor(map, name, &offset)) {
FieldIndex field_index = FieldIndex::ForInObjectOffset(offset);
- Type* field_type = Type::Tagged();
+ Type* field_type = Type::NonInternal();
+ MachineRepresentation field_representation = MachineRepresentation::kTagged;
if (map->IsStringMap()) {
DCHECK(Name::Equals(factory()->length_string(), name));
// The String::length property is always a smi in the range
// [0, String::kMaxLength].
field_type = type_cache_.kStringLengthType;
+ field_representation = MachineRepresentation::kTaggedSigned;
} else if (map->IsJSArrayMap()) {
DCHECK(Name::Equals(factory()->length_string(), name));
// The JSArray::length property is a smi in the range
@@ -438,14 +454,16 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
// case of other arrays.
if (IsFastDoubleElementsKind(map->elements_kind())) {
field_type = type_cache_.kFixedDoubleArrayLengthType;
+ field_representation = MachineRepresentation::kTaggedSigned;
} else if (IsFastElementsKind(map->elements_kind())) {
field_type = type_cache_.kFixedArrayLengthType;
+ field_representation = MachineRepresentation::kTaggedSigned;
} else {
field_type = type_cache_.kJSArrayLengthType;
}
}
- *access_info =
- PropertyAccessInfo::DataField(MapList{map}, field_index, field_type);
+ *access_info = PropertyAccessInfo::DataField(
+ MapList{map}, field_index, field_representation, field_type);
return true;
}
return false;
@@ -468,35 +486,43 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
// TODO(bmeurer): Handle transition to data constant?
if (details.type() != DATA) return false;
int const index = details.field_index();
- Representation field_representation = details.representation();
+ Representation details_representation = details.representation();
FieldIndex field_index = FieldIndex::ForPropertyIndex(
- *transition_map, index, field_representation.IsDouble());
- Type* field_type = Type::Tagged();
- if (field_representation.IsSmi()) {
+ *transition_map, index, details_representation.IsDouble());
+ Type* field_type = Type::NonInternal();
+ MaybeHandle<Map> field_map;
+ MachineRepresentation field_representation = MachineRepresentation::kTagged;
+ if (details_representation.IsSmi()) {
field_type = type_cache_.kSmi;
- } else if (field_representation.IsDouble()) {
+ field_representation = MachineRepresentation::kTaggedSigned;
+ } else if (details_representation.IsDouble()) {
field_type = type_cache_.kFloat64;
- } else if (field_representation.IsHeapObject()) {
+ field_representation = MachineRepresentation::kFloat64;
+ } else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
- field_type = Type::Intersect(
- transition_map->instance_descriptors()->GetFieldType(number)->Convert(
- zone()),
- Type::TaggedPointer(), zone());
- if (field_type->Is(Type::None())) {
+ field_representation = MachineRepresentation::kTaggedPointer;
+ Handle<FieldType> descriptors_field_type(
+ transition_map->instance_descriptors()->GetFieldType(number),
+ isolate());
+ if (descriptors_field_type->IsNone()) {
// Store is not safe if the field type was cleared.
return false;
- } else if (!Type::Any()->Is(field_type)) {
+ } else if (descriptors_field_type->IsClass()) {
// Add proper code dependencies in case of stable field map(s).
Handle<Map> field_owner_map(transition_map->FindFieldOwner(number),
isolate());
dependencies()->AssumeFieldType(field_owner_map);
+
+ // Remember the field map, and try to infer a useful type.
+ field_type = Type::For(descriptors_field_type->AsClass());
+ field_map = descriptors_field_type->AsClass();
}
- DCHECK(field_type->Is(Type::TaggedPointer()));
}
dependencies()->AssumeMapNotDeprecated(transition_map);
*access_info = PropertyAccessInfo::DataField(
- MapList{map}, field_index, field_type, holder, transition_map);
+ MapList{map}, field_index, field_representation, field_type, field_map,
+ holder, transition_map);
return true;
}
return false;
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index daa872286f..ac186fb144 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -8,8 +8,9 @@
#include <iosfwd>
#include "src/field-index.h"
+#include "src/machine-type.h"
#include "src/objects.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -17,10 +18,13 @@ namespace internal {
// Forward declarations.
class CompilationDependencies;
class Factory;
-class TypeCache;
namespace compiler {
+// Forward declarations.
+class Type;
+class TypeCache;
+
// Whether we are loading a property or storing to a property.
enum class AccessMode { kLoad, kStore };
@@ -66,7 +70,9 @@ class PropertyAccessInfo final {
Handle<Object> constant,
MaybeHandle<JSObject> holder);
static PropertyAccessInfo DataField(
- MapList const& receiver_maps, FieldIndex field_index, Type* field_type,
+ MapList const& receiver_maps, FieldIndex field_index,
+ MachineRepresentation field_representation, Type* field_type,
+ MaybeHandle<Map> field_map = MaybeHandle<Map>(),
MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
static PropertyAccessInfo AccessorConstant(MapList const& receiver_maps,
@@ -90,6 +96,10 @@ class PropertyAccessInfo final {
Handle<Object> constant() const { return constant_; }
FieldIndex field_index() const { return field_index_; }
Type* field_type() const { return field_type_; }
+ MachineRepresentation field_representation() const {
+ return field_representation_;
+ }
+ MaybeHandle<Map> field_map() const { return field_map_; }
MapList const& receiver_maps() const { return receiver_maps_; }
private:
@@ -99,7 +109,9 @@ class PropertyAccessInfo final {
Handle<Object> constant, MapList const& receiver_maps);
PropertyAccessInfo(MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map, FieldIndex field_index,
- Type* field_type, MapList const& receiver_maps);
+ MachineRepresentation field_representation,
+ Type* field_type, MaybeHandle<Map> field_map,
+ MapList const& receiver_maps);
Kind kind_;
MapList receiver_maps_;
@@ -107,7 +119,9 @@ class PropertyAccessInfo final {
MaybeHandle<Map> transition_map_;
MaybeHandle<JSObject> holder_;
FieldIndex field_index_;
+ MachineRepresentation field_representation_;
Type* field_type_;
+ MaybeHandle<Map> field_map_;
};
diff --git a/deps/v8/src/compiler/all-nodes.cc b/deps/v8/src/compiler/all-nodes.cc
index 8040897fd3..eada0cff8c 100644
--- a/deps/v8/src/compiler/all-nodes.cc
+++ b/deps/v8/src/compiler/all-nodes.cc
@@ -14,13 +14,26 @@ AllNodes::AllNodes(Zone* local_zone, const Graph* graph, bool only_inputs)
: reachable(local_zone),
is_reachable_(graph->NodeCount(), false, local_zone),
only_inputs_(only_inputs) {
- Node* end = graph->end();
+ Mark(local_zone, graph->end(), graph);
+}
+
+AllNodes::AllNodes(Zone* local_zone, Node* end, const Graph* graph,
+ bool only_inputs)
+ : reachable(local_zone),
+ is_reachable_(graph->NodeCount(), false, local_zone),
+ only_inputs_(only_inputs) {
+ Mark(local_zone, end, graph);
+}
+
+void AllNodes::Mark(Zone* local_zone, Node* end, const Graph* graph) {
+ DCHECK_LT(end->id(), graph->NodeCount());
is_reachable_[end->id()] = true;
reachable.push_back(end);
- // Find all nodes reachable from end.
+ // Find all nodes reachable from {end}.
for (size_t i = 0; i < reachable.size(); i++) {
- for (Node* input : reachable[i]->inputs()) {
- if (input == nullptr || input->id() >= graph->NodeCount()) {
+ for (Node* const input : reachable[i]->inputs()) {
+ if (input == nullptr) {
+ // TODO(titzer): print a warning.
continue;
}
if (!is_reachable_[input->id()]) {
@@ -28,7 +41,7 @@ AllNodes::AllNodes(Zone* local_zone, const Graph* graph, bool only_inputs)
reachable.push_back(input);
}
}
- if (!only_inputs) {
+ if (!only_inputs_) {
for (Node* use : reachable[i]->uses()) {
if (use == nullptr || use->id() >= graph->NodeCount()) {
continue;
diff --git a/deps/v8/src/compiler/all-nodes.h b/deps/v8/src/compiler/all-nodes.h
index 36f02e9582..7c70bf75f6 100644
--- a/deps/v8/src/compiler/all-nodes.h
+++ b/deps/v8/src/compiler/all-nodes.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_ALL_NODES_H_
#include "src/compiler/node.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -16,9 +16,13 @@ namespace compiler {
// from end.
class AllNodes {
public:
- // Constructor. Traverses the graph and builds the {reachable} sets. When
- // {only_inputs} is true, find the nodes reachable through input edges;
- // these are all live nodes.
+ // Constructor. Traverses the graph and builds the {reachable} set of nodes
+ // reachable from {end}. When {only_inputs} is true, find the nodes
+ // reachable through input edges; these are all live nodes.
+ AllNodes(Zone* local_zone, Node* end, const Graph* graph,
+ bool only_inputs = true);
+ // Constructor. Traverses the graph and builds the {reachable} set of nodes
+ // reachable from the End node.
AllNodes(Zone* local_zone, const Graph* graph, bool only_inputs = true);
bool IsLive(Node* node) {
@@ -35,6 +39,8 @@ class AllNodes {
NodeVector reachable; // Nodes reachable from end.
private:
+ void Mark(Zone* local_zone, Node* end, const Graph* graph);
+
BoolVector is_reachable_;
const bool only_inputs_;
};
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 4ae282a0d1..dbe182802a 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -5,7 +5,7 @@
#include "src/compiler/code-generator.h"
#include "src/arm/macro-assembler-arm.h"
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
@@ -271,6 +271,37 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
UnwindingInfoWriter* const unwinding_info_writer_;
};
+template <typename T>
+class OutOfLineFloatMin final : public OutOfLineCode {
+ public:
+ OutOfLineFloatMin(CodeGenerator* gen, T result, T left, T right)
+ : OutOfLineCode(gen), result_(result), left_(left), right_(right) {}
+
+ void Generate() final { __ FloatMinOutOfLine(result_, left_, right_); }
+
+ private:
+ T const result_;
+ T const left_;
+ T const right_;
+};
+typedef OutOfLineFloatMin<SwVfpRegister> OutOfLineFloat32Min;
+typedef OutOfLineFloatMin<DwVfpRegister> OutOfLineFloat64Min;
+
+template <typename T>
+class OutOfLineFloatMax final : public OutOfLineCode {
+ public:
+ OutOfLineFloatMax(CodeGenerator* gen, T result, T left, T right)
+ : OutOfLineCode(gen), result_(result), left_(left), right_(right) {}
+
+ void Generate() final { __ FloatMaxOutOfLine(result_, left_, right_); }
+
+ private:
+ T const result_;
+ T const left_;
+ T const right_;
+};
+typedef OutOfLineFloatMax<SwVfpRegister> OutOfLineFloat32Max;
+typedef OutOfLineFloatMax<DwVfpRegister> OutOfLineFloat64Max;
Condition FlagsConditionToCondition(FlagsCondition condition) {
switch (condition) {
@@ -707,9 +738,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDebugBreak:
__ stop("kArchDebugBreak");
break;
- case kArchImpossible:
- __ Abort(kConversionFromImpossibleValue);
- break;
case kArchComment: {
Address comment_string = i.InputExternalReference(0).address();
__ RecordComment(reinterpret_cast<const char*>(comment_string));
@@ -725,8 +753,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result =
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result = AssembleDeoptimizerCall(
+ deopt_state_id, bailout_type, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -1199,33 +1227,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmVnegF64:
__ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kArmVrintmF32:
+ case kArmVrintmF32: {
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
- case kArmVrintmF64:
+ }
+ case kArmVrintmF64: {
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kArmVrintpF32:
+ }
+ case kArmVrintpF32: {
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
- case kArmVrintpF64:
+ }
+ case kArmVrintpF64: {
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kArmVrintzF32:
+ }
+ case kArmVrintzF32: {
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
- case kArmVrintzF64:
+ }
+ case kArmVrintzF64: {
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kArmVrintaF64:
+ }
+ case kArmVrintaF64: {
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kArmVrintnF32:
+ }
+ case kArmVrintnF32: {
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
- case kArmVrintnF64:
+ }
+ case kArmVrintnF64: {
+ CpuFeatureScope scope(masm(), ARMv8);
__ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ }
case kArmVcvtF32F64: {
__ vcvt_f32_f64(i.OutputFloat32Register(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1380,145 +1426,59 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmFloat32Max: {
- FloatRegister left_reg = i.InputFloat32Register(0);
- FloatRegister right_reg = i.InputFloat32Register(1);
- FloatRegister result_reg = i.OutputFloat32Register();
- Label result_is_nan, return_left, return_right, check_zero, done;
- __ VFPCompareAndSetFlags(left_reg, right_reg);
- __ b(mi, &return_right);
- __ b(gt, &return_left);
- __ b(vs, &result_is_nan);
- // Left equals right => check for -0.
- __ VFPCompareAndSetFlags(left_reg, 0.0);
- if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
- __ b(ne, &done); // left == right != 0.
+ SwVfpRegister result = i.OutputFloat32Register();
+ SwVfpRegister left = i.InputFloat32Register(0);
+ SwVfpRegister right = i.InputFloat32Register(1);
+ if (left.is(right)) {
+ __ Move(result, left);
} else {
- __ b(ne, &return_left); // left == right != 0.
+ auto ool = new (zone()) OutOfLineFloat32Max(this, result, left, right);
+ __ FloatMax(result, left, right, ool->entry());
+ __ bind(ool->exit());
}
- // At this point, both left and right are either 0 or -0.
- // Since we operate on +0 and/or -0, vadd and vand have the same effect;
- // the decision for vadd is easy because vand is a NEON instruction.
- __ vadd(result_reg, left_reg, right_reg);
- __ b(&done);
- __ bind(&result_is_nan);
- __ vadd(result_reg, left_reg, right_reg);
- __ b(&done);
- __ bind(&return_right);
- __ Move(result_reg, right_reg);
- if (!left_reg.is(result_reg)) __ b(&done);
- __ bind(&return_left);
- __ Move(result_reg, left_reg);
- __ bind(&done);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmFloat64Max: {
- DwVfpRegister left_reg = i.InputDoubleRegister(0);
- DwVfpRegister right_reg = i.InputDoubleRegister(1);
- DwVfpRegister result_reg = i.OutputDoubleRegister();
- Label result_is_nan, return_left, return_right, check_zero, done;
- __ VFPCompareAndSetFlags(left_reg, right_reg);
- __ b(mi, &return_right);
- __ b(gt, &return_left);
- __ b(vs, &result_is_nan);
- // Left equals right => check for -0.
- __ VFPCompareAndSetFlags(left_reg, 0.0);
- if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
- __ b(ne, &done); // left == right != 0.
+ DwVfpRegister result = i.OutputDoubleRegister();
+ DwVfpRegister left = i.InputDoubleRegister(0);
+ DwVfpRegister right = i.InputDoubleRegister(1);
+ if (left.is(right)) {
+ __ Move(result, left);
} else {
- __ b(ne, &return_left); // left == right != 0.
+ auto ool = new (zone()) OutOfLineFloat64Max(this, result, left, right);
+ __ FloatMax(result, left, right, ool->entry());
+ __ bind(ool->exit());
}
- // At this point, both left and right are either 0 or -0.
- // Since we operate on +0 and/or -0, vadd and vand have the same effect;
- // the decision for vadd is easy because vand is a NEON instruction.
- __ vadd(result_reg, left_reg, right_reg);
- __ b(&done);
- __ bind(&result_is_nan);
- __ vadd(result_reg, left_reg, right_reg);
- __ b(&done);
- __ bind(&return_right);
- __ Move(result_reg, right_reg);
- if (!left_reg.is(result_reg)) __ b(&done);
- __ bind(&return_left);
- __ Move(result_reg, left_reg);
- __ bind(&done);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmFloat32Min: {
- FloatRegister left_reg = i.InputFloat32Register(0);
- FloatRegister right_reg = i.InputFloat32Register(1);
- FloatRegister result_reg = i.OutputFloat32Register();
- Label result_is_nan, return_left, return_right, check_zero, done;
- __ VFPCompareAndSetFlags(left_reg, right_reg);
- __ b(mi, &return_left);
- __ b(gt, &return_right);
- __ b(vs, &result_is_nan);
- // Left equals right => check for -0.
- __ VFPCompareAndSetFlags(left_reg, 0.0);
- if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
- __ b(ne, &done); // left == right != 0.
+ SwVfpRegister result = i.OutputFloat32Register();
+ SwVfpRegister left = i.InputFloat32Register(0);
+ SwVfpRegister right = i.InputFloat32Register(1);
+ if (left.is(right)) {
+ __ Move(result, left);
} else {
- __ b(ne, &return_left); // left == right != 0.
+ auto ool = new (zone()) OutOfLineFloat32Min(this, result, left, right);
+ __ FloatMin(result, left, right, ool->entry());
+ __ bind(ool->exit());
}
- // At this point, both left and right are either 0 or -0.
- // We could use a single 'vorr' instruction here if we had NEON support.
- // The algorithm is: -((-L) + (-R)), which in case of L and R being
- // different registers is most efficiently expressed as -((-L) - R).
- __ vneg(left_reg, left_reg);
- if (left_reg.is(right_reg)) {
- __ vadd(result_reg, left_reg, right_reg);
- } else {
- __ vsub(result_reg, left_reg, right_reg);
- }
- __ vneg(result_reg, result_reg);
- __ b(&done);
- __ bind(&result_is_nan);
- __ vadd(result_reg, left_reg, right_reg);
- __ b(&done);
- __ bind(&return_right);
- __ Move(result_reg, right_reg);
- if (!left_reg.is(result_reg)) __ b(&done);
- __ bind(&return_left);
- __ Move(result_reg, left_reg);
- __ bind(&done);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmFloat64Min: {
- DwVfpRegister left_reg = i.InputDoubleRegister(0);
- DwVfpRegister right_reg = i.InputDoubleRegister(1);
- DwVfpRegister result_reg = i.OutputDoubleRegister();
- Label result_is_nan, return_left, return_right, check_zero, done;
- __ VFPCompareAndSetFlags(left_reg, right_reg);
- __ b(mi, &return_left);
- __ b(gt, &return_right);
- __ b(vs, &result_is_nan);
- // Left equals right => check for -0.
- __ VFPCompareAndSetFlags(left_reg, 0.0);
- if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
- __ b(ne, &done); // left == right != 0.
- } else {
- __ b(ne, &return_left); // left == right != 0.
- }
- // At this point, both left and right are either 0 or -0.
- // We could use a single 'vorr' instruction here if we had NEON support.
- // The algorithm is: -((-L) + (-R)), which in case of L and R being
- // different registers is most efficiently expressed as -((-L) - R).
- __ vneg(left_reg, left_reg);
- if (left_reg.is(right_reg)) {
- __ vadd(result_reg, left_reg, right_reg);
+ DwVfpRegister result = i.OutputDoubleRegister();
+ DwVfpRegister left = i.InputDoubleRegister(0);
+ DwVfpRegister right = i.InputDoubleRegister(1);
+ if (left.is(right)) {
+ __ Move(result, left);
} else {
- __ vsub(result_reg, left_reg, right_reg);
+ auto ool = new (zone()) OutOfLineFloat64Min(this, result, left, right);
+ __ FloatMin(result, left, right, ool->entry());
+ __ bind(ool->exit());
}
- __ vneg(result_reg, result_reg);
- __ b(&done);
- __ bind(&result_is_nan);
- __ vadd(result_reg, left_reg, right_reg);
- __ b(&done);
- __ bind(&return_right);
- __ Move(result_reg, right_reg);
- if (!left_reg.is(result_reg)) __ b(&done);
- __ bind(&return_left);
- __ Move(result_reg, left_reg);
- __ bind(&done);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmFloat64SilenceNaN: {
@@ -1679,7 +1639,8 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+ SourcePosition pos) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
@@ -1688,7 +1649,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
__ CheckConstPool(false, false);
return kSuccess;
@@ -1967,33 +1928,31 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ vstr(temp_1, src);
} else if (source->IsFPRegister()) {
LowDwVfpRegister temp = kScratchDoubleReg;
- DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- DwVfpRegister dst = g.ToDoubleRegister(destination);
- __ Move(temp, src);
- __ Move(src, dst);
- __ Move(dst, temp);
- } else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ Move(temp, src);
- __ vldr(src, dst);
- __ vstr(temp, dst);
- }
+ DwVfpRegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ DwVfpRegister dst = g.ToDoubleRegister(destination);
+ __ vswp(src, dst);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(temp, src);
+ __ vldr(src, dst);
+ __ vstr(temp, dst);
+ }
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
LowDwVfpRegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
MemOperand dst0 = g.ToMemOperand(destination);
- MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
- MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
- __ vldr(temp_1, dst0); // Save destination in temp_1.
- __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ str(temp_0, dst0);
- __ ldr(temp_0, src1);
- __ str(temp_0, dst1);
- __ vstr(temp_1, src0);
+ MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+ MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+ __ vldr(temp_1, dst0); // Save destination in temp_1.
+ __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ str(temp_0, dst0);
+ __ ldr(temp_0, src1);
+ __ str(temp_0, dst1);
+ __ vstr(temp_1, src0);
} else {
// No other combinations are possible.
UNREACHABLE();
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 4b0b6afb44..ceb5b2507f 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -252,14 +252,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.Label(cont->false_block());
}
- if (cont->IsDeoptimize()) {
- // If we can deoptimize as a result of the binop, we need to make sure that
- // the deopt inputs are not overwritten by the binop result. One way
- // to achieve that is to declare the output register as same-as-first.
- outputs[output_count++] = g.DefineSameAsFirst(node);
- } else {
- outputs[output_count++] = g.DefineAsRegister(node);
- }
+ outputs[output_count++] = g.DefineAsRegister(node);
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
@@ -419,6 +412,10 @@ void InstructionSelector::VisitLoad(Node* node) {
EmitLoad(this, opcode, &output, base, index);
}
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitStore(Node* node) {
ArmOperandGenerator g(this);
@@ -431,7 +428,7 @@ void InstructionSelector::VisitStore(Node* node) {
MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ DCHECK(CanBeTaggedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -1516,46 +1513,55 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
VisitRR(this, kArmVrintmF32, node);
}
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
VisitRR(this, kArmVrintmF64, node);
}
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
VisitRR(this, kArmVrintpF32, node);
}
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
VisitRR(this, kArmVrintpF64, node);
}
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
VisitRR(this, kArmVrintzF32, node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
VisitRR(this, kArmVrintzF64, node);
}
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
VisitRR(this, kArmVrintaF64, node);
}
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
VisitRR(this, kArmVrintnF32, node);
}
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
VisitRR(this, kArmVrintnF64, node);
}
@@ -1965,6 +1971,10 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
break;
}
+ if (user->opcode() == IrOpcode::kWord32Equal) {
+ return VisitWordCompare(selector, user, cont);
+ }
+
// Continuation could not be combined with a compare, emit compare against 0.
ArmOperandGenerator g(selector);
InstructionCode const opcode =
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 35f7e43fdc..f543b18682 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -6,7 +6,7 @@
#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
@@ -766,9 +766,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDebugBreak:
__ Debug("kArchDebugBreak", 0, BREAK);
break;
- case kArchImpossible:
- __ Abort(kConversionFromImpossibleValue);
- break;
case kArchComment: {
Address comment_string = i.InputExternalReference(0).address();
__ RecordComment(reinterpret_cast<const char*>(comment_string));
@@ -783,8 +780,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result =
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result = AssembleDeoptimizerCall(
+ deopt_state_id, bailout_type, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -1755,13 +1752,14 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+ SourcePosition pos) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -1956,10 +1954,14 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Fmov(dst, src.ToFloat32());
} else {
DCHECK(destination->IsFPStackSlot());
- UseScratchRegisterScope scope(masm());
- FPRegister temp = scope.AcquireS();
- __ Fmov(temp, src.ToFloat32());
- __ Str(temp, g.ToMemOperand(destination, masm()));
+ if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+ __ Str(wzr, g.ToMemOperand(destination, masm()));
+ } else {
+ UseScratchRegisterScope scope(masm());
+ FPRegister temp = scope.AcquireS();
+ __ Fmov(temp, src.ToFloat32());
+ __ Str(temp, g.ToMemOperand(destination, masm()));
+ }
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
@@ -1968,10 +1970,14 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Fmov(dst, src.ToFloat64());
} else {
DCHECK(destination->IsFPStackSlot());
- UseScratchRegisterScope scope(masm());
- FPRegister temp = scope.AcquireD();
- __ Fmov(temp, src.ToFloat64());
- __ Str(temp, g.ToMemOperand(destination, masm()));
+ if (bit_cast<int64_t>(src.ToFloat64()) == 0) {
+ __ Str(xzr, g.ToMemOperand(destination, masm()));
+ } else {
+ UseScratchRegisterScope scope(masm());
+ FPRegister temp = scope.AcquireD();
+ __ Fmov(temp, src.ToFloat64());
+ __ Str(temp, g.ToMemOperand(destination, masm()));
+ }
}
}
} else if (source->IsFPRegister()) {
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 9bc5385d43..da27be8626 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -434,24 +434,18 @@ void VisitBinop(InstructionSelector* selector, Node* node,
} else if (TryMatchAnyShift(selector, node, right_node, &opcode,
!is_add_sub)) {
Matcher m_shift(right_node);
- inputs[input_count++] = cont->IsDeoptimize()
- ? g.UseRegister(left_node)
- : g.UseRegisterOrImmediateZero(left_node);
+ inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
} else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
!is_add_sub)) {
if (must_commute_cond) cont->Commute();
Matcher m_shift(left_node);
- inputs[input_count++] = cont->IsDeoptimize()
- ? g.UseRegister(right_node)
- : g.UseRegisterOrImmediateZero(right_node);
+ inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
} else {
- inputs[input_count++] = cont->IsDeoptimize()
- ? g.UseRegister(left_node)
- : g.UseRegisterOrImmediateZero(left_node);
+ inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(right_node);
}
@@ -461,14 +455,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
}
if (!IsComparisonField::decode(properties)) {
- if (cont->IsDeoptimize()) {
- // If we can deoptimize as a result of the binop, we need to make sure
- // that the deopt inputs are not overwritten by the binop result. One way
- // to achieve that is to declare the output register as same-as-first.
- outputs[output_count++] = g.DefineSameAsFirst(node);
- } else {
- outputs[output_count++] = g.DefineAsRegister(node);
- }
+ outputs[output_count++] = g.DefineAsRegister(node);
}
if (cont->IsSet()) {
@@ -606,6 +593,10 @@ void InstructionSelector::VisitLoad(Node* node) {
EmitLoad(this, node, opcode, immediate_mode, rep);
}
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitStore(Node* node) {
Arm64OperandGenerator g(this);
@@ -619,7 +610,7 @@ void InstructionSelector::VisitStore(Node* node) {
// TODO(arm64): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ DCHECK(CanBeTaggedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -2128,11 +2119,101 @@ void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelector* selector,
}
}
+// Map {cond} to kEqual or kNotEqual, so that we can select
+// either TBZ or TBNZ when generating code for:
+// (x cmp 0), b.{cond}
+FlagsCondition MapForTbz(FlagsCondition cond) {
+ switch (cond) {
+ case kSignedLessThan: // generate TBNZ
+ return kNotEqual;
+ case kSignedGreaterThanOrEqual: // generate TBZ
+ return kEqual;
+ default:
+ UNREACHABLE();
+ return cond;
+ }
+}
+
+// Map {cond} to kEqual or kNotEqual, so that we can select
+// either CBZ or CBNZ when generating code for:
+// (x cmp 0), b.{cond}
+FlagsCondition MapForCbz(FlagsCondition cond) {
+ switch (cond) {
+ case kEqual: // generate CBZ
+ case kNotEqual: // generate CBNZ
+ return cond;
+ case kUnsignedLessThanOrEqual: // generate CBZ
+ return kEqual;
+ case kUnsignedGreaterThan: // generate CBNZ
+ return kNotEqual;
+ default:
+ UNREACHABLE();
+ return cond;
+ }
+}
+
+// Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node}
+// against zero, depending on the condition.
+bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
+ FlagsCondition cond, FlagsContinuation* cont) {
+ Int32BinopMatcher m_user(user);
+ USE(m_user);
+ DCHECK(m_user.right().Is(0) || m_user.left().Is(0));
+
+ // Only handle branches.
+ if (!cont->IsBranch()) return false;
+
+ switch (cond) {
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual: {
+ Arm64OperandGenerator g(selector);
+ cont->Overwrite(MapForTbz(cond));
+ Int32Matcher m(node);
+ if (m.IsFloat64ExtractHighWord32() && selector->CanCover(user, node)) {
+ // SignedLessThan(Float64ExtractHighWord32(x), 0) and
+ // SignedGreaterThanOrEqual(Float64ExtractHighWord32(x), 0) essentially
+ // check the sign bit of a 64-bit floating point value.
+ InstructionOperand temp = g.TempRegister();
+ selector->Emit(kArm64U64MoveFloat64, temp,
+ g.UseRegister(node->InputAt(0)));
+ selector->Emit(cont->Encode(kArm64TestAndBranch), g.NoOutput(), temp,
+ g.TempImmediate(63), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ return true;
+ }
+ selector->Emit(cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
+ g.UseRegister(node), g.TempImmediate(31),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ return true;
+ }
+ case kEqual:
+ case kNotEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan: {
+ Arm64OperandGenerator g(selector);
+ cont->Overwrite(MapForCbz(cond));
+ selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
+ g.UseRegister(node), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ return true;
+ }
+ default:
+ return false;
+ }
+}
+
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
ArchOpcode opcode = kArm64Cmp32;
FlagsCondition cond = cont->condition();
+ if (m.right().Is(0)) {
+ if (TryEmitCbzOrTbz(selector, m.left().node(), node, cond, cont)) return;
+ } else if (m.left().Is(0)) {
+ FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
+ if (TryEmitCbzOrTbz(selector, m.right().node(), node, commuted_cond, cont))
+ return;
+ }
ImmediateMode immediate_mode = kArithmeticImm;
if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32And())) {
// Emit flag setting add/and instructions for comparisons against zero.
@@ -2145,14 +2226,18 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
(m.right().IsInt32Add() || m.right().IsWord32And())) {
// Same as above, but we need to commute the condition before we
// continue with the rest of the checks.
- cond = CommuteFlagsCondition(cond);
- if (CanUseFlagSettingBinop(cond)) {
+ FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
+ if (CanUseFlagSettingBinop(commuted_cond)) {
Node* binop = m.right().node();
MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
- cond, cont, &immediate_mode);
+ commuted_cond, cont,
+ &immediate_mode);
}
- } else if (m.right().IsInt32Sub()) {
+ } else if (m.right().IsInt32Sub() && (cond == kEqual || cond == kNotEqual)) {
// Select negated compare for comparisons with negated right input.
+ // Only do this for kEqual and kNotEqual, which do not depend on the
+ // C and V flags, as those flags will be different with CMN when the
+ // right-hand side of the original subtraction is INT_MIN.
Node* sub = m.right().node();
Int32BinopMatcher msub(sub);
if (msub.left().Is(0)) {
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index 0f1fb291eb..b292a2e49e 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -4,7 +4,9 @@
#include "src/compiler/ast-graph-builder.h"
+#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/control-builders.h"
@@ -16,7 +18,6 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/state-values-utils.h"
#include "src/compiler/type-hint-analyzer.h"
-#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -410,14 +411,15 @@ class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
TryFinallyBuilder* control_;
};
-
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph, LoopAssignmentAnalysis* loop,
+ JSGraph* jsgraph, float invocation_frequency,
+ LoopAssignmentAnalysis* loop,
TypeHintAnalysis* type_hint_analysis)
: isolate_(info->isolate()),
local_zone_(local_zone),
info_(info),
jsgraph_(jsgraph),
+ invocation_frequency_(invocation_frequency),
environment_(nullptr),
ast_context_(nullptr),
globals_(0, local_zone),
@@ -535,12 +537,11 @@ bool AstGraphBuilder::CreateGraph(bool stack_check) {
// TODO(mstarzinger): For now we cannot assume that the {this} parameter is
// not {the_hole}, because for derived classes {this} has a TDZ and the
// JSConstructStubForDerived magically passes {the_hole} as a receiver.
- if (scope->has_this_declaration() && scope->receiver()->is_const_mode()) {
+ if (scope->has_this_declaration() && scope->receiver()->mode() == CONST) {
env.RawParameterBind(0, jsgraph()->TheHoleConstant());
}
- // Build local context only if there are context allocated variables.
- if (scope->num_heap_slots() > 0) {
+ if (scope->NeedsContext()) {
// Push a new inner context scope for the current activation.
Node* inner_context = BuildLocalActivationContext(GetFunctionContext());
ContextScope top_context(this, scope, inner_context);
@@ -573,9 +574,8 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
BuildArgumentsObject(scope->arguments());
// Build rest arguments array if it is used.
- int rest_index;
- Variable* rest_parameter = scope->rest_parameter(&rest_index);
- BuildRestArgumentsArray(rest_parameter, rest_index);
+ Variable* rest_parameter = scope->rest_parameter();
+ BuildRestArgumentsArray(rest_parameter);
// Build assignment to {.this_function} variable if it is used.
BuildThisFunctionVariable(scope->this_function_var());
@@ -629,8 +629,7 @@ void AstGraphBuilder::ClearNonLiveSlotsInFrameStates() {
// Gets the bailout id just before reading a variable proxy, but only for
// unallocated variables.
static BailoutId BeforeId(VariableProxy* proxy) {
- return proxy->var()->IsUnallocatedOrGlobalSlot() ? proxy->BeforeId()
- : BailoutId::None();
+ return proxy->var()->IsUnallocated() ? proxy->BeforeId() : BailoutId::None();
}
static const char* GetDebugParameterName(Zone* zone, DeclarationScope* scope,
@@ -788,6 +787,10 @@ AstGraphBuilder::Environment::CopyAsUnreachable() {
return env;
}
+AstGraphBuilder::Environment* AstGraphBuilder::Environment::CopyForOsrEntry() {
+ return new (zone())
+ Environment(this, builder_->liveness_analyzer()->NewBlock());
+}
AstGraphBuilder::Environment*
AstGraphBuilder::Environment::CopyAndShareLiveness() {
@@ -802,8 +805,15 @@ AstGraphBuilder::Environment::CopyAndShareLiveness() {
AstGraphBuilder::Environment* AstGraphBuilder::Environment::CopyForLoop(
BitVector* assigned, bool is_osr) {
- PrepareForLoop(assigned, is_osr);
- return CopyAndShareLiveness();
+ PrepareForLoop(assigned);
+ Environment* loop = CopyAndShareLiveness();
+ if (is_osr) {
+ // Create and merge the OSR entry if necessary.
+ Environment* osr_env = CopyForOsrEntry();
+ osr_env->PrepareForOsrEntry();
+ loop->Merge(osr_env);
+ }
+ return loop;
}
@@ -1085,7 +1095,6 @@ void AstGraphBuilder::Visit(Expression* expr) {
void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
Variable* variable = decl->proxy()->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
@@ -1125,7 +1134,6 @@ void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
Variable* variable = decl->proxy()->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
decl->fun(), info()->script(), info());
@@ -1240,7 +1248,8 @@ void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
VisitForValue(stmt->expression());
Node* value = environment()->Pop();
Node* object = BuildToObject(value, stmt->ToObjectId());
- const Operator* op = javascript()->CreateWithContext();
+ Handle<ScopeInfo> scope_info = stmt->scope()->scope_info();
+ const Operator* op = javascript()->CreateWithContext(scope_info);
Node* context = NewNode(op, object, GetFunctionClosureForContext());
PrepareFrameState(context, stmt->EntryId());
VisitInScope(stmt->statement(), stmt->scope(), context);
@@ -1394,9 +1403,14 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
Node* cache_type = environment()->Peek(3);
Node* object = environment()->Peek(4);
- // Check loop termination condition.
- Node* exit_cond = NewNode(javascript()->ForInDone(), index, cache_length);
- for_loop.BreakWhen(exit_cond);
+ // Check loop termination condition (we know that the {index} is always
+ // in Smi range, so we can just set the hint on the comparison below).
+ PrepareEagerCheckpoint(stmt->EntryId());
+ Node* exit_cond =
+ NewNode(javascript()->LessThan(CompareOperationHint::kSignedSmall),
+ index, cache_length);
+ PrepareFrameState(exit_cond, BailoutId::None());
+ for_loop.BreakUnless(exit_cond);
// Compute the next enumerated value.
Node* value = NewNode(javascript()->ForInNext(), object, cache_array,
@@ -1424,9 +1438,13 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
test_value.End();
for_loop.EndBody();
- // Increment counter and continue.
+ // Increment counter and continue (we know that the {index} is always
+ // in Smi range, so we can just set the hint on the increment below).
index = environment()->Peek(0);
- index = NewNode(javascript()->ForInStep(), index);
+ PrepareEagerCheckpoint(stmt->IncrementId());
+ index = NewNode(javascript()->Add(BinaryOperationHint::kSignedSmall),
+ index, jsgraph()->OneConstant());
+ PrepareFrameState(index, BailoutId::None());
environment()->Poke(0, index);
}
for_loop.EndLoop();
@@ -1475,7 +1493,8 @@ void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
// Create a catch scope that binds the exception.
Node* exception = try_control.GetExceptionNode();
Handle<String> name = stmt->variable()->name();
- const Operator* op = javascript()->CreateCatchContext(name);
+ Handle<ScopeInfo> scope_info = stmt->scope()->scope_info();
+ const Operator* op = javascript()->CreateCatchContext(name, scope_info);
Node* context = NewNode(op, exception, GetFunctionClosureForContext());
// Evaluate the catch-block.
@@ -1595,7 +1614,7 @@ void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
// Create nodes to store method values into the literal.
for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
+ ClassLiteral::Property* property = expr->properties()->at(i);
environment()->Push(environment()->Peek(property->is_static() ? 1 : 0));
VisitForValue(property->key());
@@ -1620,11 +1639,7 @@ void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
BuildSetHomeObject(value, receiver, property);
switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- case ObjectLiteral::Property::COMPUTED: {
+ case ClassLiteral::Property::METHOD: {
Node* attr = jsgraph()->Constant(DONT_ENUM);
Node* set_function_name =
jsgraph()->Constant(property->NeedsSetFunctionName());
@@ -1634,20 +1649,24 @@ void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
PrepareFrameState(call, BailoutId::None());
break;
}
- case ObjectLiteral::Property::GETTER: {
+ case ClassLiteral::Property::GETTER: {
Node* attr = jsgraph()->Constant(DONT_ENUM);
const Operator* op = javascript()->CallRuntime(
Runtime::kDefineGetterPropertyUnchecked, 4);
NewNode(op, receiver, key, value, attr);
break;
}
- case ObjectLiteral::Property::SETTER: {
+ case ClassLiteral::Property::SETTER: {
Node* attr = jsgraph()->Constant(DONT_ENUM);
const Operator* op = javascript()->CallRuntime(
Runtime::kDefineSetterPropertyUnchecked, 4);
NewNode(op, receiver, key, value, attr);
break;
}
+ case ClassLiteral::Property::FIELD: {
+ UNREACHABLE();
+ break;
+ }
}
}
@@ -1945,8 +1964,8 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// Create nodes to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- int array_index = 0;
- for (; array_index < expr->values()->length(); array_index++) {
+ for (int array_index = 0; array_index < expr->values()->length();
+ array_index++) {
Expression* subexpr = expr->values()->at(array_index);
DCHECK(!subexpr->IsSpread());
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
@@ -1961,26 +1980,6 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
OutputFrameStateCombine::Ignore());
}
- // In case the array literal contains spread expressions it has two parts. The
- // first part is the "static" array which has a literal index is handled
- // above. The second part is the part after the first spread expression
- // (inclusive) and these elements gets appended to the array. Note that the
- // number elements an iterable produces is unknown ahead of time.
- for (; array_index < expr->values()->length(); array_index++) {
- Expression* subexpr = expr->values()->at(array_index);
- DCHECK(!subexpr->IsSpread());
-
- VisitForValue(subexpr);
- {
- Node* value = environment()->Pop();
- Node* array = environment()->Pop();
- const Operator* op = javascript()->CallRuntime(Runtime::kAppendElement);
- Node* result = NewNode(op, array, value);
- PrepareFrameState(result, expr->GetIdForElement(array_index));
- environment()->Push(result);
- }
- }
-
ast_context()->ProduceValue(expr, environment()->Pop());
}
@@ -2447,12 +2446,17 @@ void AstGraphBuilder::VisitCall(Call* expr) {
}
// Create node to perform the function call.
+ float const frequency = ComputeCallFrequency(expr->CallFeedbackICSlot());
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
- const Operator* call = javascript()->CallFunction(
- args->length() + 2, feedback, receiver_hint, expr->tail_call_mode());
+ const Operator* call =
+ javascript()->CallFunction(args->length() + 2, frequency, feedback,
+ receiver_hint, expr->tail_call_mode());
PrepareEagerCheckpoint(possibly_eval ? expr->EvalId() : expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
- environment()->Push(value->InputAt(0)); // The callee passed to the call.
+ // The callee passed to the call, we just need to push something here to
+ // satisfy the bailout location contract. The fullcodegen code will not
+ // ever look at this value, so we just push optimized_out here.
+ environment()->Push(jsgraph()->OptimizedOutConstant());
PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
environment()->Drop(1);
ast_context()->ProduceValue(expr, value);
@@ -2480,7 +2484,7 @@ void AstGraphBuilder::VisitCallSuper(Call* expr) {
// Create node to perform the super call.
const Operator* call =
- javascript()->CallConstruct(args->length() + 2, VectorSlotPair());
+ javascript()->CallConstruct(args->length() + 2, 0.0f, VectorSlotPair());
Node* value = ProcessArguments(call, args->length() + 2);
PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
ast_context()->ProduceValue(expr, value);
@@ -2498,9 +2502,10 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
environment()->Push(environment()->Peek(args->length()));
// Create node to perform the construct call.
+ float const frequency = ComputeCallFrequency(expr->CallNewFeedbackSlot());
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallNewFeedbackSlot());
const Operator* call =
- javascript()->CallConstruct(args->length() + 2, feedback);
+ javascript()->CallConstruct(args->length() + 2, frequency, feedback);
Node* value = ProcessArguments(call, args->length() + 2);
PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
ast_context()->ProduceValue(expr, value);
@@ -3086,7 +3091,7 @@ uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
DCHECK_EQ(DYNAMIC_GLOBAL, variable->mode());
uint32_t check_depths = 0;
for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
- if (s->num_heap_slots() <= 0) continue;
+ if (!s->NeedsContext()) continue;
if (!s->calls_sloppy_eval()) continue;
int depth = current_scope()->ContextChainLength(s);
if (depth > kMaxCheckDepth) return kFullCheckRequired;
@@ -3100,7 +3105,7 @@ uint32_t AstGraphBuilder::ComputeBitsetForDynamicContext(Variable* variable) {
DCHECK_EQ(DYNAMIC_LOCAL, variable->mode());
uint32_t check_depths = 0;
for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
- if (s->num_heap_slots() <= 0) continue;
+ if (!s->NeedsContext()) continue;
if (!s->calls_sloppy_eval() && s != variable->scope()) continue;
int depth = current_scope()->ContextChainLength(s);
if (depth > kMaxCheckDepth) return kFullCheckRequired;
@@ -3110,6 +3115,13 @@ uint32_t AstGraphBuilder::ComputeBitsetForDynamicContext(Variable* variable) {
return check_depths;
}
+float AstGraphBuilder::ComputeCallFrequency(FeedbackVectorSlot slot) const {
+ if (slot.IsInvalid()) return 0.0f;
+ Handle<TypeFeedbackVector> feedback_vector(
+ info()->closure()->feedback_vector(), isolate());
+ CallICNexus nexus(feedback_vector, slot);
+ return nexus.ComputeCallFrequency() * invocation_frequency_;
+}
Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
DCHECK(environment()->stack_height() >= arity);
@@ -3171,7 +3183,7 @@ Node* AstGraphBuilder::BuildLocalScriptContext(Scope* scope) {
DCHECK(scope->is_script_scope());
// Allocate a new local context.
- Handle<ScopeInfo> scope_info = scope->GetScopeInfo(isolate());
+ Handle<ScopeInfo> scope_info = scope->scope_info();
const Operator* op = javascript()->CreateScriptContext(scope_info);
Node* local_context = NewNode(op, GetFunctionClosure());
PrepareFrameState(local_context, BailoutId::ScriptContext(),
@@ -3185,7 +3197,7 @@ Node* AstGraphBuilder::BuildLocalBlockContext(Scope* scope) {
DCHECK(scope->is_block_scope());
// Allocate a new local context.
- Handle<ScopeInfo> scope_info = scope->GetScopeInfo(isolate());
+ Handle<ScopeInfo> scope_info = scope->scope_info();
const Operator* op = javascript()->CreateBlockContext(scope_info);
Node* local_context = NewNode(op, GetFunctionClosureForContext());
@@ -3213,8 +3225,7 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
return object;
}
-
-Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest, int index) {
+Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest) {
if (rest == nullptr) return nullptr;
// Allocate and initialize a new arguments object.
@@ -3321,7 +3332,6 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
TypeofMode typeof_mode) {
Node* the_hole = jsgraph()->TheHoleConstant();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
Handle<Name> name = variable->name();
@@ -3383,7 +3393,6 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
BailoutId bailout_id,
OutputFrameStateCombine combine) {
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
@@ -3422,7 +3431,6 @@ Node* AstGraphBuilder::BuildVariableAssignment(
Node* the_hole = jsgraph()->TheHoleConstant();
VariableMode mode = variable->mode();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
Handle<Name> name = variable->name();
@@ -3433,15 +3441,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
// Local var, const, or let variable.
- if (mode == CONST_LEGACY && op != Token::INIT) {
- // Non-initializing assignment to legacy const is
- // - exception in strict mode.
- // - ignored in sloppy mode.
- if (is_strict(language_mode())) {
- return BuildThrowConstAssignError(bailout_id);
- }
- return value;
- } else if (mode == LET && op == Token::INIT) {
+ if (mode == LET && op == Token::INIT) {
// No initialization check needed because scoping guarantees it. Note
// that we still perform a lookup to keep the variable live, because
// baseline code might contain debug code that inspects the variable.
@@ -3464,6 +3464,16 @@ Node* AstGraphBuilder::BuildVariableAssignment(
if (current->op() != the_hole->op() && variable->is_this()) {
value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
}
+ } else if (mode == CONST && op != Token::INIT &&
+ variable->is_sloppy_function_name()) {
+ // Non-initializing assignment to sloppy function names is
+ // - exception in strict mode.
+ // - ignored in sloppy mode.
+ DCHECK(!variable->binding_needs_init());
+ if (variable->throw_on_const_assignment(language_mode())) {
+ return BuildThrowConstAssignError(bailout_id);
+ }
+ return value;
} else if (mode == CONST && op != Token::INIT) {
if (variable->binding_needs_init()) {
Node* current = environment()->Lookup(variable);
@@ -3481,16 +3491,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
case VariableLocation::CONTEXT: {
// Context variable (potentially up the context chain).
int depth = current_scope()->ContextChainLength(variable->scope());
- if (mode == CONST_LEGACY && op != Token::INIT) {
- // Non-initializing assignment to legacy const is
- // - exception in strict mode.
- // - ignored in sloppy mode.
- if (is_strict(language_mode())) {
- return BuildThrowConstAssignError(bailout_id);
- }
- return value;
- } else if (mode == LET && op != Token::INIT &&
- variable->binding_needs_init()) {
+ if (mode == LET && op != Token::INIT && variable->binding_needs_init()) {
// Perform an initialization check for let declared variables.
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
@@ -3506,6 +3507,16 @@ Node* AstGraphBuilder::BuildVariableAssignment(
Node* current = NewNode(op, current_context());
value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
}
+ } else if (mode == CONST && op != Token::INIT &&
+ variable->is_sloppy_function_name()) {
+ // Non-initializing assignment to sloppy function names is
+ // - exception in strict mode.
+ // - ignored in sloppy mode.
+ DCHECK(!variable->binding_needs_init());
+ if (variable->throw_on_const_assignment(language_mode())) {
+ return BuildThrowConstAssignError(bailout_id);
+ }
+ return value;
} else if (mode == CONST && op != Token::INIT) {
if (variable->binding_needs_init()) {
const Operator* op =
@@ -3688,9 +3699,8 @@ Node* AstGraphBuilder::BuildToObject(Node* input, BailoutId bailout_id) {
return object;
}
-
Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
- ObjectLiteralProperty* property,
+ LiteralProperty* property,
int slot_number) {
Expression* expr = property->value();
if (!FunctionLiteral::NeedsHomeObject(expr)) return value;
@@ -3989,8 +3999,8 @@ Node* AstGraphBuilder::TryFastToName(Node* input) {
bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
if (info()->osr_ast_id() == stmt->OsrEntryId()) {
- info()->set_osr_expr_stack_height(std::max(
- environment()->stack_height(), info()->osr_expr_stack_height()));
+ DCHECK_EQ(-1, info()->osr_expr_stack_height());
+ info()->set_osr_expr_stack_height(environment()->stack_height());
return true;
}
return false;
@@ -4183,9 +4193,39 @@ void AstGraphBuilder::Environment::Merge(Environment* other) {
}
}
+void AstGraphBuilder::Environment::PrepareForOsrEntry() {
+ int size = static_cast<int>(values()->size());
+ Graph* graph = builder_->graph();
+
+ // Set the control and effect to the OSR loop entry.
+ Node* osr_loop_entry = graph->NewNode(builder_->common()->OsrLoopEntry(),
+ graph->start(), graph->start());
+ UpdateControlDependency(osr_loop_entry);
+ UpdateEffectDependency(osr_loop_entry);
+ // Set OSR values.
+ for (int i = 0; i < size; ++i) {
+ values()->at(i) =
+ graph->NewNode(builder_->common()->OsrValue(i), osr_loop_entry);
+ }
+
+ // Set the contexts.
+ // The innermost context is the OSR value, and the outer contexts are
+ // reconstructed by dynamically walking up the context chain.
+ Node* osr_context = nullptr;
+ const Operator* op =
+ builder_->javascript()->LoadContext(0, Context::PREVIOUS_INDEX, true);
+ const Operator* op_inner =
+ builder_->common()->OsrValue(Linkage::kOsrContextSpillSlotIndex);
+ int last = static_cast<int>(contexts()->size() - 1);
+ for (int i = last; i >= 0; i--) {
+ osr_context = (i == last) ? graph->NewNode(op_inner, osr_loop_entry)
+ : graph->NewNode(op, osr_context, osr_context,
+ osr_loop_entry);
+ contexts()->at(i) = osr_context;
+ }
+}
-void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned,
- bool is_osr) {
+void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned) {
int size = static_cast<int>(values()->size());
Node* control = builder_->NewLoop();
@@ -4220,40 +4260,6 @@ void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned,
contexts()->at(i) = builder_->NewPhi(1, context, control);
}
}
-
- if (is_osr) {
- // Merge OSR values as inputs to the phis of the loop.
- Graph* graph = builder_->graph();
- Node* osr_loop_entry = builder_->graph()->NewNode(
- builder_->common()->OsrLoopEntry(), graph->start(), graph->start());
-
- builder_->MergeControl(control, osr_loop_entry);
- builder_->MergeEffect(effect, osr_loop_entry, control);
-
- for (int i = 0; i < size; ++i) {
- Node* value = values()->at(i);
- Node* osr_value =
- graph->NewNode(builder_->common()->OsrValue(i), osr_loop_entry);
- values()->at(i) = builder_->MergeValue(value, osr_value, control);
- }
-
- // Rename all the contexts in the environment.
- // The innermost context is the OSR value, and the outer contexts are
- // reconstructed by dynamically walking up the context chain.
- Node* osr_context = nullptr;
- const Operator* op =
- builder_->javascript()->LoadContext(0, Context::PREVIOUS_INDEX, true);
- const Operator* op_inner =
- builder_->common()->OsrValue(Linkage::kOsrContextSpillSlotIndex);
- int last = static_cast<int>(contexts()->size() - 1);
- for (int i = last; i >= 0; i--) {
- Node* context = contexts()->at(i);
- osr_context = (i == last) ? graph->NewNode(op_inner, osr_loop_entry)
- : graph->NewNode(op, osr_context, osr_context,
- osr_loop_entry);
- contexts()->at(i) = builder_->MergeValue(context, osr_context, control);
- }
- }
}
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index bd307ba29a..27f2c9b4c6 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -37,6 +37,7 @@ class TypeHintAnalysis;
class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
public:
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
+ float invocation_frequency,
LoopAssignmentAnalysis* loop_assignment = nullptr,
TypeHintAnalysis* type_hint_analysis = nullptr);
virtual ~AstGraphBuilder() {}
@@ -80,6 +81,7 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
Zone* local_zone_;
CompilationInfo* info_;
JSGraph* jsgraph_;
+ float const invocation_frequency_;
Environment* environment_;
AstContext* ast_context_;
@@ -264,6 +266,9 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
uint32_t ComputeBitsetForDynamicGlobal(Variable* variable);
uint32_t ComputeBitsetForDynamicContext(Variable* variable);
+ // Computes the frequency for JSCallFunction and JSCallConstruct nodes.
+ float ComputeCallFrequency(FeedbackVectorSlot slot) const;
+
// ===========================================================================
// The following build methods all generate graph fragments and return one
// resulting node. The operand stack height remains the same, variables and
@@ -278,8 +283,8 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Builder to create an arguments object if it is used.
Node* BuildArgumentsObject(Variable* arguments);
- // Builder to create an array of rest parameters if used
- Node* BuildRestArgumentsArray(Variable* rest, int index);
+ // Builder to create an array of rest parameters if used.
+ Node* BuildRestArgumentsArray(Variable* rest);
// Builder that assigns to the {.this_function} internal variable if needed.
Node* BuildThisFunctionVariable(Variable* this_function_var);
@@ -342,8 +347,7 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Builder for adding the [[HomeObject]] to a value if the value came from a
// function literal and needs a home object. Do nothing otherwise.
Node* BuildSetHomeObject(Node* value, Node* home_object,
- ObjectLiteralProperty* property,
- int slot_number = 0);
+ LiteralProperty* property, int slot_number = 0);
// Builders for error reporting at runtime.
Node* BuildThrowError(Node* exception, BailoutId bailout_id);
@@ -575,6 +579,11 @@ class AstGraphBuilder::Environment : public ZoneObject {
// Copies this environment at a loop header control-flow point.
Environment* CopyForLoop(BitVector* assigned, bool is_osr = false);
+ // Copies this environment for Osr entry. This only produces environment
+ // of the right shape, the caller is responsible for filling in the right
+ // values and dependencies.
+ Environment* CopyForOsrEntry();
+
private:
AstGraphBuilder* builder_;
int parameters_count_;
@@ -604,7 +613,8 @@ class AstGraphBuilder::Environment : public ZoneObject {
bool IsLivenessBlockConsistent();
// Prepare environment to be used as loop header.
- void PrepareForLoop(BitVector* assigned, bool is_osr = false);
+ void PrepareForLoop(BitVector* assigned);
+ void PrepareForOsrEntry();
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index f1469f76f7..82eaeb28a4 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -3,8 +3,8 @@
// found in the LICENSE file.
#include "src/compiler/ast-loop-assignment-analyzer.h"
-#include "src/compiler.h"
-#include "src/parsing/parser.h"
+#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
namespace v8 {
namespace internal {
@@ -122,7 +122,7 @@ void ALAA::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
void ALAA::VisitClassLiteral(ClassLiteral* e) {
VisitIfNotNull(e->extends());
VisitIfNotNull(e->constructor());
- ZoneList<ObjectLiteralProperty*>* properties = e->properties();
+ ZoneList<ClassLiteralProperty*>* properties = e->properties();
for (int i = 0; i < properties->length(); i++) {
Visit(properties->at(i)->key());
Visit(properties->at(i)->value());
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.h b/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
index 0893fd1074..44ad7befc6 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
@@ -7,7 +7,7 @@
#include "src/ast/ast.h"
#include "src/bit-vector.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index a966a5b262..40f0a29132 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -6,13 +6,14 @@
#include <sstream>
-#include "src/compiler.h"
+#include "src/compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/schedule.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index a17947a246..d26ff93e3f 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -4,10 +4,14 @@
#include "src/compiler/bytecode-graph-builder.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
#include "src/compiler/bytecode-branch-analysis.h"
#include "src/compiler/linkage.h"
#include "src/compiler/operator-properties.h"
#include "src/interpreter/bytecodes.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -26,6 +30,7 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* LookupAccumulator() const;
Node* LookupRegister(interpreter::Register the_register) const;
+ void MarkAllRegistersLive();
void BindAccumulator(Node* node, FrameStateBeforeAndAfter* states = nullptr);
void BindRegister(interpreter::Register the_register, Node* node,
@@ -42,7 +47,8 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
// Preserve a checkpoint of the environment for the IR graph. Any
// further mutation of the environment will not affect checkpoints.
- Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine);
+ Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine,
+ bool owner_has_exception);
// Returns true if the state values are up to date with the current
// environment.
@@ -57,27 +63,36 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* Context() const { return context_; }
void SetContext(Node* new_context) { context_ = new_context; }
- Environment* CopyForConditional() const;
+ Environment* CopyForConditional();
Environment* CopyForLoop();
+ Environment* CopyForOsrEntry();
void Merge(Environment* other);
- void PrepareForOsr();
+ void PrepareForOsrEntry();
void PrepareForLoopExit(Node* loop);
private:
- explicit Environment(const Environment* copy);
+ Environment(const Environment* copy, LivenessAnalyzerBlock* liveness_block);
void PrepareForLoop();
+
+ enum { kNotCached, kCached };
+
bool StateValuesAreUpToDate(Node** state_values, int offset, int count,
- int output_poke_start, int output_poke_end);
+ int output_poke_start, int output_poke_end,
+ int cached = kNotCached);
bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
void UpdateStateValues(Node** state_values, int offset, int count);
+ void UpdateStateValuesWithCache(Node** state_values, int offset, int count);
int RegisterToValuesIndex(interpreter::Register the_register) const;
+ bool IsLivenessBlockConsistent() const;
+
Zone* zone() const { return builder_->local_zone(); }
Graph* graph() const { return builder_->graph(); }
CommonOperatorBuilder* common() const { return builder_->common(); }
BytecodeGraphBuilder* builder() const { return builder_; }
+ LivenessAnalyzerBlock* liveness_block() const { return liveness_block_; }
const NodeVector* values() const { return &values_; }
NodeVector* values() { return &values_; }
int register_base() const { return register_base_; }
@@ -86,6 +101,7 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
BytecodeGraphBuilder* builder_;
int register_count_;
int parameter_count_;
+ LivenessAnalyzerBlock* liveness_block_;
Node* context_;
Node* control_dependency_;
Node* effect_dependency_;
@@ -109,7 +125,7 @@ class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
output_poke_count_(0) {
BailoutId id_before(builder->bytecode_iterator().current_offset());
frame_state_before_ = builder_->environment()->Checkpoint(
- id_before, OutputFrameStateCombine::Ignore());
+ id_before, OutputFrameStateCombine::Ignore(), false);
id_after_ = BailoutId(id_before.ToInt() +
builder->bytecode_iterator().current_bytecode_size());
// Create an explicit checkpoint node for before the operation.
@@ -136,8 +152,9 @@ class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
// Add the frame state for after the operation.
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node)->opcode());
- Node* frame_state_after =
- builder_->environment()->Checkpoint(id_after_, combine);
+ bool has_exception = NodeProperties::IsExceptionalCall(node);
+ Node* frame_state_after = builder_->environment()->Checkpoint(
+ id_after_, combine, has_exception);
NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
}
@@ -171,6 +188,9 @@ BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
: builder_(builder),
register_count_(register_count),
parameter_count_(parameter_count),
+ liveness_block_(builder->is_liveness_analysis_enabled_
+ ? builder_->liveness_analyzer()->NewBlock()
+ : nullptr),
context_(context),
control_dependency_(control_dependency),
effect_dependency_(control_dependency),
@@ -204,12 +224,13 @@ BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
values()->push_back(undefined_constant);
}
-
BytecodeGraphBuilder::Environment::Environment(
- const BytecodeGraphBuilder::Environment* other)
+ const BytecodeGraphBuilder::Environment* other,
+ LivenessAnalyzerBlock* liveness_block)
: builder_(other->builder_),
register_count_(other->register_count_),
parameter_count_(other->parameter_count_),
+ liveness_block_(liveness_block),
context_(other->context_),
control_dependency_(other->control_dependency_),
effect_dependency_(other->effect_dependency_),
@@ -232,6 +253,10 @@ int BytecodeGraphBuilder::Environment::RegisterToValuesIndex(
}
}
+bool BytecodeGraphBuilder::Environment::IsLivenessBlockConsistent() const {
+ return !builder_->IsLivenessAnalysisEnabled() ==
+ (liveness_block() == nullptr);
+}
Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
return values()->at(accumulator_base_);
@@ -248,10 +273,22 @@ Node* BytecodeGraphBuilder::Environment::LookupRegister(
return builder()->GetNewTarget();
} else {
int values_index = RegisterToValuesIndex(the_register);
+ if (liveness_block() != nullptr && !the_register.is_parameter()) {
+ DCHECK(IsLivenessBlockConsistent());
+ liveness_block()->Lookup(the_register.index());
+ }
return values()->at(values_index);
}
}
+void BytecodeGraphBuilder::Environment::MarkAllRegistersLive() {
+ DCHECK(IsLivenessBlockConsistent());
+ if (liveness_block() != nullptr) {
+ for (int i = 0; i < register_count(); ++i) {
+ liveness_block()->Lookup(i);
+ }
+ }
+}
void BytecodeGraphBuilder::Environment::BindAccumulator(
Node* node, FrameStateBeforeAndAfter* states) {
@@ -271,6 +308,10 @@ void BytecodeGraphBuilder::Environment::BindRegister(
values_index));
}
values()->at(values_index) = node;
+ if (liveness_block() != nullptr && !the_register.is_parameter()) {
+ DCHECK(IsLivenessBlockConsistent());
+ liveness_block()->Bind(the_register.index());
+ }
}
@@ -298,18 +339,41 @@ void BytecodeGraphBuilder::Environment::RecordAfterState(
BytecodeGraphBuilder::Environment*
BytecodeGraphBuilder::Environment::CopyForLoop() {
PrepareForLoop();
- return new (zone()) Environment(this);
+ if (liveness_block() != nullptr) {
+ // Finish the current block before copying.
+ liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
+ }
+ return new (zone()) Environment(this, liveness_block());
}
+BytecodeGraphBuilder::Environment*
+BytecodeGraphBuilder::Environment::CopyForOsrEntry() {
+ return new (zone())
+ Environment(this, builder_->liveness_analyzer()->NewBlock());
+}
BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForConditional() const {
- return new (zone()) Environment(this);
+BytecodeGraphBuilder::Environment::CopyForConditional() {
+ LivenessAnalyzerBlock* copy_liveness_block = nullptr;
+ if (liveness_block() != nullptr) {
+ copy_liveness_block =
+ builder_->liveness_analyzer()->NewBlock(liveness_block());
+ liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
+ }
+ return new (zone()) Environment(this, copy_liveness_block);
}
void BytecodeGraphBuilder::Environment::Merge(
BytecodeGraphBuilder::Environment* other) {
+ if (builder_->is_liveness_analysis_enabled_) {
+ if (GetControlDependency()->opcode() != IrOpcode::kLoop) {
+ liveness_block_ =
+ builder()->liveness_analyzer()->NewBlock(liveness_block());
+ }
+ liveness_block()->AddPredecessor(other->liveness_block());
+ }
+
// Create a merge of the control dependencies of both environments and update
// the current environment's control dependency accordingly.
Node* control = builder()->MergeControl(GetControlDependency(),
@@ -352,34 +416,27 @@ void BytecodeGraphBuilder::Environment::PrepareForLoop() {
builder()->exit_controls_.push_back(terminate);
}
-void BytecodeGraphBuilder::Environment::PrepareForOsr() {
+void BytecodeGraphBuilder::Environment::PrepareForOsrEntry() {
DCHECK_EQ(IrOpcode::kLoop, GetControlDependency()->opcode());
DCHECK_EQ(1, GetControlDependency()->InputCount());
+
Node* start = graph()->start();
- // Create a control node for the OSR entry point and merge it into the loop
- // header. Update the current environment's control dependency accordingly.
+ // Create a control node for the OSR entry point and update the current
+ // environment's dependencies accordingly.
Node* entry = graph()->NewNode(common()->OsrLoopEntry(), start, start);
- Node* control = builder()->MergeControl(GetControlDependency(), entry);
- UpdateControlDependency(control);
-
- // Create a merge of the effect from the OSR entry and the existing effect
- // dependency. Update the current environment's effect dependency accordingly.
- Node* effect = builder()->MergeEffect(GetEffectDependency(), entry, control);
- UpdateEffectDependency(effect);
+ UpdateControlDependency(entry);
+ UpdateEffectDependency(entry);
- // Rename all values in the environment which will extend or introduce Phi
- // nodes to contain the OSR values available at the entry point.
- Node* osr_context = graph()->NewNode(
- common()->OsrValue(Linkage::kOsrContextSpillSlotIndex), entry);
- context_ = builder()->MergeValue(context_, osr_context, control);
+ // Create OSR values for each environment value.
+ SetContext(graph()->NewNode(
+ common()->OsrValue(Linkage::kOsrContextSpillSlotIndex), entry));
int size = static_cast<int>(values()->size());
for (int i = 0; i < size; i++) {
int idx = i; // Indexing scheme follows {StandardFrame}, adapt accordingly.
if (i >= register_base()) idx += InterpreterFrameConstants::kExtraSlotCount;
if (i >= accumulator_base()) idx = Linkage::kOsrAccumulatorRegisterIndex;
- Node* osr_value = graph()->NewNode(common()->OsrValue(idx), entry);
- values_[i] = builder()->MergeValue(values_[i], osr_value, control);
+ values()->at(i) = graph()->NewNode(common()->OsrValue(idx), entry);
}
}
@@ -434,13 +491,19 @@ void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
}
}
+void BytecodeGraphBuilder::Environment::UpdateStateValuesWithCache(
+ Node** state_values, int offset, int count) {
+ Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
+ *state_values = builder_->state_values_cache_.GetNodeForValues(
+ env_values, static_cast<size_t>(count));
+}
Node* BytecodeGraphBuilder::Environment::Checkpoint(
- BailoutId bailout_id, OutputFrameStateCombine combine) {
- // TODO(rmcilroy): Consider using StateValuesCache for some state values.
+ BailoutId bailout_id, OutputFrameStateCombine combine,
+ bool owner_has_exception) {
UpdateStateValues(&parameters_state_values_, 0, parameter_count());
- UpdateStateValues(&registers_state_values_, register_base(),
- register_count());
+ UpdateStateValuesWithCache(&registers_state_values_, register_base(),
+ register_count());
UpdateStateValues(&accumulator_state_values_, accumulator_base(), 1);
const Operator* op = common()->FrameState(
@@ -450,19 +513,42 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
accumulator_state_values_, Context(), builder()->GetFunctionClosure(),
builder()->graph()->start());
+ if (liveness_block() != nullptr) {
+ // If the owning node has an exception, register the checkpoint to the
+ // predecessor so that the checkpoint is used for both the normal and the
+ // exceptional paths. Yes, this is a terrible hack and we might want
+ // to use an explicit frame state for the exceptional path.
+ if (owner_has_exception) {
+ liveness_block()->GetPredecessor()->Checkpoint(result);
+ } else {
+ liveness_block()->Checkpoint(result);
+ }
+ }
+
return result;
}
-
bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
Node** state_values, int offset, int count, int output_poke_start,
- int output_poke_end) {
+ int output_poke_end, int cached) {
DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
- for (int i = 0; i < count; i++, offset++) {
- if (offset < output_poke_start || offset >= output_poke_end) {
- if ((*state_values)->InputAt(i) != values()->at(offset)) {
- return false;
+ if (cached == kNotCached) {
+ for (int i = 0; i < count; i++, offset++) {
+ if (offset < output_poke_start || offset >= output_poke_end) {
+ if ((*state_values)->InputAt(i) != values()->at(offset)) {
+ return false;
+ }
+ }
+ }
+ } else {
+ for (StateValuesAccess::TypedNode state_value :
+ StateValuesAccess(*state_values)) {
+ if (offset < output_poke_start || offset >= output_poke_end) {
+ if (state_value.node != values()->at(offset)) {
+ return false;
+ }
}
+ ++offset;
}
}
return true;
@@ -478,16 +564,18 @@ bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
output_poke_start, output_poke_end) &&
StateValuesAreUpToDate(&registers_state_values_, register_base(),
register_count(), output_poke_start,
- output_poke_end) &&
+ output_poke_end, kCached) &&
StateValuesAreUpToDate(&accumulator_state_values_, accumulator_base(),
1, output_poke_start, output_poke_end);
}
BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
CompilationInfo* info,
- JSGraph* jsgraph)
+ JSGraph* jsgraph,
+ float invocation_frequency)
: local_zone_(local_zone),
jsgraph_(jsgraph),
+ invocation_frequency_(invocation_frequency),
bytecode_array_(handle(info->shared_info()->bytecode_array())),
exception_handler_table_(
handle(HandlerTable::cast(bytecode_array()->handler_table()))),
@@ -502,7 +590,13 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
current_exception_handler_(0),
input_buffer_size_(0),
input_buffer_(nullptr),
- exit_controls_(local_zone) {}
+ exit_controls_(local_zone),
+ is_liveness_analysis_enabled_(FLAG_analyze_environment_liveness &&
+ info->is_deoptimization_enabled()),
+ state_values_cache_(jsgraph),
+ liveness_analyzer_(
+ static_cast<size_t>(bytecode_array()->register_count()), local_zone) {
+}
Node* BytecodeGraphBuilder::GetNewTarget() {
if (!new_target_.is_set()) {
@@ -556,10 +650,6 @@ VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
}
bool BytecodeGraphBuilder::CreateGraph() {
- // Set up the basic structure of the graph. Outputs for {Start} are
- // the formal parameters (including the receiver) plus context and
- // closure.
-
// Set up the basic structure of the graph. Outputs for {Start} are the formal
// parameters (including the receiver) plus new target, number of arguments,
// context and closure.
@@ -571,10 +661,6 @@ bool BytecodeGraphBuilder::CreateGraph() {
GetFunctionContext());
set_environment(&env);
- // For OSR add an {OsrNormalEntry} as the start of the top-level environment.
- // It will be replaced with {Dead} after typing and optimizations.
- if (!osr_ast_id_.IsNone()) NewNode(common()->OsrNormalEntry());
-
VisitBytecodes();
// Finish the basic structure of the graph.
@@ -584,9 +670,25 @@ bool BytecodeGraphBuilder::CreateGraph() {
Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
graph()->SetEnd(end);
+ ClearNonLiveSlotsInFrameStates();
+
return true;
}
+void BytecodeGraphBuilder::ClearNonLiveSlotsInFrameStates() {
+ if (!IsLivenessAnalysisEnabled()) {
+ return;
+ }
+ NonLiveFrameStateSlotReplacer replacer(
+ &state_values_cache_, jsgraph()->OptimizedOutConstant(),
+ liveness_analyzer()->local_count(), local_zone());
+ liveness_analyzer()->Run(&replacer);
+ if (FLAG_trace_environment_liveness) {
+ OFStream os(stdout);
+ liveness_analyzer()->Print(os);
+ }
+}
+
void BytecodeGraphBuilder::VisitBytecodes() {
BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
BytecodeLoopAnalysis loop_analysis(bytecode_array(), &analysis, local_zone());
@@ -596,12 +698,14 @@ void BytecodeGraphBuilder::VisitBytecodes() {
set_loop_analysis(&loop_analysis);
interpreter::BytecodeArrayIterator iterator(bytecode_array());
set_bytecode_iterator(&iterator);
+ BuildOSRNormalEntryPoint();
while (!iterator.done()) {
int current_offset = iterator.current_offset();
EnterAndExitExceptionHandlers(current_offset);
SwitchToMergeEnvironment(current_offset);
if (environment() != nullptr) {
BuildLoopHeaderEnvironment(current_offset);
+ BuildOSRLoopEntryPoint(current_offset);
switch (iterator.current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
@@ -682,9 +786,9 @@ void BytecodeGraphBuilder::VisitMov() {
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
}
-Node* BytecodeGraphBuilder::BuildLoadGlobal(TypeofMode typeof_mode) {
- VectorSlotPair feedback =
- CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(0));
+Node* BytecodeGraphBuilder::BuildLoadGlobal(uint32_t feedback_slot_index,
+ TypeofMode typeof_mode) {
+ VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index);
DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
feedback_vector()->GetKind(feedback.slot()));
Handle<Name> name(feedback_vector()->GetName(feedback.slot()));
@@ -694,20 +798,23 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(TypeofMode typeof_mode) {
void BytecodeGraphBuilder::VisitLdaGlobal() {
FrameStateBeforeAndAfter states(this);
- Node* node = BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
+ Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
+ TypeofMode::NOT_INSIDE_TYPEOF);
environment()->BindAccumulator(node, &states);
}
void BytecodeGraphBuilder::VisitLdrGlobal() {
FrameStateBeforeAndAfter states(this);
- Node* node = BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
+ Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
+ TypeofMode::NOT_INSIDE_TYPEOF);
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), node,
&states);
}
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
FrameStateBeforeAndAfter states(this);
- Node* node = BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
+ Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
+ TypeofMode::INSIDE_TYPEOF);
environment()->BindAccumulator(node, &states);
}
@@ -733,14 +840,12 @@ void BytecodeGraphBuilder::VisitStaGlobalStrict() {
}
Node* BytecodeGraphBuilder::BuildLoadContextSlot() {
- // TODO(mythria): LoadContextSlots are unrolled by the required depth when
- // generating bytecode. Hence the value of depth is always 0. Update this
- // code, when the implementation changes.
// TODO(mythria): immutable flag is also set to false. This information is not
// available in bytecode array. update this code when the implementation
// changes.
const Operator* op = javascript()->LoadContext(
- 0, bytecode_iterator().GetIndexOperand(1), false);
+ bytecode_iterator().GetUnsignedImmediateOperand(2),
+ bytecode_iterator().GetIndexOperand(1), false);
Node* context =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
return NewNode(op, context);
@@ -753,15 +858,13 @@ void BytecodeGraphBuilder::VisitLdaContextSlot() {
void BytecodeGraphBuilder::VisitLdrContextSlot() {
Node* node = BuildLoadContextSlot();
- environment()->BindRegister(bytecode_iterator().GetRegisterOperand(2), node);
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3), node);
}
void BytecodeGraphBuilder::VisitStaContextSlot() {
- // TODO(mythria): LoadContextSlots are unrolled by the required depth when
- // generating bytecode. Hence the value of depth is always 0. Update this
- // code, when the implementation changes.
- const Operator* op =
- javascript()->StoreContext(0, bytecode_iterator().GetIndexOperand(1));
+ const Operator* op = javascript()->StoreContext(
+ bytecode_iterator().GetUnsignedImmediateOperand(2),
+ bytecode_iterator().GetIndexOperand(1));
Node* context =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* value = environment()->LookupAccumulator();
@@ -788,6 +891,150 @@ void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeof() {
BuildLdaLookupSlot(TypeofMode::INSIDE_TYPEOF);
}
+BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
+ uint32_t depth) {
+ // Output environment where the context has an extension
+ Environment* slow_environment = nullptr;
+
+ // We only need to check up to the last-but-one depth, because the an eval in
+ // the same scope as the variable itself has no way of shadowing it.
+ for (uint32_t d = 0; d < depth; d++) {
+ Node* extension_slot =
+ NewNode(javascript()->LoadContext(d, Context::EXTENSION_INDEX, false),
+ environment()->Context());
+
+ Node* check_no_extension =
+ NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ extension_slot, jsgraph()->TheHoleConstant());
+
+ NewBranch(check_no_extension);
+ Environment* true_environment = environment()->CopyForConditional();
+
+ {
+ NewIfFalse();
+ // If there is an extension, merge into the slow path.
+ if (slow_environment == nullptr) {
+ slow_environment = environment();
+ NewMerge();
+ } else {
+ slow_environment->Merge(environment());
+ }
+ }
+
+ {
+ set_environment(true_environment);
+ NewIfTrue();
+ // Do nothing on if there is no extension, eventually falling through to
+ // the fast path.
+ }
+ }
+
+ // The depth can be zero, in which case no slow-path checks are built, and the
+ // slow path environment can be null.
+ DCHECK(depth == 0 || slow_environment != nullptr);
+
+ return slow_environment;
+}
+
+void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
+ uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(2);
+
+ // Check if any context in the depth has an extension.
+ Environment* slow_environment = CheckContextExtensions(depth);
+
+ // Fast path, do a context load.
+ {
+ uint32_t slot_index = bytecode_iterator().GetIndexOperand(1);
+
+ const Operator* op = javascript()->LoadContext(depth, slot_index, false);
+ Node* context = environment()->Context();
+ environment()->BindAccumulator(NewNode(op, context));
+ }
+
+ // Only build the slow path if there were any slow-path checks.
+ if (slow_environment != nullptr) {
+ // Add a merge to the fast environment.
+ NewMerge();
+ Environment* fast_environment = environment();
+
+ // Slow path, do a runtime load lookup.
+ set_environment(slow_environment);
+ {
+ FrameStateBeforeAndAfter states(this);
+
+ Node* name = jsgraph()->Constant(
+ bytecode_iterator().GetConstantForIndexOperand(0));
+
+ const Operator* op =
+ javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotInsideTypeof);
+ Node* value = NewNode(op, name);
+ environment()->BindAccumulator(value, &states);
+ }
+
+ fast_environment->Merge(environment());
+ set_environment(fast_environment);
+ }
+}
+
+void BytecodeGraphBuilder::VisitLdaLookupContextSlot() {
+ BuildLdaLookupContextSlot(TypeofMode::NOT_INSIDE_TYPEOF);
+}
+
+void BytecodeGraphBuilder::VisitLdaLookupContextSlotInsideTypeof() {
+ BuildLdaLookupContextSlot(TypeofMode::INSIDE_TYPEOF);
+}
+
+void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
+ uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(2);
+
+ // Check if any context in the depth has an extension.
+ Environment* slow_environment = CheckContextExtensions(depth);
+
+ // Fast path, do a global load.
+ {
+ FrameStateBeforeAndAfter states(this);
+ Node* node =
+ BuildLoadGlobal(bytecode_iterator().GetIndexOperand(1), typeof_mode);
+ environment()->BindAccumulator(node, &states);
+ }
+
+ // Only build the slow path if there were any slow-path checks.
+ if (slow_environment != nullptr) {
+ // Add a merge to the fast environment.
+ NewMerge();
+ Environment* fast_environment = environment();
+
+ // Slow path, do a runtime load lookup.
+ set_environment(slow_environment);
+ {
+ FrameStateBeforeAndAfter states(this);
+
+ Node* name = jsgraph()->Constant(
+ bytecode_iterator().GetConstantForIndexOperand(0));
+
+ const Operator* op =
+ javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotInsideTypeof);
+ Node* value = NewNode(op, name);
+ environment()->BindAccumulator(value, &states);
+ }
+
+ fast_environment->Merge(environment());
+ set_environment(fast_environment);
+ }
+}
+
+void BytecodeGraphBuilder::VisitLdaLookupGlobalSlot() {
+ BuildLdaLookupGlobalSlot(TypeofMode::NOT_INSIDE_TYPEOF);
+}
+
+void BytecodeGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
+ BuildLdaLookupGlobalSlot(TypeofMode::INSIDE_TYPEOF);
+}
+
void BytecodeGraphBuilder::BuildStaLookupSlot(LanguageMode language_mode) {
FrameStateBeforeAndAfter states(this);
Node* value = environment()->LookupAccumulator();
@@ -920,7 +1167,10 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
PretenureFlag tenured =
- bytecode_iterator().GetFlagOperand(1) ? TENURED : NOT_TENURED;
+ interpreter::CreateClosureFlags::PretenuredBit::decode(
+ bytecode_iterator().GetFlagOperand(1))
+ ? TENURED
+ : NOT_TENURED;
const Operator* op = javascript()->CreateClosure(shared_info, tenured);
Node* closure = NewNode(op);
environment()->BindAccumulator(closure);
@@ -936,7 +1186,7 @@ void BytecodeGraphBuilder::VisitCreateBlockContext() {
}
void BytecodeGraphBuilder::VisitCreateFunctionContext() {
- uint32_t slots = bytecode_iterator().GetIndexOperand(0);
+ uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(0);
const Operator* op = javascript()->CreateFunctionContext(slots);
Node* context = NewNode(op, GetFunctionClosure());
environment()->BindAccumulator(context);
@@ -947,9 +1197,11 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() {
Node* exception = environment()->LookupRegister(reg);
Handle<String> name =
Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(1));
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(2));
Node* closure = environment()->LookupAccumulator();
- const Operator* op = javascript()->CreateCatchContext(name);
+ const Operator* op = javascript()->CreateCatchContext(name, scope_info);
Node* context = NewNode(op, exception, closure);
environment()->BindAccumulator(context);
}
@@ -957,8 +1209,10 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() {
void BytecodeGraphBuilder::VisitCreateWithContext() {
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1));
- const Operator* op = javascript()->CreateWithContext();
+ const Operator* op = javascript()->CreateWithContext(scope_info);
Node* context = NewNode(op, object, environment()->LookupAccumulator());
environment()->BindAccumulator(context);
}
@@ -1003,6 +1257,11 @@ void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
bytecode_iterator().GetConstantForIndexOperand(0));
int literal_index = bytecode_iterator().GetIndexOperand(1);
int literal_flags = bytecode_iterator().GetFlagOperand(2);
+ // Disable allocation site mementos. Only unoptimized code will collect
+ // feedback about allocation site. Once the code is optimized we expect the
+ // data to converge. So, we disable allocation site mementos in optimized
+ // code. We can revisit this when we have data to the contrary.
+ literal_flags |= ArrayLiteral::kDisableMementos;
int number_of_elements = constant_elements->length();
const Operator* op = javascript()->CreateLiteralArray(
constant_elements, literal_flags, literal_index, number_of_elements);
@@ -1054,11 +1313,12 @@ void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode) {
// Slot index of 0 is used indicate no feedback slot is available. Assert
// the assumption that slot index 0 is never a valid feedback slot.
STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
- VectorSlotPair feedback =
- CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(3));
+ int const slot_id = bytecode_iterator().GetIndexOperand(3);
+ VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
+ float const frequency = ComputeCallFrequency(slot_id);
const Operator* call = javascript()->CallFunction(
- arg_count + 1, feedback, receiver_hint, tail_call_mode);
+ arg_count + 1, frequency, feedback, receiver_hint, tail_call_mode);
Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
environment()->BindAccumulator(value, &states);
}
@@ -1142,13 +1402,13 @@ Node* BytecodeGraphBuilder::ProcessCallNewArguments(
const Operator* call_new_op, Node* callee, Node* new_target,
interpreter::Register first_arg, size_t arity) {
Node** all = local_zone()->NewArray<Node*>(arity);
- all[0] = new_target;
+ all[0] = callee;
int first_arg_index = first_arg.index();
for (int i = 1; i < static_cast<int>(arity) - 1; ++i) {
all[i] = environment()->LookupRegister(
interpreter::Register(first_arg_index + i - 1));
}
- all[arity - 1] = callee;
+ all[arity - 1] = new_target;
Node* value = MakeNode(call_new_op, static_cast<int>(arity), all, false);
return value;
}
@@ -1158,12 +1418,18 @@ void BytecodeGraphBuilder::VisitNew() {
interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ // Slot index of 0 is used indicate no feedback slot is available. Assert
+ // the assumption that slot index 0 is never a valid feedback slot.
+ STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+ int const slot_id = bytecode_iterator().GetIndexOperand(3);
+ VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
Node* new_target = environment()->LookupAccumulator();
Node* callee = environment()->LookupRegister(callee_reg);
- // TODO(turbofan): Pass the feedback here.
+
+ float const frequency = ComputeCallFrequency(slot_id);
const Operator* call = javascript()->CallConstruct(
- static_cast<int>(arg_count) + 2, VectorSlotPair());
+ static_cast<int>(arg_count) + 2, frequency, feedback);
Node* value = ProcessCallNewArguments(call, callee, new_target, first_arg,
arg_count + 2);
environment()->BindAccumulator(value, &states);
@@ -1207,13 +1473,33 @@ BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
int operand_index) {
FeedbackVectorSlot slot = feedback_vector()->ToSlot(
bytecode_iterator().GetIndexOperand(operand_index));
- DCHECK_EQ(FeedbackVectorSlotKind::GENERAL, feedback_vector()->GetKind(slot));
- Object* feedback = feedback_vector()->Get(slot);
- BinaryOperationHint hint = BinaryOperationHint::kAny;
- if (feedback->IsSmi()) {
- hint = BinaryOperationHintFromFeedback((Smi::cast(feedback))->value());
+ DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC,
+ feedback_vector()->GetKind(slot));
+ BinaryOpICNexus nexus(feedback_vector(), slot);
+ return nexus.GetBinaryOperationFeedback();
+}
+
+// Helper function to create compare operation hint from the recorded type
+// feedback.
+CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
+ int slot_index = bytecode_iterator().GetIndexOperand(1);
+ if (slot_index == 0) {
+ return CompareOperationHint::kAny;
+ }
+ FeedbackVectorSlot slot =
+ feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(1));
+ DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC,
+ feedback_vector()->GetKind(slot));
+ CompareICNexus nexus(feedback_vector(), slot);
+ return nexus.GetCompareOperationFeedback();
+}
+
+float BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
+ if (slot_id >= TypeFeedbackVector::kReservedIndexCount) {
+ CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
+ return nexus.ComputeCallFrequency() * invocation_frequency_;
}
- return hint;
+ return 0.0f;
}
void BytecodeGraphBuilder::VisitAdd() {
@@ -1379,38 +1665,31 @@ void BytecodeGraphBuilder::BuildCompareOp(const Operator* js_op) {
}
void BytecodeGraphBuilder::VisitTestEqual() {
- CompareOperationHint hint = CompareOperationHint::kAny;
- BuildCompareOp(javascript()->Equal(hint));
+ BuildCompareOp(javascript()->Equal(GetCompareOperationHint()));
}
void BytecodeGraphBuilder::VisitTestNotEqual() {
- CompareOperationHint hint = CompareOperationHint::kAny;
- BuildCompareOp(javascript()->NotEqual(hint));
+ BuildCompareOp(javascript()->NotEqual(GetCompareOperationHint()));
}
void BytecodeGraphBuilder::VisitTestEqualStrict() {
- CompareOperationHint hint = CompareOperationHint::kAny;
- BuildCompareOp(javascript()->StrictEqual(hint));
+ BuildCompareOp(javascript()->StrictEqual(GetCompareOperationHint()));
}
void BytecodeGraphBuilder::VisitTestLessThan() {
- CompareOperationHint hint = CompareOperationHint::kAny;
- BuildCompareOp(javascript()->LessThan(hint));
+ BuildCompareOp(javascript()->LessThan(GetCompareOperationHint()));
}
void BytecodeGraphBuilder::VisitTestGreaterThan() {
- CompareOperationHint hint = CompareOperationHint::kAny;
- BuildCompareOp(javascript()->GreaterThan(hint));
+ BuildCompareOp(javascript()->GreaterThan(GetCompareOperationHint()));
}
void BytecodeGraphBuilder::VisitTestLessThanOrEqual() {
- CompareOperationHint hint = CompareOperationHint::kAny;
- BuildCompareOp(javascript()->LessThanOrEqual(hint));
+ BuildCompareOp(javascript()->LessThanOrEqual(GetCompareOperationHint()));
}
void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual() {
- CompareOperationHint hint = CompareOperationHint::kAny;
- BuildCompareOp(javascript()->GreaterThanOrEqual(hint));
+ BuildCompareOp(javascript()->GreaterThanOrEqual(GetCompareOperationHint()));
}
void BytecodeGraphBuilder::VisitTestIn() {
@@ -1444,37 +1723,28 @@ void BytecodeGraphBuilder::VisitJump() { BuildJump(); }
void BytecodeGraphBuilder::VisitJumpConstant() { BuildJump(); }
+void BytecodeGraphBuilder::VisitJumpIfTrue() { BuildJumpIfTrue(); }
-void BytecodeGraphBuilder::VisitJumpIfTrue() {
- BuildJumpIfEqual(jsgraph()->TrueConstant());
-}
-
-void BytecodeGraphBuilder::VisitJumpIfTrueConstant() {
- BuildJumpIfEqual(jsgraph()->TrueConstant());
-}
+void BytecodeGraphBuilder::VisitJumpIfTrueConstant() { BuildJumpIfTrue(); }
-void BytecodeGraphBuilder::VisitJumpIfFalse() {
- BuildJumpIfEqual(jsgraph()->FalseConstant());
-}
+void BytecodeGraphBuilder::VisitJumpIfFalse() { BuildJumpIfFalse(); }
-void BytecodeGraphBuilder::VisitJumpIfFalseConstant() {
- BuildJumpIfEqual(jsgraph()->FalseConstant());
-}
+void BytecodeGraphBuilder::VisitJumpIfFalseConstant() { BuildJumpIfFalse(); }
void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue() {
- BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
+ BuildJumpIfToBooleanTrue();
}
void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstant() {
- BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
+ BuildJumpIfToBooleanTrue();
}
void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse() {
- BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
+ BuildJumpIfToBooleanFalse();
}
void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant() {
- BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
+ BuildJumpIfToBooleanFalse();
}
void BytecodeGraphBuilder::VisitJumpIfNotHole() { BuildJumpIfNotHole(); }
@@ -1499,21 +1769,14 @@ void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant() {
BuildJumpIfEqual(jsgraph()->UndefinedConstant());
}
+void BytecodeGraphBuilder::VisitJumpLoop() { BuildJump(); }
+
void BytecodeGraphBuilder::VisitStackCheck() {
FrameStateBeforeAndAfter states(this);
Node* node = NewNode(javascript()->StackCheck());
environment()->RecordAfterState(node, &states);
}
-void BytecodeGraphBuilder::VisitOsrPoll() {
- // TODO(4764): This should be moved into the {VisitBytecodes} once we merge
- // the polling with existing bytecode. This will also guarantee that we are
- // not missing the OSR entry point, which we wouldn't catch right now.
- if (osr_ast_id_.ToInt() == bytecode_iterator().current_offset()) {
- environment()->PrepareForOsr();
- }
-}
-
void BytecodeGraphBuilder::VisitReturn() {
BuildLoopExitsForFunctionExit();
Node* control =
@@ -1526,6 +1789,7 @@ void BytecodeGraphBuilder::VisitDebugger() {
Node* call =
NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
environment()->BindAccumulator(call, &states);
+ environment()->MarkAllRegistersLive();
}
// We cannot create a graph from the debugger copy of the bytecode array.
@@ -1545,13 +1809,15 @@ void BytecodeGraphBuilder::BuildForInPrepare() {
void BytecodeGraphBuilder::VisitForInPrepare() { BuildForInPrepare(); }
-void BytecodeGraphBuilder::VisitForInDone() {
+void BytecodeGraphBuilder::VisitForInContinue() {
FrameStateBeforeAndAfter states(this);
Node* index =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* cache_length =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
- Node* exit_cond = NewNode(javascript()->ForInDone(), index, cache_length);
+ Node* exit_cond =
+ NewNode(javascript()->LessThan(CompareOperationHint::kSignedSmall), index,
+ cache_length);
environment()->BindAccumulator(exit_cond, &states);
}
@@ -1578,7 +1844,8 @@ void BytecodeGraphBuilder::VisitForInStep() {
FrameStateBeforeAndAfter states(this);
Node* index =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- index = NewNode(javascript()->ForInStep(), index);
+ index = NewNode(javascript()->Add(BinaryOperationHint::kSignedSmall), index,
+ jsgraph()->OneConstant());
environment()->BindAccumulator(index, &states);
}
@@ -1681,6 +1948,28 @@ void BytecodeGraphBuilder::MergeControlToLeaveFunction(Node* exit) {
set_environment(nullptr);
}
+void BytecodeGraphBuilder::BuildOSRLoopEntryPoint(int current_offset) {
+ if (!osr_ast_id_.IsNone() && osr_ast_id_.ToInt() == current_offset) {
+ // For OSR add a special {OsrLoopEntry} node into the current loop header.
+ // It will be turned into a usable entry by the OSR deconstruction.
+ Environment* loop_env = merge_environments_[current_offset];
+ Environment* osr_env = loop_env->CopyForOsrEntry();
+ osr_env->PrepareForOsrEntry();
+ loop_env->Merge(osr_env);
+ }
+}
+
+void BytecodeGraphBuilder::BuildOSRNormalEntryPoint() {
+ if (!osr_ast_id_.IsNone()) {
+ // For OSR add an {OsrNormalEntry} as the the top-level environment start.
+ // It will be replaced with {Dead} by the OSR deconstruction.
+ NewNode(common()->OsrNormalEntry());
+ // Note that the requested OSR entry point must be the target of a backward
+ // branch, otherwise there will not be a proper loop header available.
+ DCHECK(branch_analysis()->backward_branches_target(osr_ast_id_.ToInt()));
+ }
+}
+
void BytecodeGraphBuilder::BuildLoopExitsForBranch(int target_offset) {
int origin_offset = bytecode_iterator().current_offset();
// Only build loop exits for forward edges.
@@ -1707,8 +1996,7 @@ void BytecodeGraphBuilder::BuildJump() {
MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
}
-
-void BytecodeGraphBuilder::BuildConditionalJump(Node* condition) {
+void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
NewBranch(condition);
Environment* if_false_environment = environment()->CopyForConditional();
NewIfTrue();
@@ -1717,24 +2005,43 @@ void BytecodeGraphBuilder::BuildConditionalJump(Node* condition) {
NewIfFalse();
}
+void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
+ NewBranch(condition);
+ Environment* if_true_environment = environment()->CopyForConditional();
+ NewIfFalse();
+ MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+ set_environment(if_true_environment);
+ NewIfTrue();
+}
void BytecodeGraphBuilder::BuildJumpIfEqual(Node* comperand) {
Node* accumulator = environment()->LookupAccumulator();
Node* condition =
NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
accumulator, comperand);
- BuildConditionalJump(condition);
+ BuildJumpIf(condition);
}
+void BytecodeGraphBuilder::BuildJumpIfFalse() {
+ BuildJumpIfNot(environment()->LookupAccumulator());
+}
-void BytecodeGraphBuilder::BuildJumpIfToBooleanEqual(Node* comperand) {
+void BytecodeGraphBuilder::BuildJumpIfTrue() {
+ BuildJumpIf(environment()->LookupAccumulator());
+}
+
+void BytecodeGraphBuilder::BuildJumpIfToBooleanTrue() {
Node* accumulator = environment()->LookupAccumulator();
- Node* to_boolean =
+ Node* condition =
NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
+ BuildJumpIf(condition);
+}
+
+void BytecodeGraphBuilder::BuildJumpIfToBooleanFalse() {
+ Node* accumulator = environment()->LookupAccumulator();
Node* condition =
- NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), to_boolean,
- comperand);
- BuildConditionalJump(condition);
+ NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
+ BuildJumpIfNot(condition);
}
void BytecodeGraphBuilder::BuildJumpIfNotHole() {
@@ -1742,10 +2049,7 @@ void BytecodeGraphBuilder::BuildJumpIfNotHole() {
Node* condition =
NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
accumulator, jsgraph()->TheHoleConstant());
- Node* node =
- NewNode(common()->Select(MachineRepresentation::kTagged), condition,
- jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
- BuildConditionalJump(node);
+ BuildJumpIfNot(condition);
}
Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 2f3acc1bca..53582f73d7 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -5,10 +5,11 @@
#ifndef V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
#define V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
-#include "src/compiler.h"
#include "src/compiler/bytecode-branch-analysis.h"
#include "src/compiler/bytecode-loop-analysis.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/liveness-analyzer.h"
+#include "src/compiler/state-values-utils.h"
#include "src/compiler/type-hint-analyzer.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
@@ -16,6 +17,9 @@
namespace v8 {
namespace internal {
+
+class CompilationInfo;
+
namespace compiler {
// The BytecodeGraphBuilder produces a high-level IR graph based on
@@ -23,7 +27,7 @@ namespace compiler {
class BytecodeGraphBuilder {
public:
BytecodeGraphBuilder(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph);
+ JSGraph* jsgraph, float invocation_frequency);
// Creates a graph by visiting bytecodes.
bool CreateGraph();
@@ -113,16 +117,22 @@ class BytecodeGraphBuilder {
interpreter::Register first_arg,
size_t arity);
+ // Computes register liveness and replaces dead ones in frame states with the
+ // undefined values.
+ void ClearNonLiveSlotsInFrameStates();
+
void BuildCreateLiteral(const Operator* op);
void BuildCreateArguments(CreateArgumentsType type);
Node* BuildLoadContextSlot();
- Node* BuildLoadGlobal(TypeofMode typeof_mode);
+ Node* BuildLoadGlobal(uint32_t feedback_slot_index, TypeofMode typeof_mode);
void BuildStoreGlobal(LanguageMode language_mode);
Node* BuildNamedLoad();
void BuildNamedStore(LanguageMode language_mode);
Node* BuildKeyedLoad();
void BuildKeyedStore(LanguageMode language_mode);
void BuildLdaLookupSlot(TypeofMode typeof_mode);
+ void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
+ void BuildLdaLookupGlobalSlot(TypeofMode typeof_mode);
void BuildStaLookupSlot(LanguageMode language_mode);
void BuildCall(TailCallMode tail_call_mode);
void BuildThrow();
@@ -135,15 +145,30 @@ class BytecodeGraphBuilder {
void BuildForInNext();
void BuildInvokeIntrinsic();
+ // Check the context chain for extensions, for lookup fast paths.
+ Environment* CheckContextExtensions(uint32_t depth);
+
// Helper function to create binary operation hint from the recorded
// type feedback.
BinaryOperationHint GetBinaryOperationHint(int operand_index);
+ // Helper function to create compare operation hint from the recorded
+ // type feedback.
+ CompareOperationHint GetCompareOperationHint();
+
+ // Helper function to compute call frequency from the recorded type
+ // feedback.
+ float ComputeCallFrequency(int slot_id) const;
+
// Control flow plumbing.
void BuildJump();
- void BuildConditionalJump(Node* condition);
+ void BuildJumpIf(Node* condition);
+ void BuildJumpIfNot(Node* condition);
void BuildJumpIfEqual(Node* comperand);
- void BuildJumpIfToBooleanEqual(Node* boolean_comperand);
+ void BuildJumpIfTrue();
+ void BuildJumpIfFalse();
+ void BuildJumpIfToBooleanTrue();
+ void BuildJumpIfToBooleanFalse();
void BuildJumpIfNotHole();
// Simulates control flow by forward-propagating environments.
@@ -154,6 +179,10 @@ class BytecodeGraphBuilder {
// Simulates control flow that exits the function body.
void MergeControlToLeaveFunction(Node* exit);
+ // Builds entry points that are used by OSR deconstruction.
+ void BuildOSRLoopEntryPoint(int current_offset);
+ void BuildOSRNormalEntryPoint();
+
// Builds loop exit nodes for every exited loop between the current bytecode
// offset and {target_offset}.
void BuildLoopExitsForBranch(int target_offset);
@@ -221,12 +250,19 @@ class BytecodeGraphBuilder {
loop_analysis_ = loop_analysis;
}
+ LivenessAnalyzer* liveness_analyzer() { return &liveness_analyzer_; }
+
+ bool IsLivenessAnalysisEnabled() const {
+ return this->is_liveness_analysis_enabled_;
+ }
+
#define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
#undef DECLARE_VISIT_BYTECODE
Zone* local_zone_;
JSGraph* jsgraph_;
+ float const invocation_frequency_;
Handle<BytecodeArray> bytecode_array_;
Handle<HandlerTable> exception_handler_table_;
Handle<TypeFeedbackVector> feedback_vector_;
@@ -258,6 +294,13 @@ class BytecodeGraphBuilder {
// Control nodes that exit the function body.
ZoneVector<Node*> exit_controls_;
+ bool const is_liveness_analysis_enabled_;
+
+ StateValuesCache state_values_cache_;
+
+ // Analyzer of register liveness.
+ LivenessAnalyzer liveness_analyzer_;
+
static int const kBinaryOperationHintIndex = 1;
static int const kCountOperationHintIndex = 0;
static int const kBinaryOperationSmiHintIndex = 2;
diff --git a/deps/v8/src/compiler/bytecode-loop-analysis.h b/deps/v8/src/compiler/bytecode-loop-analysis.h
index 59fabcef7b..1a86d7b81f 100644
--- a/deps/v8/src/compiler/bytecode-loop-analysis.h
+++ b/deps/v8/src/compiler/bytecode-loop-analysis.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
#include "src/handles.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index f79497a6e1..690a52be15 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -7,7 +7,7 @@
#include "src/compiler/linkage.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 4dd7e790fa..46dc84dc7f 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -20,7 +20,7 @@
#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/utils.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -86,6 +86,10 @@ bool CodeAssembler::IsFloat64RoundDownSupported() const {
return raw_assembler_->machine()->Float64RoundDown().IsSupported();
}
+bool CodeAssembler::IsFloat64RoundTiesEvenSupported() const {
+ return raw_assembler_->machine()->Float64RoundTiesEven().IsSupported();
+}
+
bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
return raw_assembler_->machine()->Float64RoundTruncate().IsSupported();
}
@@ -234,6 +238,13 @@ Node* CodeAssembler::ChangeInt32ToIntPtr(Node* value) {
return value;
}
+Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
+ if (raw_assembler_->machine()->Is64()) {
+ return raw_assembler_->RoundInt64ToFloat64(value);
+ }
+ return raw_assembler_->ChangeInt32ToFloat64(value);
+}
+
#define DEFINE_CODE_ASSEMBLER_UNARY_OP(name) \
Node* CodeAssembler::name(Node* a) { return raw_assembler_->name(a); }
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
@@ -299,6 +310,10 @@ Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
IntPtrConstant(root_index * kPointerSize), value);
}
+Node* CodeAssembler::Retain(Node* value) {
+ return raw_assembler_->Retain(value);
+}
+
Node* CodeAssembler::Projection(int index, Node* value) {
return raw_assembler_->Projection(index, value);
}
@@ -425,6 +440,14 @@ Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
arg5, context);
}
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5,
+ Node* arg6) {
+ return raw_assembler_->TailCallRuntime6(function_id, arg1, arg2, arg3, arg4,
+ arg5, arg6, context);
+}
+
Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
Node* arg1, size_t result_size) {
Node* target = HeapConstant(callable.code());
@@ -446,6 +469,14 @@ Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
result_size);
}
+Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return CallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
+ arg4, result_size);
+}
+
Node* CodeAssembler::CallStubN(Callable const& callable, Node** args,
size_t result_size) {
Node* target = HeapConstant(callable.code());
@@ -638,9 +669,11 @@ Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
}
Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
- Node* target, Node** args, size_t result_size) {
+ int js_parameter_count, Node* target,
+ Node** args, size_t result_size) {
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ isolate(), zone(), descriptor,
+ descriptor.GetStackParameterCount() + js_parameter_count,
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
@@ -745,6 +778,26 @@ Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
}
Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(6);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = arg5;
+ args[5] = context;
+
+ return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
Node* target, Node* context, const Arg& arg1,
const Arg& arg2, const Arg& arg3,
const Arg& arg4, size_t result_size) {
@@ -803,10 +856,6 @@ Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
Node* function, Node* receiver,
size_t result_size) {
const int argc = 0;
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), argc + 1,
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
Node* target = HeapConstant(callable.code());
Node** args = zone()->NewArray<Node*>(argc + 4);
@@ -815,17 +864,13 @@ Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
args[2] = receiver;
args[3] = context;
- return CallN(call_descriptor, target, args);
+ return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
}
Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
Node* function, Node* receiver, Node* arg1,
size_t result_size) {
const int argc = 1;
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), argc + 1,
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
Node* target = HeapConstant(callable.code());
Node** args = zone()->NewArray<Node*>(argc + 4);
@@ -835,17 +880,13 @@ Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
args[3] = arg1;
args[4] = context;
- return CallN(call_descriptor, target, args);
+ return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
}
Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
Node* function, Node* receiver, Node* arg1,
Node* arg2, size_t result_size) {
const int argc = 2;
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), argc + 1,
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
Node* target = HeapConstant(callable.code());
Node** args = zone()->NewArray<Node*>(argc + 4);
@@ -856,7 +897,15 @@ Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
args[4] = arg2;
args[5] = context;
- return CallN(call_descriptor, target, args);
+ return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
+}
+
+Node* CodeAssembler::CallCFunction2(MachineType return_type,
+ MachineType arg0_type,
+ MachineType arg1_type, Node* function,
+ Node* arg0, Node* arg1) {
+ return raw_assembler_->CallCFunction2(return_type, arg0_type, arg1_type,
+ function, arg0, arg1);
}
void CodeAssembler::Goto(CodeAssembler::Label* label) {
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index bea999b705..646a6d1c65 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -15,7 +15,7 @@
#include "src/heap/heap.h"
#include "src/machine-type.h"
#include "src/runtime/runtime.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -54,8 +54,10 @@ class RawMachineLabel;
V(IntPtrGreaterThanOrEqual) \
V(IntPtrEqual) \
V(Uint32LessThan) \
+ V(Uint32LessThanOrEqual) \
V(Uint32GreaterThanOrEqual) \
V(UintPtrLessThan) \
+ V(UintPtrGreaterThan) \
V(UintPtrGreaterThanOrEqual) \
V(WordEqual) \
V(WordNotEqual) \
@@ -133,7 +135,9 @@ class RawMachineLabel;
V(Float64Tanh) \
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
+ V(BitcastTaggedToWord) \
V(BitcastWordToTagged) \
+ V(BitcastWordToTaggedSigned) \
V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToWord32) \
V(TruncateInt64ToInt32) \
@@ -144,10 +148,14 @@ class RawMachineLabel;
V(ChangeUint32ToFloat64) \
V(ChangeUint32ToUint64) \
V(RoundFloat64ToInt32) \
+ V(RoundInt32ToFloat32) \
+ V(Float64SilenceNaN) \
V(Float64RoundDown) \
V(Float64RoundUp) \
+ V(Float64RoundTiesEven) \
V(Float64RoundTruncate) \
- V(Word32Clz)
+ V(Word32Clz) \
+ V(Word32BinaryNot)
// A "public" interface used by components outside of compiler directory to
// create code objects with TurboFan's backend. This class is mostly a thin shim
@@ -185,6 +193,7 @@ class CodeAssembler {
bool Is64() const;
bool IsFloat64RoundUpSupported() const;
bool IsFloat64RoundDownSupported() const;
+ bool IsFloat64RoundTiesEvenSupported() const;
bool IsFloat64RoundTruncateSupported() const;
class Label;
@@ -283,11 +292,19 @@ class CodeAssembler {
CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP)
#undef DECLARE_CODE_ASSEMBLER_UNARY_OP
+ // Changes an intptr_t to a double, e.g. for storing an element index
+ // outside Smi range in a HeapNumber. Lossless on 32-bit,
+ // rounds on 64-bit (which doesn't affect valid element indices).
+ Node* RoundIntPtrToFloat64(Node* value);
// No-op on 32-bit, otherwise zero extend.
Node* ChangeUint32ToWord(Node* value);
// No-op on 32-bit, otherwise sign extend.
Node* ChangeInt32ToIntPtr(Node* value);
+ // No-op that guarantees that the value is kept alive till this point even
+ // if GC happens.
+ Node* Retain(Node* value);
+
// Projections
Node* Projection(int index, Node* value);
@@ -315,6 +332,9 @@ class CodeAssembler {
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5, Node* arg6);
// A pair of a zero-based argument index and a value.
// It helps writing arguments order independent code.
@@ -331,6 +351,8 @@ class CodeAssembler {
Node* arg2, size_t result_size = 1);
Node* CallStub(Callable const& callable, Node* context, Node* arg1,
Node* arg2, Node* arg3, size_t result_size = 1);
+ Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4, size_t result_size = 1);
Node* CallStubN(Callable const& callable, Node** args,
size_t result_size = 1);
@@ -364,8 +386,13 @@ class CodeAssembler {
const Arg& arg3, const Arg& arg4, const Arg& arg5,
size_t result_size = 1);
+ Node* CallStubN(const CallInterfaceDescriptor& descriptor,
+ int js_parameter_count, Node* target, Node** args,
+ size_t result_size = 1);
Node* CallStubN(const CallInterfaceDescriptor& descriptor, Node* target,
- Node** args, size_t result_size = 1);
+ Node** args, size_t result_size = 1) {
+ return CallStubN(descriptor, 0, target, args, result_size);
+ }
Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
size_t result_size = 1);
@@ -387,6 +414,9 @@ class CodeAssembler {
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3,
Node* arg4, size_t result_size = 1);
+ Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4, Node* arg5, size_t result_size = 1);
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, const Arg& arg1, const Arg& arg2,
@@ -406,6 +436,11 @@ class CodeAssembler {
Node* CallJS(Callable const& callable, Node* context, Node* function,
Node* receiver, Node* arg1, Node* arg2, size_t result_size = 1);
+ // Call to a C function with two arguments.
+ Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, Node* function, Node* arg0,
+ Node* arg1);
+
// Exception handling support.
void GotoIfException(Node* node, Label* if_exception,
Variable* exception_var = nullptr);
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index 4dccdc912c..8bf3a9ea4e 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -170,15 +170,17 @@ class InstructionOperandConverter {
// Eager deoptimization exit.
class DeoptimizationExit : public ZoneObject {
public:
- explicit DeoptimizationExit(int deoptimization_id)
- : deoptimization_id_(deoptimization_id) {}
+ explicit DeoptimizationExit(int deoptimization_id, SourcePosition pos)
+ : deoptimization_id_(deoptimization_id), pos_(pos) {}
int deoptimization_id() const { return deoptimization_id_; }
Label* label() { return &label_; }
+ SourcePosition pos() const { return pos_; }
private:
int const deoptimization_id_;
Label label_;
+ SourcePosition const pos_;
};
// Generator for out-of-line code that is emitted after the main code is done.
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 03136a7c2c..043582b17e 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -6,6 +6,7 @@
#include "src/address-map.h"
#include "src/base/adapters.h"
+#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
@@ -63,6 +64,8 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
CreateFrameAccessState(frame);
}
+Isolate* CodeGenerator::isolate() const { return info_->isolate(); }
+
void CodeGenerator::CreateFrameAccessState(Frame* frame) {
FinishFrame(frame);
frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
@@ -185,7 +188,8 @@ Handle<Code> CodeGenerator::GenerateCode() {
// Assemble all eager deoptimization exits.
for (DeoptimizationExit* exit : deoptimization_exits_) {
masm()->bind(exit->label());
- AssembleDeoptimizerCall(exit->deoptimization_id(), Deoptimizer::EAGER);
+ AssembleDeoptimizerCall(exit->deoptimization_id(), Deoptimizer::EAGER,
+ exit->pos());
}
// Ensure there is space for lazy deoptimization in the code.
@@ -805,7 +809,7 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
} else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
type == MachineType::Uint32()) {
translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
- } else if (type.representation() == MachineRepresentation::kTagged) {
+ } else if (IsAnyTagged(type.representation())) {
translation->StoreStackSlot(LocationOperand::cast(op)->index());
} else {
CHECK(false);
@@ -827,7 +831,7 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
} else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
type == MachineType::Uint32()) {
translation->StoreUint32Register(converter.ToRegister(op));
- } else if (type.representation() == MachineRepresentation::kTagged) {
+ } else if (IsAnyTagged(type.representation())) {
translation->StoreRegister(converter.ToRegister(op));
} else {
CHECK(false);
@@ -846,7 +850,8 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
Handle<Object> constant_object;
switch (constant.type()) {
case Constant::kInt32:
- if (type.representation() == MachineRepresentation::kTagged) {
+ if (type.representation() == MachineRepresentation::kTagged ||
+ type.representation() == MachineRepresentation::kTaggedSigned) {
// When pointers are 4 bytes, we can use int32 constants to represent
// Smis.
DCHECK_EQ(4, kPointerSize);
@@ -868,24 +873,33 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
case Constant::kInt64:
// When pointers are 8 bytes, we can use int64 constants to represent
// Smis.
- DCHECK_EQ(type.representation(), MachineRepresentation::kTagged);
+ DCHECK(type.representation() == MachineRepresentation::kTagged ||
+ type.representation() == MachineRepresentation::kTaggedSigned);
DCHECK_EQ(8, kPointerSize);
constant_object =
handle(reinterpret_cast<Smi*>(constant.ToInt64()), isolate());
DCHECK(constant_object->IsSmi());
break;
case Constant::kFloat32:
- DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
- type.representation() == MachineRepresentation::kTagged);
+ if (type.representation() == MachineRepresentation::kTaggedSigned) {
+ DCHECK(IsSmiDouble(constant.ToFloat32()));
+ } else {
+ DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
+ CanBeTaggedPointer(type.representation()));
+ }
constant_object = isolate()->factory()->NewNumber(constant.ToFloat32());
break;
case Constant::kFloat64:
- DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
- type.representation() == MachineRepresentation::kTagged);
+ if (type.representation() == MachineRepresentation::kTaggedSigned) {
+ DCHECK(IsSmiDouble(constant.ToFloat64()));
+ } else {
+ DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
+ CanBeTaggedPointer(type.representation()));
+ }
constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
break;
case Constant::kHeapObject:
- DCHECK(type.representation() == MachineRepresentation::kTagged);
+ DCHECK(CanBeTaggedPointer(type.representation()));
constant_object = constant.ToHeapObject();
break;
default:
@@ -911,8 +925,8 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
Instruction* instr, size_t frame_state_offset) {
int const deoptimization_id = BuildTranslation(
instr, -1, frame_state_offset, OutputFrameStateCombine::Ignore());
- DeoptimizationExit* const exit =
- new (zone()) DeoptimizationExit(deoptimization_id);
+ DeoptimizationExit* const exit = new (zone())
+ DeoptimizationExit(deoptimization_id, current_source_position_);
deoptimization_exits_.push_back(exit);
return exit;
}
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 21c13f8d61..3032163d34 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -5,7 +5,6 @@
#ifndef V8_COMPILER_CODE_GENERATOR_H_
#define V8_COMPILER_CODE_GENERATOR_H_
-#include "src/compiler.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/instruction.h"
#include "src/compiler/unwinding-info-writer.h"
@@ -16,6 +15,9 @@
namespace v8 {
namespace internal {
+
+class CompilationInfo;
+
namespace compiler {
// Forward declarations.
@@ -58,7 +60,7 @@ class CodeGenerator final : public GapResolver::Assembler {
InstructionSequence* code() const { return code_; }
FrameAccessState* frame_access_state() const { return frame_access_state_; }
const Frame* frame() const { return frame_access_state_->frame(); }
- Isolate* isolate() const { return info_->isolate(); }
+ Isolate* isolate() const;
Linkage* linkage() const { return linkage_; }
Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
@@ -118,7 +120,8 @@ class CodeGenerator final : public GapResolver::Assembler {
void AssembleArchTableSwitch(Instruction* instr);
CodeGenResult AssembleDeoptimizerCall(int deoptimization_id,
- Deoptimizer::BailoutType bailout_type);
+ Deoptimizer::BailoutType bailout_type,
+ SourcePosition pos);
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 9527c754e4..c5ced20373 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -77,8 +77,12 @@ Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
// Swap IfTrue/IfFalse on {branch} if {cond} is a BooleanNot and use the input
// to BooleanNot as new condition for {branch}. Note we assume that {cond} was
// already properly optimized before we get here (as guaranteed by the graph
- // reduction logic).
- if (cond->opcode() == IrOpcode::kBooleanNot) {
+ // reduction logic). The same applies if {cond} is a Select acting as boolean
+ // not (i.e. true being returned in the false case and vice versa).
+ if (cond->opcode() == IrOpcode::kBooleanNot ||
+ (cond->opcode() == IrOpcode::kSelect &&
+ DecideCondition(cond->InputAt(1)) == Decision::kFalse &&
+ DecideCondition(cond->InputAt(2)) == Decision::kTrue)) {
for (Node* const use : node->uses()) {
switch (use->opcode()) {
case IrOpcode::kIfTrue:
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index f732375a68..e57160a3f8 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -10,7 +10,7 @@
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/handles-inl.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 9e4d259bc9..2db0bfa7d1 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -9,7 +9,7 @@
#include "src/compiler/frame-states.h"
#include "src/deoptimize-reason.h"
#include "src/machine-type.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -19,7 +19,7 @@ namespace compiler {
class CallDescriptor;
struct CommonOperatorGlobalCache;
class Operator;
-
+class Type;
// Prediction hint for branches.
enum class BranchHint : uint8_t { kNone, kTrue, kFalse };
diff --git a/deps/v8/src/compiler/control-equivalence.h b/deps/v8/src/compiler/control-equivalence.h
index 478e48b46d..4fb9c2718d 100644
--- a/deps/v8/src/compiler/control-equivalence.h
+++ b/deps/v8/src/compiler/control-equivalence.h
@@ -7,7 +7,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/control-flow-optimizer.h b/deps/v8/src/compiler/control-flow-optimizer.h
index f72fa58ad7..61785a0fc0 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.h
+++ b/deps/v8/src/compiler/control-flow-optimizer.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
#include "src/compiler/node-marker.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 9cc6ddc4f9..4e53e5dcec 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -265,7 +265,6 @@ void TryCloneBranch(Node* node, BasicBlock* block, Graph* graph,
Node* phi_false = graph->NewNode(phi->op(), input_count + 1, inputs);
if (phi->UseCount() == 0) {
DCHECK_EQ(phi->opcode(), IrOpcode::kEffectPhi);
- DCHECK_EQ(input_count, block->SuccessorCount());
} else {
for (Edge edge : phi->use_edges()) {
Node* control = NodeProperties::GetControlInput(edge.from());
@@ -616,6 +615,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeTaggedToFloat64:
state = LowerChangeTaggedToFloat64(node, *effect, *control);
break;
+ case IrOpcode::kTruncateTaggedToBit:
+ state = LowerTruncateTaggedToBit(node, *effect, *control);
+ break;
case IrOpcode::kTruncateTaggedToFloat64:
state = LowerTruncateTaggedToFloat64(node, *effect, *control);
break;
@@ -634,11 +636,8 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckIf:
state = LowerCheckIf(node, frame_state, *effect, *control);
break;
- case IrOpcode::kCheckTaggedPointer:
- state = LowerCheckTaggedPointer(node, frame_state, *effect, *control);
- break;
- case IrOpcode::kCheckTaggedSigned:
- state = LowerCheckTaggedSigned(node, frame_state, *effect, *control);
+ case IrOpcode::kCheckHeapObject:
+ state = LowerCheckHeapObject(node, frame_state, *effect, *control);
break;
case IrOpcode::kCheckedInt32Add:
state = LowerCheckedInt32Add(node, frame_state, *effect, *control);
@@ -661,9 +660,17 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedInt32Mul:
state = LowerCheckedInt32Mul(node, frame_state, *effect, *control);
break;
+ case IrOpcode::kCheckedInt32ToTaggedSigned:
+ state =
+ LowerCheckedInt32ToTaggedSigned(node, frame_state, *effect, *control);
+ break;
case IrOpcode::kCheckedUint32ToInt32:
state = LowerCheckedUint32ToInt32(node, frame_state, *effect, *control);
break;
+ case IrOpcode::kCheckedUint32ToTaggedSigned:
+ state = LowerCheckedUint32ToTaggedSigned(node, frame_state, *effect,
+ *control);
+ break;
case IrOpcode::kCheckedFloat64ToInt32:
state = LowerCheckedFloat64ToInt32(node, frame_state, *effect, *control);
break;
@@ -677,6 +684,10 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedTaggedToFloat64:
state = LowerCheckedTaggedToFloat64(node, frame_state, *effect, *control);
break;
+ case IrOpcode::kCheckedTaggedToTaggedSigned:
+ state = LowerCheckedTaggedToTaggedSigned(node, frame_state, *effect,
+ *control);
+ break;
case IrOpcode::kTruncateTaggedToWord32:
state = LowerTruncateTaggedToWord32(node, *effect, *control);
break;
@@ -702,12 +713,27 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kObjectIsUndetectable:
state = LowerObjectIsUndetectable(node, *effect, *control);
break;
+ case IrOpcode::kArrayBufferWasNeutered:
+ state = LowerArrayBufferWasNeutered(node, *effect, *control);
+ break;
case IrOpcode::kStringFromCharCode:
state = LowerStringFromCharCode(node, *effect, *control);
break;
+ case IrOpcode::kStringFromCodePoint:
+ state = LowerStringFromCodePoint(node, *effect, *control);
+ break;
case IrOpcode::kStringCharCodeAt:
state = LowerStringCharCodeAt(node, *effect, *control);
break;
+ case IrOpcode::kStringEqual:
+ state = LowerStringEqual(node, *effect, *control);
+ break;
+ case IrOpcode::kStringLessThan:
+ state = LowerStringLessThan(node, *effect, *control);
+ break;
+ case IrOpcode::kStringLessThanOrEqual:
+ state = LowerStringLessThanOrEqual(node, *effect, *control);
+ break;
case IrOpcode::kCheckFloat64Hole:
state = LowerCheckFloat64Hole(node, frame_state, *effect, *control);
break;
@@ -762,75 +788,8 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node, Node* effect,
Node* control) {
- CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
Node* value = node->InputAt(0);
-
- Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
- Node* check_same = graph()->NewNode(
- machine()->Float64Equal(), value,
- graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
- Node* branch_same = graph()->NewNode(common()->Branch(), check_same, control);
-
- Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_same);
- Node* vsmi;
- Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
-
- if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
- // Check if {value} is -0.
- Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
- jsgraph()->Int32Constant(0));
- Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_zero, if_smi);
-
- Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
- Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
-
- // In case of 0, we need to check the high bits for the IEEE -0 pattern.
- Node* check_negative = graph()->NewNode(
- machine()->Int32LessThan(),
- graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
- jsgraph()->Int32Constant(0));
- Node* branch_negative = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), check_negative, if_zero);
-
- Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
- Node* if_notnegative =
- graph()->NewNode(common()->IfFalse(), branch_negative);
-
- // We need to create a box for negative 0.
- if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
- if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
- }
-
- // On 64-bit machines we can just wrap the 32-bit integer in a smi, for 32-bit
- // machines we need to deal with potential overflow and fallback to boxing.
- if (machine()->Is64()) {
- vsmi = ChangeInt32ToSmi(value32);
- } else {
- Node* smi_tag = graph()->NewNode(machine()->Int32AddWithOverflow(), value32,
- value32, if_smi);
-
- Node* check_ovf =
- graph()->NewNode(common()->Projection(1), smi_tag, if_smi);
- Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_ovf, if_smi);
-
- Node* if_ovf = graph()->NewNode(common()->IfTrue(), branch_ovf);
- if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
-
- if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
- vsmi = graph()->NewNode(common()->Projection(0), smi_tag, if_smi);
- }
-
- // Allocate the box for the {value}.
- ValueEffectControl box = AllocateHeapNumberWithValue(value, effect, if_box);
-
- control = graph()->NewNode(common()->Merge(2), if_smi, box.control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vsmi, box.value, control);
- effect =
- graph()->NewNode(common()->EffectPhi(2), effect, box.effect, control);
- return ValueEffectControl(value, effect, control);
+ return AllocateHeapNumberWithValue(value, effect, control);
}
EffectControlLinearizer::ValueEffectControl
@@ -939,6 +898,157 @@ EffectControlLinearizer::LowerChangeTaggedToBit(Node* node, Node* effect,
}
EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ Node* one = jsgraph()->Int32Constant(1);
+ Node* zero = jsgraph()->Int32Constant(0);
+ Node* fzero = jsgraph()->Float64Constant(0.0);
+
+ // Collect effect/control/value triples.
+ int count = 0;
+ Node* values[7];
+ Node* effects[7];
+ Node* controls[6];
+
+ // Check if {value} is a Smi.
+ Node* check_smi = ObjectIsSmi(value);
+ Node* branch_smi = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_smi, control);
+
+ // If {value} is a Smi, then we only need to check that it's not zero.
+ Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_smi);
+ Node* esmi = effect;
+ {
+ controls[count] = if_smi;
+ effects[count] = esmi;
+ values[count] =
+ graph()->NewNode(machine()->Word32Equal(),
+ graph()->NewNode(machine()->WordEqual(), value,
+ jsgraph()->ZeroConstant()),
+ zero);
+ count++;
+ }
+ control = graph()->NewNode(common()->IfFalse(), branch_smi);
+
+ // Load the map instance type of {value}.
+ Node* value_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+ Node* value_instance_type = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+ effect, control);
+
+ // Check if {value} is an Oddball.
+ Node* check_oddball =
+ graph()->NewNode(machine()->Word32Equal(), value_instance_type,
+ jsgraph()->Int32Constant(ODDBALL_TYPE));
+ Node* branch_oddball = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check_oddball, control);
+
+ // The only Oddball {value} that is trueish is true itself.
+ Node* if_oddball = graph()->NewNode(common()->IfTrue(), branch_oddball);
+ Node* eoddball = effect;
+ {
+ controls[count] = if_oddball;
+ effects[count] = eoddball;
+ values[count] = graph()->NewNode(machine()->WordEqual(), value,
+ jsgraph()->TrueConstant());
+ count++;
+ }
+ control = graph()->NewNode(common()->IfFalse(), branch_oddball);
+
+ // Check if {value} is a String.
+ Node* check_string =
+ graph()->NewNode(machine()->Int32LessThan(), value_instance_type,
+ jsgraph()->Int32Constant(FIRST_NONSTRING_TYPE));
+ Node* branch_string =
+ graph()->NewNode(common()->Branch(), check_string, control);
+
+ // For String {value}, we need to check that the length is not zero.
+ Node* if_string = graph()->NewNode(common()->IfTrue(), branch_string);
+ Node* estring = effect;
+ {
+ // Load the {value} length.
+ Node* value_length = estring = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()), value,
+ estring, if_string);
+
+ controls[count] = if_string;
+ effects[count] = estring;
+ values[count] =
+ graph()->NewNode(machine()->Word32Equal(),
+ graph()->NewNode(machine()->WordEqual(), value_length,
+ jsgraph()->ZeroConstant()),
+ zero);
+ count++;
+ }
+ control = graph()->NewNode(common()->IfFalse(), branch_string);
+
+ // Check if {value} is a HeapNumber.
+ Node* check_heapnumber =
+ graph()->NewNode(machine()->Word32Equal(), value_instance_type,
+ jsgraph()->Int32Constant(HEAP_NUMBER_TYPE));
+ Node* branch_heapnumber =
+ graph()->NewNode(common()->Branch(), check_heapnumber, control);
+
+ // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or NaN.
+ Node* if_heapnumber = graph()->NewNode(common()->IfTrue(), branch_heapnumber);
+ Node* eheapnumber = effect;
+ {
+ // Load the raw value of {value}.
+ Node* value_value = eheapnumber = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+ eheapnumber, if_heapnumber);
+
+ // Check if {value} is either less than 0.0 or greater than 0.0.
+ Node* check =
+ graph()->NewNode(machine()->Float64LessThan(), fzero, value_value);
+ Node* branch = graph()->NewNode(common()->Branch(), check, if_heapnumber);
+
+ controls[count] = graph()->NewNode(common()->IfTrue(), branch);
+ effects[count] = eheapnumber;
+ values[count] = one;
+ count++;
+
+ controls[count] = graph()->NewNode(common()->IfFalse(), branch);
+ effects[count] = eheapnumber;
+ values[count] =
+ graph()->NewNode(machine()->Float64LessThan(), value_value, fzero);
+ count++;
+ }
+ control = graph()->NewNode(common()->IfFalse(), branch_heapnumber);
+
+ // The {value} is either a JSReceiver, a Symbol or some Simd128Value. In
+ // those cases we can just the undetectable bit on the map, which will only
+ // be set for certain JSReceivers, i.e. document.all.
+ {
+ // Load the {value} map bit field.
+ Node* value_map_bitfield = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
+ effect, control);
+
+ controls[count] = control;
+ effects[count] = effect;
+ values[count] = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32And(), value_map_bitfield,
+ jsgraph()->Int32Constant(1 << Map::kIsUndetectable)),
+ zero);
+ count++;
+ }
+
+ // Merge the different controls.
+ control = graph()->NewNode(common()->Merge(count), count, controls);
+ effects[count] = control;
+ effect = graph()->NewNode(common()->EffectPhi(count), count + 1, effects);
+ values[count] = control;
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, count),
+ count + 1, values);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node, Node* effect,
Node* control) {
Node* value = node->InputAt(0);
@@ -1164,8 +1274,8 @@ EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state,
}
EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckTaggedPointer(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+EffectControlLinearizer::LowerCheckHeapObject(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
Node* value = node->InputAt(0);
Node* check = ObjectIsSmi(value);
@@ -1177,19 +1287,6 @@ EffectControlLinearizer::LowerCheckTaggedPointer(Node* node, Node* frame_state,
}
EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckTaggedSigned(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* value = node->InputAt(0);
-
- Node* check = ObjectIsSmi(value);
- control = effect =
- graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
- check, frame_state, effect, control);
-
- return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerCheckedInt32Add(Node* node, Node* frame_state,
Node* effect, Node* control) {
Node* lhs = node->InputAt(0);
@@ -1515,6 +1612,27 @@ EffectControlLinearizer::LowerCheckedInt32Mul(Node* node, Node* frame_state,
}
EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ DCHECK(SmiValuesAre31Bits());
+ Node* value = node->InputAt(0);
+
+ Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value,
+ control);
+
+ Node* check = graph()->NewNode(common()->Projection(1), add, control);
+ control = effect =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
+ check, frame_state, effect, control);
+
+ value = graph()->NewNode(common()->Projection(0), add, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
Node* frame_state,
Node* effect,
@@ -1531,6 +1649,22 @@ EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
}
EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
+ SmiMaxValueConstant());
+ control = effect = graph()->NewNode(
+ common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
+ frame_state, effect, control);
+ value = ChangeUint32ToSmi(value);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
Node* value,
Node* frame_state,
@@ -1667,8 +1801,8 @@ EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
break;
}
case CheckTaggedInputMode::kNumberOrOddball: {
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check_number, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check_number, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
@@ -1710,8 +1844,7 @@ EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
Node* value = node->InputAt(0);
Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
// In the Smi case, just convert to int32 and then float64.
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
@@ -1736,6 +1869,21 @@ EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
}
EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ control = effect =
+ graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
+ check, frame_state, effect, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node, Node* effect,
Node* control) {
Node* value = node->InputAt(0);
@@ -1996,6 +2144,26 @@ EffectControlLinearizer::LowerObjectIsUndetectable(Node* node, Node* effect,
}
EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* value_bit_field = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()), value,
+ effect, control);
+ value = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32And(), value_bit_field,
+ jsgraph()->Int32Constant(
+ JSArrayBuffer::WasNeutered::kMask)),
+ jsgraph()->Int32Constant(0)),
+ jsgraph()->Int32Constant(0));
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerStringCharCodeAt(Node* node, Node* effect,
Node* control) {
Node* subject = node->InputAt(0);
@@ -2382,6 +2550,236 @@ EffectControlLinearizer::LowerStringFromCharCode(Node* node, Node* effect,
}
EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringFromCodePoint(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ Node* code = value;
+
+ Node* etrue0 = effect;
+ Node* vtrue0;
+
+ // Check if the {code} is a single code unit
+ Node* check0 = graph()->NewNode(machine()->Uint32LessThanOrEqual(), code,
+ jsgraph()->Uint32Constant(0xFFFF));
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ {
+ // Check if the {code} is a one byte character
+ Node* check1 = graph()->NewNode(
+ machine()->Uint32LessThanOrEqual(), code,
+ jsgraph()->Uint32Constant(String::kMaxOneByteCharCode));
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = etrue0;
+ Node* vtrue1;
+ {
+ // Load the isolate wide single character string cache.
+ Node* cache =
+ jsgraph()->HeapConstant(factory()->single_character_string_cache());
+
+ // Compute the {cache} index for {code}.
+ Node* index =
+ machine()->Is32()
+ ? code
+ : graph()->NewNode(machine()->ChangeUint32ToUint64(), code);
+
+ // Check if we have an entry for the {code} in the single character string
+ // cache already.
+ Node* entry = etrue1 = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ cache, index, etrue1, if_true1);
+
+ Node* check2 = graph()->NewNode(machine()->WordEqual(), entry,
+ jsgraph()->UndefinedConstant());
+ Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check2, if_true1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* etrue2 = etrue1;
+ Node* vtrue2;
+ {
+ // Allocate a new SeqOneByteString for {code}.
+ vtrue2 = etrue2 = graph()->NewNode(
+ simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Int32Constant(SeqOneByteString::SizeFor(1)), etrue2,
+ if_true2);
+ etrue2 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), vtrue2,
+ jsgraph()->HeapConstant(factory()->one_byte_string_map()), etrue2,
+ if_true2);
+ etrue2 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForNameHashField()), vtrue2,
+ jsgraph()->IntPtrConstant(Name::kEmptyHashField), etrue2, if_true2);
+ etrue2 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForStringLength()), vtrue2,
+ jsgraph()->SmiConstant(1), etrue2, if_true2);
+ etrue2 = graph()->NewNode(
+ machine()->Store(StoreRepresentation(MachineRepresentation::kWord8,
+ kNoWriteBarrier)),
+ vtrue2, jsgraph()->IntPtrConstant(SeqOneByteString::kHeaderSize -
+ kHeapObjectTag),
+ code, etrue2, if_true2);
+
+ // Remember it in the {cache}.
+ etrue2 = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()),
+ cache, index, vtrue2, etrue2, if_true2);
+ }
+
+ // Use the {entry} from the {cache}.
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* efalse2 = etrue0;
+ Node* vfalse2 = entry;
+
+ if_true1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+ etrue1 =
+ graph()->NewNode(common()->EffectPhi(2), etrue2, efalse2, if_true1);
+ vtrue1 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue2, vfalse2, if_true1);
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = effect;
+ Node* vfalse1;
+ {
+ // Allocate a new SeqTwoByteString for {code}.
+ vfalse1 = efalse1 = graph()->NewNode(
+ simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(1)), efalse1,
+ if_false1);
+ efalse1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), vfalse1,
+ jsgraph()->HeapConstant(factory()->string_map()), efalse1, if_false1);
+ efalse1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse1,
+ jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse1, if_false1);
+ efalse1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse1,
+ jsgraph()->SmiConstant(1), efalse1, if_false1);
+ efalse1 = graph()->NewNode(
+ machine()->Store(StoreRepresentation(MachineRepresentation::kWord16,
+ kNoWriteBarrier)),
+ vfalse1, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+ kHeapObjectTag),
+ code, efalse1, if_false1);
+ }
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ etrue0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
+ vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_true0);
+ }
+
+ // Generate surrogate pair string
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ switch (UnicodeEncodingOf(node->op())) {
+ case UnicodeEncoding::UTF16:
+ break;
+
+ case UnicodeEncoding::UTF32: {
+ // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
+ Node* lead_offset = jsgraph()->Int32Constant(0xD800 - (0x10000 >> 10));
+
+ // lead = (codepoint >> 10) + LEAD_OFFSET
+ Node* lead =
+ graph()->NewNode(machine()->Int32Add(),
+ graph()->NewNode(machine()->Word32Shr(), code,
+ jsgraph()->Int32Constant(10)),
+ lead_offset);
+
+ // trail = (codepoint & 0x3FF) + 0xDC00;
+ Node* trail =
+ graph()->NewNode(machine()->Int32Add(),
+ graph()->NewNode(machine()->Word32And(), code,
+ jsgraph()->Int32Constant(0x3FF)),
+ jsgraph()->Int32Constant(0xDC00));
+
+ // codpoint = (trail << 16) | lead;
+ code = graph()->NewNode(machine()->Word32Or(),
+ graph()->NewNode(machine()->Word32Shl(), trail,
+ jsgraph()->Int32Constant(16)),
+ lead);
+ break;
+ }
+ }
+
+ // Allocate a new SeqTwoByteString for {code}.
+ vfalse0 = efalse0 =
+ graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(2)),
+ efalse0, if_false0);
+ efalse0 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), vfalse0,
+ jsgraph()->HeapConstant(factory()->string_map()), efalse0, if_false0);
+ efalse0 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse0,
+ jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse0, if_false0);
+ efalse0 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse0,
+ jsgraph()->SmiConstant(2), efalse0, if_false0);
+ efalse0 = graph()->NewNode(
+ machine()->Store(StoreRepresentation(MachineRepresentation::kWord32,
+ kNoWriteBarrier)),
+ vfalse0, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+ kHeapObjectTag),
+ code, efalse0, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue0, vfalse0, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringComparison(Callable const& callable,
+ Node* node, Node* effect,
+ Node* control) {
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->AppendInput(graph()->zone(), jsgraph()->NoContextConstant());
+ node->AppendInput(graph()->zone(), effect);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return ValueEffectControl(node, node, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringEqual(Node* node, Node* effect,
+ Node* control) {
+ return LowerStringComparison(CodeFactory::StringEqual(isolate()), node,
+ effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringLessThan(Node* node, Node* effect,
+ Node* control) {
+ return LowerStringComparison(CodeFactory::StringLessThan(isolate()), node,
+ effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node, Node* effect,
+ Node* control) {
+ return LowerStringComparison(CodeFactory::StringLessThanOrEqual(isolate()),
+ node, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerCheckFloat64Hole(Node* node, Node* frame_state,
Node* effect, Node* control) {
// If we reach this point w/o eliminating the {node} that's marked
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 98f08c7b12..0199fd0886 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -12,6 +12,8 @@
namespace v8 {
namespace internal {
+// Forward declarations.
+class Callable;
class Zone;
namespace compiler {
@@ -71,10 +73,8 @@ class EffectControlLinearizer {
Node* effect, Node* control);
ValueEffectControl LowerCheckIf(Node* node, Node* frame_state, Node* effect,
Node* control);
- ValueEffectControl LowerCheckTaggedPointer(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckTaggedSigned(Node* node, Node* frame_state,
- Node* effect, Node* control);
+ ValueEffectControl LowerCheckHeapObject(Node* node, Node* frame_state,
+ Node* effect, Node* control);
ValueEffectControl LowerCheckedInt32Add(Node* node, Node* frame_state,
Node* effect, Node* control);
ValueEffectControl LowerCheckedInt32Sub(Node* node, Node* frame_state,
@@ -89,8 +89,16 @@ class EffectControlLinearizer {
Node* effect, Node* control);
ValueEffectControl LowerCheckedInt32Mul(Node* node, Node* frame_state,
Node* effect, Node* control);
+ ValueEffectControl LowerCheckedInt32ToTaggedSigned(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control);
ValueEffectControl LowerCheckedUint32ToInt32(Node* node, Node* frame_state,
Node* effect, Node* control);
+ ValueEffectControl LowerCheckedUint32ToTaggedSigned(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control);
ValueEffectControl LowerCheckedFloat64ToInt32(Node* node, Node* frame_state,
Node* effect, Node* control);
ValueEffectControl LowerCheckedTaggedSignedToInt32(Node* node,
@@ -101,8 +109,14 @@ class EffectControlLinearizer {
Node* effect, Node* control);
ValueEffectControl LowerCheckedTaggedToFloat64(Node* node, Node* frame_state,
Node* effect, Node* control);
+ ValueEffectControl LowerCheckedTaggedToTaggedSigned(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control);
ValueEffectControl LowerChangeTaggedToFloat64(Node* node, Node* effect,
Node* control);
+ ValueEffectControl LowerTruncateTaggedToBit(Node* node, Node* effect,
+ Node* control);
ValueEffectControl LowerTruncateTaggedToFloat64(Node* node, Node* effect,
Node* control);
ValueEffectControl LowerTruncateTaggedToWord32(Node* node, Node* effect,
@@ -122,10 +136,19 @@ class EffectControlLinearizer {
Node* control);
ValueEffectControl LowerObjectIsUndetectable(Node* node, Node* effect,
Node* control);
+ ValueEffectControl LowerArrayBufferWasNeutered(Node* node, Node* effect,
+ Node* control);
ValueEffectControl LowerStringCharCodeAt(Node* node, Node* effect,
Node* control);
ValueEffectControl LowerStringFromCharCode(Node* node, Node* effect,
Node* control);
+ ValueEffectControl LowerStringFromCodePoint(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerStringEqual(Node* node, Node* effect, Node* control);
+ ValueEffectControl LowerStringLessThan(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerStringLessThanOrEqual(Node* node, Node* effect,
+ Node* control);
ValueEffectControl LowerCheckFloat64Hole(Node* node, Node* frame_state,
Node* effect, Node* control);
ValueEffectControl LowerCheckTaggedHole(Node* node, Node* frame_state,
@@ -165,6 +188,8 @@ class EffectControlLinearizer {
ValueEffectControl BuildCheckedHeapNumberOrOddballToFloat64(
CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
Node* control);
+ ValueEffectControl LowerStringComparison(Callable const& callable, Node* node,
+ Node* effect, Node* control);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeUint32ToSmi(Node* value);
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index c69b86c488..d997813d01 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -97,6 +97,22 @@ Reduction EscapeAnalysisReducer::Reduce(Node* node) {
return NoChange();
}
+namespace {
+
+Node* MaybeGuard(JSGraph* jsgraph, Node* original, Node* replacement) {
+ // We might need to guard the replacement if the type of the {replacement}
+ // node is not in a sub-type relation to the type of the the {original} node.
+ Type* const replacement_type = NodeProperties::GetType(replacement);
+ Type* const original_type = NodeProperties::GetType(original);
+ if (!replacement_type->Is(original_type)) {
+ Node* const control = NodeProperties::GetControlInput(original);
+ replacement = jsgraph->graph()->NewNode(
+ jsgraph->common()->TypeGuard(original_type), replacement, control);
+ }
+ return replacement;
+}
+
+} // namespace
Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
DCHECK(node->opcode() == IrOpcode::kLoadField ||
@@ -104,12 +120,15 @@ Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
fully_reduced_.Add(node->id());
}
- if (Node* rep = escape_analysis()->GetReplacement(node)) {
- isolate()->counters()->turbo_escape_loads_replaced()->Increment();
- TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
- node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
- ReplaceWithValue(node, rep);
- return Replace(rep);
+ if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
+ if (Node* rep = escape_analysis()->GetReplacement(node)) {
+ isolate()->counters()->turbo_escape_loads_replaced()->Increment();
+ TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
+ node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
+ rep = MaybeGuard(jsgraph(), node, rep);
+ ReplaceWithValue(node, rep);
+ return Replace(rep);
+ }
}
return NoChange();
}
@@ -305,6 +324,11 @@ Node* EscapeAnalysisReducer::ReduceStateValueInput(Node* node, int node_index,
if (input->opcode() == IrOpcode::kFinishRegion ||
input->opcode() == IrOpcode::kAllocate) {
if (escape_analysis()->IsVirtual(input)) {
+ if (escape_analysis()->IsCyclicObjectState(effect, input)) {
+ // TODO(mstarzinger): Represent cyclic object states differently to
+ // ensure the scheduler can properly handle such object states.
+ FATAL("Cyclic object state detected by escape analysis.");
+ }
if (Node* object_state =
escape_analysis()->GetOrCreateObjectState(effect, input)) {
if (node_multiused || (multiple_users && !already_cloned)) {
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 437c01fd15..3f889ccbac 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -12,13 +12,13 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
#include "src/objects-inl.h"
-#include "src/type-cache.h"
namespace v8 {
namespace internal {
@@ -795,8 +795,16 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
case IrOpcode::kSelect:
// TODO(mstarzinger): The following list of operators will eventually be
// handled by the EscapeAnalysisReducer (similar to ObjectIsSmi).
+ case IrOpcode::kStringEqual:
+ case IrOpcode::kStringLessThan:
+ case IrOpcode::kStringLessThanOrEqual:
+ case IrOpcode::kPlainPrimitiveToNumber:
+ case IrOpcode::kPlainPrimitiveToWord32:
+ case IrOpcode::kPlainPrimitiveToFloat64:
+ case IrOpcode::kStringCharCodeAt:
case IrOpcode::kObjectIsCallable:
case IrOpcode::kObjectIsNumber:
+ case IrOpcode::kObjectIsReceiver:
case IrOpcode::kObjectIsString:
case IrOpcode::kObjectIsUndetectable:
if (SetEscaped(rep)) {
@@ -853,6 +861,7 @@ EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
status_analysis_(new (zone) EscapeStatusAnalysis(this, graph, zone)),
virtual_states_(zone),
replacements_(zone),
+ cycle_detection_(zone),
cache_(nullptr) {}
EscapeAnalysis::~EscapeAnalysis() {}
@@ -1456,13 +1465,13 @@ void EscapeAnalysis::ProcessStoreField(Node* node) {
int offset = OffsetForFieldAccess(node);
if (static_cast<size_t>(offset) >= object->field_count()) return;
Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 1));
- // TODO(mstarzinger): The following is a workaround to not track the code
- // entry field in virtual JSFunction objects. We only ever store the inner
- // pointer into the compile lazy stub in this field and the deoptimizer has
- // this assumption hard-coded in {TranslatedState::MaterializeAt} as well.
+ // TODO(mstarzinger): The following is a workaround to not track some well
+ // known raw fields. We only ever store default initial values into these
+ // fields which are hard-coded in {TranslatedState::MaterializeAt} as well.
if (val->opcode() == IrOpcode::kInt32Constant ||
val->opcode() == IrOpcode::kInt64Constant) {
- DCHECK_EQ(JSFunction::kCodeEntryOffset, FieldAccessOf(node->op()).offset);
+ DCHECK(FieldAccessOf(node->op()).offset == JSFunction::kCodeEntryOffset ||
+ FieldAccessOf(node->op()).offset == Name::kHashFieldOffset);
val = slot_not_analyzed_;
}
if (object->GetField(offset) != val) {
@@ -1557,6 +1566,27 @@ Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
return nullptr;
}
+bool EscapeAnalysis::IsCyclicObjectState(Node* effect, Node* node) {
+ if ((node->opcode() == IrOpcode::kFinishRegion ||
+ node->opcode() == IrOpcode::kAllocate) &&
+ IsVirtual(node)) {
+ if (VirtualObject* vobj = GetVirtualObject(virtual_states_[effect->id()],
+ ResolveReplacement(node))) {
+ if (cycle_detection_.find(vobj) != cycle_detection_.end()) return true;
+ cycle_detection_.insert(vobj);
+ bool cycle_detected = false;
+ for (size_t i = 0; i < vobj->field_count(); ++i) {
+ if (Node* field = vobj->GetField(i)) {
+ if (IsCyclicObjectState(effect, field)) cycle_detected = true;
+ }
+ }
+ cycle_detection_.erase(vobj);
+ return cycle_detected;
+ }
+ }
+ return false;
+}
+
void EscapeAnalysis::DebugPrintState(VirtualState* state) {
PrintF("Dumping virtual state %p\n", static_cast<void*>(state));
for (Alias alias = 0; alias < status_analysis_->AliasCount(); ++alias) {
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index 839e54ccd3..ec5154e8b1 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -32,6 +32,7 @@ class EscapeAnalysis {
bool IsEscaped(Node* node);
bool CompareVirtualObjects(Node* left, Node* right);
Node* GetOrCreateObjectState(Node* effect, Node* node);
+ bool IsCyclicObjectState(Node* effect, Node* node);
bool ExistsVirtualAllocate();
private:
@@ -75,6 +76,7 @@ class EscapeAnalysis {
EscapeStatusAnalysis* status_analysis_;
ZoneVector<VirtualState*> virtual_states_;
ZoneVector<Node*> replacements_;
+ ZoneSet<VirtualObject*> cycle_detection_;
MergeCache* cache_;
DISALLOW_COPY_AND_ASSIGN(EscapeAnalysis);
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 2ac60a6d1d..a089c12fbf 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_GRAPH_REDUCER_H_
#include "src/compiler/node-marker.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 9fd80ea488..d810c3785a 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -9,7 +9,7 @@
#include <string>
#include "src/code-stubs.h"
-#include "src/compiler.h"
+#include "src/compilation-info.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-properties.h"
@@ -239,7 +239,7 @@ class JSONGraphEdgeWriter {
std::ostream& operator<<(std::ostream& os, const AsJSON& ad) {
- base::AccountingAllocator allocator;
+ AccountingAllocator allocator;
Zone tmp_zone(&allocator);
os << "{\n\"nodes\":[";
JSONGraphNodeWriter(os, &tmp_zone, &ad.graph, ad.positions).Print();
@@ -629,7 +629,7 @@ void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac) {
- base::AccountingAllocator allocator;
+ AccountingAllocator allocator;
Zone tmp_zone(&allocator);
GraphC1Visualizer(os, &tmp_zone).PrintCompilation(ac.info_);
return os;
@@ -637,7 +637,7 @@ std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac) {
std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
- base::AccountingAllocator allocator;
+ AccountingAllocator allocator;
Zone tmp_zone(&allocator);
GraphC1Visualizer(os, &tmp_zone)
.PrintSchedule(ac.phase_, ac.schedule_, ac.positions_, ac.instructions_);
@@ -647,7 +647,7 @@ std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
std::ostream& operator<<(std::ostream& os,
const AsC1VRegisterAllocationData& ac) {
- base::AccountingAllocator allocator;
+ AccountingAllocator allocator;
Zone tmp_zone(&allocator);
GraphC1Visualizer(os, &tmp_zone).PrintLiveRanges(ac.phase_, ac.data_);
return os;
@@ -658,7 +658,7 @@ const int kOnStack = 1;
const int kVisited = 2;
std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
- base::AccountingAllocator allocator;
+ AccountingAllocator allocator;
Zone local_zone(&allocator);
// Do a post-order depth-first search on the RPO graph. For every node,
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index a694a0b414..1d9e85e91d 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_GRAPH_H_
#define V8_COMPILER_GRAPH_H_
-#include "src/zone.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index ad1a9922dc..428570a8c8 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -4,7 +4,7 @@
#include "src/compiler/code-generator.h"
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
@@ -637,9 +637,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDebugBreak:
__ int3();
break;
- case kArchImpossible:
- __ Abort(kConversionFromImpossibleValue);
- break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -649,8 +646,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result =
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result = AssembleDeoptimizerCall(
+ deopt_state_id, bailout_type, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -1786,13 +1783,14 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+ SourcePosition pos) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 1c62de5792..ad7535c7c7 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -28,8 +28,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Imul:
case kIA32ImulHigh:
case kIA32UmulHigh:
- case kIA32Idiv:
- case kIA32Udiv:
case kIA32Not:
case kIA32Neg:
case kIA32Shl:
@@ -103,6 +101,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
+ case kIA32Idiv:
+ case kIA32Udiv:
+ return (instr->addressing_mode() == kMode_None)
+ ? kMayNeedDeoptCheck
+ : kMayNeedDeoptCheck | kIsLoadOperation | kHasSideEffect;
+
case kIA32Movsxbl:
case kIA32Movzxbl:
case kIA32Movb:
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 4a1e19bddd..7e98023f5d 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -250,6 +250,10 @@ void InstructionSelector::VisitLoad(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitStore(Node* node) {
IA32OperandGenerator g(this);
@@ -262,7 +266,7 @@ void InstructionSelector::VisitStore(Node* node) {
MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ DCHECK(CanBeTaggedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index c6689d8e18..22279fea7a 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -57,7 +57,6 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchTableSwitch) \
V(ArchNop) \
V(ArchDebugBreak) \
- V(ArchImpossible) \
V(ArchComment) \
V(ArchThrowTerminator) \
V(ArchDeoptimize) \
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index 2e10794d69..c7fd1ccd66 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -11,11 +11,16 @@ namespace v8 {
namespace internal {
namespace compiler {
-// Compare the two nodes and return true if node1 is a better candidate than
-// node2 (i.e. node1 should be scheduled before node2).
-bool InstructionScheduler::CriticalPathFirstQueue::CompareNodes(
- ScheduleGraphNode *node1, ScheduleGraphNode *node2) const {
- return node1->total_latency() > node2->total_latency();
+void InstructionScheduler::SchedulingQueueBase::AddNode(
+ ScheduleGraphNode* node) {
+ // We keep the ready list sorted by total latency so that we can quickly find
+ // the next best candidate to schedule.
+ auto it = nodes_.begin();
+ while ((it != nodes_.end()) &&
+ ((*it)->total_latency() >= node->total_latency())) {
+ ++it;
+ }
+ nodes_.insert(it, node);
}
@@ -24,12 +29,10 @@ InstructionScheduler::CriticalPathFirstQueue::PopBestCandidate(int cycle) {
DCHECK(!IsEmpty());
auto candidate = nodes_.end();
for (auto iterator = nodes_.begin(); iterator != nodes_.end(); ++iterator) {
- // We only consider instructions that have all their operands ready and
- // we try to schedule the critical path first.
+ // We only consider instructions that have all their operands ready.
if (cycle >= (*iterator)->start_cycle()) {
- if ((candidate == nodes_.end()) || CompareNodes(*iterator, *candidate)) {
- candidate = iterator;
- }
+ candidate = iterator;
+ break;
}
}
@@ -133,9 +136,9 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
last_live_in_reg_marker_->AddSuccessor(new_node);
}
- // Make sure that new instructions are not scheduled before the last
- // deoptimization point.
- if (last_deopt_ != nullptr) {
+ // Make sure that instructions are not scheduled before the last
+ // deoptimization point when they depend on it.
+ if ((last_deopt_ != nullptr) && DependsOnDeoptimization(instr)) {
last_deopt_->AddSuccessor(new_node);
}
@@ -242,7 +245,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchTruncateDoubleToI:
case kArchStackSlot:
case kArchDebugBreak:
- case kArchImpossible:
case kArchComment:
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
diff --git a/deps/v8/src/compiler/instruction-scheduler.h b/deps/v8/src/compiler/instruction-scheduler.h
index 271aa0d0d7..7660520b6d 100644
--- a/deps/v8/src/compiler/instruction-scheduler.h
+++ b/deps/v8/src/compiler/instruction-scheduler.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_INSTRUCTION_SCHEDULER_H_
#include "src/compiler/instruction.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -21,9 +21,12 @@ enum ArchOpcodeFlags {
kHasSideEffect = 2, // The instruction has some side effects (memory
// store, function call...)
kIsLoadOperation = 4, // The instruction is a memory load.
+ kMayNeedDeoptCheck = 8, // The instruction might be associated with a deopt
+ // check. This is the case of instruction which can
+ // blow up with particular inputs (e.g.: division by
+ // zero on Intel platforms).
};
-
class InstructionScheduler final : public ZoneObject {
public:
InstructionScheduler(Zone* zone, InstructionSequence* sequence);
@@ -101,9 +104,7 @@ class InstructionScheduler final : public ZoneObject {
nodes_(scheduler->zone()) {
}
- void AddNode(ScheduleGraphNode* node) {
- nodes_.push_back(node);
- }
+ void AddNode(ScheduleGraphNode* node);
bool IsEmpty() const {
return nodes_.empty();
@@ -125,11 +126,6 @@ class InstructionScheduler final : public ZoneObject {
// Look for the best candidate to schedule, remove it from the queue and
// return it.
ScheduleGraphNode* PopBestCandidate(int cycle);
-
- private:
- // Compare the two nodes and return true if node1 is a better candidate than
- // node2 (i.e. node1 should be scheduled before node2).
- bool CompareNodes(ScheduleGraphNode *node1, ScheduleGraphNode *node2) const;
};
// A queue which pop a random node from the queue to perform stress tests on
@@ -162,12 +158,25 @@ class InstructionScheduler final : public ZoneObject {
// Check whether the given instruction has side effects (e.g. function call,
// memory store).
bool HasSideEffect(const Instruction* instr) const {
- return GetInstructionFlags(instr) & kHasSideEffect;
+ return (GetInstructionFlags(instr) & kHasSideEffect) != 0;
}
// Return true if the instruction is a memory load.
bool IsLoadOperation(const Instruction* instr) const {
- return GetInstructionFlags(instr) & kIsLoadOperation;
+ return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
+ }
+
+ // Return true if this instruction is usually associated with a deopt check
+ // to validate its input.
+ bool MayNeedDeoptCheck(const Instruction* instr) const {
+ return (GetInstructionFlags(instr) & kMayNeedDeoptCheck) != 0;
+ }
+
+ // Return true if the instruction cannot be moved before the last deopt
+ // point we encountered.
+ bool DependsOnDeoptimization(const Instruction* instr) const {
+ return MayNeedDeoptCheck(instr) || instr->IsDeoptimizeCall() ||
+ HasSideEffect(instr) || IsLoadOperation(instr);
}
// Identify nops used as a definition point for live-in registers at
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 25d8a99e86..673d1b0dcb 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -90,6 +90,12 @@ class OperandGenerator {
GetVReg(node)));
}
+ InstructionOperand UseAnyAtEnd(Node* node) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::ANY,
+ UnallocatedOperand::USED_AT_END,
+ GetVReg(node)));
+ }
+
InstructionOperand UseAny(Node* node) {
return Use(node, UnallocatedOperand(UnallocatedOperand::ANY,
UnallocatedOperand::USED_AT_START,
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index ac8e64a58a..b150725b2b 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -22,7 +22,9 @@ InstructionSelector::InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
- SourcePositionMode source_position_mode, Features features)
+ SourcePositionMode source_position_mode, Features features,
+ EnableScheduling enable_scheduling,
+ EnableSerialization enable_serialization)
: zone_(zone),
linkage_(linkage),
sequence_(sequence),
@@ -37,13 +39,16 @@ InstructionSelector::InstructionSelector(
effect_level_(node_count, 0, zone),
virtual_registers_(node_count,
InstructionOperand::kInvalidVirtualRegister, zone),
+ virtual_register_rename_(zone),
scheduler_(nullptr),
- frame_(frame) {
+ enable_scheduling_(enable_scheduling),
+ enable_serialization_(enable_serialization),
+ frame_(frame),
+ instruction_selection_failed_(false) {
instructions_.reserve(node_count);
}
-
-void InstructionSelector::SelectInstructions() {
+bool InstructionSelector::SelectInstructions() {
// Mark the inputs of all phis in loop headers as used.
BasicBlockVector* blocks = schedule()->rpo_order();
for (auto const block : *blocks) {
@@ -62,22 +67,26 @@ void InstructionSelector::SelectInstructions() {
// Visit each basic block in post order.
for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
VisitBlock(*i);
+ if (instruction_selection_failed()) return false;
}
// Schedule the selected instructions.
- if (FLAG_turbo_instruction_scheduling &&
- InstructionScheduler::SchedulerSupported()) {
+ if (UseInstructionScheduling()) {
scheduler_ = new (zone()) InstructionScheduler(zone(), sequence());
}
for (auto const block : *blocks) {
InstructionBlock* instruction_block =
sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
+ for (size_t i = 0; i < instruction_block->phis().size(); i++) {
+ UpdateRenamesInPhi(instruction_block->PhiAt(i));
+ }
size_t end = instruction_block->code_end();
size_t start = instruction_block->code_start();
DCHECK_LE(end, start);
StartBlock(RpoNumber::FromInt(block->rpo_number()));
while (start-- > end) {
+ UpdateRenames(instructions_[start]);
AddInstruction(instructions_[start]);
}
EndBlock(RpoNumber::FromInt(block->rpo_number()));
@@ -85,11 +94,11 @@ void InstructionSelector::SelectInstructions() {
#if DEBUG
sequence()->ValidateSSA();
#endif
+ return true;
}
void InstructionSelector::StartBlock(RpoNumber rpo) {
- if (FLAG_turbo_instruction_scheduling &&
- InstructionScheduler::SchedulerSupported()) {
+ if (UseInstructionScheduling()) {
DCHECK_NOT_NULL(scheduler_);
scheduler_->StartBlock(rpo);
} else {
@@ -99,8 +108,7 @@ void InstructionSelector::StartBlock(RpoNumber rpo) {
void InstructionSelector::EndBlock(RpoNumber rpo) {
- if (FLAG_turbo_instruction_scheduling &&
- InstructionScheduler::SchedulerSupported()) {
+ if (UseInstructionScheduling()) {
DCHECK_NOT_NULL(scheduler_);
scheduler_->EndBlock(rpo);
} else {
@@ -110,8 +118,7 @@ void InstructionSelector::EndBlock(RpoNumber rpo) {
void InstructionSelector::AddInstruction(Instruction* instr) {
- if (FLAG_turbo_instruction_scheduling &&
- InstructionScheduler::SchedulerSupported()) {
+ if (UseInstructionScheduling()) {
DCHECK_NOT_NULL(scheduler_);
scheduler_->AddInstruction(instr);
} else {
@@ -206,6 +213,13 @@ Instruction* InstructionSelector::Emit(
InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
size_t input_count, InstructionOperand* inputs, size_t temp_count,
InstructionOperand* temps) {
+ if (output_count >= Instruction::kMaxOutputCount ||
+ input_count >= Instruction::kMaxInputCount ||
+ temp_count >= Instruction::kMaxTempCount) {
+ set_instruction_selection_failed();
+ return nullptr;
+ }
+
Instruction* instr =
Instruction::New(instruction_zone(), opcode, output_count, outputs,
input_count, inputs, temp_count, temps);
@@ -255,6 +269,53 @@ bool InstructionSelector::IsOnlyUserOfNodeInSameBlock(Node* user,
return true;
}
+void InstructionSelector::UpdateRenames(Instruction* instruction) {
+ for (size_t i = 0; i < instruction->InputCount(); i++) {
+ TryRename(instruction->InputAt(i));
+ }
+}
+
+void InstructionSelector::UpdateRenamesInPhi(PhiInstruction* phi) {
+ for (size_t i = 0; i < phi->operands().size(); i++) {
+ int vreg = phi->operands()[i];
+ int renamed = GetRename(vreg);
+ if (vreg != renamed) {
+ phi->RenameInput(i, renamed);
+ }
+ }
+}
+
+int InstructionSelector::GetRename(int virtual_register) {
+ int rename = virtual_register;
+ while (true) {
+ if (static_cast<size_t>(rename) >= virtual_register_rename_.size()) break;
+ int next = virtual_register_rename_[rename];
+ if (next == InstructionOperand::kInvalidVirtualRegister) {
+ break;
+ }
+ rename = next;
+ }
+ return rename;
+}
+
+void InstructionSelector::TryRename(InstructionOperand* op) {
+ if (!op->IsUnallocated()) return;
+ int vreg = UnallocatedOperand::cast(op)->virtual_register();
+ int rename = GetRename(vreg);
+ if (rename != vreg) {
+ UnallocatedOperand::cast(op)->set_virtual_register(rename);
+ }
+}
+
+void InstructionSelector::SetRename(const Node* node, const Node* rename) {
+ int vreg = GetVirtualRegister(node);
+ if (static_cast<size_t>(vreg) >= virtual_register_rename_.size()) {
+ int invalid = InstructionOperand::kInvalidVirtualRegister;
+ virtual_register_rename_.resize(vreg + 1, invalid);
+ }
+ virtual_register_rename_[vreg] = GetVirtualRegister(rename);
+}
+
int InstructionSelector::GetVirtualRegister(const Node* node) {
DCHECK_NOT_NULL(node);
size_t const id = node->id();
@@ -330,6 +391,12 @@ void InstructionSelector::SetEffectLevel(Node* node, int effect_level) {
effect_level_[id] = effect_level;
}
+bool InstructionSelector::CanAddressRelativeToRootsRegister() const {
+ return (enable_serialization_ == kDisableSerialization &&
+ (linkage()->GetIncomingDescriptor()->flags() &
+ CallDescriptor::kCanUseRoots));
+}
+
void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
const InstructionOperand& op) {
UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
@@ -350,6 +417,10 @@ enum class FrameStateInputKind { kAny, kStackSlot };
InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
FrameStateInputKind kind,
MachineRepresentation rep) {
+ if (rep == MachineRepresentation::kNone) {
+ return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
+ }
+
switch (input->opcode()) {
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
@@ -362,15 +433,13 @@ InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
UNREACHABLE();
break;
default:
- if (rep == MachineRepresentation::kNone) {
- return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
- } else {
- switch (kind) {
- case FrameStateInputKind::kStackSlot:
- return g->UseUniqueSlot(input);
- case FrameStateInputKind::kAny:
- return g->UseAny(input);
- }
+ switch (kind) {
+ case FrameStateInputKind::kStackSlot:
+ return g->UseUniqueSlot(input);
+ case FrameStateInputKind::kAny:
+ // Currently deopts "wrap" other operations, so the deopt's inputs
+ // are potentially needed untill the end of the deoptimising code.
+ return g->UseAnyAtEnd(input);
}
}
UNREACHABLE();
@@ -716,7 +785,6 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
}
}
-
void InstructionSelector::VisitBlock(BasicBlock* block) {
DCHECK(!current_block_);
current_block_ = block;
@@ -753,6 +821,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
// up".
size_t current_node_end = instructions_.size();
VisitNode(node);
+ if (instruction_selection_failed()) return;
std::reverse(instructions_.begin() + current_node_end, instructions_.end());
if (instructions_.size() == current_node_end) continue;
// Mark source position on first instruction emitted.
@@ -1053,8 +1122,14 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitUint64LessThanOrEqual(node);
case IrOpcode::kUint64Mod:
return MarkAsWord64(node), VisitUint64Mod(node);
+ case IrOpcode::kBitcastTaggedToWord:
+ return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
+ VisitBitcastTaggedToWord(node);
case IrOpcode::kBitcastWordToTagged:
return MarkAsReference(node), VisitBitcastWordToTagged(node);
+ case IrOpcode::kBitcastWordToTaggedSigned:
+ return MarkAsRepresentation(MachineRepresentation::kTaggedSigned, node),
+ EmitIdentity(node);
case IrOpcode::kChangeFloat32ToFloat64:
return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
case IrOpcode::kChangeInt32ToFloat64:
@@ -1065,19 +1140,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
- case IrOpcode::kImpossibleToWord32:
- return MarkAsWord32(node), VisitImpossibleToWord32(node);
- case IrOpcode::kImpossibleToWord64:
- return MarkAsWord64(node), VisitImpossibleToWord64(node);
- case IrOpcode::kImpossibleToFloat32:
- return MarkAsFloat32(node), VisitImpossibleToFloat32(node);
- case IrOpcode::kImpossibleToFloat64:
- return MarkAsFloat64(node), VisitImpossibleToFloat64(node);
- case IrOpcode::kImpossibleToTagged:
- MarkAsRepresentation(MachineType::PointerRepresentation(), node);
- return VisitImpossibleToTagged(node);
- case IrOpcode::kImpossibleToBit:
- return MarkAsWord32(node), VisitImpossibleToBit(node);
case IrOpcode::kFloat64SilenceNaN:
MarkAsFloat64(node);
if (CanProduceSignalingNaN(node->InputAt(0))) {
@@ -1304,9 +1366,15 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kAtomicStore:
return VisitAtomicStore(node);
+ case IrOpcode::kProtectedLoad:
+ return VisitProtectedLoad(node);
case IrOpcode::kUnsafePointerAdd:
MarkAsRepresentation(MachineType::PointerRepresentation(), node);
return VisitUnsafePointerAdd(node);
+ case IrOpcode::kCreateInt32x4:
+ return MarkAsSimd128(node), VisitCreateInt32x4(node);
+ case IrOpcode::kInt32x4ExtractLane:
+ return MarkAsWord32(node), VisitInt32x4ExtractLane(node);
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
@@ -1314,42 +1382,6 @@ void InstructionSelector::VisitNode(Node* node) {
}
}
-void InstructionSelector::VisitImpossibleToWord32(Node* node) {
- OperandGenerator g(this);
- Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0)));
-}
-
-void InstructionSelector::VisitImpossibleToWord64(Node* node) {
- OperandGenerator g(this);
- Emit(kArchImpossible,
- g.DefineAsConstant(node, Constant(static_cast<int64_t>(0))));
-}
-
-void InstructionSelector::VisitImpossibleToFloat32(Node* node) {
- OperandGenerator g(this);
- Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0.0f)));
-}
-
-void InstructionSelector::VisitImpossibleToFloat64(Node* node) {
- OperandGenerator g(this);
- Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0.0)));
-}
-
-void InstructionSelector::VisitImpossibleToBit(Node* node) {
- OperandGenerator g(this);
- Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0)));
-}
-
-void InstructionSelector::VisitImpossibleToTagged(Node* node) {
- OperandGenerator g(this);
-#if V8_TARGET_ARCH_64_BIT
- Emit(kArchImpossible,
- g.DefineAsConstant(node, Constant(static_cast<int64_t>(0))));
-#else // V8_TARGET_ARCH_64_BIT
- Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0)));
-#endif // V8_TARGET_ARCH_64_BIT
-}
-
void InstructionSelector::VisitLoadStackPointer(Node* node) {
OperandGenerator g(this);
Emit(kArchStackPointer, g.DefineAsRegister(node));
@@ -1493,8 +1525,14 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
+void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+}
+
void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
- EmitIdentity(node);
+ OperandGenerator g(this);
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
}
// 32 bit targets do not implement the following instructions.
@@ -1647,7 +1685,6 @@ void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
UNIMPLEMENTED();
}
-
#endif // V8_TARGET_ARCH_32_BIT
// 64 bit targets do not implement the following instructions.
@@ -1665,6 +1702,14 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
+#if !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitCreateInt32x4(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
+ UNIMPLEMENTED();
+}
+#endif // !V8_TARGET_ARCH_X64
+
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
void InstructionSelector::VisitParameter(Node* node) {
@@ -1680,13 +1725,17 @@ void InstructionSelector::VisitParameter(Node* node) {
Emit(kArchNop, op);
}
+namespace {
+LinkageLocation ExceptionLocation() {
+ return LinkageLocation::ForRegister(kReturnRegister0.code(),
+ MachineType::IntPtr());
+}
+}
void InstructionSelector::VisitIfException(Node* node) {
OperandGenerator g(this);
- Node* call = node->InputAt(1);
- DCHECK_EQ(IrOpcode::kCall, call->opcode());
- const CallDescriptor* descriptor = CallDescriptorOf(call->op());
- Emit(kArchNop, g.DefineAsLocation(node, descriptor->GetReturnLocation(0)));
+ DCHECK_EQ(IrOpcode::kCall, node->InputAt(1)->opcode());
+ Emit(kArchNop, g.DefineAsLocation(node, ExceptionLocation()));
}
@@ -1812,9 +1861,11 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Emit the call instruction.
size_t const output_count = buffer.outputs.size();
auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())
- ->MarkAsCall();
+ Instruction* call_instr =
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front());
+ if (instruction_selection_failed()) return;
+ call_instr->MarkAsCall();
}
@@ -1920,9 +1971,11 @@ void InstructionSelector::VisitTailCall(Node* node) {
// Emit the call instruction.
size_t output_count = buffer.outputs.size();
auto* outputs = &buffer.outputs.front();
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())
- ->MarkAsCall();
+ Instruction* call_instr =
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front());
+ if (instruction_selection_failed()) return;
+ call_instr->MarkAsCall();
Emit(kArchRet, 0, nullptr, output_count, outputs);
}
}
@@ -1984,8 +2037,8 @@ Instruction* InstructionSelector::EmitDeoptimize(
void InstructionSelector::EmitIdentity(Node* node) {
OperandGenerator g(this);
- Node* value = node->InputAt(0);
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ MarkAsUsed(node->InputAt(0));
+ SetRename(node, node->InputAt(0));
}
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index f9f43e9f35..2981f90a1c 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -8,11 +8,11 @@
#include <map>
#include "src/compiler/common-operator.h"
-#include "src/compiler/instruction.h"
#include "src/compiler/instruction-scheduler.h"
+#include "src/compiler/instruction.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -48,16 +48,22 @@ class InstructionSelector final {
class Features;
enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
+ enum EnableScheduling { kDisableScheduling, kEnableScheduling };
+ enum EnableSerialization { kDisableSerialization, kEnableSerialization };
InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
SourcePositionMode source_position_mode = kCallSourcePositions,
- Features features = SupportedFeatures());
+ Features features = SupportedFeatures(),
+ EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
+ ? kEnableScheduling
+ : kDisableScheduling,
+ EnableSerialization enable_serialization = kDisableSerialization);
// Visit code for the entire graph with the included schedule.
- void SelectInstructions();
+ bool SelectInstructions();
void StartBlock(RpoNumber rpo);
void EndBlock(RpoNumber rpo);
@@ -194,15 +200,31 @@ class InstructionSelector final {
int GetVirtualRegister(const Node* node);
const std::map<NodeId, int> GetVirtualRegistersForTesting() const;
+ // Check if we can generate loads and stores of ExternalConstants relative
+ // to the roots register, i.e. if both a root register is available for this
+ // compilation unit and the serializer is disabled.
+ bool CanAddressRelativeToRootsRegister() const;
+
Isolate* isolate() const { return sequence()->isolate(); }
private:
friend class OperandGenerator;
+ bool UseInstructionScheduling() const {
+ return (enable_scheduling_ == kEnableScheduling) &&
+ InstructionScheduler::SchedulerSupported();
+ }
+
void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
void EmitLookupSwitch(const SwitchInfo& sw,
InstructionOperand& value_operand);
+ void TryRename(InstructionOperand* op);
+ int GetRename(int virtual_register);
+ void SetRename(const Node* node, const Node* rename);
+ void UpdateRenames(Instruction* instruction);
+ void UpdateRenamesInPhi(PhiInstruction* phi);
+
// Inform the instruction selection that {node} was just defined.
void MarkAsDefined(Node* node);
@@ -228,6 +250,9 @@ class InstructionSelector final {
void MarkAsFloat64(Node* node) {
MarkAsRepresentation(MachineRepresentation::kFloat64, node);
}
+ void MarkAsSimd128(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kSimd128, node);
+ }
void MarkAsReference(Node* node) {
MarkAsRepresentation(MachineRepresentation::kTagged, node);
}
@@ -276,6 +301,8 @@ class InstructionSelector final {
#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
MACHINE_OP_LIST(DECLARE_GENERATOR)
+ MACHINE_SIMD_RETURN_NUM_OP_LIST(DECLARE_GENERATOR)
+ MACHINE_SIMD_RETURN_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
void VisitFinishRegion(Node* node);
@@ -312,6 +339,11 @@ class InstructionSelector final {
Zone* instruction_zone() const { return sequence()->zone(); }
Zone* zone() const { return zone_; }
+ void set_instruction_selection_failed() {
+ instruction_selection_failed_ = true;
+ }
+ bool instruction_selection_failed() { return instruction_selection_failed_; }
+
// ===========================================================================
Zone* const zone_;
@@ -327,8 +359,12 @@ class InstructionSelector final {
BoolVector used_;
IntVector effect_level_;
IntVector virtual_registers_;
+ IntVector virtual_register_rename_;
InstructionScheduler* scheduler_;
+ EnableScheduling enable_scheduling_;
+ EnableSerialization enable_serialization_;
Frame* frame_;
+ bool instruction_selection_failed_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 615b644334..0df7ca0316 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -314,7 +314,6 @@ bool Instruction::AreMovesRedundant() const {
return true;
}
-
void Instruction::Print(const RegisterConfiguration* config) const {
OFStream os(stdout);
PrintableInstruction wrapper;
@@ -569,6 +568,10 @@ void PhiInstruction::SetInput(size_t offset, int virtual_register) {
operands_[offset] = virtual_register;
}
+void PhiInstruction::RenameInput(size_t offset, int virtual_register) {
+ DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, operands_[offset]);
+ operands_[offset] = virtual_register;
+}
InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number,
RpoNumber loop_header, RpoNumber loop_end,
@@ -631,6 +634,58 @@ static InstructionBlock* InstructionBlockFor(Zone* zone,
return instr_block;
}
+std::ostream& operator<<(std::ostream& os,
+ PrintableInstructionBlock& printable_block) {
+ const InstructionBlock* block = printable_block.block_;
+ const RegisterConfiguration* config = printable_block.register_configuration_;
+ const InstructionSequence* code = printable_block.code_;
+
+ os << "B" << block->rpo_number();
+ os << ": AO#" << block->ao_number();
+ if (block->IsDeferred()) os << " (deferred)";
+ if (!block->needs_frame()) os << " (no frame)";
+ if (block->must_construct_frame()) os << " (construct frame)";
+ if (block->must_deconstruct_frame()) os << " (deconstruct frame)";
+ if (block->IsLoopHeader()) {
+ os << " loop blocks: [" << block->rpo_number() << ", " << block->loop_end()
+ << ")";
+ }
+ os << " instructions: [" << block->code_start() << ", " << block->code_end()
+ << ")" << std::endl
+ << " predecessors:";
+
+ for (RpoNumber pred : block->predecessors()) {
+ os << " B" << pred.ToInt();
+ }
+ os << std::endl;
+
+ for (const PhiInstruction* phi : block->phis()) {
+ PrintableInstructionOperand printable_op = {config, phi->output()};
+ os << " phi: " << printable_op << " =";
+ for (int input : phi->operands()) {
+ os << " v" << input;
+ }
+ os << std::endl;
+ }
+
+ ScopedVector<char> buf(32);
+ PrintableInstruction printable_instr;
+ printable_instr.register_configuration_ = config;
+ for (int j = block->first_instruction_index();
+ j <= block->last_instruction_index(); j++) {
+ // TODO(svenpanne) Add some basic formatting to our streams.
+ SNPrintF(buf, "%5d", j);
+ printable_instr.instr_ = code->InstructionAt(j);
+ os << " " << buf.start() << ": " << printable_instr << std::endl;
+ }
+
+ for (RpoNumber succ : block->successors()) {
+ os << " B" << succ.ToInt();
+ }
+ os << std::endl;
+ return os;
+}
+
InstructionBlocks* InstructionSequence::InstructionBlocksFor(
Zone* zone, const Schedule* schedule) {
InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
@@ -874,7 +929,6 @@ void InstructionSequence::SetSourcePosition(const Instruction* instr,
source_positions_.insert(std::make_pair(instr, value));
}
-
void InstructionSequence::Print(const RegisterConfiguration* config) const {
OFStream os(stdout);
PrintableInstructionSequence wrapper;
@@ -891,49 +945,8 @@ void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
RpoNumber rpo = RpoNumber::FromInt(block_id);
const InstructionBlock* block = InstructionBlockAt(rpo);
CHECK(block->rpo_number() == rpo);
-
- os << "B" << block->rpo_number();
- os << ": AO#" << block->ao_number();
- if (block->IsDeferred()) os << " (deferred)";
- if (!block->needs_frame()) os << " (no frame)";
- if (block->must_construct_frame()) os << " (construct frame)";
- if (block->must_deconstruct_frame()) os << " (deconstruct frame)";
- if (block->IsLoopHeader()) {
- os << " loop blocks: [" << block->rpo_number() << ", " << block->loop_end()
- << ")";
- }
- os << " instructions: [" << block->code_start() << ", " << block->code_end()
- << ")\n predecessors:";
-
- for (RpoNumber pred : block->predecessors()) {
- os << " B" << pred.ToInt();
- }
- os << "\n";
-
- for (const PhiInstruction* phi : block->phis()) {
- PrintableInstructionOperand printable_op = {config, phi->output()};
- os << " phi: " << printable_op << " =";
- for (int input : phi->operands()) {
- os << " v" << input;
- }
- os << "\n";
- }
-
- ScopedVector<char> buf(32);
- PrintableInstruction printable_instr;
- printable_instr.register_configuration_ = config;
- for (int j = block->first_instruction_index();
- j <= block->last_instruction_index(); j++) {
- // TODO(svenpanne) Add some basic formatting to our streams.
- SNPrintF(buf, "%5d", j);
- printable_instr.instr_ = InstructionAt(j);
- os << " " << buf.start() << ": " << printable_instr << "\n";
- }
-
- for (RpoNumber succ : block->successors()) {
- os << " B" << succ.ToInt();
- }
- os << "\n";
+ PrintableInstructionBlock printable_block = {config, block, this};
+ os << printable_block << std::endl;
}
void InstructionSequence::PrintBlock(int block_id) const {
@@ -1020,8 +1033,11 @@ std::ostream& operator<<(std::ostream& os,
it != code.constants_.end(); ++i, ++it) {
os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
}
+ PrintableInstructionBlock printable_block = {
+ printable.register_configuration_, nullptr, printable.sequence_};
for (int i = 0; i < code.InstructionBlockCount(); i++) {
- printable.sequence_->PrintBlock(printable.register_configuration_, i);
+ printable_block.block_ = code.InstructionBlockAt(RpoNumber::FromInt(i));
+ os << printable_block;
}
return os;
}
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index b5aea707d2..b5c5914166 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -17,7 +17,7 @@
#include "src/compiler/source-position.h"
#include "src/macro-assembler.h"
#include "src/register-configuration.h"
-#include "src/zone-allocator.h"
+#include "src/zone/zone-allocator.h"
namespace v8 {
namespace internal {
@@ -33,7 +33,17 @@ class InstructionOperand {
// TODO(dcarney): recover bit. INVALID can be represented as UNALLOCATED with
// kInvalidVirtualRegister and some DCHECKS.
- enum Kind { INVALID, UNALLOCATED, CONSTANT, IMMEDIATE, EXPLICIT, ALLOCATED };
+ enum Kind {
+ INVALID,
+ UNALLOCATED,
+ CONSTANT,
+ IMMEDIATE,
+ // Location operand kinds.
+ EXPLICIT,
+ ALLOCATED,
+ FIRST_LOCATION_OPERAND_KIND = EXPLICIT
+ // Location operand kinds must be last.
+ };
InstructionOperand() : InstructionOperand(INVALID) {}
@@ -64,12 +74,16 @@ class InstructionOperand {
INSTRUCTION_OPERAND_PREDICATE(Allocated, ALLOCATED)
#undef INSTRUCTION_OPERAND_PREDICATE
+ inline bool IsAnyLocationOperand() const;
+ inline bool IsLocationOperand() const;
+ inline bool IsFPLocationOperand() const;
inline bool IsAnyRegister() const;
inline bool IsRegister() const;
inline bool IsFPRegister() const;
inline bool IsFloatRegister() const;
inline bool IsDoubleRegister() const;
inline bool IsSimd128Register() const;
+ inline bool IsAnyStackSlot() const;
inline bool IsStackSlot() const;
inline bool IsFPStackSlot() const;
inline bool IsFloatStackSlot() const;
@@ -105,6 +119,7 @@ class InstructionOperand {
bool InterferesWith(const InstructionOperand& that) const;
+ // APIs to aid debugging. For general-stream APIs, use operator<<
void Print(const RegisterConfiguration* config) const;
void Print() const;
@@ -481,17 +496,17 @@ class LocationOperand : public InstructionOperand {
}
static LocationOperand* cast(InstructionOperand* op) {
- DCHECK(ALLOCATED == op->kind() || EXPLICIT == op->kind());
+ DCHECK(op->IsAnyLocationOperand());
return static_cast<LocationOperand*>(op);
}
static const LocationOperand* cast(const InstructionOperand* op) {
- DCHECK(ALLOCATED == op->kind() || EXPLICIT == op->kind());
+ DCHECK(op->IsAnyLocationOperand());
return static_cast<const LocationOperand*>(op);
}
static LocationOperand cast(const InstructionOperand& op) {
- DCHECK(ALLOCATED == op.kind() || EXPLICIT == op.kind());
+ DCHECK(op.IsAnyLocationOperand());
return *static_cast<const LocationOperand*>(&op);
}
@@ -531,9 +546,22 @@ class AllocatedOperand : public LocationOperand {
#undef INSTRUCTION_OPERAND_CASTS
+bool InstructionOperand::IsAnyLocationOperand() const {
+ return this->kind() >= FIRST_LOCATION_OPERAND_KIND;
+}
+
+bool InstructionOperand::IsLocationOperand() const {
+ return IsAnyLocationOperand() &&
+ !IsFloatingPoint(LocationOperand::cast(this)->representation());
+}
+
+bool InstructionOperand::IsFPLocationOperand() const {
+ return IsAnyLocationOperand() &&
+ IsFloatingPoint(LocationOperand::cast(this)->representation());
+}
bool InstructionOperand::IsAnyRegister() const {
- return (IsAllocated() || IsExplicit()) &&
+ return IsAnyLocationOperand() &&
LocationOperand::cast(this)->location_kind() ==
LocationOperand::REGISTER;
}
@@ -567,22 +595,24 @@ bool InstructionOperand::IsSimd128Register() const {
MachineRepresentation::kSimd128;
}
-bool InstructionOperand::IsStackSlot() const {
- return (IsAllocated() || IsExplicit()) &&
+bool InstructionOperand::IsAnyStackSlot() const {
+ return IsAnyLocationOperand() &&
LocationOperand::cast(this)->location_kind() ==
- LocationOperand::STACK_SLOT &&
+ LocationOperand::STACK_SLOT;
+}
+
+bool InstructionOperand::IsStackSlot() const {
+ return IsAnyStackSlot() &&
!IsFloatingPoint(LocationOperand::cast(this)->representation());
}
bool InstructionOperand::IsFPStackSlot() const {
- return (IsAllocated() || IsExplicit()) &&
- LocationOperand::cast(this)->location_kind() ==
- LocationOperand::STACK_SLOT &&
+ return IsAnyStackSlot() &&
IsFloatingPoint(LocationOperand::cast(this)->representation());
}
bool InstructionOperand::IsFloatStackSlot() const {
- return (IsAllocated() || IsExplicit()) &&
+ return IsAnyLocationOperand() &&
LocationOperand::cast(this)->location_kind() ==
LocationOperand::STACK_SLOT &&
LocationOperand::cast(this)->representation() ==
@@ -590,7 +620,7 @@ bool InstructionOperand::IsFloatStackSlot() const {
}
bool InstructionOperand::IsDoubleStackSlot() const {
- return (IsAllocated() || IsExplicit()) &&
+ return IsAnyLocationOperand() &&
LocationOperand::cast(this)->location_kind() ==
LocationOperand::STACK_SLOT &&
LocationOperand::cast(this)->representation() ==
@@ -598,7 +628,7 @@ bool InstructionOperand::IsDoubleStackSlot() const {
}
bool InstructionOperand::IsSimd128StackSlot() const {
- return (IsAllocated() || IsExplicit()) &&
+ return IsAnyLocationOperand() &&
LocationOperand::cast(this)->location_kind() ==
LocationOperand::STACK_SLOT &&
LocationOperand::cast(this)->representation() ==
@@ -606,7 +636,7 @@ bool InstructionOperand::IsSimd128StackSlot() const {
}
uint64_t InstructionOperand::GetCanonicalizedValue() const {
- if (IsAllocated() || IsExplicit()) {
+ if (IsAnyLocationOperand()) {
MachineRepresentation canonical = MachineRepresentation::kNone;
if (IsFPRegister()) {
// We treat all FP register operands the same for simple aliasing.
@@ -672,6 +702,7 @@ class MoveOperands final : public ZoneObject {
return source_.IsInvalid();
}
+ // APIs to aid debugging. For general-stream APIs, use operator<<
void Print(const RegisterConfiguration* config) const;
void Print() const;
@@ -856,10 +887,7 @@ class Instruction final {
reference_map_ = nullptr;
}
- bool IsNop() const {
- return arch_opcode() == kArchNop && InputCount() == 0 &&
- OutputCount() == 0 && TempCount() == 0;
- }
+ bool IsNop() const { return arch_opcode() == kArchNop; }
bool IsDeoptimizeCall() const {
return arch_opcode() == ArchOpcode::kArchDeoptimize ||
@@ -915,9 +943,18 @@ class Instruction final {
block_ = block;
}
+ // APIs to aid debugging. For general-stream APIs, use operator<<
void Print(const RegisterConfiguration* config) const;
void Print() const;
+ typedef BitField<size_t, 0, 8> OutputCountField;
+ typedef BitField<size_t, 8, 16> InputCountField;
+ typedef BitField<size_t, 24, 6> TempCountField;
+
+ static const size_t kMaxOutputCount = OutputCountField::kMax;
+ static const size_t kMaxInputCount = InputCountField::kMax;
+ static const size_t kMaxTempCount = TempCountField::kMax;
+
private:
explicit Instruction(InstructionCode opcode);
@@ -926,9 +963,6 @@ class Instruction final {
InstructionOperand* inputs, size_t temp_count,
InstructionOperand* temps);
- typedef BitField<size_t, 0, 8> OutputCountField;
- typedef BitField<size_t, 8, 16> InputCountField;
- typedef BitField<size_t, 24, 6> TempCountField;
typedef BitField<bool, 30, 1> IsCallField;
InstructionCode opcode_;
@@ -1184,6 +1218,7 @@ class PhiInstruction final : public ZoneObject {
PhiInstruction(Zone* zone, int virtual_register, size_t input_count);
void SetInput(size_t offset, int virtual_register);
+ void RenameInput(size_t offset, int virtual_register);
int virtual_register() const { return virtual_register_; }
const IntVector& operands() const { return operands_; }
@@ -1251,6 +1286,7 @@ class InstructionBlock final : public ZoneObject {
typedef ZoneVector<PhiInstruction*> PhiInstructions;
const PhiInstructions& phis() const { return phis_; }
+ PhiInstruction* PhiAt(size_t i) const { return phis_[i]; }
void AddPhi(PhiInstruction* phi) { phis_.push_back(phi); }
void set_ao_number(RpoNumber ao_number) { ao_number_ = ao_number; }
@@ -1285,6 +1321,17 @@ class InstructionBlock final : public ZoneObject {
RpoNumber last_deferred_;
};
+class InstructionSequence;
+
+struct PrintableInstructionBlock {
+ const RegisterConfiguration* register_configuration_;
+ const InstructionBlock* block_;
+ const InstructionSequence* code_;
+};
+
+std::ostream& operator<<(std::ostream& os,
+ const PrintableInstructionBlock& printable_block);
+
typedef ZoneDeque<Constant> ConstantDeque;
typedef std::map<int, Constant, std::less<int>,
zone_allocator<std::pair<const int, Constant> > > ConstantMap;
@@ -1343,8 +1390,7 @@ class InstructionSequence final : public ZoneObject {
void MarkAsRepresentation(MachineRepresentation rep, int virtual_register);
bool IsReference(int virtual_register) const {
- return GetRepresentation(virtual_register) ==
- MachineRepresentation::kTagged;
+ return CanBeTaggedPointer(GetRepresentation(virtual_register));
}
bool IsFP(int virtual_register) const {
return IsFloatingPoint(GetRepresentation(virtual_register));
@@ -1445,6 +1491,8 @@ class InstructionSequence final : public ZoneObject {
}
return false;
}
+
+ // APIs to aid debugging. For general-stream APIs, use operator<<
void Print(const RegisterConfiguration* config) const;
void Print() const;
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 737947aad0..539a372504 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -13,7 +13,7 @@
#include "src/compiler/node.h"
#include "src/wasm/wasm-module.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -778,6 +778,18 @@ void Int64Lowering::LowerNode(Node* node) {
}
break;
}
+ case IrOpcode::kProjection: {
+ Node* call = node->InputAt(0);
+ DCHECK_EQ(IrOpcode::kCall, call->opcode());
+ CallDescriptor* descriptor =
+ const_cast<CallDescriptor*>(CallDescriptorOf(call->op()));
+ for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
+ if (descriptor->GetReturnType(i) == MachineType::Int64()) {
+ UNREACHABLE(); // TODO(titzer): implement multiple i64 returns.
+ }
+ }
+ break;
+ }
case IrOpcode::kWord64ReverseBytes: {
Node* input = node->InputAt(0);
ReplaceNode(node, graph()->NewNode(machine()->Word32ReverseBytes().op(),
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 4ec4e821eb..084c07a87c 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -9,7 +9,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-marker.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 926bd3f715..41d4a00166 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -10,9 +10,9 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
+#include "src/compiler/types.h"
#include "src/objects-inl.h"
-#include "src/type-cache.h"
-#include "src/types.h"
namespace v8 {
namespace internal {
@@ -275,8 +275,8 @@ Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
// here is to learn on deopt, i.e. disable Array.prototype.push inlining
// for this function.
if (IsFastSmiElementsKind(receiver_map->elements_kind())) {
- value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
- value, effect, control);
+ value = effect =
+ graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
} else if (IsFastDoubleElementsKind(receiver_map->elements_kind())) {
value = effect =
graph()->NewNode(simplified()->CheckNumber(), value, effect, control);
@@ -323,6 +323,123 @@ Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
return NoChange();
}
+namespace {
+
+bool HasInstanceTypeWitness(Node* receiver, Node* effect,
+ InstanceType instance_type) {
+ for (Node* dominator = effect;;) {
+ if (dominator->opcode() == IrOpcode::kCheckMaps &&
+ dominator->InputAt(0) == receiver) {
+ // Check if all maps have the given {instance_type}.
+ for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
+ Node* const map = NodeProperties::GetValueInput(dominator, i);
+ Type* const map_type = NodeProperties::GetType(map);
+ if (!map_type->IsConstant()) return false;
+ Handle<Map> const map_value =
+ Handle<Map>::cast(map_type->AsConstant()->Value());
+ if (map_value->instance_type() != instance_type) return false;
+ }
+ return true;
+ }
+ switch (dominator->opcode()) {
+ case IrOpcode::kStoreField: {
+ FieldAccess const& access = FieldAccessOf(dominator->op());
+ if (access.base_is_tagged == kTaggedBase &&
+ access.offset == HeapObject::kMapOffset) {
+ return false;
+ }
+ break;
+ }
+ case IrOpcode::kStoreElement:
+ case IrOpcode::kStoreTypedElement:
+ break;
+ default: {
+ DCHECK_EQ(1, dominator->op()->EffectOutputCount());
+ if (dominator->op()->EffectInputCount() != 1 ||
+ !dominator->op()->HasProperty(Operator::kNoWrite)) {
+ // Didn't find any appropriate CheckMaps node.
+ return false;
+ }
+ break;
+ }
+ }
+ dominator = NodeProperties::GetEffectInput(dominator);
+ }
+}
+
+} // namespace
+
+// ES6 section 20.3.4.10 Date.prototype.getTime ( )
+Reduction JSBuiltinReducer::ReduceDateGetTime(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (HasInstanceTypeWitness(receiver, effect, JS_DATE_TYPE)) {
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSDateValue()), receiver,
+ effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V )
+Reduction JSBuiltinReducer::ReduceFunctionHasInstance(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* object = (node->op()->ValueInputCount() >= 3)
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // TODO(turbofan): If JSOrdinaryToInstance raises an exception, the
+ // stack trace doesn't contain the @@hasInstance call; we have the
+ // corresponding bug in the baseline case. Some massaging of the frame
+ // state would be necessary here.
+
+ // Morph this {node} into a JSOrdinaryHasInstance node.
+ node->ReplaceInput(0, receiver);
+ node->ReplaceInput(1, object);
+ node->ReplaceInput(2, context);
+ node->ReplaceInput(3, frame_state);
+ node->ReplaceInput(4, effect);
+ node->ReplaceInput(5, control);
+ node->TrimInputCount(6);
+ NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
+ return Changed(node);
+}
+
+// ES6 section 18.2.2 isFinite ( number )
+Reduction JSBuiltinReducer::ReduceGlobalIsFinite(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // isFinite(a:plain-primitive) -> NumberEqual(a', a')
+ // where a' = NumberSubtract(ToNumber(a), ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* diff = graph()->NewNode(simplified()->NumberSubtract(), input, input);
+ Node* value = graph()->NewNode(simplified()->NumberEqual(), diff, diff);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 18.2.3 isNaN ( number )
+Reduction JSBuiltinReducer::ReduceGlobalIsNaN(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // isNaN(a:plain-primitive) -> BooleanNot(NumberEqual(a', a'))
+ // where a' = ToNumber(a)
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), input, input);
+ Node* value = graph()->NewNode(simplified()->BooleanNot(), check);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
// ES6 section 20.2.2.1 Math.abs ( x )
Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
JSCallReduction r(node);
@@ -737,6 +854,60 @@ Reduction JSBuiltinReducer::ReduceMathTrunc(Node* node) {
return NoChange();
}
+// ES6 section 20.1.2.2 Number.isFinite ( number )
+Reduction JSBuiltinReducer::ReduceNumberIsFinite(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Number.isFinite(a:number) -> NumberEqual(a', a')
+ // where a' = NumberSubtract(a, a)
+ Node* input = r.GetJSCallInput(0);
+ Node* diff = graph()->NewNode(simplified()->NumberSubtract(), input, input);
+ Node* value = graph()->NewNode(simplified()->NumberEqual(), diff, diff);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.1.2.3 Number.isInteger ( number )
+Reduction JSBuiltinReducer::ReduceNumberIsInteger(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Number.isInteger(x:number) -> NumberEqual(NumberSubtract(x, x'), #0)
+ // where x' = NumberTrunc(x)
+ Node* input = r.GetJSCallInput(0);
+ Node* trunc = graph()->NewNode(simplified()->NumberTrunc(), input);
+ Node* diff = graph()->NewNode(simplified()->NumberSubtract(), input, trunc);
+ Node* value = graph()->NewNode(simplified()->NumberEqual(), diff,
+ jsgraph()->ZeroConstant());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.1.2.4 Number.isNaN ( number )
+Reduction JSBuiltinReducer::ReduceNumberIsNaN(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Number.isNaN(a:number) -> BooleanNot(NumberEqual(a, a))
+ Node* input = r.GetJSCallInput(0);
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), input, input);
+ Node* value = graph()->NewNode(simplified()->BooleanNot(), check);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.1.2.5 Number.isSafeInteger ( number )
+Reduction JSBuiltinReducer::ReduceNumberIsSafeInteger(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(type_cache_.kSafeInteger)) {
+ // Number.isInteger(x:safe-integer) -> #true
+ Node* value = jsgraph()->TrueConstant();
+ return Replace(value);
+ }
+ return NoChange();
+}
+
// ES6 section 20.1.2.13 Number.parseInt ( string, radix )
Reduction JSBuiltinReducer::ReduceNumberParseInt(Node* node) {
JSCallReduction r(node);
@@ -887,51 +1058,146 @@ Reduction JSBuiltinReducer::ReduceStringCharCodeAt(Node* node) {
return NoChange();
}
-namespace {
+Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ if (HasInstanceTypeWitness(receiver, effect, JS_STRING_ITERATOR_TYPE)) {
+ Node* string = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSStringIteratorString()),
+ receiver, effect, control);
+ Node* index = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSStringIteratorIndex()),
+ receiver, effect, control);
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()), string,
+ effect, control);
-bool HasInstanceTypeWitness(Node* receiver, Node* effect,
- InstanceType instance_type) {
- for (Node* dominator = effect;;) {
- if (dominator->opcode() == IrOpcode::kCheckMaps &&
- dominator->InputAt(0) == receiver) {
- // Check if all maps have the given {instance_type}.
- for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
- Node* const map = NodeProperties::GetValueInput(dominator, i);
- Type* const map_type = NodeProperties::GetType(map);
- if (!map_type->IsConstant()) return false;
- Handle<Map> const map_value =
- Handle<Map>::cast(map_type->AsConstant()->Value());
- if (map_value->instance_type() != instance_type) return false;
- }
- return true;
- }
- switch (dominator->opcode()) {
- case IrOpcode::kStoreField: {
- FieldAccess const& access = FieldAccessOf(dominator->op());
- if (access.base_is_tagged == kTaggedBase &&
- access.offset == HeapObject::kMapOffset) {
- return false;
- }
- break;
- }
- case IrOpcode::kStoreElement:
- break;
- default: {
- DCHECK_EQ(1, dominator->op()->EffectOutputCount());
- if (dominator->op()->EffectInputCount() != 1 ||
- !dominator->op()->HasProperty(Operator::kNoWrite)) {
- // Didn't find any appropriate CheckMaps node.
- return false;
+ // branch0: if (index < length)
+ Node* check0 =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* etrue0 = effect;
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* done_true;
+ Node* vtrue0;
+ {
+ done_true = jsgraph()->FalseConstant();
+ Node* lead = graph()->NewNode(simplified()->StringCharCodeAt(), string,
+ index, if_true0);
+
+ // branch1: if ((lead & 0xFC00) === 0xD800)
+ Node* check1 = graph()->NewNode(
+ simplified()->NumberEqual(),
+ graph()->NewNode(simplified()->NumberBitwiseAnd(), lead,
+ jsgraph()->Int32Constant(0xFC00)),
+ jsgraph()->Int32Constant(0xD800));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_true0);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1;
+ {
+ Node* next_index = graph()->NewNode(simplified()->NumberAdd(), index,
+ jsgraph()->OneConstant());
+ // branch2: if ((index + 1) < length)
+ Node* check2 = graph()->NewNode(simplified()->NumberLessThan(),
+ next_index, length);
+ Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check2, if_true1);
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* vtrue2;
+ {
+ Node* trail = graph()->NewNode(simplified()->StringCharCodeAt(),
+ string, next_index, if_true2);
+ // branch3: if ((trail & 0xFC00) === 0xDC00)
+ Node* check3 = graph()->NewNode(
+ simplified()->NumberEqual(),
+ graph()->NewNode(simplified()->NumberBitwiseAnd(), trail,
+ jsgraph()->Int32Constant(0xFC00)),
+ jsgraph()->Int32Constant(0xDC00));
+ Node* branch3 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check3, if_true2);
+ Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
+ Node* vtrue3;
+ {
+ vtrue3 = graph()->NewNode(
+ simplified()->NumberBitwiseOr(),
+// Need to swap the order for big-endian platforms
+#if V8_TARGET_BIG_ENDIAN
+ graph()->NewNode(simplified()->NumberShiftLeft(), lead,
+ jsgraph()->Int32Constant(16)),
+ trail);
+#else
+ graph()->NewNode(simplified()->NumberShiftLeft(), trail,
+ jsgraph()->Int32Constant(16)),
+ lead);
+#endif
+ }
+
+ Node* if_false3 = graph()->NewNode(common()->IfFalse(), branch3);
+ Node* vfalse3 = lead;
+ if_true2 = graph()->NewNode(common()->Merge(2), if_true3, if_false3);
+ vtrue2 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue3, vfalse3, if_true2);
}
- break;
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* vfalse2 = lead;
+ if_true1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+ vtrue1 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue2, vfalse2, if_true1);
}
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1 = lead;
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vtrue0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue1, vfalse1, if_true0);
+ vtrue0 = graph()->NewNode(
+ simplified()->StringFromCodePoint(UnicodeEncoding::UTF16), vtrue0);
+
+ // Update iterator.[[NextIndex]]
+ Node* char_length = etrue0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()), vtrue0,
+ etrue0, if_true0);
+ index = graph()->NewNode(simplified()->NumberAdd(), index, char_length);
+ etrue0 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
+ receiver, index, etrue0, if_true0);
}
- dominator = NodeProperties::GetEffectInput(dominator);
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* done_false;
+ Node* vfalse0;
+ {
+ vfalse0 = jsgraph()->UndefinedConstant();
+ done_false = jsgraph()->TrueConstant();
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, effect, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue0, vfalse0, control);
+ Node* done =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ done_true, done_false, control);
+
+ value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
+ value, done, context, effect);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
+ return NoChange();
}
-} // namespace
-
Reduction JSBuiltinReducer::ReduceArrayBufferViewAccessor(
Node* node, InstanceType instance_type, FieldAccess const& access) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -939,27 +1205,21 @@ Reduction JSBuiltinReducer::ReduceArrayBufferViewAccessor(
Node* control = NodeProperties::GetControlInput(node);
if (HasInstanceTypeWitness(receiver, effect, instance_type)) {
// Load the {receiver}s field.
- Node* receiver_length = effect = graph()->NewNode(
+ Node* receiver_value = effect = graph()->NewNode(
simplified()->LoadField(access), receiver, effect, control);
// Check if the {receiver}s buffer was neutered.
Node* receiver_buffer = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
receiver, effect, control);
- Node* receiver_buffer_bitfield = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
- receiver_buffer, effect, control);
- Node* check = graph()->NewNode(
- simplified()->NumberEqual(),
- graph()->NewNode(
- simplified()->NumberBitwiseAnd(), receiver_buffer_bitfield,
- jsgraph()->Constant(JSArrayBuffer::WasNeutered::kMask)),
- jsgraph()->ZeroConstant());
+ Node* check = effect =
+ graph()->NewNode(simplified()->ArrayBufferWasNeutered(),
+ receiver_buffer, effect, control);
// Default to zero if the {receiver}s buffer was neutered.
Node* value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
- check, receiver_length, jsgraph()->ZeroConstant());
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ check, jsgraph()->ZeroConstant(), receiver_value);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -978,6 +1238,17 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceArrayPop(node);
case kArrayPush:
return ReduceArrayPush(node);
+ case kDateGetTime:
+ return ReduceDateGetTime(node);
+ case kFunctionHasInstance:
+ return ReduceFunctionHasInstance(node);
+ break;
+ case kGlobalIsFinite:
+ reduction = ReduceGlobalIsFinite(node);
+ break;
+ case kGlobalIsNaN:
+ reduction = ReduceGlobalIsNaN(node);
+ break;
case kMathAbs:
reduction = ReduceMathAbs(node);
break;
@@ -1077,6 +1348,18 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kMathTrunc:
reduction = ReduceMathTrunc(node);
break;
+ case kNumberIsFinite:
+ reduction = ReduceNumberIsFinite(node);
+ break;
+ case kNumberIsInteger:
+ reduction = ReduceNumberIsInteger(node);
+ break;
+ case kNumberIsNaN:
+ reduction = ReduceNumberIsNaN(node);
+ break;
+ case kNumberIsSafeInteger:
+ reduction = ReduceNumberIsSafeInteger(node);
+ break;
case kNumberParseInt:
reduction = ReduceNumberParseInt(node);
break;
@@ -1087,6 +1370,8 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceStringCharAt(node);
case kStringCharCodeAt:
return ReduceStringCharCodeAt(node);
+ case kStringIteratorNext:
+ return ReduceStringIteratorNext(node);
case kDataViewByteLength:
return ReduceArrayBufferViewAccessor(
node, JS_DATA_VIEW_TYPE,
@@ -1146,6 +1431,10 @@ SimplifiedOperatorBuilder* JSBuiltinReducer::simplified() const {
return jsgraph()->simplified();
}
+JSOperatorBuilder* JSBuiltinReducer::javascript() const {
+ return jsgraph()->javascript();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 2da834718c..524d006174 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -14,7 +14,6 @@ namespace internal {
// Forward declarations.
class CompilationDependencies;
class Factory;
-class TypeCache;
namespace compiler {
@@ -22,8 +21,9 @@ namespace compiler {
class CommonOperatorBuilder;
struct FieldAccess;
class JSGraph;
+class JSOperatorBuilder;
class SimplifiedOperatorBuilder;
-
+class TypeCache;
class JSBuiltinReducer final : public AdvancedReducer {
public:
@@ -43,6 +43,10 @@ class JSBuiltinReducer final : public AdvancedReducer {
private:
Reduction ReduceArrayPop(Node* node);
Reduction ReduceArrayPush(Node* node);
+ Reduction ReduceDateGetTime(Node* node);
+ Reduction ReduceFunctionHasInstance(Node* node);
+ Reduction ReduceGlobalIsFinite(Node* node);
+ Reduction ReduceGlobalIsNaN(Node* node);
Reduction ReduceMathAbs(Node* node);
Reduction ReduceMathAcos(Node* node);
Reduction ReduceMathAcosh(Node* node);
@@ -76,10 +80,15 @@ class JSBuiltinReducer final : public AdvancedReducer {
Reduction ReduceMathTan(Node* node);
Reduction ReduceMathTanh(Node* node);
Reduction ReduceMathTrunc(Node* node);
+ Reduction ReduceNumberIsFinite(Node* node);
+ Reduction ReduceNumberIsInteger(Node* node);
+ Reduction ReduceNumberIsNaN(Node* node);
+ Reduction ReduceNumberIsSafeInteger(Node* node);
Reduction ReduceNumberParseInt(Node* node);
Reduction ReduceStringCharAt(Node* node);
Reduction ReduceStringCharCodeAt(Node* node);
Reduction ReduceStringFromCharCode(Node* node);
+ Reduction ReduceStringIteratorNext(Node* node);
Reduction ReduceArrayBufferViewAccessor(Node* node,
InstanceType instance_type,
FieldAccess const& access);
@@ -94,6 +103,7 @@ class JSBuiltinReducer final : public AdvancedReducer {
Isolate* isolate() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
+ JSOperatorBuilder* javascript() const;
CompilationDependencies* dependencies() const { return dependencies_; }
CompilationDependencies* const dependencies_;
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index e39021412e..dd8f0643dd 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -14,30 +14,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-
-VectorSlotPair CallCountFeedback(VectorSlotPair p) {
- // Extract call count from {p}.
- if (!p.IsValid()) return VectorSlotPair();
- CallICNexus n(p.vector(), p.slot());
- int const call_count = n.ExtractCallCount();
- if (call_count <= 0) return VectorSlotPair();
-
- // Create megamorphic CallIC feedback with the given {call_count}.
- StaticFeedbackVectorSpec spec;
- FeedbackVectorSlot slot = spec.AddCallICSlot();
- Handle<TypeFeedbackMetadata> metadata =
- TypeFeedbackMetadata::New(n.GetIsolate(), &spec);
- Handle<TypeFeedbackVector> vector =
- TypeFeedbackVector::New(n.GetIsolate(), metadata);
- CallICNexus nexus(vector, slot);
- nexus.ConfigureMegamorphic(call_count);
- return VectorSlotPair(vector, slot);
-}
-
-} // namespace
-
-
Reduction JSCallReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSCallConstruct:
@@ -166,7 +142,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
}
// Change {node} to the new {JSCallFunction} operator.
NodeProperties::ChangeOp(
- node, javascript()->CallFunction(arity, CallCountFeedback(p.feedback()),
+ node, javascript()->CallFunction(arity, p.frequency(), VectorSlotPair(),
convert_mode, p.tail_call_mode()));
// Change context of {node} to the Function.prototype.apply context,
// to ensure any exception is thrown in the correct context.
@@ -206,7 +182,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
--arity;
}
NodeProperties::ChangeOp(
- node, javascript()->CallFunction(arity, CallCountFeedback(p.feedback()),
+ node, javascript()->CallFunction(arity, p.frequency(), VectorSlotPair(),
convert_mode, p.tail_call_mode()));
// Try to further reduce the JSCallFunction {node}.
Reduction const reduction = ReduceJSCallFunction(node);
@@ -287,7 +263,7 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
arity++;
}
NodeProperties::ChangeOp(node, javascript()->CallFunction(
- arity, CallCountFeedback(p.feedback()),
+ arity, p.frequency(), VectorSlotPair(),
convert_mode, p.tail_call_mode()));
// Try to further reduce the JSCallFunction {node}.
Reduction const reduction = ReduceJSCallFunction(node);
@@ -305,6 +281,20 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
// Extract feedback from the {node} using the CallICNexus.
if (!p.feedback().IsValid()) return NoChange();
CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ if (nexus.IsUninitialized() && (flags() & kBailoutOnUninitialized)) {
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
+ Node* deoptimize = graph()->NewNode(
+ common()->Deoptimize(
+ DeoptimizeKind::kSoft,
+ DeoptimizeReason::kInsufficientTypeFeedbackForCall),
+ frame_state, effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Dead());
+ return Changed(node);
+ }
Handle<Object> feedback(nexus.GetFeedback(), isolate());
if (feedback->IsAllocationSite()) {
// Retrieve the Array function from the {node}.
@@ -386,8 +376,8 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
// Check if we have an allocation site.
Handle<AllocationSite> site;
if (p.feedback().IsValid()) {
- Handle<Object> feedback(
- p.feedback().vector()->Get(p.feedback().slot()), isolate());
+ CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ Handle<Object> feedback(nexus.GetFeedback(), isolate());
if (feedback->IsAllocationSite()) {
site = Handle<AllocationSite>::cast(feedback);
}
@@ -412,10 +402,9 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
// Not much we can do if deoptimization support is disabled.
if (!(flags() & kDeoptimizationEnabled)) return NoChange();
- // TODO(mvstanton): Use ConstructICNexus here, once available.
- Handle<Object> feedback;
if (!p.feedback().IsValid()) return NoChange();
- feedback = handle(p.feedback().vector()->Get(p.feedback().slot()), isolate());
+ CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ Handle<Object> feedback(nexus.GetFeedback(), isolate());
if (feedback->IsAllocationSite()) {
// The feedback is an AllocationSite, which means we have called the
// Array function and collected transition (and pretenuring) feedback
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 8d9700a072..0c3835c35c 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -20,18 +20,22 @@ class SimplifiedOperatorBuilder;
// Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
// which might allow inlining or other optimizations to be performed afterwards.
-class JSCallReducer final : public Reducer {
+class JSCallReducer final : public AdvancedReducer {
public:
// Flags that control the mode of operation.
enum Flag {
kNoFlags = 0u,
- kDeoptimizationEnabled = 1u << 0,
+ kBailoutOnUninitialized = 1u << 0,
+ kDeoptimizationEnabled = 1u << 1
};
typedef base::Flags<Flag> Flags;
- JSCallReducer(JSGraph* jsgraph, Flags flags,
+ JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
MaybeHandle<Context> native_context)
- : jsgraph_(jsgraph), flags_(flags), native_context_(native_context) {}
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ flags_(flags),
+ native_context_(native_context) {}
Reduction Reduce(Node* node) final;
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index f2c5edd630..b68bb7085d 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -722,16 +722,25 @@ Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
Node* value = NodeProperties::GetValueInput(node, 0);
Node* done = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
- // Load the JSIteratorResult map for the {context}.
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- Node* iterator_result_map = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
- native_context, native_context, effect);
+ Node* iterator_result_map;
+ Handle<Context> native_context;
+ if (GetSpecializationNativeContext(node).ToHandle(&native_context)) {
+ // Specialize to the constant JSIteratorResult map to enable map check
+ // elimination to eliminate subsequent checks in case of inlining.
+ iterator_result_map = jsgraph()->HeapConstant(
+ handle(native_context->iterator_result_map(), isolate()));
+ } else {
+ // Load the JSIteratorResult map for the {context}.
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ iterator_result_map = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
+ native_context, native_context, effect);
+ }
// Emit code to allocate the JSIteratorResult instance.
AllocationBuilder a(jsgraph(), effect, graph()->start());
@@ -815,6 +824,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
+ Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
Node* object = NodeProperties::GetValueInput(node, 0);
Node* closure = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -823,12 +833,20 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
Node* native_context = effect = graph()->NewNode(
javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
context, context, effect);
- AllocationBuilder a(jsgraph(), effect, control);
+
+ AllocationBuilder aa(jsgraph(), effect, control);
+ aa.Allocate(ContextExtension::kSize);
+ aa.Store(AccessBuilder::ForMap(), factory()->context_extension_map());
+ aa.Store(AccessBuilder::ForContextExtensionScopeInfo(), scope_info);
+ aa.Store(AccessBuilder::ForContextExtensionExtension(), object);
+ Node* extension = aa.Finish();
+
+ AllocationBuilder a(jsgraph(), extension, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
- a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), object);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
native_context);
RelaxControls(node);
@@ -838,7 +856,8 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
- Handle<String> name = OpParameter<Handle<String>>(node);
+ const CreateCatchContextParameters& parameters =
+ CreateCatchContextParametersOf(node->op());
Node* exception = NodeProperties::GetValueInput(node, 0);
Node* closure = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -847,13 +866,23 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
Node* native_context = effect = graph()->NewNode(
javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
context, context, effect);
- AllocationBuilder a(jsgraph(), effect, control);
+
+ AllocationBuilder aa(jsgraph(), effect, control);
+ aa.Allocate(ContextExtension::kSize);
+ aa.Store(AccessBuilder::ForMap(), factory()->context_extension_map());
+ aa.Store(AccessBuilder::ForContextExtensionScopeInfo(),
+ parameters.scope_info());
+ aa.Store(AccessBuilder::ForContextExtensionExtension(),
+ parameters.catch_name());
+ Node* extension = aa.Finish();
+
+ AllocationBuilder a(jsgraph(), extension, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
a.AllocateArray(Context::MIN_CONTEXT_SLOTS + 1,
factory()->catch_context_map());
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
- a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), name);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
native_context);
a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
@@ -1013,10 +1042,17 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
ElementAccess access = IsFastDoubleElementsKind(elements_kind)
? AccessBuilder::ForFixedDoubleArrayElement()
: AccessBuilder::ForFixedArrayElement();
- Node* value =
- IsFastDoubleElementsKind(elements_kind)
- ? jsgraph()->Float64Constant(bit_cast<double>(kHoleNanInt64))
- : jsgraph()->TheHoleConstant();
+ Node* value;
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ // Load the hole NaN pattern from the canonical location.
+ value = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForExternalDoubleValue()),
+ jsgraph()->ExternalConstant(
+ ExternalReference::address_of_the_hole_nan()),
+ effect, control);
+ } else {
+ value = jsgraph()->TheHoleConstant();
+ }
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), effect, control);
@@ -1065,8 +1101,8 @@ Node* JSCreateLowering::AllocateFastLiteral(
boilerplate_map->instance_descriptors()->GetKey(i), isolate());
FieldIndex index = FieldIndex::ForDescriptor(*boilerplate_map, i);
FieldAccess access = {
- kTaggedBase, index.offset(), property_name,
- Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
+ kTaggedBase, index.offset(), property_name,
+ Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
Node* value;
if (boilerplate->IsUnboxedDoubleField(index)) {
access.machine_type = MachineType::Float64();
@@ -1169,18 +1205,18 @@ Node* JSCreateLowering::AllocateFastLiteralElements(
if (elements_map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE) {
Handle<FixedDoubleArray> elements =
Handle<FixedDoubleArray>::cast(boilerplate_elements);
+ Node* the_hole_value = nullptr;
for (int i = 0; i < elements_length; ++i) {
if (elements->is_the_hole(i)) {
- // TODO(turbofan): We cannot currently safely pass thru the (signaling)
- // hole NaN in C++ code, as the C++ compiler on Intel might use FPU
- // instructions/registers for doubles and therefore make the NaN quiet.
- // We should consider passing doubles in the compiler as raw int64
- // values to prevent this.
- elements_values[i] = effect =
- graph()->NewNode(simplified()->LoadElement(
- AccessBuilder::ForFixedDoubleArrayElement()),
- jsgraph()->HeapConstant(elements),
- jsgraph()->Constant(i), effect, control);
+ if (the_hole_value == nullptr) {
+ // Load the hole NaN pattern from the canonical location.
+ the_hole_value = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForExternalDoubleValue()),
+ jsgraph()->ExternalConstant(
+ ExternalReference::address_of_the_hole_nan()),
+ effect, control);
+ }
+ elements_values[i] = the_hole_value;
} else {
elements_values[i] = jsgraph()->Constant(elements->get_scalar(i));
}
@@ -1244,6 +1280,13 @@ MaybeHandle<LiteralsArray> JSCreateLowering::GetSpecializationLiterals(
return MaybeHandle<LiteralsArray>();
}
+MaybeHandle<Context> JSCreateLowering::GetSpecializationNativeContext(
+ Node* node) {
+ Node* const context = NodeProperties::GetContextInput(node);
+ return NodeProperties::GetSpecializationNativeContext(context,
+ native_context_);
+}
+
Factory* JSCreateLowering::factory() const { return isolate()->factory(); }
Graph* JSCreateLowering::graph() const { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 2262e66ef2..6248ca2982 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -31,11 +31,12 @@ class JSCreateLowering final : public AdvancedReducer {
public:
JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
JSGraph* jsgraph, MaybeHandle<LiteralsArray> literals_array,
- Zone* zone)
+ MaybeHandle<Context> native_context, Zone* zone)
: AdvancedReducer(editor),
dependencies_(dependencies),
jsgraph_(jsgraph),
literals_array_(literals_array),
+ native_context_(native_context),
zone_(zone) {}
~JSCreateLowering() final {}
@@ -76,6 +77,8 @@ class JSCreateLowering final : public AdvancedReducer {
// Infers the LiteralsArray to use for a given {node}.
MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
+ // Infers the native context to use for a given {node}.
+ MaybeHandle<Context> GetSpecializationNativeContext(Node* node);
Factory* factory() const;
Graph* graph() const;
@@ -91,6 +94,7 @@ class JSCreateLowering final : public AdvancedReducer {
CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
MaybeHandle<LiteralsArray> const literals_array_;
+ MaybeHandle<Context> const native_context_;
Zone* const zone_;
};
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 812d3e7bce..22d6c86aa0 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -2,10 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/js-generic-lowering.h"
+
+#include "src/ast/ast.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
@@ -45,13 +47,6 @@ Reduction JSGenericLowering::Reduce(Node* node) {
}
return Changed(node);
}
-#define REPLACE_RUNTIME_CALL(op, fun) \
- void JSGenericLowering::Lower##op(Node* node) { \
- ReplaceWithRuntimeCall(node, fun); \
- }
-REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
-REPLACE_RUNTIME_CALL(JSConvertReceiver, Runtime::kConvertReceiver)
-#undef REPLACE_RUNTIME_CALL
#define REPLACE_STUB_CALL(Name) \
void JSGenericLowering::LowerJS##Name(Node* node) { \
@@ -93,8 +88,10 @@ void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
CallDescriptor::Flags flags,
Operator::Properties properties) {
+ const CallInterfaceDescriptor& descriptor = callable.descriptor();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), 0, flags, properties);
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(), flags,
+ properties);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(zone(), 0, stub_code);
NodeProperties::ChangeOp(node, common()->Call(desc));
@@ -346,6 +343,11 @@ void JSGenericLowering::LowerJSInstanceOf(Node* node) {
ReplaceWithStubCall(node, callable, flags);
}
+void JSGenericLowering::LowerJSOrdinaryHasInstance(Node* node) {
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = CodeFactory::OrdinaryHasInstance(isolate());
+ ReplaceWithStubCall(node, callable, flags);
+}
void JSGenericLowering::LowerJSLoadContext(Node* node) {
const ContextAccess& access = ContextAccessOf(node->op());
@@ -513,11 +515,20 @@ void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
- Handle<String> name = OpParameter<Handle<String>>(node);
- node->InsertInput(zone(), 0, jsgraph()->HeapConstant(name));
+ const CreateCatchContextParameters& parameters =
+ CreateCatchContextParametersOf(node->op());
+ node->InsertInput(zone(), 0,
+ jsgraph()->HeapConstant(parameters.catch_name()));
+ node->InsertInput(zone(), 2,
+ jsgraph()->HeapConstant(parameters.scope_info()));
ReplaceWithRuntimeCall(node, Runtime::kPushCatchContext);
}
+void JSGenericLowering::LowerJSCreateWithContext(Node* node) {
+ Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info));
+ ReplaceWithRuntimeCall(node, Runtime::kPushWithContext);
+}
void JSGenericLowering::LowerJSCreateBlockContext(Node* node) {
Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
@@ -577,12 +588,10 @@ void JSGenericLowering::LowerJSCallRuntime(Node* node) {
ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
}
-
-void JSGenericLowering::LowerJSForInDone(Node* node) {
- ReplaceWithRuntimeCall(node, Runtime::kForInDone);
+void JSGenericLowering::LowerJSConvertReceiver(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kConvertReceiver);
}
-
void JSGenericLowering::LowerJSForInNext(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kForInNext);
}
@@ -592,12 +601,6 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kForInPrepare);
}
-
-void JSGenericLowering::LowerJSForInStep(Node* node) {
- ReplaceWithRuntimeCall(node, Runtime::kForInStep);
-}
-
-
void JSGenericLowering::LowerJSLoadMessage(Node* node) {
ExternalReference message_address =
ExternalReference::address_of_pending_message_obj(isolate());
diff --git a/deps/v8/src/compiler/js-global-object-specialization.cc b/deps/v8/src/compiler/js-global-object-specialization.cc
index 2b4bf1c019..10130f4039 100644
--- a/deps/v8/src/compiler/js-global-object-specialization.cc
+++ b/deps/v8/src/compiler/js-global-object-specialization.cc
@@ -11,9 +11,9 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
#include "src/lookup.h"
#include "src/objects-inl.h"
-#include "src/type-cache.h"
namespace v8 {
namespace internal {
@@ -48,6 +48,23 @@ Reduction JSGlobalObjectSpecialization::Reduce(Node* node) {
return NoChange();
}
+namespace {
+
+FieldAccess ForPropertyCellValue(MachineRepresentation representation,
+ Type* type, Handle<Name> name) {
+ WriteBarrierKind kind = kFullWriteBarrier;
+ if (representation == MachineRepresentation::kTaggedSigned) {
+ kind = kNoWriteBarrier;
+ } else if (representation == MachineRepresentation::kTaggedPointer) {
+ kind = kPointerWriteBarrier;
+ }
+ MachineType r = MachineType::TypeForRepresentation(representation);
+ FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, name, type, r,
+ kind};
+ return access;
+}
+} // namespace
+
Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
@@ -104,24 +121,31 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
}
// Load from constant type cell can benefit from type feedback.
- Type* property_cell_value_type = Type::Tagged();
+ Type* property_cell_value_type = Type::NonInternal();
+ MachineRepresentation representation = MachineRepresentation::kTagged;
if (property_details.cell_type() == PropertyCellType::kConstantType) {
// Compute proper type based on the current value in the cell.
if (property_cell_value->IsSmi()) {
property_cell_value_type = type_cache_.kSmi;
+ representation = MachineRepresentation::kTaggedSigned;
} else if (property_cell_value->IsNumber()) {
+ // TODO(mvstanton): Remove kHeapNumber from type cache, it's just
+ // Type::Number().
property_cell_value_type = type_cache_.kHeapNumber;
+ representation = MachineRepresentation::kTaggedPointer;
} else {
+ // TODO(turbofan): Track the property_cell_value_map on the FieldAccess
+ // below and use it in LoadElimination to eliminate map checks.
Handle<Map> property_cell_value_map(
Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
- property_cell_value_type =
- Type::Class(property_cell_value_map, graph()->zone());
+ property_cell_value_type = Type::For(property_cell_value_map);
+ representation = MachineRepresentation::kTaggedPointer;
}
}
- Node* value = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
- jsgraph()->HeapConstant(property_cell), effect, control);
+ Node* value = effect =
+ graph()->NewNode(simplified()->LoadField(ForPropertyCellValue(
+ representation, property_cell_value_type, name)),
+ jsgraph()->HeapConstant(property_cell), effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -180,6 +204,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
// values' type doesn't match the type of the previous value in the cell.
dependencies()->AssumePropertyCell(property_cell);
Type* property_cell_value_type;
+ MachineRepresentation representation = MachineRepresentation::kTagged;
if (property_cell_value->IsHeapObject()) {
// We cannot do anything if the {property_cell_value}s map is no
// longer stable.
@@ -189,23 +214,25 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
dependencies()->AssumeMapStable(property_cell_value_map);
// Check that the {value} is a HeapObject.
- value = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
+ value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
value, effect, control);
// Check {value} map agains the {property_cell} map.
effect = graph()->NewNode(
simplified()->CheckMaps(1), value,
jsgraph()->HeapConstant(property_cell_value_map), effect, control);
- property_cell_value_type = Type::TaggedPointer();
+ property_cell_value_type = Type::OtherInternal();
+ representation = MachineRepresentation::kTaggedPointer;
} else {
// Check that the {value} is a Smi.
- value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
- value, effect, control);
- property_cell_value_type = Type::TaggedSigned();
+ value = effect =
+ graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
+ property_cell_value_type = Type::SignedSmall();
+ representation = MachineRepresentation::kTaggedSigned;
}
effect = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
+ simplified()->StoreField(ForPropertyCellValue(
+ representation, property_cell_value_type, name)),
jsgraph()->HeapConstant(property_cell), value, effect, control);
break;
}
@@ -219,7 +246,8 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
dependencies()->AssumePropertyCell(property_cell);
}
effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForPropertyCellValue()),
+ simplified()->StoreField(ForPropertyCellValue(
+ MachineRepresentation::kTagged, Type::NonInternal(), name)),
jsgraph()->HeapConstant(property_cell), value, effect, control);
break;
}
@@ -251,7 +279,7 @@ bool JSGlobalObjectSpecialization::LookupInScriptContextTable(
Handle<Context> script_context = ScriptContextTable::GetContext(
script_context_table, lookup_result.context_index);
result->context = script_context;
- result->immutable = IsImmutableVariableMode(lookup_result.mode);
+ result->immutable = lookup_result.mode == CONST;
result->index = lookup_result.slot_index;
return true;
}
diff --git a/deps/v8/src/compiler/js-global-object-specialization.h b/deps/v8/src/compiler/js-global-object-specialization.h
index 3ffc67a377..a6c511e9e5 100644
--- a/deps/v8/src/compiler/js-global-object-specialization.h
+++ b/deps/v8/src/compiler/js-global-object-specialization.h
@@ -12,8 +12,6 @@ namespace internal {
// Forward declarations.
class CompilationDependencies;
-class TypeCache;
-
namespace compiler {
@@ -22,7 +20,7 @@ class CommonOperatorBuilder;
class JSGraph;
class JSOperatorBuilder;
class SimplifiedOperatorBuilder;
-
+class TypeCache;
// Specializes a given JSGraph to a given global object, potentially constant
// folding some {JSLoadGlobal} nodes or strength reducing some {JSStoreGlobal}
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index ce7b33ba9f..5c626d15c6 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -4,107 +4,144 @@
#include "src/compiler/js-inlining-heuristic.h"
-#include "src/compiler.h"
+#include "src/compilation-info.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
-Reduction JSInliningHeuristic::Reduce(Node* node) {
- if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_turbo_inlining) PrintF(__VA_ARGS__); \
+ } while (false)
- // Check if we already saw that {node} before, and if so, just skip it.
- if (seen_.find(node->id()) != seen_.end()) return NoChange();
- seen_.insert(node->id());
+namespace {
- Node* callee = node->InputAt(0);
- HeapObjectMatcher match(callee);
- if (!match.HasValue() || !match.Value()->IsJSFunction()) return NoChange();
- Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
-
- // Functions marked with %SetForceInlineFlag are immediately inlined.
- if (function->shared()->force_inline()) {
- return inliner_.ReduceJSCall(node, function);
+int CollectFunctions(Node* node, Handle<JSFunction>* functions,
+ int functions_size) {
+ DCHECK_NE(0u, functions_size);
+ HeapObjectMatcher m(node);
+ if (m.HasValue() && m.Value()->IsJSFunction()) {
+ functions[0] = Handle<JSFunction>::cast(m.Value());
+ return 1;
}
-
- // Handling of special inlining modes right away:
- // - For restricted inlining: stop all handling at this point.
- // - For stressing inlining: immediately handle all functions.
- switch (mode_) {
- case kRestrictedInlining:
- return NoChange();
- case kStressInlining:
- return inliner_.ReduceJSCall(node, function);
- case kGeneralInlining:
- break;
+ if (m.IsPhi()) {
+ int const value_input_count = m.node()->op()->ValueInputCount();
+ if (value_input_count > functions_size) return 0;
+ for (int n = 0; n < value_input_count; ++n) {
+ HeapObjectMatcher m(node->InputAt(n));
+ if (!m.HasValue() || !m.Value()->IsJSFunction()) return 0;
+ functions[n] = Handle<JSFunction>::cast(m.Value());
+ }
+ return value_input_count;
}
+ return 0;
+}
- // ---------------------------------------------------------------------------
- // Everything below this line is part of the inlining heuristic.
- // ---------------------------------------------------------------------------
-
+bool CanInlineFunction(Handle<JSFunction> function) {
// Built-in functions are handled by the JSBuiltinReducer.
- if (function->shared()->HasBuiltinFunctionId()) return NoChange();
+ if (function->shared()->HasBuiltinFunctionId()) return false;
// Don't inline builtins.
- if (function->shared()->IsBuiltin()) return NoChange();
-
- // Quick check on source code length to avoid parsing large candidate.
- if (function->shared()->SourceSize() > FLAG_max_inlined_source_size) {
- return NoChange();
- }
+ if (function->shared()->IsBuiltin()) return false;
// Quick check on the size of the AST to avoid parsing large candidate.
if (function->shared()->ast_node_count() > FLAG_max_inlined_nodes) {
+ return false;
+ }
+
+ // Avoid inlining across the boundary of asm.js code.
+ if (function->shared()->asm_function()) return false;
+ return true;
+}
+
+} // namespace
+
+Reduction JSInliningHeuristic::Reduce(Node* node) {
+ if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+
+ // Check if we already saw that {node} before, and if so, just skip it.
+ if (seen_.find(node->id()) != seen_.end()) return NoChange();
+ seen_.insert(node->id());
+
+ // Check if the {node} is an appropriate candidate for inlining.
+ Node* callee = node->InputAt(0);
+ Candidate candidate;
+ candidate.node = node;
+ candidate.num_functions =
+ CollectFunctions(callee, candidate.functions, kMaxCallPolymorphism);
+ if (candidate.num_functions == 0) {
+ return NoChange();
+ } else if (candidate.num_functions > 1 && !FLAG_polymorphic_inlining) {
+ TRACE(
+ "Not considering call site #%d:%s, because polymorphic inlining "
+ "is disabled\n",
+ node->id(), node->op()->mnemonic());
return NoChange();
}
- // Avoid inlining within or across the boundary of asm.js code.
- if (info_->shared_info()->asm_function()) return NoChange();
- if (function->shared()->asm_function()) return NoChange();
+ // Functions marked with %SetForceInlineFlag are immediately inlined.
+ bool can_inline = false, force_inline = true;
+ for (int i = 0; i < candidate.num_functions; ++i) {
+ Handle<JSFunction> function = candidate.functions[i];
+ if (!function->shared()->force_inline()) {
+ force_inline = false;
+ }
+ if (CanInlineFunction(function)) {
+ can_inline = true;
+ }
+ }
+ if (force_inline) return InlineCandidate(candidate);
+ if (!can_inline) return NoChange();
- // Stop inlinining once the maximum allowed level is reached.
+ // Stop inlining once the maximum allowed level is reached.
int level = 0;
for (Node* frame_state = NodeProperties::GetFrameStateInput(node);
frame_state->opcode() == IrOpcode::kFrameState;
frame_state = NodeProperties::GetFrameStateInput(frame_state)) {
- if (++level > FLAG_max_inlining_levels) return NoChange();
+ FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+ if (FrameStateFunctionInfo::IsJSFunctionType(frame_info.type())) {
+ if (++level > FLAG_max_inlining_levels) {
+ TRACE(
+ "Not considering call site #%d:%s, because inlining depth "
+ "%d exceeds maximum allowed level %d\n",
+ node->id(), node->op()->mnemonic(), level,
+ FLAG_max_inlining_levels);
+ return NoChange();
+ }
+ }
}
// Gather feedback on how often this call site has been hit before.
- int calls = -1; // Same default as CallICNexus::ExtractCallCount.
if (node->opcode() == IrOpcode::kJSCallFunction) {
- CallFunctionParameters p = CallFunctionParametersOf(node->op());
- if (p.feedback().IsValid()) {
- CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
- calls = nexus.ExtractCallCount();
- }
+ CallFunctionParameters const p = CallFunctionParametersOf(node->op());
+ candidate.frequency = p.frequency();
} else {
- DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
- CallConstructParameters p = CallConstructParametersOf(node->op());
- if (p.feedback().IsValid()) {
- int const extra_index =
- p.feedback().vector()->GetIndex(p.feedback().slot()) + 1;
- Handle<Object> feedback_extra(p.feedback().vector()->get(extra_index),
- function->GetIsolate());
- if (feedback_extra->IsSmi()) {
- calls = Handle<Smi>::cast(feedback_extra)->value();
- }
- }
+ CallConstructParameters const p = CallConstructParametersOf(node->op());
+ candidate.frequency = p.frequency();
}
- // ---------------------------------------------------------------------------
- // Everything above this line is part of the inlining heuristic.
- // ---------------------------------------------------------------------------
+ // Handling of special inlining modes right away:
+ // - For restricted inlining: stop all handling at this point.
+ // - For stressing inlining: immediately handle all functions.
+ switch (mode_) {
+ case kRestrictedInlining:
+ return NoChange();
+ case kStressInlining:
+ return InlineCandidate(candidate);
+ case kGeneralInlining:
+ break;
+ }
// In the general case we remember the candidate for later.
- candidates_.insert({function, node, calls});
+ candidates_.insert(candidate);
return NoChange();
}
-
void JSInliningHeuristic::Finalize() {
if (candidates_.empty()) return; // Nothing to do without candidates.
if (FLAG_trace_turbo_inlining) PrintCandidates();
@@ -120,36 +157,147 @@ void JSInliningHeuristic::Finalize() {
candidates_.erase(i);
// Make sure we don't try to inline dead candidate nodes.
if (!candidate.node->IsDead()) {
- Reduction r = inliner_.ReduceJSCall(candidate.node, candidate.function);
- if (r.Changed()) {
- cumulative_count_ += candidate.function->shared()->ast_node_count();
- return;
- }
+ Reduction const reduction = InlineCandidate(candidate);
+ if (reduction.Changed()) return;
}
}
}
+Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
+ int const num_calls = candidate.num_functions;
+ Node* const node = candidate.node;
+ if (num_calls == 1) {
+ Handle<JSFunction> function = candidate.functions[0];
+ Reduction const reduction = inliner_.ReduceJSCall(node, function);
+ if (reduction.Changed()) {
+ cumulative_count_ += function->shared()->ast_node_count();
+ }
+ return reduction;
+ }
+
+ // Expand the JSCallFunction/JSCallConstruct node to a subgraph first if
+ // we have multiple known target functions.
+ DCHECK_LT(1, num_calls);
+ Node* calls[kMaxCallPolymorphism + 1];
+ Node* if_successes[kMaxCallPolymorphism];
+ Node* callee = NodeProperties::GetValueInput(node, 0);
+ Node* fallthrough_control = NodeProperties::GetControlInput(node);
+
+ // Setup the inputs for the cloned call nodes.
+ int const input_count = node->InputCount();
+ Node** inputs = graph()->zone()->NewArray<Node*>(input_count);
+ for (int i = 0; i < input_count; ++i) {
+ inputs[i] = node->InputAt(i);
+ }
+
+ // Create the appropriate control flow to dispatch to the cloned calls.
+ for (int i = 0; i < num_calls; ++i) {
+ Node* target = jsgraph()->HeapConstant(candidate.functions[i]);
+ if (i != (num_calls - 1)) {
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(), callee, target);
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ if_successes[i] = graph()->NewNode(common()->IfTrue(), branch);
+ } else {
+ if_successes[i] = fallthrough_control;
+ }
+
+ // The first input to the call is the actual target (which we specialize
+ // to the known {target}); the last input is the control dependency.
+ inputs[0] = target;
+ inputs[input_count - 1] = if_successes[i];
+ calls[i] = graph()->NewNode(node->op(), input_count, inputs);
+ if_successes[i] = graph()->NewNode(common()->IfSuccess(), calls[i]);
+ }
+
+ // Check if we have an exception projection for the call {node}.
+ Node* if_exception = nullptr;
+ for (Edge const edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge) &&
+ edge.from()->opcode() == IrOpcode::kIfException) {
+ if_exception = edge.from();
+ break;
+ }
+ }
+ if (if_exception != nullptr) {
+ // Morph the {if_exception} projection into a join.
+ Node* if_exceptions[kMaxCallPolymorphism + 1];
+ for (int i = 0; i < num_calls; ++i) {
+ if_exceptions[i] =
+ graph()->NewNode(common()->IfException(), calls[i], calls[i]);
+ }
+ Node* exception_control =
+ graph()->NewNode(common()->Merge(num_calls), num_calls, if_exceptions);
+ if_exceptions[num_calls] = exception_control;
+ Node* exception_effect = graph()->NewNode(common()->EffectPhi(num_calls),
+ num_calls + 1, if_exceptions);
+ Node* exception_value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, num_calls), num_calls + 1,
+ if_exceptions);
+ ReplaceWithValue(if_exception, exception_value, exception_effect,
+ exception_control);
+ }
+
+ // Morph the call site into the dispatched call sites.
+ Node* control =
+ graph()->NewNode(common()->Merge(num_calls), num_calls, if_successes);
+ calls[num_calls] = control;
+ Node* effect =
+ graph()->NewNode(common()->EffectPhi(num_calls), num_calls + 1, calls);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, num_calls),
+ num_calls + 1, calls);
+ ReplaceWithValue(node, value, effect, control);
+
+ // Inline the individual, cloned call sites.
+ for (int i = 0; i < num_calls; ++i) {
+ Handle<JSFunction> function = candidate.functions[i];
+ Node* node = calls[i];
+ Reduction const reduction = inliner_.ReduceJSCall(node, function);
+ if (reduction.Changed()) {
+ cumulative_count_ += function->shared()->ast_node_count();
+ }
+ }
+
+ return Replace(value);
+}
bool JSInliningHeuristic::CandidateCompare::operator()(
const Candidate& left, const Candidate& right) const {
- if (left.calls != right.calls) {
- return left.calls > right.calls;
+ if (left.frequency > right.frequency) {
+ return true;
+ } else if (left.frequency < right.frequency) {
+ return false;
+ } else {
+ return left.node->id() > right.node->id();
}
- return left.node < right.node;
}
-
void JSInliningHeuristic::PrintCandidates() {
PrintF("Candidates for inlining (size=%zu):\n", candidates_.size());
for (const Candidate& candidate : candidates_) {
- PrintF(" id:%d, calls:%d, size[source]:%d, size[ast]:%d / %s\n",
- candidate.node->id(), candidate.calls,
- candidate.function->shared()->SourceSize(),
- candidate.function->shared()->ast_node_count(),
- candidate.function->shared()->DebugName()->ToCString().get());
+ PrintF(" #%d:%s, frequency:%g\n", candidate.node->id(),
+ candidate.node->op()->mnemonic(), candidate.frequency);
+ for (int i = 0; i < candidate.num_functions; ++i) {
+ Handle<JSFunction> function = candidate.functions[i];
+ PrintF(" - size:%d, name: %s\n", function->shared()->ast_node_count(),
+ function->shared()->DebugName()->ToCString().get());
+ }
}
}
+Graph* JSInliningHeuristic::graph() const { return jsgraph()->graph(); }
+
+CommonOperatorBuilder* JSInliningHeuristic::common() const {
+ return jsgraph()->common();
+}
+
+SimplifiedOperatorBuilder* JSInliningHeuristic::simplified() const {
+ return jsgraph()->simplified();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index 7f577475bf..367e35ad62 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -21,7 +21,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
inliner_(editor, local_zone, info, jsgraph),
candidates_(local_zone),
seen_(local_zone),
- info_(info) {}
+ jsgraph_(jsgraph) {}
Reduction Reduce(Node* node) final;
@@ -30,10 +30,15 @@ class JSInliningHeuristic final : public AdvancedReducer {
void Finalize() final;
private:
+ // This limit currently matches what Crankshaft does. We may want to
+ // re-evaluate and come up with a proper limit for TurboFan.
+ static const int kMaxCallPolymorphism = 4;
+
struct Candidate {
- Handle<JSFunction> function; // The call target being inlined.
- Node* node; // The call site at which to inline.
- int calls; // Number of times the call site was hit.
+ Handle<JSFunction> functions[kMaxCallPolymorphism];
+ int num_functions;
+ Node* node = nullptr; // The call site at which to inline.
+ float frequency = 0.0f; // Relative frequency of this call site.
};
// Comparator for candidates.
@@ -46,12 +51,18 @@ class JSInliningHeuristic final : public AdvancedReducer {
// Dumps candidates to console.
void PrintCandidates();
+ Reduction InlineCandidate(Candidate const& candidate);
+
+ CommonOperatorBuilder* common() const;
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ SimplifiedOperatorBuilder* simplified() const;
Mode const mode_;
JSInliner inliner_;
Candidates candidates_;
ZoneSet<NodeId> seen_;
- CompilationInfo* info_;
+ JSGraph* const jsgraph_;
int cumulative_count_ = 0;
};
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 635daa4d76..58e5a276cc 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -6,10 +6,12 @@
#include "src/ast/ast-numbering.h"
#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
#include "src/compiler.h"
+#include "src/compiler/all-nodes.h"
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
+#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
@@ -68,13 +70,20 @@ class JSCallAccessor {
return call_->op()->ValueInputCount() - 2;
}
+ float frequency() const {
+ return (call_->opcode() == IrOpcode::kJSCallFunction)
+ ? CallFunctionParametersOf(call_->op()).frequency()
+ : CallConstructParametersOf(call_->op()).frequency();
+ }
+
private:
Node* call_;
};
-
Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
- Node* frame_state, Node* start, Node* end) {
+ Node* frame_state, Node* start, Node* end,
+ Node* exception_target,
+ const NodeVector& uncaught_subcalls) {
// The scheduler is smart enough to place our code; we just ensure {control}
// becomes the control input of the start of the inlinee, and {effect} becomes
// the effect input of the start of the inlinee.
@@ -131,6 +140,44 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
}
}
+ if (exception_target != nullptr) {
+ // Link uncaught calls in the inlinee to {exception_target}
+ int subcall_count = static_cast<int>(uncaught_subcalls.size());
+ if (subcall_count > 0) {
+ TRACE(
+ "Inlinee contains %d calls without IfException; "
+ "linking to existing IfException\n",
+ subcall_count);
+ }
+ NodeVector on_exception_nodes(local_zone_);
+ for (Node* subcall : uncaught_subcalls) {
+ Node* on_exception =
+ graph()->NewNode(common()->IfException(), subcall, subcall);
+ on_exception_nodes.push_back(on_exception);
+ }
+
+ DCHECK_EQ(subcall_count, static_cast<int>(on_exception_nodes.size()));
+ if (subcall_count > 0) {
+ Node* control_output =
+ graph()->NewNode(common()->Merge(subcall_count), subcall_count,
+ &on_exception_nodes.front());
+ NodeVector values_effects(local_zone_);
+ values_effects = on_exception_nodes;
+ values_effects.push_back(control_output);
+ Node* value_output = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, subcall_count),
+ subcall_count + 1, &values_effects.front());
+ Node* effect_output =
+ graph()->NewNode(common()->EffectPhi(subcall_count),
+ subcall_count + 1, &values_effects.front());
+ ReplaceWithValue(exception_target, value_output, effect_output,
+ control_output);
+ } else {
+ ReplaceWithValue(exception_target, exception_target, exception_target,
+ jsgraph()->Dead());
+ }
+ }
+
NodeVector values(local_zone_);
NodeVector effects(local_zone_);
NodeVector controls(local_zone_);
@@ -235,6 +282,56 @@ Node* JSInliner::CreateTailCallerFrameState(Node* node, Node* frame_state) {
namespace {
+// TODO(bmeurer): Unify this with the witness helper functions in the
+// js-builtin-reducer.cc once we have a better understanding of the
+// map tracking we want to do, and eventually changed the CheckMaps
+// operator to carry map constants on the operator instead of inputs.
+// I.e. if the CheckMaps has some kind of SmallMapSet as operator
+// parameter, then this could be changed to call a generic
+//
+// SmallMapSet NodeProperties::CollectMapWitness(receiver, effect)
+//
+// function, which either returns the map set from the CheckMaps or
+// a singleton set from a StoreField.
+bool NeedsConvertReceiver(Node* receiver, Node* effect) {
+ for (Node* dominator = effect;;) {
+ if (dominator->opcode() == IrOpcode::kCheckMaps &&
+ dominator->InputAt(0) == receiver) {
+ // Check if all maps have the given {instance_type}.
+ for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
+ HeapObjectMatcher m(NodeProperties::GetValueInput(dominator, i));
+ if (!m.HasValue()) return true;
+ Handle<Map> const map = Handle<Map>::cast(m.Value());
+ if (!map->IsJSReceiverMap()) return true;
+ }
+ return false;
+ }
+ switch (dominator->opcode()) {
+ case IrOpcode::kStoreField: {
+ FieldAccess const& access = FieldAccessOf(dominator->op());
+ if (access.base_is_tagged == kTaggedBase &&
+ access.offset == HeapObject::kMapOffset) {
+ return true;
+ }
+ break;
+ }
+ case IrOpcode::kStoreElement:
+ case IrOpcode::kStoreTypedElement:
+ break;
+ default: {
+ DCHECK_EQ(1, dominator->op()->EffectOutputCount());
+ if (dominator->op()->EffectInputCount() != 1 ||
+ !dominator->op()->HasProperty(Operator::kNoWrite)) {
+ // Didn't find any appropriate CheckMaps node.
+ return true;
+ }
+ break;
+ }
+ }
+ dominator = NodeProperties::GetEffectInput(dominator);
+ }
+}
+
// TODO(mstarzinger,verwaest): Move this predicate onto SharedFunctionInfo?
bool NeedsImplicitReceiver(Handle<SharedFunctionInfo> shared_info) {
DisallowHeapAllocation no_gc;
@@ -270,7 +367,6 @@ Reduction JSInliner::Reduce(Node* node) {
return ReduceJSCall(node, function);
}
-
Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
JSCallAccessor call(node);
@@ -344,12 +440,35 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
}
}
- // TODO(turbofan): Inlining into a try-block is not yet supported.
- if (NodeProperties::IsExceptionalCall(node)) {
- TRACE("Not inlining %s into %s because of surrounding try-block\n",
+ // Find the IfException node, if any.
+ Node* exception_target = nullptr;
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge) &&
+ edge.from()->opcode() == IrOpcode::kIfException) {
+ DCHECK_NULL(exception_target);
+ exception_target = edge.from();
+ }
+ }
+
+ NodeVector uncaught_subcalls(local_zone_);
+
+ if (exception_target != nullptr) {
+ if (!FLAG_inline_into_try) {
+ TRACE(
+ "Try block surrounds #%d:%s and --no-inline-into-try active, so not "
+ "inlining %s into %s.\n",
+ exception_target->id(), exception_target->op()->mnemonic(),
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
- return NoChange();
+ return NoChange();
+ } else {
+ TRACE(
+ "Inlining %s into %s regardless of surrounding try-block to catcher "
+ "#%d:%s\n",
+ shared_info->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get(),
+ exception_target->id(), exception_target->op()->mnemonic());
+ }
}
Zone zone(info_->isolate()->allocator());
@@ -357,8 +476,20 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
CompilationInfo info(&parse_info, function);
if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
if (info_->is_type_feedback_enabled()) info.MarkAsTypeFeedbackEnabled();
+ if (info_->is_optimizing_from_bytecode()) info.MarkAsOptimizeFromBytecode();
+
+ if (info.is_optimizing_from_bytecode() && !Compiler::EnsureBytecode(&info)) {
+ TRACE("Not inlining %s into %s because bytecode generation failed\n",
+ shared_info->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ if (info_->isolate()->has_pending_exception()) {
+ info_->isolate()->clear_pending_exception();
+ }
+ return NoChange();
+ }
- if (!Compiler::ParseAndAnalyze(info.parse_info())) {
+ if (!info.is_optimizing_from_bytecode() &&
+ !Compiler::ParseAndAnalyze(info.parse_info())) {
TRACE("Not inlining %s into %s because parsing failed\n",
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
@@ -368,7 +499,8 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
return NoChange();
}
- if (!Compiler::EnsureDeoptimizationSupport(&info)) {
+ if (!info.is_optimizing_from_bytecode() &&
+ !Compiler::EnsureDeoptimizationSupport(&info)) {
TRACE("Not inlining %s into %s because deoptimization support failed\n",
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
@@ -388,13 +520,23 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
- // If function was lazily compiled, it's literals array may not yet be set up.
+ // If function was lazily compiled, its literals array may not yet be set up.
JSFunction::EnsureLiterals(function);
// Create the subgraph for the inlinee.
Node* start;
Node* end;
- {
+ if (info.is_optimizing_from_bytecode()) {
+ // Run the BytecodeGraphBuilder to create the subgraph.
+ Graph::SubgraphScope scope(graph());
+ BytecodeGraphBuilder graph_builder(&zone, &info, jsgraph(),
+ call.frequency());
+ graph_builder.CreateGraph();
+
+ // Extract the inlinee start/end nodes.
+ start = graph()->start();
+ end = graph()->end();
+ } else {
// Run the loop assignment analyzer on the inlinee.
AstLoopAssignmentAnalyzer loop_assignment_analyzer(&zone, &info);
LoopAssignmentAnalysis* loop_assignment =
@@ -407,8 +549,8 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// Run the AstGraphBuilder to create the subgraph.
Graph::SubgraphScope scope(graph());
- AstGraphBuilder graph_builder(&zone, &info, jsgraph(), loop_assignment,
- type_hint_analysis);
+ AstGraphBuilder graph_builder(&zone, &info, jsgraph(), call.frequency(),
+ loop_assignment, type_hint_analysis);
graph_builder.CreateGraph(false);
// Extract the inlinee start/end nodes.
@@ -416,6 +558,29 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
end = graph()->end();
}
+ if (exception_target != nullptr) {
+ // Find all uncaught 'calls' in the inlinee.
+ AllNodes inlined_nodes(local_zone_, end, graph());
+ for (Node* subnode : inlined_nodes.reachable) {
+ // Every possibly throwing node with an IfSuccess should get an
+ // IfException.
+ if (subnode->op()->HasProperty(Operator::kNoThrow)) {
+ continue;
+ }
+ bool hasIfException = false;
+ for (Node* use : subnode->uses()) {
+ if (use->opcode() == IrOpcode::kIfException) {
+ hasIfException = true;
+ break;
+ }
+ }
+ if (!hasIfException) {
+ DCHECK_EQ(2, subnode->op()->ControlOutputCount());
+ uncaught_subcalls.push_back(subnode);
+ }
+ }
+ }
+
Node* frame_state = call.frame_state();
Node* new_target = jsgraph()->UndefinedConstant();
@@ -475,15 +640,17 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// in that frame state tho, as the conversion of the receiver can be repeated
// any number of times, it's not observable.
if (node->opcode() == IrOpcode::kJSCallFunction &&
- is_sloppy(parse_info.language_mode()) && !shared_info->native()) {
- const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
- Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
+ is_sloppy(shared_info->language_mode()) && !shared_info->native()) {
Node* effect = NodeProperties::GetEffectInput(node);
- Node* convert = graph()->NewNode(
- javascript()->ConvertReceiver(p.convert_mode()), call.receiver(),
- context, frame_state_before, effect, start);
- NodeProperties::ReplaceValueInput(node, convert, 1);
- NodeProperties::ReplaceEffectInput(node, convert);
+ if (NeedsConvertReceiver(call.receiver(), effect)) {
+ const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+ Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
+ Node* convert = effect = graph()->NewNode(
+ javascript()->ConvertReceiver(p.convert_mode()), call.receiver(),
+ context, frame_state_before, effect, start);
+ NodeProperties::ReplaceValueInput(node, convert, 1);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ }
}
// If we are inlining a JS call at tail position then we have to pop current
@@ -504,7 +671,7 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// count (i.e. value outputs of start node minus target, receiver, new target,
// arguments count and context) have to match the number of arguments passed
// to the call.
- int parameter_count = info.literal()->parameter_count();
+ int parameter_count = shared_info->internal_formal_parameter_count();
DCHECK_EQ(parameter_count, start->op()->ValueOutputCount() - 5);
if (call.formal_arguments() != parameter_count) {
frame_state = CreateArtificialFrameState(
@@ -512,7 +679,8 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
FrameStateType::kArgumentsAdaptor, shared_info);
}
- return InlineCall(node, new_target, context, frame_state, start, end);
+ return InlineCall(node, new_target, context, frame_state, start, end,
+ exception_target, uncaught_subcalls);
}
Graph* JSInliner::graph() const { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 49487f5a0a..323c3ae0bf 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -54,7 +54,9 @@ class JSInliner final : public AdvancedReducer {
Node* CreateTailCallerFrameState(Node* node, Node* outer_frame_state);
Reduction InlineCall(Node* call, Node* new_target, Node* context,
- Node* frame_state, Node* start, Node* end);
+ Node* frame_state, Node* start, Node* end,
+ Node* exception_target,
+ const NodeVector& uncaught_subcalls);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 3324508559..7fc50e5f5f 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -302,10 +302,10 @@ Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
size_t const arity = CallRuntimeParametersOf(node->op()).arity();
- NodeProperties::ChangeOp(node,
- javascript()->CallFunction(arity, VectorSlotPair(),
- ConvertReceiverMode::kAny,
- TailCallMode::kDisallow));
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, 0.0f, VectorSlotPair(),
+ ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow));
return Changed(node);
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index b76744e199..ab20d93ebe 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -13,9 +13,9 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/type-cache.h"
#include "src/field-index-inl.h"
#include "src/isolate-inl.h"
-#include "src/type-cache.h"
#include "src/type-feedback-vector.h"
namespace v8 {
@@ -70,6 +70,8 @@ JSNativeContextSpecialization::JSNativeContextSpecialization(
Reduction JSNativeContextSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
+ case IrOpcode::kJSInstanceOf:
+ return ReduceJSInstanceOf(node);
case IrOpcode::kJSLoadContext:
return ReduceJSLoadContext(node);
case IrOpcode::kJSLoadNamed:
@@ -86,6 +88,99 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return NoChange();
}
+Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* constructor = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Retrieve the native context from the given {node}.
+ Handle<Context> native_context;
+ if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
+
+ // If deoptimization is disabled, we cannot optimize.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // Check if the right hand side is a known {receiver}.
+ HeapObjectMatcher m(constructor);
+ if (!m.HasValue() || !m.Value()->IsJSObject()) return NoChange();
+ Handle<JSObject> receiver = Handle<JSObject>::cast(m.Value());
+ Handle<Map> receiver_map(receiver->map(), isolate());
+
+ // Compute property access info for @@hasInstance on {receiver}.
+ PropertyAccessInfo access_info;
+ AccessInfoFactory access_info_factory(dependencies(), native_context,
+ graph()->zone());
+ if (!access_info_factory.ComputePropertyAccessInfo(
+ receiver_map, factory()->has_instance_symbol(), AccessMode::kLoad,
+ &access_info)) {
+ return NoChange();
+ }
+
+ if (access_info.IsNotFound()) {
+ // If there's no @@hasInstance handler, the OrdinaryHasInstance operation
+ // takes over, but that requires the {receiver} to be callable.
+ if (receiver->IsCallable()) {
+ // Determine actual holder and perform prototype chain checks.
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ AssumePrototypesStable(access_info.receiver_maps(), native_context,
+ holder);
+ }
+
+ // Monomorphic property access.
+ effect =
+ BuildCheckMaps(constructor, effect, control, MapList{receiver_map});
+
+ // Lower to OrdinaryHasInstance(C, O).
+ NodeProperties::ReplaceValueInput(node, constructor, 0);
+ NodeProperties::ReplaceValueInput(node, object, 1);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
+ return Changed(node);
+ }
+ } else if (access_info.IsDataConstant()) {
+ DCHECK(access_info.constant()->IsCallable());
+
+ // Determine actual holder and perform prototype chain checks.
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ AssumePrototypesStable(access_info.receiver_maps(), native_context,
+ holder);
+ }
+
+ // Monomorphic property access.
+ effect =
+ BuildCheckMaps(constructor, effect, control, MapList{receiver_map});
+
+ // Call the @@hasInstance handler.
+ Node* target = jsgraph()->Constant(access_info.constant());
+ node->InsertInput(graph()->zone(), 0, target);
+ node->ReplaceInput(1, constructor);
+ node->ReplaceInput(2, object);
+ node->ReplaceInput(5, effect);
+ NodeProperties::ChangeOp(
+ node,
+ javascript()->CallFunction(3, 0.0f, VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined));
+
+ // Rewire the value uses of {node} to ToBoolean conversion of the result.
+ Node* value = graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ node, context);
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsValueEdge(edge) && edge.from() != value) {
+ edge.UpdateTo(value);
+ Revisit(edge.from());
+ }
+ }
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
@@ -168,7 +263,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
receiver, effect, control);
} else {
// Monomorphic property access.
- effect = BuildCheckTaggedPointer(receiver, effect, control);
+ effect = BuildCheckHeapObject(receiver, effect, control);
effect = BuildCheckMaps(receiver, effect, control,
access_info.receiver_maps());
}
@@ -206,7 +301,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
receiverissmi_effect = effect;
} else {
- effect = BuildCheckTaggedPointer(receiver, effect, control);
+ effect = BuildCheckHeapObject(receiver, effect, control);
}
// Load the {receiver} map. The resulting effect is the dominating effect
@@ -510,7 +605,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Ensure that {receiver} is a heap object.
- effect = BuildCheckTaggedPointer(receiver, effect, control);
+ effect = BuildCheckHeapObject(receiver, effect, control);
// Check for the monomorphic case.
if (access_infos.size() == 1) {
@@ -818,13 +913,14 @@ JSNativeContextSpecialization::BuildPropertyAccess(
DCHECK_EQ(AccessMode::kLoad, access_mode);
value = jsgraph()->UndefinedConstant();
} else if (access_info.IsDataConstant()) {
- value = jsgraph()->Constant(access_info.constant());
+ Node* constant_value = jsgraph()->Constant(access_info.constant());
if (access_mode == AccessMode::kStore) {
- Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(), value, value);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), value,
+ constant_value);
effect =
graph()->NewNode(simplified()->CheckIf(), check, effect, control);
}
+ value = constant_value;
} else if (access_info.IsAccessorConstant()) {
// TODO(bmeurer): Properly rewire the IfException edge here if there's any.
Node* target = jsgraph()->Constant(access_info.constant());
@@ -849,7 +945,8 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// Introduce the call to the getter function.
value = effect = graph()->NewNode(
javascript()->CallFunction(
- 2, VectorSlotPair(), ConvertReceiverMode::kNotNullOrUndefined),
+ 2, 0.0f, VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, context, frame_state0, effect, control);
control = graph()->NewNode(common()->IfSuccess(), value);
break;
@@ -869,10 +966,11 @@ JSNativeContextSpecialization::BuildPropertyAccess(
context, target, frame_state);
// Introduce the call to the setter function.
- effect = graph()->NewNode(
- javascript()->CallFunction(
- 3, VectorSlotPair(), ConvertReceiverMode::kNotNullOrUndefined),
- target, receiver, value, context, frame_state0, effect, control);
+ effect = graph()->NewNode(javascript()->CallFunction(
+ 3, 0.0f, VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined),
+ target, receiver, value, context,
+ frame_state0, effect, control);
control = graph()->NewNode(common()->IfSuccess(), effect);
break;
}
@@ -881,9 +979,25 @@ JSNativeContextSpecialization::BuildPropertyAccess(
DCHECK(access_info.IsDataField());
FieldIndex const field_index = access_info.field_index();
Type* const field_type = access_info.field_type();
- if (access_mode == AccessMode::kLoad &&
- access_info.holder().ToHandle(&holder)) {
- receiver = jsgraph()->Constant(holder);
+ MachineRepresentation const field_representation =
+ access_info.field_representation();
+ if (access_mode == AccessMode::kLoad) {
+ if (access_info.holder().ToHandle(&holder)) {
+ receiver = jsgraph()->Constant(holder);
+ }
+ // Optimize immutable property loads.
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue() && m.Value()->IsJSObject()) {
+ // TODO(turbofan): Given that we already have the field_index here, we
+ // might be smarter in the future and not rely on the LookupIterator,
+ // but for now let's just do what Crankshaft does.
+ LookupIterator it(m.Value(), name,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (it.IsFound() && it.IsReadOnly() && !it.IsConfigurable()) {
+ Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
+ return ValueEffectControl(value, effect, control);
+ }
+ }
}
Node* storage = receiver;
if (!field_index.is_inobject()) {
@@ -892,89 +1006,112 @@ JSNativeContextSpecialization::BuildPropertyAccess(
storage, effect, control);
}
FieldAccess field_access = {
- kTaggedBase, field_index.offset(), name,
- field_type, MachineType::AnyTagged(), kFullWriteBarrier};
+ kTaggedBase,
+ field_index.offset(),
+ name,
+ field_type,
+ MachineType::TypeForRepresentation(field_representation),
+ kFullWriteBarrier};
if (access_mode == AccessMode::kLoad) {
- if (field_type->Is(Type::UntaggedFloat64())) {
- // TODO(turbofan): We remove the representation axis from the type to
- // avoid uninhabited representation types. This is a workaround until
- // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
- field_access.type = Type::Union(
- field_type, Type::Representation(Type::Number(), zone()), zone());
+ if (field_representation == MachineRepresentation::kFloat64) {
if (!field_index.is_inobject() || field_index.is_hidden_field() ||
!FLAG_unbox_double_fields) {
- storage = effect = graph()->NewNode(
- simplified()->LoadField(field_access), storage, effect, control);
+ FieldAccess const storage_access = {kTaggedBase,
+ field_index.offset(),
+ name,
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ storage = effect =
+ graph()->NewNode(simplified()->LoadField(storage_access), storage,
+ effect, control);
field_access.offset = HeapNumber::kValueOffset;
field_access.name = MaybeHandle<Name>();
}
- field_access.machine_type = MachineType::Float64();
}
+ // TODO(turbofan): Track the field_map (if any) on the {field_access} and
+ // use it in LoadElimination to eliminate map checks.
value = effect = graph()->NewNode(simplified()->LoadField(field_access),
storage, effect, control);
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
- if (field_type->Is(Type::UntaggedFloat64())) {
- // TODO(turbofan): We remove the representation axis from the type to
- // avoid uninhabited representation types. This is a workaround until
- // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
- field_access.type = Type::Union(
- field_type, Type::Representation(Type::Number(), zone()), zone());
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
-
- if (!field_index.is_inobject() || field_index.is_hidden_field() ||
- !FLAG_unbox_double_fields) {
- if (access_info.HasTransitionMap()) {
- // Allocate a MutableHeapNumber for the new property.
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable),
- effect);
- Node* box = effect = graph()->NewNode(
- simplified()->Allocate(NOT_TENURED),
- jsgraph()->Constant(HeapNumber::kSize), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), box,
- jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
- effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
- box, value, effect, control);
- value = effect =
- graph()->NewNode(common()->FinishRegion(), box, effect);
-
- field_access.type = Type::TaggedPointer();
- } else {
- // We just store directly to the MutableHeapNumber.
- storage = effect =
- graph()->NewNode(simplified()->LoadField(field_access), storage,
- effect, control);
- field_access.offset = HeapNumber::kValueOffset;
- field_access.name = MaybeHandle<Name>();
- field_access.machine_type = MachineType::Float64();
+ switch (field_representation) {
+ case MachineRepresentation::kFloat64: {
+ value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
+ effect, control);
+ if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+ !FLAG_unbox_double_fields) {
+ if (access_info.HasTransitionMap()) {
+ // Allocate a MutableHeapNumber for the new property.
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable),
+ effect);
+ Node* box = effect = graph()->NewNode(
+ simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Constant(HeapNumber::kSize), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), box,
+ jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
+ effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
+ box, value, effect, control);
+ value = effect =
+ graph()->NewNode(common()->FinishRegion(), box, effect);
+
+ field_access.type = Type::Any();
+ field_access.machine_type = MachineType::TaggedPointer();
+ field_access.write_barrier_kind = kPointerWriteBarrier;
+ } else {
+ // We just store directly to the MutableHeapNumber.
+ FieldAccess const storage_access = {kTaggedBase,
+ field_index.offset(),
+ name,
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ storage = effect =
+ graph()->NewNode(simplified()->LoadField(storage_access),
+ storage, effect, control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ field_access.machine_type = MachineType::Float64();
+ }
}
- } else {
- // Unboxed double field, we store directly to the field.
- field_access.machine_type = MachineType::Float64();
+ break;
}
- } else if (field_type->Is(Type::TaggedSigned())) {
- value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
- value, effect, control);
- } else if (field_type->Is(Type::TaggedPointer())) {
- // Ensure that {value} is a HeapObject.
- value = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
- value, effect, control);
- if (field_type->NumClasses() == 1) {
- // Emit a map check for the value.
- Node* field_map =
- jsgraph()->Constant(field_type->Classes().Current());
- effect = graph()->NewNode(simplified()->CheckMaps(1), value,
- field_map, effect, control);
- } else {
- DCHECK_EQ(0, field_type->NumClasses());
+ case MachineRepresentation::kTaggedSigned: {
+ value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
+ effect, control);
+ field_access.write_barrier_kind = kNoWriteBarrier;
+ break;
}
- } else {
- DCHECK(field_type->Is(Type::Tagged()));
+ case MachineRepresentation::kTaggedPointer: {
+ // Ensure that {value} is a HeapObject.
+ value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ value, effect, control);
+ Handle<Map> field_map;
+ if (access_info.field_map().ToHandle(&field_map)) {
+ // Emit a map check for the value.
+ effect = graph()->NewNode(simplified()->CheckMaps(1), value,
+ jsgraph()->HeapConstant(field_map),
+ effect, control);
+ }
+ field_access.write_barrier_kind = kPointerWriteBarrier;
+ break;
+ }
+ case MachineRepresentation::kTagged:
+ break;
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kSimd128:
+ UNREACHABLE();
+ break;
}
Handle<Map> transition_map;
if (access_info.transition_map().ToHandle(&transition_map)) {
@@ -1048,20 +1185,13 @@ JSNativeContextSpecialization::BuildElementAccess(
Node* buffer = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
receiver, effect, control);
- Node* buffer_bitfield = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
- buffer, effect, control);
- Node* check = graph()->NewNode(
- simplified()->NumberEqual(),
- graph()->NewNode(
- simplified()->NumberBitwiseAnd(), buffer_bitfield,
- jsgraph()->Constant(JSArrayBuffer::WasNeutered::kMask)),
- jsgraph()->ZeroConstant());
+ Node* check = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
// Default to zero if the {receiver}s buffer was neutered.
length = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
- check, length, jsgraph()->ZeroConstant());
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ check, jsgraph()->ZeroConstant(), length);
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
// Check that the {index} is a valid array index, we do the actual
@@ -1175,6 +1305,7 @@ JSNativeContextSpecialization::BuildElementAccess(
element_machine_type = MachineType::Float64();
} else if (IsFastSmiElementsKind(elements_kind)) {
element_type = type_cache_.kSmi;
+ element_machine_type = MachineType::TaggedSigned();
}
ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
element_type, element_machine_type,
@@ -1188,6 +1319,7 @@ JSNativeContextSpecialization::BuildElementAccess(
elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
element_access.type =
Type::Union(element_type, Type::Hole(), graph()->zone());
+ element_access.machine_type = MachineType::AnyTagged();
}
// Perform the actual backing store access.
value = effect =
@@ -1221,8 +1353,8 @@ JSNativeContextSpecialization::BuildElementAccess(
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
if (IsFastSmiElementsKind(elements_kind)) {
- value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
- value, effect, control);
+ value = effect =
+ graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
} else if (IsFastDoubleElementsKind(elements_kind)) {
value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
effect, control);
@@ -1293,9 +1425,9 @@ Node* JSNativeContextSpecialization::BuildCheckMaps(
inputs);
}
-Node* JSNativeContextSpecialization::BuildCheckTaggedPointer(Node* receiver,
- Node* effect,
- Node* control) {
+Node* JSNativeContextSpecialization::BuildCheckHeapObject(Node* receiver,
+ Node* effect,
+ Node* control) {
switch (receiver->opcode()) {
case IrOpcode::kHeapConstant:
case IrOpcode::kJSCreate:
@@ -1314,8 +1446,8 @@ Node* JSNativeContextSpecialization::BuildCheckTaggedPointer(Node* receiver,
return effect;
}
default: {
- return graph()->NewNode(simplified()->CheckTaggedPointer(), receiver,
- effect, control);
+ return graph()->NewNode(simplified()->CheckHeapObject(), receiver, effect,
+ control);
}
}
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 549dc93575..c015de08e7 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -16,8 +16,6 @@ namespace internal {
class CompilationDependencies;
class Factory;
class FeedbackNexus;
-class TypeCache;
-
namespace compiler {
@@ -30,7 +28,7 @@ class JSOperatorBuilder;
class MachineOperatorBuilder;
class PropertyAccessInfo;
class SimplifiedOperatorBuilder;
-
+class TypeCache;
// Specializes a given JSGraph to a given native context, potentially constant
// folding some {LoadGlobal} nodes or strength reducing some {StoreGlobal}
@@ -55,6 +53,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
private:
+ Reduction ReduceJSInstanceOf(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSStoreNamed(Node* node);
@@ -120,7 +119,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
std::vector<Handle<Map>> const& maps);
// Construct an appropriate heap object check.
- Node* BuildCheckTaggedPointer(Node* receiver, Node* effect, Node* control);
+ Node* BuildCheckHeapObject(Node* receiver, Node* effect, Node* control);
// Adds stability dependencies on all prototypes of every class in
// {receiver_type} up to (and including) the {holder}.
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index d19bb767b4..21e905aee6 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -54,7 +54,8 @@ ToBooleanHints ToBooleanHintsOf(Operator const* op) {
bool operator==(CallConstructParameters const& lhs,
CallConstructParameters const& rhs) {
- return lhs.arity() == rhs.arity() && lhs.feedback() == rhs.feedback();
+ return lhs.arity() == rhs.arity() && lhs.frequency() == rhs.frequency() &&
+ lhs.feedback() == rhs.feedback();
}
@@ -65,12 +66,12 @@ bool operator!=(CallConstructParameters const& lhs,
size_t hash_value(CallConstructParameters const& p) {
- return base::hash_combine(p.arity(), p.feedback());
+ return base::hash_combine(p.arity(), p.frequency(), p.feedback());
}
std::ostream& operator<<(std::ostream& os, CallConstructParameters const& p) {
- return os << p.arity();
+ return os << p.arity() << ", " << p.frequency();
}
@@ -81,7 +82,8 @@ CallConstructParameters const& CallConstructParametersOf(Operator const* op) {
std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
- os << p.arity() << ", " << p.convert_mode() << ", " << p.tail_call_mode();
+ os << p.arity() << ", " << p.frequency() << ", " << p.convert_mode() << ", "
+ << p.tail_call_mode();
return os;
}
@@ -157,6 +159,37 @@ ContextAccess const& ContextAccessOf(Operator const* op) {
return OpParameter<ContextAccess>(op);
}
+CreateCatchContextParameters::CreateCatchContextParameters(
+ Handle<String> catch_name, Handle<ScopeInfo> scope_info)
+ : catch_name_(catch_name), scope_info_(scope_info) {}
+
+bool operator==(CreateCatchContextParameters const& lhs,
+ CreateCatchContextParameters const& rhs) {
+ return lhs.catch_name().location() == rhs.catch_name().location() &&
+ lhs.scope_info().location() == rhs.scope_info().location();
+}
+
+bool operator!=(CreateCatchContextParameters const& lhs,
+ CreateCatchContextParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(CreateCatchContextParameters const& parameters) {
+ return base::hash_combine(parameters.catch_name().location(),
+ parameters.scope_info().location());
+}
+
+std::ostream& operator<<(std::ostream& os,
+ CreateCatchContextParameters const& parameters) {
+ return os << Brief(*parameters.catch_name()) << ", "
+ << Brief(*parameters.scope_info());
+}
+
+CreateCatchContextParameters const& CreateCatchContextParametersOf(
+ Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateCatchContext, op->opcode());
+ return OpParameter<CreateCatchContextParameters>(op);
+}
bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
return lhs.name().location() == rhs.name().location() &&
@@ -376,7 +409,7 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
return OpParameter<CreateLiteralParameters>(op);
}
-const BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
+BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
op->opcode() == IrOpcode::kJSBitwiseXor ||
op->opcode() == IrOpcode::kJSBitwiseAnd ||
@@ -391,7 +424,7 @@ const BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
return OpParameter<BinaryOperationHint>(op);
}
-const CompareOperationHint CompareOperationHintOf(const Operator* op) {
+CompareOperationHint CompareOperationHintOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSEqual ||
op->opcode() == IrOpcode::kJSNotEqual ||
op->opcode() == IrOpcode::kJSStrictEqual ||
@@ -415,15 +448,13 @@ const CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(HasProperty, Operator::kNoProperties, 2, 1) \
V(TypeOf, Operator::kPure, 1, 1) \
V(InstanceOf, Operator::kNoProperties, 2, 1) \
- V(ForInDone, Operator::kPure, 2, 1) \
+ V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
V(ForInNext, Operator::kNoProperties, 4, 1) \
V(ForInPrepare, Operator::kNoProperties, 1, 3) \
- V(ForInStep, Operator::kPure, 1, 1) \
V(LoadMessage, Operator::kNoThrow, 0, 1) \
V(StoreMessage, Operator::kNoThrow, 1, 0) \
V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
- V(StackCheck, Operator::kNoWrite, 0, 0) \
- V(CreateWithContext, Operator::kNoProperties, 2, 1)
+ V(StackCheck, Operator::kNoWrite, 0, 0)
#define BINARY_OP_LIST(V) \
V(BitwiseOr) \
@@ -476,6 +507,7 @@ struct JSOperatorGlobalCache final {
Name##Operator<BinaryOperationHint::kSigned32> k##Name##Signed32Operator; \
Name##Operator<BinaryOperationHint::kNumberOrOddball> \
k##Name##NumberOrOddballOperator; \
+ Name##Operator<BinaryOperationHint::kString> k##Name##StringOperator; \
Name##Operator<BinaryOperationHint::kAny> k##Name##AnyOperator;
BINARY_OP_LIST(BINARY_OP)
#undef BINARY_OP
@@ -523,6 +555,8 @@ CACHED_OP_LIST(CACHED_OP)
return &cache_.k##Name##Signed32Operator; \
case BinaryOperationHint::kNumberOrOddball: \
return &cache_.k##Name##NumberOrOddballOperator; \
+ case BinaryOperationHint::kString: \
+ return &cache_.k##Name##StringOperator; \
case BinaryOperationHint::kAny: \
return &cache_.k##Name##AnyOperator; \
} \
@@ -562,9 +596,9 @@ const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
}
const Operator* JSOperatorBuilder::CallFunction(
- size_t arity, VectorSlotPair const& feedback,
+ size_t arity, float frequency, VectorSlotPair const& feedback,
ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
- CallFunctionParameters parameters(arity, feedback, tail_call_mode,
+ CallFunctionParameters parameters(arity, frequency, feedback, tail_call_mode,
convert_mode);
return new (zone()) Operator1<CallFunctionParameters>( // --
IrOpcode::kJSCallFunction, Operator::kNoProperties, // opcode
@@ -598,10 +632,9 @@ const Operator* JSOperatorBuilder::CallRuntime(const Runtime::Function* f,
parameters); // parameter
}
-
const Operator* JSOperatorBuilder::CallConstruct(
- size_t arity, VectorSlotPair const& feedback) {
- CallConstructParameters parameters(arity, feedback);
+ uint32_t arity, float frequency, VectorSlotPair const& feedback) {
+ CallConstructParameters parameters(arity, frequency, feedback);
return new (zone()) Operator1<CallConstructParameters>( // --
IrOpcode::kJSCallConstruct, Operator::kNoProperties, // opcode
"JSCallConstruct", // name
@@ -811,16 +844,24 @@ const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count) {
slot_count); // parameter
}
-
const Operator* JSOperatorBuilder::CreateCatchContext(
- const Handle<String>& name) {
- return new (zone()) Operator1<Handle<String>>( // --
+ const Handle<String>& name, const Handle<ScopeInfo>& scope_info) {
+ CreateCatchContextParameters parameters(name, scope_info);
+ return new (zone()) Operator1<CreateCatchContextParameters>(
IrOpcode::kJSCreateCatchContext, Operator::kNoProperties, // opcode
"JSCreateCatchContext", // name
2, 1, 1, 1, 1, 2, // counts
- name); // parameter
+ parameters); // parameter
}
+const Operator* JSOperatorBuilder::CreateWithContext(
+ const Handle<ScopeInfo>& scope_info) {
+ return new (zone()) Operator1<Handle<ScopeInfo>>(
+ IrOpcode::kJSCreateWithContext, Operator::kNoProperties, // opcode
+ "JSCreateWithContext", // name
+ 2, 1, 1, 1, 1, 2, // counts
+ scope_info); // parameter
+}
const Operator* JSOperatorBuilder::CreateBlockContext(
const Handle<ScopeInfo>& scpope_info) {
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 19022fa881..2374ae63ae 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_JS_OPERATOR_H_
#define V8_COMPILER_JS_OPERATOR_H_
-#include "src/compiler/type-hints.h"
#include "src/runtime/runtime.h"
+#include "src/type-hints.h"
namespace v8 {
namespace internal {
@@ -55,14 +55,17 @@ ToBooleanHints ToBooleanHintsOf(Operator const* op);
// used as a parameter by JSCallConstruct operators.
class CallConstructParameters final {
public:
- CallConstructParameters(size_t arity, VectorSlotPair const& feedback)
- : arity_(arity), feedback_(feedback) {}
+ CallConstructParameters(uint32_t arity, float frequency,
+ VectorSlotPair const& feedback)
+ : arity_(arity), frequency_(frequency), feedback_(feedback) {}
- size_t arity() const { return arity_; }
+ uint32_t arity() const { return arity_; }
+ float frequency() const { return frequency_; }
VectorSlotPair const& feedback() const { return feedback_; }
private:
- size_t const arity_;
+ uint32_t const arity_;
+ float const frequency_;
VectorSlotPair const feedback_;
};
@@ -80,15 +83,18 @@ CallConstructParameters const& CallConstructParametersOf(Operator const*);
// used as a parameter by JSCallFunction operators.
class CallFunctionParameters final {
public:
- CallFunctionParameters(size_t arity, VectorSlotPair const& feedback,
+ CallFunctionParameters(size_t arity, float frequency,
+ VectorSlotPair const& feedback,
TailCallMode tail_call_mode,
ConvertReceiverMode convert_mode)
: bit_field_(ArityField::encode(arity) |
ConvertReceiverModeField::encode(convert_mode) |
TailCallModeField::encode(tail_call_mode)),
+ frequency_(frequency),
feedback_(feedback) {}
size_t arity() const { return ArityField::decode(bit_field_); }
+ float frequency() const { return frequency_; }
ConvertReceiverMode convert_mode() const {
return ConvertReceiverModeField::decode(bit_field_);
}
@@ -99,6 +105,7 @@ class CallFunctionParameters final {
bool operator==(CallFunctionParameters const& that) const {
return this->bit_field_ == that.bit_field_ &&
+ this->frequency_ == that.frequency_ &&
this->feedback_ == that.feedback_;
}
bool operator!=(CallFunctionParameters const& that) const {
@@ -107,15 +114,16 @@ class CallFunctionParameters final {
private:
friend size_t hash_value(CallFunctionParameters const& p) {
- return base::hash_combine(p.bit_field_, p.feedback_);
+ return base::hash_combine(p.bit_field_, p.frequency_, p.feedback_);
}
typedef BitField<size_t, 0, 29> ArityField;
typedef BitField<ConvertReceiverMode, 29, 2> ConvertReceiverModeField;
typedef BitField<TailCallMode, 31, 1> TailCallModeField;
- const uint32_t bit_field_;
- const VectorSlotPair feedback_;
+ uint32_t const bit_field_;
+ float const frequency_;
+ VectorSlotPair const feedback_;
};
size_t hash_value(CallFunctionParameters const&);
@@ -178,6 +186,33 @@ std::ostream& operator<<(std::ostream&, ContextAccess const&);
ContextAccess const& ContextAccessOf(Operator const*);
+// Defines the name and ScopeInfo for a new catch context. This is used as a
+// parameter by the JSCreateCatchContext operator.
+class CreateCatchContextParameters final {
+ public:
+ CreateCatchContextParameters(Handle<String> catch_name,
+ Handle<ScopeInfo> scope_info);
+
+ Handle<String> catch_name() const { return catch_name_; }
+ Handle<ScopeInfo> scope_info() const { return scope_info_; }
+
+ private:
+ Handle<String> const catch_name_;
+ Handle<ScopeInfo> const scope_info_;
+};
+
+bool operator==(CreateCatchContextParameters const& lhs,
+ CreateCatchContextParameters const& rhs);
+bool operator!=(CreateCatchContextParameters const& lhs,
+ CreateCatchContextParameters const& rhs);
+
+size_t hash_value(CreateCatchContextParameters const& parameters);
+
+std::ostream& operator<<(std::ostream& os,
+ CreateCatchContextParameters const& parameters);
+
+CreateCatchContextParameters const& CreateCatchContextParametersOf(
+ Operator const*);
// Defines the property of an object for a named access. This is
// used as a parameter by the JSLoadNamed and JSStoreNamed operators.
@@ -374,9 +409,9 @@ std::ostream& operator<<(std::ostream&, CreateLiteralParameters const&);
const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
-const BinaryOperationHint BinaryOperationHintOf(const Operator* op);
+BinaryOperationHint BinaryOperationHintOf(const Operator* op);
-const CompareOperationHint CompareOperationHintOf(const Operator* op);
+CompareOperationHint CompareOperationHintOf(const Operator* op);
// Interface for building JavaScript-level operators, e.g. directly from the
// AST. Most operators have no parameters, thus can be globally shared for all
@@ -430,13 +465,15 @@ class JSOperatorBuilder final : public ZoneObject {
int literal_flags, int literal_index);
const Operator* CallFunction(
- size_t arity, VectorSlotPair const& feedback = VectorSlotPair(),
+ size_t arity, float frequency = 0.0f,
+ VectorSlotPair const& feedback = VectorSlotPair(),
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
- const Operator* CallConstruct(size_t arity, VectorSlotPair const& feedback);
+ const Operator* CallConstruct(uint32_t arity, float frequency,
+ VectorSlotPair const& feedback);
const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
@@ -464,11 +501,10 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* TypeOf();
const Operator* InstanceOf();
+ const Operator* OrdinaryHasInstance();
- const Operator* ForInDone();
const Operator* ForInNext();
const Operator* ForInPrepare();
- const Operator* ForInStep();
const Operator* LoadMessage();
const Operator* StoreMessage();
@@ -483,8 +519,9 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* StackCheck();
const Operator* CreateFunctionContext(int slot_count);
- const Operator* CreateCatchContext(const Handle<String>& name);
- const Operator* CreateWithContext();
+ const Operator* CreateCatchContext(const Handle<String>& name,
+ const Handle<ScopeInfo>& scope_info);
+ const Operator* CreateWithContext(const Handle<ScopeInfo>& scope_info);
const Operator* CreateBlockContext(const Handle<ScopeInfo>& scpope_info);
const Operator* CreateModuleContext();
const Operator* CreateScriptContext(const Handle<ScopeInfo>& scpope_info);
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 89ab0de97a..82df4edf24 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -13,8 +13,8 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
-#include "src/type-cache.h"
-#include "src/types.h"
+#include "src/compiler/type-cache.h"
+#include "src/compiler/types.h"
namespace v8 {
namespace internal {
@@ -46,6 +46,7 @@ class JSBinopReduction final {
return true;
case BinaryOperationHint::kAny:
case BinaryOperationHint::kNone:
+ case BinaryOperationHint::kString:
break;
}
}
@@ -73,6 +74,37 @@ class JSBinopReduction final {
return false;
}
+ // Check if a string addition will definitely result in creating a ConsString,
+ // i.e. if the combined length of the resulting string exceeds the ConsString
+ // minimum length.
+ bool ShouldCreateConsString() {
+ DCHECK_EQ(IrOpcode::kJSAdd, node_->opcode());
+ if (BothInputsAre(Type::String()) ||
+ ((lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) &&
+ BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString)) {
+ if (right_type()->IsConstant() &&
+ right_type()->AsConstant()->Value()->IsString()) {
+ Handle<String> right_string =
+ Handle<String>::cast(right_type()->AsConstant()->Value());
+ if (right_string->length() >= ConsString::kMinLength) return true;
+ }
+ if (left_type()->IsConstant() &&
+ left_type()->AsConstant()->Value()->IsString()) {
+ Handle<String> left_string =
+ Handle<String>::cast(left_type()->AsConstant()->Value());
+ if (left_string->length() >= ConsString::kMinLength) {
+ // The invariant for ConsString requires the left hand side to be
+ // a sequential or external string if the right hand side is the
+ // empty string. Since we don't know anything about the right hand
+ // side here, we must ensure that the left hand side satisfy the
+ // constraints independent of the right hand side.
+ return left_string->IsSeqString() || left_string->IsExternalString();
+ }
+ }
+ }
+ return false;
+ }
+
void ConvertInputsToNumber() {
// To convert the inputs to numbers, we have to provide frame states
// for lazy bailouts in the ToNumber conversions.
@@ -430,8 +462,6 @@ JSTypedLowering::JSTypedLowering(Editor* editor,
dependencies_(dependencies),
flags_(flags),
jsgraph_(jsgraph),
- true_type_(Type::Constant(factory()->true_value(), graph()->zone())),
- false_type_(Type::Constant(factory()->false_value(), graph()->zone())),
the_hole_type_(
Type::Constant(factory()->the_hole_value(), graph()->zone())),
type_cache_(TypeCache::Get()) {
@@ -469,6 +499,9 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
if (r.OneInputIs(Type::String())) {
+ if (r.ShouldCreateConsString()) {
+ return ReduceCreateConsString(node);
+ }
StringAddFlags flags = STRING_ADD_CHECK_NONE;
if (!r.LeftInputIs(Type::String())) {
flags = STRING_ADD_CONVERT_LEFT;
@@ -546,6 +579,123 @@ Reduction JSTypedLowering::ReduceUI32Shift(Node* node, Signedness signedness) {
return NoChange();
}
+Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
+ Node* first = NodeProperties::GetValueInput(node, 0);
+ Node* second = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Make sure {first} is actually a String.
+ Type* first_type = NodeProperties::GetType(first);
+ if (!first_type->Is(Type::String())) {
+ first = effect =
+ graph()->NewNode(simplified()->CheckString(), first, effect, control);
+ first_type = NodeProperties::GetType(first);
+ }
+
+ // Make sure {second} is actually a String.
+ Type* second_type = NodeProperties::GetType(second);
+ if (!second_type->Is(Type::String())) {
+ second = effect =
+ graph()->NewNode(simplified()->CheckString(), second, effect, control);
+ second_type = NodeProperties::GetType(second);
+ }
+
+ // Determine the {first} length.
+ Node* first_length =
+ first_type->IsConstant()
+ ? jsgraph()->Constant(
+ Handle<String>::cast(first_type->AsConstant()->Value())
+ ->length())
+ : effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()),
+ first, effect, control);
+
+ // Determine the {second} length.
+ Node* second_length =
+ second_type->IsConstant()
+ ? jsgraph()->Constant(
+ Handle<String>::cast(second_type->AsConstant()->Value())
+ ->length())
+ : effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()),
+ second, effect, control);
+
+ // Compute the resulting length.
+ Node* length =
+ graph()->NewNode(simplified()->NumberAdd(), first_length, second_length);
+
+ // Check if we would overflow the allowed maximum string length.
+ Node* check = graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
+ jsgraph()->Constant(String::kMaxLength));
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ // Throw a RangeError in case of overflow.
+ Node* vfalse = efalse = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowInvalidStringLength), context,
+ frame_state, efalse, if_false);
+ if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
+ if_false = graph()->NewNode(common()->Throw(), vfalse, efalse, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), if_false);
+ Revisit(graph()->end());
+
+ // Update potential {IfException} uses of {node} to point to the
+ // %ThrowInvalidStringLength runtime call node instead.
+ for (Edge edge : node->use_edges()) {
+ if (edge.from()->opcode() == IrOpcode::kIfException) {
+ DCHECK(NodeProperties::IsControlEdge(edge) ||
+ NodeProperties::IsEffectEdge(edge));
+ edge.UpdateTo(vfalse);
+ Revisit(edge.from());
+ }
+ }
+ }
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Figure out the map for the resulting ConsString.
+ // TODO(turbofan): We currently just use the cons_string_map here for
+ // the sake of simplicity; we could also try to be smarter here and
+ // use the one_byte_cons_string_map instead when the resulting ConsString
+ // contains only one byte characters.
+ Node* value_map = jsgraph()->HeapConstant(factory()->cons_string_map());
+
+ // Allocate the resulting ConsString.
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ Node* value = effect =
+ graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Constant(ConsString::kSize), effect, control);
+ NodeProperties::SetType(value, Type::OtherString());
+ effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+ value, value_map, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForNameHashField()), value,
+ jsgraph()->Uint32Constant(Name::kEmptyHashField), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForStringLength()), value, length,
+ effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForConsStringFirst()), value,
+ first, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForConsStringSecond()), value,
+ second, effect, control);
+
+ // Morph the {node} into a {FinishRegion}.
+ ReplaceWithValue(node, node, node, control);
+ node->ReplaceInput(0, value);
+ node->ReplaceInput(1, effect);
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, common()->FinishRegion());
+ return Changed(node);
+}
+
Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::String())) {
@@ -779,22 +929,10 @@ Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
NodeProperties::ChangeOp(node, simplified()->BooleanNot());
return Changed(node);
} else if (input_type->Is(Type::Number())) {
- // JSToBoolean(x:number) => NumberLessThan(#0,NumberAbs(x))
+ // JSToBoolean(x:number) => NumberToBoolean(x)
RelaxEffectsAndControls(node);
- node->ReplaceInput(0, jsgraph()->ZeroConstant());
- node->ReplaceInput(1, graph()->NewNode(simplified()->NumberAbs(), input));
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, simplified()->NumberLessThan());
- return Changed(node);
- } else if (input_type->Is(Type::String())) {
- // JSToBoolean(x:string) => NumberLessThan(#0,x.length)
- FieldAccess const access = AccessBuilder::ForStringLength();
- Node* length = graph()->NewNode(simplified()->LoadField(access), input,
- graph()->start(), graph()->start());
- ReplaceWithValue(node, node, length);
- node->ReplaceInput(0, jsgraph()->ZeroConstant());
- node->ReplaceInput(1, length);
- NodeProperties::ChangeOp(node, simplified()->NumberLessThan());
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->NumberToBoolean());
return Changed(node);
}
return NoChange();
@@ -821,23 +959,12 @@ Reduction JSTypedLowering::ReduceJSToLength(Node* node) {
input = jsgraph()->Constant(kMaxSafeInteger);
} else {
if (input_type->Min() <= 0.0) {
- input = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged),
- graph()->NewNode(simplified()->NumberLessThanOrEqual(), input,
- jsgraph()->ZeroConstant()),
- jsgraph()->ZeroConstant(), input);
- input_type = Type::Range(0.0, input_type->Max(), graph()->zone());
- NodeProperties::SetType(input, input_type);
+ input = graph()->NewNode(simplified()->NumberMax(),
+ jsgraph()->ZeroConstant(), input);
}
if (input_type->Max() > kMaxSafeInteger) {
- input = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged),
- graph()->NewNode(simplified()->NumberLessThanOrEqual(),
- jsgraph()->Constant(kMaxSafeInteger), input),
- jsgraph()->Constant(kMaxSafeInteger), input);
- input_type =
- Type::Range(input_type->Min(), kMaxSafeInteger, graph()->zone());
- NodeProperties::SetType(input, input_type);
+ input = graph()->NewNode(simplified()->NumberMin(),
+ jsgraph()->Constant(kMaxSafeInteger), input);
}
}
ReplaceWithValue(node, input);
@@ -1132,169 +1259,162 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
return NoChange();
}
-Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
- DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
- Node* const context = NodeProperties::GetContextInput(node);
- Node* const frame_state = NodeProperties::GetFrameStateInput(node);
-
- // If deoptimization is disabled, we cannot optimize.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
- // If we are in a try block, don't optimize since the runtime call
- // in the proxy case can throw.
- if (NodeProperties::IsExceptionalCall(node)) return NoChange();
-
- JSBinopReduction r(this, node);
- Node* effect = r.effect();
- Node* control = r.control();
+Reduction JSTypedLowering::ReduceJSOrdinaryHasInstance(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSOrdinaryHasInstance, node->opcode());
+ Node* constructor = NodeProperties::GetValueInput(node, 0);
+ Type* constructor_type = NodeProperties::GetType(constructor);
+ Node* object = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
- if (!r.right_type()->IsConstant() ||
- !r.right_type()->AsConstant()->Value()->IsJSFunction()) {
+ // Check if the {constructor} is a (known) JSFunction.
+ if (!constructor_type->IsConstant() ||
+ !constructor_type->AsConstant()->Value()->IsJSFunction()) {
return NoChange();
}
-
Handle<JSFunction> function =
- Handle<JSFunction>::cast(r.right_type()->AsConstant()->Value());
- Handle<SharedFunctionInfo> shared(function->shared(), isolate());
-
- // Make sure the prototype of {function} is the %FunctionPrototype%, and it
- // already has a meaningful initial map (i.e. we constructed at least one
- // instance using the constructor {function}).
- if (function->map()->prototype() != function->native_context()->closure() ||
- function->map()->has_non_instance_prototype() ||
- !function->has_initial_map()) {
- return NoChange();
- }
+ Handle<JSFunction>::cast(constructor_type->AsConstant()->Value());
+
+ // Check if the {function} already has an initial map (i.e. the
+ // {function} has been used as a constructor at least once).
+ if (!function->has_initial_map()) return NoChange();
- // We can only use the fast case if @@hasInstance was not used so far.
- if (!isolate()->IsHasInstanceLookupChainIntact()) return NoChange();
- dependencies()->AssumePropertyCell(factory()->has_instance_protector());
+ // Check if the {function}s "prototype" is a JSReceiver.
+ if (!function->prototype()->IsJSReceiver()) return NoChange();
+ // Install a code dependency on the {function}s initial map.
Handle<Map> initial_map(function->initial_map(), isolate());
dependencies()->AssumeInitialMapCantChange(initial_map);
+
Node* prototype =
jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
- // If the left hand side is an object, no smi check is needed.
- Node* is_smi = graph()->NewNode(simplified()->ObjectIsSmi(), r.left());
- Node* branch_is_smi =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), is_smi, control);
- Node* if_is_smi = graph()->NewNode(common()->IfTrue(), branch_is_smi);
- Node* e_is_smi = effect;
- control = graph()->NewNode(common()->IfFalse(), branch_is_smi);
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), object);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
- Node* object_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- r.left(), effect, control);
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = jsgraph()->FalseConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch0);
// Loop through the {object}s prototype chain looking for the {prototype}.
Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
-
- Node* loop_effect = effect =
+ Node* eloop = effect =
graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* vloop = object = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), object, object, loop);
+ // TODO(jarin): This is a very ugly hack to work-around the super-smart
+ // implicit typing of the Phi, which goes completely nuts if the {object}
+ // is for example a HeapConstant.
+ NodeProperties::SetType(vloop, Type::NonInternal());
+
+ // Load the {object} map and instance type.
+ Node* object_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), object,
+ effect, control);
+ Node* object_instance_type = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()), object_map,
+ effect, control);
+
+ // Check if the {object} is a special receiver, because for special
+ // receivers, i.e. proxies or API objects that need access checks,
+ // we have to use the %HasInPrototypeChain runtime function instead.
+ Node* check1 = graph()->NewNode(
+ simplified()->NumberLessThanOrEqual(), object_instance_type,
+ jsgraph()->Constant(LAST_SPECIAL_RECEIVER_TYPE));
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
- Node* loop_object_map =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- object_map, r.left(), loop);
-
- // Check if the lhs needs access checks.
- Node* map_bit_field = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMapBitField()),
- loop_object_map, loop_effect, control);
- int is_access_check_needed_bit = 1 << Map::kIsAccessCheckNeeded;
- Node* is_access_check_needed_num =
- graph()->NewNode(simplified()->NumberBitwiseAnd(), map_bit_field,
- jsgraph()->Constant(is_access_check_needed_bit));
- Node* is_access_check_needed =
- graph()->NewNode(simplified()->NumberEqual(), is_access_check_needed_num,
- jsgraph()->Constant(is_access_check_needed_bit));
-
- Node* branch_is_access_check_needed = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), is_access_check_needed, control);
- Node* if_is_access_check_needed =
- graph()->NewNode(common()->IfTrue(), branch_is_access_check_needed);
- Node* e_is_access_check_needed = effect;
+ control = graph()->NewNode(common()->IfFalse(), branch1);
- control =
- graph()->NewNode(common()->IfFalse(), branch_is_access_check_needed);
-
- // Check if the lhs is a proxy.
- Node* map_instance_type = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- loop_object_map, loop_effect, control);
- Node* is_proxy =
- graph()->NewNode(simplified()->NumberEqual(), map_instance_type,
- jsgraph()->Constant(JS_PROXY_TYPE));
- Node* branch_is_proxy =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), is_proxy, control);
- Node* if_is_proxy = graph()->NewNode(common()->IfTrue(), branch_is_proxy);
- Node* e_is_proxy = effect;
-
- control = graph()->NewNode(common()->Merge(2), if_is_access_check_needed,
- if_is_proxy);
- effect = graph()->NewNode(common()->EffectPhi(2), e_is_access_check_needed,
- e_is_proxy, control);
-
- // If we need an access check or the object is a Proxy, make a runtime call
- // to finish the lowering.
- Node* runtimecall = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kHasInPrototypeChain), r.left(),
- prototype, context, frame_state, effect, control);
-
- Node* runtimecall_control =
- graph()->NewNode(common()->IfSuccess(), runtimecall);
-
- control = graph()->NewNode(common()->IfFalse(), branch_is_proxy);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = effect;
+ Node* vtrue1;
+
+ // Check if the {object} is not a receiver at all.
+ Node* check10 =
+ graph()->NewNode(simplified()->NumberLessThan(), object_instance_type,
+ jsgraph()->Constant(FIRST_JS_RECEIVER_TYPE));
+ Node* branch10 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check10, if_true1);
+
+ // A primitive value cannot match the {prototype} we're looking for.
+ if_true1 = graph()->NewNode(common()->IfTrue(), branch10);
+ vtrue1 = jsgraph()->FalseConstant();
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch10);
+ Node* efalse1 = etrue1;
+ Node* vfalse1;
+ {
+ // Slow path, need to call the %HasInPrototypeChain runtime function.
+ vfalse1 = efalse1 = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kHasInPrototypeChain), object,
+ prototype, context, frame_state, efalse1, if_false1);
+ if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
+
+ // Replace any potential IfException on {node} to catch exceptions
+ // from this %HasInPrototypeChain runtime call instead.
+ for (Edge edge : node->use_edges()) {
+ if (edge.from()->opcode() == IrOpcode::kIfException) {
+ edge.UpdateTo(vfalse1);
+ Revisit(edge.from());
+ }
+ }
+ }
+ // Load the {object} prototype.
Node* object_prototype = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapPrototype()),
- loop_object_map, loop_effect, control);
-
- // If not, check if object prototype is the null prototype.
- Node* null_proto =
- graph()->NewNode(simplified()->ReferenceEqual(), object_prototype,
- jsgraph()->NullConstant());
- Node* branch_null_proto = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), null_proto, control);
- Node* if_null_proto = graph()->NewNode(common()->IfTrue(), branch_null_proto);
- Node* e_null_proto = effect;
-
- control = graph()->NewNode(common()->IfFalse(), branch_null_proto);
-
- // Check if object prototype is equal to function prototype.
- Node* eq_proto = graph()->NewNode(simplified()->ReferenceEqual(),
- object_prototype, prototype);
- Node* branch_eq_proto =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), eq_proto, control);
- Node* if_eq_proto = graph()->NewNode(common()->IfTrue(), branch_eq_proto);
- Node* e_eq_proto = effect;
-
- control = graph()->NewNode(common()->IfFalse(), branch_eq_proto);
-
- Node* load_object_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- object_prototype, effect, control);
- // Close the loop.
- loop_effect->ReplaceInput(1, effect);
- loop_object_map->ReplaceInput(1, load_object_map);
- loop->ReplaceInput(1, control);
+ simplified()->LoadField(AccessBuilder::ForMapPrototype()), object_map,
+ effect, control);
+
+ // Check if we reached the end of {object}s prototype chain.
+ Node* check2 = graph()->NewNode(simplified()->ReferenceEqual(),
+ object_prototype, jsgraph()->NullConstant());
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, control);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* etrue2 = effect;
+ Node* vtrue2 = jsgraph()->FalseConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch2);
- control = graph()->NewNode(common()->Merge(3), runtimecall_control,
- if_eq_proto, if_null_proto);
- effect = graph()->NewNode(common()->EffectPhi(3), runtimecall, e_eq_proto,
- e_null_proto, control);
+ // Check if we reached the {prototype}.
+ Node* check3 = graph()->NewNode(simplified()->ReferenceEqual(),
+ object_prototype, prototype);
+ Node* branch3 = graph()->NewNode(common()->Branch(), check3, control);
- Node* result = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 3), runtimecall,
- jsgraph()->TrueConstant(), jsgraph()->FalseConstant(), control);
+ Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
+ Node* etrue3 = effect;
+ Node* vtrue3 = jsgraph()->TrueConstant();
- control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
- effect = graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
- result = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- jsgraph()->FalseConstant(), result, control);
+ control = graph()->NewNode(common()->IfFalse(), branch3);
- ReplaceWithValue(node, result, effect, control);
- return Changed(result);
+ // Close the loop.
+ vloop->ReplaceInput(1, object_prototype);
+ eloop->ReplaceInput(1, effect);
+ loop->ReplaceInput(1, control);
+
+ control = graph()->NewNode(common()->Merge(5), if_true0, if_true1, if_true2,
+ if_true3, if_false1);
+ effect = graph()->NewNode(common()->EffectPhi(5), etrue0, etrue1, etrue2,
+ etrue3, efalse1, control);
+
+ // Morph the {node} into an appropriate Phi.
+ ReplaceWithValue(node, node, effect, control);
+ node->ReplaceInput(0, vtrue0);
+ node->ReplaceInput(1, vtrue1);
+ node->ReplaceInput(2, vtrue2);
+ node->ReplaceInput(3, vtrue3);
+ node->ReplaceInput(4, vfalse1);
+ node->ReplaceInput(5, control);
+ node->TrimInputCount(6);
+ NodeProperties::ChangeOp(node,
+ common()->Phi(MachineRepresentation::kTagged, 5));
+ return Changed(node);
}
Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
@@ -1546,16 +1666,18 @@ void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
const int argc = arity + BuiltinArguments::kNumExtraArgsWithReceiver;
Node* argc_node = jsgraph->Int32Constant(argc);
- node->InsertInput(zone, arity + 2, argc_node);
- node->InsertInput(zone, arity + 3, target);
- node->InsertInput(zone, arity + 4, new_target);
+ static const int kStubAndReceiver = 2;
+ int cursor = arity + kStubAndReceiver;
+ node->InsertInput(zone, cursor++, argc_node);
+ node->InsertInput(zone, cursor++, target);
+ node->InsertInput(zone, cursor++, new_target);
Address entry = Builtins::CppEntryOf(builtin_index);
ExternalReference entry_ref(ExternalReference(entry, isolate));
Node* entry_node = jsgraph->ExternalConstant(entry_ref);
- node->InsertInput(zone, arity + 5, entry_node);
- node->InsertInput(zone, arity + 6, argc_node);
+ node->InsertInput(zone, cursor++, entry_node);
+ node->InsertInput(zone, cursor++, argc_node);
static const int kReturnCount = 1;
const char* debug_name = Builtins::name(builtin_index);
@@ -1566,6 +1688,12 @@ void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
NodeProperties::ChangeOp(node, jsgraph->common()->Call(desc));
}
+bool NeedsArgumentAdaptorFrame(Handle<SharedFunctionInfo> shared, int arity) {
+ static const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ const int num_decl_parms = shared->internal_formal_parameter_count();
+ return (num_decl_parms != arity && num_decl_parms != sentinel);
+}
+
} // namespace
Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
@@ -1591,9 +1719,7 @@ Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
if (is_builtin && Builtins::HasCppImplementation(builtin_index) &&
- (shared->internal_formal_parameter_count() == arity ||
- shared->internal_formal_parameter_count() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel)) {
+ !NeedsArgumentAdaptorFrame(shared, arity)) {
// Patch {node} to a direct CEntryStub call.
// Load the context from the {target}.
@@ -1705,22 +1831,7 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
Node* new_target = jsgraph()->UndefinedConstant();
Node* argument_count = jsgraph()->Int32Constant(arity);
- if (is_builtin && Builtins::HasCppImplementation(builtin_index) &&
- (shared->internal_formal_parameter_count() == arity ||
- shared->internal_formal_parameter_count() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel)) {
- // Patch {node} to a direct CEntryStub call.
- ReduceBuiltin(isolate(), jsgraph(), node, builtin_index, arity, flags);
- } else if (shared->internal_formal_parameter_count() == arity ||
- shared->internal_formal_parameter_count() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
- // Patch {node} to a direct call.
- node->InsertInput(graph()->zone(), arity + 2, new_target);
- node->InsertInput(graph()->zone(), arity + 3, argument_count);
- NodeProperties::ChangeOp(node,
- common()->Call(Linkage::GetJSCallDescriptor(
- graph()->zone(), false, 1 + arity, flags)));
- } else {
+ if (NeedsArgumentAdaptorFrame(shared, arity)) {
// Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
Callable callable = CodeFactory::ArgumentAdaptor(isolate());
node->InsertInput(graph()->zone(), 0,
@@ -1734,6 +1845,16 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
node, common()->Call(Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(),
1 + arity, flags)));
+ } else if (is_builtin && Builtins::HasCppImplementation(builtin_index)) {
+ // Patch {node} to a direct CEntryStub call.
+ ReduceBuiltin(isolate(), jsgraph(), node, builtin_index, arity, flags);
+ } else {
+ // Patch {node} to a direct call.
+ node->InsertInput(graph()->zone(), arity + 2, new_target);
+ node->InsertInput(graph()->zone(), arity + 3, argument_count);
+ NodeProperties::ChangeOp(node,
+ common()->Call(Linkage::GetJSCallDescriptor(
+ graph()->zone(), false, 1 + arity, flags)));
}
return Changed(node);
}
@@ -1761,8 +1882,8 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
// Maybe we did at least learn something about the {receiver}.
if (p.convert_mode() != convert_mode) {
NodeProperties::ChangeOp(
- node, javascript()->CallFunction(p.arity(), p.feedback(), convert_mode,
- p.tail_call_mode()));
+ node, javascript()->CallFunction(p.arity(), p.frequency(), p.feedback(),
+ convert_mode, p.tail_call_mode()));
return Changed(node);
}
@@ -1770,14 +1891,6 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
}
-Reduction JSTypedLowering::ReduceJSForInDone(Node* node) {
- DCHECK_EQ(IrOpcode::kJSForInDone, node->opcode());
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, machine()->Word32Equal());
- return Changed(node);
-}
-
-
Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
DCHECK_EQ(IrOpcode::kJSForInNext, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -1843,14 +1956,6 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
return Changed(node);
}
-
-Reduction JSTypedLowering::ReduceJSForInStep(Node* node) {
- DCHECK_EQ(IrOpcode::kJSForInStep, node->opcode());
- node->ReplaceInput(1, jsgraph()->Int32Constant(1));
- NodeProperties::ChangeOp(node, machine()->Int32Add());
- return Changed(node);
-}
-
Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
DCHECK_EQ(IrOpcode::kJSGeneratorStore, node->opcode());
Node* generator = NodeProperties::GetValueInput(node, 0);
@@ -1930,174 +2035,7 @@ Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
return Changed(element);
}
-Reduction JSTypedLowering::ReduceSelect(Node* node) {
- DCHECK_EQ(IrOpcode::kSelect, node->opcode());
- Node* const condition = NodeProperties::GetValueInput(node, 0);
- Type* const condition_type = NodeProperties::GetType(condition);
- Node* const vtrue = NodeProperties::GetValueInput(node, 1);
- Type* const vtrue_type = NodeProperties::GetType(vtrue);
- Node* const vfalse = NodeProperties::GetValueInput(node, 2);
- Type* const vfalse_type = NodeProperties::GetType(vfalse);
- if (condition_type->Is(true_type_)) {
- // Select(condition:true, vtrue, vfalse) => vtrue
- return Replace(vtrue);
- }
- if (condition_type->Is(false_type_)) {
- // Select(condition:false, vtrue, vfalse) => vfalse
- return Replace(vfalse);
- }
- if (vtrue_type->Is(true_type_) && vfalse_type->Is(false_type_)) {
- // Select(condition, vtrue:true, vfalse:false) => condition
- return Replace(condition);
- }
- if (vtrue_type->Is(false_type_) && vfalse_type->Is(true_type_)) {
- // Select(condition, vtrue:false, vfalse:true) => BooleanNot(condition)
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, simplified()->BooleanNot());
- return Changed(node);
- }
- return NoChange();
-}
-
-namespace {
-
-MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
- if (object_type->IsConstant() &&
- object_type->AsConstant()->Value()->IsHeapObject()) {
- Handle<Map> object_map(
- Handle<HeapObject>::cast(object_type->AsConstant()->Value())->map());
- if (object_map->is_stable()) return object_map;
- } else if (object_type->IsClass()) {
- Handle<Map> object_map = object_type->AsClass()->Map();
- if (object_map->is_stable()) return object_map;
- }
- return MaybeHandle<Map>();
-}
-
-} // namespace
-
-Reduction JSTypedLowering::ReduceCheckMaps(Node* node) {
- // TODO(bmeurer): Find a better home for this thing!
- // The CheckMaps(o, ...map...) can be eliminated if map is stable and
- // either
- // (a) o has type Constant(object) and map == object->map, or
- // (b) o has type Class(map),
- // and either
- // (1) map cannot transition further, or
- // (2) we can add a code dependency on the stability of map
- // (to guard the Constant type information).
- Node* const object = NodeProperties::GetValueInput(node, 0);
- Type* const object_type = NodeProperties::GetType(object);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Handle<Map> object_map;
- if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
- for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
- Node* const map = NodeProperties::GetValueInput(node, i);
- Type* const map_type = NodeProperties::GetType(map);
- if (map_type->IsConstant() &&
- map_type->AsConstant()->Value().is_identical_to(object_map)) {
- if (object_map->CanTransition()) {
- DCHECK(flags() & kDeoptimizationEnabled);
- dependencies()->AssumeMapStable(object_map);
- }
- return Replace(effect);
- }
- }
- }
- return NoChange();
-}
-
-Reduction JSTypedLowering::ReduceCheckString(Node* node) {
- // TODO(bmeurer): Find a better home for this thing!
- Node* const input = NodeProperties::GetValueInput(node, 0);
- Type* const input_type = NodeProperties::GetType(input);
- if (input_type->Is(Type::String())) {
- ReplaceWithValue(node, input);
- return Replace(input);
- }
- return NoChange();
-}
-
-Reduction JSTypedLowering::ReduceLoadField(Node* node) {
- // TODO(bmeurer): Find a better home for this thing!
- Node* const object = NodeProperties::GetValueInput(node, 0);
- Type* const object_type = NodeProperties::GetType(object);
- FieldAccess const& access = FieldAccessOf(node->op());
- if (access.base_is_tagged == kTaggedBase &&
- access.offset == HeapObject::kMapOffset) {
- // We can replace LoadField[Map](o) with map if is stable and either
- // (a) o has type Constant(object) and map == object->map, or
- // (b) o has type Class(map),
- // and either
- // (1) map cannot transition further, or
- // (2) deoptimization is enabled and we can add a code dependency on the
- // stability of map (to guard the Constant type information).
- Handle<Map> object_map;
- if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
- if (object_map->CanTransition()) {
- if (flags() & kDeoptimizationEnabled) {
- dependencies()->AssumeMapStable(object_map);
- } else {
- return NoChange();
- }
- }
- Node* const value = jsgraph()->HeapConstant(object_map);
- ReplaceWithValue(node, value);
- return Replace(value);
- }
- }
- return NoChange();
-}
-
-Reduction JSTypedLowering::ReduceNumberRoundop(Node* node) {
- // TODO(bmeurer): Find a better home for this thing!
- Node* const input = NodeProperties::GetValueInput(node, 0);
- Type* const input_type = NodeProperties::GetType(input);
- if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
- return Replace(input);
- }
- return NoChange();
-}
-
Reduction JSTypedLowering::Reduce(Node* node) {
- // Check if the output type is a singleton. In that case we already know the
- // result value and can simply replace the node if it's eliminable.
- if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
- node->op()->HasProperty(Operator::kEliminatable)) {
- // We can only constant-fold nodes here, that are known to not cause any
- // side-effect, may it be a JavaScript observable side-effect or a possible
- // eager deoptimization exit (i.e. {node} has an operator that doesn't have
- // the Operator::kNoDeopt property).
- Type* upper = NodeProperties::GetType(node);
- if (upper->IsInhabited()) {
- if (upper->IsConstant()) {
- Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::MinusZero())) {
- Node* replacement = jsgraph()->Constant(factory()->minus_zero_value());
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::NaN())) {
- Node* replacement = jsgraph()->NaNConstant();
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::Null())) {
- Node* replacement = jsgraph()->NullConstant();
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::PlainNumber()) &&
- upper->Min() == upper->Max()) {
- Node* replacement = jsgraph()->Constant(upper->Min());
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::Undefined())) {
- Node* replacement = jsgraph()->UndefinedConstant();
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- }
- }
- }
switch (node->opcode()) {
case IrOpcode::kJSEqual:
return ReduceJSEqual(node, false);
@@ -2128,6 +2066,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSDivide:
case IrOpcode::kJSModulus:
return ReduceNumberBinop(node);
+ case IrOpcode::kJSOrdinaryHasInstance:
+ return ReduceJSOrdinaryHasInstance(node);
case IrOpcode::kJSToBoolean:
return ReduceJSToBoolean(node);
case IrOpcode::kJSToInteger:
@@ -2146,8 +2086,6 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSLoadProperty(node);
case IrOpcode::kJSStoreProperty:
return ReduceJSStoreProperty(node);
- case IrOpcode::kJSInstanceOf:
- return ReduceJSInstanceOf(node);
case IrOpcode::kJSLoadContext:
return ReduceJSLoadContext(node);
case IrOpcode::kJSStoreContext:
@@ -2158,31 +2096,14 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSCallConstruct(node);
case IrOpcode::kJSCallFunction:
return ReduceJSCallFunction(node);
- case IrOpcode::kJSForInDone:
- return ReduceJSForInDone(node);
case IrOpcode::kJSForInNext:
return ReduceJSForInNext(node);
- case IrOpcode::kJSForInStep:
- return ReduceJSForInStep(node);
case IrOpcode::kJSGeneratorStore:
return ReduceJSGeneratorStore(node);
case IrOpcode::kJSGeneratorRestoreContinuation:
return ReduceJSGeneratorRestoreContinuation(node);
case IrOpcode::kJSGeneratorRestoreRegister:
return ReduceJSGeneratorRestoreRegister(node);
- case IrOpcode::kSelect:
- return ReduceSelect(node);
- case IrOpcode::kCheckMaps:
- return ReduceCheckMaps(node);
- case IrOpcode::kCheckString:
- return ReduceCheckString(node);
- case IrOpcode::kNumberCeil:
- case IrOpcode::kNumberFloor:
- case IrOpcode::kNumberRound:
- case IrOpcode::kNumberTrunc:
- return ReduceNumberRoundop(node);
- case IrOpcode::kLoadField:
- return ReduceLoadField(node);
default:
break;
}
@@ -2208,10 +2129,6 @@ CommonOperatorBuilder* JSTypedLowering::common() const {
return jsgraph()->common();
}
-MachineOperatorBuilder* JSTypedLowering::machine() const {
- return jsgraph()->machine();
-}
-
SimplifiedOperatorBuilder* JSTypedLowering::simplified() const {
return jsgraph()->simplified();
}
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 35c397fb88..b0cf1f4f3d 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -15,8 +15,6 @@ namespace internal {
// Forward declarations.
class CompilationDependencies;
class Factory;
-class TypeCache;
-
namespace compiler {
@@ -24,9 +22,8 @@ namespace compiler {
class CommonOperatorBuilder;
class JSGraph;
class JSOperatorBuilder;
-class MachineOperatorBuilder;
class SimplifiedOperatorBuilder;
-
+class TypeCache;
// Lowers JS-level operators to simplified operators based on types.
class JSTypedLowering final : public AdvancedReducer {
@@ -52,7 +49,7 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
Reduction ReduceJSStoreProperty(Node* node);
- Reduction ReduceJSInstanceOf(Node* node);
+ Reduction ReduceJSOrdinaryHasInstance(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
Reduction ReduceJSEqualTypeOf(Node* node, bool invert);
@@ -69,20 +66,14 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSConvertReceiver(Node* node);
Reduction ReduceJSCallConstruct(Node* node);
Reduction ReduceJSCallFunction(Node* node);
- Reduction ReduceJSForInDone(Node* node);
Reduction ReduceJSForInNext(Node* node);
- Reduction ReduceJSForInStep(Node* node);
Reduction ReduceJSGeneratorStore(Node* node);
Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
Reduction ReduceJSGeneratorRestoreRegister(Node* node);
- Reduction ReduceCheckMaps(Node* node);
- Reduction ReduceCheckString(Node* node);
- Reduction ReduceLoadField(Node* node);
- Reduction ReduceNumberRoundop(Node* node);
- Reduction ReduceSelect(Node* node);
Reduction ReduceNumberBinop(Node* node);
Reduction ReduceInt32Binop(Node* node);
Reduction ReduceUI32Shift(Node* node, Signedness signedness);
+ Reduction ReduceCreateConsString(Node* node);
Factory* factory() const;
Graph* graph() const;
@@ -91,7 +82,6 @@ class JSTypedLowering final : public AdvancedReducer {
JSOperatorBuilder* javascript() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
- MachineOperatorBuilder* machine() const;
CompilationDependencies* dependencies() const;
Flags flags() const { return flags_; }
@@ -99,8 +89,6 @@ class JSTypedLowering final : public AdvancedReducer {
Flags flags_;
JSGraph* jsgraph_;
Type* shifted_int32_ranges_[4];
- Type* const true_type_;
- Type* const false_type_;
Type* const the_hole_type_;
TypeCache const& type_cache_;
};
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index e4df58d0f7..523ce47b0f 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -7,7 +7,7 @@
#include "src/ast/scopes.h"
#include "src/builtins/builtins-utils.h"
#include "src/code-stubs.h"
-#include "src/compiler.h"
+#include "src/compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/frame.h"
#include "src/compiler/node.h"
@@ -24,34 +24,6 @@ LinkageLocation regloc(Register reg, MachineType type) {
return LinkageLocation::ForRegister(reg.code(), type);
}
-MachineType reptyp(Representation representation) {
- switch (representation.kind()) {
- case Representation::kInteger8:
- return MachineType::Int8();
- case Representation::kUInteger8:
- return MachineType::Uint8();
- case Representation::kInteger16:
- return MachineType::Int16();
- case Representation::kUInteger16:
- return MachineType::Uint16();
- case Representation::kInteger32:
- return MachineType::Int32();
- case Representation::kSmi:
- case Representation::kTagged:
- case Representation::kHeapObject:
- return MachineType::AnyTagged();
- case Representation::kDouble:
- return MachineType::Float64();
- case Representation::kExternal:
- return MachineType::Pointer();
- case Representation::kNone:
- case Representation::kNumRepresentations:
- break;
- }
- UNREACHABLE();
- return MachineType::None();
-}
-
} // namespace
@@ -152,17 +124,16 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
// static
bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
- // Most runtime functions need a FrameState. A few chosen ones that we know
- // not to call into arbitrary JavaScript, not to throw, and not to deoptimize
- // are blacklisted here and can be called without a FrameState.
switch (function) {
+ // Most runtime functions need a FrameState. A few chosen ones that we know
+ // not to call into arbitrary JavaScript, not to throw, and not to
+ // deoptimize
+ // are whitelisted here and can be called without a FrameState.
case Runtime::kAbort:
case Runtime::kAllocateInTargetSpace:
case Runtime::kCreateIterResultObject:
case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
- case Runtime::kForInDone:
- case Runtime::kForInStep:
case Runtime::kGeneratorGetContinuation:
case Runtime::kGetSuperConstructor:
case Runtime::kIsFunction:
@@ -183,29 +154,29 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kTraceEnter:
case Runtime::kTraceExit:
return false;
- case Runtime::kInlineCall:
- case Runtime::kInlineDeoptimizeNow:
- case Runtime::kInlineGetPrototype:
- case Runtime::kInlineNewObject:
- case Runtime::kInlineRegExpConstructResult:
- case Runtime::kInlineRegExpExec:
- case Runtime::kInlineSubString:
- case Runtime::kInlineThrowNotDateError:
- case Runtime::kInlineToInteger:
- case Runtime::kInlineToLength:
- case Runtime::kInlineToNumber:
- case Runtime::kInlineToObject:
- case Runtime::kInlineToString:
- return true;
+
+ // Some inline intrinsics are also safe to call without a FrameState.
+ case Runtime::kInlineCreateIterResultObject:
+ case Runtime::kInlineFixedArrayGet:
+ case Runtime::kInlineFixedArraySet:
+ case Runtime::kInlineGeneratorClose:
+ case Runtime::kInlineGeneratorGetInputOrDebugPos:
+ case Runtime::kInlineGeneratorGetResumeMode:
+ case Runtime::kInlineGetSuperConstructor:
+ case Runtime::kInlineIsArray:
+ case Runtime::kInlineIsJSReceiver:
+ case Runtime::kInlineIsRegExp:
+ case Runtime::kInlineIsSmi:
+ case Runtime::kInlineIsTypedArray:
+ case Runtime::kInlineRegExpFlags:
+ case Runtime::kInlineRegExpSource:
+ return false;
+
default:
break;
}
- // Most inlined runtime functions (except the ones listed above) can be called
- // without a FrameState or will be lowered by JSIntrinsicLowering internally.
- const Runtime::Function* const f = Runtime::FunctionForId(function);
- if (f->intrinsic_type == Runtime::IntrinsicType::INLINE) return false;
-
+ // For safety, default to needing a FrameState unless whitelisted.
return true;
}
@@ -382,8 +353,7 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
if (i < register_parameter_count) {
// The first parameters go in registers.
Register reg = descriptor.GetRegisterParameter(i);
- MachineType type =
- reptyp(RepresentationFromType(descriptor.GetParameterType(i)));
+ MachineType type = descriptor.GetParameterType(i);
locations.AddParam(regloc(reg, type));
} else {
// The rest of the parameters go on the stack.
@@ -452,8 +422,7 @@ CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
if (i < register_parameter_count) {
// The first parameters go in registers.
Register reg = descriptor.GetRegisterParameter(i);
- MachineType type =
- reptyp(RepresentationFromType(descriptor.GetParameterType(i)));
+ MachineType type = descriptor.GetParameterType(i);
locations.AddParam(regloc(reg, type));
} else {
// The rest of the parameters go on the stack.
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 1c025081c4..6f302bc534 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -11,7 +11,7 @@
#include "src/frames.h"
#include "src/machine-type.h"
#include "src/runtime/runtime.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/live-range-separator.cc b/deps/v8/src/compiler/live-range-separator.cc
index e3cd0a3137..db65593906 100644
--- a/deps/v8/src/compiler/live-range-separator.cc
+++ b/deps/v8/src/compiler/live-range-separator.cc
@@ -58,6 +58,15 @@ void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
}
}
+void SetSlotUse(TopLevelLiveRange *range) {
+ range->set_has_slot_use(false);
+ for (const UsePosition *pos = range->first_pos();
+ !range->has_slot_use() && pos != nullptr; pos = pos->next()) {
+ if (pos->type() == UsePositionType::kRequiresSlot) {
+ range->set_has_slot_use(true);
+ }
+ }
+}
void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
const InstructionSequence *code = data->code();
@@ -99,7 +108,14 @@ void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
if (first_cut.IsValid()) {
CreateSplinter(range, data, first_cut, last_cut);
}
+
+ // Redo has_slot_use
+ if (range->has_slot_use() && range->splinter() != nullptr) {
+ SetSlotUse(range);
+ SetSlotUse(range->splinter());
+ }
}
+
} // namespace
diff --git a/deps/v8/src/compiler/live-range-separator.h b/deps/v8/src/compiler/live-range-separator.h
index 57bc98235d..6aaf6b69e6 100644
--- a/deps/v8/src/compiler/live-range-separator.h
+++ b/deps/v8/src/compiler/live-range-separator.h
@@ -5,8 +5,7 @@
#ifndef V8_LIVE_RANGE_SEPARATOR_H_
#define V8_LIVE_RANGE_SEPARATOR_H_
-
-#include <src/zone.h>
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/liveness-analyzer.h b/deps/v8/src/compiler/liveness-analyzer.h
index 9b09724eef..8a3d715096 100644
--- a/deps/v8/src/compiler/liveness-analyzer.h
+++ b/deps/v8/src/compiler/liveness-analyzer.h
@@ -7,7 +7,7 @@
#include "src/bit-vector.h"
#include "src/compiler/node.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index ad787f8092..93c24a08e5 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -4,6 +4,7 @@
#include "src/compiler/load-elimination.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
@@ -21,28 +22,38 @@ Aliasing QueryAlias(Node* a, Node* b) {
if (!NodeProperties::GetType(a)->Maybe(NodeProperties::GetType(b))) {
return kNoAlias;
}
- if (b->opcode() == IrOpcode::kAllocate) {
- switch (a->opcode()) {
- case IrOpcode::kAllocate:
- case IrOpcode::kHeapConstant:
- case IrOpcode::kParameter:
- return kNoAlias;
- case IrOpcode::kFinishRegion:
- return QueryAlias(a->InputAt(0), b);
- default:
- break;
+ switch (b->opcode()) {
+ case IrOpcode::kAllocate: {
+ switch (a->opcode()) {
+ case IrOpcode::kAllocate:
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kParameter:
+ return kNoAlias;
+ default:
+ break;
+ }
+ break;
}
+ case IrOpcode::kFinishRegion:
+ return QueryAlias(a, b->InputAt(0));
+ default:
+ break;
}
- if (a->opcode() == IrOpcode::kAllocate) {
- switch (b->opcode()) {
- case IrOpcode::kHeapConstant:
- case IrOpcode::kParameter:
- return kNoAlias;
- case IrOpcode::kFinishRegion:
- return QueryAlias(a, b->InputAt(0));
- default:
- break;
+ switch (a->opcode()) {
+ case IrOpcode::kAllocate: {
+ switch (b->opcode()) {
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kParameter:
+ return kNoAlias;
+ default:
+ break;
+ }
+ break;
}
+ case IrOpcode::kFinishRegion:
+ return QueryAlias(a->InputAt(0), b);
+ default:
+ break;
}
return kMayAlias;
}
@@ -54,7 +65,35 @@ bool MustAlias(Node* a, Node* b) { return QueryAlias(a, b) == kMustAlias; }
} // namespace
Reduction LoadElimination::Reduce(Node* node) {
+ if (FLAG_trace_turbo_load_elimination) {
+ if (node->op()->EffectInputCount() > 0) {
+ PrintF(" visit #%d:%s", node->id(), node->op()->mnemonic());
+ if (node->op()->ValueInputCount() > 0) {
+ PrintF("(");
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ if (i > 0) PrintF(", ");
+ Node* const value = NodeProperties::GetValueInput(node, i);
+ PrintF("#%d:%s", value->id(), value->op()->mnemonic());
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
+ for (int i = 0; i < node->op()->EffectInputCount(); ++i) {
+ Node* const effect = NodeProperties::GetEffectInput(node, i);
+ if (AbstractState const* const state = node_states_.Get(effect)) {
+ PrintF(" state[%i]: #%d:%s\n", i, effect->id(),
+ effect->op()->mnemonic());
+ state->Print();
+ } else {
+ PrintF(" no state[%i]: #%d:%s\n", i, effect->id(),
+ effect->op()->mnemonic());
+ }
+ }
+ }
+ }
switch (node->opcode()) {
+ case IrOpcode::kArrayBufferWasNeutered:
+ return ReduceArrayBufferWasNeutered(node);
case IrOpcode::kCheckMaps:
return ReduceCheckMaps(node);
case IrOpcode::kEnsureWritableFastElements:
@@ -85,6 +124,73 @@ Reduction LoadElimination::Reduce(Node* node) {
return NoChange();
}
+namespace {
+
+bool IsCompatibleCheck(Node const* a, Node const* b) {
+ if (a->op() != b->op()) return false;
+ for (int i = a->op()->ValueInputCount(); --i >= 0;) {
+ if (!MustAlias(a->InputAt(i), b->InputAt(i))) return false;
+ }
+ return true;
+}
+
+} // namespace
+
+Node* LoadElimination::AbstractChecks::Lookup(Node* node) const {
+ for (Node* const check : nodes_) {
+ if (check && IsCompatibleCheck(check, node)) {
+ return check;
+ }
+ }
+ return nullptr;
+}
+
+bool LoadElimination::AbstractChecks::Equals(AbstractChecks const* that) const {
+ if (this == that) return true;
+ for (size_t i = 0; i < arraysize(nodes_); ++i) {
+ if (Node* this_node = this->nodes_[i]) {
+ for (size_t j = 0;; ++j) {
+ if (j == arraysize(nodes_)) return false;
+ if (that->nodes_[j] == this_node) break;
+ }
+ }
+ }
+ for (size_t i = 0; i < arraysize(nodes_); ++i) {
+ if (Node* that_node = that->nodes_[i]) {
+ for (size_t j = 0;; ++j) {
+ if (j == arraysize(nodes_)) return false;
+ if (this->nodes_[j] == that_node) break;
+ }
+ }
+ }
+ return true;
+}
+
+LoadElimination::AbstractChecks const* LoadElimination::AbstractChecks::Merge(
+ AbstractChecks const* that, Zone* zone) const {
+ if (this->Equals(that)) return this;
+ AbstractChecks* copy = new (zone) AbstractChecks(zone);
+ for (Node* const this_node : this->nodes_) {
+ if (this_node == nullptr) continue;
+ for (Node* const that_node : that->nodes_) {
+ if (this_node == that_node) {
+ copy->nodes_[copy->next_index_++] = this_node;
+ break;
+ }
+ }
+ }
+ copy->next_index_ %= arraysize(nodes_);
+ return copy;
+}
+
+void LoadElimination::AbstractChecks::Print() const {
+ for (Node* const node : nodes_) {
+ if (node != nullptr) {
+ PrintF(" #%d:%s\n", node->id(), node->op()->mnemonic());
+ }
+ }
+}
+
Node* LoadElimination::AbstractElements::Lookup(Node* object,
Node* index) const {
for (Element const element : elements_) {
@@ -110,7 +216,8 @@ LoadElimination::AbstractElements::Kill(Node* object, Node* index,
DCHECK_NOT_NULL(element.index);
DCHECK_NOT_NULL(element.value);
if (!MayAlias(object, element.object) ||
- !MayAlias(index, element.index)) {
+ !NodeProperties::GetType(index)->Maybe(
+ NodeProperties::GetType(element.index))) {
that->elements_[that->next_index_++] = element;
}
}
@@ -165,6 +272,7 @@ LoadElimination::AbstractElements::Merge(AbstractElements const* that,
this_element.index == that_element.index &&
this_element.value == that_element.value) {
copy->elements_[copy->next_index_++] = this_element;
+ break;
}
}
}
@@ -172,6 +280,17 @@ LoadElimination::AbstractElements::Merge(AbstractElements const* that,
return copy;
}
+void LoadElimination::AbstractElements::Print() const {
+ for (Element const& element : elements_) {
+ if (element.object) {
+ PrintF(" #%d:%s @ #%d:%s -> #%d:%s\n", element.object->id(),
+ element.object->op()->mnemonic(), element.index->id(),
+ element.index->op()->mnemonic(), element.value->id(),
+ element.value->op()->mnemonic());
+ }
+ }
+}
+
Node* LoadElimination::AbstractField::Lookup(Node* object) const {
for (auto pair : info_for_node_) {
if (MustAlias(object, pair.first)) return pair.second;
@@ -193,7 +312,22 @@ LoadElimination::AbstractField const* LoadElimination::AbstractField::Kill(
return this;
}
+void LoadElimination::AbstractField::Print() const {
+ for (auto pair : info_for_node_) {
+ PrintF(" #%d:%s -> #%d:%s\n", pair.first->id(),
+ pair.first->op()->mnemonic(), pair.second->id(),
+ pair.second->op()->mnemonic());
+ }
+}
+
bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
+ if (this->checks_) {
+ if (!that->checks_ || !that->checks_->Equals(this->checks_)) {
+ return false;
+ }
+ } else if (that->checks_) {
+ return false;
+ }
if (this->elements_) {
if (!that->elements_ || !that->elements_->Equals(this->elements_)) {
return false;
@@ -215,13 +349,17 @@ bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
void LoadElimination::AbstractState::Merge(AbstractState const* that,
Zone* zone) {
+ // Merge the information we have about the checks.
+ if (this->checks_) {
+ this->checks_ =
+ that->checks_ ? that->checks_->Merge(this->checks_, zone) : nullptr;
+ }
+
// Merge the information we have about the elements.
if (this->elements_) {
this->elements_ = that->elements_
? that->elements_->Merge(this->elements_, zone)
- : that->elements_;
- } else {
- this->elements_ = that->elements_;
+ : nullptr;
}
// Merge the information we have about the fields.
@@ -236,6 +374,21 @@ void LoadElimination::AbstractState::Merge(AbstractState const* that,
}
}
+Node* LoadElimination::AbstractState::LookupCheck(Node* node) const {
+ return this->checks_ ? this->checks_->Lookup(node) : nullptr;
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::AddCheck(
+ Node* node, Zone* zone) const {
+ AbstractState* that = new (zone) AbstractState(*this);
+ if (that->checks_) {
+ that->checks_ = that->checks_->Extend(node, zone);
+ } else {
+ that->checks_ = new (zone) AbstractChecks(node, zone);
+ }
+ return that;
+}
+
Node* LoadElimination::AbstractState::LookupElement(Node* object,
Node* index) const {
if (this->elements_) {
@@ -303,6 +456,23 @@ Node* LoadElimination::AbstractState::LookupField(Node* object,
return nullptr;
}
+void LoadElimination::AbstractState::Print() const {
+ if (checks_) {
+ PrintF(" checks:\n");
+ checks_->Print();
+ }
+ if (elements_) {
+ PrintF(" elements:\n");
+ elements_->Print();
+ }
+ for (size_t i = 0; i < arraysize(fields_); ++i) {
+ if (AbstractField const* const field = fields_[i]) {
+ PrintF(" field %zu:\n", i);
+ field->Print();
+ }
+ }
+}
+
LoadElimination::AbstractState const*
LoadElimination::AbstractStateForEffectNodes::Get(Node* node) const {
size_t const id = node->id();
@@ -317,13 +487,26 @@ void LoadElimination::AbstractStateForEffectNodes::Set(
info_for_node_[id] = state;
}
+Reduction LoadElimination::ReduceArrayBufferWasNeutered(Node* node) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+ if (Node* const check = state->LookupCheck(node)) {
+ ReplaceWithValue(node, check, effect);
+ return Replace(check);
+ }
+ state = state->AddCheck(node, zone());
+ return UpdateState(node, state);
+}
+
Reduction LoadElimination::ReduceCheckMaps(Node* node) {
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
int const map_input_count = node->op()->ValueInputCount() - 1;
- if (Node* const object_map = state->LookupField(object, 0)) {
+ if (Node* const object_map =
+ state->LookupField(object, FieldIndexOf(HeapObject::kMapOffset))) {
for (int i = 0; i < map_input_count; ++i) {
Node* map = NodeProperties::GetValueInput(node, 1 + i);
if (map == object_map) return Replace(effect);
@@ -331,7 +514,8 @@ Reduction LoadElimination::ReduceCheckMaps(Node* node) {
}
if (map_input_count == 1) {
Node* const map0 = NodeProperties::GetValueInput(node, 1);
- state = state->AddField(object, 0, map0, zone());
+ state = state->AddField(object, FieldIndexOf(HeapObject::kMapOffset), map0,
+ zone());
}
return UpdateState(node, state);
}
@@ -343,7 +527,8 @@ Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
- if (Node* const elements_map = state->LookupField(elements, 0)) {
+ if (Node* const elements_map =
+ state->LookupField(elements, FieldIndexOf(HeapObject::kMapOffset))) {
// Check if the {elements} already have the fixed array map.
if (elements_map == fixed_array_map) {
ReplaceWithValue(node, elements, effect);
@@ -351,11 +536,14 @@ Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
}
}
// We know that the resulting elements have the fixed array map.
- state = state->AddField(node, 0, fixed_array_map, zone());
+ state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
+ fixed_array_map, zone());
// Kill the previous elements on {object}.
- state = state->KillField(object, 2, zone());
+ state =
+ state->KillField(object, FieldIndexOf(JSObject::kElementsOffset), zone());
// Add the new elements on {object}.
- state = state->AddField(object, 2, node, zone());
+ state = state->AddField(object, FieldIndexOf(JSObject::kElementsOffset), node,
+ zone());
return UpdateState(node, state);
}
@@ -368,20 +556,25 @@ Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
if (flags & GrowFastElementsFlag::kDoubleElements) {
// We know that the resulting elements have the fixed double array map.
Node* fixed_double_array_map = jsgraph()->FixedDoubleArrayMapConstant();
- state = state->AddField(node, 0, fixed_double_array_map, zone());
+ state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
+ fixed_double_array_map, zone());
} else {
// We know that the resulting elements have the fixed array map.
Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
- state = state->AddField(node, 0, fixed_array_map, zone());
+ state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
+ fixed_array_map, zone());
}
if (flags & GrowFastElementsFlag::kArrayObject) {
// Kill the previous Array::length on {object}.
- state = state->KillField(object, 3, zone());
+ state =
+ state->KillField(object, FieldIndexOf(JSArray::kLengthOffset), zone());
}
// Kill the previous elements on {object}.
- state = state->KillField(object, 2, zone());
+ state =
+ state->KillField(object, FieldIndexOf(JSObject::kElementsOffset), zone());
// Add the new elements on {object}.
- state = state->AddField(object, 2, node, zone());
+ state = state->AddField(object, FieldIndexOf(JSObject::kElementsOffset), node,
+ zone());
return UpdateState(node, state);
}
@@ -392,18 +585,22 @@ Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- if (Node* const object_map = state->LookupField(object, 0)) {
+ if (Node* const object_map =
+ state->LookupField(object, FieldIndexOf(HeapObject::kMapOffset))) {
if (target_map == object_map) {
// The {object} already has the {target_map}, so this TransitionElements
// {node} is fully redundant (independent of what {source_map} is).
return Replace(effect);
}
- state = state->KillField(object, 0, zone());
+ state =
+ state->KillField(object, FieldIndexOf(HeapObject::kMapOffset), zone());
if (source_map == object_map) {
- state = state->AddField(object, 0, target_map, zone());
+ state = state->AddField(object, FieldIndexOf(HeapObject::kMapOffset),
+ target_map, zone());
}
} else {
- state = state->KillField(object, 0, zone());
+ state =
+ state->KillField(object, FieldIndexOf(HeapObject::kMapOffset), zone());
}
ElementsTransition transition = ElementsTransitionOf(node->op());
switch (transition) {
@@ -411,7 +608,8 @@ Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
break;
case ElementsTransition::kSlowTransition:
// Kill the elements as well.
- state = state->KillField(object, 2, zone());
+ state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset),
+ zone());
break;
}
return UpdateState(node, state);
@@ -421,16 +619,21 @@ Reduction LoadElimination::ReduceLoadField(Node* node) {
FieldAccess const& access = FieldAccessOf(node->op());
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
int field_index = FieldIndexOf(access);
if (field_index >= 0) {
- if (Node* const replacement = state->LookupField(object, field_index)) {
- // Make sure the {replacement} has at least as good type
- // as the original {node}.
- if (!replacement->IsDead() &&
- NodeProperties::GetType(replacement)
- ->Is(NodeProperties::GetType(node))) {
+ if (Node* replacement = state->LookupField(object, field_index)) {
+ // Make sure we don't resurrect dead {replacement} nodes.
+ if (!replacement->IsDead()) {
+ // We might need to guard the {replacement} if the type of the
+ // {node} is more precise than the type of the {replacement}.
+ Type* const node_type = NodeProperties::GetType(node);
+ if (!NodeProperties::GetType(replacement)->Is(node_type)) {
+ replacement = graph()->NewNode(common()->TypeGuard(node_type),
+ replacement, control);
+ }
ReplaceWithValue(node, replacement, effect);
return Replace(replacement);
}
@@ -468,14 +671,19 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const index = NodeProperties::GetValueInput(node, 1);
Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- if (Node* const replacement = state->LookupElement(object, index)) {
- // Make sure the {replacement} has at least as good type
- // as the original {node}.
- if (!replacement->IsDead() &&
- NodeProperties::GetType(replacement)
- ->Is(NodeProperties::GetType(node))) {
+ if (Node* replacement = state->LookupElement(object, index)) {
+ // Make sure we don't resurrect dead {replacement} nodes.
+ if (!replacement->IsDead()) {
+ // We might need to guard the {replacement} if the type of the
+ // {node} is more precise than the type of the {replacement}.
+ Type* const node_type = NodeProperties::GetType(node);
+ if (!NodeProperties::GetType(replacement)->Is(node_type)) {
+ replacement = graph()->NewNode(common()->TypeGuard(node_type),
+ replacement, control);
+ }
ReplaceWithValue(node, replacement, effect);
return Replace(replacement);
}
@@ -620,23 +828,28 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
switch (current->opcode()) {
case IrOpcode::kEnsureWritableFastElements: {
Node* const object = NodeProperties::GetValueInput(current, 0);
- state = state->KillField(object, 2, zone());
+ state = state->KillField(
+ object, FieldIndexOf(JSObject::kElementsOffset), zone());
break;
}
case IrOpcode::kMaybeGrowFastElements: {
GrowFastElementsFlags flags =
GrowFastElementsFlagsOf(current->op());
Node* const object = NodeProperties::GetValueInput(current, 0);
- state = state->KillField(object, 2, zone());
+ state = state->KillField(
+ object, FieldIndexOf(JSObject::kElementsOffset), zone());
if (flags & GrowFastElementsFlag::kArrayObject) {
- state = state->KillField(object, 3, zone());
+ state = state->KillField(
+ object, FieldIndexOf(JSArray::kLengthOffset), zone());
}
break;
}
case IrOpcode::kTransitionElementsKind: {
Node* const object = NodeProperties::GetValueInput(current, 0);
- state = state->KillField(object, 0, zone());
- state = state->KillField(object, 2, zone());
+ state = state->KillField(
+ object, FieldIndexOf(HeapObject::kMapOffset), zone());
+ state = state->KillField(
+ object, FieldIndexOf(JSObject::kElementsOffset), zone());
break;
}
case IrOpcode::kStoreField: {
@@ -671,6 +884,14 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
}
// static
+int LoadElimination::FieldIndexOf(int offset) {
+ DCHECK_EQ(0, offset % kPointerSize);
+ int field_index = offset / kPointerSize;
+ if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1;
+ return field_index;
+}
+
+// static
int LoadElimination::FieldIndexOf(FieldAccess const& access) {
MachineRepresentation rep = access.machine_type.representation();
switch (rep) {
@@ -699,12 +920,15 @@ int LoadElimination::FieldIndexOf(FieldAccess const& access) {
break;
}
DCHECK_EQ(kTaggedBase, access.base_is_tagged);
- DCHECK_EQ(0, access.offset % kPointerSize);
- int field_index = access.offset / kPointerSize;
- if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1;
- return field_index;
+ return FieldIndexOf(access.offset);
}
+CommonOperatorBuilder* LoadElimination::common() const {
+ return jsgraph()->common();
+}
+
+Graph* LoadElimination::graph() const { return jsgraph()->graph(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 2a4ee40500..985e690bc4 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -12,7 +12,9 @@ namespace internal {
namespace compiler {
// Foward declarations.
+class CommonOperatorBuilder;
struct FieldAccess;
+class Graph;
class JSGraph;
class LoadElimination final : public AdvancedReducer {
@@ -24,6 +26,39 @@ class LoadElimination final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
private:
+ static const size_t kMaxTrackedChecks = 8;
+
+ // Abstract state to approximate the current state of checks that are
+ // only invalidated by calls, i.e. array buffer neutering checks, along
+ // the effect paths through the graph.
+ class AbstractChecks final : public ZoneObject {
+ public:
+ explicit AbstractChecks(Zone* zone) {
+ for (size_t i = 0; i < arraysize(nodes_); ++i) {
+ nodes_[i] = nullptr;
+ }
+ }
+ AbstractChecks(Node* node, Zone* zone) : AbstractChecks(zone) {
+ nodes_[next_index_++] = node;
+ }
+
+ AbstractChecks const* Extend(Node* node, Zone* zone) const {
+ AbstractChecks* that = new (zone) AbstractChecks(*this);
+ that->nodes_[that->next_index_] = node;
+ that->next_index_ = (that->next_index_ + 1) % arraysize(nodes_);
+ return that;
+ }
+ Node* Lookup(Node* node) const;
+ bool Equals(AbstractChecks const* that) const;
+ AbstractChecks const* Merge(AbstractChecks const* that, Zone* zone) const;
+
+ void Print() const;
+
+ private:
+ Node* nodes_[kMaxTrackedChecks];
+ size_t next_index_ = 0;
+ };
+
static const size_t kMaxTrackedElements = 8;
// Abstract state to approximate the current state of an element along the
@@ -53,6 +88,8 @@ class LoadElimination final : public AdvancedReducer {
AbstractElements const* Merge(AbstractElements const* that,
Zone* zone) const;
+ void Print() const;
+
private:
struct Element {
Element() {}
@@ -104,6 +141,8 @@ class LoadElimination final : public AdvancedReducer {
return copy;
}
+ void Print() const;
+
private:
ZoneMap<Node*, Node*> info_for_node_;
};
@@ -133,7 +172,13 @@ class LoadElimination final : public AdvancedReducer {
Zone* zone) const;
Node* LookupElement(Node* object, Node* index) const;
+ AbstractState const* AddCheck(Node* node, Zone* zone) const;
+ Node* LookupCheck(Node* node) const;
+
+ void Print() const;
+
private:
+ AbstractChecks const* checks_ = nullptr;
AbstractElements const* elements_ = nullptr;
AbstractField const* fields_[kMaxTrackedFields];
};
@@ -150,6 +195,7 @@ class LoadElimination final : public AdvancedReducer {
ZoneVector<AbstractState const*> info_for_node_;
};
+ Reduction ReduceArrayBufferWasNeutered(Node* node);
Reduction ReduceCheckMaps(Node* node);
Reduction ReduceEnsureWritableFastElements(Node* node);
Reduction ReduceMaybeGrowFastElements(Node* node);
@@ -168,9 +214,12 @@ class LoadElimination final : public AdvancedReducer {
AbstractState const* ComputeLoopState(Node* node,
AbstractState const* state) const;
+ static int FieldIndexOf(int offset);
static int FieldIndexOf(FieldAccess const& access);
+ CommonOperatorBuilder* common() const;
AbstractState const* empty_state() const { return &empty_state_; }
+ Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Zone* zone() const { return node_states_.zone(); }
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index 2a81aee49b..f3a793347f 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -5,10 +5,10 @@
#include "src/compiler/loop-analysis.h"
#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
-#include "src/zone.h"
+#include "src/compiler/node.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index a8c3bca7d7..2d0f27b89f 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -8,7 +8,7 @@
#include "src/base/iterator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/loop-peeling.cc b/deps/v8/src/compiler/loop-peeling.cc
index 9535df54ad..5f8857c5df 100644
--- a/deps/v8/src/compiler/loop-peeling.cc
+++ b/deps/v8/src/compiler/loop-peeling.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/loop-peeling.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
-#include "src/compiler/loop-peeling.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
-#include "src/zone.h"
+#include "src/compiler/node.h"
+#include "src/zone/zone.h"
// Loop peeling is an optimization that copies the body of a loop, creating
// a new copy of the body called the "peeled iteration" that represents the
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index 8331963a7d..55cce265d8 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -9,8 +9,8 @@
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
-#include "src/zone-containers.h"
-#include "src/zone.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -28,7 +28,7 @@ LoopVariableOptimizer::LoopVariableOptimizer(Graph* graph,
: graph_(graph),
common_(common),
zone_(zone),
- limits_(zone),
+ limits_(graph->NodeCount(), zone),
induction_vars_(zone) {}
void LoopVariableOptimizer::Run() {
@@ -40,14 +40,13 @@ void LoopVariableOptimizer::Run() {
queue.pop();
queued.Set(node, false);
- DCHECK(limits_.find(node->id()) == limits_.end());
+ DCHECK_NULL(limits_[node->id()]);
bool all_inputs_visited = true;
int inputs_end = (node->opcode() == IrOpcode::kLoop)
? kFirstBackedge
: node->op()->ControlInputCount();
for (int i = 0; i < inputs_end; i++) {
- auto input = limits_.find(NodeProperties::GetControlInput(node, i)->id());
- if (input == limits_.end()) {
+ if (limits_[NodeProperties::GetControlInput(node, i)->id()] == nullptr) {
all_inputs_visited = false;
break;
}
@@ -55,7 +54,7 @@ void LoopVariableOptimizer::Run() {
if (!all_inputs_visited) continue;
VisitNode(node);
- DCHECK(limits_.find(node->id()) != limits_.end());
+ DCHECK_NOT_NULL(limits_[node->id()]);
// Queue control outputs.
for (Edge edge : node->use_edges()) {
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.h b/deps/v8/src/compiler/loop-variable-optimizer.h
index a5c1ad448d..8054ec16c8 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.h
+++ b/deps/v8/src/compiler/loop-variable-optimizer.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
#define V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -106,7 +106,7 @@ class LoopVariableOptimizer {
Graph* graph_;
CommonOperatorBuilder* common_;
Zone* zone_;
- ZoneMap<int, const VariableLimits*> limits_;
+ ZoneVector<const VariableLimits*> limits_;
ZoneMap<int, InductionVariable*> induction_vars_;
};
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
new file mode 100644
index 0000000000..d33ee4ec28
--- /dev/null
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -0,0 +1,667 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-graph-verifier.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/schedule.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+class MachineRepresentationInferrer {
+ public:
+ MachineRepresentationInferrer(Schedule const* schedule, Graph const* graph,
+ Linkage* linkage, Zone* zone)
+ : schedule_(schedule),
+ linkage_(linkage),
+ representation_vector_(graph->NodeCount(), zone) {
+ Run();
+ }
+
+ MachineRepresentation GetRepresentation(Node const* node) const {
+ return representation_vector_.at(node->id());
+ }
+
+ private:
+ MachineRepresentation GetProjectionType(Node const* projection) {
+ size_t index = ProjectionIndexOf(projection->op());
+ Node* input = projection->InputAt(0);
+ switch (input->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ case IrOpcode::kInt32SubWithOverflow:
+ case IrOpcode::kInt32MulWithOverflow:
+ CHECK_LE(index, static_cast<size_t>(1));
+ return index == 0 ? MachineRepresentation::kWord32
+ : MachineRepresentation::kBit;
+ case IrOpcode::kInt64AddWithOverflow:
+ case IrOpcode::kInt64SubWithOverflow:
+ CHECK_LE(index, static_cast<size_t>(1));
+ return index == 0 ? MachineRepresentation::kWord64
+ : MachineRepresentation::kBit;
+ case IrOpcode::kTryTruncateFloat32ToInt64:
+ case IrOpcode::kTryTruncateFloat64ToInt64:
+ case IrOpcode::kTryTruncateFloat32ToUint64:
+ case IrOpcode::kTryTruncateFloat64ToUint64:
+ CHECK_LE(index, static_cast<size_t>(1));
+ return index == 0 ? MachineRepresentation::kWord64
+ : MachineRepresentation::kBit;
+ case IrOpcode::kCall: {
+ CallDescriptor const* desc = CallDescriptorOf(input->op());
+ return desc->GetReturnType(index).representation();
+ }
+ default:
+ return MachineRepresentation::kNone;
+ }
+ }
+
+ void Run() {
+ auto blocks = schedule_->all_blocks();
+ for (BasicBlock* block : *blocks) {
+ for (size_t i = 0; i <= block->NodeCount(); ++i) {
+ Node const* node =
+ i < block->NodeCount() ? block->NodeAt(i) : block->control_input();
+ if (node == nullptr) {
+ DCHECK_EQ(block->NodeCount(), i);
+ break;
+ }
+ switch (node->opcode()) {
+ case IrOpcode::kParameter:
+ representation_vector_[node->id()] =
+ linkage_->GetParameterType(ParameterIndexOf(node->op()))
+ .representation();
+ break;
+ case IrOpcode::kProjection: {
+ representation_vector_[node->id()] = GetProjectionType(node);
+ } break;
+ case IrOpcode::kTypedStateValues:
+ representation_vector_[node->id()] = MachineRepresentation::kNone;
+ break;
+ case IrOpcode::kAtomicLoad:
+ case IrOpcode::kLoad:
+ case IrOpcode::kProtectedLoad:
+ representation_vector_[node->id()] =
+ LoadRepresentationOf(node->op()).representation();
+ break;
+ case IrOpcode::kCheckedLoad:
+ representation_vector_[node->id()] =
+ CheckedLoadRepresentationOf(node->op()).representation();
+ break;
+ case IrOpcode::kLoadStackPointer:
+ case IrOpcode::kLoadFramePointer:
+ case IrOpcode::kLoadParentFramePointer:
+ representation_vector_[node->id()] =
+ MachineType::PointerRepresentation();
+ break;
+ case IrOpcode::kPhi:
+ representation_vector_[node->id()] =
+ PhiRepresentationOf(node->op());
+ break;
+ case IrOpcode::kCall: {
+ CallDescriptor const* desc = CallDescriptorOf(node->op());
+ if (desc->ReturnCount() > 0) {
+ representation_vector_[node->id()] =
+ desc->GetReturnType(0).representation();
+ } else {
+ representation_vector_[node->id()] =
+ MachineRepresentation::kTagged;
+ }
+ break;
+ }
+ case IrOpcode::kUnalignedLoad:
+ representation_vector_[node->id()] =
+ UnalignedLoadRepresentationOf(node->op()).representation();
+ break;
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kChangeBitToTagged:
+ case IrOpcode::kIfException:
+ case IrOpcode::kOsrValue:
+ case IrOpcode::kChangeInt32ToTagged:
+ case IrOpcode::kChangeUint32ToTagged:
+ case IrOpcode::kBitcastWordToTagged:
+ representation_vector_[node->id()] = MachineRepresentation::kTagged;
+ break;
+ case IrOpcode::kExternalConstant:
+ representation_vector_[node->id()] =
+ MachineType::PointerRepresentation();
+ break;
+ case IrOpcode::kBitcastTaggedToWord:
+ representation_vector_[node->id()] =
+ MachineType::PointerRepresentation();
+ break;
+ case IrOpcode::kBitcastWordToTaggedSigned:
+ representation_vector_[node->id()] =
+ MachineRepresentation::kTaggedSigned;
+ break;
+ case IrOpcode::kWord32Equal:
+ case IrOpcode::kInt32LessThan:
+ case IrOpcode::kInt32LessThanOrEqual:
+ case IrOpcode::kUint32LessThan:
+ case IrOpcode::kUint32LessThanOrEqual:
+ case IrOpcode::kWord64Equal:
+ case IrOpcode::kInt64LessThan:
+ case IrOpcode::kInt64LessThanOrEqual:
+ case IrOpcode::kUint64LessThan:
+ case IrOpcode::kUint64LessThanOrEqual:
+ case IrOpcode::kFloat32Equal:
+ case IrOpcode::kFloat32LessThan:
+ case IrOpcode::kFloat32LessThanOrEqual:
+ case IrOpcode::kFloat64Equal:
+ case IrOpcode::kFloat64LessThan:
+ case IrOpcode::kFloat64LessThanOrEqual:
+ case IrOpcode::kChangeTaggedToBit:
+ representation_vector_[node->id()] = MachineRepresentation::kBit;
+ break;
+#define LABEL(opcode) case IrOpcode::k##opcode:
+ case IrOpcode::kTruncateInt64ToInt32:
+ case IrOpcode::kTruncateFloat32ToInt32:
+ case IrOpcode::kTruncateFloat32ToUint32:
+ case IrOpcode::kBitcastFloat32ToInt32:
+ case IrOpcode::kInt32x4ExtractLane:
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kRelocatableInt32Constant:
+ case IrOpcode::kTruncateFloat64ToWord32:
+ case IrOpcode::kTruncateFloat64ToUint32:
+ case IrOpcode::kChangeFloat64ToInt32:
+ case IrOpcode::kChangeFloat64ToUint32:
+ case IrOpcode::kRoundFloat64ToInt32:
+ case IrOpcode::kFloat64ExtractLowWord32:
+ case IrOpcode::kFloat64ExtractHighWord32:
+ MACHINE_UNOP_32_LIST(LABEL)
+ MACHINE_BINOP_32_LIST(LABEL) {
+ representation_vector_[node->id()] =
+ MachineRepresentation::kWord32;
+ }
+ break;
+ case IrOpcode::kChangeInt32ToInt64:
+ case IrOpcode::kChangeUint32ToUint64:
+ case IrOpcode::kInt64Constant:
+ case IrOpcode::kRelocatableInt64Constant:
+ case IrOpcode::kBitcastFloat64ToInt64:
+ MACHINE_BINOP_64_LIST(LABEL) {
+ representation_vector_[node->id()] =
+ MachineRepresentation::kWord64;
+ }
+ break;
+ case IrOpcode::kRoundInt32ToFloat32:
+ case IrOpcode::kRoundUint32ToFloat32:
+ case IrOpcode::kRoundInt64ToFloat32:
+ case IrOpcode::kRoundUint64ToFloat32:
+ case IrOpcode::kFloat32Constant:
+ case IrOpcode::kTruncateFloat64ToFloat32:
+ MACHINE_FLOAT32_BINOP_LIST(LABEL)
+ MACHINE_FLOAT32_UNOP_LIST(LABEL) {
+ representation_vector_[node->id()] =
+ MachineRepresentation::kFloat32;
+ }
+ break;
+ case IrOpcode::kRoundInt64ToFloat64:
+ case IrOpcode::kRoundUint64ToFloat64:
+ case IrOpcode::kChangeFloat32ToFloat64:
+ case IrOpcode::kChangeInt32ToFloat64:
+ case IrOpcode::kChangeUint32ToFloat64:
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kFloat64SilenceNaN:
+ MACHINE_FLOAT64_BINOP_LIST(LABEL)
+ MACHINE_FLOAT64_UNOP_LIST(LABEL) {
+ representation_vector_[node->id()] =
+ MachineRepresentation::kFloat64;
+ }
+ break;
+#undef LABEL
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ Schedule const* const schedule_;
+ Linkage const* const linkage_;
+ ZoneVector<MachineRepresentation> representation_vector_;
+};
+
+class MachineRepresentationChecker {
+ public:
+ MachineRepresentationChecker(Schedule const* const schedule,
+ MachineRepresentationInferrer const* const typer)
+ : schedule_(schedule), typer_(typer) {}
+
+ void Run() {
+ BasicBlockVector const* blocks = schedule_->all_blocks();
+ for (BasicBlock* block : *blocks) {
+ for (size_t i = 0; i <= block->NodeCount(); ++i) {
+ Node const* node =
+ i < block->NodeCount() ? block->NodeAt(i) : block->control_input();
+ if (node == nullptr) {
+ DCHECK_EQ(block->NodeCount(), i);
+ break;
+ }
+ switch (node->opcode()) {
+ case IrOpcode::kCall:
+ case IrOpcode::kTailCall:
+ CheckCallInputs(node);
+ break;
+ case IrOpcode::kChangeBitToTagged:
+ CHECK_EQ(MachineRepresentation::kBit,
+ typer_->GetRepresentation(node->InputAt(0)));
+ break;
+ case IrOpcode::kChangeTaggedToBit:
+ CHECK_EQ(MachineRepresentation::kTagged,
+ typer_->GetRepresentation(node->InputAt(0)));
+ break;
+ case IrOpcode::kRoundInt64ToFloat64:
+ case IrOpcode::kRoundUint64ToFloat64:
+ case IrOpcode::kRoundInt64ToFloat32:
+ case IrOpcode::kRoundUint64ToFloat32:
+ case IrOpcode::kTruncateInt64ToInt32:
+ CheckValueInputForInt64Op(node, 0);
+ break;
+ case IrOpcode::kBitcastWordToTagged:
+ case IrOpcode::kBitcastWordToTaggedSigned:
+ CheckValueInputRepresentationIs(
+ node, 0, MachineType::PointerRepresentation());
+ break;
+ case IrOpcode::kBitcastTaggedToWord:
+ CheckValueInputIsTagged(node, 0);
+ break;
+ case IrOpcode::kTruncateFloat64ToWord32:
+ case IrOpcode::kTruncateFloat64ToUint32:
+ case IrOpcode::kTruncateFloat64ToFloat32:
+ case IrOpcode::kChangeFloat64ToInt32:
+ case IrOpcode::kChangeFloat64ToUint32:
+ case IrOpcode::kRoundFloat64ToInt32:
+ case IrOpcode::kFloat64ExtractLowWord32:
+ case IrOpcode::kFloat64ExtractHighWord32:
+ case IrOpcode::kBitcastFloat64ToInt64:
+ CheckValueInputForFloat64Op(node, 0);
+ break;
+ case IrOpcode::kWord64Equal:
+ CheckValueInputIsTaggedOrPointer(node, 0);
+ CheckValueInputRepresentationIs(
+ node, 1, typer_->GetRepresentation(node->InputAt(0)));
+ break;
+ case IrOpcode::kInt64LessThan:
+ case IrOpcode::kInt64LessThanOrEqual:
+ case IrOpcode::kUint64LessThan:
+ case IrOpcode::kUint64LessThanOrEqual:
+ CheckValueInputForInt64Op(node, 0);
+ CheckValueInputForInt64Op(node, 1);
+ break;
+ case IrOpcode::kInt32x4ExtractLane:
+ CheckValueInputRepresentationIs(node, 0,
+ MachineRepresentation::kSimd128);
+ break;
+#define LABEL(opcode) case IrOpcode::k##opcode:
+ case IrOpcode::kChangeInt32ToTagged:
+ case IrOpcode::kChangeUint32ToTagged:
+ case IrOpcode::kChangeInt32ToFloat64:
+ case IrOpcode::kChangeUint32ToFloat64:
+ case IrOpcode::kRoundInt32ToFloat32:
+ case IrOpcode::kRoundUint32ToFloat32:
+ case IrOpcode::kChangeInt32ToInt64:
+ case IrOpcode::kChangeUint32ToUint64:
+ MACHINE_UNOP_32_LIST(LABEL) { CheckValueInputForInt32Op(node, 0); }
+ break;
+ case IrOpcode::kWord32Equal:
+ case IrOpcode::kInt32LessThan:
+ case IrOpcode::kInt32LessThanOrEqual:
+ case IrOpcode::kUint32LessThan:
+ case IrOpcode::kUint32LessThanOrEqual:
+ MACHINE_BINOP_32_LIST(LABEL) {
+ CheckValueInputForInt32Op(node, 0);
+ CheckValueInputForInt32Op(node, 1);
+ }
+ break;
+ MACHINE_BINOP_64_LIST(LABEL) {
+ CheckValueInputForInt64Op(node, 0);
+ CheckValueInputForInt64Op(node, 1);
+ }
+ break;
+ case IrOpcode::kFloat32Equal:
+ case IrOpcode::kFloat32LessThan:
+ case IrOpcode::kFloat32LessThanOrEqual:
+ MACHINE_FLOAT32_BINOP_LIST(LABEL) {
+ CheckValueInputForFloat32Op(node, 0);
+ CheckValueInputForFloat32Op(node, 1);
+ }
+ break;
+ case IrOpcode::kChangeFloat32ToFloat64:
+ case IrOpcode::kTruncateFloat32ToInt32:
+ case IrOpcode::kTruncateFloat32ToUint32:
+ case IrOpcode::kBitcastFloat32ToInt32:
+ MACHINE_FLOAT32_UNOP_LIST(LABEL) {
+ CheckValueInputForFloat32Op(node, 0);
+ }
+ break;
+ case IrOpcode::kFloat64Equal:
+ case IrOpcode::kFloat64LessThan:
+ case IrOpcode::kFloat64LessThanOrEqual:
+ MACHINE_FLOAT64_BINOP_LIST(LABEL) {
+ CheckValueInputForFloat64Op(node, 0);
+ CheckValueInputForFloat64Op(node, 1);
+ }
+ break;
+ case IrOpcode::kFloat64SilenceNaN:
+ MACHINE_FLOAT64_UNOP_LIST(LABEL) {
+ CheckValueInputForFloat64Op(node, 0);
+ }
+ break;
+#undef LABEL
+ case IrOpcode::kParameter:
+ case IrOpcode::kProjection:
+ break;
+ case IrOpcode::kLoad:
+ case IrOpcode::kAtomicLoad:
+ CheckValueInputIsTaggedOrPointer(node, 0);
+ CheckValueInputRepresentationIs(
+ node, 1, MachineType::PointerRepresentation());
+ break;
+ case IrOpcode::kStore:
+ CheckValueInputIsTaggedOrPointer(node, 0);
+ CheckValueInputRepresentationIs(
+ node, 1, MachineType::PointerRepresentation());
+ switch (StoreRepresentationOf(node->op()).representation()) {
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTaggedSigned:
+ CheckValueInputIsTagged(node, 2);
+ break;
+ default:
+ CheckValueInputRepresentationIs(
+ node, 2,
+ StoreRepresentationOf(node->op()).representation());
+ }
+ break;
+ case IrOpcode::kAtomicStore:
+ CheckValueInputIsTaggedOrPointer(node, 0);
+ CheckValueInputRepresentationIs(
+ node, 1, MachineType::PointerRepresentation());
+ switch (AtomicStoreRepresentationOf(node->op())) {
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTaggedSigned:
+ CheckValueInputIsTagged(node, 2);
+ break;
+ default:
+ CheckValueInputRepresentationIs(
+ node, 2, AtomicStoreRepresentationOf(node->op()));
+ }
+ break;
+ case IrOpcode::kPhi:
+ switch (typer_->GetRepresentation(node)) {
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTaggedSigned:
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ CheckValueInputIsTagged(node, i);
+ }
+ break;
+ default:
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ CheckValueInputRepresentationIs(
+ node, i, typer_->GetRepresentation(node));
+ }
+ break;
+ }
+ break;
+ case IrOpcode::kBranch:
+ case IrOpcode::kSwitch:
+ CheckValueInputForInt32Op(node, 0);
+ break;
+ case IrOpcode::kReturn:
+ // TODO(epertoso): use the linkage to determine which tipe we
+ // should have here.
+ break;
+ case IrOpcode::kTypedStateValues:
+ case IrOpcode::kFrameState:
+ break;
+ default:
+ if (node->op()->ValueInputCount() != 0) {
+ std::stringstream str;
+ str << "Node #" << node->id() << ":" << *node->op()
+ << " in the machine graph is not being checked.";
+ FATAL(str.str().c_str());
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ private:
+ void CheckValueInputRepresentationIs(Node const* node, int index,
+ MachineRepresentation representation) {
+ Node const* input = node->InputAt(index);
+ if (typer_->GetRepresentation(input) != representation) {
+ std::stringstream str;
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " uses node #" << input->id() << ":" << *input->op()
+ << " which doesn't have a " << MachineReprToString(representation)
+ << " representation.";
+ FATAL(str.str().c_str());
+ }
+ }
+
+ void CheckValueInputIsTagged(Node const* node, int index) {
+ Node const* input = node->InputAt(index);
+ switch (typer_->GetRepresentation(input)) {
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTaggedSigned:
+ return;
+ default:
+ break;
+ }
+ std::ostringstream str;
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " uses node #" << input->id() << ":" << *input->op()
+ << " which doesn't have a tagged representation.";
+ FATAL(str.str().c_str());
+ }
+
+ void CheckValueInputIsTaggedOrPointer(Node const* node, int index) {
+ Node const* input = node->InputAt(index);
+ switch (typer_->GetRepresentation(input)) {
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTaggedSigned:
+ return;
+ default:
+ break;
+ }
+ if (typer_->GetRepresentation(input) !=
+ MachineType::PointerRepresentation()) {
+ std::ostringstream str;
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " uses node #" << input->id() << ":" << *input->op()
+ << " which doesn't have a tagged or pointer representation.";
+ FATAL(str.str().c_str());
+ }
+ }
+
+ void CheckValueInputForInt32Op(Node const* node, int index) {
+ Node const* input = node->InputAt(index);
+ switch (typer_->GetRepresentation(input)) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return;
+ case MachineRepresentation::kNone: {
+ std::ostringstream str;
+ str << "TypeError: node #" << input->id() << ":" << *input->op()
+ << " is untyped.";
+ FATAL(str.str().c_str());
+ break;
+ }
+ default:
+ break;
+ }
+ std::ostringstream str;
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " uses node #" << input->id() << ":" << *input->op()
+ << " which doesn't have an int32-compatible representation.";
+ FATAL(str.str().c_str());
+ }
+
+ void CheckValueInputForInt64Op(Node const* node, int index) {
+ Node const* input = node->InputAt(index);
+ switch (typer_->GetRepresentation(input)) {
+ case MachineRepresentation::kWord64:
+ return;
+ case MachineRepresentation::kNone: {
+ std::ostringstream str;
+ str << "TypeError: node #" << input->id() << ":" << *input->op()
+ << " is untyped.";
+ FATAL(str.str().c_str());
+ break;
+ }
+
+ default:
+ break;
+ }
+ std::ostringstream str;
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " uses node #" << input->id() << ":" << *input->op()
+ << " which doesn't have a kWord64 representation.";
+ FATAL(str.str().c_str());
+ }
+
+ void CheckValueInputForFloat32Op(Node const* node, int index) {
+ Node const* input = node->InputAt(index);
+ if (MachineRepresentation::kFloat32 == typer_->GetRepresentation(input)) {
+ return;
+ }
+ std::ostringstream str;
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " uses node #" << input->id() << ":" << *input->op()
+ << " which doesn't have a kFloat32 representation.";
+ FATAL(str.str().c_str());
+ }
+
+ void CheckValueInputForFloat64Op(Node const* node, int index) {
+ Node const* input = node->InputAt(index);
+ if (MachineRepresentation::kFloat64 == typer_->GetRepresentation(input)) {
+ return;
+ }
+ std::ostringstream str;
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " uses node #" << input->id() << ":" << *input->op()
+ << " which doesn't have a kFloat64 representation.";
+ FATAL(str.str().c_str());
+ }
+
+ void CheckCallInputs(Node const* node) {
+ CallDescriptor const* desc = CallDescriptorOf(node->op());
+ std::ostringstream str;
+ bool should_log_error = false;
+ for (size_t i = 0; i < desc->InputCount(); ++i) {
+ Node const* input = node->InputAt(static_cast<int>(i));
+ MachineRepresentation const input_type = typer_->GetRepresentation(input);
+ MachineRepresentation const expected_input_type =
+ desc->GetInputType(i).representation();
+ if (!IsCompatible(expected_input_type, input_type)) {
+ if (!should_log_error) {
+ should_log_error = true;
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " has wrong type for:" << std::endl;
+ } else {
+ str << std::endl;
+ }
+ str << " * input " << i << " (" << input->id() << ":" << *input->op()
+ << ") doesn't have a " << MachineReprToString(expected_input_type)
+ << " representation.";
+ }
+ }
+ if (should_log_error) {
+ FATAL(str.str().c_str());
+ }
+ }
+
+ bool Intersect(MachineRepresentation lhs, MachineRepresentation rhs) {
+ return (GetRepresentationProperties(lhs) &
+ GetRepresentationProperties(rhs)) != 0;
+ }
+
+ enum RepresentationProperties { kIsPointer = 1, kIsTagged = 2 };
+
+ int GetRepresentationProperties(MachineRepresentation representation) {
+ switch (representation) {
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedPointer:
+ return kIsPointer | kIsTagged;
+ case MachineRepresentation::kTaggedSigned:
+ return kIsTagged;
+ case MachineRepresentation::kWord32:
+ return MachineRepresentation::kWord32 ==
+ MachineType::PointerRepresentation()
+ ? kIsPointer
+ : 0;
+ case MachineRepresentation::kWord64:
+ return MachineRepresentation::kWord64 ==
+ MachineType::PointerRepresentation()
+ ? kIsPointer
+ : 0;
+ default:
+ return 0;
+ }
+ }
+
+ bool IsCompatible(MachineRepresentation expected,
+ MachineRepresentation actual) {
+ switch (expected) {
+ case MachineRepresentation::kTagged:
+ return (actual == MachineRepresentation::kTagged ||
+ actual == MachineRepresentation::kTaggedSigned ||
+ actual == MachineRepresentation::kTaggedPointer);
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord64:
+ return expected == actual;
+ break;
+ case MachineRepresentation::kWord32:
+ return (actual == MachineRepresentation::kBit ||
+ actual == MachineRepresentation::kWord8 ||
+ actual == MachineRepresentation::kWord16 ||
+ actual == MachineRepresentation::kWord32);
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+ return false;
+ }
+
+ Schedule const* const schedule_;
+ MachineRepresentationInferrer const* const typer_;
+};
+
+} // namespace
+
+void MachineGraphVerifier::Run(Graph* graph, Schedule const* const schedule,
+ Linkage* linkage, Zone* temp_zone) {
+ MachineRepresentationInferrer representation_inferrer(schedule, graph,
+ linkage, temp_zone);
+ MachineRepresentationChecker checker(schedule, &representation_inferrer);
+ checker.Run();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/machine-graph-verifier.h b/deps/v8/src/compiler/machine-graph-verifier.h
new file mode 100644
index 0000000000..b7d7b6166c
--- /dev/null
+++ b/deps/v8/src/compiler/machine-graph-verifier.h
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_GRAPH_VERIFIER_H_
+#define V8_COMPILER_MACHINE_GRAPH_VERIFIER_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+class Zone;
+namespace compiler {
+
+class Graph;
+class Linkage;
+class Schedule;
+
+// Verifies properties of a scheduled graph, such as that the nodes' inputs are
+// of the correct type.
+class MachineGraphVerifier {
+ public:
+ static void Run(Graph* graph, Schedule const* const schedule,
+ Linkage* linkage, Zone* temp_zone);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_MACHINE_GRAPH_VERIFIER_H_
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 99044aa86d..0ad20f0684 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -150,21 +150,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReduceWord32And(node);
case IrOpcode::kWord32Or:
return ReduceWord32Or(node);
- case IrOpcode::kWord32Xor: {
- Int32BinopMatcher m(node);
- if (m.right().Is(0)) return Replace(m.left().node()); // x ^ 0 => x
- if (m.IsFoldable()) { // K ^ K => K
- return ReplaceInt32(m.left().Value() ^ m.right().Value());
- }
- if (m.LeftEqualsRight()) return ReplaceInt32(0); // x ^ x => 0
- if (m.left().IsWord32Xor() && m.right().Is(-1)) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().Is(-1)) { // (x ^ -1) ^ -1 => x
- return Replace(mleft.left().node());
- }
- }
- break;
- }
+ case IrOpcode::kWord32Xor:
+ return ReduceWord32Xor(node);
case IrOpcode::kWord32Shl:
return ReduceWord32Shl(node);
case IrOpcode::kWord64Shl:
@@ -418,6 +405,11 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsFoldable()) { // K * K => K
return ReplaceFloat64(m.left().Value() * m.right().Value());
}
+ if (m.right().Is(2)) { // x * 2.0 => x + x
+ node->ReplaceInput(1, m.left().node());
+ NodeProperties::ChangeOp(node, machine()->Float64Add());
+ return Changed(node);
+ }
break;
}
case IrOpcode::kFloat64Div: {
@@ -432,6 +424,19 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsFoldable()) { // K / K => K
return ReplaceFloat64(m.left().Value() / m.right().Value());
}
+ if (m.right().Is(-1)) { // x / -1.0 => -x
+ node->RemoveInput(1);
+ NodeProperties::ChangeOp(node, machine()->Float64Neg());
+ return Changed(node);
+ }
+ if (m.right().IsNormal() && m.right().IsPositiveOrNegativePowerOf2()) {
+ // All reciprocals of non-denormal powers of two can be represented
+ // exactly, so division by power of two can be reduced to
+ // multiplication by reciprocal, with the same result.
+ node->ReplaceInput(1, Float64Constant(1.0 / m.right().Value()));
+ NodeProperties::ChangeOp(node, machine()->Float64Mul());
+ return Changed(node);
+ }
break;
}
case IrOpcode::kFloat64Mod: {
@@ -541,8 +546,9 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Pow: {
Float64BinopMatcher m(node);
- // TODO(bmeurer): Constant fold once we have a unified pow implementation.
- if (m.right().Is(0.0)) { // x ** +-0.0 => 1.0
+ if (m.IsFoldable()) {
+ return ReplaceFloat64(Pow(m.left().Value(), m.right().Value()));
+ } else if (m.right().Is(0.0)) { // x ** +-0.0 => 1.0
return ReplaceFloat64(1.0);
} else if (m.right().Is(-2.0)) { // x ** -2.0 => 1 / (x * x)
node->ReplaceInput(0, Float64Constant(1.0));
@@ -1221,22 +1227,17 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
return NoChange();
}
-
-Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
- DCHECK_EQ(IrOpcode::kWord32Or, node->opcode());
+Reduction MachineOperatorReducer::TryMatchWord32Ror(Node* node) {
+ DCHECK(IrOpcode::kWord32Or == node->opcode() ||
+ IrOpcode::kWord32Xor == node->opcode());
Int32BinopMatcher m(node);
- if (m.right().Is(0)) return Replace(m.left().node()); // x | 0 => x
- if (m.right().Is(-1)) return Replace(m.right().node()); // x | -1 => -1
- if (m.IsFoldable()) { // K | K => K
- return ReplaceInt32(m.left().Value() | m.right().Value());
- }
- if (m.LeftEqualsRight()) return Replace(m.left().node()); // x | x => x
-
Node* shl = nullptr;
Node* shr = nullptr;
- // Recognize rotation, we are matching either:
+ // Recognize rotation, we are matching:
// * x << y | x >>> (32 - y) => x ror (32 - y), i.e x rol y
// * x << (32 - y) | x >>> y => x ror y
+ // * x << y ^ x >>> (32 - y) => x ror (32 - y), i.e. x rol y
+ // * x << (32 - y) ^ x >>> y => x ror y
// as well as their commuted form.
if (m.left().IsWord32Shl() && m.right().IsWord32Shr()) {
shl = m.left().node();
@@ -1278,6 +1279,36 @@ Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
return Changed(node);
}
+Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
+ DCHECK_EQ(IrOpcode::kWord32Or, node->opcode());
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x | 0 => x
+ if (m.right().Is(-1)) return Replace(m.right().node()); // x | -1 => -1
+ if (m.IsFoldable()) { // K | K => K
+ return ReplaceInt32(m.left().Value() | m.right().Value());
+ }
+ if (m.LeftEqualsRight()) return Replace(m.left().node()); // x | x => x
+
+ return TryMatchWord32Ror(node);
+}
+
+Reduction MachineOperatorReducer::ReduceWord32Xor(Node* node) {
+ DCHECK_EQ(IrOpcode::kWord32Xor, node->opcode());
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x ^ 0 => x
+ if (m.IsFoldable()) { // K ^ K => K
+ return ReplaceInt32(m.left().Value() ^ m.right().Value());
+ }
+ if (m.LeftEqualsRight()) return ReplaceInt32(0); // x ^ x => 0
+ if (m.left().IsWord32Xor() && m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().Is(-1)) { // (x ^ -1) ^ -1 => x
+ return Replace(mleft.left().node());
+ }
+ }
+
+ return TryMatchWord32Ror(node);
+}
Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
DCHECK_EQ(IrOpcode::kFloat64InsertLowWord32, node->opcode());
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 167bf7efd3..574f45c0b3 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -87,7 +87,9 @@ class MachineOperatorReducer final : public Reducer {
Reduction ReduceWord32Sar(Node* node);
Reduction ReduceWord64Sar(Node* node);
Reduction ReduceWord32And(Node* node);
+ Reduction TryMatchWord32Ror(Node* node);
Reduction ReduceWord32Or(Node* node);
+ Reduction ReduceWord32Xor(Node* node);
Reduction ReduceFloat64InsertLowWord32(Node* node);
Reduction ReduceFloat64InsertHighWord32(Node* node);
Reduction ReduceFloat64Compare(Node* node);
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 43c6202eb7..e36a61e733 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -36,6 +36,7 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
LoadRepresentation LoadRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kLoad == op->opcode() ||
+ IrOpcode::kProtectedLoad == op->opcode() ||
IrOpcode::kAtomicLoad == op->opcode());
return OpParameter<LoadRepresentation>(op);
}
@@ -78,315 +79,317 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
return OpParameter<MachineRepresentation>(op);
}
-#define PURE_OP_LIST(V) \
- V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Word32Shl, Operator::kNoProperties, 2, 0, 1) \
- V(Word32Shr, Operator::kNoProperties, 2, 0, 1) \
- V(Word32Sar, Operator::kNoProperties, 2, 0, 1) \
- V(Word32Ror, Operator::kNoProperties, 2, 0, 1) \
- V(Word32Equal, Operator::kCommutative, 2, 0, 1) \
- V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
- V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Word64Shl, Operator::kNoProperties, 2, 0, 1) \
- V(Word64Shr, Operator::kNoProperties, 2, 0, 1) \
- V(Word64Sar, Operator::kNoProperties, 2, 0, 1) \
- V(Word64Ror, Operator::kNoProperties, 2, 0, 1) \
- V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
- V(Word64Equal, Operator::kCommutative, 2, 0, 1) \
- V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int32Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int32Div, Operator::kNoProperties, 2, 1, 1) \
- V(Int32Mod, Operator::kNoProperties, 2, 1, 1) \
- V(Int32LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32Div, Operator::kNoProperties, 2, 1, 1) \
- V(Uint32LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32Mod, Operator::kNoProperties, 2, 1, 1) \
- V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int64Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int64Div, Operator::kNoProperties, 2, 1, 1) \
- V(Int64Mod, Operator::kNoProperties, 2, 1, 1) \
- V(Int64LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint64Div, Operator::kNoProperties, 2, 1, 1) \
- V(Uint64Mod, Operator::kNoProperties, 2, 1, 1) \
- V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(BitcastWordToTagged, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1) \
- V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2) \
- V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2) \
- V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
- V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
- V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(Float64SilenceNaN, Operator::kNoProperties, 1, 0, 1) \
- V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(RoundUint32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
- V(ImpossibleToWord32, Operator::kNoProperties, 1, 0, 1) \
- V(ImpossibleToWord64, Operator::kNoProperties, 1, 0, 1) \
- V(ImpossibleToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(ImpossibleToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(ImpossibleToTagged, Operator::kNoProperties, 1, 0, 1) \
- V(ImpossibleToBit, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Add, Operator::kCommutative, 2, 0, 1) \
- V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Float32Mul, Operator::kCommutative, 2, 0, 1) \
- V(Float32Div, Operator::kNoProperties, 2, 0, 1) \
- V(Float32Neg, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Float32Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Float64Abs, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Acos, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Acosh, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Asin, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Asinh, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Atan, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Atan2, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Atanh, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Cbrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Cos, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Cosh, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Exp, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Expm1, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Log, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Log1p, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Log2, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Log10, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Float64Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Float64Neg, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Add, Operator::kCommutative, 2, 0, 1) \
- V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Mul, Operator::kCommutative, 2, 0, 1) \
- V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Pow, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Sin, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Sinh, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Tan, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Tanh, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Equal, Operator::kCommutative, 2, 0, 1) \
- V(Float32LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Equal, Operator::kCommutative, 2, 0, 1) \
- V(Float64LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Float64ExtractLowWord32, Operator::kNoProperties, 1, 0, 1) \
- V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
- V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
- V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
- V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
- V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
- V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \
- V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2) \
- V(Int32PairSub, Operator::kNoProperties, 4, 0, 2) \
- V(Int32PairMul, Operator::kNoProperties, 4, 0, 2) \
- V(Word32PairShl, Operator::kNoProperties, 3, 0, 2) \
- V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
- V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \
- V(CreateFloat32x4, Operator::kNoProperties, 4, 0, 1) \
- V(Float32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
- V(Float32x4Abs, Operator::kNoProperties, 1, 0, 1) \
- V(Float32x4Neg, Operator::kNoProperties, 1, 0, 1) \
- V(Float32x4Sqrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float32x4RecipApprox, Operator::kNoProperties, 1, 0, 1) \
- V(Float32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1) \
- V(Float32x4Add, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4Mul, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4Div, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4Min, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4Max, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4MinNum, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4MaxNum, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4Equal, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
- V(Float32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4Select, Operator::kNoProperties, 3, 0, 1) \
- V(Float32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
- V(Float32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
- V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1) \
- V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1) \
- V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1) \
- V(Int32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
- V(Int32x4Neg, Operator::kNoProperties, 1, 0, 1) \
- V(Int32x4Add, Operator::kCommutative, 2, 0, 1) \
- V(Int32x4Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4Mul, Operator::kCommutative, 2, 0, 1) \
- V(Int32x4Min, Operator::kCommutative, 2, 0, 1) \
- V(Int32x4Max, Operator::kCommutative, 2, 0, 1) \
- V(Int32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4Equal, Operator::kCommutative, 2, 0, 1) \
- V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
- V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4Select, Operator::kNoProperties, 3, 0, 1) \
- V(Int32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
- V(Int32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
- V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
- V(Uint32x4Min, Operator::kCommutative, 2, 0, 1) \
- V(Uint32x4Max, Operator::kCommutative, 2, 0, 1) \
- V(Uint32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
- V(CreateBool32x4, Operator::kNoProperties, 4, 0, 1) \
- V(Bool32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Bool32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
- V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool32x4Not, Operator::kNoProperties, 1, 0, 1) \
- V(Bool32x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(Bool32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(Bool32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
- V(Bool32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
- V(Bool32x4Equal, Operator::kCommutative, 2, 0, 1) \
- V(Bool32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
- V(CreateInt16x8, Operator::kNoProperties, 8, 0, 1) \
- V(Int16x8ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
- V(Int16x8Neg, Operator::kNoProperties, 1, 0, 1) \
- V(Int16x8Add, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8AddSaturate, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8SubSaturate, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8Mul, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8Min, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8Max, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8Equal, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1) \
- V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int16x8Select, Operator::kNoProperties, 3, 0, 1) \
- V(Int16x8Swizzle, Operator::kNoProperties, 9, 0, 1) \
- V(Int16x8Shuffle, Operator::kNoProperties, 10, 0, 1) \
- V(Uint16x8AddSaturate, Operator::kCommutative, 2, 0, 1) \
- V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1) \
- V(Uint16x8Min, Operator::kCommutative, 2, 0, 1) \
- V(Uint16x8Max, Operator::kCommutative, 2, 0, 1) \
- V(Uint16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Uint16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(CreateBool16x8, Operator::kNoProperties, 8, 0, 1) \
- V(Bool16x8ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Bool16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
- V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool16x8Not, Operator::kNoProperties, 1, 0, 1) \
- V(Bool16x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(Bool16x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(Bool16x8Swizzle, Operator::kNoProperties, 9, 0, 1) \
- V(Bool16x8Shuffle, Operator::kNoProperties, 10, 0, 1) \
- V(Bool16x8Equal, Operator::kCommutative, 2, 0, 1) \
- V(Bool16x8NotEqual, Operator::kCommutative, 2, 0, 1) \
- V(CreateInt8x16, Operator::kNoProperties, 16, 0, 1) \
- V(Int8x16ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
- V(Int8x16Neg, Operator::kNoProperties, 1, 0, 1) \
- V(Int8x16Add, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16AddSaturate, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16SubSaturate, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16Mul, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16Min, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16Max, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16Equal, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1) \
- V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int8x16Select, Operator::kNoProperties, 3, 0, 1) \
- V(Int8x16Swizzle, Operator::kNoProperties, 17, 0, 1) \
- V(Int8x16Shuffle, Operator::kNoProperties, 18, 0, 1) \
- V(Uint8x16AddSaturate, Operator::kCommutative, 2, 0, 1) \
- V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1) \
- V(Uint8x16Min, Operator::kCommutative, 2, 0, 1) \
- V(Uint8x16Max, Operator::kCommutative, 2, 0, 1) \
- V(Uint8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Uint8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
- V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(CreateBool8x16, Operator::kNoProperties, 16, 0, 1) \
- V(Bool8x16ExtractLane, Operator::kNoProperties, 2, 0, 1) \
- V(Bool8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
- V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Bool8x16Not, Operator::kNoProperties, 1, 0, 1) \
- V(Bool8x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(Bool8x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(Bool8x16Swizzle, Operator::kNoProperties, 17, 0, 1) \
- V(Bool8x16Shuffle, Operator::kNoProperties, 18, 0, 1) \
- V(Bool8x16Equal, Operator::kCommutative, 2, 0, 1) \
- V(Bool8x16NotEqual, Operator::kCommutative, 2, 0, 1) \
- V(Simd128Load, Operator::kNoProperties, 2, 0, 1) \
- V(Simd128Load1, Operator::kNoProperties, 2, 0, 1) \
- V(Simd128Load2, Operator::kNoProperties, 2, 0, 1) \
- V(Simd128Load3, Operator::kNoProperties, 2, 0, 1) \
- V(Simd128Store, Operator::kNoProperties, 3, 0, 1) \
- V(Simd128Store1, Operator::kNoProperties, 3, 0, 1) \
- V(Simd128Store2, Operator::kNoProperties, 3, 0, 1) \
- V(Simd128Store3, Operator::kNoProperties, 3, 0, 1) \
- V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+#define PURE_BINARY_OP_LIST_32(V) \
+ V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Word32Shl, Operator::kNoProperties, 2, 0, 1) \
+ V(Word32Shr, Operator::kNoProperties, 2, 0, 1) \
+ V(Word32Sar, Operator::kNoProperties, 2, 0, 1) \
+ V(Word32Ror, Operator::kNoProperties, 2, 0, 1) \
+ V(Word32Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int32Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int32Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Int32Mod, Operator::kNoProperties, 2, 1, 1) \
+ V(Int32LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Uint32LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32Mod, Operator::kNoProperties, 2, 1, 1) \
+ V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)
+
+#define PURE_BINARY_OP_LIST_64(V) \
+ V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Word64Shl, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64Shr, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64Sar, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64Ror, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int64Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int64Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Int64Mod, Operator::kNoProperties, 2, 1, 1) \
+ V(Int64LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint64Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Uint64Mod, Operator::kNoProperties, 2, 1, 1) \
+ V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)
+
+#define PURE_OP_LIST(V) \
+ PURE_BINARY_OP_LIST_32(V) \
+ PURE_BINARY_OP_LIST_64(V) \
+ V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastTaggedToWord, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastWordToTagged, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
+ V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64SilenceNaN, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Add, Operator::kCommutative, 2, 0, 1) \
+ V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Float32Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Float32Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Float64Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Acos, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Acosh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Asin, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Asinh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Atan, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Atan2, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Atanh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Cbrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Cos, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Cosh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Exp, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Expm1, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log1p, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log2, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log10, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Float64Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Float64Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Add, Operator::kCommutative, 2, 0, 1) \
+ V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Pow, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Sin, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Sinh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Tan, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Tanh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Float32LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Float64LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64ExtractLowWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
+ V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
+ V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
+ V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \
+ V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2) \
+ V(Int32PairSub, Operator::kNoProperties, 4, 0, 2) \
+ V(Int32PairMul, Operator::kNoProperties, 4, 0, 2) \
+ V(Word32PairShl, Operator::kNoProperties, 3, 0, 2) \
+ V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
+ V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \
+ V(CreateFloat32x4, Operator::kNoProperties, 4, 0, 1) \
+ V(Float32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Float32x4Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4Sqrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4RecipApprox, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4Add, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4Min, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4Max, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4MinNum, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4MaxNum, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Float32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
+ V(Float32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
+ V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1) \
+ V(Int32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Int32x4Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Int32x4Add, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4Min, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4Max, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Int32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
+ V(Int32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
+ V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(Uint32x4Min, Operator::kCommutative, 2, 0, 1) \
+ V(Uint32x4Max, Operator::kCommutative, 2, 0, 1) \
+ V(Uint32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(CreateBool32x4, Operator::kNoProperties, 4, 0, 1) \
+ V(Bool32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Bool32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool32x4Not, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool32x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
+ V(Bool32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
+ V(Bool32x4Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Bool32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(CreateInt16x8, Operator::kNoProperties, 8, 0, 1) \
+ V(Int16x8ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Int16x8Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Int16x8Add, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8AddSaturate, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8SubSaturate, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8Min, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8Max, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Int16x8Swizzle, Operator::kNoProperties, 9, 0, 1) \
+ V(Int16x8Shuffle, Operator::kNoProperties, 10, 0, 1) \
+ V(Uint16x8AddSaturate, Operator::kCommutative, 2, 0, 1) \
+ V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8Min, Operator::kCommutative, 2, 0, 1) \
+ V(Uint16x8Max, Operator::kCommutative, 2, 0, 1) \
+ V(Uint16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(CreateBool16x8, Operator::kNoProperties, 8, 0, 1) \
+ V(Bool16x8ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Bool16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool16x8Not, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool16x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool16x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool16x8Swizzle, Operator::kNoProperties, 9, 0, 1) \
+ V(Bool16x8Shuffle, Operator::kNoProperties, 10, 0, 1) \
+ V(Bool16x8Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Bool16x8NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(CreateInt8x16, Operator::kNoProperties, 16, 0, 1) \
+ V(Int8x16ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Int8x16Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Int8x16Add, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16AddSaturate, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16SubSaturate, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16Min, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16Max, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Int8x16Swizzle, Operator::kNoProperties, 17, 0, 1) \
+ V(Int8x16Shuffle, Operator::kNoProperties, 18, 0, 1) \
+ V(Uint8x16AddSaturate, Operator::kCommutative, 2, 0, 1) \
+ V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16Min, Operator::kCommutative, 2, 0, 1) \
+ V(Uint8x16Max, Operator::kCommutative, 2, 0, 1) \
+ V(Uint8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(CreateBool8x16, Operator::kNoProperties, 16, 0, 1) \
+ V(Bool8x16ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Bool8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool8x16Not, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool8x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool8x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool8x16Swizzle, Operator::kNoProperties, 17, 0, 1) \
+ V(Bool8x16Shuffle, Operator::kNoProperties, 18, 0, 1) \
+ V(Bool8x16Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Bool8x16NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Simd128Load, Operator::kNoProperties, 2, 0, 1) \
+ V(Simd128Load1, Operator::kNoProperties, 2, 0, 1) \
+ V(Simd128Load2, Operator::kNoProperties, 2, 0, 1) \
+ V(Simd128Load3, Operator::kNoProperties, 2, 0, 1) \
+ V(Simd128Store, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd128Store1, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd128Store2, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd128Store3, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Simd128Not, Operator::kNoProperties, 1, 0, 1)
#define PURE_OPTIONAL_OP_LIST(V) \
@@ -428,6 +431,8 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Int64) \
V(Uint64) \
V(Pointer) \
+ V(TaggedSigned) \
+ V(TaggedPointer) \
V(AnyTagged)
#define MACHINE_REPRESENTATION_LIST(V) \
@@ -504,9 +509,18 @@ struct MachineOperatorGlobalCache {
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
"CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
+ struct ProtectedLoad##Type##Operator final \
+ : public Operator1<ProtectedLoadRepresentation> { \
+ ProtectedLoad##Type##Operator() \
+ : Operator1<ProtectedLoadRepresentation>( \
+ IrOpcode::kProtectedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "ProtectedLoad", 4, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
Load##Type##Operator kLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
- CheckedLoad##Type##Operator kCheckedLoad##Type;
+ CheckedLoad##Type##Operator kCheckedLoad##Type; \
+ ProtectedLoad##Type##Operator kProtectedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -701,6 +715,17 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
return nullptr;
}
+const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kProtectedLoad##Type; \
+ }
+ MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+ UNREACHABLE();
+ return nullptr;
+}
+
const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
#define STACKSLOT(Type) \
if (rep == MachineType::Type().representation()) { \
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 611846a1db..56cefc5923 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -41,6 +41,7 @@ class OptionalOperator final {
// A Load needs a MachineType.
typedef MachineType LoadRepresentation;
+typedef LoadRepresentation ProtectedLoadRepresentation;
LoadRepresentation LoadRepresentationOf(Operator const*);
@@ -276,9 +277,15 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Uint64LessThanOrEqual();
const Operator* Uint64Mod();
+ // This operator reinterprets the bits of a tagged pointer as word.
+ const Operator* BitcastTaggedToWord();
+
// This operator reinterprets the bits of a word as tagged pointer.
const Operator* BitcastWordToTagged();
+ // This operator reinterprets the bits of a word as a Smi.
+ const Operator* BitcastWordToTaggedSigned();
+
// JavaScript float64 to int32/uint32 truncation.
const Operator* TruncateFloat64ToWord32();
@@ -302,16 +309,6 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* ChangeUint32ToFloat64();
const Operator* ChangeUint32ToUint64();
- // These are changes from impossible values (for example a smi-checked
- // string). They can safely emit an abort instruction, which should
- // never be reached.
- const Operator* ImpossibleToWord32();
- const Operator* ImpossibleToWord64();
- const Operator* ImpossibleToFloat32();
- const Operator* ImpossibleToFloat64();
- const Operator* ImpossibleToTagged();
- const Operator* ImpossibleToBit();
-
// These operators truncate or round numbers, both changing the representation
// of the number and mapping multiple input values onto the same output value.
const Operator* TruncateFloat64ToFloat32();
@@ -611,6 +608,7 @@ class MachineOperatorBuilder final : public ZoneObject {
// load [base + index]
const Operator* Load(LoadRepresentation rep);
+ const Operator* ProtectedLoad(LoadRepresentation rep);
// store [base + index], value
const Operator* Store(StoreRepresentation rep);
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 97c4362728..66fcbb9362 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -107,7 +107,38 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
Node* size = node->InputAt(0);
Node* effect = node->InputAt(1);
Node* control = node->InputAt(2);
- PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
+ PretenureFlag pretenure = PretenureFlagOf(node->op());
+
+ // Propagate tenuring from outer allocations to inner allocations, i.e.
+ // when we allocate an object in old space and store a newly allocated
+ // child object into the pretenured object, then the newly allocated
+ // child object also should get pretenured to old space.
+ if (pretenure == TENURED) {
+ for (Edge const edge : node->use_edges()) {
+ Node* const user = edge.from();
+ if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
+ Node* const child = user->InputAt(1);
+ if (child->opcode() == IrOpcode::kAllocate &&
+ PretenureFlagOf(child->op()) == NOT_TENURED) {
+ NodeProperties::ChangeOp(child, node->op());
+ break;
+ }
+ }
+ }
+ } else {
+ DCHECK_EQ(NOT_TENURED, pretenure);
+ for (Edge const edge : node->use_edges()) {
+ Node* const user = edge.from();
+ if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
+ Node* const parent = user->InputAt(0);
+ if (parent->opcode() == IrOpcode::kAllocate &&
+ PretenureFlagOf(parent->op()) == TENURED) {
+ pretenure = TENURED;
+ break;
+ }
+ }
+ }
+ }
// Determine the top/limit addresses.
Node* top_address = jsgraph()->ExternalConstant(
@@ -122,9 +153,9 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
// Check if we can fold this allocation into a previous allocation represented
// by the incoming {state}.
Int32Matcher m(size);
- if (m.HasValue() && m.Value() < Page::kMaxRegularHeapObjectSize) {
+ if (m.HasValue() && m.Value() < kMaxRegularHeapObjectSize) {
int32_t const object_size = m.Value();
- if (state->size() <= Page::kMaxRegularHeapObjectSize - object_size &&
+ if (state->size() <= kMaxRegularHeapObjectSize - object_size &&
state->group()->pretenure() == pretenure) {
// We can fold this Allocate {node} into the allocation {group}
// represented by the given {state}. Compute the upper bound for
@@ -282,8 +313,9 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
+ value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTaggedPointer, 2), vtrue, vfalse,
+ control);
// Create an unfoldable allocation group.
AllocationGroup* group =
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index f0cd546860..ba1d6dd72b 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
#define V8_COMPILER_MEMORY_OPTIMIZER_H_
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index d06bc305e2..12ab4af771 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast/scopes.h"
#include "src/compiler/code-generator.h"
+#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
@@ -693,9 +693,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDebugBreak:
__ stop("kArchDebugBreak");
break;
- case kArchImpossible:
- __ Abort(kConversionFromImpossibleValue);
- break;
case kArchComment: {
Address comment_string = i.InputExternalReference(0).address();
__ RecordComment(reinterpret_cast<const char*>(comment_string));
@@ -710,8 +707,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result =
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result = AssembleDeoptimizerCall(
+ deopt_state_id, bailout_type, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -1121,6 +1118,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
+ case kMipsMaddS:
+ __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2));
+ break;
+ case kMipsMaddD:
+ __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2));
+ break;
+ case kMipsMaddfS:
+ __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
+ i.InputFloatRegister(2));
+ break;
+ case kMipsMaddfD:
+ __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
+ break;
+ case kMipsMsubS:
+ __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2));
+ break;
+ case kMipsMsubD:
+ __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2));
+ break;
+ case kMipsMsubfS:
+ __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
+ i.InputFloatRegister(2));
+ break;
+ case kMipsMsubfD:
+ __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
+ break;
case kMipsMulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1358,7 +1387,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
// ... more basic instructions ...
-
+ case kMipsSeb:
+ __ seb(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kMipsSeh:
+ __ seh(i.OutputRegister(), i.InputRegister(0));
+ break;
case kMipsLbu:
__ lbu(i.OutputRegister(), i.MemoryOperand());
break;
@@ -1843,13 +1877,14 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+ SourcePosition pos) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2028,9 +2063,14 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (src.type() == Constant::kFloat32) {
if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
- __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
- __ sw(at, dst);
+ if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+ __ sw(zero_reg, dst);
+ } else {
+ __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ sw(at, dst);
+ }
} else {
+ DCHECK(destination->IsFPRegister());
FloatRegister dst = g.ToSingleRegister(destination);
__ Move(dst, src.ToFloat32());
}
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index 269ac0fed4..45ed041175 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -69,6 +69,14 @@ namespace compiler {
V(MipsAddPair) \
V(MipsSubPair) \
V(MipsMulPair) \
+ V(MipsMaddS) \
+ V(MipsMaddD) \
+ V(MipsMaddfS) \
+ V(MipsMaddfD) \
+ V(MipsMsubS) \
+ V(MipsMsubD) \
+ V(MipsMsubfS) \
+ V(MipsMsubfD) \
V(MipsFloat32RoundDown) \
V(MipsFloat32RoundTruncate) \
V(MipsFloat32RoundUp) \
@@ -126,7 +134,9 @@ namespace compiler {
V(MipsPush) \
V(MipsStoreToStackSlot) \
V(MipsByteSwap32) \
- V(MipsStackClaim)
+ V(MipsStackClaim) \
+ V(MipsSeb) \
+ V(MipsSeh)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 4c353694e8..0a98930b5c 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -185,6 +185,10 @@ void InstructionSelector::VisitLoad(Node* node) {
}
}
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitStore(Node* node) {
MipsOperandGenerator g(this);
@@ -198,7 +202,7 @@ void InstructionSelector::VisitStore(Node* node) {
// TODO(mips): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ DCHECK(CanBeTaggedPointer(rep));
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
@@ -403,6 +407,24 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
void InstructionSelector::VisitWord32Sar(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (m.right().HasValue() && mleft.right().HasValue()) {
+ MipsOperandGenerator g(this);
+ uint32_t sar = m.right().Value();
+ uint32_t shl = mleft.right().Value();
+ if ((sar == shl) && (sar == 16)) {
+ Emit(kMipsSeh, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 24)) {
+ Emit(kMipsSeb, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMipsSar, node);
}
@@ -759,20 +781,126 @@ void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) {
+ MipsOperandGenerator g(this);
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+ // For Add.S(Mul.S(x, y), z):
+ Float32BinopMatcher mleft(m.left().node());
+ if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y).
+ Emit(kMipsMaddS, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(z, x, y).
+ Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+ // For Add.S(x, Mul.S(y, z)):
+ Float32BinopMatcher mright(m.right().node());
+ if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(x, y, z).
+ Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(x, y, z).
+ Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ }
VisitRRR(this, kMipsAddS, node);
}
void InstructionSelector::VisitFloat64Add(Node* node) {
+ MipsOperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+ // For Add.D(Mul.D(x, y), z):
+ Float64BinopMatcher mleft(m.left().node());
+ if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(z, x, y).
+ Emit(kMipsMaddD, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(z, x, y).
+ Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ // For Add.D(x, Mul.D(y, z)):
+ Float64BinopMatcher mright(m.right().node());
+ if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(x, y, z).
+ Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(x, y, z).
+ Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ }
VisitRRR(this, kMipsAddD, node);
}
void InstructionSelector::VisitFloat32Sub(Node* node) {
+ MipsOperandGenerator g(this);
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
+ Float32BinopMatcher mleft(m.left().node());
+ Emit(kMipsMsubS, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
+ Float32BinopMatcher mright(m.right().node());
+ Emit(kMipsMsubfS, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ }
VisitRRR(this, kMipsSubS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
+ MipsOperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ // For Sub.D(Mul.S(x,y), z) select Msub.D(z, x, y).
+ Float64BinopMatcher mleft(m.left().node());
+ Emit(kMipsMsubD, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ // For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
+ Float64BinopMatcher mright(m.right().node());
+ Emit(kMipsMsubfD, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ }
VisitRRR(this, kMipsSubD, node);
}
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 3e2e8e260d..9ed72ae027 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast/scopes.h"
#include "src/compiler/code-generator.h"
+#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
@@ -702,9 +702,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDebugBreak:
__ stop("kArchDebugBreak");
break;
- case kArchImpossible:
- __ Abort(kConversionFromImpossibleValue);
- break;
case kArchComment: {
Address comment_string = i.InputExternalReference(0).address();
__ RecordComment(reinterpret_cast<const char*>(comment_string));
@@ -719,8 +716,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result =
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result = AssembleDeoptimizerCall(
+ deopt_state_id, bailout_type, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -1317,6 +1314,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
+ case kMips64MaddS:
+ __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2));
+ break;
+ case kMips64MaddD:
+ __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2));
+ break;
+ case kMips64MaddfS:
+ __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
+ i.InputFloatRegister(2));
+ break;
+ case kMips64MaddfD:
+ __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
+ break;
+ case kMips64MsubS:
+ __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2));
+ break;
+ case kMips64MsubD:
+ __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2));
+ break;
+ case kMips64MsubfS:
+ __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
+ i.InputFloatRegister(2));
+ break;
+ case kMips64MsubfD:
+ __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
+ break;
case kMips64MulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1644,6 +1673,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
// ... more basic instructions ...
+ case kMips64Seb:
+ __ seb(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kMips64Seh:
+ __ seh(i.OutputRegister(), i.InputRegister(0));
+ break;
case kMips64Lbu:
__ lbu(i.OutputRegister(), i.MemoryOperand());
break;
@@ -2164,13 +2199,14 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+ SourcePosition pos) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2350,9 +2386,14 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (src.type() == Constant::kFloat32) {
if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
- __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
- __ sw(at, dst);
+ if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+ __ sw(zero_reg, dst);
+ } else {
+ __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ sw(at, dst);
+ }
} else {
+ DCHECK(destination->IsFPRegister());
FloatRegister dst = g.ToSingleRegister(destination);
__ Move(dst, src.ToFloat32());
}
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index e3dedd1750..6a444342ac 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -85,6 +85,14 @@ namespace compiler {
V(Mips64SqrtD) \
V(Mips64MaxD) \
V(Mips64MinD) \
+ V(Mips64MaddS) \
+ V(Mips64MaddD) \
+ V(Mips64MaddfS) \
+ V(Mips64MaddfD) \
+ V(Mips64MsubS) \
+ V(Mips64MsubD) \
+ V(Mips64MsubfS) \
+ V(Mips64MsubfD) \
V(Mips64Float64RoundDown) \
V(Mips64Float64RoundTruncate) \
V(Mips64Float64RoundUp) \
@@ -159,7 +167,9 @@ namespace compiler {
V(Mips64StoreToStackSlot) \
V(Mips64ByteSwap64) \
V(Mips64ByteSwap32) \
- V(Mips64StackClaim)
+ V(Mips64StackClaim) \
+ V(Mips64Seb) \
+ V(Mips64Seh)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 1167117d62..6e937e20d7 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -199,6 +199,10 @@ void InstructionSelector::VisitLoad(Node* node) {
EmitLoad(this, node, opcode);
}
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitStore(Node* node) {
Mips64OperandGenerator g(this);
@@ -212,7 +216,7 @@ void InstructionSelector::VisitStore(Node* node) {
// TODO(mips): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ DCHECK(CanBeTaggedPointer(rep));
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
@@ -500,6 +504,28 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
void InstructionSelector::VisitWord32Sar(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (m.right().HasValue() && mleft.right().HasValue()) {
+ Mips64OperandGenerator g(this);
+ uint32_t sar = m.right().Value();
+ uint32_t shl = mleft.right().Value();
+ if ((sar == shl) && (sar == 16)) {
+ Emit(kMips64Seh, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 24)) {
+ Emit(kMips64Seb, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 32)) {
+ Emit(kMips64Shl, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMips64Sar, node);
}
@@ -1198,20 +1224,126 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) {
+ Mips64OperandGenerator g(this);
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+ // For Add.S(Mul.S(x, y), z):
+ Float32BinopMatcher mleft(m.left().node());
+ if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
+ Emit(kMips64MaddS, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ } else if (kArchVariant == kMips64r6) { // Select Maddf.S(z, x, y).
+ Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+ // For Add.S(x, Mul.S(y, z)):
+ Float32BinopMatcher mright(m.right().node());
+ if (kArchVariant == kMips64r2) { // Select Madd.S(x, y, z).
+ Emit(kMips64MaddS, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ } else if (kArchVariant == kMips64r6) { // Select Maddf.S(x, y, z).
+ Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ }
VisitRRR(this, kMips64AddS, node);
}
void InstructionSelector::VisitFloat64Add(Node* node) {
+ Mips64OperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+ // For Add.D(Mul.D(x, y), z):
+ Float64BinopMatcher mleft(m.left().node());
+ if (kArchVariant == kMips64r2) { // Select Madd.D(z, x, y).
+ Emit(kMips64MaddD, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ } else if (kArchVariant == kMips64r6) { // Select Maddf.D(z, x, y).
+ Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ // For Add.D(x, Mul.D(y, z)):
+ Float64BinopMatcher mright(m.right().node());
+ if (kArchVariant == kMips64r2) { // Select Madd.D(x, y, z).
+ Emit(kMips64MaddD, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ } else if (kArchVariant == kMips64r6) { // Select Maddf.D(x, y, z).
+ Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ }
VisitRRR(this, kMips64AddD, node);
}
void InstructionSelector::VisitFloat32Sub(Node* node) {
+ Mips64OperandGenerator g(this);
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+ if (kArchVariant == kMips64r2) {
+ // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
+ Float32BinopMatcher mleft(m.left().node());
+ Emit(kMips64MsubS, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+ if (kArchVariant == kMips64r6) {
+ // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
+ Float32BinopMatcher mright(m.right().node());
+ Emit(kMips64MsubfS, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ }
VisitRRR(this, kMips64SubS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
+ Mips64OperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+ if (kArchVariant == kMips64r2) {
+ // For Sub.D(Mul.S(x,y), z) select Msub.D(z, x, y).
+ Float64BinopMatcher mleft(m.left().node());
+ Emit(kMips64MsubD, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ if (kArchVariant == kMips64r6) {
+ // For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
+ Float64BinopMatcher mright(m.right().node());
+ Emit(kMips64MsubfD, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ }
VisitRRR(this, kMips64SubD, node);
}
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index 482c254de1..d87ece3849 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -424,7 +424,7 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
namespace {
bool IsSlot(const InstructionOperand& op) {
- return op.IsStackSlot() || op.IsDoubleStackSlot();
+ return op.IsStackSlot() || op.IsFPStackSlot();
}
diff --git a/deps/v8/src/compiler/move-optimizer.h b/deps/v8/src/compiler/move-optimizer.h
index 8e932a0d73..ce26a7f988 100644
--- a/deps/v8/src/compiler/move-optimizer.h
+++ b/deps/v8/src/compiler/move-optimizer.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_MOVE_OPTIMIZER_
#include "src/compiler/instruction.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/node-aux-data.h b/deps/v8/src/compiler/node-aux-data.h
index 7a882921a7..b50ff3885d 100644
--- a/deps/v8/src/compiler/node-aux-data.h
+++ b/deps/v8/src/compiler/node-aux-data.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_NODE_AUX_DATA_H_
#include "src/compiler/node.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/node-cache.cc b/deps/v8/src/compiler/node-cache.cc
index 061a3ae4f4..0be6f81bd5 100644
--- a/deps/v8/src/compiler/node-cache.cc
+++ b/deps/v8/src/compiler/node-cache.cc
@@ -6,8 +6,8 @@
#include <cstring>
-#include "src/zone.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 10aed51a57..6c283dc032 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -11,6 +11,7 @@
#include "src/assembler.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
+#include "src/double.h"
namespace v8 {
namespace internal {
@@ -161,6 +162,17 @@ struct FloatMatcher final : public ValueMatcher<T, kOpcode> {
bool IsNegative() const { return this->HasValue() && this->Value() < 0.0; }
bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
bool IsZero() const { return this->Is(0.0) && !std::signbit(this->Value()); }
+ bool IsNormal() const {
+ return this->HasValue() && std::isnormal(this->Value());
+ }
+ bool IsPositiveOrNegativePowerOf2() const {
+ if (!this->HasValue() || (this->Value() == 0.0)) {
+ return false;
+ }
+ Double value = Double(this->Value());
+ return !value.IsInfinite() &&
+ base::bits::IsPowerOfTwo64(value.Significand());
+ }
};
typedef FloatMatcher<float, IrOpcode::kFloat32Constant> Float32Matcher;
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 9812158c4e..ed3c117507 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_NODE_PROPERTIES_H_
#include "src/compiler/node.h"
-#include "src/types.h"
+#include "src/compiler/types.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 493518712c..e940371b85 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -7,8 +7,8 @@
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/types.h"
-#include "src/zone-containers.h"
+#include "src/compiler/types.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index c1b5945e60..5ac2012350 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -134,7 +134,8 @@
V(JSStoreGlobal) \
V(JSDeleteProperty) \
V(JSHasProperty) \
- V(JSInstanceOf)
+ V(JSInstanceOf) \
+ V(JSOrdinaryHasInstance)
#define JS_CONTEXT_OP_LIST(V) \
V(JSLoadContext) \
@@ -150,10 +151,8 @@
V(JSCallFunction) \
V(JSCallRuntime) \
V(JSConvertReceiver) \
- V(JSForInDone) \
V(JSForInNext) \
V(JSForInPrepare) \
- V(JSForInStep) \
V(JSLoadMessage) \
V(JSStoreMessage) \
V(JSGeneratorStore) \
@@ -181,7 +180,8 @@
V(ChangeTaggedToBit) \
V(ChangeBitToTagged) \
V(TruncateTaggedToWord32) \
- V(TruncateTaggedToFloat64)
+ V(TruncateTaggedToFloat64) \
+ V(TruncateTaggedToBit)
#define SIMPLIFIED_CHECKED_OP_LIST(V) \
V(CheckedInt32Add) \
@@ -191,12 +191,15 @@
V(CheckedUint32Div) \
V(CheckedUint32Mod) \
V(CheckedInt32Mul) \
+ V(CheckedInt32ToTaggedSigned) \
V(CheckedUint32ToInt32) \
+ V(CheckedUint32ToTaggedSigned) \
V(CheckedFloat64ToInt32) \
V(CheckedTaggedSignedToInt32) \
V(CheckedTaggedToInt32) \
V(CheckedTruncateTaggedToWord32) \
- V(CheckedTaggedToFloat64)
+ V(CheckedTaggedToFloat64) \
+ V(CheckedTaggedToTaggedSigned)
#define SIMPLIFIED_COMPARE_BINOP_LIST(V) \
V(NumberEqual) \
@@ -270,6 +273,7 @@
V(NumberTan) \
V(NumberTanh) \
V(NumberTrunc) \
+ V(NumberToBoolean) \
V(NumberToInt32) \
V(NumberToUint32) \
V(NumberSilenceNaN)
@@ -281,13 +285,14 @@
V(BooleanNot) \
V(StringCharCodeAt) \
V(StringFromCharCode) \
+ V(StringFromCodePoint) \
V(CheckBounds) \
V(CheckIf) \
V(CheckMaps) \
V(CheckNumber) \
V(CheckString) \
- V(CheckTaggedPointer) \
- V(CheckTaggedSigned) \
+ V(CheckSmi) \
+ V(CheckHeapObject) \
V(CheckFloat64Hole) \
V(CheckTaggedHole) \
V(ConvertTaggedHoleToUndefined) \
@@ -306,6 +311,7 @@
V(ObjectIsSmi) \
V(ObjectIsString) \
V(ObjectIsUndetectable) \
+ V(ArrayBufferWasNeutered) \
V(EnsureWritableFastElements) \
V(MaybeGrowFastElements) \
V(TransitionElementsKind)
@@ -338,59 +344,131 @@
V(Float64LessThan) \
V(Float64LessThanOrEqual)
+#define MACHINE_UNOP_32_LIST(V) \
+ V(Word32Clz) \
+ V(Word32Ctz) \
+ V(Word32ReverseBits) \
+ V(Word32ReverseBytes)
+
+#define MACHINE_BINOP_32_LIST(V) \
+ V(Word32And) \
+ V(Word32Or) \
+ V(Word32Xor) \
+ V(Word32Shl) \
+ V(Word32Shr) \
+ V(Word32Sar) \
+ V(Word32Ror) \
+ V(Int32Add) \
+ V(Int32AddWithOverflow) \
+ V(Int32Sub) \
+ V(Int32SubWithOverflow) \
+ V(Int32Mul) \
+ V(Int32MulWithOverflow) \
+ V(Int32MulHigh) \
+ V(Int32Div) \
+ V(Int32Mod) \
+ V(Uint32Div) \
+ V(Uint32Mod) \
+ V(Uint32MulHigh)
+
+#define MACHINE_BINOP_64_LIST(V) \
+ V(Word64And) \
+ V(Word64Or) \
+ V(Word64Xor) \
+ V(Word64Shl) \
+ V(Word64Shr) \
+ V(Word64Sar) \
+ V(Word64Ror) \
+ V(Int64Add) \
+ V(Int64AddWithOverflow) \
+ V(Int64Sub) \
+ V(Int64SubWithOverflow) \
+ V(Int64Mul) \
+ V(Int64Div) \
+ V(Int64Mod) \
+ V(Uint64Div) \
+ V(Uint64Mod)
+
+#define MACHINE_FLOAT32_UNOP_LIST(V) \
+ V(Float32Abs) \
+ V(Float32Neg) \
+ V(Float32RoundDown) \
+ V(Float32RoundTiesEven) \
+ V(Float32RoundTruncate) \
+ V(Float32RoundUp) \
+ V(Float32Sqrt)
+
+#define MACHINE_FLOAT32_BINOP_LIST(V) \
+ V(Float32Add) \
+ V(Float32Sub) \
+ V(Float32Mul) \
+ V(Float32Div) \
+ V(Float32Max) \
+ V(Float32Min)
+
+#define MACHINE_FLOAT64_UNOP_LIST(V) \
+ V(Float64Abs) \
+ V(Float64Acos) \
+ V(Float64Acosh) \
+ V(Float64Asin) \
+ V(Float64Asinh) \
+ V(Float64Atan) \
+ V(Float64Atanh) \
+ V(Float64Cbrt) \
+ V(Float64Cos) \
+ V(Float64Cosh) \
+ V(Float64Exp) \
+ V(Float64Expm1) \
+ V(Float64Log) \
+ V(Float64Log1p) \
+ V(Float64Log10) \
+ V(Float64Log2) \
+ V(Float64Neg) \
+ V(Float64RoundDown) \
+ V(Float64RoundTiesAway) \
+ V(Float64RoundTiesEven) \
+ V(Float64RoundTruncate) \
+ V(Float64RoundUp) \
+ V(Float64Sin) \
+ V(Float64Sinh) \
+ V(Float64Sqrt) \
+ V(Float64Tan) \
+ V(Float64Tanh)
+
+#define MACHINE_FLOAT64_BINOP_LIST(V) \
+ V(Float64Atan2) \
+ V(Float64Max) \
+ V(Float64Min) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Mod) \
+ V(Float64Pow)
+
#define MACHINE_OP_LIST(V) \
+ MACHINE_UNOP_32_LIST(V) \
+ MACHINE_BINOP_32_LIST(V) \
+ MACHINE_BINOP_64_LIST(V) \
MACHINE_COMPARE_BINOP_LIST(V) \
+ MACHINE_FLOAT32_BINOP_LIST(V) \
+ MACHINE_FLOAT32_UNOP_LIST(V) \
+ MACHINE_FLOAT64_BINOP_LIST(V) \
+ MACHINE_FLOAT64_UNOP_LIST(V) \
V(DebugBreak) \
V(Comment) \
V(Load) \
V(Store) \
V(StackSlot) \
- V(Word32And) \
- V(Word32Or) \
- V(Word32Xor) \
- V(Word32Shl) \
- V(Word32Shr) \
- V(Word32Sar) \
- V(Word32Ror) \
- V(Word32Clz) \
- V(Word32Ctz) \
- V(Word32ReverseBits) \
- V(Word32ReverseBytes) \
V(Word32Popcnt) \
V(Word64Popcnt) \
- V(Word64And) \
- V(Word64Or) \
- V(Word64Xor) \
- V(Word64Shl) \
- V(Word64Shr) \
- V(Word64Sar) \
- V(Word64Ror) \
V(Word64Clz) \
V(Word64Ctz) \
V(Word64ReverseBits) \
V(Word64ReverseBytes) \
- V(Int32Add) \
- V(Int32AddWithOverflow) \
- V(Int32Sub) \
- V(Int32SubWithOverflow) \
- V(Int32Mul) \
- V(Int32MulWithOverflow) \
- V(Int32MulHigh) \
- V(Int32Div) \
- V(Int32Mod) \
- V(Uint32Div) \
- V(Uint32Mod) \
- V(Uint32MulHigh) \
- V(Int64Add) \
- V(Int64AddWithOverflow) \
- V(Int64Sub) \
- V(Int64SubWithOverflow) \
- V(Int64Mul) \
- V(Int64Div) \
- V(Int64Mod) \
- V(Uint64Div) \
- V(Uint64Mod) \
+ V(BitcastTaggedToWord) \
V(BitcastWordToTagged) \
+ V(BitcastWordToTaggedSigned) \
V(TruncateFloat64ToWord32) \
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
@@ -407,12 +485,6 @@
V(ChangeInt32ToInt64) \
V(ChangeUint32ToFloat64) \
V(ChangeUint32ToUint64) \
- V(ImpossibleToBit) \
- V(ImpossibleToWord32) \
- V(ImpossibleToWord64) \
- V(ImpossibleToFloat32) \
- V(ImpossibleToFloat64) \
- V(ImpossibleToTagged) \
V(TruncateFloat64ToFloat32) \
V(TruncateInt64ToInt32) \
V(RoundFloat64ToInt32) \
@@ -426,55 +498,6 @@
V(BitcastFloat64ToInt64) \
V(BitcastInt32ToFloat32) \
V(BitcastInt64ToFloat64) \
- V(Float32Add) \
- V(Float32Sub) \
- V(Float32Neg) \
- V(Float32Mul) \
- V(Float32Div) \
- V(Float32Abs) \
- V(Float32Sqrt) \
- V(Float32RoundDown) \
- V(Float32Max) \
- V(Float32Min) \
- V(Float64Add) \
- V(Float64Sub) \
- V(Float64Neg) \
- V(Float64Mul) \
- V(Float64Div) \
- V(Float64Mod) \
- V(Float64Max) \
- V(Float64Min) \
- V(Float64Abs) \
- V(Float64Acos) \
- V(Float64Acosh) \
- V(Float64Asin) \
- V(Float64Asinh) \
- V(Float64Atan) \
- V(Float64Atanh) \
- V(Float64Atan2) \
- V(Float64Cbrt) \
- V(Float64Cos) \
- V(Float64Cosh) \
- V(Float64Exp) \
- V(Float64Expm1) \
- V(Float64Log) \
- V(Float64Log1p) \
- V(Float64Log10) \
- V(Float64Log2) \
- V(Float64Pow) \
- V(Float64Sin) \
- V(Float64Sinh) \
- V(Float64Sqrt) \
- V(Float64Tan) \
- V(Float64Tanh) \
- V(Float64RoundDown) \
- V(Float32RoundUp) \
- V(Float64RoundUp) \
- V(Float32RoundTruncate) \
- V(Float64RoundTruncate) \
- V(Float64RoundTiesAway) \
- V(Float32RoundTiesEven) \
- V(Float64RoundTiesEven) \
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
V(Float64InsertLowWord32) \
@@ -492,6 +515,7 @@
V(Word32PairShl) \
V(Word32PairShr) \
V(Word32PairSar) \
+ V(ProtectedLoad) \
V(AtomicLoad) \
V(AtomicStore) \
V(UnsafePointerAdd)
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index f3ef778dc0..4295a22287 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -5,10 +5,10 @@
#include "src/compiler/operation-typer.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/type-cache.h"
+#include "src/compiler/types.h"
#include "src/factory.h"
#include "src/isolate.h"
-#include "src/type-cache.h"
-#include "src/types.h"
#include "src/objects-inl.h"
@@ -460,6 +460,16 @@ Type* OperationTyper::NumberTrunc(Type* type) {
return cache_.kIntegerOrMinusZeroOrNaN;
}
+Type* OperationTyper::NumberToBoolean(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ if (!type->IsInhabited()) return Type::None();
+ if (type->Is(cache_.kZeroish)) return singleton_false_;
+ if (type->Is(Type::PlainNumber()) && (type->Max() < 0 || 0 < type->Min())) {
+ return singleton_true_; // Ruled out nan, -0 and +0.
+ }
+ return Type::Boolean();
+}
+
Type* OperationTyper::NumberToInt32(Type* type) {
DCHECK(type->Is(Type::Number()));
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index dcfe0c45ea..09f063c14e 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -11,15 +11,17 @@
namespace v8 {
namespace internal {
+// Forward declarations.
class Isolate;
class RangeType;
-class Type;
-class TypeCache;
class Zone;
namespace compiler {
+// Forward declarations.
class Operator;
+class Type;
+class TypeCache;
class OperationTyper {
public:
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 68d884d62d..0a9e6448e2 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -61,6 +61,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSHasProperty:
case IrOpcode::kJSInstanceOf:
+ case IrOpcode::kJSOrdinaryHasInstance:
// Object operations
case IrOpcode::kJSCreate:
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index b6ec2c618c..8e3a9d1725 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -10,7 +10,7 @@
#include "src/base/flags.h"
#include "src/base/functional.h"
#include "src/handles.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index 187e61230c..6d61affe83 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -2,22 +2,23 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/osr.h"
#include "src/ast/scopes.h"
-#include "src/compiler.h"
+#include "src/compilation-info.h"
#include "src/compiler/all-nodes.h"
-#include "src/compiler/common-operator.h"
#include "src/compiler/common-operator-reducer.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/frame.h"
-#include "src/compiler/graph.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/graph.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/loop-analysis.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-marker.h"
-#include "src/compiler/osr.h"
+#include "src/compiler/node.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -270,11 +271,8 @@ void OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
}
}
- if (osr_loop_entry == nullptr) {
- // No OSR entry found, do nothing.
- CHECK(osr_normal_entry);
- return;
- }
+ CHECK_NOT_NULL(osr_normal_entry); // Should have found the OSR normal entry.
+ CHECK_NOT_NULL(osr_loop_entry); // Should have found the OSR loop entry.
for (Node* use : osr_loop_entry->uses()) {
if (use->opcode() == IrOpcode::kLoop) {
diff --git a/deps/v8/src/compiler/osr.h b/deps/v8/src/compiler/osr.h
index 89773f0ec5..1f562c56bf 100644
--- a/deps/v8/src/compiler/osr.h
+++ b/deps/v8/src/compiler/osr.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_OSR_H_
#define V8_COMPILER_OSR_H_
-#include "src/zone.h"
+#include "src/zone/zone.h"
// TurboFan structures OSR graphs in a way that separates almost all phases of
// compilation from OSR implementation details. This is accomplished with
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index 5b97abe5eb..a032c3dac2 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -4,9 +4,10 @@
#include <memory>
-#include "src/compiler.h"
+#include "src/compilation-info.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/zone-pool.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index ba7aa96085..805b687e7d 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -10,6 +10,8 @@
#include "src/base/adapters.h"
#include "src/base/platform/elapsed-timer.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/basic-block-instrumentor.h"
@@ -46,6 +48,7 @@
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
#include "src/compiler/loop-variable-optimizer.h"
+#include "src/compiler/machine-graph-verifier.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
#include "src/compiler/move-optimizer.h"
@@ -63,6 +66,7 @@
#include "src/compiler/store-store-elimination.h"
#include "src/compiler/tail-call-optimization.h"
#include "src/compiler/type-hint-analyzer.h"
+#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
@@ -426,7 +430,8 @@ void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
}
if (FLAG_trace_turbo_graph || FLAG_trace_turbo_scheduler) {
AllowHandleDereference allow_deref;
- OFStream os(stdout);
+ CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
os << "-- Schedule --------------------------------------\n" << *schedule;
}
}
@@ -439,14 +444,14 @@ class AstGraphBuilderWithPositions final : public AstGraphBuilder {
LoopAssignmentAnalysis* loop_assignment,
TypeHintAnalysis* type_hint_analysis,
SourcePositionTable* source_positions)
- : AstGraphBuilder(local_zone, info, jsgraph, loop_assignment,
+ : AstGraphBuilder(local_zone, info, jsgraph, 1.0f, loop_assignment,
type_hint_analysis),
source_positions_(source_positions),
start_position_(info->shared_info()->start_position()) {}
- bool CreateGraph(bool stack_check) {
+ bool CreateGraph() {
SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
- return AstGraphBuilder::CreateGraph(stack_check);
+ return AstGraphBuilder::CreateGraph();
}
#define DEF_VISIT(type) \
@@ -562,7 +567,7 @@ class PipelineCompilationJob final : public CompilationJob {
PipelineCompilationJob(Isolate* isolate, Handle<JSFunction> function)
// Note that the CompilationInfo is not initialized at the time we pass it
// to the CompilationJob constructor, but it is not dereferenced there.
- : CompilationJob(&info_, "TurboFan"),
+ : CompilationJob(isolate, &info_, "TurboFan"),
zone_(isolate->allocator()),
zone_pool_(isolate->allocator()),
parse_info_(&zone_, function),
@@ -601,6 +606,9 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
if (FLAG_native_context_specialization) {
info()->MarkAsNativeContextSpecializing();
}
+ if (FLAG_turbo_inlining) {
+ info()->MarkAsInliningEnabled();
+ }
}
if (!info()->shared_info()->asm_function() || FLAG_turbo_asm_deoptimization) {
info()->MarkAsDeoptimizationEnabled();
@@ -615,14 +623,6 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
}
- // TODO(mstarzinger): Hack to ensure that certain call descriptors are
- // initialized on the main thread, since it is needed off-thread by the
- // effect control linearizer.
- CodeFactory::CopyFastSmiOrObjectElements(info()->isolate());
- CodeFactory::GrowFastDoubleElements(info()->isolate());
- CodeFactory::GrowFastSmiOrObjectElements(info()->isolate());
- CodeFactory::ToNumber(info()->isolate());
-
linkage_ = new (&zone_) Linkage(Linkage::ComputeIncoming(&zone_, info()));
if (!pipeline_.CreateGraph()) {
@@ -660,7 +660,8 @@ class PipelineWasmCompilationJob final : public CompilationJob {
explicit PipelineWasmCompilationJob(CompilationInfo* info, Graph* graph,
CallDescriptor* descriptor,
SourcePositionTable* source_positions)
- : CompilationJob(info, "TurboFan", State::kReadyToExecute),
+ : CompilationJob(info->isolate(), info, "TurboFan",
+ State::kReadyToExecute),
zone_pool_(info->isolate()->allocator()),
data_(&zone_pool_, info, graph, source_positions),
pipeline_(&data_),
@@ -756,18 +757,17 @@ struct GraphBuilderPhase {
static const char* phase_name() { return "graph builder"; }
void Run(PipelineData* data, Zone* temp_zone) {
- bool stack_check = !data->info()->IsStub();
bool succeeded = false;
if (data->info()->is_optimizing_from_bytecode()) {
BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
- data->jsgraph());
+ data->jsgraph(), 1.0f);
succeeded = graph_builder.CreateGraph();
} else {
AstGraphBuilderWithPositions graph_builder(
temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
data->type_hint_analysis(), data->source_positions());
- succeeded = graph_builder.CreateGraph(stack_check);
+ succeeded = graph_builder.CreateGraph();
}
if (!succeeded) {
@@ -786,15 +786,19 @@ struct InliningPhase {
data->common());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
- JSCallReducer call_reducer(data->jsgraph(),
- data->info()->is_deoptimization_enabled()
- ? JSCallReducer::kDeoptimizationEnabled
- : JSCallReducer::kNoFlags,
- data->native_context());
+ JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
+ if (data->info()->is_bailout_on_uninitialized()) {
+ call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
+ }
+ if (data->info()->is_deoptimization_enabled()) {
+ call_reducer_flags |= JSCallReducer::kDeoptimizationEnabled;
+ }
+ JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
+ call_reducer_flags, data->native_context());
JSContextSpecialization context_specialization(
&graph_reducer, data->jsgraph(),
data->info()->is_function_context_specializing()
- ? data->info()->context()
+ ? handle(data->info()->context())
: MaybeHandle<Context>());
JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
data->jsgraph());
@@ -837,9 +841,7 @@ struct InliningPhase {
AddReducer(data, &graph_reducer, &context_specialization);
AddReducer(data, &graph_reducer, &intrinsic_lowering);
AddReducer(data, &graph_reducer, &call_reducer);
- if (!data->info()->is_optimizing_from_bytecode()) {
- AddReducer(data, &graph_reducer, &inlining);
- }
+ AddReducer(data, &graph_reducer, &inlining);
graph_reducer.ReduceGraph();
}
};
@@ -913,7 +915,7 @@ struct TypedLoweringPhase {
: MaybeHandle<LiteralsArray>();
JSCreateLowering create_lowering(
&graph_reducer, data->info()->dependencies(), data->jsgraph(),
- literals_array, temp_zone);
+ literals_array, data->native_context(), temp_zone);
JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
if (data->info()->is_deoptimization_enabled()) {
typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
@@ -921,6 +923,12 @@ struct TypedLoweringPhase {
JSTypedLowering typed_lowering(&graph_reducer, data->info()->dependencies(),
typed_lowering_flags, data->jsgraph(),
temp_zone);
+ TypedOptimization typed_optimization(
+ &graph_reducer, data->info()->dependencies(),
+ data->info()->is_deoptimization_enabled()
+ ? TypedOptimization::kDeoptimizationEnabled
+ : TypedOptimization::kNoFlags,
+ data->jsgraph());
SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -930,6 +938,7 @@ struct TypedLoweringPhase {
if (data->info()->is_deoptimization_enabled()) {
AddReducer(data, &graph_reducer, &create_lowering);
}
+ AddReducer(data, &graph_reducer, &typed_optimization);
AddReducer(data, &graph_reducer, &typed_lowering);
AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &checkpoint_elimination);
@@ -1065,14 +1074,13 @@ struct EffectControlLinearizationPhase {
};
// The store-store elimination greatly benefits from doing a common operator
-// reducer just before it, to eliminate conditional deopts with a constant
-// condition.
+// reducer and dead code elimination just before it, to eliminate conditional
+// deopts with a constant condition.
struct DeadCodeEliminationPhase {
- static const char* phase_name() { return "common operator reducer"; }
+ static const char* phase_name() { return "dead code elimination"; }
void Run(PipelineData* data, Zone* temp_zone) {
- // Run the common operator reducer.
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
@@ -1225,8 +1233,17 @@ struct InstructionSelectionPhase {
data->schedule(), data->source_positions(), data->frame(),
data->info()->is_source_positions_enabled()
? InstructionSelector::kAllSourcePositions
- : InstructionSelector::kCallSourcePositions);
- selector.SelectInstructions();
+ : InstructionSelector::kCallSourcePositions,
+ InstructionSelector::SupportedFeatures(),
+ FLAG_turbo_instruction_scheduling
+ ? InstructionSelector::kEnableScheduling
+ : InstructionSelector::kDisableScheduling,
+ data->info()->will_serialize()
+ ? InstructionSelector::kEnableSerialization
+ : InstructionSelector::kDisableSerialization);
+ if (!selector.SelectInstructions()) {
+ data->set_compilation_failed();
+ }
}
};
@@ -1426,7 +1443,8 @@ struct PrintGraphPhase {
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
AllowHandleDereference allow_deref;
- OFStream os(stdout);
+ CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
os << "-- Graph after " << phase << " -- " << std::endl;
os << AsRPO(*graph);
}
@@ -1459,7 +1477,8 @@ bool PipelineImpl::CreateGraph() {
data->BeginPhaseKind("graph creation");
if (FLAG_trace_turbo) {
- OFStream os(stdout);
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
os << "---------------------------------------------------\n"
<< "Begin compiling method " << info()->GetDebugName().get()
<< " using Turbofan" << std::endl;
@@ -1585,7 +1604,7 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
RunPrintAndVerify("Effect and control linearized", true);
Run<DeadCodeEliminationPhase>();
- RunPrintAndVerify("Common operator reducer", true);
+ RunPrintAndVerify("Dead code elimination", true);
if (FLAG_turbo_store_elimination) {
Run<StoreStoreEliminationPhase>();
@@ -1623,6 +1642,7 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
Code::Flags flags,
const char* debug_name) {
CompilationInfo info(CStrVector(debug_name), isolate, graph->zone(), flags);
+ if (isolate->serializer_enabled()) info.PrepareForSerializing();
// Construct a pipeline for scheduling and code generation.
ZonePool zone_pool(isolate->allocator());
@@ -1717,7 +1737,7 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool run_verifier) {
CompilationInfo info(ArrayVector("testing"), sequence->isolate(),
- sequence->zone());
+ sequence->zone(), Code::ComputeFlags(Code::STUB));
ZonePool zone_pool(sequence->isolate()->allocator());
PipelineData data(&zone_pool, &info, sequence);
PipelineImpl pipeline(&data);
@@ -1740,11 +1760,22 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage) {
info(), data->graph(), data->schedule()));
}
+ if (FLAG_turbo_verify_machine_graph) {
+ Zone temp_zone(data->isolate()->allocator());
+ MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
+ &temp_zone);
+ }
+
data->InitializeInstructionSequence(call_descriptor);
data->InitializeFrameData(call_descriptor);
// Select and schedule instructions covering the scheduled graph.
Run<InstructionSelectionPhase>(linkage);
+ if (data->compilation_failed()) {
+ info()->AbortOptimization(kCodeGenerationFailed);
+ data->EndPhaseKind();
+ return false;
+ }
if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
AllowHandleDereference allow_deref;
@@ -1825,7 +1856,8 @@ Handle<Code> PipelineImpl::GenerateCode(Linkage* linkage) {
json_of << data->source_position_output();
json_of << "}";
- OFStream os(stdout);
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
os << "---------------------------------------------------\n"
<< "Finished compiling method " << info()->GetDebugName().get()
<< " using Turbofan" << std::endl;
@@ -1876,7 +1908,8 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<BuildLiveRangesPhase>();
if (FLAG_trace_turbo_graph) {
AllowHandleDereference allow_deref;
- OFStream os(stdout);
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
os << "----- Instruction sequence before register allocation -----\n"
<< PrintableInstructionSequence({config, data->sequence()});
}
@@ -1911,7 +1944,8 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
if (FLAG_trace_turbo_graph) {
AllowHandleDereference allow_deref;
- OFStream os(stdout);
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
os << "----- Instruction sequence after register allocation -----\n"
<< PrintableInstructionSequence({config, data->sequence()});
}
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 9db36b4faa..f8f3099209 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -4,7 +4,7 @@
#include "src/compiler/code-generator.h"
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
@@ -1077,9 +1077,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDebugBreak:
__ stop("kArchDebugBreak");
break;
- case kArchImpossible:
- __ Abort(kConversionFromImpossibleValue);
- break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -1090,8 +1087,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result =
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result = AssembleDeoptimizerCall(
+ deopt_state_id, bailout_type, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -2071,7 +2068,8 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+ SourcePosition pos) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
@@ -2080,7 +2078,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index bad8ded131..a2eb7b8f22 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -229,6 +229,10 @@ void InstructionSelector::VisitLoad(Node* node) {
}
}
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitStore(Node* node) {
PPCOperandGenerator g(this);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index ae40f55c12..cdf45ab776 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -402,6 +402,30 @@ Node* RawMachineAssembler::TailCallRuntime5(Runtime::FunctionId function,
return tail_call;
}
+Node* RawMachineAssembler::TailCallRuntime6(Runtime::FunctionId function,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4, Node* arg5, Node* arg6,
+ Node* context) {
+ const int kArity = 6;
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, kArity, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(kArity);
+
+ Node* nodes[] = {centry, arg1, arg2, arg3, arg4,
+ arg5, arg6, ref, arity, context};
+ Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
+}
+
Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
Node* function) {
MachineSignature::Builder builder(zone(), 1, 0);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index c7d42369b9..cdd368ca7c 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -135,6 +135,7 @@ class RawMachineAssembler {
return AddNode(machine()->Store(StoreRepresentation(rep, write_barrier)),
base, index, value);
}
+ Node* Retain(Node* value) { return AddNode(common()->Retain(), value); }
// Unaligned memory operations
Node* UnalignedLoad(MachineType rep, Node* base) {
@@ -531,9 +532,15 @@ class RawMachineAssembler {
}
// Conversions.
+ Node* BitcastTaggedToWord(Node* a) {
+ return AddNode(machine()->BitcastTaggedToWord(), a);
+ }
Node* BitcastWordToTagged(Node* a) {
return AddNode(machine()->BitcastWordToTagged(), a);
}
+ Node* BitcastWordToTaggedSigned(Node* a) {
+ return AddNode(machine()->BitcastWordToTaggedSigned(), a);
+ }
Node* TruncateFloat64ToWord32(Node* a) {
return AddNode(machine()->TruncateFloat64ToWord32(), a);
}
@@ -659,6 +666,9 @@ class RawMachineAssembler {
Node* Float64InsertHighWord32(Node* a, Node* b) {
return AddNode(machine()->Float64InsertHighWord32(), a, b);
}
+ Node* Float64SilenceNaN(Node* a) {
+ return AddNode(machine()->Float64SilenceNaN(), a);
+ }
// Stack operations.
Node* LoadStackPointer() { return AddNode(machine()->LoadStackPointer()); }
@@ -744,6 +754,10 @@ class RawMachineAssembler {
// Tail call to a runtime function with five arguments.
Node* TailCallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
Node* arg3, Node* arg4, Node* arg5, Node* context);
+ // Tail call to a runtime function with six arguments.
+ Node* TailCallRuntime6(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5, Node* arg6,
+ Node* context);
// ===========================================================================
// The following utility methods deal with control flow, hence might switch
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index c671fc23b8..6dcf2bf4cf 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -19,12 +19,12 @@ Reduction RedundancyElimination::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kCheckBounds:
case IrOpcode::kCheckFloat64Hole:
+ case IrOpcode::kCheckHeapObject:
case IrOpcode::kCheckIf:
case IrOpcode::kCheckNumber:
+ case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
case IrOpcode::kCheckTaggedHole:
- case IrOpcode::kCheckTaggedPointer:
- case IrOpcode::kCheckTaggedSigned:
case IrOpcode::kCheckedFloat64ToInt32:
case IrOpcode::kCheckedInt32Add:
case IrOpcode::kCheckedInt32Sub:
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/register-allocator-verifier.h
index 2db8af5728..9a605d62da 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/register-allocator-verifier.h
@@ -5,7 +5,7 @@
#ifndef V8_REGISTER_ALLOCATOR_VERIFIER_H_
#define V8_REGISTER_ALLOCATOR_VERIFIER_H_
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 5b55b0224c..efcdcb42e6 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -1041,6 +1041,8 @@ void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
TopLevel()->UpdateParentForAllChildren(TopLevel());
TopLevel()->UpdateSpillRangePostMerge(other);
+ TopLevel()->set_has_slot_use(TopLevel()->has_slot_use() ||
+ other->has_slot_use());
#if DEBUG
Verify();
@@ -1113,9 +1115,9 @@ void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
first_interval_ = interval;
} else {
// Order of instruction's processing (see ProcessInstructions) guarantees
- // that each new use interval either precedes or intersects with
- // last added interval.
- DCHECK(start < first_interval_->end());
+ // that each new use interval either precedes, intersects with or touches
+ // the last added interval.
+ DCHECK(start <= first_interval_->end());
first_interval_->set_start(Min(start, first_interval_->start()));
first_interval_->set_end(Max(end, first_interval_->end()));
}
@@ -2383,17 +2385,15 @@ LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
return ret;
}
-
-void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand(
- bool operands_only) {
+void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
size_t initial_range_count = data()->live_ranges().size();
for (size_t i = 0; i < initial_range_count; ++i) {
TopLevelLiveRange* range = data()->live_ranges()[i];
if (!CanProcessRange(range)) continue;
- if (range->HasNoSpillType() || (operands_only && range->HasSpillRange())) {
+ if (range->HasNoSpillType() ||
+ (range->HasSpillRange() && !range->has_slot_use())) {
continue;
}
-
LifetimePosition start = range->Start();
TRACE("Live range %d:%d is defined by a spill operand.\n",
range->TopLevel()->vreg(), range->relative_id());
@@ -2571,8 +2571,7 @@ void LinearScanAllocator::AllocateRegisters() {
DCHECK(active_live_ranges().empty());
DCHECK(inactive_live_ranges().empty());
- SplitAndSpillRangesDefinedByMemoryOperand(code()->VirtualRegisterCount() <=
- num_allocatable_registers());
+ SplitAndSpillRangesDefinedByMemoryOperand();
for (TopLevelLiveRange* range : data()->live_ranges()) {
if (!CanProcessRange(range)) continue;
@@ -3273,8 +3272,8 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
spill_operand = range->GetSpillRangeOperand();
}
DCHECK(spill_operand.IsStackSlot());
- DCHECK_EQ(MachineRepresentation::kTagged,
- AllocatedOperand::cast(spill_operand).representation());
+ DCHECK(CanBeTaggedPointer(
+ AllocatedOperand::cast(spill_operand).representation()));
}
LiveRange* cur = range;
@@ -3336,8 +3335,8 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
safe_point);
InstructionOperand operand = cur->GetAssignedOperand();
DCHECK(!operand.IsStackSlot());
- DCHECK_EQ(MachineRepresentation::kTagged,
- AllocatedOperand::cast(operand).representation());
+ DCHECK(CanBeTaggedPointer(
+ AllocatedOperand::cast(operand).representation()));
map->RecordReference(AllocatedOperand::cast(operand));
}
}
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index 6bfc6c410a..2089ea2fc1 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -8,7 +8,7 @@
#include "src/compiler/instruction.h"
#include "src/ostreams.h"
#include "src/register-configuration.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -965,7 +965,7 @@ class RegisterAllocator : public ZoneObject {
// Find the optimal split for ranges defined by a memory operand, e.g.
// constants or function parameters passed on the stack.
- void SplitAndSpillRangesDefinedByMemoryOperand(bool operands_only);
+ void SplitAndSpillRangesDefinedByMemoryOperand();
// Split the given range at the given position.
// If range starts at or after the given position then the
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 5427bdb1cd..22d809b9d6 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -142,10 +142,17 @@ Node* RepresentationChanger::GetRepresentationFor(
switch (use_info.representation()) {
case MachineRepresentation::kTaggedSigned:
+ DCHECK(use_info.type_check() == TypeCheckKind::kNone ||
+ use_info.type_check() == TypeCheckKind::kSignedSmall);
+ return GetTaggedSignedRepresentationFor(node, output_rep, output_type,
+ use_node, use_info);
case MachineRepresentation::kTaggedPointer:
+ DCHECK(use_info.type_check() == TypeCheckKind::kNone);
+ return GetTaggedPointerRepresentationFor(node, output_rep, output_type);
case MachineRepresentation::kTagged:
DCHECK(use_info.type_check() == TypeCheckKind::kNone);
- return GetTaggedRepresentationFor(node, output_rep, output_type);
+ return GetTaggedRepresentationFor(node, output_rep, output_type,
+ use_info.truncation());
case MachineRepresentation::kFloat32:
DCHECK(use_info.type_check() == TypeCheckKind::kNone);
return GetFloat32RepresentationFor(node, output_rep, output_type,
@@ -174,10 +181,132 @@ Node* RepresentationChanger::GetRepresentationFor(
return nullptr;
}
-Node* RepresentationChanger::GetTaggedRepresentationFor(
+Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ Node* use_node, UseInfo use_info) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kNumberConstant:
+ if (output_type->Is(Type::SignedSmall())) {
+ return node;
+ }
+ break;
+ default:
+ break;
+ }
+ // Select the correct X -> Tagged operator.
+ const Operator* op;
+ if (output_type->Is(Type::None())) {
+ // This is an impossible value; it should not be used at runtime.
+ // We just provide a dummy value here.
+ return jsgraph()->Constant(0);
+ } else if (IsWord(output_rep)) {
+ if (output_type->Is(Type::Signed31())) {
+ op = simplified()->ChangeInt31ToTaggedSigned();
+ } else if (output_type->Is(Type::Signed32())) {
+ if (SmiValuesAre32Bits()) {
+ op = simplified()->ChangeInt32ToTagged();
+ } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ op = simplified()->CheckedInt32ToTaggedSigned();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedSigned);
+ }
+ } else if (output_type->Is(Type::Unsigned32()) &&
+ use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ op = simplified()->CheckedUint32ToTaggedSigned();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedSigned);
+ }
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ if (output_type->Is(Type::Signed31())) {
+ // float64 -> int32 -> tagged signed
+ node = InsertChangeFloat64ToInt32(node);
+ op = simplified()->ChangeInt31ToTaggedSigned();
+ } else if (output_type->Is(Type::Signed32())) {
+ // float64 -> int32 -> tagged signed
+ node = InsertChangeFloat64ToInt32(node);
+ if (SmiValuesAre32Bits()) {
+ op = simplified()->ChangeInt32ToTagged();
+ } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ op = simplified()->CheckedInt32ToTaggedSigned();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedSigned);
+ }
+ } else if (output_type->Is(Type::Unsigned32()) &&
+ use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ // float64 -> uint32 -> tagged signed
+ node = InsertChangeFloat64ToUint32(node);
+ op = simplified()->CheckedUint32ToTaggedSigned();
+ } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ op = simplified()->CheckedFloat64ToInt32(
+ output_type->Maybe(Type::MinusZero())
+ ? CheckForMinusZeroMode::kCheckForMinusZero
+ : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ node = InsertConversion(node, op, use_node);
+ if (SmiValuesAre32Bits()) {
+ op = simplified()->ChangeInt32ToTagged();
+ } else {
+ op = simplified()->CheckedInt32ToTaggedSigned();
+ }
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedSigned);
+ }
+ } else if (CanBeTaggedPointer(output_rep) &&
+ use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ op = simplified()->CheckedTaggedToTaggedSigned();
+ } else if (output_rep == MachineRepresentation::kBit &&
+ use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ // TODO(turbofan): Consider adding a Bailout operator that just deopts.
+ // Also use that for MachineRepresentation::kPointer case above.
+ node = InsertChangeBitToTagged(node);
+ op = simplified()->CheckedTaggedToTaggedSigned();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedSigned);
+ }
+ return InsertConversion(node, op, use_node);
+}
+
+Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
+ case IrOpcode::kHeapConstant:
+ return node; // No change necessary.
+ case IrOpcode::kInt32Constant:
+ if (output_type->Is(Type::Boolean())) {
+ return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
+ : jsgraph()->TrueConstant();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedPointer);
+ }
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kFloat32Constant:
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedPointer);
+ default:
+ break;
+ }
+ // Select the correct X -> Tagged operator.
+ if (output_type->Is(Type::None())) {
+ // This is an impossible value; it should not be used at runtime.
+ // We just provide a dummy value here.
+ return jsgraph()->TheHoleConstant();
+ }
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedPointer);
+}
+
+Node* RepresentationChanger::GetTaggedRepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ Truncation truncation) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
case IrOpcode::kNumberConstant:
case IrOpcode::kHeapConstant:
return node; // No change necessary.
@@ -202,12 +331,17 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
default:
break;
}
+ if (output_rep == MachineRepresentation::kTaggedSigned ||
+ output_rep == MachineRepresentation::kTaggedPointer) {
+ // this is a no-op.
+ return node;
+ }
// Select the correct X -> Tagged operator.
const Operator* op;
- if (output_rep == MachineRepresentation::kNone) {
- // We should only asisgn this representation if the type is empty.
- CHECK(!output_type->IsInhabited());
- op = machine()->ImpossibleToTagged();
+ if (output_type->Is(Type::None())) {
+ // This is an impossible value; it should not be used at runtime.
+ // We just provide a dummy value here.
+ return jsgraph()->TheHoleConstant();
} else if (output_rep == MachineRepresentation::kBit) {
if (output_type->Is(Type::Boolean())) {
op = simplified()->ChangeBitToTagged();
@@ -220,7 +354,10 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
op = simplified()->ChangeInt31ToTaggedSigned();
} else if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeInt32ToTagged();
- } else if (output_type->Is(Type::Unsigned32())) {
+ } else if (output_type->Is(Type::Unsigned32()) ||
+ truncation.IsUsedAsWord32()) {
+ // Either the output is uint32 or the uses only care about the
+ // low 32 bits (so we can pick uint32 safely).
op = simplified()->ChangeUint32ToTagged();
} else {
return TypeError(node, output_rep, output_type,
@@ -229,10 +366,7 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
} else if (output_rep ==
MachineRepresentation::kFloat32) { // float32 -> float64 -> tagged
node = InsertChangeFloat32ToFloat64(node);
- op = simplified()->ChangeFloat64ToTagged(
- output_type->Maybe(Type::MinusZero())
- ? CheckForMinusZeroMode::kCheckForMinusZero
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ op = simplified()->ChangeFloat64ToTagged();
} else if (output_rep == MachineRepresentation::kFloat64) {
if (output_type->Is(Type::Signed31())) { // float64 -> int32 -> tagged
node = InsertChangeFloat64ToInt32(node);
@@ -246,10 +380,7 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
node = InsertChangeFloat64ToUint32(node);
op = simplified()->ChangeUint32ToTagged();
} else {
- op = simplified()->ChangeFloat64ToTagged(
- output_type->Maybe(Type::MinusZero())
- ? CheckForMinusZeroMode::kCheckForMinusZero
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ op = simplified()->ChangeFloat64ToTagged();
}
} else {
return TypeError(node, output_rep, output_type,
@@ -283,10 +414,10 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
}
// Select the correct X -> Float32 operator.
const Operator* op = nullptr;
- if (output_rep == MachineRepresentation::kNone) {
- // We should only use kNone representation if the type is empty.
- CHECK(!output_type->IsInhabited());
- op = machine()->ImpossibleToFloat32();
+ if (output_type->Is(Type::None())) {
+ // This is an impossible value; it should not be used at runtime.
+ // We just provide a dummy value here.
+ return jsgraph()->Float32Constant(0.0f);
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
// int32 -> float64 -> float32
@@ -303,7 +434,8 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
node = jsgraph()->graph()->NewNode(op, node);
op = machine()->TruncateFloat64ToFloat32();
}
- } else if (output_rep == MachineRepresentation::kTagged) {
+ } else if (output_rep == MachineRepresentation::kTagged ||
+ output_rep == MachineRepresentation::kTaggedPointer) {
if (output_type->Is(Type::NumberOrOddball())) {
// tagged -> float64 -> float32
if (output_type->Is(Type::Number())) {
@@ -352,10 +484,10 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
}
// Select the correct X -> Float64 operator.
const Operator* op = nullptr;
- if (output_rep == MachineRepresentation::kNone) {
- // We should only use kNone representation if the type is empty.
- CHECK(!output_type->IsInhabited());
- op = machine()->ImpossibleToFloat64();
+ if (output_type->Is(Type::None())) {
+ // This is an impossible value; it should not be used at runtime.
+ // We just provide a dummy value here.
+ return jsgraph()->Float64Constant(0.0);
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeInt32ToFloat64();
@@ -367,11 +499,14 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
}
} else if (output_rep == MachineRepresentation::kBit) {
op = machine()->ChangeUint32ToFloat64();
- } else if (output_rep == MachineRepresentation::kTagged) {
+ } else if (output_rep == MachineRepresentation::kTagged ||
+ output_rep == MachineRepresentation::kTaggedSigned ||
+ output_rep == MachineRepresentation::kTaggedPointer) {
if (output_type->Is(Type::Undefined())) {
return jsgraph()->Float64Constant(
std::numeric_limits<double>::quiet_NaN());
- } else if (output_type->Is(Type::TaggedSigned())) {
+
+ } else if (output_rep == MachineRepresentation::kTaggedSigned) {
node = InsertChangeTaggedSignedToInt32(node);
op = machine()->ChangeInt32ToFloat64();
} else if (output_type->Is(Type::Number())) {
@@ -435,10 +570,10 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
// Select the correct X -> Word32 operator.
const Operator* op = nullptr;
- if (output_rep == MachineRepresentation::kNone) {
- // We should only use kNone representation if the type is empty.
- CHECK(!output_type->IsInhabited());
- op = machine()->ImpossibleToWord32();
+ if (output_type->Is(Type::None())) {
+ // This is an impossible value; it should not be used at runtime.
+ // We just provide a dummy value here.
+ return jsgraph()->Int32Constant(0);
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word32
} else if (output_rep == MachineRepresentation::kFloat64) {
@@ -470,10 +605,19 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
? CheckForMinusZeroMode::kCheckForMinusZero
: CheckForMinusZeroMode::kDontCheckForMinusZero);
}
- } else if (output_rep == MachineRepresentation::kTagged) {
- if (output_type->Is(Type::TaggedSigned())) {
+ } else if (output_rep == MachineRepresentation::kTaggedSigned) {
+ if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeTaggedSignedToInt32();
- } else if (output_type->Is(Type::Unsigned32())) {
+ } else if (use_info.truncation().IsUsedAsWord32()) {
+ if (use_info.type_check() != TypeCheckKind::kNone) {
+ op = simplified()->CheckedTruncateTaggedToWord32();
+ } else {
+ op = simplified()->TruncateTaggedToWord32();
+ }
+ }
+ } else if (output_rep == MachineRepresentation::kTagged ||
+ output_rep == MachineRepresentation::kTaggedPointer) {
+ if (output_type->Is(Type::Unsigned32())) {
op = simplified()->ChangeTaggedToUint32();
} else if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeTaggedToInt32();
@@ -541,22 +685,43 @@ Node* RepresentationChanger::GetBitRepresentationFor(
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
- DCHECK(value.is_identical_to(factory()->true_value()) ||
- value.is_identical_to(factory()->false_value()));
- return jsgraph()->Int32Constant(
- value.is_identical_to(factory()->true_value()) ? 1 : 0);
+ return jsgraph()->Int32Constant(value->BooleanValue() ? 1 : 0);
}
default:
break;
}
// Select the correct X -> Bit operator.
const Operator* op;
- if (output_rep == MachineRepresentation::kNone) {
- // We should only use kNone representation if the type is empty.
- CHECK(!output_type->IsInhabited());
- op = machine()->ImpossibleToBit();
- } else if (output_rep == MachineRepresentation::kTagged) {
- op = simplified()->ChangeTaggedToBit();
+ if (output_type->Is(Type::None())) {
+ // This is an impossible value; it should not be used at runtime.
+ // We just provide a dummy value here.
+ return jsgraph()->Int32Constant(0);
+ } else if (output_rep == MachineRepresentation::kTagged ||
+ output_rep == MachineRepresentation::kTaggedPointer) {
+ if (output_type->Is(Type::BooleanOrNullOrUndefined())) {
+ // true is the only trueish Oddball.
+ op = simplified()->ChangeTaggedToBit();
+ } else {
+ op = simplified()->TruncateTaggedToBit();
+ }
+ } else if (output_rep == MachineRepresentation::kTaggedSigned) {
+ node = jsgraph()->graph()->NewNode(machine()->WordEqual(), node,
+ jsgraph()->ZeroConstant());
+ return jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
+ jsgraph()->Int32Constant(0));
+ } else if (IsWord(output_rep)) {
+ node = jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
+ jsgraph()->Int32Constant(0));
+ return jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
+ jsgraph()->Int32Constant(0));
+ } else if (output_rep == MachineRepresentation::kFloat32) {
+ node = jsgraph()->graph()->NewNode(machine()->Float32Abs(), node);
+ return jsgraph()->graph()->NewNode(machine()->Float32LessThan(),
+ jsgraph()->Float32Constant(0.0), node);
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ node = jsgraph()->graph()->NewNode(machine()->Float64Abs(), node);
+ return jsgraph()->graph()->NewNode(machine()->Float64LessThan(),
+ jsgraph()->Float64Constant(0.0), node);
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kBit);
@@ -566,10 +731,10 @@ Node* RepresentationChanger::GetBitRepresentationFor(
Node* RepresentationChanger::GetWord64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type) {
- if (output_rep == MachineRepresentation::kNone) {
- // We should only use kNone representation if the type is empty.
- CHECK(!output_type->IsInhabited());
- return jsgraph()->graph()->NewNode(machine()->ImpossibleToFloat64(), node);
+ if (output_type->Is(Type::None())) {
+ // This is an impossible value; it should not be used at runtime.
+ // We just provide a dummy value here.
+ return jsgraph()->Int64Constant(0);
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word64
}
@@ -787,7 +952,7 @@ Node* RepresentationChanger::TypeError(Node* node,
if (!testing_type_errors_) {
std::ostringstream out_str;
out_str << output_rep << " (";
- output_type->PrintTo(out_str, Type::SEMANTIC_DIM);
+ output_type->PrintTo(out_str);
out_str << ")";
std::ostringstream use_str;
@@ -802,6 +967,9 @@ Node* RepresentationChanger::TypeError(Node* node,
return node;
}
+Node* RepresentationChanger::InsertChangeBitToTagged(Node* node) {
+ return jsgraph()->graph()->NewNode(simplified()->ChangeBitToTagged(), node);
+}
Node* RepresentationChanger::InsertChangeFloat32ToFloat64(Node* node) {
return jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(), node);
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index fac328072a..f27108ed46 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -29,6 +29,9 @@ class Truncation final {
// Queries.
bool IsUnused() const { return kind_ == TruncationKind::kNone; }
+ bool IsUsedAsBool() const {
+ return LessGeneral(kind_, TruncationKind::kBool);
+ }
bool IsUsedAsWord32() const {
return LessGeneral(kind_, TruncationKind::kWord32);
}
@@ -139,8 +142,18 @@ class UseInfo {
static UseInfo AnyTagged() {
return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
}
+ static UseInfo TaggedSigned() {
+ return UseInfo(MachineRepresentation::kTaggedSigned, Truncation::Any());
+ }
+ static UseInfo TaggedPointer() {
+ return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any());
+ }
// Possibly deoptimizing conversions.
+ static UseInfo CheckedSignedSmallAsTaggedSigned() {
+ return UseInfo(MachineRepresentation::kTaggedSigned, Truncation::Any(),
+ TypeCheckKind::kSignedSmall);
+ }
static UseInfo CheckedSignedSmallAsWord32() {
return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
TypeCheckKind::kSignedSmall);
@@ -232,8 +245,15 @@ class RepresentationChanger final {
bool testing_type_errors_; // If {true}, don't abort on a type error.
bool type_error_; // Set when a type error is detected.
+ Node* GetTaggedSignedRepresentationFor(Node* node,
+ MachineRepresentation output_rep,
+ Type* output_type, Node* use_node,
+ UseInfo use_info);
+ Node* GetTaggedPointerRepresentationFor(Node* node,
+ MachineRepresentation output_rep,
+ Type* output_type);
Node* GetTaggedRepresentationFor(Node* node, MachineRepresentation output_rep,
- Type* output_type);
+ Type* output_type, Truncation truncation);
Node* GetFloat32RepresentationFor(Node* node,
MachineRepresentation output_rep,
Type* output_type, Truncation truncation);
@@ -251,6 +271,7 @@ class RepresentationChanger final {
Node* TypeError(Node* node, MachineRepresentation output_rep,
Type* output_type, MachineRepresentation use);
Node* MakeTruncatedInt32Constant(double value);
+ Node* InsertChangeBitToTagged(Node* node);
Node* InsertChangeFloat32ToFloat64(Node* node);
Node* InsertChangeFloat64ToInt32(Node* node);
Node* InsertChangeFloat64ToUint32(Node* node);
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index e69a7ac3e3..284c3fc6e3 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -4,7 +4,7 @@
#include "src/compiler/code-generator.h"
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
@@ -980,9 +980,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDebugBreak:
__ stop("kArchDebugBreak");
break;
- case kArchImpossible:
- __ Abort(kConversionFromImpossibleValue);
- break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -992,8 +989,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result =
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result = AssembleDeoptimizerCall(
+ deopt_state_id, bailout_type, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -2195,7 +2192,8 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+ SourcePosition pos) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
@@ -2204,7 +2202,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 6fc8a4d9f0..f1aa332a49 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -327,6 +327,11 @@ void InstructionSelector::VisitLoad(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
void InstructionSelector::VisitStore(Node* node) {
S390OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -1099,7 +1104,7 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
Node* right = m.right().node();
if (g.CanBeImmediate(right, kInt32Imm) &&
base::bits::IsPowerOfTwo64(g.GetImmediate(right))) {
- int power = 31 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
+ int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
Emit(kS390_ShiftLeft64, g.DefineSameAsFirst(node), g.UseRegister(left),
g.UseImmediate(power));
return;
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index 74ba835518..4fc0d0a540 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -7,7 +7,7 @@
#include <iosfwd>
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 58c01ccf03..b4e74d98fe 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -11,10 +11,10 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
-#include "src/zone-containers.h"
+#include "src/compiler/node.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/scheduler.h b/deps/v8/src/compiler/scheduler.h
index 269c271ae5..416ba5c84c 100644
--- a/deps/v8/src/compiler/scheduler.h
+++ b/deps/v8/src/compiler/scheduler.h
@@ -10,7 +10,7 @@
#include "src/compiler/opcodes.h"
#include "src/compiler/schedule.h"
#include "src/compiler/zone-pool.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index d698fe9269..97aacd691c 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -20,9 +20,9 @@
#include "src/compiler/representation-change.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/source-position.h"
+#include "src/compiler/type-cache.h"
#include "src/conversions-inl.h"
#include "src/objects.h"
-#include "src/type-cache.h"
namespace v8 {
namespace internal {
@@ -311,6 +311,9 @@ class RepresentationSelector {
bool updated = UpdateFeedbackType(node);
TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
VisitNode(node, info->truncation(), nullptr);
+ TRACE(" ==> output ");
+ PrintOutputInfo(info);
+ TRACE("\n");
if (updated) {
for (Node* const user : node->uses()) {
if (GetInfo(user)->visited()) {
@@ -330,6 +333,9 @@ class RepresentationSelector {
bool updated = UpdateFeedbackType(node);
TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
VisitNode(node, info->truncation(), nullptr);
+ TRACE(" ==> output ");
+ PrintOutputInfo(info);
+ TRACE("\n");
if (updated) {
for (Node* const user : node->uses()) {
if (GetInfo(user)->visited()) {
@@ -534,9 +540,6 @@ class RepresentationSelector {
TRACE(" visit #%d: %s (trunc: %s)\n", node->id(), node->op()->mnemonic(),
info->truncation().description());
VisitNode(node, info->truncation(), nullptr);
- TRACE(" ==> output ");
- PrintOutputInfo(info);
- TRACE("\n");
}
}
@@ -804,41 +807,10 @@ class RepresentationSelector {
VisitBinop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
}
- void VisitInt32Binop(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- }
void VisitWord32TruncatingBinop(Node* node) {
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
}
- void VisitUint32Binop(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- }
- void VisitInt64Binop(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord64(),
- MachineRepresentation::kWord64);
- }
- void VisitUint64Binop(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord64(),
- MachineRepresentation::kWord64);
- }
- void VisitFloat64Cmp(Node* node) {
- VisitBinop(node, UseInfo::TruncatingFloat64(), MachineRepresentation::kBit);
- }
- void VisitInt32Cmp(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kBit);
- }
- void VisitUint32Cmp(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kBit);
- }
- void VisitInt64Cmp(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord64(), MachineRepresentation::kBit);
- }
- void VisitUint64Cmp(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord64(), MachineRepresentation::kBit);
- }
// Infer representation for phi-like nodes.
// The {node} parameter is only used to decide on the int64 representation.
@@ -875,11 +847,13 @@ class RepresentationSelector {
bool is_word64 = GetInfo(node->InputAt(0))->representation() ==
MachineRepresentation::kWord64;
#ifdef DEBUG
- // Check that all the inputs agree on being Word64.
- DCHECK_EQ(IrOpcode::kPhi, node->opcode()); // This only works for phis.
- for (int i = 1; i < node->op()->ValueInputCount(); i++) {
- DCHECK_EQ(is_word64, GetInfo(node->InputAt(i))->representation() ==
- MachineRepresentation::kWord64);
+ if (node->opcode() != IrOpcode::kTypeGuard) {
+ // Check that all the inputs agree on being Word64.
+ DCHECK_EQ(IrOpcode::kPhi, node->opcode()); // This only works for phis.
+ for (int i = 1; i < node->op()->ValueInputCount(); i++) {
+ DCHECK_EQ(is_word64, GetInfo(node->InputAt(i))->representation() ==
+ MachineRepresentation::kWord64);
+ }
}
#endif
return is_word64 ? MachineRepresentation::kWord64
@@ -937,6 +911,21 @@ class RepresentationSelector {
}
}
+ void VisitObjectIs(Node* node, Type* type, SimplifiedLowering* lowering) {
+ Type* const input_type = TypeOf(node->InputAt(0));
+ if (input_type->Is(type)) {
+ VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
+ if (lower()) {
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
+ }
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ if (lower() && !input_type->Maybe(type)) {
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
+ }
+ }
+ }
+
void VisitCall(Node* node, SimplifiedLowering* lowering) {
const CallDescriptor* desc = CallDescriptorOf(node->op());
int params = static_cast<int>(desc->ParameterCount());
@@ -986,8 +975,11 @@ class RepresentationSelector {
for (int i = 0; i < node->InputCount(); i++) {
Node* input = node->InputAt(i);
NodeInfo* input_info = GetInfo(input);
- MachineType machine_type(input_info->representation(),
- DeoptValueSemanticOf(TypeOf(input)));
+ Type* input_type = TypeOf(input);
+ MachineRepresentation rep = input_type->IsInhabited()
+ ? input_info->representation()
+ : MachineRepresentation::kNone;
+ MachineType machine_type(rep, DeoptValueSemanticOf(input_type));
DCHECK(machine_type.representation() !=
MachineRepresentation::kWord32 ||
machine_type.semantic() == MachineSemantic::kInt32 ||
@@ -1023,12 +1015,12 @@ class RepresentationSelector {
WriteBarrierKind WriteBarrierKindFor(
BaseTaggedness base_taggedness,
MachineRepresentation field_representation, Type* field_type,
- Node* value) {
+ MachineRepresentation value_representation, Node* value) {
if (base_taggedness == kTaggedBase &&
- field_representation == MachineRepresentation::kTagged) {
+ CanBeTaggedPointer(field_representation)) {
Type* value_type = NodeProperties::GetType(value);
- if (field_type->Is(Type::TaggedSigned()) ||
- value_type->Is(Type::TaggedSigned())) {
+ if (field_representation == MachineRepresentation::kTaggedSigned ||
+ value_representation == MachineRepresentation::kTaggedSigned) {
// Write barriers are only for stores of heap objects.
return kNoWriteBarrier;
}
@@ -1054,8 +1046,8 @@ class RepresentationSelector {
return kMapWriteBarrier;
}
}
- if (field_type->Is(Type::TaggedPointer()) ||
- value_type->Is(Type::TaggedPointer())) {
+ if (field_representation == MachineRepresentation::kTaggedPointer ||
+ value_representation == MachineRepresentation::kTaggedPointer) {
// Write barriers for heap objects are cheaper.
return kPointerWriteBarrier;
}
@@ -1076,13 +1068,14 @@ class RepresentationSelector {
WriteBarrierKind WriteBarrierKindFor(
BaseTaggedness base_taggedness,
MachineRepresentation field_representation, int field_offset,
- Type* field_type, Node* value) {
+ Type* field_type, MachineRepresentation value_representation,
+ Node* value) {
if (base_taggedness == kTaggedBase &&
field_offset == HeapObject::kMapOffset) {
return kMapWriteBarrier;
}
return WriteBarrierKindFor(base_taggedness, field_representation,
- field_type, value);
+ field_type, value_representation, value);
}
Graph* graph() const { return jsgraph_->graph(); }
@@ -1169,6 +1162,110 @@ class RepresentationSelector {
return;
}
+ void VisitSpeculativeNumberModulus(Node* node, Truncation truncation,
+ SimplifiedLowering* lowering) {
+ // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+ // can only eliminate an unused speculative number operation if we know
+ // that the inputs are PlainPrimitive, which excludes everything that's
+ // might have side effects or throws during a ToNumber conversion.
+ if (BothInputsAre(node, Type::PlainPrimitive())) {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ }
+ if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
+ (truncation.IsUsedAsWord32() ||
+ NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
+ // => unsigned Uint32Mod
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+ return;
+ }
+ if (BothInputsAre(node, Type::Signed32OrMinusZeroOrNaN()) &&
+ (truncation.IsUsedAsWord32() ||
+ NodeProperties::GetType(node)->Is(Type::Signed32()))) {
+ // => signed Int32Mod
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ return;
+ }
+
+ // Try to use type feedback.
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+
+ // Handle the case when no uint32 checks on inputs are necessary
+ // (but an overflow check is needed on the output).
+ if (BothInputsAreUnsigned32(node)) {
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Unsigned32());
+ if (lower()) ChangeToUint32OverflowOp(node);
+ return;
+ }
+ }
+
+ // Handle the case when no int32 checks on inputs are necessary
+ // (but an overflow check is needed on the output).
+ if (BothInputsAre(node, Type::Signed32())) {
+ // If both the inputs the feedback are int32, use the overflow op.
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower()) ChangeToInt32OverflowOp(node);
+ return;
+ }
+ }
+
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ // If the result is truncated, we only need to check the inputs.
+ if (truncation.IsUsedAsWord32()) {
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ } else if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN())) {
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Unsigned32());
+ if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+ } else {
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower()) ChangeToInt32OverflowOp(node);
+ }
+ return;
+ }
+
+ if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Unsigned32()) &&
+ (truncation.IsUsedAsWord32() ||
+ NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
+ // We can only promise Float64 truncation here, as the decision is
+ // based on the feedback types of the inputs.
+ VisitBinop(node,
+ UseInfo(MachineRepresentation::kWord32, Truncation::Float64()),
+ MachineRepresentation::kWord32, Type::Number());
+ if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+ return;
+ }
+ if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Signed32()) &&
+ (truncation.IsUsedAsWord32() ||
+ NodeProperties::GetType(node)->Is(Type::Signed32()))) {
+ // We can only promise Float64 truncation here, as the decision is
+ // based on the feedback types of the inputs.
+ VisitBinop(node,
+ UseInfo(MachineRepresentation::kWord32, Truncation::Float64()),
+ MachineRepresentation::kWord32, Type::Number());
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ return;
+ }
+ // default case => Float64Mod
+ VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ MachineRepresentation::kFloat64, Type::Number());
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
+ }
+
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
@@ -1195,22 +1292,13 @@ class RepresentationSelector {
// tho Start doesn't really produce a value, we have to say Tagged
// here, otherwise the input conversion will fail.
return VisitLeaf(node, MachineRepresentation::kTagged);
- case IrOpcode::kDead:
- return VisitLeaf(node, MachineRepresentation::kNone);
- case IrOpcode::kParameter: {
+ case IrOpcode::kParameter:
// TODO(titzer): use representation from linkage.
- ProcessInput(node, 0, UseInfo::None());
- SetOutput(node, MachineRepresentation::kTagged);
- return;
- }
+ return VisitUnop(node, UseInfo::None(), MachineRepresentation::kTagged);
case IrOpcode::kInt32Constant:
return VisitLeaf(node, MachineRepresentation::kWord32);
case IrOpcode::kInt64Constant:
return VisitLeaf(node, MachineRepresentation::kWord64);
- case IrOpcode::kFloat32Constant:
- return VisitLeaf(node, MachineRepresentation::kFloat32);
- case IrOpcode::kFloat64Constant:
- return VisitLeaf(node, MachineRepresentation::kFloat64);
case IrOpcode::kExternalConstant:
return VisitLeaf(node, MachineType::PointerRepresentation());
case IrOpcode::kNumberConstant:
@@ -1218,12 +1306,6 @@ class RepresentationSelector {
case IrOpcode::kHeapConstant:
return VisitLeaf(node, MachineRepresentation::kTagged);
- case IrOpcode::kDeoptimizeIf:
- case IrOpcode::kDeoptimizeUnless:
- ProcessInput(node, 0, UseInfo::Bool());
- ProcessInput(node, 1, UseInfo::AnyTagged());
- ProcessRemainingInputs(node, 2);
- return;
case IrOpcode::kBranch:
ProcessInput(node, 0, UseInfo::Bool());
EnqueueInput(node, NodeProperties::FirstControlIndex(node));
@@ -1242,6 +1324,18 @@ class RepresentationSelector {
//------------------------------------------------------------------
// JavaScript operators.
//------------------------------------------------------------------
+ case IrOpcode::kJSToBoolean: {
+ if (truncation.IsUsedAsBool()) {
+ ProcessInput(node, 0, UseInfo::Bool());
+ ProcessInput(node, 1, UseInfo::None());
+ SetOutput(node, MachineRepresentation::kBit);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitInputs(node);
+ SetOutput(node, MachineRepresentation::kTagged);
+ }
+ return;
+ }
case IrOpcode::kJSToNumber: {
VisitInputs(node);
// TODO(bmeurer): Optimize somewhat based on input type?
@@ -1268,6 +1362,8 @@ class RepresentationSelector {
node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
} else {
+ DCHECK_EQ(input_info->representation(),
+ MachineRepresentation::kTagged);
// BooleanNot(x: kRepTagged) => WordEqual(x, #false)
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
@@ -1289,7 +1385,8 @@ class RepresentationSelector {
rhs_type->Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
OneInputCannotBe(node, type_cache_.kZeroish))) {
// => unsigned Int32Cmp
- VisitUint32Cmp(node);
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
return;
}
@@ -1299,12 +1396,14 @@ class RepresentationSelector {
rhs_type->Is(Type::Signed32OrMinusZeroOrNaN()) &&
OneInputCannotBe(node, type_cache_.kZeroish))) {
// => signed Int32Cmp
- VisitInt32Cmp(node);
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
return;
}
// => Float64Cmp
- VisitFloat64Cmp(node);
+ VisitBinop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
return;
}
@@ -1314,16 +1413,19 @@ class RepresentationSelector {
if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
// => unsigned Int32Cmp
- VisitUint32Cmp(node);
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
} else if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
// => signed Int32Cmp
- VisitInt32Cmp(node);
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
} else {
// => Float64Cmp
- VisitFloat64Cmp(node);
+ VisitBinop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
}
return;
@@ -1347,13 +1449,15 @@ class RepresentationSelector {
if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
// => unsigned Int32Cmp
- VisitUint32Cmp(node);
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
if (lower()) ChangeToPureOp(node, Uint32Op(node));
return;
} else if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
// => signed Int32Cmp
- VisitInt32Cmp(node);
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
if (lower()) ChangeToPureOp(node, Int32Op(node));
return;
}
@@ -1490,10 +1594,10 @@ class RepresentationSelector {
}
if (BothInputsAreSigned32(node)) {
if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
- // => signed Int32Div
- VisitInt32Binop(node);
- if (lower()) DeferReplacement(node, lowering->Int32Div(node));
- return;
+ // => signed Int32Div
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+ return;
}
if (truncation.IsUsedAsWord32()) {
// => signed Int32Div
@@ -1562,7 +1666,7 @@ class RepresentationSelector {
if (BothInputsAreSigned32(node)) {
if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
// => signed Int32Div
- VisitInt32Binop(node);
+ VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Int32Div(node));
return;
}
@@ -1574,116 +1678,12 @@ class RepresentationSelector {
}
}
// Number x Number => Float64Div
- if (BothInputsAre(node, Type::NumberOrUndefined())) {
- VisitFloat64Binop(node);
- if (lower()) ChangeToPureOp(node, Float64Op(node));
- return;
- }
- // Checked float64 x float64 => float64
- DCHECK_EQ(IrOpcode::kSpeculativeNumberDivide, node->opcode());
- VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
- MachineRepresentation::kFloat64, Type::Number());
- if (lower()) ChangeToPureOp(node, Float64Op(node));
- return;
- }
- case IrOpcode::kSpeculativeNumberModulus: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
- if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
- (truncation.IsUsedAsWord32() ||
- NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
- // => unsigned Uint32Mod
- VisitWord32TruncatingBinop(node);
- if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
- return;
- }
- if (BothInputsAre(node, Type::Signed32OrMinusZeroOrNaN()) &&
- (truncation.IsUsedAsWord32() ||
- NodeProperties::GetType(node)->Is(Type::Signed32()))) {
- // => signed Int32Mod
- VisitWord32TruncatingBinop(node);
- if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
- return;
- }
-
- // Try to use type feedback.
- NumberOperationHint hint = NumberOperationHintOf(node->op());
-
- // Handle the case when no uint32 checks on inputs are necessary
- // (but an overflow check is needed on the output).
- if (BothInputsAreUnsigned32(node)) {
- if (hint == NumberOperationHint::kSignedSmall ||
- hint == NumberOperationHint::kSigned32) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32, Type::Unsigned32());
- if (lower()) ChangeToUint32OverflowOp(node);
- return;
- }
- }
-
- // Handle the case when no int32 checks on inputs are necessary
- // (but an overflow check is needed on the output).
- if (BothInputsAre(node, Type::Signed32())) {
- // If both the inputs the feedback are int32, use the overflow op.
- if (hint == NumberOperationHint::kSignedSmall ||
- hint == NumberOperationHint::kSigned32) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32, Type::Signed32());
- if (lower()) ChangeToInt32OverflowOp(node);
- return;
- }
- }
-
- if (hint == NumberOperationHint::kSignedSmall ||
- hint == NumberOperationHint::kSigned32) {
- // If the result is truncated, we only need to check the inputs.
- if (truncation.IsUsedAsWord32()) {
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
- } else {
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32, Type::Signed32());
- if (lower()) ChangeToInt32OverflowOp(node);
- }
- return;
- }
-
- if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
- TypeOf(node->InputAt(1))->Is(Type::Unsigned32()) &&
- (truncation.IsUsedAsWord32() ||
- NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
- // We can only promise Float64 truncation here, as the decision is
- // based on the feedback types of the inputs.
- VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
- Truncation::Float64()),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
- return;
- }
- if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
- TypeOf(node->InputAt(1))->Is(Type::Signed32()) &&
- (truncation.IsUsedAsWord32() ||
- NodeProperties::GetType(node)->Is(Type::Signed32()))) {
- // We can only promise Float64 truncation here, as the decision is
- // based on the feedback types of the inputs.
- VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
- Truncation::Float64()),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
- return;
- }
- // default case => Float64Mod
- VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
- MachineRepresentation::kFloat64, Type::Number());
+ VisitFloat64Binop(node);
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
}
+ case IrOpcode::kSpeculativeNumberModulus:
+ return VisitSpeculativeNumberModulus(node, truncation, lowering);
case IrOpcode::kNumberModulus: {
if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
(truncation.IsUsedAsWord32() ||
@@ -1733,7 +1733,7 @@ class RepresentationSelector {
case IrOpcode::kNumberBitwiseOr:
case IrOpcode::kNumberBitwiseXor:
case IrOpcode::kNumberBitwiseAnd: {
- VisitInt32Binop(node);
+ VisitWord32TruncatingBinop(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
return;
}
@@ -1895,13 +1895,13 @@ class RepresentationSelector {
case IrOpcode::kNumberMax: {
// TODO(turbofan): We should consider feedback types here as well.
if (BothInputsAreUnsigned32(node)) {
- VisitUint32Binop(node);
+ VisitWord32TruncatingBinop(node);
if (lower()) {
lowering->DoMax(node, lowering->machine()->Uint32LessThan(),
MachineRepresentation::kWord32);
}
} else if (BothInputsAreSigned32(node)) {
- VisitInt32Binop(node);
+ VisitWord32TruncatingBinop(node);
if (lower()) {
lowering->DoMax(node, lowering->machine()->Int32LessThan(),
MachineRepresentation::kWord32);
@@ -1921,13 +1921,13 @@ class RepresentationSelector {
case IrOpcode::kNumberMin: {
// TODO(turbofan): We should consider feedback types here as well.
if (BothInputsAreUnsigned32(node)) {
- VisitUint32Binop(node);
+ VisitWord32TruncatingBinop(node);
if (lower()) {
lowering->DoMin(node, lowering->machine()->Uint32LessThan(),
MachineRepresentation::kWord32);
}
} else if (BothInputsAreSigned32(node)) {
- VisitInt32Binop(node);
+ VisitWord32TruncatingBinop(node);
if (lower()) {
lowering->DoMin(node, lowering->machine()->Int32LessThan(),
MachineRepresentation::kWord32);
@@ -2002,6 +2002,23 @@ class RepresentationSelector {
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
return;
}
+ case IrOpcode::kNumberToBoolean: {
+ Type* const input_type = TypeOf(node->InputAt(0));
+ if (input_type->Is(Type::Integral32())) {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
+ if (lower()) lowering->DoIntegral32ToBit(node);
+ } else if (input_type->Is(Type::OrderedNumber())) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ if (lower()) lowering->DoOrderedNumberToBit(node);
+ } else {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ if (lower()) lowering->DoNumberToBit(node);
+ }
+ return;
+ }
case IrOpcode::kNumberToInt32: {
// Just change representation if necessary.
VisitUnop(node, UseInfo::TruncatingWord32(),
@@ -2023,62 +2040,11 @@ class RepresentationSelector {
}
return;
}
- case IrOpcode::kStringEqual: {
- VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
- if (lower()) {
- // StringEqual(x, y) => Call(StringEqualStub, x, y, no-context)
- Operator::Properties properties =
- Operator::kCommutative | Operator::kEliminatable;
- Callable callable = CodeFactory::StringEqual(jsgraph_->isolate());
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
- flags, properties);
- node->InsertInput(jsgraph_->zone(), 0,
- jsgraph_->HeapConstant(callable.code()));
- node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
- node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
- NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
- }
- return;
- }
- case IrOpcode::kStringLessThan: {
- VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
- if (lower()) {
- // StringLessThan(x, y) => Call(StringLessThanStub, x, y, no-context)
- Operator::Properties properties = Operator::kEliminatable;
- Callable callable = CodeFactory::StringLessThan(jsgraph_->isolate());
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
- flags, properties);
- node->InsertInput(jsgraph_->zone(), 0,
- jsgraph_->HeapConstant(callable.code()));
- node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
- node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
- NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
- }
- return;
- }
+ case IrOpcode::kStringEqual:
+ case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual: {
- VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
- if (lower()) {
- // StringLessThanOrEqual(x, y)
- // => Call(StringLessThanOrEqualStub, x, y, no-context)
- Operator::Properties properties = Operator::kEliminatable;
- Callable callable =
- CodeFactory::StringLessThanOrEqual(jsgraph_->isolate());
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
- flags, properties);
- node->InsertInput(jsgraph_->zone(), 0,
- jsgraph_->HeapConstant(callable.code()));
- node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
- node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
- NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
- }
- return;
+ return VisitBinop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTagged);
}
case IrOpcode::kStringCharCodeAt: {
VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
@@ -2090,23 +2056,36 @@ class RepresentationSelector {
MachineRepresentation::kTagged);
return;
}
+ case IrOpcode::kStringFromCodePoint: {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTagged);
+ return;
+ }
case IrOpcode::kCheckBounds: {
Type* index_type = TypeOf(node->InputAt(0));
+ Type* length_type = TypeOf(node->InputAt(1));
if (index_type->Is(Type::Unsigned32())) {
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
+ if (lower() && index_type->Max() < length_type->Min()) {
+ // The bounds check is redundant if we already know that
+ // the index is within the bounds of [0.0, length[.
+ DeferReplacement(node, node->InputAt(0));
+ }
} else {
VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
}
- if (lower()) {
- // The bounds check is redundant if we already know that
- // the index is within the bounds of [0.0, length[.
- if (index_type->Is(NodeProperties::GetType(node))) {
- DeferReplacement(node, node->InputAt(0));
- }
+ return;
+ }
+ case IrOpcode::kCheckHeapObject: {
+ if (InputCannotBe(node, Type::SignedSmall())) {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
}
return;
}
@@ -2135,28 +2114,20 @@ class RepresentationSelector {
}
return;
}
- case IrOpcode::kCheckString: {
- if (InputIs(node, Type::String())) {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ case IrOpcode::kCheckSmi: {
+ if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
+ VisitUnop(node, UseInfo::CheckedSignedSmallAsWord32(),
+ MachineRepresentation::kWord32);
} else {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ VisitUnop(node, UseInfo::CheckedSignedSmallAsTaggedSigned(),
+ MachineRepresentation::kTaggedSigned);
}
+ if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
- case IrOpcode::kCheckTaggedPointer: {
- if (InputCannotBe(node, Type::SignedSmall())) {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
+ case IrOpcode::kCheckString: {
+ if (InputIs(node, Type::String())) {
VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
- }
- return;
- }
- case IrOpcode::kCheckTaggedSigned: {
- if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
- VisitUnop(node, UseInfo::CheckedSignedSmallAsWord32(),
- MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else {
VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
@@ -2175,15 +2146,16 @@ class RepresentationSelector {
FieldAccess access = FieldAccessOf(node->op());
MachineRepresentation const representation =
access.machine_type.representation();
- // TODO(bmeurer): Introduce an appropriate tagged-signed machine rep.
VisitUnop(node, UseInfoForBasePointer(access), representation);
return;
}
case IrOpcode::kStoreField: {
FieldAccess access = FieldAccessOf(node->op());
+ NodeInfo* input_info = GetInfo(node->InputAt(1));
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
access.base_is_tagged, access.machine_type.representation(),
- access.offset, access.type, node->InputAt(1));
+ access.offset, access.type, input_info->representation(),
+ node->InputAt(1));
ProcessInput(node, 0, UseInfoForBasePointer(access));
ProcessInput(node, 1, TruncatingUseInfoFromRepresentation(
access.machine_type.representation()));
@@ -2255,9 +2227,10 @@ class RepresentationSelector {
}
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
+ NodeInfo* input_info = GetInfo(node->InputAt(2));
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
access.base_is_tagged, access.machine_type.representation(),
- access.type, node->InputAt(2));
+ access.type, input_info->representation(), node->InputAt(2));
ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
ProcessInput(node, 2,
@@ -2336,14 +2309,34 @@ class RepresentationSelector {
}
return;
}
- case IrOpcode::kObjectIsCallable:
- case IrOpcode::kObjectIsNumber:
- case IrOpcode::kObjectIsReceiver:
- case IrOpcode::kObjectIsSmi:
- case IrOpcode::kObjectIsString:
+ case IrOpcode::kObjectIsCallable: {
+ // TODO(turbofan): Add Type::Callable to optimize this?
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ return;
+ }
+ case IrOpcode::kObjectIsNumber: {
+ VisitObjectIs(node, Type::Number(), lowering);
+ return;
+ }
+ case IrOpcode::kObjectIsReceiver: {
+ VisitObjectIs(node, Type::Receiver(), lowering);
+ return;
+ }
+ case IrOpcode::kObjectIsSmi: {
+ // TODO(turbofan): Optimize based on input representation.
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ return;
+ }
+ case IrOpcode::kObjectIsString: {
+ VisitObjectIs(node, Type::String(), lowering);
+ return;
+ }
case IrOpcode::kObjectIsUndetectable: {
- ProcessInput(node, 0, UseInfo::AnyTagged());
- SetOutput(node, MachineRepresentation::kBit);
+ VisitObjectIs(node, Type::Undetectable(), lowering);
+ return;
+ }
+ case IrOpcode::kArrayBufferWasNeutered: {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
return;
}
case IrOpcode::kCheckFloat64Hole: {
@@ -2403,158 +2396,11 @@ class RepresentationSelector {
return;
}
- //------------------------------------------------------------------
- // Machine-level operators.
- //------------------------------------------------------------------
- case IrOpcode::kLoad: {
- // TODO(jarin) Eventually, we should get rid of all machine stores
- // from the high-level phases, then this becomes UNREACHABLE.
- LoadRepresentation rep = LoadRepresentationOf(node->op());
- ProcessInput(node, 0, UseInfo::AnyTagged()); // tagged pointer
- ProcessInput(node, 1, UseInfo::PointerInt()); // index
- ProcessRemainingInputs(node, 2);
- return SetOutput(node, rep.representation());
- }
- case IrOpcode::kStore: {
- // TODO(jarin) Eventually, we should get rid of all machine stores
- // from the high-level phases, then this becomes UNREACHABLE.
- StoreRepresentation rep = StoreRepresentationOf(node->op());
- ProcessInput(node, 0, UseInfo::AnyTagged()); // tagged pointer
- ProcessInput(node, 1, UseInfo::PointerInt()); // index
- ProcessInput(node, 2,
- TruncatingUseInfoFromRepresentation(rep.representation()));
- ProcessRemainingInputs(node, 3);
- return SetOutput(node, MachineRepresentation::kNone);
- }
- case IrOpcode::kWord32Shr:
- // We output unsigned int32 for shift right because JavaScript.
- return VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- case IrOpcode::kWord32And:
- case IrOpcode::kWord32Or:
- case IrOpcode::kWord32Xor:
- case IrOpcode::kWord32Shl:
- case IrOpcode::kWord32Sar:
- // We use signed int32 as the output type for these word32 operations,
- // though the machine bits are the same for either signed or unsigned,
- // because JavaScript considers the result from these operations signed.
- return VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- case IrOpcode::kWord32Equal:
- return VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kBit);
-
- case IrOpcode::kWord32Clz:
- return VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
-
- case IrOpcode::kInt32Add:
- case IrOpcode::kInt32Sub:
- case IrOpcode::kInt32Mul:
- case IrOpcode::kInt32MulHigh:
- case IrOpcode::kInt32Div:
- case IrOpcode::kInt32Mod:
- return VisitInt32Binop(node);
- case IrOpcode::kUint32Div:
- case IrOpcode::kUint32Mod:
- case IrOpcode::kUint32MulHigh:
- return VisitUint32Binop(node);
- case IrOpcode::kInt32LessThan:
- case IrOpcode::kInt32LessThanOrEqual:
- return VisitInt32Cmp(node);
-
- case IrOpcode::kUint32LessThan:
- case IrOpcode::kUint32LessThanOrEqual:
- return VisitUint32Cmp(node);
-
- case IrOpcode::kInt64Add:
- case IrOpcode::kInt64Sub:
- case IrOpcode::kInt64Mul:
- case IrOpcode::kInt64Div:
- case IrOpcode::kInt64Mod:
- return VisitInt64Binop(node);
- case IrOpcode::kInt64LessThan:
- case IrOpcode::kInt64LessThanOrEqual:
- return VisitInt64Cmp(node);
-
- case IrOpcode::kUint64LessThan:
- return VisitUint64Cmp(node);
-
- case IrOpcode::kUint64Div:
- case IrOpcode::kUint64Mod:
- return VisitUint64Binop(node);
-
- case IrOpcode::kWord64And:
- case IrOpcode::kWord64Or:
- case IrOpcode::kWord64Xor:
- case IrOpcode::kWord64Shl:
- case IrOpcode::kWord64Shr:
- case IrOpcode::kWord64Sar:
- return VisitBinop(node, UseInfo::TruncatingWord64(),
- MachineRepresentation::kWord64);
- case IrOpcode::kWord64Equal:
- return VisitBinop(node, UseInfo::TruncatingWord64(),
- MachineRepresentation::kBit);
-
- case IrOpcode::kChangeInt32ToInt64:
- return VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord64);
- case IrOpcode::kChangeUint32ToUint64:
- return VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord64);
- case IrOpcode::kTruncateFloat64ToFloat32:
- return VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat32);
- case IrOpcode::kTruncateFloat64ToWord32:
- return VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kWord32);
-
- case IrOpcode::kChangeInt32ToFloat64:
- return VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kFloat64);
- case IrOpcode::kChangeUint32ToFloat64:
- return VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kFloat64);
- case IrOpcode::kFloat64Add:
- case IrOpcode::kFloat64Sub:
- case IrOpcode::kFloat64Mul:
- case IrOpcode::kFloat64Div:
- case IrOpcode::kFloat64Mod:
- case IrOpcode::kFloat64Min:
- return VisitFloat64Binop(node);
- case IrOpcode::kFloat64Abs:
- case IrOpcode::kFloat64Sqrt:
- case IrOpcode::kFloat64RoundDown:
- case IrOpcode::kFloat64RoundTruncate:
- case IrOpcode::kFloat64RoundTiesAway:
- case IrOpcode::kFloat64RoundUp:
- return VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- case IrOpcode::kFloat64SilenceNaN:
- return VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- case IrOpcode::kFloat64Equal:
- case IrOpcode::kFloat64LessThan:
- case IrOpcode::kFloat64LessThanOrEqual:
- return VisitFloat64Cmp(node);
- case IrOpcode::kFloat64ExtractLowWord32:
- case IrOpcode::kFloat64ExtractHighWord32:
- return VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kWord32);
- case IrOpcode::kFloat64InsertLowWord32:
- case IrOpcode::kFloat64InsertHighWord32:
- return VisitBinop(node, UseInfo::TruncatingFloat64(),
- UseInfo::TruncatingWord32(),
- MachineRepresentation::kFloat64);
case IrOpcode::kNumberSilenceNaN:
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
return;
- case IrOpcode::kLoadStackPointer:
- case IrOpcode::kLoadFramePointer:
- case IrOpcode::kLoadParentFramePointer:
- return VisitLeaf(node, MachineType::PointerRepresentation());
case IrOpcode::kStateValues:
return VisitStateValues(node);
case IrOpcode::kTypeGuard: {
@@ -2563,37 +2409,55 @@ class RepresentationSelector {
// the sigma's type.
MachineRepresentation output =
GetOutputInfoForPhi(node, TypeOf(node->InputAt(0)), truncation);
-
VisitUnop(node, UseInfo(output, truncation), output);
if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
- // The following opcodes are not produced before representation
- // inference runs, so we do not have any real test coverage.
- // Simply fail here.
- case IrOpcode::kChangeFloat64ToInt32:
- case IrOpcode::kChangeFloat64ToUint32:
- case IrOpcode::kTruncateInt64ToInt32:
- case IrOpcode::kChangeFloat32ToFloat64:
- case IrOpcode::kCheckedInt32Add:
- case IrOpcode::kCheckedInt32Sub:
- case IrOpcode::kCheckedUint32ToInt32:
- case IrOpcode::kCheckedFloat64ToInt32:
- case IrOpcode::kCheckedTaggedToInt32:
- case IrOpcode::kCheckedTaggedToFloat64:
- case IrOpcode::kPlainPrimitiveToWord32:
- case IrOpcode::kPlainPrimitiveToFloat64:
- case IrOpcode::kLoopExit:
- case IrOpcode::kLoopExitValue:
- case IrOpcode::kLoopExitEffect:
- FATAL("Representation inference: unsupported opcodes.");
- break;
-
- default:
+ // Operators with all inputs tagged and no or tagged output have uniform
+ // handling.
+ case IrOpcode::kEnd:
+ case IrOpcode::kReturn:
+ case IrOpcode::kIfSuccess:
+ case IrOpcode::kIfException:
+ case IrOpcode::kIfTrue:
+ case IrOpcode::kIfFalse:
+ case IrOpcode::kDeoptimize:
+ case IrOpcode::kEffectPhi:
+ case IrOpcode::kTerminate:
+ case IrOpcode::kFrameState:
+ case IrOpcode::kCheckpoint:
+ case IrOpcode::kLoop:
+ case IrOpcode::kMerge:
+ case IrOpcode::kThrow:
+ case IrOpcode::kBeginRegion:
+ case IrOpcode::kFinishRegion:
+ case IrOpcode::kOsrValue:
+ case IrOpcode::kProjection:
+ case IrOpcode::kObjectState:
+// All JavaScript operators except JSToNumber have uniform handling.
+#define OPCODE_CASE(name) case IrOpcode::k##name:
+ JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
+ JS_OTHER_UNOP_LIST(OPCODE_CASE)
+ JS_OBJECT_OP_LIST(OPCODE_CASE)
+ JS_CONTEXT_OP_LIST(OPCODE_CASE)
+ JS_OTHER_OP_LIST(OPCODE_CASE)
+#undef OPCODE_CASE
+ case IrOpcode::kJSToInteger:
+ case IrOpcode::kJSToLength:
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSToObject:
+ case IrOpcode::kJSToString:
VisitInputs(node);
// Assume the output is tagged.
return SetOutput(node, MachineRepresentation::kTagged);
+
+ default:
+ V8_Fatal(
+ __FILE__, __LINE__,
+ "Representation inference: unsupported opcode %i (%s), node #%i\n.",
+ node->opcode(), node->op()->mnemonic(), node->id());
+ break;
}
UNREACHABLE();
}
@@ -3307,6 +3171,34 @@ void SimplifiedLowering::DoStringToNumber(Node* node) {
NodeProperties::ChangeOp(node, common()->Call(desc));
}
+void SimplifiedLowering::DoIntegral32ToBit(Node* node) {
+ Node* const input = node->InputAt(0);
+ Node* const zero = jsgraph()->Int32Constant(0);
+ Operator const* const op = machine()->Word32Equal();
+
+ node->ReplaceInput(0, graph()->NewNode(op, input, zero));
+ node->AppendInput(graph()->zone(), zero);
+ NodeProperties::ChangeOp(node, op);
+}
+
+void SimplifiedLowering::DoOrderedNumberToBit(Node* node) {
+ Node* const input = node->InputAt(0);
+
+ node->ReplaceInput(0, graph()->NewNode(machine()->Float64Equal(), input,
+ jsgraph()->Float64Constant(0.0)));
+ node->AppendInput(graph()->zone(), jsgraph()->Int32Constant(0));
+ NodeProperties::ChangeOp(node, machine()->Word32Equal());
+}
+
+void SimplifiedLowering::DoNumberToBit(Node* node) {
+ Node* const input = node->InputAt(0);
+
+ node->ReplaceInput(0, jsgraph()->Float64Constant(0.0));
+ node->AppendInput(graph()->zone(),
+ graph()->NewNode(machine()->Float64Abs(), input));
+ NodeProperties::ChangeOp(node, machine()->Float64LessThan());
+}
+
Node* SimplifiedLowering::ToNumberCode() {
if (!to_number_code_.is_set()) {
Callable callable = CodeFactory::ToNumber(isolate());
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 18c7331219..9e2a499bc6 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -12,17 +12,13 @@
namespace v8 {
namespace internal {
-
-// Forward declarations.
-class TypeCache;
-
-
namespace compiler {
// Forward declarations.
class RepresentationChanger;
class RepresentationSelector;
class SourcePositionTable;
+class TypeCache;
class SimplifiedLowering final {
public:
@@ -45,6 +41,9 @@ class SimplifiedLowering final {
void DoStoreBuffer(Node* node);
void DoShift(Node* node, Operator const* op, Type* rhs_type);
void DoStringToNumber(Node* node);
+ void DoIntegral32ToBit(Node* node);
+ void DoOrderedNumberToBit(Node* node);
+ void DoNumberToBit(Node* node);
private:
JSGraph* const jsgraph_;
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index d8bd1e0232..d172adcf60 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -9,8 +9,8 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
#include "src/conversions-inl.h"
-#include "src/type-cache.h"
namespace v8 {
namespace internal {
@@ -126,6 +126,14 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
break;
}
+ case IrOpcode::kCheckedTaggedSignedToInt32: {
+ NodeMatcher m(node->InputAt(0));
+ if (m.IsConvertTaggedHoleToUndefined()) {
+ node->ReplaceInput(0, m.InputAt(0));
+ return Changed(node);
+ }
+ break;
+ }
case IrOpcode::kCheckIf: {
HeapObjectMatcher m(node->InputAt(0));
if (m.Is(factory()->true_value())) {
@@ -142,22 +150,30 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
break;
}
- case IrOpcode::kCheckTaggedPointer: {
+ case IrOpcode::kCheckHeapObject: {
Node* const input = node->InputAt(0);
if (DecideObjectIsSmi(input) == Decision::kFalse) {
ReplaceWithValue(node, input);
return Replace(input);
}
+ NodeMatcher m(input);
+ if (m.IsCheckHeapObject()) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
break;
}
- case IrOpcode::kCheckTaggedSigned: {
+ case IrOpcode::kCheckSmi: {
Node* const input = node->InputAt(0);
if (DecideObjectIsSmi(input) == Decision::kTrue) {
ReplaceWithValue(node, input);
return Replace(input);
}
NodeMatcher m(input);
- if (m.IsConvertTaggedHoleToUndefined()) {
+ if (m.IsCheckSmi()) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ } else if (m.IsConvertTaggedHoleToUndefined()) {
node->ReplaceInput(0, m.InputAt(0));
return Changed(node);
}
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index cf0c3deb56..400db97bdc 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -7,7 +7,7 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/types.h"
+#include "src/compiler/types.h"
namespace v8 {
namespace internal {
@@ -208,8 +208,7 @@ CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator* op) {
}
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kChangeFloat64ToTagged ||
- op->opcode() == IrOpcode::kCheckedInt32Mul ||
+ DCHECK(op->opcode() == IrOpcode::kCheckedInt32Mul ||
op->opcode() == IrOpcode::kCheckedFloat64ToInt32 ||
op->opcode() == IrOpcode::kCheckedTaggedToInt32);
return OpParameter<CheckForMinusZeroMode>(op);
@@ -332,6 +331,16 @@ NumberOperationHint NumberOperationHintOf(const Operator* op) {
return OpParameter<NumberOperationHint>(op);
}
+PretenureFlag PretenureFlagOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kAllocate, op->opcode());
+ return OpParameter<PretenureFlag>(op);
+}
+
+UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kStringFromCodePoint);
+ return OpParameter<UnicodeEncoding>(op);
+}
+
#define PURE_OP_LIST(V) \
V(BooleanNot, Operator::kNoProperties, 1, 0) \
V(NumberEqual, Operator::kCommutative, 2, 0) \
@@ -381,6 +390,7 @@ NumberOperationHint NumberOperationHintOf(const Operator* op) {
V(NumberTan, Operator::kNoProperties, 1, 0) \
V(NumberTanh, Operator::kNoProperties, 1, 0) \
V(NumberTrunc, Operator::kNoProperties, 1, 0) \
+ V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
V(NumberToInt32, Operator::kNoProperties, 1, 0) \
V(NumberToUint32, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
@@ -393,11 +403,13 @@ NumberOperationHint NumberOperationHintOf(const Operator* op) {
V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeFloat64ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
@@ -418,22 +430,25 @@ NumberOperationHint NumberOperationHintOf(const Operator* op) {
V(SpeculativeNumberLessThan) \
V(SpeculativeNumberLessThanOrEqual)
-#define CHECKED_OP_LIST(V) \
- V(CheckBounds, 2, 1) \
- V(CheckIf, 1, 0) \
- V(CheckNumber, 1, 1) \
- V(CheckString, 1, 1) \
- V(CheckTaggedHole, 1, 1) \
- V(CheckTaggedPointer, 1, 1) \
- V(CheckTaggedSigned, 1, 1) \
- V(CheckedInt32Add, 2, 1) \
- V(CheckedInt32Sub, 2, 1) \
- V(CheckedInt32Div, 2, 1) \
- V(CheckedInt32Mod, 2, 1) \
- V(CheckedUint32Div, 2, 1) \
- V(CheckedUint32Mod, 2, 1) \
- V(CheckedUint32ToInt32, 1, 1) \
- V(CheckedTaggedSignedToInt32, 1, 1) \
+#define CHECKED_OP_LIST(V) \
+ V(CheckBounds, 2, 1) \
+ V(CheckHeapObject, 1, 1) \
+ V(CheckIf, 1, 0) \
+ V(CheckNumber, 1, 1) \
+ V(CheckSmi, 1, 1) \
+ V(CheckString, 1, 1) \
+ V(CheckTaggedHole, 1, 1) \
+ V(CheckedInt32Add, 2, 1) \
+ V(CheckedInt32Sub, 2, 1) \
+ V(CheckedInt32Div, 2, 1) \
+ V(CheckedInt32Mod, 2, 1) \
+ V(CheckedUint32Div, 2, 1) \
+ V(CheckedUint32Mod, 2, 1) \
+ V(CheckedUint32ToInt32, 1, 1) \
+ V(CheckedUint32ToTaggedSigned, 1, 1) \
+ V(CheckedInt32ToTaggedSigned, 1, 1) \
+ V(CheckedTaggedSignedToInt32, 1, 1) \
+ V(CheckedTaggedToTaggedSigned, 1, 1) \
V(CheckedTruncateTaggedToWord32, 1, 1)
struct SimplifiedOperatorGlobalCache final {
@@ -458,18 +473,24 @@ struct SimplifiedOperatorGlobalCache final {
CHECKED_OP_LIST(CHECKED)
#undef CHECKED
- template <CheckForMinusZeroMode kMode>
- struct ChangeFloat64ToTaggedOperator final
- : public Operator1<CheckForMinusZeroMode> {
- ChangeFloat64ToTaggedOperator()
- : Operator1<CheckForMinusZeroMode>(
- IrOpcode::kChangeFloat64ToTagged, Operator::kPure,
- "ChangeFloat64ToTagged", 1, 0, 0, 1, 0, 0, kMode) {}
+ template <UnicodeEncoding kEncoding>
+ struct StringFromCodePointOperator final : public Operator1<UnicodeEncoding> {
+ StringFromCodePointOperator()
+ : Operator1<UnicodeEncoding>(IrOpcode::kStringFromCodePoint,
+ Operator::kPure, "StringFromCodePoint", 1,
+ 0, 0, 1, 0, 0, kEncoding) {}
+ };
+ StringFromCodePointOperator<UnicodeEncoding::UTF16>
+ kStringFromCodePointOperatorUTF16;
+ StringFromCodePointOperator<UnicodeEncoding::UTF32>
+ kStringFromCodePointOperatorUTF32;
+
+ struct ArrayBufferWasNeuteredOperator final : public Operator {
+ ArrayBufferWasNeuteredOperator()
+ : Operator(IrOpcode::kArrayBufferWasNeutered, Operator::kEliminatable,
+ "ArrayBufferWasNeutered", 1, 1, 1, 1, 1, 0) {}
};
- ChangeFloat64ToTaggedOperator<CheckForMinusZeroMode::kCheckForMinusZero>
- kChangeFloat64ToTaggedCheckForMinusZeroOperator;
- ChangeFloat64ToTaggedOperator<CheckForMinusZeroMode::kDontCheckForMinusZero>
- kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
+ ArrayBufferWasNeuteredOperator kArrayBufferWasNeutered;
template <CheckForMinusZeroMode kMode>
struct CheckedInt32MulOperator final
@@ -614,20 +635,9 @@ SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
PURE_OP_LIST(GET_FROM_CACHE)
CHECKED_OP_LIST(GET_FROM_CACHE)
+GET_FROM_CACHE(ArrayBufferWasNeutered)
#undef GET_FROM_CACHE
-const Operator* SimplifiedOperatorBuilder::ChangeFloat64ToTagged(
- CheckForMinusZeroMode mode) {
- switch (mode) {
- case CheckForMinusZeroMode::kCheckForMinusZero:
- return &cache_.kChangeFloat64ToTaggedCheckForMinusZeroOperator;
- case CheckForMinusZeroMode::kDontCheckForMinusZero:
- return &cache_.kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
- }
- UNREACHABLE();
- return nullptr;
-}
-
const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
CheckForMinusZeroMode mode) {
switch (mode) {
@@ -761,6 +771,18 @@ const Operator* SimplifiedOperatorBuilder::StoreBuffer(BufferAccess access) {
return nullptr;
}
+const Operator* SimplifiedOperatorBuilder::StringFromCodePoint(
+ UnicodeEncoding encoding) {
+ switch (encoding) {
+ case UnicodeEncoding::UTF16:
+ return &cache_.kStringFromCodePointOperatorUTF16;
+ case UnicodeEncoding::UTF32:
+ return &cache_.kStringFromCodePointOperatorUTF32;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
#define SPECULATIVE_NUMBER_BINOP(Name) \
const Operator* SimplifiedOperatorBuilder::Name(NumberOperationHint hint) { \
switch (hint) { \
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 5e7fa75827..a904391310 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -8,6 +8,7 @@
#include <iosfwd>
#include "src/compiler/operator.h"
+#include "src/compiler/types.h"
#include "src/handles.h"
#include "src/machine-type.h"
#include "src/objects.h"
@@ -16,10 +17,8 @@ namespace v8 {
namespace internal {
// Forward declarations.
-class Type;
class Zone;
-
namespace compiler {
// Forward declarations.
@@ -184,6 +183,10 @@ std::ostream& operator<<(std::ostream&, NumberOperationHint);
NumberOperationHint NumberOperationHintOf(const Operator* op)
WARN_UNUSED_RESULT;
+PretenureFlag PretenureFlagOf(const Operator* op) WARN_UNUSED_RESULT;
+
+UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
+
// Interface for building simplified operators, which represent the
// medium-level operations of V8, including adding numbers, allocating objects,
// indexing into objects and arrays, etc.
@@ -259,6 +262,7 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* NumberTan();
const Operator* NumberTanh();
const Operator* NumberTrunc();
+ const Operator* NumberToBoolean();
const Operator* NumberToInt32();
const Operator* NumberToUint32();
@@ -287,6 +291,7 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* StringLessThanOrEqual();
const Operator* StringCharCodeAt();
const Operator* StringFromCharCode();
+ const Operator* StringFromCodePoint(UnicodeEncoding encoding);
const Operator* PlainPrimitiveToNumber();
const Operator* PlainPrimitiveToWord32();
@@ -299,19 +304,21 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* ChangeInt31ToTaggedSigned();
const Operator* ChangeInt32ToTagged();
const Operator* ChangeUint32ToTagged();
- const Operator* ChangeFloat64ToTagged(CheckForMinusZeroMode);
+ const Operator* ChangeFloat64ToTagged();
const Operator* ChangeTaggedToBit();
const Operator* ChangeBitToTagged();
const Operator* TruncateTaggedToWord32();
const Operator* TruncateTaggedToFloat64();
+ const Operator* TruncateTaggedToBit();
const Operator* CheckIf();
const Operator* CheckBounds();
const Operator* CheckMaps(int map_input_count);
+
+ const Operator* CheckHeapObject();
const Operator* CheckNumber();
+ const Operator* CheckSmi();
const Operator* CheckString();
- const Operator* CheckTaggedPointer();
- const Operator* CheckTaggedSigned();
const Operator* CheckedInt32Add();
const Operator* CheckedInt32Sub();
@@ -320,11 +327,14 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* CheckedUint32Div();
const Operator* CheckedUint32Mod();
const Operator* CheckedInt32Mul(CheckForMinusZeroMode);
+ const Operator* CheckedInt32ToTaggedSigned();
const Operator* CheckedUint32ToInt32();
+ const Operator* CheckedUint32ToTaggedSigned();
const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode);
const Operator* CheckedTaggedSignedToInt32();
const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode);
const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode);
+ const Operator* CheckedTaggedToTaggedSigned();
const Operator* CheckedTruncateTaggedToWord32();
const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
@@ -338,6 +348,9 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* ObjectIsString();
const Operator* ObjectIsUndetectable();
+ // array-buffer-was-neutered buffer
+ const Operator* ArrayBufferWasNeutered();
+
// ensure-writable-fast-elements object, elements
const Operator* EnsureWritableFastElements();
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
index 79550bd3ff..704f5f63a5 100644
--- a/deps/v8/src/compiler/state-values-utils.h
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -55,7 +55,7 @@ class StateValuesCache {
Zone* zone() { return graph()->zone(); }
JSGraph* js_graph_;
- ZoneHashMap hash_map_;
+ CustomMatcherZoneHashMap hash_map_;
ZoneVector<NodeVector*> working_space_; // One working space per level.
Node* empty_state_values_;
};
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 98904b05b5..196cb0d608 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -72,9 +72,7 @@ namespace compiler {
namespace {
-// 16 bits was chosen fairly arbitrarily; it seems enough now. 8 bits is too
-// few.
-typedef uint16_t StoreOffset;
+typedef uint32_t StoreOffset;
struct UnobservableStore {
NodeId id_;
@@ -171,11 +169,11 @@ class RedundantStoreFinder final {
const UnobservablesSet unobservables_visited_empty_;
};
-// To safely cast an offset from a FieldAccess, which has a wider range
-// (namely int).
+// To safely cast an offset from a FieldAccess, which has a potentially wider
+// range (namely int).
StoreOffset ToOffset(int offset) {
- CHECK(0 <= offset && offset < (1 << 8 * sizeof(StoreOffset)));
- return (StoreOffset)offset;
+ CHECK(0 <= offset);
+ return static_cast<StoreOffset>(offset);
}
StoreOffset ToOffset(const FieldAccess& access) {
@@ -405,11 +403,9 @@ void RedundantStoreFinder::VisitEffectfulNode(Node* node) {
// Mark effect inputs for visiting.
for (int i = 0; i < node->op()->EffectInputCount(); i++) {
Node* input = NodeProperties::GetEffectInput(node, i);
- if (!HasBeenVisited(input)) {
- TRACE(" marking #%d:%s for revisit", input->id(),
- input->op()->mnemonic());
- MarkForRevisit(input);
- }
+ TRACE(" marking #%d:%s for revisit", input->id(),
+ input->op()->mnemonic());
+ MarkForRevisit(input);
}
}
}
diff --git a/deps/v8/src/compiler/store-store-elimination.h b/deps/v8/src/compiler/store-store-elimination.h
index 07ae2c25d1..cda7591fcc 100644
--- a/deps/v8/src/compiler/store-store-elimination.h
+++ b/deps/v8/src/compiler/store-store-elimination.h
@@ -7,7 +7,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/type-cache.cc b/deps/v8/src/compiler/type-cache.cc
index d05aaa1f4d..cd80dc315a 100644
--- a/deps/v8/src/type-cache.cc
+++ b/deps/v8/src/compiler/type-cache.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/type-cache.h"
+#include "src/compiler/type-cache.h"
#include "src/base/lazy-instance.h"
namespace v8 {
namespace internal {
+namespace compiler {
namespace {
@@ -15,9 +16,9 @@ base::LazyInstance<TypeCache>::type kCache = LAZY_INSTANCE_INITIALIZER;
} // namespace
-
// static
TypeCache const& TypeCache::Get() { return kCache.Get(); }
+} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
new file mode 100644
index 0000000000..aa51dacf91
--- /dev/null
+++ b/deps/v8/src/compiler/type-cache.h
@@ -0,0 +1,157 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPE_CACHE_H_
+#define V8_COMPILER_TYPE_CACHE_H_
+
+#include "src/compiler/types.h"
+#include "src/date.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class TypeCache final {
+ private:
+ // This has to be first for the initialization magic to work.
+ AccountingAllocator allocator;
+ Zone zone_;
+
+ public:
+ static TypeCache const& Get();
+
+ TypeCache() : zone_(&allocator) {}
+
+ Type* const kInt8 = CreateRange<int8_t>();
+ Type* const kUint8 = CreateRange<uint8_t>();
+ Type* const kUint8Clamped = kUint8;
+ Type* const kInt16 = CreateRange<int16_t>();
+ Type* const kUint16 = CreateRange<uint16_t>();
+ Type* const kInt32 = Type::Signed32();
+ Type* const kUint32 = Type::Unsigned32();
+ Type* const kFloat32 = Type::Number();
+ Type* const kFloat64 = Type::Number();
+
+ Type* const kSmi = Type::SignedSmall();
+ Type* const kHoleySmi = Type::Union(kSmi, Type::Hole(), zone());
+ Type* const kHeapNumber = Type::Number();
+
+ Type* const kSingletonZero = CreateRange(0.0, 0.0);
+ Type* const kSingletonOne = CreateRange(1.0, 1.0);
+ Type* const kSingletonTen = CreateRange(10.0, 10.0);
+ Type* const kSingletonMinusOne = CreateRange(-1.0, -1.0);
+ Type* const kZeroOrUndefined =
+ Type::Union(kSingletonZero, Type::Undefined(), zone());
+ Type* const kTenOrUndefined =
+ Type::Union(kSingletonTen, Type::Undefined(), zone());
+ Type* const kMinusOneOrZero = CreateRange(-1.0, 0.0);
+ Type* const kMinusOneToOneOrMinusZeroOrNaN = Type::Union(
+ Type::Union(CreateRange(-1.0, 1.0), Type::MinusZero(), zone()),
+ Type::NaN(), zone());
+ Type* const kZeroOrOne = CreateRange(0.0, 1.0);
+ Type* const kZeroOrOneOrNaN = Type::Union(kZeroOrOne, Type::NaN(), zone());
+ Type* const kZeroToThirtyOne = CreateRange(0.0, 31.0);
+ Type* const kZeroToThirtyTwo = CreateRange(0.0, 32.0);
+ Type* const kZeroish =
+ Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
+ Type* const kInteger = CreateRange(-V8_INFINITY, V8_INFINITY);
+ Type* const kIntegerOrMinusZero =
+ Type::Union(kInteger, Type::MinusZero(), zone());
+ Type* const kIntegerOrMinusZeroOrNaN =
+ Type::Union(kIntegerOrMinusZero, Type::NaN(), zone());
+ Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
+ Type* const kPositiveIntegerOrMinusZero =
+ Type::Union(kPositiveInteger, Type::MinusZero(), zone());
+ Type* const kPositiveIntegerOrMinusZeroOrNaN =
+ Type::Union(kPositiveIntegerOrMinusZero, Type::NaN(), zone());
+
+ Type* const kAdditiveSafeInteger =
+ CreateRange(-4503599627370496.0, 4503599627370496.0);
+ Type* const kSafeInteger = CreateRange(-kMaxSafeInteger, kMaxSafeInteger);
+ Type* const kAdditiveSafeIntegerOrMinusZero =
+ Type::Union(kAdditiveSafeInteger, Type::MinusZero(), zone());
+ Type* const kSafeIntegerOrMinusZero =
+ Type::Union(kSafeInteger, Type::MinusZero(), zone());
+ Type* const kPositiveSafeInteger = CreateRange(0.0, kMaxSafeInteger);
+
+ // The FixedArray::length property always containts a smi in the range
+ // [0, FixedArray::kMaxLength].
+ Type* const kFixedArrayLengthType = CreateRange(0.0, FixedArray::kMaxLength);
+
+ // The FixedDoubleArray::length property always containts a smi in the range
+ // [0, FixedDoubleArray::kMaxLength].
+ Type* const kFixedDoubleArrayLengthType =
+ CreateRange(0.0, FixedDoubleArray::kMaxLength);
+
+ // The JSArray::length property always contains a tagged number in the range
+ // [0, kMaxUInt32].
+ Type* const kJSArrayLengthType = Type::Unsigned32();
+
+ // The JSTyped::length property always contains a tagged number in the range
+ // [0, kMaxSmiValue].
+ Type* const kJSTypedArrayLengthType = Type::UnsignedSmall();
+
+ // The String::length property always contains a smi in the range
+ // [0, String::kMaxLength].
+ Type* const kStringLengthType = CreateRange(0.0, String::kMaxLength);
+
+ // The JSDate::day property always contains a tagged number in the range
+ // [1, 31] or NaN.
+ Type* const kJSDateDayType =
+ Type::Union(CreateRange(1, 31.0), Type::NaN(), zone());
+
+ // The JSDate::hour property always contains a tagged number in the range
+ // [0, 23] or NaN.
+ Type* const kJSDateHourType =
+ Type::Union(CreateRange(0, 23.0), Type::NaN(), zone());
+
+ // The JSDate::minute property always contains a tagged number in the range
+ // [0, 59] or NaN.
+ Type* const kJSDateMinuteType =
+ Type::Union(CreateRange(0, 59.0), Type::NaN(), zone());
+
+ // The JSDate::month property always contains a tagged number in the range
+ // [0, 11] or NaN.
+ Type* const kJSDateMonthType =
+ Type::Union(CreateRange(0, 11.0), Type::NaN(), zone());
+
+ // The JSDate::second property always contains a tagged number in the range
+ // [0, 59] or NaN.
+ Type* const kJSDateSecondType = kJSDateMinuteType;
+
+ // The JSDate::value property always contains a tagged number in the range
+ // [-kMaxTimeInMs, kMaxTimeInMs] or NaN.
+ Type* const kJSDateValueType = Type::Union(
+ CreateRange(-DateCache::kMaxTimeInMs, DateCache::kMaxTimeInMs),
+ Type::NaN(), zone());
+
+ // The JSDate::weekday property always contains a tagged number in the range
+ // [0, 6] or NaN.
+ Type* const kJSDateWeekdayType =
+ Type::Union(CreateRange(0, 6.0), Type::NaN(), zone());
+
+ // The JSDate::year property always contains a tagged number in the signed
+ // small range or NaN.
+ Type* const kJSDateYearType =
+ Type::Union(Type::SignedSmall(), Type::NaN(), zone());
+
+ private:
+ template <typename T>
+ Type* CreateRange() {
+ return CreateRange(std::numeric_limits<T>::min(),
+ std::numeric_limits<T>::max());
+ }
+
+ Type* CreateRange(double min, double max) {
+ return Type::Range(min, max, zone());
+ }
+
+ Zone* zone() { return &zone_; }
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_TYPE_CACHE_H_
diff --git a/deps/v8/src/compiler/type-hint-analyzer.cc b/deps/v8/src/compiler/type-hint-analyzer.cc
index 8e7a0f3eae..a668a48ad8 100644
--- a/deps/v8/src/compiler/type-hint-analyzer.cc
+++ b/deps/v8/src/compiler/type-hint-analyzer.cc
@@ -6,8 +6,8 @@
#include "src/assembler.h"
#include "src/code-stubs.h"
-#include "src/compiler/type-hints.h"
#include "src/ic/ic-state.h"
+#include "src/type-hints.h"
namespace v8 {
namespace internal {
@@ -15,17 +15,21 @@ namespace compiler {
namespace {
-BinaryOperationHint ToBinaryOperationHint(BinaryOpICState::Kind kind) {
+BinaryOperationHint ToBinaryOperationHint(Token::Value op,
+ BinaryOpICState::Kind kind) {
switch (kind) {
case BinaryOpICState::NONE:
return BinaryOperationHint::kNone;
case BinaryOpICState::SMI:
return BinaryOperationHint::kSignedSmall;
case BinaryOpICState::INT32:
- return BinaryOperationHint::kSigned32;
+ return (Token::IsTruncatingBinaryOp(op) && SmiValuesAre31Bits())
+ ? BinaryOperationHint::kNumberOrOddball
+ : BinaryOperationHint::kSigned32;
case BinaryOpICState::NUMBER:
return BinaryOperationHint::kNumberOrOddball;
case BinaryOpICState::STRING:
+ return BinaryOperationHint::kString;
case BinaryOpICState::GENERIC:
return BinaryOperationHint::kAny;
}
@@ -66,7 +70,7 @@ bool TypeHintAnalysis::GetBinaryOperationHint(TypeFeedbackId id,
Handle<Code> code = i->second;
DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
BinaryOpICState state(code->GetIsolate(), code->extra_ic_state());
- *hint = ToBinaryOperationHint(state.kind());
+ *hint = ToBinaryOperationHint(state.op(), state.kind());
return true;
}
@@ -132,20 +136,6 @@ TypeHintAnalysis* TypeHintAnalyzer::Analyze(Handle<Code> code) {
return new (zone()) TypeHintAnalysis(infos, zone());
}
-// Helper function to transform the feedback to BinaryOperationHint.
-BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
- switch (type_feedback) {
- case BinaryOperationFeedback::kSignedSmall:
- return BinaryOperationHint::kSignedSmall;
- case BinaryOperationFeedback::kNumber:
- return BinaryOperationHint::kNumberOrOddball;
- case BinaryOperationFeedback::kAny:
- default:
- return BinaryOperationHint::kAny;
- }
- UNREACHABLE();
- return BinaryOperationHint::kNone;
-}
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/type-hint-analyzer.h b/deps/v8/src/compiler/type-hint-analyzer.h
index e48938a976..354f8943bb 100644
--- a/deps/v8/src/compiler/type-hint-analyzer.h
+++ b/deps/v8/src/compiler/type-hint-analyzer.h
@@ -5,9 +5,9 @@
#ifndef V8_COMPILER_TYPE_HINT_ANALYZER_H_
#define V8_COMPILER_TYPE_HINT_ANALYZER_H_
-#include "src/compiler/type-hints.h"
#include "src/handles.h"
-#include "src/zone-containers.h"
+#include "src/type-hints.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -50,8 +50,6 @@ class TypeHintAnalyzer final {
DISALLOW_COPY_AND_ASSIGN(TypeHintAnalyzer);
};
-BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
new file mode 100644
index 0000000000..c5e8648ca5
--- /dev/null
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -0,0 +1,253 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/typed-optimization.h"
+
+#include "src/compilation-dependencies.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
+#include "src/isolate-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+TypedOptimization::TypedOptimization(Editor* editor,
+ CompilationDependencies* dependencies,
+ Flags flags, JSGraph* jsgraph)
+ : AdvancedReducer(editor),
+ dependencies_(dependencies),
+ flags_(flags),
+ jsgraph_(jsgraph),
+ true_type_(Type::Constant(factory()->true_value(), graph()->zone())),
+ false_type_(Type::Constant(factory()->false_value(), graph()->zone())),
+ type_cache_(TypeCache::Get()) {}
+
+TypedOptimization::~TypedOptimization() {}
+
+Reduction TypedOptimization::Reduce(Node* node) {
+ // Check if the output type is a singleton. In that case we already know the
+ // result value and can simply replace the node if it's eliminable.
+ if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
+ node->op()->HasProperty(Operator::kEliminatable)) {
+ // TODO(v8:5303): We must not eliminate FinishRegion here. This special
+ // case can be removed once we have separate operators for value and
+ // effect regions.
+ if (node->opcode() == IrOpcode::kFinishRegion) return NoChange();
+ // We can only constant-fold nodes here, that are known to not cause any
+ // side-effect, may it be a JavaScript observable side-effect or a possible
+ // eager deoptimization exit (i.e. {node} has an operator that doesn't have
+ // the Operator::kNoDeopt property).
+ Type* upper = NodeProperties::GetType(node);
+ if (upper->IsInhabited()) {
+ if (upper->IsConstant()) {
+ Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::MinusZero())) {
+ Node* replacement = jsgraph()->Constant(factory()->minus_zero_value());
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::NaN())) {
+ Node* replacement = jsgraph()->NaNConstant();
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::Null())) {
+ Node* replacement = jsgraph()->NullConstant();
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::PlainNumber()) &&
+ upper->Min() == upper->Max()) {
+ Node* replacement = jsgraph()->Constant(upper->Min());
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::Undefined())) {
+ Node* replacement = jsgraph()->UndefinedConstant();
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ }
+ }
+ }
+ switch (node->opcode()) {
+ case IrOpcode::kCheckMaps:
+ return ReduceCheckMaps(node);
+ case IrOpcode::kCheckString:
+ return ReduceCheckString(node);
+ case IrOpcode::kLoadField:
+ return ReduceLoadField(node);
+ case IrOpcode::kNumberCeil:
+ case IrOpcode::kNumberFloor:
+ case IrOpcode::kNumberRound:
+ case IrOpcode::kNumberTrunc:
+ return ReduceNumberRoundop(node);
+ case IrOpcode::kPhi:
+ return ReducePhi(node);
+ case IrOpcode::kSelect:
+ return ReduceSelect(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+namespace {
+
+MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
+ if (object_type->IsConstant() &&
+ object_type->AsConstant()->Value()->IsHeapObject()) {
+ Handle<Map> object_map(
+ Handle<HeapObject>::cast(object_type->AsConstant()->Value())->map());
+ if (object_map->is_stable()) return object_map;
+ }
+ return MaybeHandle<Map>();
+}
+
+} // namespace
+
+Reduction TypedOptimization::ReduceCheckMaps(Node* node) {
+ // The CheckMaps(o, ...map...) can be eliminated if map is stable,
+ // o has type Constant(object) and map == object->map, and either
+ // (1) map cannot transition further, or
+ // (2) we can add a code dependency on the stability of map
+ // (to guard the Constant type information).
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Type* const object_type = NodeProperties::GetType(object);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Handle<Map> object_map;
+ if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
+ for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
+ Node* const map = NodeProperties::GetValueInput(node, i);
+ Type* const map_type = NodeProperties::GetType(map);
+ if (map_type->IsConstant() &&
+ map_type->AsConstant()->Value().is_identical_to(object_map)) {
+ if (object_map->CanTransition()) {
+ dependencies()->AssumeMapStable(object_map);
+ }
+ return Replace(effect);
+ }
+ }
+ }
+ return NoChange();
+}
+
+Reduction TypedOptimization::ReduceCheckString(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::String())) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+Reduction TypedOptimization::ReduceLoadField(Node* node) {
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Type* const object_type = NodeProperties::GetType(object);
+ FieldAccess const& access = FieldAccessOf(node->op());
+ if (access.base_is_tagged == kTaggedBase &&
+ access.offset == HeapObject::kMapOffset) {
+ // We can replace LoadField[Map](o) with map if is stable, and
+ // o has type Constant(object) and map == object->map, and either
+ // (1) map cannot transition further, or
+ // (2) deoptimization is enabled and we can add a code dependency on the
+ // stability of map (to guard the Constant type information).
+ Handle<Map> object_map;
+ if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
+ if (object_map->CanTransition()) {
+ if (flags() & kDeoptimizationEnabled) {
+ dependencies()->AssumeMapStable(object_map);
+ } else {
+ return NoChange();
+ }
+ }
+ Node* const value = jsgraph()->HeapConstant(object_map);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
+ return NoChange();
+}
+
+Reduction TypedOptimization::ReduceNumberRoundop(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+Reduction TypedOptimization::ReducePhi(Node* node) {
+ // Try to narrow the type of the Phi {node}, which might be more precise now
+ // after lowering based on types, i.e. a SpeculativeNumberAdd has a more
+ // precise type than the JSAdd that was in the graph when the Typer was run.
+ DCHECK_EQ(IrOpcode::kPhi, node->opcode());
+ int arity = node->op()->ValueInputCount();
+ Type* type = NodeProperties::GetType(node->InputAt(0));
+ for (int i = 1; i < arity; ++i) {
+ type = Type::Union(type, NodeProperties::GetType(node->InputAt(i)),
+ graph()->zone());
+ }
+ Type* const node_type = NodeProperties::GetType(node);
+ if (!node_type->Is(type)) {
+ type = Type::Intersect(node_type, type, graph()->zone());
+ NodeProperties::SetType(node, type);
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+Reduction TypedOptimization::ReduceSelect(Node* node) {
+ DCHECK_EQ(IrOpcode::kSelect, node->opcode());
+ Node* const condition = NodeProperties::GetValueInput(node, 0);
+ Type* const condition_type = NodeProperties::GetType(condition);
+ Node* const vtrue = NodeProperties::GetValueInput(node, 1);
+ Type* const vtrue_type = NodeProperties::GetType(vtrue);
+ Node* const vfalse = NodeProperties::GetValueInput(node, 2);
+ Type* const vfalse_type = NodeProperties::GetType(vfalse);
+ if (condition_type->Is(true_type_)) {
+ // Select(condition:true, vtrue, vfalse) => vtrue
+ return Replace(vtrue);
+ }
+ if (condition_type->Is(false_type_)) {
+ // Select(condition:false, vtrue, vfalse) => vfalse
+ return Replace(vfalse);
+ }
+ if (vtrue_type->Is(true_type_) && vfalse_type->Is(false_type_)) {
+ // Select(condition, vtrue:true, vfalse:false) => condition
+ return Replace(condition);
+ }
+ if (vtrue_type->Is(false_type_) && vfalse_type->Is(true_type_)) {
+ // Select(condition, vtrue:false, vfalse:true) => BooleanNot(condition)
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
+ }
+ // Try to narrow the type of the Select {node}, which might be more precise
+ // now after lowering based on types.
+ Type* type = Type::Union(vtrue_type, vfalse_type, graph()->zone());
+ Type* const node_type = NodeProperties::GetType(node);
+ if (!node_type->Is(type)) {
+ type = Type::Intersect(node_type, type, graph()->zone());
+ NodeProperties::SetType(node, type);
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+Factory* TypedOptimization::factory() const { return isolate()->factory(); }
+
+Graph* TypedOptimization::graph() const { return jsgraph()->graph(); }
+
+Isolate* TypedOptimization::isolate() const { return jsgraph()->isolate(); }
+
+SimplifiedOperatorBuilder* TypedOptimization::simplified() const {
+ return jsgraph()->simplified();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
new file mode 100644
index 0000000000..54d780c33e
--- /dev/null
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -0,0 +1,73 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPED_OPTIMIZATION_H_
+#define V8_COMPILER_TYPED_OPTIMIZATION_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class Factory;
+class Isolate;
+
+namespace compiler {
+
+// Forward declarations.
+class JSGraph;
+class SimplifiedOperatorBuilder;
+class TypeCache;
+
+class TypedOptimization final : public AdvancedReducer {
+ public:
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ TypedOptimization(Editor* editor, CompilationDependencies* dependencies,
+ Flags flags, JSGraph* jsgraph);
+ ~TypedOptimization();
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceCheckMaps(Node* node);
+ Reduction ReduceCheckString(Node* node);
+ Reduction ReduceLoadField(Node* node);
+ Reduction ReduceNumberRoundop(Node* node);
+ Reduction ReducePhi(Node* node);
+ Reduction ReduceSelect(Node* node);
+
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Factory* factory() const;
+ Flags flags() const { return flags_; }
+ Graph* graph() const;
+ Isolate* isolate() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ SimplifiedOperatorBuilder* simplified() const;
+
+ CompilationDependencies* const dependencies_;
+ Flags const flags_;
+ JSGraph* const jsgraph_;
+ Type* const true_type_;
+ Type* const false_type_;
+ TypeCache const& type_cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypedOptimization);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(TypedOptimization::Flags)
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_TYPED_OPTIMIZATION_H_
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index b4051e5547..ec1197bb80 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -16,8 +16,8 @@
#include "src/compiler/node.h"
#include "src/compiler/operation-typer.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
#include "src/objects-inl.h"
-#include "src/type-cache.h"
namespace v8 {
namespace internal {
@@ -88,8 +88,6 @@ class Typer::Visitor : public Reducer {
COMMON_OP_LIST(DECLARE_CASE)
SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_CASE)
SIMPLIFIED_OTHER_OP_LIST(DECLARE_CASE)
- MACHINE_OP_LIST(DECLARE_CASE)
- MACHINE_SIMD_OP_LIST(DECLARE_CASE)
JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
JS_OBJECT_OP_LIST(DECLARE_CASE)
JS_CONTEXT_OP_LIST(DECLARE_CASE)
@@ -131,6 +129,8 @@ class Typer::Visitor : public Reducer {
DECLARE_CASE(End)
SIMPLIFIED_CHANGE_OP_LIST(DECLARE_CASE)
SIMPLIFIED_CHECKED_OP_LIST(DECLARE_CASE)
+ MACHINE_SIMD_OP_LIST(DECLARE_CASE)
+ MACHINE_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
break;
}
@@ -151,8 +151,6 @@ class Typer::Visitor : public Reducer {
COMMON_OP_LIST(DECLARE_CASE)
SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_CASE)
SIMPLIFIED_OTHER_OP_LIST(DECLARE_CASE)
- MACHINE_OP_LIST(DECLARE_CASE)
- MACHINE_SIMD_OP_LIST(DECLARE_CASE)
JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
JS_OBJECT_OP_LIST(DECLARE_CASE)
JS_CONTEXT_OP_LIST(DECLARE_CASE)
@@ -194,6 +192,8 @@ class Typer::Visitor : public Reducer {
DECLARE_CASE(End)
SIMPLIFIED_CHANGE_OP_LIST(DECLARE_CASE)
SIMPLIFIED_CHECKED_OP_LIST(DECLARE_CASE)
+ MACHINE_SIMD_OP_LIST(DECLARE_CASE)
+ MACHINE_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
break;
}
@@ -214,8 +214,6 @@ class Typer::Visitor : public Reducer {
COMMON_OP_LIST(DECLARE_METHOD)
SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_OTHER_OP_LIST(DECLARE_METHOD)
- MACHINE_OP_LIST(DECLARE_METHOD)
- MACHINE_SIMD_OP_LIST(DECLARE_METHOD)
JS_OP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
@@ -229,7 +227,6 @@ class Typer::Visitor : public Reducer {
return TypeOrNone(operand_node);
}
- Type* WrapContextTypeForInput(Node* node);
Type* Weaken(Node* node, Type* current_type, Type* previous_type);
Zone* zone() { return typer_->zone(); }
@@ -298,6 +295,7 @@ class Typer::Visitor : public Reducer {
static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
static Type* StringFromCharCodeTyper(Type*, Typer*);
+ static Type* StringFromCodePointTyper(Type*, Typer*);
Reduction UpdateType(Node* node, Type* current) {
if (NodeProperties::IsTyped(node)) {
@@ -426,8 +424,8 @@ Type* Typer::Visitor::ToBoolean(Type* type, Typer* t) {
if (type->Is(Type::Boolean())) return type;
if (type->Is(t->falsish_)) return t->singleton_false_;
if (type->Is(t->truish_)) return t->singleton_true_;
- if (type->Is(Type::PlainNumber()) && (type->Max() < 0 || 0 < type->Min())) {
- return t->singleton_true_; // Ruled out nan, -0 and +0.
+ if (type->Is(Type::Number())) {
+ return t->operation_typer()->NumberToBoolean(type);
}
return Type::Boolean();
}
@@ -519,8 +517,7 @@ Type* Typer::Visitor::ObjectIsReceiver(Type* type, Typer* t) {
Type* Typer::Visitor::ObjectIsSmi(Type* type, Typer* t) {
- if (type->Is(Type::TaggedSigned())) return t->singleton_true_;
- if (type->Is(Type::TaggedPointer())) return t->singleton_false_;
+ if (!type->Maybe(Type::SignedSmall())) return t->singleton_false_;
return Type::Boolean();
}
@@ -554,11 +551,15 @@ Type* Typer::Visitor::TypeParameter(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeRetain(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
Type* Typer::Visitor::TypeInt32Constant(Node* node) {
double number = OpParameter<int32_t>(node);
return Type::Intersect(Type::Range(number, number, zone()),
- Type::UntaggedIntegral32(), zone());
+ Type::Integral32(), zone());
}
@@ -567,24 +568,25 @@ Type* Typer::Visitor::TypeInt64Constant(Node* node) {
return Type::Internal(); // TODO(rossberg): Add int64 bitset type?
}
-// TODO(gdeepti) : Fix this to do something meaningful.
Type* Typer::Visitor::TypeRelocatableInt32Constant(Node* node) {
- return Type::Internal();
+ UNREACHABLE();
+ return nullptr;
}
Type* Typer::Visitor::TypeRelocatableInt64Constant(Node* node) {
- return Type::Internal();
+ UNREACHABLE();
+ return nullptr;
}
Type* Typer::Visitor::TypeFloat32Constant(Node* node) {
- return Type::Intersect(Type::Of(OpParameter<float>(node), zone()),
- Type::UntaggedFloat32(), zone());
+ UNREACHABLE();
+ return nullptr;
}
Type* Typer::Visitor::TypeFloat64Constant(Node* node) {
- return Type::Intersect(Type::Of(OpParameter<double>(node), zone()),
- Type::UntaggedFloat64(), zone());
+ UNREACHABLE();
+ return nullptr;
}
@@ -633,16 +635,22 @@ Type* Typer::Visitor::TypeInductionVariablePhi(Node* node) {
// do not apply and we cannot do anything).
if (!initial_type->Is(typer_->cache_.kInteger) ||
!increment_type->Is(typer_->cache_.kInteger)) {
- // Fallback to normal phi typing.
- Type* type = Operand(node, 0);
- for (int i = 1; i < arity; ++i) {
+ // Fallback to normal phi typing, but ensure monotonicity.
+ // (Unfortunately, without baking in the previous type, monotonicity might
+ // be violated because we might not yet have retyped the incrementing
+ // operation even though the increment's type might been already reflected
+ // in the induction variable phi.)
+ Type* type = NodeProperties::IsTyped(node) ? NodeProperties::GetType(node)
+ : Type::None();
+ for (int i = 0; i < arity; ++i) {
type = Type::Union(type, Operand(node, i), zone());
}
return type;
}
// If we do not have enough type information for the initial value or
// the increment, just return the initial value's type.
- if (!initial_type->IsInhabited() || !increment_type->IsInhabited()) {
+ if (!initial_type->IsInhabited() ||
+ increment_type->Is(typer_->cache_.kSingletonZero)) {
return initial_type;
}
@@ -1219,16 +1227,24 @@ Type* Typer::Visitor::TypeJSHasProperty(Node* node) { return Type::Boolean(); }
Type* Typer::Visitor::TypeJSInstanceOf(Node* node) { return Type::Boolean(); }
+Type* Typer::Visitor::TypeJSOrdinaryHasInstance(Node* node) {
+ return Type::Boolean();
+}
+
// JS context operators.
Type* Typer::Visitor::TypeJSLoadContext(Node* node) {
ContextAccess const& access = ContextAccessOf(node->op());
- if (access.index() == Context::EXTENSION_INDEX) {
- return Type::TaggedPointer();
+ switch (access.index()) {
+ case Context::PREVIOUS_INDEX:
+ case Context::NATIVE_CONTEXT_INDEX:
+ return Type::OtherInternal();
+ case Context::CLOSURE_INDEX:
+ return Type::Function();
+ default:
+ return Type::Any();
}
- // Since contexts are mutable, we just return the top.
- return Type::Any();
}
@@ -1238,42 +1254,26 @@ Type* Typer::Visitor::TypeJSStoreContext(Node* node) {
}
-Type* Typer::Visitor::WrapContextTypeForInput(Node* node) {
- Type* outer = TypeOrNone(NodeProperties::GetContextInput(node));
- if (outer->Is(Type::None())) {
- return Type::None();
- } else {
- DCHECK(outer->Maybe(Type::OtherInternal()));
- return Type::Context(outer, zone());
- }
-}
-
-
Type* Typer::Visitor::TypeJSCreateFunctionContext(Node* node) {
- return WrapContextTypeForInput(node);
+ return Type::OtherInternal();
}
-
Type* Typer::Visitor::TypeJSCreateCatchContext(Node* node) {
- return WrapContextTypeForInput(node);
+ return Type::OtherInternal();
}
-
Type* Typer::Visitor::TypeJSCreateWithContext(Node* node) {
- return WrapContextTypeForInput(node);
+ return Type::OtherInternal();
}
-
Type* Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
- return WrapContextTypeForInput(node);
+ return Type::OtherInternal();
}
-
Type* Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
- return WrapContextTypeForInput(node);
+ return Type::OtherInternal();
}
-
// JS other operators.
@@ -1283,16 +1283,13 @@ Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
- if (fun->IsFunction()) {
- return fun->AsFunction()->Result();
- }
if (fun->IsConstant() && fun->AsConstant()->Value()->IsJSFunction()) {
Handle<JSFunction> function =
Handle<JSFunction>::cast(fun->AsConstant()->Value());
if (function->shared()->HasBuiltinFunctionId()) {
switch (function->shared()->builtin_function_id()) {
case kMathRandom:
- return Type::OrderedNumber();
+ return Type::PlainNumber();
case kMathFloor:
case kMathCeil:
case kMathRound:
@@ -1332,7 +1329,32 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
return Type::Signed32();
case kMathClz32:
return t->cache_.kZeroToThirtyTwo;
+ // Date functions.
+ case kDateGetDate:
+ return t->cache_.kJSDateDayType;
+ case kDateGetDay:
+ return t->cache_.kJSDateWeekdayType;
+ case kDateGetFullYear:
+ return t->cache_.kJSDateYearType;
+ case kDateGetHours:
+ return t->cache_.kJSDateHourType;
+ case kDateGetMilliseconds:
+ return Type::Union(Type::Range(0.0, 999.0, t->zone()), Type::NaN(),
+ t->zone());
+ case kDateGetMinutes:
+ return t->cache_.kJSDateMinuteType;
+ case kDateGetMonth:
+ return t->cache_.kJSDateMonthType;
+ case kDateGetSeconds:
+ return t->cache_.kJSDateSecondType;
+ case kDateGetTime:
+ return t->cache_.kJSDateValueType;
// Number functions.
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ return Type::Boolean();
case kNumberParseInt:
return t->cache_.kIntegerOrMinusZeroOrNaN;
case kNumberToString:
@@ -1348,15 +1370,25 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
case kStringToLowerCase:
case kStringToUpperCase:
return Type::String();
+
+ case kStringIteratorNext:
+ return Type::OtherObject();
+
// Array functions.
case kArrayIndexOf:
case kArrayLastIndexOf:
return Type::Range(-1, kMaxSafeInteger, t->zone());
case kArrayPush:
return t->cache_.kPositiveSafeInteger;
+
// Object functions.
case kObjectHasOwnProperty:
return Type::Boolean();
+
+ // Function functions.
+ case kFunctionHasInstance:
+ return Type::Boolean();
+
// Global functions.
case kGlobalDecodeURI:
case kGlobalDecodeURIComponent:
@@ -1365,6 +1397,9 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
case kGlobalEscape:
case kGlobalUnescape:
return Type::String();
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ return Type::Boolean();
default:
break;
}
@@ -1432,21 +1467,13 @@ Type* Typer::Visitor::TypeJSForInNext(Node* node) {
Type* Typer::Visitor::TypeJSForInPrepare(Node* node) {
STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
- Factory* const f = isolate()->factory();
- Type* const cache_type = Type::Union(
- typer_->cache_.kSmi, Type::Class(f->meta_map(), zone()), zone());
- Type* const cache_array = Type::Class(f->fixed_array_map(), zone());
+ Type* const cache_type =
+ Type::Union(typer_->cache_.kSmi, Type::OtherInternal(), zone());
+ Type* const cache_array = Type::OtherInternal();
Type* const cache_length = typer_->cache_.kFixedArrayLengthType;
return Type::Tuple(cache_type, cache_array, cache_length, zone());
}
-Type* Typer::Visitor::TypeJSForInDone(Node* node) { return Type::Boolean(); }
-
-Type* Typer::Visitor::TypeJSForInStep(Node* node) {
- STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
- return Type::Range(1, FixedArray::kMaxLength + 1, zone());
-}
-
Type* Typer::Visitor::TypeJSLoadMessage(Node* node) { return Type::Any(); }
@@ -1541,6 +1568,19 @@ Type* Typer::Visitor::StringFromCharCodeTyper(Type* type, Typer* t) {
return Type::String();
}
+Type* Typer::Visitor::StringFromCodePointTyper(Type* type, Typer* t) {
+ type = NumberToUint32(ToNumber(type, t), t);
+ Factory* f = t->isolate()->factory();
+ double min = type->Min();
+ double max = type->Max();
+ if (min == max) {
+ uint32_t code = static_cast<uint32_t>(min) & String::kMaxUtf16CodeUnitU;
+ Handle<String> string = f->LookupSingleCharacterStringFromCode(code);
+ return Type::Constant(string, t->zone());
+ }
+ return Type::String();
+}
+
Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
// TODO(bmeurer): We could do better here based on inputs.
return Type::Range(0, kMaxUInt16, zone());
@@ -1550,17 +1590,31 @@ Type* Typer::Visitor::TypeStringFromCharCode(Node* node) {
return TypeUnaryOp(node, StringFromCharCodeTyper);
}
+Type* Typer::Visitor::TypeStringFromCodePoint(Node* node) {
+ return TypeUnaryOp(node, StringFromCodePointTyper);
+}
+
Type* Typer::Visitor::TypeCheckBounds(Node* node) {
Type* index = Operand(node, 0);
Type* length = Operand(node, 1);
index = Type::Intersect(index, Type::Integral32(), zone());
if (!index->IsInhabited() || !length->IsInhabited()) return Type::None();
double min = std::max(index->Min(), 0.0);
- double max = std::min(index->Max(), length->Min() - 1);
+ double max = std::min(index->Max(), length->Max() - 1);
if (max < min) return Type::None();
return Type::Range(min, max, zone());
}
+Type* Typer::Visitor::TypeCheckHeapObject(Node* node) {
+ Type* type = Operand(node, 0);
+ return type;
+}
+
+Type* Typer::Visitor::TypeCheckIf(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
Type* Typer::Visitor::TypeCheckMaps(Node* node) {
UNREACHABLE();
return nullptr;
@@ -1571,24 +1625,14 @@ Type* Typer::Visitor::TypeCheckNumber(Node* node) {
return Type::Intersect(arg, Type::Number(), zone());
}
-Type* Typer::Visitor::TypeCheckString(Node* node) {
+Type* Typer::Visitor::TypeCheckSmi(Node* node) {
Type* arg = Operand(node, 0);
- return Type::Intersect(arg, Type::String(), zone());
+ return Type::Intersect(arg, Type::SignedSmall(), zone());
}
-Type* Typer::Visitor::TypeCheckIf(Node* node) {
- UNREACHABLE();
- return nullptr;
-}
-
-Type* Typer::Visitor::TypeCheckTaggedPointer(Node* node) {
- Type* arg = Operand(node, 0);
- return Type::Intersect(arg, Type::TaggedPointer(), zone());
-}
-
-Type* Typer::Visitor::TypeCheckTaggedSigned(Node* node) {
+Type* Typer::Visitor::TypeCheckString(Node* node) {
Type* arg = Operand(node, 0);
- return Type::Intersect(arg, typer_->cache_.kSmi, zone());
+ return Type::Intersect(arg, Type::String(), zone());
}
Type* Typer::Visitor::TypeCheckFloat64Hole(Node* node) {
@@ -1612,7 +1656,7 @@ Type* Typer::Visitor::TypeConvertTaggedHoleToUndefined(Node* node) {
return type;
}
-Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::TaggedPointer(); }
+Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeLoadField(Node* node) {
return FieldAccessOf(node->op()).type;
@@ -1697,652 +1741,13 @@ Type* Typer::Visitor::TypeObjectIsUndetectable(Node* node) {
return TypeUnaryOp(node, ObjectIsUndetectable);
}
-
-// Machine operators.
-
-Type* Typer::Visitor::TypeDebugBreak(Node* node) { return Type::None(); }
-
-Type* Typer::Visitor::TypeComment(Node* node) { return Type::None(); }
-
-Type* Typer::Visitor::TypeRetain(Node* node) {
- UNREACHABLE();
- return nullptr;
-}
-
-Type* Typer::Visitor::TypeUnsafePointerAdd(Node* node) { return Type::None(); }
-
-Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
-
-Type* Typer::Visitor::TypeStackSlot(Node* node) { return Type::Any(); }
-
-Type* Typer::Visitor::TypeStore(Node* node) {
- UNREACHABLE();
- return nullptr;
-}
-
-
-Type* Typer::Visitor::TypeWord32And(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Or(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Xor(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Shl(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Shr(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Sar(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Ror(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Equal(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeWord32Clz(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32Ctz(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeWord32ReverseBits(Node* node) {
- return Type::Integral32();
-}
-
-Type* Typer::Visitor::TypeWord32ReverseBytes(Node* node) {
- return Type::Integral32();
-}
-
-Type* Typer::Visitor::TypeWord32Popcnt(Node* node) {
- return Type::Integral32();
-}
-
-
-Type* Typer::Visitor::TypeWord64And(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Or(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Xor(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Shl(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Shr(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Sar(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Ror(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Clz(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Ctz(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64ReverseBits(Node* node) {
- return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeWord64ReverseBytes(Node* node) {
- return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeWord64Popcnt(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeWord64Equal(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeInt32Add(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeInt32AddWithOverflow(Node* node) {
- return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeInt32Sub(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeInt32SubWithOverflow(Node* node) {
- return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeInt32Mul(Node* node) { return Type::Integral32(); }
-
-Type* Typer::Visitor::TypeInt32MulWithOverflow(Node* node) {
- return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeInt32MulHigh(Node* node) { return Type::Signed32(); }
-
-
-Type* Typer::Visitor::TypeInt32Div(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeInt32Mod(Node* node) { return Type::Integral32(); }
-
-
-Type* Typer::Visitor::TypeInt32LessThan(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeInt32LessThanOrEqual(Node* node) {
+Type* Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
return Type::Boolean();
}
-
-Type* Typer::Visitor::TypeUint32Div(Node* node) { return Type::Unsigned32(); }
-
-
-Type* Typer::Visitor::TypeUint32LessThan(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeUint32LessThanOrEqual(Node* node) {
- return Type::Boolean();
-}
-
-
-Type* Typer::Visitor::TypeUint32Mod(Node* node) { return Type::Unsigned32(); }
-
-
-Type* Typer::Visitor::TypeUint32MulHigh(Node* node) {
- return Type::Unsigned32();
-}
-
-
-Type* Typer::Visitor::TypeInt64Add(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeInt64AddWithOverflow(Node* node) {
- return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeInt64Sub(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeInt64SubWithOverflow(Node* node) {
- return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeInt64Mul(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeInt64Div(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeInt64Mod(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeInt64LessThan(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeInt64LessThanOrEqual(Node* node) {
- return Type::Boolean();
-}
-
-
-Type* Typer::Visitor::TypeUint64Div(Node* node) { return Type::Internal(); }
-
-
-Type* Typer::Visitor::TypeUint64LessThan(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeUint64LessThanOrEqual(Node* node) {
- return Type::Boolean();
-}
-
-
-Type* Typer::Visitor::TypeUint64Mod(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeBitcastWordToTagged(Node* node) {
- return Type::TaggedPointer();
-}
-
-Type* Typer::Visitor::TypeChangeFloat32ToFloat64(Node* node) {
- return Type::Intersect(Type::Number(), Type::UntaggedFloat64(), zone());
-}
-
-
-Type* Typer::Visitor::TypeChangeFloat64ToInt32(Node* node) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
-}
-
-Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
- return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
- zone());
-}
-
-Type* Typer::Visitor::TypeTruncateFloat64ToUint32(Node* node) {
- return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
- zone());
-}
-
-Type* Typer::Visitor::TypeTruncateFloat32ToInt32(Node* node) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
-}
-
-
-Type* Typer::Visitor::TypeTruncateFloat32ToUint32(Node* node) {
- return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
- zone());
-}
-
-
-Type* Typer::Visitor::TypeTryTruncateFloat32ToInt64(Node* node) {
- return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeTryTruncateFloat64ToInt64(Node* node) {
- return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeTryTruncateFloat32ToUint64(Node* node) {
- return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeTryTruncateFloat64ToUint64(Node* node) {
- return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeChangeInt32ToFloat64(Node* node) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedFloat64(), zone());
-}
-
-Type* Typer::Visitor::TypeFloat64SilenceNaN(Node* node) {
- return Type::UntaggedFloat64();
-}
-
-Type* Typer::Visitor::TypeChangeInt32ToInt64(Node* node) {
- return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeChangeUint32ToFloat64(Node* node) {
- return Type::Intersect(Type::Unsigned32(), Type::UntaggedFloat64(), zone());
-}
-
-Type* Typer::Visitor::TypeChangeUint32ToUint64(Node* node) {
- return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeImpossibleToWord32(Node* node) {
- return Type::None();
-}
-
-Type* Typer::Visitor::TypeImpossibleToWord64(Node* node) {
- return Type::None();
-}
-
-Type* Typer::Visitor::TypeImpossibleToFloat32(Node* node) {
- return Type::None();
-}
-
-Type* Typer::Visitor::TypeImpossibleToFloat64(Node* node) {
- return Type::None();
-}
-
-Type* Typer::Visitor::TypeImpossibleToTagged(Node* node) {
- return Type::None();
-}
-
-Type* Typer::Visitor::TypeImpossibleToBit(Node* node) { return Type::None(); }
-
-Type* Typer::Visitor::TypeTruncateFloat64ToFloat32(Node* node) {
- return Type::Intersect(Type::Number(), Type::UntaggedFloat32(), zone());
-}
-
-Type* Typer::Visitor::TypeTruncateFloat64ToWord32(Node* node) {
- return Type::Intersect(Type::Integral32(), Type::UntaggedIntegral32(),
- zone());
-}
-
-Type* Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
-}
-
-Type* Typer::Visitor::TypeRoundFloat64ToInt32(Node* node) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
-}
-
-Type* Typer::Visitor::TypeRoundInt32ToFloat32(Node* node) {
- return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
-}
-
-
-Type* Typer::Visitor::TypeRoundInt64ToFloat32(Node* node) {
- return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
-}
-
-
-Type* Typer::Visitor::TypeRoundInt64ToFloat64(Node* node) {
- return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat64(), zone());
-}
-
-
-Type* Typer::Visitor::TypeRoundUint32ToFloat32(Node* node) {
- return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
-}
-
-
-Type* Typer::Visitor::TypeRoundUint64ToFloat32(Node* node) {
- return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
-}
-
-
-Type* Typer::Visitor::TypeRoundUint64ToFloat64(Node* node) {
- return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat64(), zone());
-}
-
-
-Type* Typer::Visitor::TypeBitcastFloat32ToInt32(Node* node) {
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeBitcastFloat64ToInt64(Node* node) {
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeBitcastInt32ToFloat32(Node* node) {
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeBitcastInt64ToFloat64(Node* node) {
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat32Add(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat32Sub(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat32Neg(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat32Mul(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat32Div(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat32Abs(Node* node) {
- // TODO(turbofan): We should be able to infer a better type here.
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat32Sqrt(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat32Equal(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeFloat32LessThan(Node* node) {
- return Type::Boolean();
-}
-
-
-Type* Typer::Visitor::TypeFloat32LessThanOrEqual(Node* node) {
- return Type::Boolean();
-}
-
-Type* Typer::Visitor::TypeFloat32Max(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat32Min(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Add(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat64Sub(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Neg(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Mul(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat64Div(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat64Mod(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat64Max(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat64Min(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat64Abs(Node* node) {
- // TODO(turbofan): We should be able to infer a better type here.
- return Type::Number();
-}
-
-Type* Typer::Visitor::TypeFloat64Acos(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Acosh(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Asin(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Asinh(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Atan(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Atanh(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Atan2(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Cbrt(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Cos(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Cosh(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Exp(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Expm1(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Log(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Log1p(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Log10(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Log2(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Pow(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Sin(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Sinh(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Sqrt(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Tan(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Tanh(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeFloat64Equal(Node* node) { return Type::Boolean(); }
-
-
-Type* Typer::Visitor::TypeFloat64LessThan(Node* node) {
- return Type::Boolean();
-}
-
-
-Type* Typer::Visitor::TypeFloat64LessThanOrEqual(Node* node) {
- return Type::Boolean();
-}
-
-
-Type* Typer::Visitor::TypeFloat32RoundDown(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64RoundDown(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat32RoundUp(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64RoundUp(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat32RoundTruncate(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64RoundTruncate(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64RoundTiesAway(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat32RoundTiesEven(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64RoundTiesEven(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64ExtractLowWord32(Node* node) {
- return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeFloat64ExtractHighWord32(Node* node) {
- return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeFloat64InsertLowWord32(Node* node) {
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeFloat64InsertHighWord32(Node* node) {
- return Type::Number();
-}
-
-
-Type* Typer::Visitor::TypeLoadStackPointer(Node* node) {
- return Type::Internal();
-}
-
-
-Type* Typer::Visitor::TypeLoadFramePointer(Node* node) {
- return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeLoadParentFramePointer(Node* node) {
- return Type::Internal();
-}
-
-Type* Typer::Visitor::TypeUnalignedLoad(Node* node) { return Type::Any(); }
-
-Type* Typer::Visitor::TypeUnalignedStore(Node* node) {
- UNREACHABLE();
- return nullptr;
-}
-
-Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
-
-Type* Typer::Visitor::TypeCheckedStore(Node* node) {
- UNREACHABLE();
- return nullptr;
-}
-
-Type* Typer::Visitor::TypeAtomicLoad(Node* node) { return Type::Any(); }
-
-Type* Typer::Visitor::TypeAtomicStore(Node* node) {
- UNREACHABLE();
- return nullptr;
-}
-
-Type* Typer::Visitor::TypeInt32PairAdd(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeInt32PairSub(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeInt32PairMul(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeWord32PairShl(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeWord32PairShr(Node* node) { return Type::Internal(); }
-
-Type* Typer::Visitor::TypeWord32PairSar(Node* node) { return Type::Internal(); }
-
-// SIMD type methods.
-
-#define SIMD_RETURN_SIMD(Name) \
- Type* Typer::Visitor::Type##Name(Node* node) { return Type::Simd(); }
-MACHINE_SIMD_RETURN_SIMD_OP_LIST(SIMD_RETURN_SIMD)
-MACHINE_SIMD_GENERIC_OP_LIST(SIMD_RETURN_SIMD)
-#undef SIMD_RETURN_SIMD
-
-#define SIMD_RETURN_NUM(Name) \
- Type* Typer::Visitor::Type##Name(Node* node) { return Type::Number(); }
-MACHINE_SIMD_RETURN_NUM_OP_LIST(SIMD_RETURN_NUM)
-#undef SIMD_RETURN_NUM
-
-#define SIMD_RETURN_BOOL(Name) \
- Type* Typer::Visitor::Type##Name(Node* node) { return Type::Boolean(); }
-MACHINE_SIMD_RETURN_BOOL_OP_LIST(SIMD_RETURN_BOOL)
-#undef SIMD_RETURN_BOOL
-
// Heap constants.
Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
- if (value->IsJSTypedArray()) {
- switch (JSTypedArray::cast(*value)->type()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- return typer_->cache_.k##Type##Array;
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- }
- }
if (Type::IsInteger(*value)) {
return Type::Range(value->Number(), value->Number(), zone());
}
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index d4d5744a6e..875b4839e5 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -7,18 +7,13 @@
#include "src/compiler/graph.h"
#include "src/compiler/operation-typer.h"
-#include "src/types.h"
namespace v8 {
namespace internal {
-
-// Forward declarations.
-class TypeCache;
-
namespace compiler {
+// Forward declarations.
class LoopVariableOptimizer;
-class OperationTyper;
class Typer {
public:
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
new file mode 100644
index 0000000000..43d2f80483
--- /dev/null
+++ b/deps/v8/src/compiler/types.cc
@@ -0,0 +1,961 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iomanip>
+
+#include "src/compiler/types.h"
+
+#include "src/handles-inl.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// NOTE: If code is marked as being a "shortcut", this means that removing
+// the code won't affect the semantics of the surrounding function definition.
+
+// static
+bool Type::IsInteger(i::Object* x) {
+ return x->IsNumber() && Type::IsInteger(x->Number());
+}
+
+// -----------------------------------------------------------------------------
+// Range-related helper functions.
+
+bool RangeType::Limits::IsEmpty() { return this->min > this->max; }
+
+RangeType::Limits RangeType::Limits::Intersect(Limits lhs, Limits rhs) {
+ DisallowHeapAllocation no_allocation;
+ Limits result(lhs);
+ if (lhs.min < rhs.min) result.min = rhs.min;
+ if (lhs.max > rhs.max) result.max = rhs.max;
+ return result;
+}
+
+RangeType::Limits RangeType::Limits::Union(Limits lhs, Limits rhs) {
+ DisallowHeapAllocation no_allocation;
+ if (lhs.IsEmpty()) return rhs;
+ if (rhs.IsEmpty()) return lhs;
+ Limits result(lhs);
+ if (lhs.min > rhs.min) result.min = rhs.min;
+ if (lhs.max < rhs.max) result.max = rhs.max;
+ return result;
+}
+
+bool Type::Overlap(RangeType* lhs, RangeType* rhs) {
+ DisallowHeapAllocation no_allocation;
+ return !RangeType::Limits::Intersect(RangeType::Limits(lhs),
+ RangeType::Limits(rhs))
+ .IsEmpty();
+}
+
+bool Type::Contains(RangeType* lhs, RangeType* rhs) {
+ DisallowHeapAllocation no_allocation;
+ return lhs->Min() <= rhs->Min() && rhs->Max() <= lhs->Max();
+}
+
+bool Type::Contains(RangeType* lhs, ConstantType* rhs) {
+ DisallowHeapAllocation no_allocation;
+ return IsInteger(*rhs->Value()) && lhs->Min() <= rhs->Value()->Number() &&
+ rhs->Value()->Number() <= lhs->Max();
+}
+
+bool Type::Contains(RangeType* range, i::Object* val) {
+ DisallowHeapAllocation no_allocation;
+ return IsInteger(val) && range->Min() <= val->Number() &&
+ val->Number() <= range->Max();
+}
+
+// -----------------------------------------------------------------------------
+// Min and Max computation.
+
+double Type::Min() {
+ DCHECK(this->Is(Number()));
+ if (this->IsBitset()) return BitsetType::Min(this->AsBitset());
+ if (this->IsUnion()) {
+ double min = +V8_INFINITY;
+ for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ min = std::min(min, this->AsUnion()->Get(i)->Min());
+ }
+ return min;
+ }
+ if (this->IsRange()) return this->AsRange()->Min();
+ if (this->IsConstant()) return this->AsConstant()->Value()->Number();
+ UNREACHABLE();
+ return 0;
+}
+
+double Type::Max() {
+ DCHECK(this->Is(Number()));
+ if (this->IsBitset()) return BitsetType::Max(this->AsBitset());
+ if (this->IsUnion()) {
+ double max = -V8_INFINITY;
+ for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ max = std::max(max, this->AsUnion()->Get(i)->Max());
+ }
+ return max;
+ }
+ if (this->IsRange()) return this->AsRange()->Max();
+ if (this->IsConstant()) return this->AsConstant()->Value()->Number();
+ UNREACHABLE();
+ return 0;
+}
+
+// -----------------------------------------------------------------------------
+// Glb and lub computation.
+
+// The largest bitset subsumed by this type.
+Type::bitset BitsetType::Glb(Type* type) {
+ DisallowHeapAllocation no_allocation;
+ // Fast case.
+ if (IsBitset(type)) {
+ return type->AsBitset();
+ } else if (type->IsUnion()) {
+ SLOW_DCHECK(type->AsUnion()->Wellformed());
+ return type->AsUnion()->Get(0)->BitsetGlb() |
+ type->AsUnion()->Get(1)->BitsetGlb(); // Shortcut.
+ } else if (type->IsRange()) {
+ bitset glb =
+ BitsetType::Glb(type->AsRange()->Min(), type->AsRange()->Max());
+ return glb;
+ } else {
+ return kNone;
+ }
+}
+
+// The smallest bitset subsuming this type, possibly not a proper one.
+Type::bitset BitsetType::Lub(Type* type) {
+ DisallowHeapAllocation no_allocation;
+ if (IsBitset(type)) return type->AsBitset();
+ if (type->IsUnion()) {
+ // Take the representation from the first element, which is always
+ // a bitset.
+ int bitset = type->AsUnion()->Get(0)->BitsetLub();
+ for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
+ // Other elements only contribute their semantic part.
+ bitset |= type->AsUnion()->Get(i)->BitsetLub();
+ }
+ return bitset;
+ }
+ if (type->IsConstant()) return type->AsConstant()->Lub();
+ if (type->IsRange()) return type->AsRange()->Lub();
+ if (type->IsTuple()) return kOtherInternal;
+ UNREACHABLE();
+ return kNone;
+}
+
+Type::bitset BitsetType::Lub(i::Map* map) {
+ DisallowHeapAllocation no_allocation;
+ switch (map->instance_type()) {
+ case STRING_TYPE:
+ case ONE_BYTE_STRING_TYPE:
+ case CONS_STRING_TYPE:
+ case CONS_ONE_BYTE_STRING_TYPE:
+ case SLICED_STRING_TYPE:
+ case SLICED_ONE_BYTE_STRING_TYPE:
+ case EXTERNAL_STRING_TYPE:
+ case EXTERNAL_ONE_BYTE_STRING_TYPE:
+ case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_STRING_TYPE:
+ case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
+ case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return kOtherString;
+ case INTERNALIZED_STRING_TYPE:
+ case ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return kInternalizedString;
+ case SYMBOL_TYPE:
+ return kSymbol;
+ case ODDBALL_TYPE: {
+ Heap* heap = map->GetHeap();
+ if (map == heap->undefined_map()) return kUndefined;
+ if (map == heap->null_map()) return kNull;
+ if (map == heap->boolean_map()) return kBoolean;
+ if (map == heap->the_hole_map()) return kHole;
+ DCHECK(map == heap->uninitialized_map() ||
+ map == heap->no_interceptor_result_sentinel_map() ||
+ map == heap->termination_exception_map() ||
+ map == heap->arguments_marker_map() ||
+ map == heap->optimized_out_map() ||
+ map == heap->stale_register_map());
+ return kOtherInternal;
+ }
+ case HEAP_NUMBER_TYPE:
+ return kNumber;
+ case SIMD128_VALUE_TYPE:
+ return kSimd;
+ case JS_OBJECT_TYPE:
+ case JS_ARGUMENTS_TYPE:
+ case JS_ERROR_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_API_OBJECT_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ if (map->is_undetectable()) return kOtherUndetectable;
+ return kOtherObject;
+ case JS_VALUE_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
+ case JS_DATE_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_ARRAY_BUFFER_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
+ case JS_SET_ITERATOR_TYPE:
+ case JS_MAP_ITERATOR_TYPE:
+ case JS_STRING_ITERATOR_TYPE:
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_BOUND_FUNCTION_TYPE:
+ DCHECK(!map->is_undetectable());
+ return kOtherObject;
+ case JS_FUNCTION_TYPE:
+ DCHECK(!map->is_undetectable());
+ return kFunction;
+ case JS_PROXY_TYPE:
+ DCHECK(!map->is_undetectable());
+ return kProxy;
+ case MAP_TYPE:
+ case ALLOCATION_SITE_TYPE:
+ case ACCESSOR_INFO_TYPE:
+ case SHARED_FUNCTION_INFO_TYPE:
+ case ACCESSOR_PAIR_TYPE:
+ case FIXED_ARRAY_TYPE:
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ case BYTE_ARRAY_TYPE:
+ case BYTECODE_ARRAY_TYPE:
+ case TRANSITION_ARRAY_TYPE:
+ case FOREIGN_TYPE:
+ case SCRIPT_TYPE:
+ case CODE_TYPE:
+ case PROPERTY_CELL_TYPE:
+ case MODULE_TYPE:
+ return kOtherInternal;
+
+ // Remaining instance types are unsupported for now. If any of them do
+ // require bit set types, they should get kOtherInternal.
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ case FREE_SPACE_TYPE:
+#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE:
+
+ TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
+#undef FIXED_TYPED_ARRAY_CASE
+ case FILLER_TYPE:
+ case ACCESS_CHECK_INFO_TYPE:
+ case INTERCEPTOR_INFO_TYPE:
+ case CALL_HANDLER_INFO_TYPE:
+ case FUNCTION_TEMPLATE_INFO_TYPE:
+ case OBJECT_TEMPLATE_INFO_TYPE:
+ case SIGNATURE_INFO_TYPE:
+ case TYPE_SWITCH_INFO_TYPE:
+ case ALLOCATION_MEMENTO_TYPE:
+ case TYPE_FEEDBACK_INFO_TYPE:
+ case ALIASED_ARGUMENTS_ENTRY_TYPE:
+ case BOX_TYPE:
+ case PROMISE_CONTAINER_TYPE:
+ case DEBUG_INFO_TYPE:
+ case BREAK_POINT_INFO_TYPE:
+ case CELL_TYPE:
+ case WEAK_CELL_TYPE:
+ case PROTOTYPE_INFO_TYPE:
+ case CONTEXT_EXTENSION_TYPE:
+ UNREACHABLE();
+ return kNone;
+ }
+ UNREACHABLE();
+ return kNone;
+}
+
+Type::bitset BitsetType::Lub(i::Object* value) {
+ DisallowHeapAllocation no_allocation;
+ if (value->IsNumber()) {
+ return Lub(value->Number());
+ }
+ return Lub(i::HeapObject::cast(value)->map());
+}
+
+Type::bitset BitsetType::Lub(double value) {
+ DisallowHeapAllocation no_allocation;
+ if (i::IsMinusZero(value)) return kMinusZero;
+ if (std::isnan(value)) return kNaN;
+ if (IsUint32Double(value) || IsInt32Double(value)) return Lub(value, value);
+ return kOtherNumber;
+}
+
+// Minimum values of plain numeric bitsets.
+const BitsetType::Boundary BitsetType::BoundariesArray[] = {
+ {kOtherNumber, kPlainNumber, -V8_INFINITY},
+ {kOtherSigned32, kNegative32, kMinInt},
+ {kNegative31, kNegative31, -0x40000000},
+ {kUnsigned30, kUnsigned30, 0},
+ {kOtherUnsigned31, kUnsigned31, 0x40000000},
+ {kOtherUnsigned32, kUnsigned32, 0x80000000},
+ {kOtherNumber, kPlainNumber, static_cast<double>(kMaxUInt32) + 1}};
+
+const BitsetType::Boundary* BitsetType::Boundaries() { return BoundariesArray; }
+
+size_t BitsetType::BoundariesSize() {
+ // Windows doesn't like arraysize here.
+ // return arraysize(BoundariesArray);
+ return 7;
+}
+
+Type::bitset BitsetType::ExpandInternals(Type::bitset bits) {
+ DisallowHeapAllocation no_allocation;
+ if (!(bits & kPlainNumber)) return bits; // Shortcut.
+ const Boundary* boundaries = Boundaries();
+ for (size_t i = 0; i < BoundariesSize(); ++i) {
+ DCHECK(BitsetType::Is(boundaries[i].internal, boundaries[i].external));
+ if (bits & boundaries[i].internal) bits |= boundaries[i].external;
+ }
+ return bits;
+}
+
+Type::bitset BitsetType::Lub(double min, double max) {
+ DisallowHeapAllocation no_allocation;
+ int lub = kNone;
+ const Boundary* mins = Boundaries();
+
+ for (size_t i = 1; i < BoundariesSize(); ++i) {
+ if (min < mins[i].min) {
+ lub |= mins[i - 1].internal;
+ if (max < mins[i].min) return lub;
+ }
+ }
+ return lub | mins[BoundariesSize() - 1].internal;
+}
+
+Type::bitset BitsetType::NumberBits(bitset bits) { return bits & kPlainNumber; }
+
+Type::bitset BitsetType::Glb(double min, double max) {
+ DisallowHeapAllocation no_allocation;
+ int glb = kNone;
+ const Boundary* mins = Boundaries();
+
+ // If the range does not touch 0, the bound is empty.
+ if (max < -1 || min > 0) return glb;
+
+ for (size_t i = 1; i + 1 < BoundariesSize(); ++i) {
+ if (min <= mins[i].min) {
+ if (max + 1 < mins[i + 1].min) break;
+ glb |= mins[i].external;
+ }
+ }
+ // OtherNumber also contains float numbers, so it can never be
+ // in the greatest lower bound.
+ return glb & ~(kOtherNumber);
+}
+
+double BitsetType::Min(bitset bits) {
+ DisallowHeapAllocation no_allocation;
+ DCHECK(Is(bits, kNumber));
+ const Boundary* mins = Boundaries();
+ bool mz = bits & kMinusZero;
+ for (size_t i = 0; i < BoundariesSize(); ++i) {
+ if (Is(mins[i].internal, bits)) {
+ return mz ? std::min(0.0, mins[i].min) : mins[i].min;
+ }
+ }
+ if (mz) return 0;
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+double BitsetType::Max(bitset bits) {
+ DisallowHeapAllocation no_allocation;
+ DCHECK(Is(bits, kNumber));
+ const Boundary* mins = Boundaries();
+ bool mz = bits & kMinusZero;
+ if (BitsetType::Is(mins[BoundariesSize() - 1].internal, bits)) {
+ return +V8_INFINITY;
+ }
+ for (size_t i = BoundariesSize() - 1; i-- > 0;) {
+ if (Is(mins[i].internal, bits)) {
+ return mz ? std::max(0.0, mins[i + 1].min - 1) : mins[i + 1].min - 1;
+ }
+ }
+ if (mz) return 0;
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+// -----------------------------------------------------------------------------
+// Predicates.
+
+bool Type::SimplyEquals(Type* that) {
+ DisallowHeapAllocation no_allocation;
+ if (this->IsConstant()) {
+ return that->IsConstant() &&
+ *this->AsConstant()->Value() == *that->AsConstant()->Value();
+ }
+ if (this->IsTuple()) {
+ if (!that->IsTuple()) return false;
+ TupleType* this_tuple = this->AsTuple();
+ TupleType* that_tuple = that->AsTuple();
+ if (this_tuple->Arity() != that_tuple->Arity()) {
+ return false;
+ }
+ for (int i = 0, n = this_tuple->Arity(); i < n; ++i) {
+ if (!this_tuple->Element(i)->Equals(that_tuple->Element(i))) return false;
+ }
+ return true;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+// Check if [this] <= [that].
+bool Type::SlowIs(Type* that) {
+ DisallowHeapAllocation no_allocation;
+
+ // Fast bitset cases
+ if (that->IsBitset()) {
+ return BitsetType::Is(this->BitsetLub(), that->AsBitset());
+ }
+
+ if (this->IsBitset()) {
+ return BitsetType::Is(this->AsBitset(), that->BitsetGlb());
+ }
+
+ // (T1 \/ ... \/ Tn) <= T if (T1 <= T) /\ ... /\ (Tn <= T)
+ if (this->IsUnion()) {
+ for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ if (!this->AsUnion()->Get(i)->Is(that)) return false;
+ }
+ return true;
+ }
+
+ // T <= (T1 \/ ... \/ Tn) if (T <= T1) \/ ... \/ (T <= Tn)
+ if (that->IsUnion()) {
+ for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
+ if (this->Is(that->AsUnion()->Get(i))) return true;
+ if (i > 1 && this->IsRange()) return false; // Shortcut.
+ }
+ return false;
+ }
+
+ if (that->IsRange()) {
+ return (this->IsRange() && Contains(that->AsRange(), this->AsRange())) ||
+ (this->IsConstant() &&
+ Contains(that->AsRange(), this->AsConstant()));
+ }
+ if (this->IsRange()) return false;
+
+ return this->SimplyEquals(that);
+}
+
+// Check if [this] and [that] overlap.
+bool Type::Maybe(Type* that) {
+ DisallowHeapAllocation no_allocation;
+
+ if (!BitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub()))
+ return false;
+
+ // (T1 \/ ... \/ Tn) overlaps T if (T1 overlaps T) \/ ... \/ (Tn overlaps T)
+ if (this->IsUnion()) {
+ for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ if (this->AsUnion()->Get(i)->Maybe(that)) return true;
+ }
+ return false;
+ }
+
+ // T overlaps (T1 \/ ... \/ Tn) if (T overlaps T1) \/ ... \/ (T overlaps Tn)
+ if (that->IsUnion()) {
+ for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
+ if (this->Maybe(that->AsUnion()->Get(i))) return true;
+ }
+ return false;
+ }
+
+ if (this->IsBitset() && that->IsBitset()) return true;
+
+ if (this->IsRange()) {
+ if (that->IsConstant()) {
+ return Contains(this->AsRange(), that->AsConstant());
+ }
+ if (that->IsRange()) {
+ return Overlap(this->AsRange(), that->AsRange());
+ }
+ if (that->IsBitset()) {
+ bitset number_bits = BitsetType::NumberBits(that->AsBitset());
+ if (number_bits == BitsetType::kNone) {
+ return false;
+ }
+ double min = std::max(BitsetType::Min(number_bits), this->Min());
+ double max = std::min(BitsetType::Max(number_bits), this->Max());
+ return min <= max;
+ }
+ }
+ if (that->IsRange()) {
+ return that->Maybe(this); // This case is handled above.
+ }
+
+ if (this->IsBitset() || that->IsBitset()) return true;
+
+ return this->SimplyEquals(that);
+}
+
+// Return the range in [this], or [NULL].
+Type* Type::GetRange() {
+ DisallowHeapAllocation no_allocation;
+ if (this->IsRange()) return this;
+ if (this->IsUnion() && this->AsUnion()->Get(1)->IsRange()) {
+ return this->AsUnion()->Get(1);
+ }
+ return NULL;
+}
+
+bool UnionType::Wellformed() {
+ DisallowHeapAllocation no_allocation;
+ // This checks the invariants of the union representation:
+ // 1. There are at least two elements.
+ // 2. The first element is a bitset, no other element is a bitset.
+ // 3. At most one element is a range, and it must be the second one.
+ // 4. No element is itself a union.
+ // 5. No element (except the bitset) is a subtype of any other.
+ // 6. If there is a range, then the bitset type does not contain
+ // plain number bits.
+ DCHECK(this->Length() >= 2); // (1)
+ DCHECK(this->Get(0)->IsBitset()); // (2a)
+
+ for (int i = 0; i < this->Length(); ++i) {
+ if (i != 0) DCHECK(!this->Get(i)->IsBitset()); // (2b)
+ if (i != 1) DCHECK(!this->Get(i)->IsRange()); // (3)
+ DCHECK(!this->Get(i)->IsUnion()); // (4)
+ for (int j = 0; j < this->Length(); ++j) {
+ if (i != j && i != 0) DCHECK(!this->Get(i)->Is(this->Get(j))); // (5)
+ }
+ }
+ DCHECK(!this->Get(1)->IsRange() ||
+ (BitsetType::NumberBits(this->Get(0)->AsBitset()) ==
+ BitsetType::kNone)); // (6)
+ return true;
+}
+
+// -----------------------------------------------------------------------------
+// Union and intersection
+
+static bool AddIsSafe(int x, int y) {
+ return x >= 0 ? y <= std::numeric_limits<int>::max() - x
+ : y >= std::numeric_limits<int>::min() - x;
+}
+
+Type* Type::Intersect(Type* type1, Type* type2, Zone* zone) {
+ // Fast case: bit sets.
+ if (type1->IsBitset() && type2->IsBitset()) {
+ return BitsetType::New(type1->AsBitset() & type2->AsBitset());
+ }
+
+ // Fast case: top or bottom types.
+ if (type1->IsNone() || type2->IsAny()) return type1; // Shortcut.
+ if (type2->IsNone() || type1->IsAny()) return type2; // Shortcut.
+
+ // Semi-fast case.
+ if (type1->Is(type2)) return type1;
+ if (type2->Is(type1)) return type2;
+
+ // Slow case: create union.
+
+ // Semantic subtyping check - this is needed for consistency with the
+ // semi-fast case above.
+ if (type1->Is(type2)) {
+ type2 = Any();
+ } else if (type2->Is(type1)) {
+ type1 = Any();
+ }
+
+ bitset bits = type1->BitsetGlb() & type2->BitsetGlb();
+ int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
+ int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
+ if (!AddIsSafe(size1, size2)) return Any();
+ int size = size1 + size2;
+ if (!AddIsSafe(size, 2)) return Any();
+ size += 2;
+ Type* result_type = UnionType::New(size, zone);
+ UnionType* result = result_type->AsUnion();
+ size = 0;
+
+ // Deal with bitsets.
+ result->Set(size++, BitsetType::New(bits));
+
+ RangeType::Limits lims = RangeType::Limits::Empty();
+ size = IntersectAux(type1, type2, result, size, &lims, zone);
+
+ // If the range is not empty, then insert it into the union and
+ // remove the number bits from the bitset.
+ if (!lims.IsEmpty()) {
+ size = UpdateRange(RangeType::New(lims, zone), result, size, zone);
+
+ // Remove the number bits.
+ bitset number_bits = BitsetType::NumberBits(bits);
+ bits &= ~number_bits;
+ result->Set(0, BitsetType::New(bits));
+ }
+ return NormalizeUnion(result_type, size, zone);
+}
+
+int Type::UpdateRange(Type* range, UnionType* result, int size, Zone* zone) {
+ if (size == 1) {
+ result->Set(size++, range);
+ } else {
+ // Make space for the range.
+ result->Set(size++, result->Get(1));
+ result->Set(1, range);
+ }
+
+ // Remove any components that just got subsumed.
+ for (int i = 2; i < size;) {
+ if (result->Get(i)->Is(range)) {
+ result->Set(i, result->Get(--size));
+ } else {
+ ++i;
+ }
+ }
+ return size;
+}
+
+RangeType::Limits Type::ToLimits(bitset bits, Zone* zone) {
+ bitset number_bits = BitsetType::NumberBits(bits);
+
+ if (number_bits == BitsetType::kNone) {
+ return RangeType::Limits::Empty();
+ }
+
+ return RangeType::Limits(BitsetType::Min(number_bits),
+ BitsetType::Max(number_bits));
+}
+
+RangeType::Limits Type::IntersectRangeAndBitset(Type* range, Type* bitset,
+ Zone* zone) {
+ RangeType::Limits range_lims(range->AsRange());
+ RangeType::Limits bitset_lims = ToLimits(bitset->AsBitset(), zone);
+ return RangeType::Limits::Intersect(range_lims, bitset_lims);
+}
+
+int Type::IntersectAux(Type* lhs, Type* rhs, UnionType* result, int size,
+ RangeType::Limits* lims, Zone* zone) {
+ if (lhs->IsUnion()) {
+ for (int i = 0, n = lhs->AsUnion()->Length(); i < n; ++i) {
+ size =
+ IntersectAux(lhs->AsUnion()->Get(i), rhs, result, size, lims, zone);
+ }
+ return size;
+ }
+ if (rhs->IsUnion()) {
+ for (int i = 0, n = rhs->AsUnion()->Length(); i < n; ++i) {
+ size =
+ IntersectAux(lhs, rhs->AsUnion()->Get(i), result, size, lims, zone);
+ }
+ return size;
+ }
+
+ if (!BitsetType::IsInhabited(lhs->BitsetLub() & rhs->BitsetLub())) {
+ return size;
+ }
+
+ if (lhs->IsRange()) {
+ if (rhs->IsBitset()) {
+ RangeType::Limits lim = IntersectRangeAndBitset(lhs, rhs, zone);
+
+ if (!lim.IsEmpty()) {
+ *lims = RangeType::Limits::Union(lim, *lims);
+ }
+ return size;
+ }
+ if (rhs->IsConstant() && Contains(lhs->AsRange(), rhs->AsConstant())) {
+ return AddToUnion(rhs, result, size, zone);
+ }
+ if (rhs->IsRange()) {
+ RangeType::Limits lim = RangeType::Limits::Intersect(
+ RangeType::Limits(lhs->AsRange()), RangeType::Limits(rhs->AsRange()));
+ if (!lim.IsEmpty()) {
+ *lims = RangeType::Limits::Union(lim, *lims);
+ }
+ }
+ return size;
+ }
+ if (rhs->IsRange()) {
+ // This case is handled symmetrically above.
+ return IntersectAux(rhs, lhs, result, size, lims, zone);
+ }
+ if (lhs->IsBitset() || rhs->IsBitset()) {
+ return AddToUnion(lhs->IsBitset() ? rhs : lhs, result, size, zone);
+ }
+ if (lhs->SimplyEquals(rhs)) {
+ return AddToUnion(lhs, result, size, zone);
+ }
+ return size;
+}
+
+// Make sure that we produce a well-formed range and bitset:
+// If the range is non-empty, the number bits in the bitset should be
+// clear. Moreover, if we have a canonical range (such as Signed32),
+// we want to produce a bitset rather than a range.
+Type* Type::NormalizeRangeAndBitset(Type* range, bitset* bits, Zone* zone) {
+ // Fast path: If the bitset does not mention numbers, we can just keep the
+ // range.
+ bitset number_bits = BitsetType::NumberBits(*bits);
+ if (number_bits == 0) {
+ return range;
+ }
+
+ // If the range is semantically contained within the bitset, return None and
+ // leave the bitset untouched.
+ bitset range_lub = range->BitsetLub();
+ if (BitsetType::Is(range_lub, *bits)) {
+ return None();
+ }
+
+ // Slow path: reconcile the bitset range and the range.
+ double bitset_min = BitsetType::Min(number_bits);
+ double bitset_max = BitsetType::Max(number_bits);
+
+ double range_min = range->Min();
+ double range_max = range->Max();
+
+ // Remove the number bits from the bitset, they would just confuse us now.
+ // NOTE: bits contains OtherNumber iff bits contains PlainNumber, in which
+ // case we already returned after the subtype check above.
+ *bits &= ~number_bits;
+
+ if (range_min <= bitset_min && range_max >= bitset_max) {
+ // Bitset is contained within the range, just return the range.
+ return range;
+ }
+
+ if (bitset_min < range_min) {
+ range_min = bitset_min;
+ }
+ if (bitset_max > range_max) {
+ range_max = bitset_max;
+ }
+ return RangeType::New(range_min, range_max, zone);
+}
+
+Type* Type::Union(Type* type1, Type* type2, Zone* zone) {
+ // Fast case: bit sets.
+ if (type1->IsBitset() && type2->IsBitset()) {
+ return BitsetType::New(type1->AsBitset() | type2->AsBitset());
+ }
+
+ // Fast case: top or bottom types.
+ if (type1->IsAny() || type2->IsNone()) return type1;
+ if (type2->IsAny() || type1->IsNone()) return type2;
+
+ // Semi-fast case.
+ if (type1->Is(type2)) return type2;
+ if (type2->Is(type1)) return type1;
+
+ // Slow case: create union.
+ int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
+ int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
+ if (!AddIsSafe(size1, size2)) return Any();
+ int size = size1 + size2;
+ if (!AddIsSafe(size, 2)) return Any();
+ size += 2;
+ Type* result_type = UnionType::New(size, zone);
+ UnionType* result = result_type->AsUnion();
+ size = 0;
+
+ // Compute the new bitset.
+ bitset new_bitset = type1->BitsetGlb() | type2->BitsetGlb();
+
+ // Deal with ranges.
+ Type* range = None();
+ Type* range1 = type1->GetRange();
+ Type* range2 = type2->GetRange();
+ if (range1 != NULL && range2 != NULL) {
+ RangeType::Limits lims =
+ RangeType::Limits::Union(RangeType::Limits(range1->AsRange()),
+ RangeType::Limits(range2->AsRange()));
+ Type* union_range = RangeType::New(lims, zone);
+ range = NormalizeRangeAndBitset(union_range, &new_bitset, zone);
+ } else if (range1 != NULL) {
+ range = NormalizeRangeAndBitset(range1, &new_bitset, zone);
+ } else if (range2 != NULL) {
+ range = NormalizeRangeAndBitset(range2, &new_bitset, zone);
+ }
+ Type* bits = BitsetType::New(new_bitset);
+ result->Set(size++, bits);
+ if (!range->IsNone()) result->Set(size++, range);
+
+ size = AddToUnion(type1, result, size, zone);
+ size = AddToUnion(type2, result, size, zone);
+ return NormalizeUnion(result_type, size, zone);
+}
+
+// Add [type] to [result] unless [type] is bitset, range, or already subsumed.
+// Return new size of [result].
+int Type::AddToUnion(Type* type, UnionType* result, int size, Zone* zone) {
+ if (type->IsBitset() || type->IsRange()) return size;
+ if (type->IsUnion()) {
+ for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
+ size = AddToUnion(type->AsUnion()->Get(i), result, size, zone);
+ }
+ return size;
+ }
+ for (int i = 0; i < size; ++i) {
+ if (type->Is(result->Get(i))) return size;
+ }
+ result->Set(size++, type);
+ return size;
+}
+
+Type* Type::NormalizeUnion(Type* union_type, int size, Zone* zone) {
+ UnionType* unioned = union_type->AsUnion();
+ DCHECK(size >= 1);
+ DCHECK(unioned->Get(0)->IsBitset());
+ // If the union has just one element, return it.
+ if (size == 1) {
+ return unioned->Get(0);
+ }
+ bitset bits = unioned->Get(0)->AsBitset();
+ // If the union only consists of a range, we can get rid of the union.
+ if (size == 2 && bits == BitsetType::kNone) {
+ if (unioned->Get(1)->IsRange()) {
+ return RangeType::New(unioned->Get(1)->AsRange()->Min(),
+ unioned->Get(1)->AsRange()->Max(), zone);
+ }
+ }
+ unioned->Shrink(size);
+ SLOW_DCHECK(unioned->Wellformed());
+ return union_type;
+}
+
+// -----------------------------------------------------------------------------
+// Iteration.
+
+int Type::NumConstants() {
+ DisallowHeapAllocation no_allocation;
+ if (this->IsConstant()) {
+ return 1;
+ } else if (this->IsUnion()) {
+ int result = 0;
+ for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ if (this->AsUnion()->Get(i)->IsConstant()) ++result;
+ }
+ return result;
+ } else {
+ return 0;
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Printing.
+
+const char* BitsetType::Name(bitset bits) {
+ switch (bits) {
+#define RETURN_NAMED_TYPE(type, value) \
+ case k##type: \
+ return #type;
+ PROPER_BITSET_TYPE_LIST(RETURN_NAMED_TYPE)
+ INTERNAL_BITSET_TYPE_LIST(RETURN_NAMED_TYPE)
+#undef RETURN_NAMED_TYPE
+
+ default:
+ return NULL;
+ }
+}
+
+void BitsetType::Print(std::ostream& os, // NOLINT
+ bitset bits) {
+ DisallowHeapAllocation no_allocation;
+ const char* name = Name(bits);
+ if (name != NULL) {
+ os << name;
+ return;
+ }
+
+ // clang-format off
+ static const bitset named_bitsets[] = {
+#define BITSET_CONSTANT(type, value) k##type,
+ INTERNAL_BITSET_TYPE_LIST(BITSET_CONSTANT)
+ PROPER_BITSET_TYPE_LIST(BITSET_CONSTANT)
+#undef BITSET_CONSTANT
+ };
+ // clang-format on
+
+ bool is_first = true;
+ os << "(";
+ for (int i(arraysize(named_bitsets) - 1); bits != 0 && i >= 0; --i) {
+ bitset subset = named_bitsets[i];
+ if ((bits & subset) == subset) {
+ if (!is_first) os << " | ";
+ is_first = false;
+ os << Name(subset);
+ bits -= subset;
+ }
+ }
+ DCHECK(bits == 0);
+ os << ")";
+}
+
+void Type::PrintTo(std::ostream& os) {
+ DisallowHeapAllocation no_allocation;
+ if (this->IsBitset()) {
+ BitsetType::Print(os, this->AsBitset());
+ } else if (this->IsConstant()) {
+ os << "Constant(" << Brief(*this->AsConstant()->Value()) << ")";
+ } else if (this->IsRange()) {
+ std::ostream::fmtflags saved_flags = os.setf(std::ios::fixed);
+ std::streamsize saved_precision = os.precision(0);
+ os << "Range(" << this->AsRange()->Min() << ", " << this->AsRange()->Max()
+ << ")";
+ os.flags(saved_flags);
+ os.precision(saved_precision);
+ } else if (this->IsUnion()) {
+ os << "(";
+ for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
+ Type* type_i = this->AsUnion()->Get(i);
+ if (i > 0) os << " | ";
+ type_i->PrintTo(os);
+ }
+ os << ")";
+ } else if (this->IsTuple()) {
+ os << "<";
+ for (int i = 0, n = this->AsTuple()->Arity(); i < n; ++i) {
+ Type* type_i = this->AsTuple()->Element(i);
+ if (i > 0) os << ", ";
+ type_i->PrintTo(os);
+ }
+ os << ">";
+ } else {
+ UNREACHABLE();
+ }
+}
+
+#ifdef DEBUG
+void Type::Print() {
+ OFStream os(stdout);
+ PrintTo(os);
+ os << std::endl;
+}
+void BitsetType::Print(bitset bits) {
+ OFStream os(stdout);
+ Print(os, bits);
+ os << std::endl;
+}
+#endif
+
+BitsetType::bitset BitsetType::SignedSmall() {
+ return i::SmiValuesAre31Bits() ? kSigned31 : kSigned32;
+}
+
+BitsetType::bitset BitsetType::UnsignedSmall() {
+ return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/types.h b/deps/v8/src/compiler/types.h
index 746cca764e..ef5bec3f9d 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TYPES_H_
-#define V8_TYPES_H_
+#ifndef V8_COMPILER_TYPES_H_
+#define V8_COMPILER_TYPES_H_
#include "src/conversions.h"
#include "src/handles.h"
@@ -12,6 +12,7 @@
namespace v8 {
namespace internal {
+namespace compiler {
// SUMMARY
//
@@ -21,13 +22,7 @@ namespace internal {
// can express class types (a.k.a. specific maps) and singleton types (i.e.,
// concrete constants).
//
-// Types consist of two dimensions: semantic (value range) and representation.
-// Both are related through subtyping.
-//
-//
-// SEMANTIC DIMENSION
-//
-// The following equations and inequations hold for the semantic axis:
+// The following equations and inequations hold:
//
// None <= T
// T <= Any
@@ -39,57 +34,10 @@ namespace internal {
// InternalizedString < String
//
// Receiver = Object \/ Proxy
-// Array < Object
-// Function < Object
-// RegExp < Object
// OtherUndetectable < Object
// DetectableReceiver = Receiver - OtherUndetectable
//
-// Class(map) < T iff instance_type(map) < T
// Constant(x) < T iff instance_type(map(x)) < T
-// Array(T) < Array
-// Function(R, S, T0, T1, ...) < Function
-// Context(T) < Internal
-//
-// Both structural Array and Function types are invariant in all parameters;
-// relaxing this would make Union and Intersect operations more involved.
-// There is no subtyping relation between Array, Function, or Context types
-// and respective Constant types, since these types cannot be reconstructed
-// for arbitrary heap values.
-// Note also that Constant(x) < Class(map(x)) does _not_ hold, since x's map can
-// change! (Its instance type cannot, however.)
-// TODO(rossberg): the latter is not currently true for proxies, because of fix,
-// but will hold once we implement direct proxies.
-// However, we also define a 'temporal' variant of the subtyping relation that
-// considers the _current_ state only, i.e., Constant(x) <_now Class(map(x)).
-//
-//
-// REPRESENTATIONAL DIMENSION
-//
-// For the representation axis, the following holds:
-//
-// None <= R
-// R <= Any
-//
-// UntaggedInt = UntaggedInt1 \/ UntaggedInt8 \/
-// UntaggedInt16 \/ UntaggedInt32
-// UntaggedFloat = UntaggedFloat32 \/ UntaggedFloat64
-// UntaggedNumber = UntaggedInt \/ UntaggedFloat
-// Untagged = UntaggedNumber \/ UntaggedPtr
-// Tagged = TaggedInt \/ TaggedPtr
-//
-// Subtyping relates the two dimensions, for example:
-//
-// Number <= Tagged \/ UntaggedNumber
-// Object <= TaggedPtr \/ UntaggedPtr
-//
-// That holds because the semantic type constructors defined by the API create
-// types that allow for all possible representations, and dually, the ones for
-// representation types initially include all semantic ranges. Representations
-// can then e.g. be narrowed for a given semantic type using intersection:
-//
-// SignedSmall /\ TaggedInt (a 'smi')
-// Number /\ TaggedPtr (a heap number)
//
//
// RANGE TYPES
@@ -140,67 +88,39 @@ namespace internal {
// IMPLEMENTATION
//
// Internally, all 'primitive' types, and their unions, are represented as
-// bitsets. Bit 0 is reserved for tagging. Class is a heap pointer to the
-// respective map. Only structured types require allocation.
-// Note that the bitset representation is closed under both Union and Intersect.
-
+// bitsets. Bit 0 is reserved for tagging. Only structured types require
+// allocation.
// -----------------------------------------------------------------------------
// Values for bitset types
// clang-format off
-#define MASK_BITSET_TYPE_LIST(V) \
- V(Representation, 0xffc00000u) \
- V(Semantic, 0x003ffffeu)
-
-#define REPRESENTATION(k) ((k) & BitsetType::kRepresentation)
-#define SEMANTIC(k) ((k) & BitsetType::kSemantic)
-
-#define REPRESENTATION_BITSET_TYPE_LIST(V) \
- V(None, 0) \
- V(UntaggedBit, 1u << 22 | kSemantic) \
- V(UntaggedIntegral8, 1u << 23 | kSemantic) \
- V(UntaggedIntegral16, 1u << 24 | kSemantic) \
- V(UntaggedIntegral32, 1u << 25 | kSemantic) \
- V(UntaggedFloat32, 1u << 26 | kSemantic) \
- V(UntaggedFloat64, 1u << 27 | kSemantic) \
- V(UntaggedSimd128, 1u << 28 | kSemantic) \
- V(UntaggedPointer, 1u << 29 | kSemantic) \
- V(TaggedSigned, 1u << 30 | kSemantic) \
- V(TaggedPointer, 1u << 31 | kSemantic) \
- \
- V(UntaggedIntegral, kUntaggedBit | kUntaggedIntegral8 | \
- kUntaggedIntegral16 | kUntaggedIntegral32) \
- V(UntaggedFloat, kUntaggedFloat32 | kUntaggedFloat64) \
- V(UntaggedNumber, kUntaggedIntegral | kUntaggedFloat) \
- V(Untagged, kUntaggedNumber | kUntaggedPointer) \
- V(Tagged, kTaggedSigned | kTaggedPointer)
-
#define INTERNAL_BITSET_TYPE_LIST(V) \
- V(OtherUnsigned31, 1u << 1 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(OtherUnsigned32, 1u << 2 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(OtherSigned32, 1u << 3 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(OtherNumber, 1u << 4 | REPRESENTATION(kTagged | kUntaggedNumber))
-
-#define SEMANTIC_BITSET_TYPE_LIST(V) \
- V(Negative31, 1u << 5 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(Null, 1u << 6 | REPRESENTATION(kTaggedPointer)) \
- V(Undefined, 1u << 7 | REPRESENTATION(kTaggedPointer)) \
- V(Boolean, 1u << 8 | REPRESENTATION(kTaggedPointer)) \
- V(Unsigned30, 1u << 9 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(MinusZero, 1u << 10 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(NaN, 1u << 11 | REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(Symbol, 1u << 12 | REPRESENTATION(kTaggedPointer)) \
- V(InternalizedString, 1u << 13 | REPRESENTATION(kTaggedPointer)) \
- V(OtherString, 1u << 14 | REPRESENTATION(kTaggedPointer)) \
- V(Simd, 1u << 15 | REPRESENTATION(kTaggedPointer)) \
- V(OtherObject, 1u << 17 | REPRESENTATION(kTaggedPointer)) \
- V(OtherUndetectable, 1u << 16 | REPRESENTATION(kTaggedPointer)) \
- V(Proxy, 1u << 18 | REPRESENTATION(kTaggedPointer)) \
- V(Function, 1u << 19 | REPRESENTATION(kTaggedPointer)) \
- V(Hole, 1u << 20 | REPRESENTATION(kTaggedPointer)) \
- V(OtherInternal, 1u << 21 | REPRESENTATION(kTagged | kUntagged)) \
+ V(OtherUnsigned31, 1u << 1) \
+ V(OtherUnsigned32, 1u << 2) \
+ V(OtherSigned32, 1u << 3) \
+ V(OtherNumber, 1u << 4) \
+
+#define PROPER_BITSET_TYPE_LIST(V) \
+ V(None, 0u) \
+ V(Negative31, 1u << 5) \
+ V(Null, 1u << 6) \
+ V(Undefined, 1u << 7) \
+ V(Boolean, 1u << 8) \
+ V(Unsigned30, 1u << 9) \
+ V(MinusZero, 1u << 10) \
+ V(NaN, 1u << 11) \
+ V(Symbol, 1u << 12) \
+ V(InternalizedString, 1u << 13) \
+ V(OtherString, 1u << 14) \
+ V(Simd, 1u << 15) \
+ V(OtherObject, 1u << 17) \
+ V(OtherUndetectable, 1u << 16) \
+ V(Proxy, 1u << 18) \
+ V(Function, 1u << 19) \
+ V(Hole, 1u << 20) \
+ V(OtherInternal, 1u << 21) \
\
V(Signed31, kUnsigned30 | kNegative31) \
V(Signed32, kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
@@ -261,15 +181,9 @@ namespace internal {
* occur as part of PlainNumber.
*/
-#define PROPER_BITSET_TYPE_LIST(V) \
- REPRESENTATION_BITSET_TYPE_LIST(V) \
- SEMANTIC_BITSET_TYPE_LIST(V)
-
-#define BITSET_TYPE_LIST(V) \
- MASK_BITSET_TYPE_LIST(V) \
- REPRESENTATION_BITSET_TYPE_LIST(V) \
- INTERNAL_BITSET_TYPE_LIST(V) \
- SEMANTIC_BITSET_TYPE_LIST(V)
+#define BITSET_TYPE_LIST(V) \
+ INTERNAL_BITSET_TYPE_LIST(V) \
+ PROPER_BITSET_TYPE_LIST(V)
class Type;
@@ -294,13 +208,7 @@ class BitsetType {
return static_cast<bitset>(reinterpret_cast<uintptr_t>(this) ^ 1u);
}
- static bool IsInhabited(bitset bits) {
- return SEMANTIC(bits) != kNone && REPRESENTATION(bits) != kNone;
- }
-
- static bool SemanticIsInhabited(bitset bits) {
- return SEMANTIC(bits) != kNone;
- }
+ static bool IsInhabited(bitset bits) { return bits != kNone; }
static bool Is(bitset bits1, bitset bits2) {
return (bits1 | bits2) == bits2;
@@ -355,16 +263,7 @@ class TypeBase {
protected:
friend class Type;
- enum Kind {
- kClass,
- kConstant,
- kContext,
- kArray,
- kFunction,
- kTuple,
- kUnion,
- kRange
- };
+ enum Kind { kConstant, kTuple, kUnion, kRange };
Kind kind() const { return kind_; }
explicit TypeBase(Kind kind) : kind_(kind) {}
@@ -386,36 +285,6 @@ class TypeBase {
};
// -----------------------------------------------------------------------------
-// Class types.
-
-class ClassType : public TypeBase {
- public:
- i::Handle<i::Map> Map() { return map_; }
-
- private:
- friend class Type;
- friend class BitsetType;
-
- static Type* New(i::Handle<i::Map> map, Zone* zone) {
- return AsType(new (zone->New(sizeof(ClassType)))
- ClassType(BitsetType::Lub(*map), map));
- }
-
- static ClassType* cast(Type* type) {
- DCHECK(IsKind(type, kClass));
- return static_cast<ClassType*>(FromType(type));
- }
-
- ClassType(BitsetType::bitset bitset, i::Handle<i::Map> map)
- : TypeBase(kClass), bitset_(bitset), map_(map) {}
-
- BitsetType::bitset Lub() { return bitset_; }
-
- BitsetType::bitset bitset_;
- Handle<i::Map> map_;
-};
-
-// -----------------------------------------------------------------------------
// Constant types.
class ConstantType : public TypeBase {
@@ -446,7 +315,6 @@ class ConstantType : public TypeBase {
Handle<i::Object> object_;
};
// TODO(neis): Also cache value if numerical.
-// TODO(neis): Allow restricting the representation.
// -----------------------------------------------------------------------------
// Range types.
@@ -472,21 +340,18 @@ class RangeType : public TypeBase {
friend class BitsetType;
friend class UnionType;
- static Type* New(double min, double max, BitsetType::bitset representation,
- Zone* zone) {
- return New(Limits(min, max), representation, zone);
+ static Type* New(double min, double max, Zone* zone) {
+ return New(Limits(min, max), zone);
}
static bool IsInteger(double x) {
return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
}
- static Type* New(Limits lim, BitsetType::bitset representation, Zone* zone) {
+ static Type* New(Limits lim, Zone* zone) {
DCHECK(IsInteger(lim.min) && IsInteger(lim.max));
DCHECK(lim.min <= lim.max);
- DCHECK(REPRESENTATION(representation) == representation);
- BitsetType::bitset bits =
- SEMANTIC(BitsetType::Lub(lim.min, lim.max)) | representation;
+ BitsetType::bitset bits = BitsetType::Lub(lim.min, lim.max);
return AsType(new (zone->New(sizeof(RangeType))) RangeType(bits, lim));
}
@@ -506,54 +371,6 @@ class RangeType : public TypeBase {
};
// -----------------------------------------------------------------------------
-// Context types.
-
-class ContextType : public TypeBase {
- public:
- Type* Outer() { return outer_; }
-
- private:
- friend class Type;
-
- static Type* New(Type* outer, Zone* zone) {
- return AsType(new (zone->New(sizeof(ContextType))) ContextType(outer));
- }
-
- static ContextType* cast(Type* type) {
- DCHECK(IsKind(type, kContext));
- return static_cast<ContextType*>(FromType(type));
- }
-
- explicit ContextType(Type* outer) : TypeBase(kContext), outer_(outer) {}
-
- Type* outer_;
-};
-
-// -----------------------------------------------------------------------------
-// Array types.
-
-class ArrayType : public TypeBase {
- public:
- Type* Element() { return element_; }
-
- private:
- friend class Type;
-
- explicit ArrayType(Type* element) : TypeBase(kArray), element_(element) {}
-
- static Type* New(Type* element, Zone* zone) {
- return AsType(new (zone->New(sizeof(ArrayType))) ArrayType(element));
- }
-
- static ArrayType* cast(Type* type) {
- DCHECK(IsKind(type, kArray));
- return static_cast<ArrayType*>(FromType(type));
- }
-
- Type* element_;
-};
-
-// -----------------------------------------------------------------------------
// Superclass for types with variable number of type fields.
class StructuralType : public TypeBase {
public:
@@ -590,38 +407,6 @@ class StructuralType : public TypeBase {
};
// -----------------------------------------------------------------------------
-// Function types.
-
-class FunctionType : public StructuralType {
- public:
- int Arity() { return this->Length() - 2; }
- Type* Result() { return this->Get(0); }
- Type* Receiver() { return this->Get(1); }
- Type* Parameter(int i) { return this->Get(2 + i); }
-
- void InitParameter(int i, Type* type) { this->Set(2 + i, type); }
-
- private:
- friend class Type;
-
- FunctionType(Type* result, Type* receiver, int arity, Zone* zone)
- : StructuralType(kFunction, 2 + arity, zone) {
- Set(0, result);
- Set(1, receiver);
- }
-
- static Type* New(Type* result, Type* receiver, int arity, Zone* zone) {
- return AsType(new (zone->New(sizeof(FunctionType)))
- FunctionType(result, receiver, arity, zone));
- }
-
- static FunctionType* cast(Type* type) {
- DCHECK(IsKind(type, kFunction));
- return static_cast<FunctionType*>(FromType(type));
- }
-};
-
-// -----------------------------------------------------------------------------
// Tuple types.
class TupleType : public StructuralType {
@@ -689,54 +474,11 @@ class Type {
return BitsetType::New(BitsetType::UnsignedSmall());
}
- static Type* Class(i::Handle<i::Map> map, Zone* zone) {
- return ClassType::New(map, zone);
- }
static Type* Constant(i::Handle<i::Object> value, Zone* zone) {
return ConstantType::New(value, zone);
}
static Type* Range(double min, double max, Zone* zone) {
- return RangeType::New(min, max, REPRESENTATION(BitsetType::kTagged |
- BitsetType::kUntaggedNumber),
- zone);
- }
- static Type* Context(Type* outer, Zone* zone) {
- return ContextType::New(outer, zone);
- }
- static Type* Array(Type* element, Zone* zone) {
- return ArrayType::New(element, zone);
- }
- static Type* Function(Type* result, Type* receiver, int arity, Zone* zone) {
- return FunctionType::New(result, receiver, arity, zone);
- }
- static Type* Function(Type* result, Zone* zone) {
- return Function(result, Any(), 0, zone);
- }
- static Type* Function(Type* result, Type* param0, Zone* zone) {
- Type* function = Function(result, Any(), 1, zone);
- function->AsFunction()->InitParameter(0, param0);
- return function;
- }
- static Type* Function(Type* result, Type* param0, Type* param1, Zone* zone) {
- Type* function = Function(result, Any(), 2, zone);
- function->AsFunction()->InitParameter(0, param0);
- function->AsFunction()->InitParameter(1, param1);
- return function;
- }
- static Type* Function(Type* result, Type* param0, Type* param1, Type* param2,
- Zone* zone) {
- Type* function = Function(result, Any(), 3, zone);
- function->AsFunction()->InitParameter(0, param0);
- function->AsFunction()->InitParameter(1, param1);
- function->AsFunction()->InitParameter(2, param2);
- return function;
- }
- static Type* Function(Type* result, int arity, Type** params, Zone* zone) {
- Type* function = Function(result, Any(), arity, zone);
- for (int i = 0; i < arity; ++i) {
- function->AsFunction()->InitParameter(i, params[i]);
- }
- return function;
+ return RangeType::New(min, max, zone);
}
static Type* Tuple(Type* first, Type* second, Type* third, Zone* zone) {
Type* tuple = TupleType::New(3, zone);
@@ -746,11 +488,6 @@ class Type {
return tuple;
}
-#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
- static Type* Name(Isolate* isolate, Zone* zone);
- SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
-#undef CONSTRUCT_SIMD_TYPE
-
static Type* Union(Type* type1, Type* type2, Zone* zone);
static Type* Intersect(Type* type1, Type* type2, Zone* zone);
@@ -764,9 +501,10 @@ class Type {
return Of(*value, zone);
}
- // Extraction of components.
- static Type* Representation(Type* t, Zone* zone);
- static Type* Semantic(Type* t, Zone* zone);
+ static Type* For(i::Map* map) {
+ return BitsetType::New(BitsetType::ExpandInternals(BitsetType::Lub(map)));
+ }
+ static Type* For(i::Handle<i::Map> map) { return For(*map); }
// Predicates.
bool IsInhabited() { return BitsetType::IsInhabited(this->BitsetLub()); }
@@ -775,37 +513,13 @@ class Type {
bool Maybe(Type* that);
bool Equals(Type* that) { return this->Is(that) && that->Is(this); }
- // Equivalent to Constant(val)->Is(this), but avoiding allocation.
- bool Contains(i::Object* val);
- bool Contains(i::Handle<i::Object> val) { return this->Contains(*val); }
-
- // State-dependent versions of the above that consider subtyping between
- // a constant and its map class.
- static Type* NowOf(i::Object* value, Zone* zone);
- static Type* NowOf(i::Handle<i::Object> value, Zone* zone) {
- return NowOf(*value, zone);
- }
- bool NowIs(Type* that);
- bool NowContains(i::Object* val);
- bool NowContains(i::Handle<i::Object> val) { return this->NowContains(*val); }
-
- bool NowStable();
-
// Inspection.
bool IsRange() { return IsKind(TypeBase::kRange); }
- bool IsClass() { return IsKind(TypeBase::kClass); }
bool IsConstant() { return IsKind(TypeBase::kConstant); }
- bool IsContext() { return IsKind(TypeBase::kContext); }
- bool IsArray() { return IsKind(TypeBase::kArray); }
- bool IsFunction() { return IsKind(TypeBase::kFunction); }
bool IsTuple() { return IsKind(TypeBase::kTuple); }
- ClassType* AsClass() { return ClassType::cast(this); }
ConstantType* AsConstant() { return ConstantType::cast(this); }
RangeType* AsRange() { return RangeType::cast(this); }
- ContextType* AsContext() { return ContextType::cast(this); }
- ArrayType* AsArray() { return ArrayType::cast(this); }
- FunctionType* AsFunction() { return FunctionType::cast(this); }
TupleType* AsTuple() { return TupleType::cast(this); }
// Minimum and maximum of a numeric type.
@@ -824,43 +538,11 @@ class Type {
return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
}
- int NumClasses();
int NumConstants();
- template <class T>
- class Iterator {
- public:
- bool Done() const { return index_ < 0; }
- i::Handle<T> Current();
- void Advance();
-
- private:
- friend class Type;
-
- Iterator() : index_(-1) {}
- explicit Iterator(Type* type) : type_(type), index_(-1) { Advance(); }
-
- inline bool matches(Type* type);
- inline Type* get_type();
-
- Type* type_;
- int index_;
- };
-
- Iterator<i::Map> Classes() {
- if (this->IsBitset()) return Iterator<i::Map>();
- return Iterator<i::Map>(this);
- }
- Iterator<i::Object> Constants() {
- if (this->IsBitset()) return Iterator<i::Object>();
- return Iterator<i::Object>(this);
- }
-
// Printing.
- enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM };
-
- void PrintTo(std::ostream& os, PrintDimension dim = BOTH_DIMS); // NOLINT
+ void PrintTo(std::ostream& os);
#ifdef DEBUG
void Print();
@@ -893,16 +575,10 @@ class Type {
}
UnionType* AsUnion() { return UnionType::cast(this); }
- bitset Representation();
-
- // Auxiliary functions.
- bool SemanticMaybe(Type* that);
-
bitset BitsetGlb() { return BitsetType::Glb(this); }
bitset BitsetLub() { return BitsetType::Lub(this); }
bool SlowIs(Type* that);
- bool SemanticIs(Type* that);
static bool Overlap(RangeType* lhs, RangeType* rhs);
static bool Contains(RangeType* lhs, RangeType* rhs);
@@ -924,59 +600,8 @@ class Type {
static Type* NormalizeRangeAndBitset(Type* range, bitset* bits, Zone* zone);
};
-// -----------------------------------------------------------------------------
-// Type bounds. A simple struct to represent a pair of lower/upper types.
-
-struct Bounds {
- Type* lower;
- Type* upper;
-
- Bounds()
- : // Make sure accessing uninitialized bounds crashes big-time.
- lower(nullptr),
- upper(nullptr) {}
- explicit Bounds(Type* t) : lower(t), upper(t) {}
- Bounds(Type* l, Type* u) : lower(l), upper(u) { DCHECK(lower->Is(upper)); }
-
- // Unrestricted bounds.
- static Bounds Unbounded() { return Bounds(Type::None(), Type::Any()); }
-
- // Meet: both b1 and b2 are known to hold.
- static Bounds Both(Bounds b1, Bounds b2, Zone* zone) {
- Type* lower = Type::Union(b1.lower, b2.lower, zone);
- Type* upper = Type::Intersect(b1.upper, b2.upper, zone);
- // Lower bounds are considered approximate, correct as necessary.
- if (!lower->Is(upper)) lower = upper;
- return Bounds(lower, upper);
- }
-
- // Join: either b1 or b2 is known to hold.
- static Bounds Either(Bounds b1, Bounds b2, Zone* zone) {
- Type* lower = Type::Intersect(b1.lower, b2.lower, zone);
- Type* upper = Type::Union(b1.upper, b2.upper, zone);
- return Bounds(lower, upper);
- }
-
- static Bounds NarrowLower(Bounds b, Type* t, Zone* zone) {
- Type* lower = Type::Union(b.lower, t, zone);
- // Lower bounds are considered approximate, correct as necessary.
- if (!lower->Is(b.upper)) lower = b.upper;
- return Bounds(lower, b.upper);
- }
- static Bounds NarrowUpper(Bounds b, Type* t, Zone* zone) {
- Type* lower = b.lower;
- Type* upper = Type::Intersect(b.upper, t, zone);
- // Lower bounds are considered approximate, correct as necessary.
- if (!lower->Is(upper)) lower = upper;
- return Bounds(lower, upper);
- }
-
- bool Narrows(Bounds that) {
- return that.lower->Is(this->lower) && this->upper->Is(that.upper);
- }
-};
-
+} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_TYPES_H_
+#endif // V8_COMPILER_TYPES_H_
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index eb42b39569..b9faeeedd4 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -48,7 +48,7 @@ class Verifier::Visitor {
FATAL(str.str().c_str());
}
}
- void CheckUpperIs(Node* node, Type* type) {
+ void CheckTypeIs(Node* node, Type* type) {
if (typing == TYPED && !NodeProperties::GetType(node)->Is(type)) {
std::ostringstream str;
str << "TypeError: node #" << node->id() << ":" << *node->op()
@@ -59,7 +59,7 @@ class Verifier::Visitor {
FATAL(str.str().c_str());
}
}
- void CheckUpperMaybe(Node* node, Type* type) {
+ void CheckTypeMaybe(Node* node, Type* type) {
if (typing == TYPED && !NodeProperties::GetType(node)->Maybe(type)) {
std::ostringstream str;
str << "TypeError: node #" << node->id() << ":" << *node->op()
@@ -181,7 +181,7 @@ void Verifier::Visitor::Check(Node* node) {
CHECK_EQ(0, input_count);
// Type is a tuple.
// TODO(rossberg): Multiple outputs are currently typed as Internal.
- CheckUpperIs(node, Type::Internal());
+ CheckTypeIs(node, Type::Internal());
break;
case IrOpcode::kEnd:
// End has no outputs.
@@ -230,7 +230,7 @@ void Verifier::Visitor::Check(Node* node) {
Node* input = NodeProperties::GetControlInput(node, 0);
CHECK(!input->op()->HasProperty(Operator::kNoThrow));
// Type can be anything.
- CheckUpperIs(node, Type::Any());
+ CheckTypeIs(node, Type::Any());
break;
}
case IrOpcode::kSwitch: {
@@ -330,21 +330,21 @@ void Verifier::Visitor::Check(Node* node) {
CHECK_LE(-1, index);
CHECK_LT(index + 1, start->op()->ValueOutputCount());
// Type can be anything.
- CheckUpperIs(node, Type::Any());
+ CheckTypeIs(node, Type::Any());
break;
}
case IrOpcode::kInt32Constant: // TODO(rossberg): rename Word32Constant?
// Constants have no inputs.
CHECK_EQ(0, input_count);
// Type is a 32 bit integer, signed or unsigned.
- CheckUpperIs(node, Type::Integral32());
+ CheckTypeIs(node, Type::Integral32());
break;
case IrOpcode::kInt64Constant:
// Constants have no inputs.
CHECK_EQ(0, input_count);
// Type is internal.
// TODO(rossberg): Introduce proper Int64 type.
- CheckUpperIs(node, Type::Internal());
+ CheckTypeIs(node, Type::Internal());
break;
case IrOpcode::kFloat32Constant:
case IrOpcode::kFloat64Constant:
@@ -352,7 +352,7 @@ void Verifier::Visitor::Check(Node* node) {
// Constants have no inputs.
CHECK_EQ(0, input_count);
// Type is a number.
- CheckUpperIs(node, Type::Number());
+ CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kRelocatableInt32Constant:
case IrOpcode::kRelocatableInt64Constant:
@@ -361,21 +361,19 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kHeapConstant:
// Constants have no inputs.
CHECK_EQ(0, input_count);
- // Type can be anything represented as a heap pointer.
- CheckUpperIs(node, Type::TaggedPointer());
break;
case IrOpcode::kExternalConstant:
// Constants have no inputs.
CHECK_EQ(0, input_count);
// Type is considered internal.
- CheckUpperIs(node, Type::Internal());
+ CheckTypeIs(node, Type::Internal());
break;
case IrOpcode::kOsrValue:
// OSR values have a value and a control input.
CHECK_EQ(1, control_count);
CHECK_EQ(1, input_count);
// Type is merged from other values in the graph and could be any.
- CheckUpperIs(node, Type::Any());
+ CheckTypeIs(node, Type::Any());
break;
case IrOpcode::kProjection: {
// Projection has an input that produces enough values.
@@ -385,7 +383,7 @@ void Verifier::Visitor::Check(Node* node) {
// Type can be anything.
// TODO(rossberg): Introduce tuple types for this.
// TODO(titzer): Convince rossberg not to.
- CheckUpperIs(node, Type::Any());
+ CheckTypeIs(node, Type::Any());
break;
}
case IrOpcode::kSelect: {
@@ -495,7 +493,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSGreaterThanOrEqual:
// Type is Boolean.
- CheckUpperIs(node, Type::Boolean());
+ CheckTypeIs(node, Type::Boolean());
break;
case IrOpcode::kJSBitwiseOr:
@@ -505,80 +503,80 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSShiftRight:
case IrOpcode::kJSShiftRightLogical:
// Type is 32 bit integral.
- CheckUpperIs(node, Type::Integral32());
+ CheckTypeIs(node, Type::Integral32());
break;
case IrOpcode::kJSAdd:
// Type is Number or String.
- CheckUpperIs(node, Type::NumberOrString());
+ CheckTypeIs(node, Type::NumberOrString());
break;
case IrOpcode::kJSSubtract:
case IrOpcode::kJSMultiply:
case IrOpcode::kJSDivide:
case IrOpcode::kJSModulus:
// Type is Number.
- CheckUpperIs(node, Type::Number());
+ CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kJSToBoolean:
// Type is Boolean.
- CheckUpperIs(node, Type::Boolean());
+ CheckTypeIs(node, Type::Boolean());
break;
case IrOpcode::kJSToInteger:
// Type is OrderedNumber.
- CheckUpperIs(node, Type::OrderedNumber());
+ CheckTypeIs(node, Type::OrderedNumber());
break;
case IrOpcode::kJSToLength:
// Type is OrderedNumber.
- CheckUpperIs(node, Type::OrderedNumber());
+ CheckTypeIs(node, Type::OrderedNumber());
break;
case IrOpcode::kJSToName:
// Type is Name.
- CheckUpperIs(node, Type::Name());
+ CheckTypeIs(node, Type::Name());
break;
case IrOpcode::kJSToNumber:
// Type is Number.
- CheckUpperIs(node, Type::Number());
+ CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kJSToString:
// Type is String.
- CheckUpperIs(node, Type::String());
+ CheckTypeIs(node, Type::String());
break;
case IrOpcode::kJSToObject:
// Type is Receiver.
- CheckUpperIs(node, Type::Receiver());
+ CheckTypeIs(node, Type::Receiver());
break;
case IrOpcode::kJSCreate:
// Type is Object.
- CheckUpperIs(node, Type::Object());
+ CheckTypeIs(node, Type::Object());
break;
case IrOpcode::kJSCreateArguments:
// Type is OtherObject.
- CheckUpperIs(node, Type::OtherObject());
+ CheckTypeIs(node, Type::OtherObject());
break;
case IrOpcode::kJSCreateArray:
// Type is OtherObject.
- CheckUpperIs(node, Type::OtherObject());
+ CheckTypeIs(node, Type::OtherObject());
break;
case IrOpcode::kJSCreateClosure:
// Type is Function.
- CheckUpperIs(node, Type::Function());
+ CheckTypeIs(node, Type::Function());
break;
case IrOpcode::kJSCreateIterResultObject:
// Type is OtherObject.
- CheckUpperIs(node, Type::OtherObject());
+ CheckTypeIs(node, Type::OtherObject());
break;
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
// Type is OtherObject.
- CheckUpperIs(node, Type::OtherObject());
+ CheckTypeIs(node, Type::OtherObject());
break;
case IrOpcode::kJSLoadProperty:
case IrOpcode::kJSLoadNamed:
case IrOpcode::kJSLoadGlobal:
// Type can be anything.
- CheckUpperIs(node, Type::Any());
+ CheckTypeIs(node, Type::Any());
break;
case IrOpcode::kJSStoreProperty:
case IrOpcode::kJSStoreNamed:
@@ -589,17 +587,18 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSDeleteProperty:
case IrOpcode::kJSHasProperty:
case IrOpcode::kJSInstanceOf:
+ case IrOpcode::kJSOrdinaryHasInstance:
// Type is Boolean.
- CheckUpperIs(node, Type::Boolean());
+ CheckTypeIs(node, Type::Boolean());
break;
case IrOpcode::kJSTypeOf:
// Type is String.
- CheckUpperIs(node, Type::String());
+ CheckTypeIs(node, Type::String());
break;
case IrOpcode::kJSLoadContext:
// Type can be anything.
- CheckUpperIs(node, Type::Any());
+ CheckTypeIs(node, Type::Any());
break;
case IrOpcode::kJSStoreContext:
// Type is empty.
@@ -612,44 +611,31 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSCreateScriptContext: {
// Type is Context, and operand is Internal.
Node* context = NodeProperties::GetContextInput(node);
- // TODO(rossberg): This should really be Is(Internal), but the typer
- // currently can't do backwards propagation.
- CheckUpperMaybe(context, Type::Internal());
- if (typing == TYPED) CHECK(NodeProperties::GetType(node)->IsContext());
+ // TODO(bmeurer): This should say CheckTypeIs, but we don't have type
+ // OtherInternal on certain contexts, i.e. those from OsrValue inputs.
+ CheckTypeMaybe(context, Type::OtherInternal());
+ CheckTypeIs(node, Type::OtherInternal());
break;
}
case IrOpcode::kJSCallConstruct:
case IrOpcode::kJSConvertReceiver:
// Type is Receiver.
- CheckUpperIs(node, Type::Receiver());
+ CheckTypeIs(node, Type::Receiver());
break;
case IrOpcode::kJSCallFunction:
case IrOpcode::kJSCallRuntime:
// Type can be anything.
- CheckUpperIs(node, Type::Any());
+ CheckTypeIs(node, Type::Any());
break;
case IrOpcode::kJSForInPrepare: {
// TODO(bmeurer): What are the constraints on thse?
- CheckUpperIs(node, Type::Any());
- break;
- }
- case IrOpcode::kJSForInDone: {
- // TODO(bmeurer): OSR breaks this invariant, although the node is not user
- // visible, so we know it is safe (fullcodegen has an unsigned smi there).
- // CheckValueInputIs(node, 0, Type::UnsignedSmall());
+ CheckTypeIs(node, Type::Any());
break;
}
case IrOpcode::kJSForInNext: {
- CheckUpperIs(node, Type::Union(Type::Name(), Type::Undefined(), zone));
- break;
- }
- case IrOpcode::kJSForInStep: {
- // TODO(bmeurer): OSR breaks this invariant, although the node is not user
- // visible, so we know it is safe (fullcodegen has an unsigned smi there).
- // CheckValueInputIs(node, 0, Type::UnsignedSmall());
- CheckUpperIs(node, Type::UnsignedSmall());
+ CheckTypeIs(node, Type::Union(Type::Name(), Type::Undefined(), zone));
break;
}
@@ -662,11 +648,11 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kJSGeneratorRestoreContinuation:
- CheckUpperIs(node, Type::SignedSmall());
+ CheckTypeIs(node, Type::SignedSmall());
break;
case IrOpcode::kJSGeneratorRestoreRegister:
- CheckUpperIs(node, Type::Any());
+ CheckTypeIs(node, Type::Any());
break;
case IrOpcode::kJSStackCheck:
@@ -686,32 +672,32 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kBooleanNot:
// Boolean -> Boolean
CheckValueInputIs(node, 0, Type::Boolean());
- CheckUpperIs(node, Type::Boolean());
+ CheckTypeIs(node, Type::Boolean());
break;
case IrOpcode::kNumberEqual:
// (Number, Number) -> Boolean
CheckValueInputIs(node, 0, Type::Number());
CheckValueInputIs(node, 1, Type::Number());
- CheckUpperIs(node, Type::Boolean());
+ CheckTypeIs(node, Type::Boolean());
break;
case IrOpcode::kNumberLessThan:
case IrOpcode::kNumberLessThanOrEqual:
// (Number, Number) -> Boolean
CheckValueInputIs(node, 0, Type::Number());
CheckValueInputIs(node, 1, Type::Number());
- CheckUpperIs(node, Type::Boolean());
+ CheckTypeIs(node, Type::Boolean());
break;
case IrOpcode::kSpeculativeNumberAdd:
case IrOpcode::kSpeculativeNumberSubtract:
case IrOpcode::kSpeculativeNumberMultiply:
case IrOpcode::kSpeculativeNumberDivide:
case IrOpcode::kSpeculativeNumberModulus:
- CheckUpperIs(node, Type::Number());
+ CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kSpeculativeNumberEqual:
case IrOpcode::kSpeculativeNumberLessThan:
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
- CheckUpperIs(node, Type::Boolean());
+ CheckTypeIs(node, Type::Boolean());
break;
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract:
@@ -720,13 +706,13 @@ void Verifier::Visitor::Check(Node* node) {
// (Number, Number) -> Number
CheckValueInputIs(node, 0, Type::Number());
CheckValueInputIs(node, 1, Type::Number());
- CheckUpperIs(node, Type::Number());
+ CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kNumberModulus:
// (Number, Number) -> Number
CheckValueInputIs(node, 0, Type::Number());
CheckValueInputIs(node, 1, Type::Number());
- CheckUpperIs(node, Type::Number());
+ CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kNumberBitwiseOr:
case IrOpcode::kNumberBitwiseXor:
@@ -734,43 +720,43 @@ void Verifier::Visitor::Check(Node* node) {
// (Signed32, Signed32) -> Signed32
CheckValueInputIs(node, 0, Type::Signed32());
CheckValueInputIs(node, 1, Type::Signed32());
- CheckUpperIs(node, Type::Signed32());
+ CheckTypeIs(node, Type::Signed32());
break;
case IrOpcode::kSpeculativeNumberBitwiseOr:
case IrOpcode::kSpeculativeNumberBitwiseXor:
case IrOpcode::kSpeculativeNumberBitwiseAnd:
- CheckUpperIs(node, Type::Signed32());
+ CheckTypeIs(node, Type::Signed32());
break;
case IrOpcode::kNumberShiftLeft:
case IrOpcode::kNumberShiftRight:
// (Signed32, Unsigned32) -> Signed32
CheckValueInputIs(node, 0, Type::Signed32());
CheckValueInputIs(node, 1, Type::Unsigned32());
- CheckUpperIs(node, Type::Signed32());
+ CheckTypeIs(node, Type::Signed32());
break;
case IrOpcode::kSpeculativeNumberShiftLeft:
case IrOpcode::kSpeculativeNumberShiftRight:
- CheckUpperIs(node, Type::Signed32());
+ CheckTypeIs(node, Type::Signed32());
break;
case IrOpcode::kNumberShiftRightLogical:
// (Unsigned32, Unsigned32) -> Unsigned32
CheckValueInputIs(node, 0, Type::Unsigned32());
CheckValueInputIs(node, 1, Type::Unsigned32());
- CheckUpperIs(node, Type::Unsigned32());
+ CheckTypeIs(node, Type::Unsigned32());
break;
case IrOpcode::kSpeculativeNumberShiftRightLogical:
- CheckUpperIs(node, Type::Unsigned32());
+ CheckTypeIs(node, Type::Unsigned32());
break;
case IrOpcode::kNumberImul:
// (Unsigned32, Unsigned32) -> Signed32
CheckValueInputIs(node, 0, Type::Unsigned32());
CheckValueInputIs(node, 1, Type::Unsigned32());
- CheckUpperIs(node, Type::Signed32());
+ CheckTypeIs(node, Type::Signed32());
break;
case IrOpcode::kNumberClz32:
// Unsigned32 -> Unsigned32
CheckValueInputIs(node, 0, Type::Unsigned32());
- CheckUpperIs(node, Type::Unsigned32());
+ CheckTypeIs(node, Type::Unsigned32());
break;
case IrOpcode::kNumberAtan2:
case IrOpcode::kNumberMax:
@@ -779,7 +765,7 @@ void Verifier::Visitor::Check(Node* node) {
// (Number, Number) -> Number
CheckValueInputIs(node, 0, Type::Number());
CheckValueInputIs(node, 1, Type::Number());
- CheckUpperIs(node, Type::Number());
+ CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kNumberAbs:
case IrOpcode::kNumberCeil:
@@ -810,32 +796,37 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kNumberTrunc:
// Number -> Number
CheckValueInputIs(node, 0, Type::Number());
- CheckUpperIs(node, Type::Number());
+ CheckTypeIs(node, Type::Number());
+ break;
+ case IrOpcode::kNumberToBoolean:
+ // Number -> Boolean
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckTypeIs(node, Type::Boolean());
break;
case IrOpcode::kNumberToInt32:
// Number -> Signed32
CheckValueInputIs(node, 0, Type::Number());
- CheckUpperIs(node, Type::Signed32());
+ CheckTypeIs(node, Type::Signed32());
break;
case IrOpcode::kNumberToUint32:
// Number -> Unsigned32
CheckValueInputIs(node, 0, Type::Number());
- CheckUpperIs(node, Type::Unsigned32());
+ CheckTypeIs(node, Type::Unsigned32());
break;
case IrOpcode::kPlainPrimitiveToNumber:
// PlainPrimitive -> Number
CheckValueInputIs(node, 0, Type::PlainPrimitive());
- CheckUpperIs(node, Type::Number());
+ CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kPlainPrimitiveToWord32:
// PlainPrimitive -> Integral32
CheckValueInputIs(node, 0, Type::PlainPrimitive());
- CheckUpperIs(node, Type::Integral32());
+ CheckTypeIs(node, Type::Integral32());
break;
case IrOpcode::kPlainPrimitiveToFloat64:
// PlainPrimitive -> Number
CheckValueInputIs(node, 0, Type::PlainPrimitive());
- CheckUpperIs(node, Type::Number());
+ CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
@@ -843,23 +834,28 @@ void Verifier::Visitor::Check(Node* node) {
// (String, String) -> Boolean
CheckValueInputIs(node, 0, Type::String());
CheckValueInputIs(node, 1, Type::String());
- CheckUpperIs(node, Type::Boolean());
+ CheckTypeIs(node, Type::Boolean());
break;
case IrOpcode::kStringCharCodeAt:
// (String, Unsigned32) -> UnsignedSmall
CheckValueInputIs(node, 0, Type::String());
CheckValueInputIs(node, 1, Type::Unsigned32());
- CheckUpperIs(node, Type::UnsignedSmall());
+ CheckTypeIs(node, Type::UnsignedSmall());
break;
case IrOpcode::kStringFromCharCode:
// Number -> String
CheckValueInputIs(node, 0, Type::Number());
- CheckUpperIs(node, Type::String());
+ CheckTypeIs(node, Type::String());
+ break;
+ case IrOpcode::kStringFromCodePoint:
+ // (Unsigned32) -> String
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckTypeIs(node, Type::String());
break;
case IrOpcode::kReferenceEqual: {
// (Unique, Any) -> Boolean and
// (Any, Unique) -> Boolean
- CheckUpperIs(node, Type::Boolean());
+ CheckTypeIs(node, Type::Boolean());
break;
}
case IrOpcode::kObjectIsCallable:
@@ -868,24 +864,24 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kObjectIsSmi:
case IrOpcode::kObjectIsString:
case IrOpcode::kObjectIsUndetectable:
+ case IrOpcode::kArrayBufferWasNeutered:
CheckValueInputIs(node, 0, Type::Any());
- CheckUpperIs(node, Type::Boolean());
+ CheckTypeIs(node, Type::Boolean());
break;
case IrOpcode::kAllocate:
CheckValueInputIs(node, 0, Type::PlainNumber());
- CheckUpperIs(node, Type::TaggedPointer());
break;
case IrOpcode::kEnsureWritableFastElements:
CheckValueInputIs(node, 0, Type::Any());
CheckValueInputIs(node, 1, Type::Internal());
- CheckUpperIs(node, Type::Internal());
+ CheckTypeIs(node, Type::Internal());
break;
case IrOpcode::kMaybeGrowFastElements:
CheckValueInputIs(node, 0, Type::Any());
CheckValueInputIs(node, 1, Type::Internal());
CheckValueInputIs(node, 2, Type::Unsigned31());
CheckValueInputIs(node, 3, Type::Unsigned31());
- CheckUpperIs(node, Type::Internal());
+ CheckTypeIs(node, Type::Internal());
break;
case IrOpcode::kTransitionElementsKind:
CheckValueInputIs(node, 0, Type::Any());
@@ -900,7 +896,7 @@ void Verifier::Visitor::Check(Node* node) {
// Type* from = Type::Intersect(Type::Signed32(), Type::Tagged());
// Type* to = Type::Intersect(Type::Signed32(), Type::UntaggedInt32());
// CheckValueInputIs(node, 0, from));
- // CheckUpperIs(node, to));
+ // CheckTypeIs(node, to));
break;
}
case IrOpcode::kChangeTaggedToInt32: {
@@ -909,7 +905,7 @@ void Verifier::Visitor::Check(Node* node) {
// Type* from = Type::Intersect(Type::Signed32(), Type::Tagged());
// Type* to = Type::Intersect(Type::Signed32(), Type::UntaggedInt32());
// CheckValueInputIs(node, 0, from));
- // CheckUpperIs(node, to));
+ // CheckTypeIs(node, to));
break;
}
case IrOpcode::kChangeTaggedToUint32: {
@@ -918,7 +914,7 @@ void Verifier::Visitor::Check(Node* node) {
// Type* from = Type::Intersect(Type::Unsigned32(), Type::Tagged());
// Type* to =Type::Intersect(Type::Unsigned32(), Type::UntaggedInt32());
// CheckValueInputIs(node, 0, from));
- // CheckUpperIs(node, to));
+ // CheckTypeIs(node, to));
break;
}
case IrOpcode::kChangeTaggedToFloat64: {
@@ -927,7 +923,7 @@ void Verifier::Visitor::Check(Node* node) {
// Type* from = Type::Intersect(Type::Number(), Type::Tagged());
// Type* to = Type::Intersect(Type::Number(), Type::UntaggedFloat64());
// CheckValueInputIs(node, 0, from));
- // CheckUpperIs(node, to));
+ // CheckTypeIs(node, to));
break;
}
case IrOpcode::kTruncateTaggedToFloat64: {
@@ -937,7 +933,7 @@ void Verifier::Visitor::Check(Node* node) {
// Type::Tagged());
// Type* to = Type::Intersect(Type::Number(), Type::UntaggedFloat64());
// CheckValueInputIs(node, 0, from));
- // CheckUpperIs(node, to));
+ // CheckTypeIs(node, to));
break;
}
case IrOpcode::kChangeInt31ToTaggedSigned: {
@@ -946,7 +942,7 @@ void Verifier::Visitor::Check(Node* node) {
// Type* from =Type::Intersect(Type::Signed31(), Type::UntaggedInt32());
// Type* to = Type::Intersect(Type::Signed31(), Type::Tagged());
// CheckValueInputIs(node, 0, from));
- // CheckUpperIs(node, to));
+ // CheckTypeIs(node, to));
break;
}
case IrOpcode::kChangeInt32ToTagged: {
@@ -955,7 +951,7 @@ void Verifier::Visitor::Check(Node* node) {
// Type* from =Type::Intersect(Type::Signed32(), Type::UntaggedInt32());
// Type* to = Type::Intersect(Type::Signed32(), Type::Tagged());
// CheckValueInputIs(node, 0, from));
- // CheckUpperIs(node, to));
+ // CheckTypeIs(node, to));
break;
}
case IrOpcode::kChangeUint32ToTagged: {
@@ -964,7 +960,7 @@ void Verifier::Visitor::Check(Node* node) {
// Type* from=Type::Intersect(Type::Unsigned32(),Type::UntaggedInt32());
// Type* to = Type::Intersect(Type::Unsigned32(), Type::Tagged());
// CheckValueInputIs(node, 0, from));
- // CheckUpperIs(node, to));
+ // CheckTypeIs(node, to));
break;
}
case IrOpcode::kChangeFloat64ToTagged: {
@@ -973,7 +969,7 @@ void Verifier::Visitor::Check(Node* node) {
// Type* from =Type::Intersect(Type::Number(), Type::UntaggedFloat64());
// Type* to = Type::Intersect(Type::Number(), Type::Tagged());
// CheckValueInputIs(node, 0, from));
- // CheckUpperIs(node, to));
+ // CheckTypeIs(node, to));
break;
}
case IrOpcode::kChangeTaggedToBit: {
@@ -982,7 +978,7 @@ void Verifier::Visitor::Check(Node* node) {
// Type* from = Type::Intersect(Type::Boolean(), Type::TaggedPtr());
// Type* to = Type::Intersect(Type::Boolean(), Type::UntaggedInt1());
// CheckValueInputIs(node, 0, from));
- // CheckUpperIs(node, to));
+ // CheckTypeIs(node, to));
break;
}
case IrOpcode::kChangeBitToTagged: {
@@ -991,7 +987,7 @@ void Verifier::Visitor::Check(Node* node) {
// Type* from = Type::Intersect(Type::Boolean(), Type::UntaggedInt1());
// Type* to = Type::Intersect(Type::Boolean(), Type::TaggedPtr());
// CheckValueInputIs(node, 0, from));
- // CheckUpperIs(node, to));
+ // CheckTypeIs(node, to));
break;
}
case IrOpcode::kTruncateTaggedToWord32: {
@@ -1000,21 +996,23 @@ void Verifier::Visitor::Check(Node* node) {
// Type* from = Type::Intersect(Type::Number(), Type::Tagged());
// Type* to = Type::Intersect(Type::Number(), Type::UntaggedInt32());
// CheckValueInputIs(node, 0, from));
- // CheckUpperIs(node, to));
+ // CheckTypeIs(node, to));
break;
}
- case IrOpcode::kImpossibleToWord32:
- case IrOpcode::kImpossibleToWord64:
- case IrOpcode::kImpossibleToFloat32:
- case IrOpcode::kImpossibleToFloat64:
- case IrOpcode::kImpossibleToTagged:
- case IrOpcode::kImpossibleToBit:
+ case IrOpcode::kTruncateTaggedToBit:
break;
case IrOpcode::kCheckBounds:
CheckValueInputIs(node, 0, Type::Any());
CheckValueInputIs(node, 1, Type::Unsigned31());
- CheckUpperIs(node, Type::Unsigned31());
+ CheckTypeIs(node, Type::Unsigned31());
+ break;
+ case IrOpcode::kCheckHeapObject:
+ CheckValueInputIs(node, 0, Type::Any());
+ break;
+ case IrOpcode::kCheckIf:
+ CheckValueInputIs(node, 0, Type::Boolean());
+ CheckNotTyped(node);
break;
case IrOpcode::kCheckMaps:
// (Any, Internal, ..., Internal) -> Any
@@ -1026,23 +1024,14 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kCheckNumber:
CheckValueInputIs(node, 0, Type::Any());
- CheckUpperIs(node, Type::Number());
+ CheckTypeIs(node, Type::Number());
break;
- case IrOpcode::kCheckString:
+ case IrOpcode::kCheckSmi:
CheckValueInputIs(node, 0, Type::Any());
- CheckUpperIs(node, Type::String());
break;
- case IrOpcode::kCheckIf:
- CheckValueInputIs(node, 0, Type::Boolean());
- CheckNotTyped(node);
- break;
- case IrOpcode::kCheckTaggedSigned:
- CheckValueInputIs(node, 0, Type::Any());
- CheckUpperIs(node, Type::TaggedSigned());
- break;
- case IrOpcode::kCheckTaggedPointer:
+ case IrOpcode::kCheckString:
CheckValueInputIs(node, 0, Type::Any());
- CheckUpperIs(node, Type::TaggedPointer());
+ CheckTypeIs(node, Type::String());
break;
case IrOpcode::kCheckedInt32Add:
@@ -1052,32 +1041,35 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kCheckedUint32Div:
case IrOpcode::kCheckedUint32Mod:
case IrOpcode::kCheckedInt32Mul:
+ case IrOpcode::kCheckedInt32ToTaggedSigned:
case IrOpcode::kCheckedUint32ToInt32:
+ case IrOpcode::kCheckedUint32ToTaggedSigned:
case IrOpcode::kCheckedFloat64ToInt32:
case IrOpcode::kCheckedTaggedSignedToInt32:
case IrOpcode::kCheckedTaggedToInt32:
case IrOpcode::kCheckedTaggedToFloat64:
+ case IrOpcode::kCheckedTaggedToTaggedSigned:
case IrOpcode::kCheckedTruncateTaggedToWord32:
break;
case IrOpcode::kCheckFloat64Hole:
CheckValueInputIs(node, 0, Type::Number());
- CheckUpperIs(node, Type::Number());
+ CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kCheckTaggedHole:
CheckValueInputIs(node, 0, Type::Any());
- CheckUpperIs(node, Type::NonInternal());
+ CheckTypeIs(node, Type::NonInternal());
break;
case IrOpcode::kConvertTaggedHoleToUndefined:
CheckValueInputIs(node, 0, Type::Any());
- CheckUpperIs(node, Type::NonInternal());
+ CheckTypeIs(node, Type::NonInternal());
break;
case IrOpcode::kLoadField:
// Object -> fieldtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
- // CheckUpperIs(node, FieldAccessOf(node->op()).type));
+ // CheckTypeIs(node, FieldAccessOf(node->op()).type));
break;
case IrOpcode::kLoadBuffer:
break;
@@ -1085,7 +1077,7 @@ void Verifier::Visitor::Check(Node* node) {
// Object -> elementtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
- // CheckUpperIs(node, ElementAccessOf(node->op()).type));
+ // CheckTypeIs(node, ElementAccessOf(node->op()).type));
break;
case IrOpcode::kLoadTypedElement:
break;
@@ -1110,15 +1102,16 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kNumberSilenceNaN:
CheckValueInputIs(node, 0, Type::Number());
- CheckUpperIs(node, Type::Number());
+ CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kTypeGuard:
- CheckUpperIs(node, TypeGuardTypeOf(node->op()));
+ CheckTypeIs(node, TypeGuardTypeOf(node->op()));
break;
// Machine operators
// -----------------------
case IrOpcode::kLoad:
+ case IrOpcode::kProtectedLoad:
case IrOpcode::kStore:
case IrOpcode::kStackSlot:
case IrOpcode::kWord32And:
@@ -1245,7 +1238,9 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kBitcastFloat64ToInt64:
case IrOpcode::kBitcastInt32ToFloat32:
case IrOpcode::kBitcastInt64ToFloat64:
+ case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
+ case IrOpcode::kBitcastWordToTaggedSigned:
case IrOpcode::kChangeInt32ToInt64:
case IrOpcode::kChangeUint32ToUint64:
case IrOpcode::kChangeInt32ToFloat64:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index e92a434d1d..b003e9968a 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -189,26 +189,29 @@ class WasmTrapHelper : public ZoneObject {
Node* GetTrapValue(wasm::FunctionSig* sig) {
if (sig->return_count() > 0) {
- switch (sig->GetReturn()) {
- case wasm::kAstI32:
- return jsgraph()->Int32Constant(0xdeadbeef);
- case wasm::kAstI64:
- return jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
- case wasm::kAstF32:
- return jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
- case wasm::kAstF64:
- return jsgraph()->Float64Constant(
- bit_cast<double>(0xdeadbeefdeadbeef));
- break;
- default:
- UNREACHABLE();
- return nullptr;
- }
+ return GetTrapValue(sig->GetReturn());
} else {
return jsgraph()->Int32Constant(0xdeadbeef);
}
}
+ Node* GetTrapValue(wasm::LocalType type) {
+ switch (type) {
+ case wasm::kAstI32:
+ return jsgraph()->Int32Constant(0xdeadbeef);
+ case wasm::kAstI64:
+ return jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
+ case wasm::kAstF32:
+ return jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
+ case wasm::kAstF64:
+ return jsgraph()->Float64Constant(bit_cast<double>(0xdeadbeefdeadbeef));
+ break;
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+ }
+
private:
WasmGraphBuilder* builder_;
JSGraph* jsgraph_;
@@ -334,6 +337,19 @@ bool WasmGraphBuilder::IsPhiWithMerge(Node* phi, Node* merge) {
NodeProperties::GetControlInput(phi) == merge;
}
+bool WasmGraphBuilder::ThrowsException(Node* node, Node** if_success,
+ Node** if_exception) {
+ if (node->op()->HasProperty(compiler::Operator::kNoThrow)) {
+ return false;
+ }
+
+ *if_success = graph()->NewNode(jsgraph()->common()->IfSuccess(), node);
+ *if_exception =
+ graph()->NewNode(jsgraph()->common()->IfException(), node, node);
+
+ return true;
+}
+
void WasmGraphBuilder::AppendToMerge(Node* merge, Node* from) {
DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
merge->AppendInput(jsgraph()->zone(), from);
@@ -932,8 +948,6 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
return BuildI64UConvertF32(input, position);
case wasm::kExprI64UConvertF64:
return BuildI64UConvertF64(input, position);
- case wasm::kExprGrowMemory:
- return BuildGrowMemory(input);
case wasm::kExprI32AsmjsLoadMem8S:
return BuildAsmjsLoadMem(MachineType::Int8(), input);
case wasm::kExprI32AsmjsLoadMem8U:
@@ -995,16 +1009,11 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
DCHECK_NOT_NULL(*control_);
DCHECK_NOT_NULL(*effect_);
- if (count == 0) {
- // Handle a return of void.
- vals[0] = jsgraph()->Int32Constant(0);
- count = 1;
- }
-
Node** buf = Realloc(vals, count, count + 2);
buf[count] = *effect_;
buf[count + 1] = *control_;
- Node* ret = graph()->NewNode(jsgraph()->common()->Return(), count + 2, vals);
+ Node* ret =
+ graph()->NewNode(jsgraph()->common()->Return(count), count + 2, vals);
MergeControlToEnd(jsgraph(), ret);
return ret;
@@ -1667,14 +1676,21 @@ Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
return load;
}
-Node* WasmGraphBuilder::BuildGrowMemory(Node* input) {
+Node* WasmGraphBuilder::GrowMemory(Node* input) {
+ Diamond check_input_range(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(
+ jsgraph()->machine()->Uint32LessThanOrEqual(), input,
+ jsgraph()->Uint32Constant(wasm::WasmModule::kMaxMemPages)),
+ BranchHint::kTrue);
+
+ check_input_range.Chain(*control_);
+
Runtime::FunctionId function_id = Runtime::kWasmGrowMemory;
const Runtime::Function* function = Runtime::FunctionForId(function_id);
CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
jsgraph()->zone(), function_id, function->nargs, Operator::kNoThrow,
CallDescriptor::kNoFlags);
- Node** control_ptr = control_;
- Node** effect_ptr = effect_;
wasm::ModuleEnv* module = module_;
input = BuildChangeUint32ToSmi(input);
Node* inputs[] = {
@@ -1683,13 +1699,86 @@ Node* WasmGraphBuilder::BuildGrowMemory(Node* input) {
ExternalReference(function_id, jsgraph()->isolate())), // ref
jsgraph()->Int32Constant(function->nargs), // arity
jsgraph()->HeapConstant(module->instance->context), // context
- *effect_ptr,
- *control_ptr};
- Node* node = graph()->NewNode(jsgraph()->common()->Call(desc),
+ *effect_,
+ check_input_range.if_true};
+ Node* call = graph()->NewNode(jsgraph()->common()->Call(desc),
static_cast<int>(arraysize(inputs)), inputs);
- *effect_ptr = node;
- node = BuildChangeSmiToInt32(node);
- return node;
+
+ Node* result = BuildChangeSmiToInt32(call);
+
+ result = check_input_range.Phi(MachineRepresentation::kWord32, result,
+ jsgraph()->Int32Constant(-1));
+ *effect_ = graph()->NewNode(jsgraph()->common()->EffectPhi(2), call, *effect_,
+ check_input_range.merge);
+ *control_ = check_input_range.merge;
+ return result;
+}
+
+Node* WasmGraphBuilder::Throw(Node* input) {
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+
+ // Pass the thrown value as two SMIs:
+ //
+ // upper = static_cast<uint32_t>(input) >> 16;
+ // lower = input & 0xFFFF;
+ //
+ // This is needed because we can't safely call BuildChangeInt32ToTagged from
+ // this method.
+ //
+ // TODO(wasm): figure out how to properly pass this to the runtime function.
+ Node* upper = BuildChangeInt32ToSmi(
+ graph()->NewNode(machine->Word32Shr(), input, Int32Constant(16)));
+ Node* lower = BuildChangeInt32ToSmi(
+ graph()->NewNode(machine->Word32And(), input, Int32Constant(0xFFFFu)));
+
+ Node* parameters[] = {lower, upper}; // thrown value
+ return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(),
+ module_->instance->context, parameters,
+ arraysize(parameters), effect_, *control_);
+}
+
+Node* WasmGraphBuilder::Catch(Node* input, wasm::WasmCodePosition position) {
+ CommonOperatorBuilder* common = jsgraph()->common();
+
+ Node* parameters[] = {input}; // caught value
+ Node* value =
+ BuildCallToRuntime(Runtime::kWasmGetCaughtExceptionValue, jsgraph(),
+ module_->instance->context, parameters,
+ arraysize(parameters), effect_, *control_);
+
+ Node* is_smi;
+ Node* is_heap;
+ Branch(BuildTestNotSmi(value), &is_heap, &is_smi);
+
+ // is_smi
+ Node* smi_i32 = BuildChangeSmiToInt32(value);
+ Node* is_smi_effect = *effect_;
+
+ // is_heap
+ *control_ = is_heap;
+ Node* heap_f64 = BuildLoadHeapNumberValue(value, is_heap);
+
+ // *control_ needs to point to the current control dependency (is_heap) in
+ // case BuildI32SConvertF64 needs to insert nodes that depend on the "current"
+ // control node.
+ Node* heap_i32 = BuildI32SConvertF64(heap_f64, position);
+ // *control_ contains the control node that should be used when merging the
+ // result for the catch clause. It may be different than *control_ because
+ // BuildI32SConvertF64 may introduce a new control node (used for trapping if
+ // heap_f64 cannot be converted to an i32.
+ is_heap = *control_;
+ Node* is_heap_effect = *effect_;
+
+ Node* merge = graph()->NewNode(common->Merge(2), is_heap, is_smi);
+ Node* effect_merge = graph()->NewNode(common->EffectPhi(2), is_heap_effect,
+ is_smi_effect, merge);
+
+ Node* value_i32 = graph()->NewNode(
+ common->Phi(MachineRepresentation::kWord32, 2), heap_i32, smi_i32, merge);
+
+ *control_ = merge;
+ *effect_ = effect_merge;
+ return value_i32;
}
Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
@@ -1961,6 +2050,7 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node** args) {
}
Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
+ Node*** rets,
wasm::WasmCodePosition position) {
const size_t params = sig->parameter_count();
const size_t extra = 2; // effect and control inputs.
@@ -1980,32 +2070,37 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
SetSourcePosition(call, position);
*effect_ = call;
+ size_t ret_count = sig->return_count();
+ if (ret_count == 0) return call; // No return value.
+
+ *rets = Buffer(ret_count);
+ if (ret_count == 1) {
+ // Only a single return value.
+ (*rets)[0] = call;
+ } else {
+ // Create projections for all return values.
+ for (size_t i = 0; i < ret_count; i++) {
+ (*rets)[i] = graph()->NewNode(jsgraph()->common()->Projection(i), call,
+ graph()->start());
+ }
+ }
return call;
}
-Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args,
+Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
// Add code object as constant.
- args[0] = HeapConstant(module_->GetCodeOrPlaceholder(index));
+ Handle<Code> code = module_->GetFunctionCode(index);
+ DCHECK(!code.is_null());
+ args[0] = HeapConstant(code);
wasm::FunctionSig* sig = module_->GetFunctionSignature(index);
- return BuildWasmCall(sig, args, position);
+ return BuildWasmCall(sig, args, rets, position);
}
-Node* WasmGraphBuilder::CallImport(uint32_t index, Node** args,
- wasm::WasmCodePosition position) {
- DCHECK_NULL(args[0]);
-
- // Add code object as constant.
- args[0] = HeapConstant(module_->GetImportCode(index));
- wasm::FunctionSig* sig = module_->GetImportSignature(index);
-
- return BuildWasmCall(sig, args, position);
-}
-
-Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args,
+Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args, Node*** rets,
wasm::WasmCodePosition position) {
DCHECK_NOT_NULL(args[0]);
DCHECK(module_ && module_->instance);
@@ -2020,6 +2115,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args,
// Bounds check the index.
uint32_t table_size =
module_->IsValidTable(0) ? module_->GetTable(0)->max_size : 0;
+ wasm::FunctionSig* sig = module_->GetSignature(index);
if (table_size > 0) {
// Bounds check against the table size.
Node* size = Uint32Constant(table_size);
@@ -2028,7 +2124,11 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args,
} else {
// No function table. Generate a trap and return a constant.
trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0), position);
- return trap_->GetTrapValue(module_->GetSignature(index));
+ (*rets) = Buffer(sig->return_count());
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ (*rets)[i] = trap_->GetTrapValue(sig->GetReturn(i));
+ }
+ return trap_->GetTrapValue(sig);
}
Node* table = FunctionTable(0);
@@ -2062,8 +2162,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args,
*effect_, *control_);
args[0] = load_code;
- wasm::FunctionSig* sig = module_->GetSignature(index);
- return BuildWasmCall(sig, args, position);
+ return BuildWasmCall(sig, args, rets, position);
}
Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
@@ -2197,11 +2296,11 @@ Node* WasmGraphBuilder::ToJS(Node* node, wasm::LocalType type) {
case wasm::kAstI32:
return BuildChangeInt32ToTagged(node);
case wasm::kAstI64:
- DCHECK(module_ && !module_->instance->context.is_null());
- // Throw a TypeError.
+ // Throw a TypeError. The native context is good enough here because we
+ // only throw a TypeError.
return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
- module_->instance->context, nullptr, 0, effect_,
- *control_);
+ jsgraph()->isolate()->native_context(), nullptr,
+ 0, effect_, *control_);
case wasm::kAstF32:
node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
node);
@@ -2359,15 +2458,11 @@ Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
break;
}
case wasm::kAstI64:
- // TODO(titzer): JS->i64 has no good solution right now. Using 32 bits.
- num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToWord32(),
- num);
- if (jsgraph()->machine()->Is64()) {
- // We cannot change an int32 to an int64 on a 32 bit platform. Instead
- // we will split the parameter node later.
- num = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), num);
- }
- break;
+ // Throw a TypeError. The native context is good enough here because we
+ // only throw a TypeError.
+ return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
+ jsgraph()->isolate()->native_context(), nullptr,
+ 0, effect_, *control_);
case wasm::kAstF32:
num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
num);
@@ -2528,6 +2623,23 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
MergeControlToEnd(jsgraph(), ret);
}
+int WasmGraphBuilder::AddParameterNodes(Node** args, int pos, int param_count,
+ wasm::FunctionSig* sig) {
+ // Convert WASM numbers to JS values.
+ int param_index = 0;
+ for (int i = 0; i < param_count; ++i) {
+ Node* param = graph()->NewNode(
+ jsgraph()->common()->Parameter(param_index++), graph()->start());
+ args[pos++] = ToJS(param, sig->GetParam(i));
+ if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
+ // On 32 bit platforms we have to skip the high word of int64
+ // parameters.
+ param_index++;
+ }
+ }
+ return pos;
+}
+
void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
wasm::FunctionSig* sig) {
DCHECK(target->IsCallable());
@@ -2548,18 +2660,14 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
*control_ = start;
Node** args = Buffer(wasm_count + 7);
- // The default context of the target.
- Handle<Context> target_context = isolate->native_context();
+ Node* call;
+ bool direct_call = false;
- // Optimization: check if the target is a JSFunction with the right arity so
- // that we can call it directly.
- bool call_direct = false;
- int pos = 0;
if (target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(target);
if (function->shared()->internal_formal_parameter_count() == wasm_count) {
- call_direct = true;
-
+ direct_call = true;
+ int pos = 0;
args[pos++] = jsgraph()->Constant(target); // target callable.
// Receiver.
if (is_sloppy(function->shared()->language_mode()) &&
@@ -2574,13 +2682,22 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
desc = Linkage::GetJSCallDescriptor(
graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
- // For a direct call we have to use the context of the JSFunction.
- target_context = handle(function->context());
+ // Convert WASM numbers to JS values.
+ pos = AddParameterNodes(args, pos, wasm_count, sig);
+
+ args[pos++] = jsgraph()->UndefinedConstant(); // new target
+ args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = HeapConstant(handle(function->context()));
+ args[pos++] = *effect_;
+ args[pos++] = *control_;
+
+ call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
}
}
// We cannot call the target directly, we have to use the Call builtin.
- if (!call_direct) {
+ if (!direct_call) {
+ int pos = 0;
Callable callable = CodeFactory::Call(isolate);
args[pos++] = jsgraph()->HeapConstant(callable.code());
args[pos++] = jsgraph()->Constant(target); // target callable
@@ -2591,30 +2708,21 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
desc = Linkage::GetStubCallDescriptor(isolate, graph()->zone(),
callable.descriptor(), wasm_count + 1,
CallDescriptor::kNoFlags);
- }
- // Convert WASM numbers to JS values.
- int param_index = 0;
- for (int i = 0; i < wasm_count; ++i) {
- Node* param =
- graph()->NewNode(jsgraph()->common()->Parameter(param_index++), start);
- args[pos++] = ToJS(param, sig->GetParam(i));
- if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
- // On 32 bit platforms we have to skip the high word of int64 parameters.
- param_index++;
- }
- }
+ // Convert WASM numbers to JS values.
+ pos = AddParameterNodes(args, pos, wasm_count, sig);
- if (call_direct) {
- args[pos++] = jsgraph()->UndefinedConstant(); // new target
- args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
- }
+ // The native_context is sufficient here, because all kind of callables
+ // which depend on the context provide their own context. The context here
+ // is only needed if the target is a constructor to throw a TypeError, if
+ // the target is a native function, or if the target is a callable JSObject,
+ // which can only be constructed by the runtime.
+ args[pos++] = HeapConstant(isolate->native_context());
+ args[pos++] = *effect_;
+ args[pos++] = *control_;
- args[pos++] = HeapConstant(target_context);
- args[pos++] = *effect_;
- args[pos++] = *control_;
-
- Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+ call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+ }
// Convert the return value back.
Node* ret;
@@ -2650,6 +2758,30 @@ Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
}
}
+Node* WasmGraphBuilder::CurrentMemoryPages() {
+ Runtime::FunctionId function_id = Runtime::kWasmMemorySize;
+ const Runtime::Function* function = Runtime::FunctionForId(function_id);
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph()->zone(), function_id, function->nargs, Operator::kNoThrow,
+ CallDescriptor::kNoFlags);
+ wasm::ModuleEnv* module = module_;
+ Node* inputs[] = {
+ jsgraph()->CEntryStubConstant(function->result_size), // C entry
+ jsgraph()->ExternalConstant(
+ ExternalReference(function_id, jsgraph()->isolate())), // ref
+ jsgraph()->Int32Constant(function->nargs), // arity
+ jsgraph()->HeapConstant(module->instance->context), // context
+ *effect_,
+ *control_};
+ Node* call = graph()->NewNode(jsgraph()->common()->Call(desc),
+ static_cast<int>(arraysize(inputs)), inputs);
+
+ Node* result = BuildChangeSmiToInt32(call);
+
+ *effect_ = call;
+ return result;
+}
+
Node* WasmGraphBuilder::MemSize(uint32_t offset) {
DCHECK(module_ && module_->instance);
uint32_t size = static_cast<uint32_t>(module_->instance->mem_size);
@@ -2715,19 +2847,34 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
// Check against the effective size.
size_t effective_size;
- if (offset >= size || (static_cast<uint64_t>(offset) + memsize) > size) {
+ if (size == 0) {
effective_size = 0;
+ } else if (offset >= size ||
+ (static_cast<uint64_t>(offset) + memsize) > size) {
+ // Two checks are needed in the case where the offset is statically
+ // out of bounds; one check for the offset being in bounds, and the next for
+ // the offset + index being out of bounds for code to be patched correctly
+ // on relocation.
+ effective_size = size - memsize + 1;
+ Node* cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThan(),
+ jsgraph()->IntPtrConstant(offset),
+ jsgraph()->RelocatableInt32Constant(
+ static_cast<uint32_t>(effective_size),
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
+ DCHECK(offset >= effective_size);
+ effective_size = offset - effective_size;
} else {
effective_size = size - offset - memsize + 1;
- }
- CHECK(effective_size <= kMaxUInt32);
-
- Uint32Matcher m(index);
- if (m.HasValue()) {
- uint32_t value = m.Value();
- if (value < effective_size) {
- // The bounds check will always succeed.
- return;
+ CHECK(effective_size <= kMaxUInt32);
+
+ Uint32Matcher m(index);
+ if (m.HasValue()) {
+ uint32_t value = m.Value();
+ if (value < effective_size) {
+ // The bounds check will always succeed.
+ return;
+ }
}
}
@@ -2746,15 +2893,26 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
Node* load;
// WASM semantics throw on OOB. Introduce explicit bounds check.
- BoundsCheckMem(memtype, index, offset, position);
+ if (!FLAG_wasm_trap_handler) {
+ BoundsCheckMem(memtype, index, offset, position);
+ }
bool aligned = static_cast<int>(alignment) >=
ElementSizeLog2Of(memtype.representation());
if (aligned ||
jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
- load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
- MemBuffer(offset), index, *effect_, *control_);
+ if (FLAG_wasm_trap_handler) {
+ Node* context = HeapConstant(module_->instance->context);
+ Node* position_node = jsgraph()->Int32Constant(position);
+ load = graph()->NewNode(jsgraph()->machine()->ProtectedLoad(memtype),
+ MemBuffer(offset), index, context, position_node,
+ *effect_, *control_);
+ } else {
+ load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
+ MemBuffer(offset), index, *effect_, *control_);
+ }
} else {
+ DCHECK(!FLAG_wasm_trap_handler);
load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
MemBuffer(offset), index, *effect_, *control_);
}
@@ -2866,15 +3024,31 @@ void WasmGraphBuilder::SetSourcePosition(Node* node,
source_position_table_->SetSourcePosition(node, pos);
}
+Node* WasmGraphBuilder::DefaultS128Value() {
+ // TODO(gdeepti): Introduce Simd128Constant to common-operator.h and use
+ // instead of creating a SIMD Value.
+ return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(),
+ Int32Constant(0), Int32Constant(0), Int32Constant(0),
+ Int32Constant(0));
+}
+
Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
const NodeVector& inputs) {
switch (opcode) {
- case wasm::kExprI32x4ExtractLane:
- return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(),
- inputs[0], inputs[1]);
case wasm::kExprI32x4Splat:
- return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(),
- inputs[0], inputs[0], inputs[0], inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(), inputs[0],
+ inputs[0], inputs[0], inputs[0]);
+ default:
+ return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+ }
+}
+
+Node* WasmGraphBuilder::SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane,
+ Node* input) {
+ switch (opcode) {
+ case wasm::kExprI32x4ExtractLane:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(), input,
+ Int32Constant(lane));
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
}
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 487ddcb760..c980a87fcb 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -9,10 +9,11 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
+#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -132,8 +133,12 @@ class WasmGraphBuilder {
wasm::WasmCodePosition position = wasm::kNoCodePosition);
Node* Unop(wasm::WasmOpcode opcode, Node* input,
wasm::WasmCodePosition position = wasm::kNoCodePosition);
+ Node* GrowMemory(Node* input);
+ Node* Throw(Node* input);
+ Node* Catch(Node* input, wasm::WasmCodePosition position);
unsigned InputCount(Node* node);
bool IsPhiWithMerge(Node* phi, Node* merge);
+ bool ThrowsException(Node* node, Node** if_success, Node** if_exception);
void AppendToMerge(Node* merge, Node* from);
void AppendToPhi(Node* phi, Node* from);
@@ -150,12 +155,11 @@ class WasmGraphBuilder {
Node* ReturnVoid();
Node* Unreachable(wasm::WasmCodePosition position);
- Node* CallDirect(uint32_t index, Node** args,
+ Node* CallDirect(uint32_t index, Node** args, Node*** rets,
wasm::WasmCodePosition position);
- Node* CallImport(uint32_t index, Node** args,
- wasm::WasmCodePosition position);
- Node* CallIndirect(uint32_t index, Node** args,
+ Node* CallIndirect(uint32_t index, Node** args, Node*** rets,
wasm::WasmCodePosition position);
+
void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
void BuildWasmToJSWrapper(Handle<JSReceiver> target, wasm::FunctionSig* sig);
@@ -167,7 +171,7 @@ class WasmGraphBuilder {
//-----------------------------------------------------------------------
// Operations that concern the linear memory.
//-----------------------------------------------------------------------
- Node* MemSize(uint32_t offset);
+ Node* CurrentMemoryPages();
Node* GetGlobal(uint32_t index);
Node* SetGlobal(uint32_t index, Node* val);
Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
@@ -194,7 +198,10 @@ class WasmGraphBuilder {
void SetSourcePosition(Node* node, wasm::WasmCodePosition position);
+ Node* DefaultS128Value();
+
Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
+ Node* SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane, Node* input);
private:
static const int kDefaultBufferSize = 16;
@@ -223,6 +230,7 @@ class WasmGraphBuilder {
Graph* graph();
Node* String(const char* string);
+ Node* MemSize(uint32_t offset);
Node* MemBuffer(uint32_t offset);
void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
wasm::WasmCodePosition position);
@@ -234,7 +242,7 @@ class WasmGraphBuilder {
Node* MaskShiftCount64(Node* node);
Node* BuildCCall(MachineSignature* sig, Node** args);
- Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args,
+ Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
wasm::WasmCodePosition position);
Node* BuildF32CopySign(Node* left, Node* right);
@@ -301,6 +309,7 @@ class WasmGraphBuilder {
Node* BuildJavaScriptToNumber(Node* node, Node* context, Node* effect,
Node* control);
+
Node* BuildChangeInt32ToTagged(Node* value);
Node* BuildChangeFloat64ToTagged(Node* value);
Node* BuildChangeTaggedToFloat64(Node* value);
@@ -315,7 +324,6 @@ class WasmGraphBuilder {
Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control);
Node* BuildLoadHeapNumberValue(Node* value, Node* control);
Node* BuildHeapNumberValueIndexConstant();
- Node* BuildGrowMemory(Node* input);
// Asm.js specific functionality.
Node* BuildI32AsmjsSConvertF32(Node* input);
@@ -334,6 +342,9 @@ class WasmGraphBuilder {
if (buf != buffer) memcpy(buf, buffer, old_count * sizeof(Node*));
return buf;
}
+
+ int AddParameterNodes(Node** args, int pos, int param_count,
+ wasm::FunctionSig* sig);
};
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index c50f643910..574db1cfef 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -11,7 +11,7 @@
#include "src/compiler/linkage.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -131,7 +131,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == s390x ==================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r2, r3, r4, r5, r6
-#define GP_RETURN_REGISTERS r2
+#define GP_RETURN_REGISTERS r2, r3
#define FP_PARAM_REGISTERS d0, d2, d4, d6
#define FP_RETURN_REGISTERS d0, d2, d4, d6
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 49a097b3bb..4d63e9ad83 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -4,11 +4,12 @@
#include "src/compiler/code-generator.h"
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/wasm/wasm-module.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -132,6 +133,11 @@ class X64OperandConverter : public InstructionOperandConverter {
int32_t disp = InputInt32(NextOffset(offset));
return Operand(index, scale, disp);
}
+ case kMode_Root: {
+ Register base = kRootRegister;
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(base, disp);
+ }
case kMode_None:
UNREACHABLE();
return Operand(no_reg, 0);
@@ -260,6 +266,40 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
RecordWriteMode const mode_;
};
+class WasmOutOfLineTrap final : public OutOfLineCode {
+ public:
+ WasmOutOfLineTrap(CodeGenerator* gen, Address pc, bool frame_elided,
+ Register context, int32_t position)
+ : OutOfLineCode(gen),
+ pc_(pc),
+ frame_elided_(frame_elided),
+ context_(context),
+ position_(position) {}
+
+ void Generate() final {
+ // TODO(eholk): record pc_ and the current pc in a table so that
+ // the signal handler can find it.
+ USE(pc_);
+
+ if (frame_elided_) {
+ __ EnterFrame(StackFrame::WASM);
+ }
+
+ wasm::TrapReason trap_id = wasm::kTrapMemOutOfBounds;
+ int trap_reason = wasm::WasmOpcodes::TrapReasonToMessageId(trap_id);
+ __ Push(Smi::FromInt(trap_reason));
+ __ Push(Smi::FromInt(position_));
+ __ Move(rsi, context_);
+ __ CallRuntime(Runtime::kThrowWasmError);
+ }
+
+ private:
+ Address pc_;
+ bool frame_elided_;
+ Register context_;
+ int32_t position_;
+};
+
} // namespace
@@ -866,9 +906,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDebugBreak:
__ int3();
break;
- case kArchImpossible:
- __ Abort(kConversionFromImpossibleValue);
- break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -878,8 +915,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result =
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result = AssembleDeoptimizerCall(
+ deopt_state_id, bailout_type, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -1422,7 +1459,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEFloat64Sqrt:
- ASSEMBLE_SSE_UNOP(sqrtsd);
+ ASSEMBLE_SSE_UNOP(Sqrtsd);
break;
case kSSEFloat64Round: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
@@ -1852,6 +1889,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movl:
+ case kX64TrapMovl:
if (instr->HasOutput()) {
if (instr->addressing_mode() == kMode_None) {
if (instr->InputAt(0)->IsRegister()) {
@@ -1860,7 +1898,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movl(i.OutputRegister(), i.InputOperand(0));
}
} else {
+ Address pc = __ pc();
__ movl(i.OutputRegister(), i.MemoryOperand());
+
+ if (arch_opcode == kX64TrapMovl) {
+ bool frame_elided = !frame_access_state()->has_frame();
+ new (zone()) WasmOutOfLineTrap(this, pc, frame_elided,
+ i.InputRegister(2), i.InputInt32(3));
+ }
}
__ AssertZeroExtended(i.OutputRegister());
} else {
@@ -2032,6 +2077,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xchgl(i.InputRegister(index), operand);
break;
}
+ case kX64Int32x4Create: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Movd(dst, i.InputRegister(0));
+ __ shufps(dst, dst, 0x0);
+ break;
+ }
+ case kX64Int32x4ExtractLane: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
break;
@@ -2252,13 +2309,14 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+ SourcePosition pos) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2449,7 +2507,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (value == 0) {
__ xorl(dst, dst);
} else {
- __ movl(dst, Immediate(value));
+ if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ __ movl(dst, Immediate(value, src.rmode()));
+ } else {
+ __ movl(dst, Immediate(value));
+ }
}
}
break;
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 7ab1097428..35acec08dc 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -128,6 +128,7 @@ namespace compiler {
V(X64Movzxwq) \
V(X64Movw) \
V(X64Movl) \
+ V(X64TrapMovl) \
V(X64Movsxlq) \
V(X64Movq) \
V(X64Movsd) \
@@ -145,7 +146,9 @@ namespace compiler {
V(X64StackCheck) \
V(X64Xchgb) \
V(X64Xchgw) \
- V(X64Xchgl)
+ V(X64Xchgl) \
+ V(X64Int32x4Create) \
+ V(X64Int32x4ExtractLane)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
@@ -177,7 +180,8 @@ namespace compiler {
V(M1I) /* [ %r2*1 + K] */ \
V(M2I) /* [ %r2*2 + K] */ \
V(M4I) /* [ %r2*4 + K] */ \
- V(M8I) /* [ %r2*8 + K] */
+ V(M8I) /* [ %r2*8 + K] */ \
+ V(Root) /* [%root + K] */
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index fb4b74914d..4208d8a594 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -36,10 +36,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Imul32:
case kX64ImulHigh32:
case kX64UmulHigh32:
- case kX64Idiv:
- case kX64Idiv32:
- case kX64Udiv:
- case kX64Udiv32:
case kX64Not:
case kX64Not32:
case kX64Neg:
@@ -127,10 +123,20 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Lea:
case kX64Dec32:
case kX64Inc32:
+ case kX64Int32x4Create:
+ case kX64Int32x4ExtractLane:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
+ case kX64Idiv:
+ case kX64Idiv32:
+ case kX64Udiv:
+ case kX64Udiv32:
+ return (instr->addressing_mode() == kMode_None)
+ ? kMayNeedDeoptCheck
+ : kMayNeedDeoptCheck | kIsLoadOperation | kHasSideEffect;
+
case kX64Movsxbl:
case kX64Movzxbl:
case kX64Movsxbq:
@@ -149,6 +155,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
return kHasSideEffect;
case kX64Movl:
+ case kX64TrapMovl:
if (instr->HasOutput()) {
DCHECK(instr->InputCount() >= 1);
return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 798d438e25..9a7657ef32 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -60,8 +60,7 @@ class X64OperandGenerator final : public OperandGenerator {
switch (opcode) {
case kX64Cmp:
case kX64Test:
- return rep == MachineRepresentation::kWord64 ||
- rep == MachineRepresentation::kTagged;
+ return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
case kX64Cmp32:
case kX64Test32:
return rep == MachineRepresentation::kWord32;
@@ -137,6 +136,22 @@ class X64OperandGenerator final : public OperandGenerator {
AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
InstructionOperand inputs[],
size_t* input_count) {
+ if (selector()->CanAddressRelativeToRootsRegister()) {
+ LoadMatcher<ExternalReferenceMatcher> m(operand);
+ if (m.index().HasValue() && m.object().HasValue()) {
+ Address const kRootsRegisterValue =
+ kRootRegisterBias +
+ reinterpret_cast<Address>(
+ selector()->isolate()->heap()->roots_array_start());
+ ptrdiff_t const delta =
+ m.index().Value() +
+ (m.object().Value().address() - kRootsRegisterValue);
+ if (is_int32(delta)) {
+ inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
+ return kMode_Root;
+ }
+ }
+ }
BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
DCHECK(m.matches());
if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
@@ -155,11 +170,9 @@ class X64OperandGenerator final : public OperandGenerator {
}
};
+namespace {
-void InstructionSelector::VisitLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- X64OperandGenerator g(this);
-
+ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
@@ -187,9 +200,18 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
+ break;
}
+ return opcode;
+}
+
+} // namespace
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ X64OperandGenerator g(this);
+
+ ArchOpcode opcode = GetLoadOpcode(load_rep);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionOperand inputs[3];
@@ -200,6 +222,24 @@ void InstructionSelector::VisitLoad(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ X64OperandGenerator g(this);
+
+ ArchOpcode opcode = GetLoadOpcode(load_rep);
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ // Add the context parameter as an input.
+ inputs[input_count++] = g.UseUniqueRegister(node->InputAt(2));
+ // Add the source position as an input
+ inputs[input_count++] = g.UseImmediate(node->InputAt(3));
+ InstructionCode code = opcode | AddressingModeField::encode(mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
void InstructionSelector::VisitStore(Node* node) {
X64OperandGenerator g(this);
@@ -212,7 +252,7 @@ void InstructionSelector::VisitStore(Node* node) {
MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ DCHECK(CanBeTaggedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -701,6 +741,7 @@ bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
case kMode_M2I:
case kMode_M4I:
case kMode_M8I:
+ case kMode_Root:
UNREACHABLE();
}
inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
@@ -1170,11 +1211,10 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
}
}
+namespace {
-void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
- X64OperandGenerator g(this);
- Node* value = node->InputAt(0);
- switch (value->opcode()) {
+bool ZeroExtendsWord32ToWord64(Node* node) {
+ switch (node->opcode()) {
case IrOpcode::kWord32And:
case IrOpcode::kWord32Or:
case IrOpcode::kWord32Xor:
@@ -1195,14 +1235,35 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
case IrOpcode::kUint32LessThan:
case IrOpcode::kUint32LessThanOrEqual:
case IrOpcode::kUint32Mod:
- case IrOpcode::kUint32MulHigh: {
+ case IrOpcode::kUint32MulHigh:
// These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
// zero-extension is a no-op.
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
- return;
+ return true;
+ case IrOpcode::kProjection: {
+ Node* const value = node->InputAt(0);
+ switch (value->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ case IrOpcode::kInt32SubWithOverflow:
+ case IrOpcode::kInt32MulWithOverflow:
+ return true;
+ default:
+ return false;
+ }
}
default:
- break;
+ return false;
+ }
+}
+
+} // namespace
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ X64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (ZeroExtendsWord32ToWord64(value)) {
+ // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
+ // zero-extension is a no-op.
+ return EmitIdentity(node);
}
Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
@@ -1276,8 +1337,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Int64BinopMatcher m(value);
if (m.right().Is(32)) {
if (TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) {
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
- return;
+ return EmitIdentity(node);
}
Emit(kX64Shr, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.TempImmediate(32));
@@ -2213,6 +2273,17 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}
+void InstructionSelector::VisitCreateInt32x4(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Int32x4Create, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Int32x4ExtractLane, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)));
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index 29e2dd7383..f5e6634561 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -4,7 +4,7 @@
#include "src/compiler/code-generator.h"
-#include "src/ast/scopes.h"
+#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
@@ -715,9 +715,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDebugBreak:
__ int3();
break;
- case kArchImpossible:
- __ Abort(kConversionFromImpossibleValue);
- break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -746,8 +743,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result =
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result = AssembleDeoptimizerCall(
+ deopt_state_id, bailout_type, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -2241,13 +2238,14 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+ SourcePosition pos) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index 0fe6a4b704..757eee961e 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -211,6 +211,10 @@ void InstructionSelector::VisitLoad(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitStore(Node* node) {
X87OperandGenerator g(this);
@@ -223,7 +227,7 @@ void InstructionSelector::VisitStore(Node* node) {
MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ DCHECK(CanBeTaggedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
diff --git a/deps/v8/src/compiler/zone-pool.cc b/deps/v8/src/compiler/zone-pool.cc
index 13fec35a00..7681eeb5d1 100644
--- a/deps/v8/src/compiler/zone-pool.cc
+++ b/deps/v8/src/compiler/zone-pool.cc
@@ -64,7 +64,7 @@ void ZonePool::StatsScope::ZoneReturned(Zone* zone) {
}
}
-ZonePool::ZonePool(base::AccountingAllocator* allocator)
+ZonePool::ZonePool(AccountingAllocator* allocator)
: max_allocated_bytes_(0), total_deleted_bytes_(0), allocator_(allocator) {}
ZonePool::~ZonePool() {
diff --git a/deps/v8/src/compiler/zone-pool.h b/deps/v8/src/compiler/zone-pool.h
index 44a649fcfb..7a3fe75468 100644
--- a/deps/v8/src/compiler/zone-pool.h
+++ b/deps/v8/src/compiler/zone-pool.h
@@ -9,7 +9,7 @@
#include <set>
#include <vector>
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -61,7 +61,7 @@ class ZonePool final {
DISALLOW_COPY_AND_ASSIGN(StatsScope);
};
- explicit ZonePool(base::AccountingAllocator* allocator);
+ explicit ZonePool(AccountingAllocator* allocator);
~ZonePool();
size_t GetMaxAllocatedBytes();
@@ -82,7 +82,7 @@ class ZonePool final {
Stats stats_;
size_t max_allocated_bytes_;
size_t total_deleted_bytes_;
- base::AccountingAllocator* allocator_;
+ AccountingAllocator* allocator_;
DISALLOW_COPY_AND_ASSIGN(ZonePool);
};
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index b3cf255736..4fb3c833b7 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -57,15 +57,16 @@ bool ScriptContextTable::Lookup(Handle<ScriptContextTable> table,
bool Context::is_declaration_context() {
- if (IsFunctionContext() || IsNativeContext() || IsScriptContext()) {
+ if (IsFunctionContext() || IsNativeContext() || IsScriptContext() ||
+ IsModuleContext()) {
return true;
}
if (!IsBlockContext()) return false;
Object* ext = extension();
// If we have the special extension, we immediately know it must be a
// declaration scope. That's just a small performance shortcut.
- return ext->IsSloppyBlockWithEvalContextExtension()
- || ScopeInfo::cast(ext)->is_declaration_scope();
+ return ext->IsContextExtension() ||
+ ScopeInfo::cast(ext)->is_declaration_scope();
}
@@ -93,36 +94,47 @@ JSObject* Context::extension_object() {
HeapObject* object = extension();
if (object->IsTheHole(GetIsolate())) return nullptr;
if (IsBlockContext()) {
- if (!object->IsSloppyBlockWithEvalContextExtension()) return nullptr;
- object = SloppyBlockWithEvalContextExtension::cast(object)->extension();
+ if (!object->IsContextExtension()) return nullptr;
+ object = JSObject::cast(ContextExtension::cast(object)->extension());
}
DCHECK(object->IsJSContextExtensionObject() ||
(IsNativeContext() && object->IsJSGlobalObject()));
return JSObject::cast(object);
}
-
JSReceiver* Context::extension_receiver() {
DCHECK(IsNativeContext() || IsWithContext() ||
IsFunctionContext() || IsBlockContext());
- return IsWithContext() ? JSReceiver::cast(extension()) : extension_object();
+ return IsWithContext() ? JSReceiver::cast(
+ ContextExtension::cast(extension())->extension())
+ : extension_object();
}
-
ScopeInfo* Context::scope_info() {
- DCHECK(IsModuleContext() || IsScriptContext() || IsBlockContext());
+ DCHECK(!IsNativeContext());
+ if (IsFunctionContext() || IsModuleContext()) {
+ return closure()->shared()->scope_info();
+ }
HeapObject* object = extension();
- if (object->IsSloppyBlockWithEvalContextExtension()) {
- DCHECK(IsBlockContext());
- object = SloppyBlockWithEvalContextExtension::cast(object)->scope_info();
+ if (object->IsContextExtension()) {
+ DCHECK(IsBlockContext() || IsCatchContext() || IsWithContext() ||
+ IsDebugEvaluateContext());
+ object = ContextExtension::cast(object)->scope_info();
}
return ScopeInfo::cast(object);
}
+Module* Context::module() {
+ Context* current = this;
+ while (!current->IsModuleContext()) {
+ current = current->previous();
+ }
+ return Module::cast(current->extension());
+}
String* Context::catch_name() {
DCHECK(IsCatchContext());
- return String::cast(extension());
+ return String::cast(ContextExtension::cast(extension())->extension());
}
@@ -178,13 +190,14 @@ static Maybe<bool> UnscopableLookup(LookupIterator* it) {
static PropertyAttributes GetAttributesForMode(VariableMode mode) {
DCHECK(IsDeclaredVariableMode(mode));
- return IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
+ return mode == CONST ? READ_ONLY : NONE;
}
Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
int* index, PropertyAttributes* attributes,
InitializationFlag* init_flag,
VariableMode* variable_mode) {
+ DCHECK(!IsModuleContext());
Isolate* isolate = GetIsolate();
Handle<Context> context(this, isolate);
@@ -248,8 +261,14 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
object->IsJSContextExtensionObject()) {
maybe = JSReceiver::GetOwnPropertyAttributes(object, name);
} else if (context->IsWithContext()) {
- // A with context will never bind "this".
- if (name->Equals(*isolate->factory()->this_string())) {
+ // A with context will never bind "this", but debug-eval may look into
+ // a with context when resolving "this". Other synthetic variables such
+ // as new.target may be resolved as DYNAMIC_LOCAL due to bug v8:5405 ,
+ // skipping them here serves as a workaround until a more thorough
+ // fix can be applied.
+ // TODO(v8:5405): Replace this check with a DCHECK when resolution of
+ // of synthetic variables does not go through this code path.
+ if (ScopeInfo::VariableIsSynthetic(*name)) {
maybe = Just(ABSENT);
} else {
LookupIterator it(object, name, object);
@@ -307,10 +326,11 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
}
// Check the slot corresponding to the intermediate context holding
- // only the function name variable.
- if (follow_context_chain && context->IsFunctionContext()) {
- VariableMode mode;
- int function_index = scope_info->FunctionContextSlotIndex(*name, &mode);
+ // only the function name variable. It's conceptually (and spec-wise)
+ // in an outer scope of the function's declaration scope.
+ if (follow_context_chain && (flags & STOP_AT_DECLARATION_SCOPE) == 0 &&
+ context->IsFunctionContext()) {
+ int function_index = scope_info->FunctionContextSlotIndex(*name);
if (function_index >= 0) {
if (FLAG_trace_contexts) {
PrintF("=> found intermediate function in context slot %d\n",
@@ -318,9 +338,8 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
}
*index = function_index;
*attributes = READ_ONLY;
- DCHECK(mode == CONST_LEGACY || mode == CONST);
*init_flag = kCreatedInitialized;
- *variable_mode = mode;
+ *variable_mode = CONST;
return context;
}
}
@@ -339,18 +358,21 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
}
} else if (context->IsDebugEvaluateContext()) {
// Check materialized locals.
- Object* obj = context->get(EXTENSION_INDEX);
- if (obj->IsJSReceiver()) {
- Handle<JSReceiver> extension(JSReceiver::cast(obj));
- LookupIterator it(extension, name, extension);
- Maybe<bool> found = JSReceiver::HasProperty(&it);
- if (found.FromMaybe(false)) {
- *attributes = NONE;
- return extension;
+ Object* ext = context->get(EXTENSION_INDEX);
+ if (ext->IsContextExtension()) {
+ Object* obj = ContextExtension::cast(ext)->extension();
+ if (obj->IsJSReceiver()) {
+ Handle<JSReceiver> extension(JSReceiver::cast(obj));
+ LookupIterator it(extension, name, extension);
+ Maybe<bool> found = JSReceiver::HasProperty(&it);
+ if (found.FromMaybe(false)) {
+ *attributes = NONE;
+ return extension;
+ }
}
}
// Check the original context, but do not follow its context chain.
- obj = context->get(WRAPPED_CONTEXT_INDEX);
+ Object* obj = context->get(WRAPPED_CONTEXT_INDEX);
if (obj->IsContext()) {
Handle<Object> result =
Context::cast(obj)->Lookup(name, DONT_FOLLOW_CHAINS, index,
@@ -387,25 +409,6 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
}
-void Context::InitializeGlobalSlots() {
- DCHECK(IsScriptContext());
- DisallowHeapAllocation no_gc;
-
- ScopeInfo* scope_info = this->scope_info();
-
- int context_globals = scope_info->ContextGlobalCount();
- if (context_globals > 0) {
- PropertyCell* empty_cell = GetHeap()->empty_property_cell();
-
- int context_locals = scope_info->ContextLocalCount();
- int index = Context::MIN_CONTEXT_SLOTS + context_locals;
- for (int i = 0; i < context_globals; i++) {
- set(index++, empty_cell);
- }
- }
-}
-
-
void Context::AddOptimizedFunction(JSFunction* function) {
DCHECK(IsNativeContext());
Isolate* isolate = GetIsolate();
@@ -544,6 +547,17 @@ int Context::IntrinsicIndexForName(Handle<String> string) {
#undef COMPARE_NAME
+#define COMPARE_NAME(index, type, name) \
+ if (strncmp(string, #name, length) == 0) return index;
+
+int Context::IntrinsicIndexForName(const unsigned char* unsigned_string,
+ int length) {
+ const char* string = reinterpret_cast<const char*>(unsigned_string);
+ NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(COMPARE_NAME);
+ return kNotFound;
+}
+
+#undef COMPARE_NAME
#ifdef DEBUG
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index fd5b006192..b927d05fd6 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -36,6 +36,7 @@ enum ContextLookupFlags {
#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
+ V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site) \
V(MAKE_ERROR_INDEX, JSFunction, make_error) \
V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
@@ -59,45 +60,53 @@ enum ContextLookupFlags {
V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
V(MATH_POW_INDEX, JSFunction, math_pow)
-#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
- V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
- V(ARRAY_POP_INDEX, JSFunction, array_pop) \
- V(ARRAY_PUSH_INDEX, JSFunction, array_push) \
- V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
- V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
- V(ARRAY_SLICE_INDEX, JSFunction, array_slice) \
- V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
- V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(ASYNC_FUNCTION_AWAIT_INDEX, JSFunction, async_function_await) \
- V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
- V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
- V(ERROR_TO_STRING, JSFunction, error_to_string) \
- V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
- V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
- V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete) \
- V(MAP_GET_METHOD_INDEX, JSFunction, map_get) \
- V(MAP_HAS_METHOD_INDEX, JSFunction, map_has) \
- V(MAP_SET_METHOD_INDEX, JSFunction, map_set) \
- V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
- V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
- V(OBJECT_TO_STRING, JSFunction, object_to_string) \
- V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
- V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
- V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
- V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction, \
- promise_has_user_defined_reject_handler) \
- V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
- V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
- V(PROMISE_CREATE_RESOLVED_INDEX, JSFunction, promise_create_resolved) \
- V(PROMISE_CREATE_REJECTED_INDEX, JSFunction, promise_create_rejected) \
- V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
- V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
- V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
- V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
- V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete) \
- V(SET_HAS_METHOD_INDEX, JSFunction, set_has) \
- V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
- V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
+#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
+ V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
+ V(ARRAY_POP_INDEX, JSFunction, array_pop) \
+ V(ARRAY_PUSH_INDEX, JSFunction, array_push) \
+ V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
+ V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
+ V(ARRAY_SLICE_INDEX, JSFunction, array_slice) \
+ V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
+ V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
+ V(ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, JSFunction, \
+ async_function_await_caught) \
+ V(ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX, JSFunction, \
+ async_function_await_uncaught) \
+ V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
+ async_function_promise_create) \
+ V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction, \
+ async_function_promise_release) \
+ V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
+ V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
+ V(ERROR_TO_STRING, JSFunction, error_to_string) \
+ V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
+ V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
+ V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete) \
+ V(MAP_GET_METHOD_INDEX, JSFunction, map_get) \
+ V(MAP_HAS_METHOD_INDEX, JSFunction, map_has) \
+ V(MAP_SET_METHOD_INDEX, JSFunction, map_set) \
+ V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
+ V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
+ V(OBJECT_TO_STRING, JSFunction, object_to_string) \
+ V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
+ V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
+ V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
+ V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction, \
+ promise_has_user_defined_reject_handler) \
+ V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
+ V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
+ V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
+ V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
+ V(REGEXP_LAST_MATCH_INFO_INDEX, JSObject, regexp_last_match_info) \
+ V(REJECT_PROMISE_NO_DEBUG_EVENT_INDEX, JSFunction, \
+ reject_promise_no_debug_event) \
+ V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
+ V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
+ V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete) \
+ V(SET_HAS_METHOD_INDEX, JSFunction, set_has) \
+ V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
+ V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function)
#define NATIVE_CONTEXT_FIELDS(V) \
@@ -146,6 +155,7 @@ enum ContextLookupFlags {
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
V(INITIAL_GENERATOR_PROTOTYPE_INDEX, JSObject, initial_generator_prototype) \
+ V(INITIAL_ITERATOR_PROTOTYPE_INDEX, JSObject, initial_iterator_prototype) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
V(INT16X8_FUNCTION_INDEX, JSFunction, int16x8_function) \
@@ -205,7 +215,11 @@ enum ContextLookupFlags {
V(WASM_FUNCTION_MAP_INDEX, Map, wasm_function_map) \
V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor) \
V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \
+ V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor) \
+ V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
V(WASM_MODULE_SYM_INDEX, Symbol, wasm_module_sym) \
+ V(WASM_TABLE_SYM_INDEX, Symbol, wasm_table_sym) \
+ V(WASM_MEMORY_SYM_INDEX, Symbol, wasm_memory_sym) \
V(WASM_INSTANCE_SYM_INDEX, Symbol, wasm_instance_sym) \
V(SLOPPY_ASYNC_FUNCTION_MAP_INDEX, Map, sloppy_async_function_map) \
V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \
@@ -228,6 +242,7 @@ enum ContextLookupFlags {
V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
V(UINT8X16_FUNCTION_INDEX, JSFunction, uint8x16_function) \
+ V(CURRENT_MODULE_INDEX, Module, current_module) \
NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
NATIVE_CONTEXT_IMPORTED_FIELDS(V)
@@ -299,18 +314,29 @@ class ScriptContextTable : public FixedArray {
//
// [ previous ] A pointer to the previous context.
//
-// [ extension ] A pointer to an extension JSObject, or "the hole". Used to
-// implement 'with' statements and dynamic declarations
-// (through 'eval'). The object in a 'with' statement is
-// stored in the extension slot of a 'with' context.
-// Dynamically declared variables/functions are also added
-// to lazily allocated extension object. Context::Lookup
-// searches the extension object for properties.
-// For script and block contexts, contains the respective
-// ScopeInfo. For block contexts representing sloppy declaration
-// block scopes, it may also be a struct being a
-// SloppyBlockWithEvalContextExtension, pairing the ScopeInfo
-// with an extension object.
+// [ extension ] Additional data.
+//
+// For script contexts, it contains the respective ScopeInfo.
+//
+// For catch contexts, it contains a ContextExtension object
+// consisting of the ScopeInfo and the name of the catch
+// variable.
+//
+// For module contexts, it contains the module object.
+//
+// For block contexts, it contains either the respective
+// ScopeInfo or a ContextExtension object consisting of the
+// ScopeInfo and an "extension object" (see below).
+//
+// For with contexts, it contains a ContextExtension object
+// consisting of the ScopeInfo and an "extension object".
+//
+// An "extension object" is used to dynamically extend a context
+// with additional variables, namely in the implementation of the
+// 'with' construct and the 'eval' construct. For instance,
+// Context::Lookup also searches the extension object for
+// properties. (Storing the extension object is the original
+// purpose of this context slot, hence the name.)
//
// [ native_context ] A pointer to the native context.
//
@@ -388,6 +414,10 @@ class Context: public FixedArray {
ScopeInfo* scope_info();
String* catch_name();
+ // Find the module context (assuming there is one) and return the associated
+ // module object.
+ Module* module();
+
// Get the context where var declarations will be hoisted to, which
// may be the context itself.
Context* declaration_context();
@@ -401,7 +431,7 @@ class Context: public FixedArray {
void set_global_proxy(JSObject* global);
// Get the JSGlobalObject object.
- JSGlobalObject* global_object();
+ V8_EXPORT_PRIVATE JSGlobalObject* global_object();
// Get the script context by traversing the context chain.
Context* script_context();
@@ -424,9 +454,6 @@ class Context: public FixedArray {
inline bool HasSameSecurityTokenAs(Context* that);
- // Initializes global variable bindings in given script context.
- void InitializeGlobalSlots();
-
// A native context holds a list of all functions with optimized code.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
@@ -445,6 +472,7 @@ class Context: public FixedArray {
static int ImportedFieldIndexForName(Handle<String> name);
static int IntrinsicIndexForName(Handle<String> name);
+ static int IntrinsicIndexForName(const unsigned char* name, int length);
#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
inline void set_##name(type* value); \
@@ -526,7 +554,8 @@ class Context: public FixedArray {
private:
#ifdef DEBUG
// Bootstrapping-aware type checks.
- static bool IsBootstrappingOrNativeContext(Isolate* isolate, Object* object);
+ V8_EXPORT_PRIVATE static bool IsBootstrappingOrNativeContext(Isolate* isolate,
+ Object* object);
static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
#endif
diff --git a/deps/v8/src/counters-inl.h b/deps/v8/src/counters-inl.h
index c8c06d2950..303e5e3a81 100644
--- a/deps/v8/src/counters-inl.h
+++ b/deps/v8/src/counters-inl.h
@@ -11,10 +11,18 @@ namespace v8 {
namespace internal {
RuntimeCallTimerScope::RuntimeCallTimerScope(
+ Isolate* isolate, RuntimeCallStats::CounterId counter_id) {
+ if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
+ FLAG_runtime_call_stats)) {
+ Initialize(isolate, counter_id);
+ }
+}
+
+RuntimeCallTimerScope::RuntimeCallTimerScope(
HeapObject* heap_object, RuntimeCallStats::CounterId counter_id) {
- if (V8_UNLIKELY(FLAG_runtime_call_stats)) {
- isolate_ = heap_object->GetIsolate();
- RuntimeCallStats::Enter(isolate_, &timer_, counter_id);
+ if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
+ FLAG_runtime_call_stats)) {
+ Initialize(heap_object->GetIsolate(), counter_id);
}
}
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index 8a5908c9af..c4e86460aa 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -282,18 +282,15 @@ void RuntimeCallCounter::Dump(std::stringstream& out) {
}
// static
-void RuntimeCallStats::Enter(Isolate* isolate, RuntimeCallTimer* timer,
+void RuntimeCallStats::Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
CounterId counter_id) {
- RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
RuntimeCallCounter* counter = &(stats->*counter_id);
timer->Start(counter, stats->current_timer_);
stats->current_timer_ = timer;
}
// static
-void RuntimeCallStats::Leave(Isolate* isolate, RuntimeCallTimer* timer) {
- RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
-
+void RuntimeCallStats::Leave(RuntimeCallStats* stats, RuntimeCallTimer* timer) {
if (stats->current_timer_ == timer) {
stats->current_timer_ = timer->Stop();
} else {
@@ -307,9 +304,8 @@ void RuntimeCallStats::Leave(Isolate* isolate, RuntimeCallTimer* timer) {
}
// static
-void RuntimeCallStats::CorrectCurrentCounterId(Isolate* isolate,
+void RuntimeCallStats::CorrectCurrentCounterId(RuntimeCallStats* stats,
CounterId counter_id) {
- RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
DCHECK_NOT_NULL(stats->current_timer_);
RuntimeCallCounter* counter = &(stats->*counter_id);
stats->current_timer_->counter_ = counter;
@@ -342,7 +338,9 @@ void RuntimeCallStats::Print(std::ostream& os) {
}
void RuntimeCallStats::Reset() {
- if (!FLAG_runtime_call_stats) return;
+ if (!FLAG_runtime_call_stats &&
+ !TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED())
+ return;
#define RESET_COUNTER(name) this->name.Reset();
FOR_EACH_MANUAL_COUNTER(RESET_COUNTER)
#undef RESET_COUNTER
@@ -362,6 +360,41 @@ void RuntimeCallStats::Reset() {
#define RESET_COUNTER(name) this->Handler_##name.Reset();
FOR_EACH_HANDLER_COUNTER(RESET_COUNTER)
#undef RESET_COUNTER
+
+ in_use_ = true;
+}
+
+std::string RuntimeCallStats::Dump() {
+ buffer_.str(std::string());
+ buffer_.clear();
+ buffer_ << "{";
+#define DUMP_COUNTER(name) \
+ if (this->name.count > 0) this->name.Dump(buffer_);
+ FOR_EACH_MANUAL_COUNTER(DUMP_COUNTER)
+#undef DUMP_COUNTER
+
+#define DUMP_COUNTER(name, nargs, result_size) \
+ if (this->Runtime_##name.count > 0) this->Runtime_##name.Dump(buffer_);
+ FOR_EACH_INTRINSIC(DUMP_COUNTER)
+#undef DUMP_COUNTER
+
+#define DUMP_COUNTER(name) \
+ if (this->Builtin_##name.count > 0) this->Builtin_##name.Dump(buffer_);
+ BUILTIN_LIST_C(DUMP_COUNTER)
+#undef DUMP_COUNTER
+
+#define DUMP_COUNTER(name) \
+ if (this->API_##name.count > 0) this->API_##name.Dump(buffer_);
+ FOR_EACH_API_COUNTER(DUMP_COUNTER)
+#undef DUMP_COUNTER
+
+#define DUMP_COUNTER(name) \
+ if (this->Handler_##name.count > 0) this->Handler_##name.Dump(buffer_);
+ FOR_EACH_HANDLER_COUNTER(DUMP_COUNTER)
+#undef DUMP_COUNTER
+ buffer_ << "\"END\":[]}";
+ in_use_ = false;
+ return buffer_.str();
}
} // namespace internal
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 59627f13f6..707ae9f738 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -11,8 +11,10 @@
#include "src/base/platform/time.h"
#include "src/builtins/builtins.h"
#include "src/globals.h"
+#include "src/isolate.h"
#include "src/objects.h"
#include "src/runtime/runtime.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -566,12 +568,15 @@ class RuntimeCallTimer {
V(Message_GetLineNumber) \
V(Message_GetSourceLine) \
V(Message_GetStartColumn) \
+ V(Module_Evaluate) \
+ V(Module_Instantiate) \
V(NumberObject_New) \
V(NumberObject_NumberValue) \
V(Object_CallAsConstructor) \
V(Object_CallAsFunction) \
V(Object_CreateDataProperty) \
V(Object_DefineOwnProperty) \
+ V(Object_DefineProperty) \
V(Object_Delete) \
V(Object_DeleteProperty) \
V(Object_ForceSet) \
@@ -657,7 +662,10 @@ class RuntimeCallTimer {
V(UnboundScript_GetName) \
V(UnboundScript_GetSourceMappingURL) \
V(UnboundScript_GetSourceURL) \
- V(Value_TypeOf)
+ V(Value_TypeOf) \
+ V(ValueDeserializer_ReadHeader) \
+ V(ValueDeserializer_ReadValue) \
+ V(ValueSerializer_WriteValue)
#define FOR_EACH_MANUAL_COUNTER(V) \
V(AccessorGetterCallback) \
@@ -674,13 +682,18 @@ class RuntimeCallTimer {
V(DeoptimizeCode) \
V(FunctionCallback) \
V(GC) \
+ V(GenericNamedPropertyDefinerCallback) \
V(GenericNamedPropertyDeleterCallback) \
+ V(GenericNamedPropertyDescriptorCallback) \
V(GenericNamedPropertyQueryCallback) \
V(GenericNamedPropertySetterCallback) \
+ V(IndexedPropertyDefinerCallback) \
V(IndexedPropertyDeleterCallback) \
+ V(IndexedPropertyDescriptorCallback) \
V(IndexedPropertyGetterCallback) \
V(IndexedPropertyQueryCallback) \
V(IndexedPropertySetterCallback) \
+ V(InvokeApiInterruptCallbacks) \
V(InvokeFunctionCallback) \
V(JS_Execution) \
V(Map_SetPrototype) \
@@ -765,67 +778,52 @@ class RuntimeCallStats {
// Starting measuring the time for a function. This will establish the
// connection to the parent counter for properly calculating the own times.
- static void Enter(Isolate* isolate, RuntimeCallTimer* timer,
+ static void Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
CounterId counter_id);
// Leave a scope for a measured runtime function. This will properly add
// the time delta to the current_counter and subtract the delta from its
// parent.
- static void Leave(Isolate* isolate, RuntimeCallTimer* timer);
+ static void Leave(RuntimeCallStats* stats, RuntimeCallTimer* timer);
// Set counter id for the innermost measurement. It can be used to refine
// event kind when a runtime entry counter is too generic.
- static void CorrectCurrentCounterId(Isolate* isolate, CounterId counter_id);
+ static void CorrectCurrentCounterId(RuntimeCallStats* stats,
+ CounterId counter_id);
void Reset();
- void Print(std::ostream& os);
+ V8_NOINLINE void Print(std::ostream& os);
+ V8_NOINLINE std::string Dump();
+
+ RuntimeCallStats() {
+ Reset();
+ in_use_ = false;
+ }
- RuntimeCallStats() { Reset(); }
RuntimeCallTimer* current_timer() { return current_timer_; }
+ bool InUse() { return in_use_; }
private:
+ std::stringstream buffer_;
// Counter to track recursive time events.
RuntimeCallTimer* current_timer_ = NULL;
+ // Used to track nested tracing scopes.
+ bool in_use_;
};
-#define TRACE_RUNTIME_CALL_STATS(isolate, counter_name) \
- do { \
- if (FLAG_runtime_call_stats) { \
- RuntimeCallStats::CorrectCurrentCounterId( \
- isolate, &RuntimeCallStats::counter_name); \
- } \
+#define TRACE_RUNTIME_CALL_STATS(isolate, counter_name) \
+ do { \
+ if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() || \
+ FLAG_runtime_call_stats)) { \
+ RuntimeCallStats::CorrectCurrentCounterId( \
+ isolate->counters()->runtime_call_stats(), \
+ &RuntimeCallStats::counter_name); \
+ } \
} while (false)
#define TRACE_HANDLER_STATS(isolate, counter_name) \
TRACE_RUNTIME_CALL_STATS(isolate, Handler_##counter_name)
-// A RuntimeCallTimerScopes wraps around a RuntimeCallTimer to measure the
-// the time of C++ scope.
-class RuntimeCallTimerScope {
- public:
- inline RuntimeCallTimerScope(Isolate* isolate,
- RuntimeCallStats::CounterId counter_id) {
- if (V8_UNLIKELY(FLAG_runtime_call_stats)) {
- isolate_ = isolate;
- RuntimeCallStats::Enter(isolate_, &timer_, counter_id);
- }
- }
- // This constructor is here just to avoid calling GetIsolate() when the
- // stats are disabled and the isolate is not directly available.
- inline RuntimeCallTimerScope(HeapObject* heap_object,
- RuntimeCallStats::CounterId counter_id);
-
- inline ~RuntimeCallTimerScope() {
- if (V8_UNLIKELY(FLAG_runtime_call_stats)) {
- RuntimeCallStats::Leave(isolate_, &timer_);
- }
- }
-
- private:
- Isolate* isolate_;
- RuntimeCallTimer timer_;
-};
-
#define HISTOGRAM_RANGE_LIST(HR) \
/* Generic range histograms */ \
HR(detached_context_age_in_gc, V8.DetachedContextAgeInGC, 0, 20, 21) \
@@ -836,6 +834,9 @@ class RuntimeCallTimerScope {
HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7) \
+ HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 21, 22) \
+ HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 21, 22) \
+ HR(scavenge_reason, V8.GCScavengeReason, 0, 21, 22) \
/* Asm/Wasm. */ \
HR(wasm_functions_per_module, V8.WasmFunctionsPerModule, 1, 10000, 51)
@@ -1238,6 +1239,36 @@ class Counters {
DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
};
+// A RuntimeCallTimerScopes wraps around a RuntimeCallTimer to measure the
+// the time of C++ scope.
+class RuntimeCallTimerScope {
+ public:
+ inline RuntimeCallTimerScope(Isolate* isolate,
+ RuntimeCallStats::CounterId counter_id);
+ // This constructor is here just to avoid calling GetIsolate() when the
+ // stats are disabled and the isolate is not directly available.
+ inline RuntimeCallTimerScope(HeapObject* heap_object,
+ RuntimeCallStats::CounterId counter_id);
+
+ inline ~RuntimeCallTimerScope() {
+ if (V8_UNLIKELY(isolate_ != nullptr)) {
+ RuntimeCallStats::Leave(isolate_->counters()->runtime_call_stats(),
+ &timer_);
+ }
+ }
+
+ private:
+ V8_INLINE void Initialize(Isolate* isolate,
+ RuntimeCallStats::CounterId counter_id) {
+ isolate_ = isolate;
+ RuntimeCallStats::Enter(isolate_->counters()->runtime_call_stats(), &timer_,
+ counter_id);
+ }
+
+ Isolate* isolate_ = nullptr;
+ RuntimeCallTimer timer_;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.cc b/deps/v8/src/crankshaft/arm/lithium-arm.cc
index 324dcfefa8..8c4b7356c9 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.cc
@@ -304,15 +304,6 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
}
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(String::cast(*name())->ToCString().get());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -345,15 +336,6 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -877,7 +859,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->num_heap_slots() > 0) {
+ if (info_->scope()->NeedsContext()) {
result = MarkAsCall(result, instr);
}
return result;
@@ -1019,6 +1001,9 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
+ DCHECK_EQ(descriptor.GetParameterCount() +
+ LCallWithDescriptor::kImplicitRegisterParameterCount,
+ instr->OperandCount());
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -1027,15 +1012,20 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
// Context
LOperand* op = UseFixed(instr->OperandAt(1), cp);
ops.Add(op, zone());
- // Other register parameters
- for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
- i < instr->OperandCount(); i++) {
- op =
- UseFixed(instr->OperandAt(i),
- descriptor.GetRegisterParameter(
- i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+ // Load register parameters.
+ int i = 0;
+ for (; i < descriptor.GetRegisterParameterCount(); i++) {
+ op = UseFixed(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+ descriptor.GetRegisterParameter(i));
ops.Add(op, zone());
}
+ // Push stack parameters.
+ for (; i < descriptor.GetParameterCount(); i++) {
+ op = UseAny(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+ AddInstruction(new (zone()) LPushArgument(op), instr);
+ }
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
@@ -2180,26 +2170,6 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
- LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
- DCHECK(instr->object()->representation().IsTagged());
- DCHECK(instr->key()->representation().IsTagged());
- DCHECK(instr->value()->representation().IsTagged());
-
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreKeyedGeneric* result =
- new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2276,20 +2246,6 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreNamedGeneric* result =
- new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.h b/deps/v8/src/crankshaft/arm/lithium-arm.h
index 80fbe81a0f..abdfbddf4d 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.h
@@ -132,9 +132,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreKeyed) \
- V(StoreKeyedGeneric) \
V(StoreNamedField) \
- V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
@@ -2005,33 +2003,6 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
- LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Object> name() const { return hydrogen()->name(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
@@ -2068,34 +2039,6 @@ class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
- LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* value, LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- inputs_[3] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
index ee3e54b604..f2cc4b447e 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -152,7 +152,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Possibly allocate a local context.
- if (info()->scope()->num_heap_slots() > 0) {
+ if (info()->scope()->NeedsContext()) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in r1.
@@ -160,7 +160,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
if (info()->scope()->is_script_scope()) {
__ push(r1);
- __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ Push(info()->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
@@ -2602,20 +2602,6 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = ToRegister(instr->temp_slot());
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ Move(vector_register, vector);
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ mov(slot_register, Operand(Smi::FromInt(index)));
-}
-
-
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->result()).is(r0));
@@ -3860,21 +3846,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
- __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic =
- CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
if (instr->index()->IsConstantOperand()) {
@@ -4071,21 +4042,6 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
}
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
class DeferredMaybeGrowElements final : public LDeferredCode {
public:
@@ -5063,7 +5019,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
@@ -5165,7 +5121,7 @@ void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, scratch1, scratch2, flags);
} else {
Register size = ToRegister(instr->size());
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
index 533f4c8cca..26b7fb50a8 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
@@ -311,8 +311,6 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
- template <class T>
- void EmitVectorStoreICRegisters(T* instr);
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
Scope* const scope_;
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
index 8067a6ae28..8a9ce4266d 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
@@ -252,15 +252,6 @@ void LStoreContextSlot::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -271,15 +262,6 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
}
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(String::cast(*name())->ToCString().get());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if string_compare(");
left()->PrintTo(stream);
@@ -726,7 +708,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->num_heap_slots() > 0) {
+ if (info_->scope()->NeedsContext()) {
result = MarkAsCall(result, instr);
}
return result;
@@ -981,6 +963,9 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
+ DCHECK_EQ(descriptor.GetParameterCount() +
+ LCallWithDescriptor::kImplicitRegisterParameterCount,
+ instr->OperandCount());
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -989,15 +974,30 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
// Context
LOperand* op = UseFixed(instr->OperandAt(1), cp);
ops.Add(op, zone());
- // Other register parameters
- for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
- i < instr->OperandCount(); i++) {
- op =
- UseFixed(instr->OperandAt(i),
- descriptor.GetRegisterParameter(
- i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+ // Load register parameters.
+ int i = 0;
+ for (; i < descriptor.GetRegisterParameterCount(); i++) {
+ op = UseFixed(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+ descriptor.GetRegisterParameter(i));
ops.Add(op, zone());
}
+ // Push stack parameters.
+ if (i < descriptor.GetParameterCount()) {
+ int argc = descriptor.GetParameterCount() - i;
+ AddInstruction(new (zone()) LPreparePushArguments(argc), instr);
+ LPushArguments* push_args = new (zone()) LPushArguments(zone());
+ for (; i < descriptor.GetParameterCount(); i++) {
+ if (push_args->ShouldSplitPush()) {
+ AddInstruction(push_args, instr);
+ push_args = new (zone()) LPushArguments(zone());
+ }
+ op = UseRegisterAtStart(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+ push_args->AddArgument(op);
+ }
+ AddInstruction(push_args, instr);
+ }
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor,
ops,
@@ -2209,26 +2209,6 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
- LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
- DCHECK(instr->object()->representation().IsTagged());
- DCHECK(instr->key()->representation().IsTagged());
- DCHECK(instr->value()->representation().IsTagged());
-
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreKeyedGeneric* result = new (zone())
- LStoreKeyedGeneric(context, object, key, value, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// TODO(jbramley): It might be beneficial to allow value to be a constant in
// some cases. x64 makes use of this with FLAG_track_fields, for example.
@@ -2258,21 +2238,6 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreNamedGeneric* result =
- new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), x1);
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
index 782da09546..9891f9ee49 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
@@ -143,9 +143,7 @@ class LCodeGen;
V(StoreKeyedExternal) \
V(StoreKeyedFixed) \
V(StoreKeyedFixedDouble) \
- V(StoreKeyedGeneric) \
V(StoreNamedField) \
- V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
@@ -2336,34 +2334,6 @@ class LStoreKeyedFixedDouble final : public LStoreKeyed<1> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
- LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* value, LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- inputs_[3] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
public:
LStoreNamedField(LOperand* object, LOperand* value,
@@ -2390,33 +2360,6 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
- LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Object> name() const { return hydrogen()->name(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
public:
LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
index ce5813b1e1..a4aa275b15 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -583,14 +583,14 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Allocate a local context if needed.
- if (info()->scope()->num_heap_slots() > 0) {
+ if (info()->scope()->NeedsContext()) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in x1.
int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
if (info()->scope()->is_script_scope()) {
- __ Mov(x10, Operand(info()->scope()->GetScopeInfo(info()->isolate())));
+ __ Mov(x10, Operand(info()->scope()->scope_info()));
__ Push(x1, x10);
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
@@ -1403,7 +1403,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
} else {
Register size = ToRegister32(instr->size());
@@ -1499,7 +1499,7 @@ void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, scratch1, scratch2, flags);
} else {
Register size = ToRegister(instr->size());
@@ -1973,7 +1973,16 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
generator.AfterCall();
}
- RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
+ HCallWithDescriptor* hinstr = instr->hydrogen();
+ RecordPushedArgumentsDelta(hinstr->argument_delta());
+
+ // HCallWithDescriptor instruction is translated to zero or more
+ // LPushArguments (they handle parameters passed on the stack) followed by
+ // a LCallWithDescriptor. Each LPushArguments instruction generated records
+ // the number of arguments pushed thus we need to offset them here.
+ // The |argument_delta()| used above "knows" only about JS parameters while
+ // we are dealing here with particular calling convention details.
+ RecordPushedArgumentsDelta(-hinstr->descriptor().GetStackParameterCount());
}
@@ -3021,20 +3030,6 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = ToRegister(instr->temp_slot());
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ Mov(vector_register, vector);
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ Mov(slot_register, Smi::FromInt(index));
-}
-
-
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -4933,21 +4928,6 @@ void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
}
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
class DeferredMaybeGrowElements final : public LDeferredCode {
public:
@@ -5131,21 +5111,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
- __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic =
- CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoStringAdd(LStringAdd* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->left()).Is(x1));
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
index 2fc6f96d7a..ca04fa27c0 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -186,8 +186,6 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
- template <class T>
- void EmitVectorStoreICRegisters(T* instr);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
diff --git a/deps/v8/src/crankshaft/compilation-phase.h b/deps/v8/src/crankshaft/compilation-phase.h
index 99e24c72c8..8d6468d4dc 100644
--- a/deps/v8/src/crankshaft/compilation-phase.h
+++ b/deps/v8/src/crankshaft/compilation-phase.h
@@ -6,8 +6,9 @@
#define V8_CRANKSHAFT_COMPILATION_PHASE_H_
#include "src/allocation.h"
-#include "src/compiler.h"
-#include "src/zone.h"
+#include "src/base/platform/elapsed-timer.h"
+#include "src/compilation-info.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-bce.cc b/deps/v8/src/crankshaft/hydrogen-bce.cc
index d00d8ce25c..7910c5bdae 100644
--- a/deps/v8/src/crankshaft/hydrogen-bce.cc
+++ b/deps/v8/src/crankshaft/hydrogen-bce.cc
@@ -307,24 +307,25 @@ static bool BoundsCheckKeyMatch(void* key1, void* key2) {
return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length();
}
-
BoundsCheckTable::BoundsCheckTable(Zone* zone)
- : ZoneHashMap(BoundsCheckKeyMatch, ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)) { }
-
+ : CustomMatcherZoneHashMap(BoundsCheckKeyMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)) {}
BoundsCheckBbData** BoundsCheckTable::LookupOrInsert(BoundsCheckKey* key,
Zone* zone) {
return reinterpret_cast<BoundsCheckBbData**>(
- &(ZoneHashMap::LookupOrInsert(key, key->Hash(),
- ZoneAllocationPolicy(zone))->value));
+ &(CustomMatcherZoneHashMap::LookupOrInsert(key, key->Hash(),
+ ZoneAllocationPolicy(zone))
+ ->value));
}
void BoundsCheckTable::Insert(BoundsCheckKey* key,
BoundsCheckBbData* data,
Zone* zone) {
- ZoneHashMap::LookupOrInsert(key, key->Hash(), ZoneAllocationPolicy(zone))
+ CustomMatcherZoneHashMap::LookupOrInsert(key, key->Hash(),
+ ZoneAllocationPolicy(zone))
->value = data;
}
diff --git a/deps/v8/src/crankshaft/hydrogen-bce.h b/deps/v8/src/crankshaft/hydrogen-bce.h
index e819ffc403..237fb953f2 100644
--- a/deps/v8/src/crankshaft/hydrogen-bce.h
+++ b/deps/v8/src/crankshaft/hydrogen-bce.h
@@ -13,7 +13,7 @@ namespace internal {
class BoundsCheckBbData;
class BoundsCheckKey;
-class BoundsCheckTable : private ZoneHashMap {
+class BoundsCheckTable : private CustomMatcherZoneHashMap {
public:
explicit BoundsCheckTable(Zone* zone);
diff --git a/deps/v8/src/crankshaft/hydrogen-flow-engine.h b/deps/v8/src/crankshaft/hydrogen-flow-engine.h
index 3a488ddc18..149c99bec5 100644
--- a/deps/v8/src/crankshaft/hydrogen-flow-engine.h
+++ b/deps/v8/src/crankshaft/hydrogen-flow-engine.h
@@ -5,9 +5,9 @@
#ifndef V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
#define V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
-#include "src/crankshaft/hydrogen.h"
#include "src/crankshaft/hydrogen-instructions.h"
-#include "src/zone.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-gvn.h b/deps/v8/src/crankshaft/hydrogen-gvn.h
index 9a8d40710f..5f11737dbc 100644
--- a/deps/v8/src/crankshaft/hydrogen-gvn.h
+++ b/deps/v8/src/crankshaft/hydrogen-gvn.h
@@ -7,9 +7,9 @@
#include <iosfwd>
-#include "src/crankshaft/hydrogen.h"
#include "src/crankshaft/hydrogen-instructions.h"
-#include "src/zone.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.cc b/deps/v8/src/crankshaft/hydrogen-instructions.cc
index 9fed9612c6..3a0aaa70e7 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.cc
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.cc
@@ -831,7 +831,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kStoreCodeEntry:
case HValue::kStoreKeyed:
case HValue::kStoreNamedField:
- case HValue::kStoreNamedGeneric:
case HValue::kStringCharCodeAt:
case HValue::kStringCharFromCode:
case HValue::kThisFunction:
@@ -881,7 +880,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kSimulate:
case HValue::kStackCheck:
case HValue::kStoreContextSlot:
- case HValue::kStoreKeyedGeneric:
case HValue::kStringAdd:
case HValue::kStringCompareAndBranch:
case HValue::kSub:
@@ -3039,14 +3037,6 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
}
-std::ostream& HStoreNamedGeneric::PrintDataTo(
- std::ostream& os) const { // NOLINT
- Handle<String> n = Handle<String>::cast(name());
- return os << NameOf(object()) << "." << n->ToCString().get() << " = "
- << NameOf(value());
-}
-
-
std::ostream& HStoreNamedField::PrintDataTo(std::ostream& os) const { // NOLINT
os << NameOf(object()) << access_ << " = " << NameOf(value());
if (NeedsWriteBarrier()) os << " (write-barrier)";
@@ -3070,13 +3060,6 @@ std::ostream& HStoreKeyed::PrintDataTo(std::ostream& os) const { // NOLINT
}
-std::ostream& HStoreKeyedGeneric::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << NameOf(object()) << "[" << NameOf(key())
- << "] = " << NameOf(value());
-}
-
-
std::ostream& HTransitionElementsKind::PrintDataTo(
std::ostream& os) const { // NOLINT
os << NameOf(object());
@@ -3236,8 +3219,8 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
int32_t new_dominator_size = dominator_size_constant + current_size_max_value;
// Since we clear the first word after folded memory, we cannot use the
- // whole Page::kMaxRegularHeapObjectSize memory.
- if (new_dominator_size > Page::kMaxRegularHeapObjectSize - kPointerSize) {
+ // whole kMaxRegularHeapObjectSize memory.
+ if (new_dominator_size > kMaxRegularHeapObjectSize - kPointerSize) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
id(), Mnemonic(), dominator_allocate->id(),
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.h b/deps/v8/src/crankshaft/hydrogen-instructions.h
index 41b1e1be8b..cfede98039 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.h
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.h
@@ -9,6 +9,7 @@
#include <iosfwd>
#include "src/allocation.h"
+#include "src/ast/ast.h"
#include "src/base/bits.h"
#include "src/bit-vector.h"
#include "src/code-stubs.h"
@@ -19,7 +20,7 @@
#include "src/globals.h"
#include "src/small-pointer-list.h"
#include "src/utils.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -36,6 +37,7 @@ class HStoreNamedField;
class HValue;
class LInstruction;
class LChunkBuilder;
+class SmallMapList;
#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
V(ArithmeticBinaryOperation) \
@@ -131,9 +133,7 @@ class LChunkBuilder;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreKeyed) \
- V(StoreKeyedGeneric) \
V(StoreNamedField) \
- V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
@@ -2176,7 +2176,8 @@ class HCallWithDescriptor final : public HInstruction {
} else {
int par_index = index - 2;
DCHECK(par_index < GetParameterCount());
- return RepresentationFromType(descriptor_.GetParameterType(par_index));
+ return RepresentationFromMachineType(
+ descriptor_.GetParameterType(par_index));
}
}
@@ -2215,7 +2216,7 @@ class HCallWithDescriptor final : public HInstruction {
TailCallMode syntactic_tail_call_mode,
TailCallMode tail_call_mode, Zone* zone)
: descriptor_(descriptor),
- values_(GetParameterCount() + 1, zone),
+ values_(GetParameterCount() + 1, zone), // +1 here is for target.
argument_count_(argument_count),
bit_field_(
TailCallModeField::encode(tail_call_mode) |
@@ -2237,7 +2238,7 @@ class HCallWithDescriptor final : public HInstruction {
}
int GetParameterCount() const {
- return descriptor_.GetRegisterParameterCount() + 1;
+ return descriptor_.GetParameterCount() + 1; // +1 here is for context.
}
void InternalSetOperandAt(int index, HValue* value) final {
@@ -6326,52 +6327,6 @@ class HStoreNamedField final : public HTemplateInstruction<3> {
uint32_t bit_field_;
};
-class HStoreNamedGeneric final : public HTemplateInstruction<3> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(HStoreNamedGeneric, HValue*,
- Handle<Name>, HValue*,
- LanguageMode,
- Handle<TypeFeedbackVector>,
- FeedbackVectorSlot);
- HValue* object() const { return OperandAt(0); }
- HValue* value() const { return OperandAt(1); }
- HValue* context() const { return OperandAt(2); }
- Handle<Name> name() const { return name_; }
- LanguageMode language_mode() const { return language_mode_; }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- FeedbackVectorSlot slot() const { return slot_; }
- Handle<TypeFeedbackVector> feedback_vector() const {
- return feedback_vector_;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
-
- private:
- HStoreNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
- HValue* value, LanguageMode language_mode,
- Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
- : name_(name),
- feedback_vector_(vector),
- slot_(slot),
- language_mode_(language_mode) {
- SetOperandAt(0, object);
- SetOperandAt(1, value);
- SetOperandAt(2, context);
- SetAllSideEffects();
- }
-
- Handle<Name> name_;
- Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorSlot slot_;
- LanguageMode language_mode_;
-};
-
class HStoreKeyed final : public HTemplateInstruction<4>,
public ArrayInstructionInterface {
public:
@@ -6554,50 +6509,6 @@ class HStoreKeyed final : public HTemplateInstruction<4>,
HValue* dominator_;
};
-class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(HStoreKeyedGeneric, HValue*,
- HValue*, HValue*, LanguageMode,
- Handle<TypeFeedbackVector>,
- FeedbackVectorSlot);
-
- HValue* object() const { return OperandAt(0); }
- HValue* key() const { return OperandAt(1); }
- HValue* value() const { return OperandAt(2); }
- HValue* context() const { return OperandAt(3); }
- LanguageMode language_mode() const { return language_mode_; }
-
- Representation RequiredInputRepresentation(int index) override {
- // tagged[tagged] = tagged
- return Representation::Tagged();
- }
-
- FeedbackVectorSlot slot() const { return slot_; }
- Handle<TypeFeedbackVector> feedback_vector() const {
- return feedback_vector_;
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
-
- private:
- HStoreKeyedGeneric(HValue* context, HValue* object, HValue* key,
- HValue* value, LanguageMode language_mode,
- Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
- : feedback_vector_(vector), slot_(slot), language_mode_(language_mode) {
- SetOperandAt(0, object);
- SetOperandAt(1, key);
- SetOperandAt(2, value);
- SetOperandAt(3, context);
- SetAllSideEffects();
- }
-
- Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorSlot slot_;
- LanguageMode language_mode_;
-};
-
class HTransitionElementsKind final : public HTemplateInstruction<2> {
public:
inline static HTransitionElementsKind* New(Isolate* isolate, Zone* zone,
diff --git a/deps/v8/src/crankshaft/hydrogen-osr.h b/deps/v8/src/crankshaft/hydrogen-osr.h
index 0610b4284f..3bd9b6edad 100644
--- a/deps/v8/src/crankshaft/hydrogen-osr.h
+++ b/deps/v8/src/crankshaft/hydrogen-osr.h
@@ -6,7 +6,7 @@
#define V8_CRANKSHAFT_HYDROGEN_OSR_H_
#include "src/crankshaft/hydrogen.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/hydrogen-types.cc b/deps/v8/src/crankshaft/hydrogen-types.cc
index 20d50d897c..684e6ad09c 100644
--- a/deps/v8/src/crankshaft/hydrogen-types.cc
+++ b/deps/v8/src/crankshaft/hydrogen-types.cc
@@ -12,17 +12,17 @@ namespace v8 {
namespace internal {
// static
-HType HType::FromType(Type* type) {
- if (Type::Any()->Is(type)) return HType::Any();
+HType HType::FromType(AstType* type) {
+ if (AstType::Any()->Is(type)) return HType::Any();
if (!type->IsInhabited()) return HType::None();
- if (type->Is(Type::SignedSmall())) return HType::Smi();
- if (type->Is(Type::Number())) return HType::TaggedNumber();
- if (type->Is(Type::Null())) return HType::Null();
- if (type->Is(Type::String())) return HType::String();
- if (type->Is(Type::Boolean())) return HType::Boolean();
- if (type->Is(Type::Undefined())) return HType::Undefined();
- if (type->Is(Type::Object())) return HType::JSObject();
- if (type->Is(Type::DetectableReceiver())) return HType::JSReceiver();
+ if (type->Is(AstType::SignedSmall())) return HType::Smi();
+ if (type->Is(AstType::Number())) return HType::TaggedNumber();
+ if (type->Is(AstType::Null())) return HType::Null();
+ if (type->Is(AstType::String())) return HType::String();
+ if (type->Is(AstType::Boolean())) return HType::Boolean();
+ if (type->Is(AstType::Undefined())) return HType::Undefined();
+ if (type->Is(AstType::Object())) return HType::JSObject();
+ if (type->Is(AstType::DetectableReceiver())) return HType::JSReceiver();
return HType::Tagged();
}
diff --git a/deps/v8/src/crankshaft/hydrogen-types.h b/deps/v8/src/crankshaft/hydrogen-types.h
index 0690ece34f..3e68872924 100644
--- a/deps/v8/src/crankshaft/hydrogen-types.h
+++ b/deps/v8/src/crankshaft/hydrogen-types.h
@@ -8,8 +8,8 @@
#include <climits>
#include <iosfwd>
+#include "src/ast/ast-types.h"
#include "src/base/macros.h"
-#include "src/types.h"
namespace v8 {
namespace internal {
@@ -64,7 +64,7 @@ class HType final {
HTYPE_LIST(DECLARE_IS_TYPE)
#undef DECLARE_IS_TYPE
- static HType FromType(Type* type) WARN_UNUSED_RESULT;
+ static HType FromType(AstType* type) WARN_UNUSED_RESULT;
static HType FromFieldType(Handle<FieldType> type,
Zone* temp_zone) WARN_UNUSED_RESULT;
static HType FromValue(Handle<Object> value) WARN_UNUSED_RESULT;
diff --git a/deps/v8/src/crankshaft/hydrogen.cc b/deps/v8/src/crankshaft/hydrogen.cc
index f40337e645..8d7b4797c5 100644
--- a/deps/v8/src/crankshaft/hydrogen.cc
+++ b/deps/v8/src/crankshaft/hydrogen.cc
@@ -9,6 +9,7 @@
#include "src/allocation-site-scopes.h"
#include "src/ast/ast-numbering.h"
+#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/crankshaft/hydrogen-bce.h"
@@ -42,7 +43,6 @@
// GetRootConstructor
#include "src/ic/ic-inl.h"
#include "src/isolate-inl.h"
-#include "src/parsing/parser.h"
#include "src/runtime/runtime.h"
#if V8_TARGET_ARCH_IA32
@@ -75,7 +75,9 @@ const auto GetRegConfig = RegisterConfiguration::Crankshaft;
class HOptimizedGraphBuilderWithPositions : public HOptimizedGraphBuilder {
public:
explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
- : HOptimizedGraphBuilder(info) {}
+ : HOptimizedGraphBuilder(info, true) {
+ SetSourcePosition(info->shared_info()->start_position());
+ }
#define DEF_VISIT(type) \
void Visit##type(type* node) override { \
@@ -178,9 +180,10 @@ HCompilationJob::Status HCompilationJob::PrepareJobImpl() {
}
HOptimizedGraphBuilder* graph_builder =
- (info()->is_tracking_positions() || FLAG_trace_ic)
+ (FLAG_hydrogen_track_positions || isolate()->is_profiling() ||
+ FLAG_trace_ic)
? new (info()->zone()) HOptimizedGraphBuilderWithPositions(info())
- : new (info()->zone()) HOptimizedGraphBuilder(info());
+ : new (info()->zone()) HOptimizedGraphBuilder(info(), false);
// Type-check the function.
AstTyper(info()->isolate(), info()->zone(), info()->closure(),
@@ -1362,7 +1365,7 @@ HGraph* HGraphBuilder::CreateGraph() {
DCHECK(!FLAG_minimal);
graph_ = new (zone()) HGraph(info_, descriptor_);
if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
- if (!info_->IsStub() && info_->is_tracking_positions()) {
+ if (!info_->IsStub() && is_tracking_positions()) {
TraceInlinedFunction(info_->shared_info(), SourcePosition::Unknown());
}
CompilationPhase phase("H_Block building", info_);
@@ -1374,7 +1377,7 @@ HGraph* HGraphBuilder::CreateGraph() {
int HGraphBuilder::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
SourcePosition position) {
- DCHECK(info_->is_tracking_positions());
+ DCHECK(is_tracking_positions());
int inline_id = static_cast<int>(graph()->inlined_function_infos().size());
HInlinedFunctionInfo info(shared->start_position());
@@ -1645,48 +1648,6 @@ HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
}
-void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
- HValue* map,
- ElementsKind from_kind,
- ElementsKind to_kind,
- bool is_jsarray) {
- DCHECK(!IsFastHoleyElementsKind(from_kind) ||
- IsFastHoleyElementsKind(to_kind));
-
- if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
- Add<HTrapAllocationMemento>(object);
- }
-
- if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
- HInstruction* elements = AddLoadElements(object);
-
- HInstruction* empty_fixed_array = Add<HConstant>(
- isolate()->factory()->empty_fixed_array());
-
- IfBuilder if_builder(this);
-
- if_builder.IfNot<HCompareObjectEqAndBranch>(elements, empty_fixed_array);
-
- if_builder.Then();
-
- HInstruction* elements_length = AddLoadFixedArrayLength(elements);
-
- HInstruction* array_length =
- is_jsarray
- ? Add<HLoadNamedField>(object, nullptr,
- HObjectAccess::ForArrayLength(from_kind))
- : elements_length;
-
- BuildGrowElementsCapacity(object, elements, from_kind, to_kind,
- array_length, elements_length);
-
- if_builder.End();
- }
-
- Add<HStoreNamedField>(object, HObjectAccess::ForMap(), map);
-}
-
-
void HGraphBuilder::BuildJSObjectCheck(HValue* receiver,
int bit_field_mask) {
// Check that the object isn't a smi.
@@ -2129,8 +2090,7 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
return result;
}
-
-HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
+HValue* HGraphBuilder::BuildNumberToString(HValue* object, AstType* type) {
NoObservableSideEffectsScope scope(this);
// Convert constant numbers at compile time.
@@ -2180,7 +2140,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
}
if_objectissmi.Else();
{
- if (type->Is(Type::SignedSmall())) {
+ if (type->Is(AstType::SignedSmall())) {
if_objectissmi.Deopt(DeoptimizeReason::kExpectedSmi);
} else {
// Check if the object is a heap number.
@@ -2236,7 +2196,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
}
if_objectisnumber.Else();
{
- if (type->Is(Type::Number())) {
+ if (type->Is(AstType::Number())) {
if_objectisnumber.Deopt(DeoptimizeReason::kExpectedHeapNumber);
}
}
@@ -2411,7 +2371,7 @@ HValue* HGraphBuilder::BuildAddStringLengths(HValue* left_length,
HValue* length = AddUncasted<HAdd>(left_length, right_length);
// Check that length <= kMaxLength <=> length < MaxLength + 1.
HValue* max_length = Add<HConstant>(String::kMaxLength + 1);
- if (top_info()->IsStub()) {
+ if (top_info()->IsStub() || !isolate()->IsStringLengthOverflowIntact()) {
// This is a mitigation for crbug.com/627934; the real fix
// will be to migrate the StringAddStub to TurboFan one day.
IfBuilder if_invalid(this);
@@ -2423,6 +2383,7 @@ HValue* HGraphBuilder::BuildAddStringLengths(HValue* left_length,
}
if_invalid.End();
} else {
+ graph()->MarkDependsOnStringLengthOverflow();
Add<HBoundsCheck>(length, max_length);
}
return length;
@@ -2652,7 +2613,7 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
IfBuilder if_size(this);
if_size.If<HCompareNumericAndBranch>(
- size, Add<HConstant>(Page::kMaxRegularHeapObjectSize), Token::LT);
+ size, Add<HConstant>(kMaxRegularHeapObjectSize), Token::LT);
if_size.Then();
{
// Allocate the string object. HAllocate does not care whether we pass
@@ -3075,9 +3036,10 @@ HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object,
ElementsKind new_kind,
HValue* length,
HValue* new_capacity) {
- Add<HBoundsCheck>(new_capacity, Add<HConstant>(
- (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) >>
- ElementsKindToShiftSize(new_kind)));
+ Add<HBoundsCheck>(
+ new_capacity,
+ Add<HConstant>((kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) >>
+ ElementsKindToShiftSize(new_kind)));
HValue* new_elements =
BuildAllocateAndInitializeArray(new_kind, new_capacity);
@@ -3268,93 +3230,6 @@ void HGraphBuilder::BuildCopyElements(HValue* from_elements,
AddIncrementCounter(counters->inlined_copied_elements());
}
-
-HValue* HGraphBuilder::BuildCloneShallowArrayCow(HValue* boilerplate,
- HValue* allocation_site,
- AllocationSiteMode mode,
- ElementsKind kind) {
- HAllocate* array = AllocateJSArrayObject(mode);
-
- HValue* map = AddLoadMap(boilerplate);
- HValue* elements = AddLoadElements(boilerplate);
- HValue* length = AddLoadArrayLength(boilerplate, kind);
-
- BuildJSArrayHeader(array,
- map,
- elements,
- mode,
- FAST_ELEMENTS,
- allocation_site,
- length);
- return array;
-}
-
-
-HValue* HGraphBuilder::BuildCloneShallowArrayEmpty(HValue* boilerplate,
- HValue* allocation_site,
- AllocationSiteMode mode) {
- HAllocate* array = AllocateJSArrayObject(mode);
-
- HValue* map = AddLoadMap(boilerplate);
-
- BuildJSArrayHeader(array,
- map,
- NULL, // set elements to empty fixed array
- mode,
- FAST_ELEMENTS,
- allocation_site,
- graph()->GetConstant0());
- return array;
-}
-
-
-HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
- HValue* allocation_site,
- AllocationSiteMode mode,
- ElementsKind kind) {
- HValue* boilerplate_elements = AddLoadElements(boilerplate);
- HValue* capacity = AddLoadFixedArrayLength(boilerplate_elements);
-
- // Generate size calculation code here in order to make it dominate
- // the JSArray allocation.
- HValue* elements_size = BuildCalculateElementsSize(kind, capacity);
-
- // Create empty JSArray object for now, store elimination should remove
- // redundant initialization of elements and length fields and at the same
- // time the object will be fully prepared for GC if it happens during
- // elements allocation.
- HValue* result = BuildCloneShallowArrayEmpty(
- boilerplate, allocation_site, mode);
-
- HAllocate* elements = BuildAllocateElements(kind, elements_size);
-
- Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(), elements);
-
- // The allocation for the cloned array above causes register pressure on
- // machines with low register counts. Force a reload of the boilerplate
- // elements here to free up a register for the allocation to avoid unnecessary
- // spillage.
- boilerplate_elements = AddLoadElements(boilerplate);
- boilerplate_elements->SetFlag(HValue::kCantBeReplaced);
-
- // Copy the elements array header.
- for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
- HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i);
- Add<HStoreNamedField>(
- elements, access,
- Add<HLoadNamedField>(boilerplate_elements, nullptr, access));
- }
-
- // And the result of the length
- HValue* length = AddLoadArrayLength(boilerplate, kind);
- Add<HStoreNamedField>(result, HObjectAccess::ForArrayLength(kind), length);
-
- BuildCopyElements(boilerplate_elements, kind, elements,
- kind, length, NULL);
- return result;
-}
-
-
void HGraphBuilder::BuildCreateAllocationMemento(
HValue* previous_object,
HValue* previous_object_size,
@@ -3402,16 +3277,6 @@ HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* closure) {
}
-HInstruction* HGraphBuilder::BuildGetScriptContext(int context_index) {
- HValue* native_context = BuildGetNativeContext();
- HValue* script_context_table = Add<HLoadNamedField>(
- native_context, nullptr,
- HObjectAccess::ForContextSlot(Context::SCRIPT_CONTEXT_TABLE_INDEX));
- return Add<HLoadNamedField>(script_context_table, nullptr,
- HObjectAccess::ForScriptContext(context_index));
-}
-
-
HValue* HGraphBuilder::BuildGetParentContext(HValue* depth, int depth_value) {
HValue* script_context = context();
if (depth != NULL) {
@@ -3504,8 +3369,9 @@ HValue* HGraphBuilder::AddLoadJSBuiltin(int context_index) {
return Add<HLoadNamedField>(native_context, nullptr, function_access);
}
-HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
- : HGraphBuilder(info, CallInterfaceDescriptor()),
+HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
+ bool track_positions)
+ : HGraphBuilder(info, CallInterfaceDescriptor(), track_positions),
function_state_(NULL),
initial_function_state_(this, info, NORMAL_RETURN, 0,
TailCallMode::kAllow),
@@ -3520,9 +3386,6 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// to know it's the initial state.
function_state_ = &initial_function_state_;
InitializeAstVisitor(info->isolate());
- if (top_info()->is_tracking_positions()) {
- SetSourcePosition(info->shared_info()->start_position());
- }
}
@@ -3622,6 +3485,7 @@ HGraph::HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor)
allow_code_motion_(false),
use_optimistic_licm_(false),
depends_on_empty_array_proto_elements_(false),
+ depends_on_string_length_overflow_(false),
type_change_checksum_(0),
maximum_environment_size_(0),
no_side_effects_scope_count_(0),
@@ -3629,8 +3493,8 @@ HGraph::HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor)
inlined_function_infos_(info->zone()) {
if (info->IsStub()) {
// For stubs, explicitly add the context to the environment.
- start_environment_ = new (zone_)
- HEnvironment(zone_, descriptor.GetRegisterParameterCount() + 1);
+ start_environment_ =
+ new (zone_) HEnvironment(zone_, descriptor.GetParameterCount() + 1);
} else {
start_environment_ =
new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
@@ -4088,7 +3952,7 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
// Push on the state stack.
owner->set_function_state(this);
- if (compilation_info_->is_tracking_positions()) {
+ if (owner->is_tracking_positions()) {
outer_source_position_ = owner->source_position();
owner->EnterInlinedSource(
info->shared_info()->start_position(),
@@ -4102,7 +3966,7 @@ FunctionState::~FunctionState() {
delete test_context_;
owner_->set_function_state(outer_);
- if (compilation_info_->is_tracking_positions()) {
+ if (owner_->is_tracking_positions()) {
owner_->set_source_position(outer_source_position_);
owner_->EnterInlinedSource(
outer_->compilation_info()->shared_info()->start_position(),
@@ -4651,9 +4515,7 @@ void HOptimizedGraphBuilder::SetUpScope(DeclarationScope* scope) {
environment()->Bind(scope->arguments(), arguments_object);
}
- int rest_index;
- Variable* rest = scope->rest_parameter(&rest_index);
- if (rest) {
+ if (scope->rest_parameter() != nullptr) {
return Bailout(kRestParameter);
}
@@ -4704,7 +4566,7 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
}
AddInstruction(function);
// Allocate a block context and store it to the stack frame.
- HValue* scope_info = Add<HConstant>(scope->GetScopeInfo(isolate()));
+ HValue* scope_info = Add<HConstant>(scope->scope_info());
Add<HPushArguments>(scope_info, function);
HInstruction* inner_context = Add<HCallRuntime>(
Runtime::FunctionForId(Runtime::kPushBlockContext), 2);
@@ -5001,7 +4863,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
CHECK_ALIVE(VisitForValue(stmt->tag()));
Add<HSimulate>(stmt->EntryId());
HValue* tag_value = Top();
- Type* tag_type = bounds_.get(stmt->tag()).lower;
+ AstType* tag_type = bounds_.get(stmt->tag()).lower;
// 1. Build all the tests, with dangling true branches
BailoutId default_id = BailoutId::None();
@@ -5018,8 +4880,8 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
if (current_block() == NULL) return Bailout(kUnsupportedSwitchStatement);
HValue* label_value = Pop();
- Type* label_type = bounds_.get(clause->label()).lower;
- Type* combined_type = clause->compare_type();
+ AstType* label_type = bounds_.get(clause->label()).lower;
+ AstType* combined_type = clause->compare_type();
HControlInstruction* compare = BuildCompareInstruction(
Token::EQ_STRICT, tag_value, label_value, tag_type, label_type,
combined_type,
@@ -5634,7 +5496,6 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
DCHECK(current_block()->HasPredecessor());
Variable* variable = expr->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
if (IsLexicalVariableMode(variable->mode())) {
// TODO(rossberg): should this be an DCHECK?
@@ -6218,7 +6079,7 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
PropertyAccessInfo* info) {
if (!CanInlinePropertyAccess(map_)) return false;
- // Currently only handle Type::Number as a polymorphic case.
+ // Currently only handle AstType::Number as a polymorphic case.
// TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
// instruction.
if (IsNumberType()) return false;
@@ -6929,9 +6790,16 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
Handle<TypeFeedbackVector> vector =
handle(current_feedback_vector(), isolate());
- HStoreNamedGeneric* instr =
- Add<HStoreNamedGeneric>(global_object, var->name(), value,
- function_language_mode(), vector, slot);
+ HValue* name = Add<HConstant>(var->name());
+ HValue* vector_value = Add<HConstant>(vector);
+ HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
+ Callable callable = CodeFactory::StoreICInOptimizedCode(
+ isolate(), function_language_mode());
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context(), global_object, name,
+ value, slot_value, vector_value};
+ HCallWithDescriptor* instr = Add<HCallWithDescriptor>(
+ stub, 0, callable.descriptor(), ArrayVector(values));
USE(instr);
DCHECK(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -6958,7 +6826,6 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
CHECK_ALIVE(VisitForValue(operation));
switch (var->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED:
HandleGlobalVariableAssignment(var, Top(), expr->AssignmentSlot(),
expr->AssignmentId());
@@ -6966,9 +6833,6 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
- if (var->mode() == CONST_LEGACY) {
- return Bailout(kUnsupportedConstCompoundAssignment);
- }
if (var->mode() == CONST) {
return Bailout(kNonInitializerAssignmentToConst);
}
@@ -6998,9 +6862,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
mode = HStoreContextSlot::kCheckDeoptimize;
break;
case CONST:
- return Bailout(kNonInitializerAssignmentToConst);
- case CONST_LEGACY:
- if (is_strict(function_language_mode())) {
+ if (var->throw_on_const_assignment(function_language_mode())) {
return Bailout(kNonInitializerAssignmentToConst);
} else {
return ast_context()->ReturnValue(Pop());
@@ -7072,33 +6934,17 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
if (var->mode() == CONST) {
if (expr->op() != Token::INIT) {
- return Bailout(kNonInitializerAssignmentToConst);
- }
- } else if (var->mode() == CONST_LEGACY) {
- if (expr->op() != Token::INIT) {
- if (is_strict(function_language_mode())) {
+ if (var->throw_on_const_assignment(function_language_mode())) {
return Bailout(kNonInitializerAssignmentToConst);
} else {
CHECK_ALIVE(VisitForValue(expr->value()));
return ast_context()->ReturnValue(Pop());
}
}
-
- // TODO(adamk): Is this required? Legacy const variables are always
- // initialized before use.
- if (var->IsStackAllocated()) {
- // We insert a use of the old value to detect unsupported uses of const
- // variables (e.g. initialization inside a loop).
- HValue* old_value = environment()->Lookup(var);
- Add<HUseConst>(old_value);
- }
}
- if (var->is_arguments()) return Bailout(kAssignmentToArguments);
-
// Handle the assignment.
switch (var->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED:
CHECK_ALIVE(VisitForValue(expr->value()));
HandleGlobalVariableAssignment(var, Top(), expr->AssignmentSlot(),
@@ -7147,10 +6993,10 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
mode = HStoreContextSlot::kCheckDeoptimize;
break;
case CONST:
- // This case is checked statically so no need to
- // perform checks here
- UNREACHABLE();
- case CONST_LEGACY:
+ // If we reached this point, the only possibility
+ // is a sloppy assignment to a function name.
+ DCHECK(function_language_mode() == SLOPPY &&
+ !var->throw_on_const_assignment(SLOPPY));
return ast_context()->ReturnValue(Pop());
default:
mode = HStoreContextSlot::kNoCheck;
@@ -7200,7 +7046,7 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
CHECK_ALIVE(VisitForValue(expr->exception()));
HValue* value = environment()->Pop();
- if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+ if (!is_tracking_positions()) SetSourcePosition(expr->position());
Add<HPushArguments>(value);
Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kThrow), 1);
Add<HSimulate>(expr->id());
@@ -7274,20 +7120,30 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
Handle<TypeFeedbackVector> vector =
handle(current_feedback_vector(), isolate());
+ HValue* key = Add<HConstant>(name);
+ HValue* vector_value = Add<HConstant>(vector);
+ HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
+ HValue* values[] = {context(), object, key,
+ value, slot_value, vector_value};
+
if (current_feedback_vector()->GetKind(slot) ==
FeedbackVectorSlotKind::KEYED_STORE_IC) {
// It's possible that a keyed store of a constant string was converted
// to a named store. Here, at the last minute, we need to make sure to
// use a generic Keyed Store if we are using the type vector, because
// it has to share information with full code.
- HConstant* key = Add<HConstant>(name);
- HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
- object, key, value, function_language_mode(), vector, slot);
+ Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), function_language_mode());
+ HValue* stub = Add<HConstant>(callable.code());
+ HCallWithDescriptor* result = New<HCallWithDescriptor>(
+ stub, 0, callable.descriptor(), ArrayVector(values));
return result;
}
-
- HStoreNamedGeneric* result = New<HStoreNamedGeneric>(
- object, name, value, function_language_mode(), vector, slot);
+ Callable callable = CodeFactory::StoreICInOptimizedCode(
+ isolate(), function_language_mode());
+ HValue* stub = Add<HConstant>(callable.code());
+ HCallWithDescriptor* result = New<HCallWithDescriptor>(
+ stub, 0, callable.descriptor(), ArrayVector(values));
return result;
}
}
@@ -7303,8 +7159,16 @@ HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
New<HLoadKeyedGeneric>(object, key, vector, slot);
return result;
} else {
- HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
- object, key, value, function_language_mode(), vector, slot);
+ HValue* vector_value = Add<HConstant>(vector);
+ HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
+ HValue* values[] = {context(), object, key,
+ value, slot_value, vector_value};
+
+ Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), function_language_mode());
+ HValue* stub = Add<HConstant>(callable.code());
+ HCallWithDescriptor* result = New<HCallWithDescriptor>(
+ stub, 0, callable.descriptor(), ArrayVector(values));
return result;
}
}
@@ -7843,7 +7707,7 @@ HValue* HOptimizedGraphBuilder::BuildNamedAccess(
}
HValue* checked_object;
- // Type::Number() is only supported by polymorphic load/call handling.
+ // AstType::Number() is only supported by polymorphic load/call handling.
DCHECK(!info.IsNumberType());
BuildCheckHeapObject(object);
if (AreStringTypes(maps)) {
@@ -8409,14 +8273,12 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
return false;
}
- if (target_info.scope()->num_heap_slots() > 0) {
+ if (target_info.scope()->NeedsContext()) {
TraceInline(target, caller, "target has context-allocated variables");
return false;
}
- int rest_index;
- Variable* rest = target_info.scope()->rest_parameter(&rest_index);
- if (rest) {
+ if (target_info.scope()->rest_parameter() != nullptr) {
TraceInline(target, caller, "target uses rest parameters");
return false;
}
@@ -8490,7 +8352,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
.Run();
int inlining_id = 0;
- if (top_info()->is_tracking_positions()) {
+ if (is_tracking_positions()) {
inlining_id = TraceInlinedFunction(target_shared, source_position());
}
@@ -8539,7 +8401,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
return_id, target, context, arguments_count, function,
function_state()->inlining_kind(), function->scope()->arguments(),
arguments_object, syntactic_tail_call_mode);
- if (top_info()->is_tracking_positions()) {
+ if (is_tracking_positions()) {
enter_inlined->set_inlining_id(inlining_id);
}
function_state()->set_entry(enter_inlined);
@@ -9375,7 +9237,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(
HValue* api_function_address = Add<HConstant>(ExternalReference(ref));
HValue* op_vals[] = {context(), Add<HConstant>(function), call_data, holder,
- api_function_address, nullptr};
+ api_function_address};
HInstruction* call = nullptr;
CHECK(argc <= CallApiCallbackStub::kArgMax);
@@ -9386,16 +9248,14 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(
HConstant* code_value = Add<HConstant>(code);
call = New<HCallWithDescriptor>(
code_value, argc + 1, stub.GetCallInterfaceDescriptor(),
- Vector<HValue*>(op_vals, arraysize(op_vals) - 1),
- syntactic_tail_call_mode);
+ Vector<HValue*>(op_vals, arraysize(op_vals)), syntactic_tail_call_mode);
} else {
CallApiCallbackStub stub(isolate(), argc, call_data_undefined, false);
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
call = New<HCallWithDescriptor>(
code_value, argc + 1, stub.GetCallInterfaceDescriptor(),
- Vector<HValue*>(op_vals, arraysize(op_vals) - 1),
- syntactic_tail_call_mode);
+ Vector<HValue*>(op_vals, arraysize(op_vals)), syntactic_tail_call_mode);
Drop(1); // Drop function.
}
@@ -9461,8 +9321,6 @@ bool HOptimizedGraphBuilder::TryIndirectCall(Call* expr) {
case kFunctionApply: {
// For .apply, only the pattern f.apply(receiver, arguments)
// is supported.
- if (current_info()->scope()->arguments() == NULL) return false;
-
if (!CanBeFunctionApplyArguments(expr)) return false;
BuildFunctionApply(expr);
@@ -9482,6 +9340,10 @@ void HOptimizedGraphBuilder::BuildFunctionApply(Call* expr) {
HValue* function = Pop(); // f
Drop(1); // apply
+ // Make sure the arguments object is live.
+ VariableProxy* arg_two = args->at(1)->AsVariableProxy();
+ LookupAndMakeLive(arg_two->var());
+
Handle<Map> function_map = expr->GetReceiverTypes()->first();
HValue* checked_function = AddCheckMap(function, function_map);
@@ -9727,8 +9589,9 @@ bool HOptimizedGraphBuilder::CanBeFunctionApplyArguments(Call* expr) {
if (args->length() != 2) return false;
VariableProxy* arg_two = args->at(1)->AsVariableProxy();
if (arg_two == NULL || !arg_two->var()->IsStackAllocated()) return false;
- HValue* arg_two_value = LookupAndMakeLive(arg_two->var());
+ HValue* arg_two_value = environment()->Lookup(arg_two->var());
if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
+ DCHECK_NOT_NULL(current_info()->scope()->arguments());
return true;
}
@@ -9737,7 +9600,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+ if (!is_tracking_positions()) SetSourcePosition(expr->position());
Expression* callee = expr->expression();
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
HInstruction* call = NULL;
@@ -9975,7 +9838,7 @@ bool HOptimizedGraphBuilder::TryInlineArrayCall(Expression* expression,
HValue* elements_size = BuildCalculateElementsSize(kind, capacity);
// Bail out for large objects.
- HValue* max_size = Add<HConstant>(Page::kMaxRegularHeapObjectSize);
+ HValue* max_size = Add<HConstant>(kMaxRegularHeapObjectSize);
Add<HBoundsCheck>(elements_size, max_size);
// Allocate (dealing with failure appropriately).
@@ -10019,7 +9882,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+ if (!is_tracking_positions()) SetSourcePosition(expr->position());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
Factory* factory = isolate()->factory();
@@ -10419,6 +10282,8 @@ void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
HInstruction* length = AddUncasted<HDiv>(byte_length,
Add<HConstant>(static_cast<int32_t>(element_size)));
+ // Callers (in typedarray.js) ensure that length <= %_MaxSmi().
+ length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
Add<HStoreNamedField>(obj,
HObjectAccess::ForJSTypedArrayLength(),
@@ -10602,7 +10467,7 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
return ast_context()->ReturnInstruction(instr, expr->id());
} else if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
Bailout(kDeleteWithGlobalVariable);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global variables is false. 'this' is not really
@@ -10680,13 +10545,12 @@ void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
if (join != NULL) return ast_context()->ReturnValue(Pop());
}
-
-static Representation RepresentationFor(Type* type) {
+static Representation RepresentationFor(AstType* type) {
DisallowHeapAllocation no_allocation;
- if (type->Is(Type::None())) return Representation::None();
- if (type->Is(Type::SignedSmall())) return Representation::Smi();
- if (type->Is(Type::Signed32())) return Representation::Integer32();
- if (type->Is(Type::Number())) return Representation::Double();
+ if (type->Is(AstType::None())) return Representation::None();
+ if (type->Is(AstType::SignedSmall())) return Representation::Smi();
+ if (type->Is(AstType::Signed32())) return Representation::Integer32();
+ if (type->Is(AstType::Number())) return Representation::Double();
return Representation::Tagged();
}
@@ -10745,7 +10609,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+ if (!is_tracking_positions()) SetSourcePosition(expr->position());
Expression* target = expr->expression();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
@@ -10763,9 +10627,6 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->mode() == CONST_LEGACY) {
- return Bailout(kUnsupportedCountOperationWithConst);
- }
if (var->mode() == CONST) {
return Bailout(kNonInitializerAssignmentToConst);
}
@@ -10778,7 +10639,6 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
Push(after);
switch (var->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED:
HandleGlobalVariableAssignment(var, after, expr->CountSlot(),
expr->AssignmentId());
@@ -10939,27 +10799,24 @@ bool CanBeZero(HValue* right) {
return true;
}
-
-HValue* HGraphBuilder::EnforceNumberType(HValue* number,
- Type* expected) {
- if (expected->Is(Type::SignedSmall())) {
+HValue* HGraphBuilder::EnforceNumberType(HValue* number, AstType* expected) {
+ if (expected->Is(AstType::SignedSmall())) {
return AddUncasted<HForceRepresentation>(number, Representation::Smi());
}
- if (expected->Is(Type::Signed32())) {
+ if (expected->Is(AstType::Signed32())) {
return AddUncasted<HForceRepresentation>(number,
Representation::Integer32());
}
return number;
}
-
-HValue* HGraphBuilder::TruncateToNumber(HValue* value, Type** expected) {
+HValue* HGraphBuilder::TruncateToNumber(HValue* value, AstType** expected) {
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
Maybe<HConstant*> number =
constant->CopyToTruncatedNumber(isolate(), zone());
if (number.IsJust()) {
- *expected = Type::Number();
+ *expected = AstType::Number();
return AddInstruction(number.FromJust());
}
}
@@ -10969,24 +10826,24 @@ HValue* HGraphBuilder::TruncateToNumber(HValue* value, Type** expected) {
// pushes with a NoObservableSideEffectsScope.
NoObservableSideEffectsScope no_effects(this);
- Type* expected_type = *expected;
+ AstType* expected_type = *expected;
// Separate the number type from the rest.
- Type* expected_obj =
- Type::Intersect(expected_type, Type::NonNumber(), zone());
- Type* expected_number =
- Type::Intersect(expected_type, Type::Number(), zone());
+ AstType* expected_obj =
+ AstType::Intersect(expected_type, AstType::NonNumber(), zone());
+ AstType* expected_number =
+ AstType::Intersect(expected_type, AstType::Number(), zone());
// We expect to get a number.
- // (We need to check first, since Type::None->Is(Type::Any()) == true.
- if (expected_obj->Is(Type::None())) {
- DCHECK(!expected_number->Is(Type::None()));
+ // (We need to check first, since AstType::None->Is(AstType::Any()) == true.
+ if (expected_obj->Is(AstType::None())) {
+ DCHECK(!expected_number->Is(AstType::None()));
return value;
}
- if (expected_obj->Is(Type::Undefined())) {
+ if (expected_obj->Is(AstType::Undefined())) {
// This is already done by HChange.
- *expected = Type::Union(expected_number, Type::Number(), zone());
+ *expected = AstType::Union(expected_number, AstType::Number(), zone());
return value;
}
@@ -10999,9 +10856,9 @@ HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
HValue* left,
HValue* right,
PushBeforeSimulateBehavior push_sim_result) {
- Type* left_type = bounds_.get(expr->left()).lower;
- Type* right_type = bounds_.get(expr->right()).lower;
- Type* result_type = bounds_.get(expr).lower;
+ AstType* left_type = bounds_.get(expr->left()).lower;
+ AstType* right_type = bounds_.get(expr->right()).lower;
+ AstType* result_type = bounds_.get(expr).lower;
Maybe<int> fixed_right_arg = expr->fixed_right_arg();
Handle<AllocationSite> allocation_site = expr->allocation_site();
@@ -11027,12 +10884,10 @@ HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
return result;
}
-HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
- HValue* right, Type* left_type,
- Type* right_type, Type* result_type,
- Maybe<int> fixed_right_arg,
- HAllocationMode allocation_mode,
- BailoutId opt_id) {
+HValue* HGraphBuilder::BuildBinaryOperation(
+ Token::Value op, HValue* left, HValue* right, AstType* left_type,
+ AstType* right_type, AstType* result_type, Maybe<int> fixed_right_arg,
+ HAllocationMode allocation_mode, BailoutId opt_id) {
bool maybe_string_add = false;
if (op == Token::ADD) {
// If we are adding constant string with something for which we don't have
@@ -11040,18 +10895,18 @@ HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
// generate deopt instructions.
if (!left_type->IsInhabited() && right->IsConstant() &&
HConstant::cast(right)->HasStringValue()) {
- left_type = Type::String();
+ left_type = AstType::String();
}
if (!right_type->IsInhabited() && left->IsConstant() &&
HConstant::cast(left)->HasStringValue()) {
- right_type = Type::String();
+ right_type = AstType::String();
}
- maybe_string_add = (left_type->Maybe(Type::String()) ||
- left_type->Maybe(Type::Receiver()) ||
- right_type->Maybe(Type::String()) ||
- right_type->Maybe(Type::Receiver()));
+ maybe_string_add = (left_type->Maybe(AstType::String()) ||
+ left_type->Maybe(AstType::Receiver()) ||
+ right_type->Maybe(AstType::String()) ||
+ right_type->Maybe(AstType::Receiver()));
}
Representation left_rep = RepresentationFor(left_type);
@@ -11061,7 +10916,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
Add<HDeoptimize>(
DeoptimizeReason::kInsufficientTypeFeedbackForLHSOfBinaryOperation,
Deoptimizer::SOFT);
- left_type = Type::Any();
+ left_type = AstType::Any();
left_rep = RepresentationFor(left_type);
maybe_string_add = op == Token::ADD;
}
@@ -11070,7 +10925,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
Add<HDeoptimize>(
DeoptimizeReason::kInsufficientTypeFeedbackForRHSOfBinaryOperation,
Deoptimizer::SOFT);
- right_type = Type::Any();
+ right_type = AstType::Any();
right_rep = RepresentationFor(right_type);
maybe_string_add = op == Token::ADD;
}
@@ -11082,34 +10937,34 @@ HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
// Special case for string addition here.
if (op == Token::ADD &&
- (left_type->Is(Type::String()) || right_type->Is(Type::String()))) {
+ (left_type->Is(AstType::String()) || right_type->Is(AstType::String()))) {
// Validate type feedback for left argument.
- if (left_type->Is(Type::String())) {
+ if (left_type->Is(AstType::String())) {
left = BuildCheckString(left);
}
// Validate type feedback for right argument.
- if (right_type->Is(Type::String())) {
+ if (right_type->Is(AstType::String())) {
right = BuildCheckString(right);
}
// Convert left argument as necessary.
- if (left_type->Is(Type::Number())) {
- DCHECK(right_type->Is(Type::String()));
+ if (left_type->Is(AstType::Number())) {
+ DCHECK(right_type->Is(AstType::String()));
left = BuildNumberToString(left, left_type);
- } else if (!left_type->Is(Type::String())) {
- DCHECK(right_type->Is(Type::String()));
+ } else if (!left_type->Is(AstType::String())) {
+ DCHECK(right_type->Is(AstType::String()));
return AddUncasted<HStringAdd>(
left, right, allocation_mode.GetPretenureMode(),
STRING_ADD_CONVERT_LEFT, allocation_mode.feedback_site());
}
// Convert right argument as necessary.
- if (right_type->Is(Type::Number())) {
- DCHECK(left_type->Is(Type::String()));
+ if (right_type->Is(AstType::Number())) {
+ DCHECK(left_type->Is(AstType::String()));
right = BuildNumberToString(right, right_type);
- } else if (!right_type->Is(Type::String())) {
- DCHECK(left_type->Is(Type::String()));
+ } else if (!right_type->Is(AstType::String())) {
+ DCHECK(left_type->Is(AstType::String()));
return AddUncasted<HStringAdd>(
left, right, allocation_mode.GetPretenureMode(),
STRING_ADD_CONVERT_RIGHT, allocation_mode.feedback_site());
@@ -11267,8 +11122,8 @@ HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
break;
case Token::BIT_OR: {
HValue *operand, *shift_amount;
- if (left_type->Is(Type::Signed32()) &&
- right_type->Is(Type::Signed32()) &&
+ if (left_type->Is(AstType::Signed32()) &&
+ right_type->Is(AstType::Signed32()) &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
instr = AddUncasted<HRor>(operand, shift_amount);
} else {
@@ -11470,7 +11325,7 @@ void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
BuildBinaryOperation(expr, left, right,
ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
: PUSH_BEFORE_SIMULATE);
- if (top_info()->is_tracking_positions() && result->IsBinaryOperation()) {
+ if (is_tracking_positions() && result->IsBinaryOperation()) {
HBinaryOperation::cast(result)->SetOperandPositions(
zone(),
ScriptPositionToSourcePosition(expr->left()->position()),
@@ -11512,7 +11367,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+ if (!is_tracking_positions()) SetSourcePosition(expr->position());
// Check for a few fast cases. The AST visiting behavior must be in sync
// with the full codegen: We don't push both left and right values onto
@@ -11540,9 +11395,9 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnControl(instr, expr->id());
}
- Type* left_type = bounds_.get(expr->left()).lower;
- Type* right_type = bounds_.get(expr->right()).lower;
- Type* combined_type = expr->combined_type();
+ AstType* left_type = bounds_.get(expr->left()).lower;
+ AstType* right_type = bounds_.get(expr->right()).lower;
+ AstType* combined_type = expr->combined_type();
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
@@ -11627,10 +11482,9 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnControl(compare, expr->id());
}
-
HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
- Token::Value op, HValue* left, HValue* right, Type* left_type,
- Type* right_type, Type* combined_type, SourcePosition left_position,
+ Token::Value op, HValue* left, HValue* right, AstType* left_type,
+ AstType* right_type, AstType* combined_type, SourcePosition left_position,
SourcePosition right_position, PushBeforeSimulateBehavior push_sim_result,
BailoutId bailout_id) {
// Cases handled below depend on collected type feedback. They should
@@ -11640,14 +11494,14 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
DeoptimizeReason::
kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,
Deoptimizer::SOFT);
- combined_type = left_type = right_type = Type::Any();
+ combined_type = left_type = right_type = AstType::Any();
}
Representation left_rep = RepresentationFor(left_type);
Representation right_rep = RepresentationFor(right_type);
Representation combined_rep = RepresentationFor(combined_type);
- if (combined_type->Is(Type::Receiver())) {
+ if (combined_type->Is(AstType::Receiver())) {
if (Token::IsEqualityOp(op)) {
// HCompareObjectEqAndBranch can only deal with object, so
// exclude numbers.
@@ -11669,7 +11523,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
AddCheckMap(operand_to_check, map);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
- if (top_info()->is_tracking_positions()) {
+ if (is_tracking_positions()) {
result->set_operand_position(zone(), 0, left_position);
result->set_operand_position(zone(), 1, right_position);
}
@@ -11731,7 +11585,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
Bailout(kUnsupportedNonPrimitiveCompare);
return NULL;
}
- } else if (combined_type->Is(Type::InternalizedString()) &&
+ } else if (combined_type->Is(AstType::InternalizedString()) &&
Token::IsEqualityOp(op)) {
// If we have a constant argument, it should be consistent with the type
// feedback (otherwise we fail assertions in HCompareObjectEqAndBranch).
@@ -11752,7 +11606,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
return result;
- } else if (combined_type->Is(Type::String())) {
+ } else if (combined_type->Is(AstType::String())) {
BuildCheckHeapObject(left);
Add<HCheckInstanceType>(left, HCheckInstanceType::IS_STRING);
BuildCheckHeapObject(right);
@@ -11760,7 +11614,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HStringCompareAndBranch* result =
New<HStringCompareAndBranch>(left, right, op);
return result;
- } else if (combined_type->Is(Type::Boolean())) {
+ } else if (combined_type->Is(AstType::Boolean())) {
AddCheckMap(left, isolate()->factory()->boolean_map());
AddCheckMap(right, isolate()->factory()->boolean_map());
if (Token::IsEqualityOp(op)) {
@@ -11812,7 +11666,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HCompareNumericAndBranch* result =
New<HCompareNumericAndBranch>(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
- if (top_info()->is_tracking_positions()) {
+ if (is_tracking_positions()) {
result->SetOperandPositions(zone(), left_position, right_position);
}
return result;
@@ -11828,7 +11682,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
DCHECK(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
- if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+ if (!is_tracking_positions()) SetSourcePosition(expr->position());
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
HControlInstruction* instr;
@@ -11899,7 +11753,7 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
Add<HAllocate>(object_size_constant, type, pretenure_flag, instance_type,
graph()->GetConstant0(), top_site);
- // If allocation folding reaches Page::kMaxRegularHeapObjectSize the
+ // If allocation folding reaches kMaxRegularHeapObjectSize the
// elements array may not get folded into the object. Hence, we set the
// elements pointer to empty fixed array and let store elimination remove
// this store in the folding case.
@@ -12196,7 +12050,6 @@ void HOptimizedGraphBuilder::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -12236,7 +12089,6 @@ void HOptimizedGraphBuilder::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
@@ -12475,27 +12327,18 @@ void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
}
-// Fast support for string.charAt(n) and string[n].
-void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* char_code = Pop();
- HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
// Fast support for SubString.
void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
DCHECK_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitExpressions(call->arguments()));
- PushArgumentsFromEnvironment(call->arguments()->length());
Callable callable = CodeFactory::SubString(isolate());
HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {context()};
- HInstruction* result =
- New<HCallWithDescriptor>(stub, call->arguments()->length(),
- callable.descriptor(), ArrayVector(values));
+ HValue* to = Pop();
+ HValue* from = Pop();
+ HValue* string = Pop();
+ HValue* values[] = {context(), string, from, to};
+ HInstruction* result = New<HCallWithDescriptor>(
+ stub, 0, callable.descriptor(), ArrayVector(values));
result->set_type(HType::String());
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -12517,13 +12360,16 @@ void HOptimizedGraphBuilder::GenerateNewObject(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
DCHECK_EQ(4, call->arguments()->length());
CHECK_ALIVE(VisitExpressions(call->arguments()));
- PushArgumentsFromEnvironment(call->arguments()->length());
Callable callable = CodeFactory::RegExpExec(isolate());
+ HValue* last_match_info = Pop();
+ HValue* index = Pop();
+ HValue* subject = Pop();
+ HValue* regexp_object = Pop();
HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {context()};
- HInstruction* result =
- New<HCallWithDescriptor>(stub, call->arguments()->length(),
- callable.descriptor(), ArrayVector(values));
+ HValue* values[] = {context(), regexp_object, subject, index,
+ last_match_info};
+ HInstruction* result = New<HCallWithDescriptor>(
+ stub, 0, callable.descriptor(), ArrayVector(values));
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -12567,7 +12413,7 @@ void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
DCHECK_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* number = Pop();
- HValue* result = BuildNumberToString(number, Type::Any());
+ HValue* result = BuildNumberToString(number, AstType::Any());
return ast_context()->ReturnValue(result);
}
@@ -13249,8 +13095,7 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
PrintIndent();
std::ostringstream os;
os << "0 " << uses << " " << NameOf(instruction) << " " << *instruction;
- if (graph->info()->is_tracking_positions() &&
- instruction->has_position() && instruction->position().raw() != 0) {
+ if (instruction->has_position() && instruction->position().raw() != 0) {
const SourcePosition pos = instruction->position();
os << " pos:";
if (pos.inlining_id() != 0) os << pos.inlining_id() << "_";
diff --git a/deps/v8/src/crankshaft/hydrogen.h b/deps/v8/src/crankshaft/hydrogen.h
index 931dd01dcb..d2f1637d11 100644
--- a/deps/v8/src/crankshaft/hydrogen.h
+++ b/deps/v8/src/crankshaft/hydrogen.h
@@ -8,13 +8,15 @@
#include "src/accessors.h"
#include "src/allocation.h"
#include "src/ast/ast-type-bounds.h"
+#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
+#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/hydrogen-instructions.h"
#include "src/globals.h"
#include "src/parsing/parse-info.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -30,12 +32,11 @@ class HTracer;
class LAllocator;
class LChunk;
class LiveRange;
-class Scope;
class HCompilationJob final : public CompilationJob {
public:
explicit HCompilationJob(Handle<JSFunction> function)
- : CompilationJob(&info_, "Crankshaft"),
+ : CompilationJob(function->GetIsolate(), &info_, "Crankshaft"),
zone_(function->GetIsolate()->allocator()),
parse_info_(&zone_, function),
info_(&parse_info_, function),
@@ -439,6 +440,13 @@ class HGraph final : public ZoneObject {
return depends_on_empty_array_proto_elements_;
}
+ void MarkDependsOnStringLengthOverflow() {
+ if (depends_on_string_length_overflow_) return;
+ info()->dependencies()->AssumePropertyCell(
+ isolate()->factory()->string_length_protector());
+ depends_on_string_length_overflow_ = true;
+ }
+
bool has_uint32_instructions() {
DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
return uint32_instructions_ != NULL;
@@ -514,6 +522,7 @@ class HGraph final : public ZoneObject {
bool allow_code_motion_;
bool use_optimistic_licm_;
bool depends_on_empty_array_proto_elements_;
+ bool depends_on_string_length_overflow_;
int type_change_checksum_;
int maximum_environment_size_;
int no_side_effects_scope_count_;
@@ -1056,14 +1065,16 @@ class HAllocationMode final BASE_EMBEDDED {
class HGraphBuilder {
public:
explicit HGraphBuilder(CompilationInfo* info,
- CallInterfaceDescriptor descriptor)
+ CallInterfaceDescriptor descriptor,
+ bool track_positions)
: info_(info),
descriptor_(descriptor),
graph_(NULL),
current_block_(NULL),
scope_(info->scope()),
position_(SourcePosition::Unknown()),
- start_position_(0) {}
+ start_position_(0),
+ track_positions_(track_positions) {}
virtual ~HGraphBuilder() {}
Scope* scope() const { return scope_; }
@@ -1395,7 +1406,7 @@ class HGraphBuilder {
ElementsKind to_kind,
bool is_jsarray);
- HValue* BuildNumberToString(HValue* object, Type* type);
+ HValue* BuildNumberToString(HValue* object, AstType* type);
HValue* BuildToNumber(HValue* input);
HValue* BuildToObject(HValue* receiver);
@@ -1499,8 +1510,8 @@ class HGraphBuilder {
HValue** shift_amount);
HValue* BuildBinaryOperation(Token::Value op, HValue* left, HValue* right,
- Type* left_type, Type* right_type,
- Type* result_type, Maybe<int> fixed_right_arg,
+ AstType* left_type, AstType* right_type,
+ AstType* result_type, Maybe<int> fixed_right_arg,
HAllocationMode allocation_mode,
BailoutId opt_id = BailoutId::None());
@@ -1513,8 +1524,8 @@ class HGraphBuilder {
HValue* AddLoadJSBuiltin(int context_index);
- HValue* EnforceNumberType(HValue* number, Type* expected);
- HValue* TruncateToNumber(HValue* value, Type** expected);
+ HValue* EnforceNumberType(HValue* number, AstType* expected);
+ HValue* TruncateToNumber(HValue* value, AstType** expected);
void FinishExitWithHardDeoptimization(DeoptimizeReason reason);
@@ -1833,20 +1844,6 @@ class HGraphBuilder {
HValue* length,
HValue* capacity);
- HValue* BuildCloneShallowArrayCow(HValue* boilerplate,
- HValue* allocation_site,
- AllocationSiteMode mode,
- ElementsKind kind);
-
- HValue* BuildCloneShallowArrayEmpty(HValue* boilerplate,
- HValue* allocation_site,
- AllocationSiteMode mode);
-
- HValue* BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
- HValue* allocation_site,
- AllocationSiteMode mode,
- ElementsKind kind);
-
HValue* BuildElementIndexHash(HValue* index);
void BuildCreateAllocationMemento(HValue* previous_object,
@@ -1859,7 +1856,7 @@ class HGraphBuilder {
HInstruction* BuildGetNativeContext(HValue* closure);
HInstruction* BuildGetNativeContext();
- HInstruction* BuildGetScriptContext(int context_index);
+
// Builds a loop version if |depth| is specified or unrolls the loop to
// |depth_value| iterations otherwise.
HValue* BuildGetParentContext(HValue* depth, int depth_value);
@@ -1879,7 +1876,7 @@ class HGraphBuilder {
}
void EnterInlinedSource(int start_position, int id) {
- if (top_info()->is_tracking_positions()) {
+ if (is_tracking_positions()) {
start_position_ = start_position;
position_.set_inlining_id(id);
}
@@ -1900,6 +1897,8 @@ class HGraphBuilder {
SourcePosition source_position() { return position_; }
void set_source_position(SourcePosition position) { position_ = position; }
+ bool is_tracking_positions() { return track_positions_; }
+
int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
SourcePosition position);
@@ -1925,6 +1924,7 @@ class HGraphBuilder {
Scope* scope_;
SourcePosition position_;
int start_position_;
+ bool track_positions_;
};
template <>
@@ -2122,7 +2122,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
BreakAndContinueScope* next_;
};
- explicit HOptimizedGraphBuilder(CompilationInfo* info);
+ explicit HOptimizedGraphBuilder(CompilationInfo* info, bool track_positions);
bool BuildGraph() override;
@@ -2214,7 +2214,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
F(IsJSProxy) \
F(Call) \
F(NewObject) \
- F(StringCharFromCode) \
F(ToInteger) \
F(ToObject) \
F(ToString) \
@@ -2305,11 +2304,9 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
int index,
HEnvironment* env) {
if (!FLAG_analyze_environment_liveness) return false;
- // |this| and |arguments| are always live; zapping parameters isn't
- // safe because function.arguments can inspect them at any time.
- return !var->is_this() &&
- !var->is_arguments() &&
- env->is_local_index(index);
+ // Zapping parameters isn't safe because function.arguments can inspect them
+ // at any time.
+ return env->is_local_index(index);
}
void BindIfLive(Variable* var, HValue* value) {
HEnvironment* env = environment();
@@ -2706,8 +2703,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
};
HControlInstruction* BuildCompareInstruction(
- Token::Value op, HValue* left, HValue* right, Type* left_type,
- Type* right_type, Type* combined_type, SourcePosition left_position,
+ Token::Value op, HValue* left, HValue* right, AstType* left_type,
+ AstType* right_type, AstType* combined_type, SourcePosition left_position,
SourcePosition right_position, PushBeforeSimulateBehavior push_sim_result,
BailoutId bailout_id);
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
index 2512e2be01..6c121dd271 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -164,7 +164,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Possibly allocate a local context.
- if (info_->scope()->num_heap_slots() > 0) {
+ if (info_->scope()->NeedsContext()) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in edi.
@@ -172,7 +172,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
if (info()->scope()->is_script_scope()) {
__ push(edi);
- __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ Push(info()->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
@@ -2397,20 +2397,6 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = ToRegister(instr->temp_slot());
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ mov(vector_register, vector);
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ mov(slot_register, Immediate(Smi::FromInt(index)));
-}
-
-
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->result()).is(eax));
@@ -3703,21 +3689,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
- __ mov(StoreDescriptor::NameRegister(), instr->name());
- Handle<Code> ic =
- CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
if (instr->index()->IsConstantOperand()) {
@@ -3877,21 +3848,6 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
}
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
@@ -4831,7 +4787,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
@@ -4874,7 +4830,7 @@ void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, temp, flags);
} else {
Register size = ToRegister(instr->size());
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
index 38a493dbb4..8e16d9c5fc 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
@@ -294,8 +294,6 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
- template <class T>
- void EmitVectorStoreICRegisters(T* instr);
void EmitReturn(LReturn* instr);
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
index 67942241e6..e6077cc4ad 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
@@ -351,15 +351,6 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
}
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(String::cast(*name())->ToCString().get());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -392,15 +383,6 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -910,7 +892,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->num_heap_slots() > 0) {
+ if (info_->scope()->NeedsContext()) {
result = MarkAsCall(result, instr);
}
return result;
@@ -1054,6 +1036,10 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
+ DCHECK_EQ(descriptor.GetParameterCount() +
+ LCallWithDescriptor::kImplicitRegisterParameterCount,
+ instr->OperandCount());
+
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
// Target
@@ -1061,15 +1047,20 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
// Context
LOperand* op = UseFixed(instr->OperandAt(1), esi);
ops.Add(op, zone());
- // Other register parameters
- for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
- i < instr->OperandCount(); i++) {
- op =
- UseFixed(instr->OperandAt(i),
- descriptor.GetRegisterParameter(
- i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+ // Load register parameters.
+ int i = 0;
+ for (; i < descriptor.GetRegisterParameterCount(); i++) {
+ op = UseFixed(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+ descriptor.GetRegisterParameter(i));
ops.Add(op, zone());
}
+ // Push stack parameters.
+ for (; i < descriptor.GetParameterCount(); i++) {
+ op = UseAny(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+ AddInstruction(new (zone()) LPushArgument(op), instr);
+ }
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
@@ -2211,26 +2202,6 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
- LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
- DCHECK(instr->object()->representation().IsTagged());
- DCHECK(instr->key()->representation().IsTagged());
- DCHECK(instr->value()->representation().IsTagged());
-
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreKeyedGeneric* result = new (zone())
- LStoreKeyedGeneric(context, object, key, value, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2332,20 +2303,6 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreNamedGeneric* result =
- new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
index e525341ca0..816d8fd2c1 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
@@ -136,9 +136,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreKeyed) \
- V(StoreKeyedGeneric) \
V(StoreNamedField) \
- V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
@@ -2022,32 +2020,6 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
- LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
- Handle<Object> name() const { return hydrogen()->name(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val,
@@ -2078,34 +2050,6 @@ class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
- LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* value, LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- inputs_[3] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
diff --git a/deps/v8/src/crankshaft/lithium-allocator.h b/deps/v8/src/crankshaft/lithium-allocator.h
index ce0e56560b..d28ad7f9e7 100644
--- a/deps/v8/src/crankshaft/lithium-allocator.h
+++ b/deps/v8/src/crankshaft/lithium-allocator.h
@@ -9,7 +9,7 @@
#include "src/base/compiler-specific.h"
#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/lithium.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/lithium-codegen.cc b/deps/v8/src/crankshaft/lithium-codegen.cc
index 5041de6451..decc2a596f 100644
--- a/deps/v8/src/crankshaft/lithium-codegen.cc
+++ b/deps/v8/src/crankshaft/lithium-codegen.cc
@@ -66,6 +66,8 @@ LCodeGenBase::LCodeGenBase(LChunk* chunk, MacroAssembler* assembler,
source_position_table_builder_(info->zone(),
info->SourcePositionRecordingMode()) {}
+Isolate* LCodeGenBase::isolate() const { return info_->isolate(); }
+
bool LCodeGenBase::GenerateBody() {
DCHECK(is_generating());
bool emit_instructions = true;
diff --git a/deps/v8/src/crankshaft/lithium-codegen.h b/deps/v8/src/crankshaft/lithium-codegen.h
index fbf96924ee..c6bf447543 100644
--- a/deps/v8/src/crankshaft/lithium-codegen.h
+++ b/deps/v8/src/crankshaft/lithium-codegen.h
@@ -6,13 +6,13 @@
#define V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
#include "src/bailout-reason.h"
-#include "src/compiler.h"
#include "src/deoptimizer.h"
#include "src/source-position-table.h"
namespace v8 {
namespace internal {
+class CompilationInfo;
class HGraph;
class LChunk;
class LEnvironment;
@@ -29,7 +29,7 @@ class LCodeGenBase BASE_EMBEDDED {
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
+ Isolate* isolate() const;
Factory* factory() const { return isolate()->factory(); }
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/crankshaft/lithium.h b/deps/v8/src/crankshaft/lithium.h
index a2c028330b..d04bd5674a 100644
--- a/deps/v8/src/crankshaft/lithium.h
+++ b/deps/v8/src/crankshaft/lithium.h
@@ -12,7 +12,7 @@
#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/hydrogen.h"
#include "src/safepoint-table.h"
-#include "src/zone-allocator.h"
+#include "src/zone/zone-allocator.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
index 6be0d13f13..b24b1c5f08 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -171,7 +171,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Possibly allocate a local context.
- if (info()->scope()->num_heap_slots() > 0) {
+ if (info()->scope()->NeedsContext()) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in a1.
@@ -179,7 +179,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
if (info()->scope()->is_script_scope()) {
__ push(a1);
- __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ Push(info()->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
@@ -2499,20 +2499,6 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = ToRegister(instr->temp_slot());
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ li(vector_register, vector);
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ li(slot_register, Operand(Smi::FromInt(index)));
-}
-
-
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->result()).is(v0));
@@ -3448,7 +3434,9 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
// Math.sqrt(-Infinity) == NaN
Label done;
__ Move(temp, static_cast<double>(-V8_INFINITY));
+ // Set up Infinity.
__ Neg_d(result, temp);
+ // result is overwritten if the branch is not taken.
__ BranchF(&done, NULL, eq, temp, input);
// Add +0 to convert -0 to +0.
@@ -3800,21 +3788,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
- __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic =
- CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
Operand operand(0);
@@ -4025,21 +3998,6 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
}
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
class DeferredMaybeGrowElements final : public LDeferredCode {
public:
@@ -5042,7 +5000,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
@@ -5145,7 +5103,7 @@ void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, scratch1, scratch2, flags);
} else {
Register size = ToRegister(instr->size());
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
index d51f62c90f..bb09abc1df 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
@@ -340,8 +340,6 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
- template <class T>
- void EmitVectorStoreICRegisters(T* instr);
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
Scope* const scope_;
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.cc b/deps/v8/src/crankshaft/mips/lithium-mips.cc
index a7880eee87..5533b8f59d 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.cc
@@ -311,15 +311,6 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
}
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(String::cast(*name())->ToCString().get());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -352,15 +343,6 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -887,7 +869,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->num_heap_slots() > 0) {
+ if (info_->scope()->NeedsContext()) {
result = MarkAsCall(result, instr);
}
return result;
@@ -1024,6 +1006,9 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
+ DCHECK_EQ(descriptor.GetParameterCount() +
+ LCallWithDescriptor::kImplicitRegisterParameterCount,
+ instr->OperandCount());
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -1032,15 +1017,20 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
// Context
LOperand* op = UseFixed(instr->OperandAt(1), cp);
ops.Add(op, zone());
- // Other register parameters
- for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
- i < instr->OperandCount(); i++) {
- op =
- UseFixed(instr->OperandAt(i),
- descriptor.GetRegisterParameter(
- i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+ // Load register parameters.
+ int i = 0;
+ for (; i < descriptor.GetRegisterParameterCount(); i++) {
+ op = UseFixed(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+ descriptor.GetRegisterParameter(i));
ops.Add(op, zone());
}
+ // Push stack parameters.
+ for (; i < descriptor.GetParameterCount(); i++) {
+ op = UseAny(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+ AddInstruction(new (zone()) LPushArgument(op), instr);
+ }
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
@@ -2127,26 +2117,6 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
- LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
- DCHECK(instr->object()->representation().IsTagged());
- DCHECK(instr->key()->representation().IsTagged());
- DCHECK(instr->value()->representation().IsTagged());
-
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreKeyedGeneric* result =
- new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2223,20 +2193,6 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreNamedGeneric* result =
- new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.h b/deps/v8/src/crankshaft/mips/lithium-mips.h
index 9711c9a6db..f49fb93c59 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.h
@@ -131,9 +131,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreKeyed) \
- V(StoreKeyedGeneric) \
V(StoreNamedField) \
- V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
@@ -1969,33 +1967,6 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
- LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Object> name() const { return hydrogen()->name(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
@@ -2026,34 +1997,6 @@ class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
- LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* value, LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- inputs_[3] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
index 924f552ab0..5f93e55fde 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -147,7 +147,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Possibly allocate a local context.
- if (info()->scope()->num_heap_slots() > 0) {
+ if (info()->scope()->NeedsContext()) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in a1.
@@ -155,7 +155,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
if (info()->scope()->is_script_scope()) {
__ push(a1);
- __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ Push(info()->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
@@ -2623,20 +2623,6 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = ToRegister(instr->temp_slot());
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ li(vector_register, vector);
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ li(slot_register, Operand(Smi::FromInt(index)));
-}
-
-
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->result()).is(v0));
@@ -3655,7 +3641,9 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
// Math.sqrt(-Infinity) == NaN
Label done;
__ Move(temp, static_cast<double>(-V8_INFINITY));
+ // Set up Infinity.
__ Neg_d(result, temp);
+ // result is overwritten if the branch is not taken.
__ BranchF(&done, NULL, eq, temp, input);
// Add +0 to convert -0 to +0.
@@ -4013,21 +4001,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
- __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic =
- CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
Operand operand((int64_t)0);
@@ -4260,21 +4233,6 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
}
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
class DeferredMaybeGrowElements final : public LDeferredCode {
public:
@@ -5248,7 +5206,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
@@ -5353,7 +5311,7 @@ void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, scratch1, scratch2, flags);
} else {
Register size = ToRegister(instr->size());
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
index 41d8b2c031..aaa2e6be17 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -343,8 +343,6 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
- template <class T>
- void EmitVectorStoreICRegisters(T* instr);
ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
Scope* const scope_;
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
index 922f12ada8..0855754d31 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
@@ -311,15 +311,6 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
}
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(String::cast(*name())->ToCString().get());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -352,15 +343,6 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -887,7 +869,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->num_heap_slots() > 0) {
+ if (info_->scope()->NeedsContext()) {
result = MarkAsCall(result, instr);
}
return result;
@@ -1024,6 +1006,9 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
+ DCHECK_EQ(descriptor.GetParameterCount() +
+ LCallWithDescriptor::kImplicitRegisterParameterCount,
+ instr->OperandCount());
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -1032,15 +1017,20 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
// Context
LOperand* op = UseFixed(instr->OperandAt(1), cp);
ops.Add(op, zone());
- // Other register parameters
- for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
- i < instr->OperandCount(); i++) {
- op =
- UseFixed(instr->OperandAt(i),
- descriptor.GetRegisterParameter(
- i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+ // Load register parameters.
+ int i = 0;
+ for (; i < descriptor.GetRegisterParameterCount(); i++) {
+ op = UseFixed(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+ descriptor.GetRegisterParameter(i));
ops.Add(op, zone());
}
+ // Push stack parameters.
+ for (; i < descriptor.GetParameterCount(); i++) {
+ op = UseAny(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+ AddInstruction(new (zone()) LPushArgument(op), instr);
+ }
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
@@ -2132,26 +2122,6 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
- LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
- DCHECK(instr->object()->representation().IsTagged());
- DCHECK(instr->key()->representation().IsTagged());
- DCHECK(instr->value()->representation().IsTagged());
-
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreKeyedGeneric* result =
- new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2228,20 +2198,6 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreNamedGeneric* result =
- new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
index f8b5c48885..7bc89afd46 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
@@ -133,9 +133,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreKeyed) \
- V(StoreKeyedGeneric) \
V(StoreNamedField) \
- V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
@@ -2015,33 +2013,6 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
- LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Object> name() const { return hydrogen()->name(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
@@ -2072,34 +2043,6 @@ class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
- LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* value, LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- inputs_[3] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
index e1203b86a4..321c39355f 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -158,7 +158,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Possibly allocate a local context.
- if (info()->scope()->num_heap_slots() > 0) {
+ if (info()->scope()->NeedsContext()) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in r4.
@@ -166,7 +166,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
if (info()->scope()->is_script_scope()) {
__ push(r4);
- __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ Push(info()->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
@@ -2677,20 +2677,6 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = ToRegister(instr->temp_slot());
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ Move(vector_register, vector);
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
-}
-
-
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->result()).is(r3));
@@ -4085,21 +4071,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
- __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic =
- CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Representation representation = instr->hydrogen()->length()->representation();
DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
@@ -4110,7 +4081,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
Register index = ToRegister(instr->index());
if (representation.IsSmi()) {
- __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
+ __ CmplSmiLiteral(index, Smi::FromInt(length), r0);
} else {
__ Cmplwi(index, Operand(length), r0);
}
@@ -4119,7 +4090,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
Register length = ToRegister(instr->length());
if (representation.IsSmi()) {
- __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
+ __ CmplSmiLiteral(length, Smi::FromInt(index), r0);
} else {
__ Cmplwi(length, Operand(index), r0);
}
@@ -4344,21 +4315,6 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
}
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
class DeferredMaybeGrowElements final : public LDeferredCode {
public:
@@ -5324,7 +5280,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
@@ -5430,7 +5386,7 @@ void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, scratch1, scratch2, flags);
} else {
Register size = ToRegister(instr->size());
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
index fe212d4034..a4a90a7184 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -277,8 +277,6 @@ class LCodeGen : public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
- template <class T>
- void EmitVectorStoreICRegisters(T* instr);
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
Scope* const scope_;
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
index 958620c38a..738cf231ce 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
@@ -317,15 +317,6 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
}
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(String::cast(*name())->ToCString().get());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -358,15 +349,6 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -892,7 +874,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->num_heap_slots() > 0) {
+ if (info_->scope()->NeedsContext()) {
result = MarkAsCall(result, instr);
}
return result;
@@ -1030,6 +1012,9 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
+ DCHECK_EQ(descriptor.GetParameterCount() +
+ LCallWithDescriptor::kImplicitRegisterParameterCount,
+ instr->OperandCount());
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -1038,15 +1023,20 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
// Context
LOperand* op = UseFixed(instr->OperandAt(1), cp);
ops.Add(op, zone());
- // Other register parameters
- for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
- i < instr->OperandCount(); i++) {
- op =
- UseFixed(instr->OperandAt(i),
- descriptor.GetRegisterParameter(
- i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+ // Load register parameters.
+ int i = 0;
+ for (; i < descriptor.GetRegisterParameterCount(); i++) {
+ op = UseFixed(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+ descriptor.GetRegisterParameter(i));
ops.Add(op, zone());
}
+ // Push stack parameters.
+ for (; i < descriptor.GetParameterCount(); i++) {
+ op = UseAny(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+ AddInstruction(new (zone()) LPushArgument(op), instr);
+ }
LCallWithDescriptor* result =
new (zone()) LCallWithDescriptor(descriptor, ops, zone());
@@ -2150,26 +2140,6 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
- LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
- DCHECK(instr->object()->representation().IsTagged());
- DCHECK(instr->key()->representation().IsTagged());
- DCHECK(instr->value()->representation().IsTagged());
-
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreKeyedGeneric* result =
- new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2245,19 +2215,6 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
- LStoreNamedGeneric* result =
- new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r4);
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
index f26bfc5e87..626f00ab8e 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
@@ -134,9 +134,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreKeyed) \
- V(StoreKeyedGeneric) \
V(StoreNamedField) \
- V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
@@ -1954,33 +1952,6 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
- LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Object> name() const { return hydrogen()->name(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
@@ -2015,34 +1986,6 @@ class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
- LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* value, LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- inputs_[3] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object, LOperand* context,
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
index ec2a85a07b..71881ada39 100644
--- a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
+++ b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -66,8 +66,8 @@ void LCodeGen::SaveCallerDoubles() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ std(DoubleRegister::from_code(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
+ __ StoreDouble(DoubleRegister::from_code(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
}
@@ -81,8 +81,8 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ ld(DoubleRegister::from_code(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
+ __ LoadDouble(DoubleRegister::from_code(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
}
@@ -148,7 +148,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Possibly allocate a local context.
- if (info()->scope()->num_heap_slots() > 0) {
+ if (info()->scope()->NeedsContext()) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in r3.
@@ -156,7 +156,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
if (info()->scope()->is_script_scope()) {
__ push(r3);
- __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ Push(info()->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
@@ -2089,7 +2089,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, al);
} else if (type.IsHeapNumber()) {
DCHECK(!info()->IsStub());
- __ ld(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ LoadDouble(dbl_scratch,
+ FieldMemOperand(reg, HeapNumber::kValueOffset));
// Test the double value. Zero and NaN are false.
__ lzdr(kDoubleRegZero);
__ cdbr(dbl_scratch, kDoubleRegZero);
@@ -2652,19 +2653,6 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
__ LoadSmiLiteral(slot_register, Smi::FromInt(index));
}
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = ToRegister(instr->temp_slot());
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ Move(vector_register, vector);
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
-}
-
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->result()).is(r2));
@@ -2739,7 +2727,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (instr->hydrogen()->representation().IsDouble()) {
DCHECK(access.IsInobject());
DoubleRegister result = ToDoubleRegister(instr->result());
- __ ld(result, FieldMemOperand(object, offset));
+ __ LoadDouble(result, FieldMemOperand(object, offset));
return;
}
@@ -2889,9 +2877,10 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
}
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
if (!use_scratch) {
- __ ld(result, MemOperand(external_pointer, base_offset));
+ __ LoadDouble(result, MemOperand(external_pointer, base_offset));
} else {
- __ ld(result, MemOperand(scratch0(), external_pointer, base_offset));
+ __ LoadDouble(result,
+ MemOperand(scratch0(), external_pointer, base_offset));
}
}
} else {
@@ -2986,9 +2975,9 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
}
if (!use_scratch) {
- __ ld(result, MemOperand(elements, base_offset));
+ __ LoadDouble(result, MemOperand(elements, base_offset));
} else {
- __ ld(result, MemOperand(scratch, elements, base_offset));
+ __ LoadDouble(result, MemOperand(scratch, elements, base_offset));
}
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -3919,7 +3908,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
DCHECK(!hinstr->NeedsWriteBarrier());
DoubleRegister value = ToDoubleRegister(instr->value());
DCHECK(offset >= 0);
- __ std(value, FieldMemOperand(object, offset));
+ __ StoreDouble(value, FieldMemOperand(object, offset));
return;
}
@@ -3944,7 +3933,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
DoubleRegister value = ToDoubleRegister(instr->value());
- __ std(value, FieldMemOperand(object, offset));
+ __ StoreDouble(value, FieldMemOperand(object, offset));
if (hinstr->NeedsWriteBarrier()) {
record_value = ToRegister(instr->value());
}
@@ -3984,31 +3973,18 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
}
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
- __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic =
- CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Representation representation = instr->hydrogen()->length()->representation();
DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
DCHECK(representation.IsSmiOrInteger32());
+ Register temp = scratch0();
Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
if (instr->length()->IsConstantOperand()) {
int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
Register index = ToRegister(instr->index());
if (representation.IsSmi()) {
- __ CmpLogicalP(index, Operand(Smi::FromInt(length)));
+ __ CmpLogicalSmiLiteral(index, Smi::FromInt(length), temp);
} else {
__ CmpLogical32(index, Operand(length));
}
@@ -4017,7 +3993,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
Register length = ToRegister(instr->length());
if (representation.IsSmi()) {
- __ CmpLogicalP(length, Operand(Smi::FromInt(index)));
+ __ CmpLogicalSmiLiteral(length, Smi::FromInt(index), temp);
} else {
__ CmpLogical32(length, Operand(index));
}
@@ -4187,14 +4163,15 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ CanonicalizeNaN(double_scratch, value);
DCHECK(address_offset >= 0);
if (use_scratch)
- __ std(double_scratch, MemOperand(scratch, elements, address_offset));
+ __ StoreDouble(double_scratch,
+ MemOperand(scratch, elements, address_offset));
else
- __ std(double_scratch, MemOperand(elements, address_offset));
+ __ StoreDouble(double_scratch, MemOperand(elements, address_offset));
} else {
if (use_scratch)
- __ std(value, MemOperand(scratch, elements, address_offset));
+ __ StoreDouble(value, MemOperand(scratch, elements, address_offset));
else
- __ std(value, MemOperand(elements, address_offset));
+ __ StoreDouble(value, MemOperand(elements, address_offset));
}
}
@@ -4286,20 +4263,6 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
}
}
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
class DeferredMaybeGrowElements final : public LDeferredCode {
public:
@@ -4789,7 +4752,8 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
}
// load heap number
- __ ld(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ LoadDouble(result_reg,
+ FieldMemOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
__ TestDoubleIsMinusZero(result_reg, scratch, ip);
DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
@@ -4801,7 +4765,8 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ ld(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ LoadDouble(result_reg,
+ FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ b(&done, Label::kNear);
}
} else {
@@ -4862,8 +4827,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
- __ ld(double_scratch2,
- FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ LoadDouble(double_scratch2,
+ FieldMemOperand(input_reg, HeapNumber::kValueOffset));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// preserve heap number pointer in scratch2 for minus zero check below
__ LoadRR(scratch2, input_reg);
@@ -5177,7 +5142,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ ld(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ LoadDouble(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
__ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
__ b(&done, Label::kNear);
@@ -5224,7 +5189,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
@@ -5337,7 +5302,7 @@ void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, scratch1, scratch2, flags);
} else {
Register size = ToRegister(instr->size());
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
index e5df255f4d..30e9d2b997 100644
--- a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
+++ b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
@@ -276,8 +276,6 @@ class LCodeGen : public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
- template <class T>
- void EmitVectorStoreICRegisters(T* instr);
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
Scope* const scope_;
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.cc b/deps/v8/src/crankshaft/s390/lithium-s390.cc
index 3048e4c8b5..bf9dfd56ba 100644
--- a/deps/v8/src/crankshaft/s390/lithium-s390.cc
+++ b/deps/v8/src/crankshaft/s390/lithium-s390.cc
@@ -287,14 +287,6 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
value()->PrintTo(stream);
}
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(String::cast(*name())->ToCString().get());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -325,14 +317,6 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
}
}
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -815,7 +799,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->num_heap_slots() > 0) {
+ if (info_->scope()->NeedsContext()) {
result = MarkAsCall(result, instr);
}
return result;
@@ -937,6 +921,9 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
+ DCHECK_EQ(descriptor.GetParameterCount() +
+ LCallWithDescriptor::kImplicitRegisterParameterCount,
+ instr->OperandCount());
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -945,15 +932,20 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
// Context
LOperand* op = UseFixed(instr->OperandAt(1), cp);
ops.Add(op, zone());
- // Other register parameters
- for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
- i < instr->OperandCount(); i++) {
- op =
- UseFixed(instr->OperandAt(i),
- descriptor.GetRegisterParameter(
- i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+ // Load register parameters.
+ int i = 0;
+ for (; i < descriptor.GetRegisterParameterCount(); i++) {
+ op = UseFixed(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+ descriptor.GetRegisterParameter(i));
ops.Add(op, zone());
}
+ // Push stack parameters.
+ for (; i < descriptor.GetParameterCount(); i++) {
+ op = UseAny(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+ AddInstruction(new (zone()) LPushArgument(op), instr);
+ }
LCallWithDescriptor* result =
new (zone()) LCallWithDescriptor(descriptor, ops, zone());
@@ -1968,25 +1960,6 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
- LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
- DCHECK(instr->object()->representation().IsTagged());
- DCHECK(instr->key()->representation().IsTagged());
- DCHECK(instr->value()->representation().IsTagged());
-
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreKeyedGeneric* result =
- new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
- return MarkAsCall(result, instr);
-}
-
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2058,18 +2031,6 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
return new (zone()) LStoreNamedField(obj, val, temp);
}
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* obj =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
- LStoreNamedGeneric* result =
- new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
- return MarkAsCall(result, instr);
-}
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r3);
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.h b/deps/v8/src/crankshaft/s390/lithium-s390.h
index 1f1e520067..70670ac3e8 100644
--- a/deps/v8/src/crankshaft/s390/lithium-s390.h
+++ b/deps/v8/src/crankshaft/s390/lithium-s390.h
@@ -132,9 +132,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreKeyed) \
- V(StoreKeyedGeneric) \
V(StoreNamedField) \
- V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
@@ -1822,32 +1820,6 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
}
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
- LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Object> name() const { return hydrogen()->name(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
@@ -1881,33 +1853,6 @@ class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
- LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* value, LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- inputs_[3] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object, LOperand* context,
diff --git a/deps/v8/src/crankshaft/typing.cc b/deps/v8/src/crankshaft/typing.cc
index 5961838711..d2b56e255b 100644
--- a/deps/v8/src/crankshaft/typing.cc
+++ b/deps/v8/src/crankshaft/typing.cc
@@ -4,11 +4,12 @@
#include "src/crankshaft/typing.h"
+#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
-#include "src/frames.h"
+#include "src/ast/variables.h"
#include "src/frames-inl.h"
+#include "src/frames.h"
#include "src/ostreams.h"
-#include "src/parsing/parser.h" // for CompileTimeValue; TODO(rossberg): move
#include "src/splay-tree-inl.h"
namespace v8 {
@@ -33,20 +34,20 @@ AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
#ifdef OBJECT_PRINT
- static void PrintObserved(Variable* var, Object* value, Type* type) {
- OFStream os(stdout);
- os << " observed " << (var->IsParameter() ? "param" : "local") << " ";
- var->name()->Print(os);
- os << " : " << Brief(value) << " -> ";
- type->PrintTo(os);
- os << std::endl;
+static void PrintObserved(Variable* var, Object* value, AstType* type) {
+ OFStream os(stdout);
+ os << " observed " << (var->IsParameter() ? "param" : "local") << " ";
+ var->name()->Print(os);
+ os << " : " << Brief(value) << " -> ";
+ type->PrintTo(os);
+ os << std::endl;
}
#endif // OBJECT_PRINT
Effect AstTyper::ObservedOnStack(Object* value) {
- Type* lower = Type::NowOf(value, zone());
- return Effect(Bounds(lower, Type::Any()));
+ AstType* lower = AstType::NowOf(value, zone());
+ return Effect(AstBounds(lower, AstType::Any()));
}
@@ -84,15 +85,16 @@ void AstTyper::ObserveTypesAtOsrEntry(IterationStatement* stmt) {
store_.LookupBounds(parameter_index(i)).lower);
}
- ZoneList<Variable*> local_vars(locals, zone());
- ZoneList<Variable*> context_vars(scope_->ContextLocalCount(), zone());
- ZoneList<Variable*> global_vars(scope_->ContextGlobalCount(), zone());
- scope_->CollectStackAndContextLocals(&local_vars, &context_vars,
- &global_vars);
- for (int i = 0; i < locals; i++) {
- PrintObserved(local_vars.at(i),
- frame->GetExpression(i),
- store_.LookupBounds(stack_local_index(i)).lower);
+ ZoneList<Variable*>* local_vars = scope_->locals();
+ int local_index = 0;
+ for (int i = 0; i < local_vars->length(); i++) {
+ Variable* var = local_vars->at(i);
+ if (var->IsStackLocal()) {
+ PrintObserved(
+ var, frame->GetExpression(local_index),
+ store_.LookupBounds(stack_local_index(local_index)).lower);
+ local_index++;
+ }
}
}
#endif // OBJECT_PRINT
@@ -205,11 +207,12 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
if (!clause->is_default()) {
Expression* label = clause->label();
// Collect type feedback.
- Type* tag_type;
- Type* label_type;
- Type* combined_type;
+ AstType* tag_type;
+ AstType* label_type;
+ AstType* combined_type;
oracle()->CompareType(clause->CompareId(),
- &tag_type, &label_type, &combined_type);
+ clause->CompareOperationFeedbackSlot(), &tag_type,
+ &label_type, &combined_type);
NarrowLowerType(stmt->tag(), tag_type);
NarrowLowerType(label, label_type);
clause->set_compare_type(combined_type);
@@ -366,8 +369,8 @@ void AstTyper::VisitConditional(Conditional* expr) {
store_.Seq(then_effects);
NarrowType(expr,
- Bounds::Either(bounds_->get(expr->then_expression()),
- bounds_->get(expr->else_expression()), zone()));
+ AstBounds::Either(bounds_->get(expr->then_expression()),
+ bounds_->get(expr->else_expression()), zone()));
}
@@ -380,14 +383,14 @@ void AstTyper::VisitVariableProxy(VariableProxy* expr) {
void AstTyper::VisitLiteral(Literal* expr) {
- Type* type = Type::Constant(expr->value(), zone());
- NarrowType(expr, Bounds(type));
+ AstType* type = AstType::Constant(expr->value(), zone());
+ NarrowType(expr, AstBounds(type));
}
void AstTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
// TODO(rossberg): Reintroduce RegExp type.
- NarrowType(expr, Bounds(Type::Object()));
+ NarrowType(expr, AstBounds(AstType::Object()));
}
@@ -415,7 +418,7 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
RECURSE(Visit(prop->value()));
}
- NarrowType(expr, Bounds(Type::Object()));
+ NarrowType(expr, AstBounds(AstType::Object()));
}
@@ -426,7 +429,7 @@ void AstTyper::VisitArrayLiteral(ArrayLiteral* expr) {
RECURSE(Visit(value));
}
- NarrowType(expr, Bounds(Type::Object()));
+ NarrowType(expr, AstBounds(AstType::Object()));
}
@@ -479,7 +482,7 @@ void AstTyper::VisitThrow(Throw* expr) {
RECURSE(Visit(expr->exception()));
// TODO(rossberg): is it worth having a non-termination effect?
- NarrowType(expr, Bounds(Type::None()));
+ NarrowType(expr, AstBounds(AstType::None()));
}
@@ -562,7 +565,7 @@ void AstTyper::VisitCallNew(CallNew* expr) {
RECURSE(Visit(arg));
}
- NarrowType(expr, Bounds(Type::None(), Type::Receiver()));
+ NarrowType(expr, AstBounds(AstType::None(), AstType::Receiver()));
}
@@ -589,13 +592,13 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::NOT:
case Token::DELETE:
- NarrowType(expr, Bounds(Type::Boolean()));
+ NarrowType(expr, AstBounds(AstType::Boolean()));
break;
case Token::VOID:
- NarrowType(expr, Bounds(Type::Undefined()));
+ NarrowType(expr, AstBounds(AstType::Undefined()));
break;
case Token::TYPEOF:
- NarrowType(expr, Bounds(Type::InternalizedString()));
+ NarrowType(expr, AstBounds(AstType::InternalizedString()));
break;
default:
UNREACHABLE();
@@ -612,12 +615,13 @@ void AstTyper::VisitCountOperation(CountOperation* expr) {
oracle()->CountReceiverTypes(slot, expr->GetReceiverTypes());
expr->set_store_mode(store_mode);
expr->set_key_type(key_type);
- expr->set_type(oracle()->CountType(expr->CountBinOpFeedbackId()));
+ expr->set_type(oracle()->CountType(expr->CountBinOpFeedbackId(),
+ expr->CountBinaryOpFeedbackSlot()));
// TODO(rossberg): merge the count type with the generic expression type.
RECURSE(Visit(expr->expression()));
- NarrowType(expr, Bounds(Type::SignedSmall(), Type::Number()));
+ NarrowType(expr, AstBounds(AstType::SignedSmall(), AstType::Number()));
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsStackAllocated()) {
@@ -625,17 +629,18 @@ void AstTyper::VisitCountOperation(CountOperation* expr) {
}
}
-
void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
// Collect type feedback.
- Type* type;
- Type* left_type;
- Type* right_type;
+ AstType* type;
+ AstType* left_type;
+ AstType* right_type;
Maybe<int> fixed_right_arg = Nothing<int>();
Handle<AllocationSite> allocation_site;
oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
- &left_type, &right_type, &type, &fixed_right_arg,
- &allocation_site, expr->op());
+ expr->BinaryOperationFeedbackSlot(), &left_type,
+ &right_type, &type, &fixed_right_arg, &allocation_site,
+ expr->op());
+
NarrowLowerType(expr, type);
NarrowLowerType(expr->left(), left_type);
NarrowLowerType(expr->right(), right_type);
@@ -662,19 +667,21 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
left_effects.Alt(right_effects);
store_.Seq(left_effects);
- NarrowType(expr, Bounds::Either(bounds_->get(expr->left()),
- bounds_->get(expr->right()), zone()));
+ NarrowType(expr, AstBounds::Either(bounds_->get(expr->left()),
+ bounds_->get(expr->right()), zone()));
break;
}
case Token::BIT_OR:
case Token::BIT_AND: {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- Type* upper = Type::Union(bounds_->get(expr->left()).upper,
- bounds_->get(expr->right()).upper, zone());
- if (!upper->Is(Type::Signed32())) upper = Type::Signed32();
- Type* lower = Type::Intersect(Type::SignedSmall(), upper, zone());
- NarrowType(expr, Bounds(lower, upper));
+ AstType* upper =
+ AstType::Union(bounds_->get(expr->left()).upper,
+ bounds_->get(expr->right()).upper, zone());
+ if (!upper->Is(AstType::Signed32())) upper = AstType::Signed32();
+ AstType* lower =
+ AstType::Intersect(AstType::SignedSmall(), upper, zone());
+ NarrowType(expr, AstBounds(lower, upper));
break;
}
case Token::BIT_XOR:
@@ -682,7 +689,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::SAR:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::SignedSmall(), Type::Signed32()));
+ NarrowType(expr, AstBounds(AstType::SignedSmall(), AstType::Signed32()));
break;
case Token::SHR:
RECURSE(Visit(expr->left()));
@@ -690,28 +697,29 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
// TODO(rossberg): The upper bound would be Unsigned32, but since there
// is no 'positive Smi' type for the lower bound, we use the smallest
// union of Smi and Unsigned32 as upper bound instead.
- NarrowType(expr, Bounds(Type::SignedSmall(), Type::Number()));
+ NarrowType(expr, AstBounds(AstType::SignedSmall(), AstType::Number()));
break;
case Token::ADD: {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- Bounds l = bounds_->get(expr->left());
- Bounds r = bounds_->get(expr->right());
- Type* lower =
+ AstBounds l = bounds_->get(expr->left());
+ AstBounds r = bounds_->get(expr->right());
+ AstType* lower =
!l.lower->IsInhabited() || !r.lower->IsInhabited()
- ? Type::None()
- : l.lower->Is(Type::String()) || r.lower->Is(Type::String())
- ? Type::String()
- : l.lower->Is(Type::Number()) && r.lower->Is(Type::Number())
- ? Type::SignedSmall()
- : Type::None();
- Type* upper =
- l.upper->Is(Type::String()) || r.upper->Is(Type::String())
- ? Type::String()
- : l.upper->Is(Type::Number()) && r.upper->Is(Type::Number())
- ? Type::Number()
- : Type::NumberOrString();
- NarrowType(expr, Bounds(lower, upper));
+ ? AstType::None()
+ : l.lower->Is(AstType::String()) || r.lower->Is(AstType::String())
+ ? AstType::String()
+ : l.lower->Is(AstType::Number()) &&
+ r.lower->Is(AstType::Number())
+ ? AstType::SignedSmall()
+ : AstType::None();
+ AstType* upper =
+ l.upper->Is(AstType::String()) || r.upper->Is(AstType::String())
+ ? AstType::String()
+ : l.upper->Is(AstType::Number()) && r.upper->Is(AstType::Number())
+ ? AstType::Number()
+ : AstType::NumberOrString();
+ NarrowType(expr, AstBounds(lower, upper));
break;
}
case Token::SUB:
@@ -720,7 +728,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::MOD:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::SignedSmall(), Type::Number()));
+ NarrowType(expr, AstBounds(AstType::SignedSmall(), AstType::Number()));
break;
default:
UNREACHABLE();
@@ -730,11 +738,12 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
void AstTyper::VisitCompareOperation(CompareOperation* expr) {
// Collect type feedback.
- Type* left_type;
- Type* right_type;
- Type* combined_type;
+ AstType* left_type;
+ AstType* right_type;
+ AstType* combined_type;
oracle()->CompareType(expr->CompareOperationFeedbackId(),
- &left_type, &right_type, &combined_type);
+ expr->CompareOperationFeedbackSlot(), &left_type,
+ &right_type, &combined_type);
NarrowLowerType(expr->left(), left_type);
NarrowLowerType(expr->right(), right_type);
expr->set_combined_type(combined_type);
@@ -742,7 +751,7 @@ void AstTyper::VisitCompareOperation(CompareOperation* expr) {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Boolean()));
+ NarrowType(expr, AstBounds(AstType::Boolean()));
}
@@ -767,6 +776,14 @@ void AstTyper::VisitRewritableExpression(RewritableExpression* expr) {
Visit(expr->expression());
}
+int AstTyper::variable_index(Variable* var) {
+ // Stack locals have the range [0 .. l]
+ // Parameters have the range [-1 .. p]
+ // We map this to [-p-2 .. -1, 0 .. l]
+ return var->IsStackLocal()
+ ? stack_local_index(var->index())
+ : var->IsParameter() ? parameter_index(var->index()) : kNoVar;
+}
void AstTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
for (int i = 0; i < decls->length(); ++i) {
diff --git a/deps/v8/src/crankshaft/typing.h b/deps/v8/src/crankshaft/typing.h
index 94340c5a74..eb88634777 100644
--- a/deps/v8/src/crankshaft/typing.h
+++ b/deps/v8/src/crankshaft/typing.h
@@ -7,16 +7,18 @@
#include "src/allocation.h"
#include "src/ast/ast-type-bounds.h"
-#include "src/ast/scopes.h"
+#include "src/ast/ast-types.h"
+#include "src/ast/ast.h"
#include "src/ast/variables.h"
#include "src/effects.h"
#include "src/type-info.h"
-#include "src/types.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
+class DeclarationScope;
+class Isolate;
class FunctionLiteral;
class AstTyper final : public AstVisitor<AstTyper> {
@@ -49,11 +51,11 @@ class AstTyper final : public AstVisitor<AstTyper> {
Zone* zone() const { return zone_; }
TypeFeedbackOracle* oracle() { return &oracle_; }
- void NarrowType(Expression* e, Bounds b) {
- bounds_->set(e, Bounds::Both(bounds_->get(e), b, zone()));
+ void NarrowType(Expression* e, AstBounds b) {
+ bounds_->set(e, AstBounds::Both(bounds_->get(e), b, zone()));
}
- void NarrowLowerType(Expression* e, Type* t) {
- bounds_->set(e, Bounds::NarrowLower(bounds_->get(e), t, zone()));
+ void NarrowLowerType(Expression* e, AstType* t) {
+ bounds_->set(e, AstBounds::NarrowLower(bounds_->get(e), t, zone()));
}
Effects EnterEffects() {
@@ -65,13 +67,7 @@ class AstTyper final : public AstVisitor<AstTyper> {
int parameter_index(int index) { return -index - 2; }
int stack_local_index(int index) { return index; }
- int variable_index(Variable* var) {
- // Stack locals have the range [0 .. l]
- // Parameters have the range [-1 .. p]
- // We map this to [-p-2 .. -1, 0 .. l]
- return var->IsStackLocal() ? stack_local_index(var->index()) :
- var->IsParameter() ? parameter_index(var->index()) : kNoVar;
- }
+ int variable_index(Variable* var);
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void VisitStatements(ZoneList<Statement*>* statements);
diff --git a/deps/v8/src/crankshaft/unique.h b/deps/v8/src/crankshaft/unique.h
index 54abfa7710..4c6a0976f8 100644
--- a/deps/v8/src/crankshaft/unique.h
+++ b/deps/v8/src/crankshaft/unique.h
@@ -11,7 +11,7 @@
#include "src/base/functional.h"
#include "src/handles.h"
#include "src/utils.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
index 66046a4e68..50e2aa0915 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -167,7 +167,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Possibly allocate a local context.
- if (info_->scope()->num_heap_slots() > 0) {
+ if (info_->scope()->NeedsContext()) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in rdi.
@@ -175,7 +175,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
if (info()->scope()->is_script_scope()) {
__ Push(rdi);
- __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ Push(info()->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
@@ -2539,20 +2539,6 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = ToRegister(instr->temp_slot());
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ Move(vector_register, vector);
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ Move(slot_register, Smi::FromInt(index));
-}
-
-
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->result()).is(rax));
@@ -3902,21 +3888,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
- __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
- Handle<Code> ic =
- CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Representation representation = instr->hydrogen()->length()->representation();
DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
@@ -4158,21 +4129,6 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
}
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
class DeferredMaybeGrowElements final : public LDeferredCode {
public:
@@ -5110,7 +5066,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
@@ -5153,7 +5109,7 @@ void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, temp, flags);
} else {
Register size = ToRegister(instr->size());
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
index 22c39ad088..22a32a147d 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
@@ -297,8 +297,6 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
- template <class T>
- void EmitVectorStoreICRegisters(T* instr);
#ifdef _MSC_VER
// On windows, you may not access the stack more than one page below
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.cc b/deps/v8/src/crankshaft/x64/lithium-x64.cc
index 42451690af..18fb5d4d09 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.cc
@@ -348,15 +348,6 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
}
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(String::cast(*name())->ToCString().get());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -389,15 +380,6 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -907,7 +889,7 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->num_heap_slots() > 0) {
+ if (info_->scope()->NeedsContext()) {
result = MarkAsCall(result, instr);
}
return result;
@@ -1042,6 +1024,9 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
+ DCHECK_EQ(descriptor.GetParameterCount() +
+ LCallWithDescriptor::kImplicitRegisterParameterCount,
+ instr->OperandCount());
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
@@ -1050,15 +1035,20 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
// Context
LOperand* op = UseFixed(instr->OperandAt(1), rsi);
ops.Add(op, zone());
- // Other register parameters
- for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
- i < instr->OperandCount(); i++) {
- op =
- UseFixed(instr->OperandAt(i),
- descriptor.GetRegisterParameter(
- i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+ // Load register parameters.
+ int i = 0;
+ for (; i < descriptor.GetRegisterParameterCount(); i++) {
+ op = UseFixed(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+ descriptor.GetRegisterParameter(i));
ops.Add(op, zone());
}
+ // Push stack parameters.
+ for (; i < descriptor.GetParameterCount(); i++) {
+ op = UseAny(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+ AddInstruction(new (zone()) LPushArgument(op), instr);
+ }
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
@@ -2223,26 +2213,6 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* object =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
- LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
- DCHECK(instr->object()->representation().IsTagged());
- DCHECK(instr->key()->representation().IsTagged());
- DCHECK(instr->value()->representation().IsTagged());
-
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreKeyedGeneric* result = new (zone())
- LStoreKeyedGeneric(context, object, key, value, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2337,20 +2307,6 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* object =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreNamedGeneric* result =
- new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* left = UseFixed(instr->left(), rdx);
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.h b/deps/v8/src/crankshaft/x64/lithium-x64.h
index 5c0ce04a8a..e7eaa01529 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.h
@@ -132,9 +132,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreKeyed) \
- V(StoreKeyedGeneric) \
V(StoreNamedField) \
- V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
@@ -2013,33 +2011,6 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
- LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Object> name() const { return hydrogen()->name(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
@@ -2068,34 +2039,6 @@ class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
- LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* value, LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- inputs_[3] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
index 1a42d5b41b..2d597d4c3b 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -134,7 +134,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Possibly allocate a local context.
- if (info_->scope()->num_heap_slots() > 0) {
+ if (info_->scope()->NeedsContext()) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in edi.
@@ -142,7 +142,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
if (info()->scope()->is_script_scope()) {
__ push(edi);
- __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ Push(info()->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
@@ -2681,20 +2681,6 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = ToRegister(instr->temp_slot());
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ mov(vector_register, vector);
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ mov(slot_register, Immediate(Smi::FromInt(index)));
-}
-
-
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->result()).is(eax));
@@ -3703,7 +3689,9 @@ void LCodeGen::DoMathCos(LMathCos* instr) {
__ PrepareCallCFunction(2, eax);
__ fstp_d(MemOperand(esp, 0));
X87PrepareToWrite(result);
+ __ X87SetFPUCW(0x027F);
__ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2);
+ __ X87SetFPUCW(0x037F);
// Return value is in st(0) on ia32.
X87CommitWrite(result);
}
@@ -3717,7 +3705,9 @@ void LCodeGen::DoMathSin(LMathSin* instr) {
__ PrepareCallCFunction(2, eax);
__ fstp_d(MemOperand(esp, 0));
X87PrepareToWrite(result);
+ __ X87SetFPUCW(0x027F);
__ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2);
+ __ X87SetFPUCW(0x037F);
// Return value is in st(0) on ia32.
X87CommitWrite(result);
}
@@ -3976,21 +3966,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
-
- __ mov(StoreDescriptor::NameRegister(), instr->name());
- Handle<Code> ic =
- CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
if (instr->index()->IsConstantOperand()) {
@@ -4199,21 +4174,6 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
}
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
-
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
@@ -5315,7 +5275,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
@@ -5358,7 +5318,7 @@ void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, temp, flags);
} else {
Register size = ToRegister(instr->size());
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
index cdf02f3f8c..850f330900 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
@@ -323,8 +323,6 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
- template <class T>
- void EmitVectorStoreICRegisters(T* instr);
void EmitReturn(LReturn* instr);
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.cc b/deps/v8/src/crankshaft/x87/lithium-x87.cc
index f614b93c9c..a319c0c718 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.cc
@@ -362,15 +362,6 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
}
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(String::cast(*name())->ToCString().get());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -403,15 +394,6 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
@@ -925,7 +907,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->num_heap_slots() > 0) {
+ if (info_->scope()->NeedsContext()) {
result = MarkAsCall(result, instr);
}
return result;
@@ -1071,6 +1053,10 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
+ DCHECK_EQ(descriptor.GetParameterCount() +
+ LCallWithDescriptor::kImplicitRegisterParameterCount,
+ instr->OperandCount());
+
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
// Target
@@ -1078,15 +1064,20 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
// Context
LOperand* op = UseFixed(instr->OperandAt(1), esi);
ops.Add(op, zone());
- // Other register parameters
- for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
- i < instr->OperandCount(); i++) {
- op =
- UseFixed(instr->OperandAt(i),
- descriptor.GetRegisterParameter(
- i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+ // Load register parameters.
+ int i = 0;
+ for (; i < descriptor.GetRegisterParameterCount(); i++) {
+ op = UseFixed(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount),
+ descriptor.GetRegisterParameter(i));
ops.Add(op, zone());
}
+ // Push stack parameters.
+ for (; i < descriptor.GetParameterCount(); i++) {
+ op = UseAny(instr->OperandAt(
+ i + LCallWithDescriptor::kImplicitRegisterParameterCount));
+ AddInstruction(new (zone()) LPushArgument(op), instr);
+ }
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
@@ -2213,26 +2204,6 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
- LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
-
- DCHECK(instr->object()->representation().IsTagged());
- DCHECK(instr->key()->representation().IsTagged());
- DCHECK(instr->value()->representation().IsTagged());
-
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreKeyedGeneric* result = new (zone())
- LStoreKeyedGeneric(context, object, key, value, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2334,20 +2305,6 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object =
- UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
- LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
- LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
-
- LStoreNamedGeneric* result =
- new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.h b/deps/v8/src/crankshaft/x87/lithium-x87.h
index 3ef8f75523..e2b804322a 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.h
@@ -135,9 +135,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreKeyed) \
- V(StoreKeyedGeneric) \
V(StoreNamedField) \
- V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
@@ -2008,32 +2006,6 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
- public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
- LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
- Handle<Object> name() const { return hydrogen()->name(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val,
@@ -2064,34 +2036,6 @@ class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
- public:
- LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* value, LOperand* slot, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- inputs_[3] = value;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- void PrintDataTo(StringStream* stream) override;
-
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index a8af9de2d1..01801f80f6 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -2,30 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// Defined when linking against shared lib on Windows.
-#if defined(USING_V8_SHARED) && !defined(V8_SHARED)
-#define V8_SHARED
-#endif
-
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
-#ifdef V8_SHARED
-#include <assert.h>
-#endif // V8_SHARED
-
-#ifndef V8_SHARED
#include <algorithm>
#include <fstream>
+#include <map>
+#include <utility>
#include <vector>
-#endif // !V8_SHARED
-
-#ifdef V8_SHARED
-#include "include/v8-testing.h"
-#endif // V8_SHARED
#ifdef ENABLE_VTUNE_JIT_INTERFACE
#include "src/third_party/vtune/v8-vtune.h"
@@ -36,7 +22,6 @@
#include "include/libplatform/libplatform.h"
#include "include/libplatform/v8-tracing.h"
-#ifndef V8_SHARED
#include "src/api.h"
#include "src/base/cpu.h"
#include "src/base/debug/stack_trace.h"
@@ -48,7 +33,6 @@
#include "src/snapshot/natives.h"
#include "src/utils.h"
#include "src/v8.h"
-#endif // !V8_SHARED
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
@@ -72,9 +56,7 @@ namespace v8 {
namespace {
const int MB = 1024 * 1024;
-#ifndef V8_SHARED
const int kMaxWorkers = 50;
-#endif
class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
@@ -102,7 +84,6 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
};
-#ifndef V8_SHARED
// Predictable v8::Platform implementation. All background and foreground
// tasks are run immediately, delayed tasks are not executed at all.
class PredictablePlatform : public Platform {
@@ -136,6 +117,7 @@ class PredictablePlatform : public Platform {
return synthetic_time_in_sec_ += 0.00001;
}
+ using Platform::AddTraceEvent;
uint64_t AddTraceEvent(char phase, const uint8_t* categoryEnabledFlag,
const char* name, const char* scope, uint64_t id,
uint64_t bind_id, int numArgs, const char** argNames,
@@ -163,7 +145,6 @@ class PredictablePlatform : public Platform {
DISALLOW_COPY_AND_ASSIGN(PredictablePlatform);
};
-#endif // !V8_SHARED
v8::Platform* g_platform = NULL;
@@ -176,7 +157,6 @@ static Local<Value> Throw(Isolate* isolate, const char* message) {
}
-#ifndef V8_SHARED
bool FindInObjectList(Local<Object> object, const Shell::ObjectList& list) {
for (int i = 0; i < list.length(); ++i) {
if (list[i]->StrictEquals(object)) {
@@ -202,7 +182,6 @@ Worker* GetWorkerFromInternalField(Isolate* isolate, Local<Object> object) {
return worker;
}
-#endif // !V8_SHARED
} // namespace
@@ -370,7 +349,6 @@ class PerIsolateData {
};
-#ifndef V8_SHARED
CounterMap* Shell::counter_map_;
base::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
@@ -383,20 +361,17 @@ base::LazyMutex Shell::workers_mutex_;
bool Shell::allow_new_workers_ = true;
i::List<Worker*> Shell::workers_;
i::List<SharedArrayBuffer::Contents> Shell::externalized_shared_contents_;
-#endif // !V8_SHARED
Global<Context> Shell::evaluation_context_;
ArrayBuffer::Allocator* Shell::array_buffer_allocator;
ShellOptions Shell::options;
base::OnceType Shell::quit_once_ = V8_ONCE_INIT;
-#ifndef V8_SHARED
bool CounterMap::Match(void* key1, void* key2) {
const char* name1 = reinterpret_cast<const char*>(key1);
const char* name2 = reinterpret_cast<const char*>(key2);
return strcmp(name1, name2) == 0;
}
-#endif // !V8_SHARED
// Converts a V8 value to a C string.
@@ -460,18 +435,12 @@ ScriptCompiler::CachedData* CompileForCachedData(
// Compile a string within the current v8 context.
MaybeLocal<Script> Shell::CompileString(
Isolate* isolate, Local<String> source, Local<Value> name,
- ScriptCompiler::CompileOptions compile_options, SourceType source_type) {
+ ScriptCompiler::CompileOptions compile_options) {
Local<Context> context(isolate->GetCurrentContext());
ScriptOrigin origin(name);
- // TODO(adamk): Make use of compile options for Modules.
- if (compile_options == ScriptCompiler::kNoCompileOptions ||
- source_type == MODULE) {
+ if (compile_options == ScriptCompiler::kNoCompileOptions) {
ScriptCompiler::Source script_source(source, origin);
- return source_type == SCRIPT
- ? ScriptCompiler::Compile(context, &script_source,
- compile_options)
- : ScriptCompiler::CompileModule(context, &script_source,
- compile_options);
+ return ScriptCompiler::Compile(context, &script_source, compile_options);
}
ScriptCompiler::CachedData* data =
@@ -485,7 +454,6 @@ MaybeLocal<Script> Shell::CompileString(
DCHECK(false); // A new compile option?
}
if (data == NULL) compile_options = ScriptCompiler::kNoCompileOptions;
- DCHECK_EQ(SCRIPT, source_type);
MaybeLocal<Script> result =
ScriptCompiler::Compile(context, &cached_source, compile_options);
CHECK(data == NULL || !data->rejected);
@@ -496,7 +464,7 @@ MaybeLocal<Script> Shell::CompileString(
// Executes a string within the current v8 context.
bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Value> name, bool print_result,
- bool report_exceptions, SourceType source_type) {
+ bool report_exceptions) {
HandleScope handle_scope(isolate);
TryCatch try_catch(isolate);
try_catch.SetVerbose(true);
@@ -508,8 +476,8 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Context>::New(isolate, data->realms_[data->realm_current_]);
Context::Scope context_scope(realm);
Local<Script> script;
- if (!Shell::CompileString(isolate, source, name, options.compile_options,
- source_type).ToLocal(&script)) {
+ if (!Shell::CompileString(isolate, source, name, options.compile_options)
+ .ToLocal(&script)) {
// Print errors that happened during compilation.
if (report_exceptions) ReportException(isolate, &try_catch);
return false;
@@ -527,9 +495,7 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
}
DCHECK(!try_catch.HasCaught());
if (print_result) {
-#if !defined(V8_SHARED)
if (options.test_shell) {
-#endif
if (!result->IsUndefined()) {
// If all went well and the result wasn't undefined then print
// the returned value.
@@ -537,17 +503,160 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
fwrite(*str, sizeof(**str), str.length(), stdout);
printf("\n");
}
-#if !defined(V8_SHARED)
} else {
v8::String::Utf8Value str(Stringify(isolate, result));
fwrite(*str, sizeof(**str), str.length(), stdout);
printf("\n");
}
-#endif
}
return true;
}
+namespace {
+
+std::string ToSTLString(Local<String> v8_str) {
+ String::Utf8Value utf8(v8_str);
+ // Should not be able to fail since the input is a String.
+ CHECK(*utf8);
+ return *utf8;
+}
+
+bool IsAbsolutePath(const std::string& path) {
+#if defined(_WIN32) || defined(_WIN64)
+ // TODO(adamk): This is an incorrect approximation, but should
+ // work for all our test-running cases.
+ return path.find(':') != std::string::npos;
+#else
+ return path[0] == '/';
+#endif
+}
+
+std::string GetWorkingDirectory() {
+#if defined(_WIN32) || defined(_WIN64)
+ char system_buffer[MAX_PATH];
+ // TODO(adamk): Support Unicode paths.
+ DWORD len = GetCurrentDirectoryA(MAX_PATH, system_buffer);
+ CHECK(len > 0);
+ return system_buffer;
+#else
+ char curdir[PATH_MAX];
+ CHECK_NOT_NULL(getcwd(curdir, PATH_MAX));
+ return curdir;
+#endif
+}
+
+// Returns the directory part of path, without the trailing '/'.
+std::string DirName(const std::string& path) {
+ DCHECK(IsAbsolutePath(path));
+ size_t last_slash = path.find_last_of('/');
+ DCHECK(last_slash != std::string::npos);
+ return path.substr(0, last_slash);
+}
+
+std::string EnsureAbsolutePath(const std::string& path,
+ const std::string& dir_name) {
+ return IsAbsolutePath(path) ? path : dir_name + '/' + path;
+}
+
+MaybeLocal<Module> ResolveModuleCallback(Local<Context> context,
+ Local<String> specifier,
+ Local<Module> referrer,
+ Local<Value> data) {
+ Isolate* isolate = context->GetIsolate();
+ auto module_map = static_cast<std::map<std::string, Global<Module>>*>(
+ External::Cast(*data)->Value());
+ Local<String> dir_name = Local<String>::Cast(referrer->GetEmbedderData());
+ std::string absolute_path =
+ EnsureAbsolutePath(ToSTLString(specifier), ToSTLString(dir_name));
+ auto it = module_map->find(absolute_path);
+ if (it != module_map->end()) {
+ return it->second.Get(isolate);
+ }
+ return MaybeLocal<Module>();
+}
+
+} // anonymous namespace
+
+MaybeLocal<Module> Shell::FetchModuleTree(
+ Isolate* isolate, const std::string& file_name,
+ std::map<std::string, Global<Module>>* module_map) {
+ DCHECK(IsAbsolutePath(file_name));
+ TryCatch try_catch(isolate);
+ try_catch.SetVerbose(true);
+ Local<String> source_text = ReadFile(isolate, file_name.c_str());
+ if (source_text.IsEmpty()) {
+ printf("Error reading '%s'\n", file_name.c_str());
+ Shell::Exit(1);
+ }
+ ScriptOrigin origin(
+ String::NewFromUtf8(isolate, file_name.c_str(), NewStringType::kNormal)
+ .ToLocalChecked());
+ ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module;
+ if (!ScriptCompiler::CompileModule(isolate, &source).ToLocal(&module)) {
+ ReportException(isolate, &try_catch);
+ return MaybeLocal<Module>();
+ }
+ module_map->insert(
+ std::make_pair(file_name, Global<Module>(isolate, module)));
+
+ std::string dir_name = DirName(file_name);
+ module->SetEmbedderData(
+ String::NewFromUtf8(isolate, dir_name.c_str(), NewStringType::kNormal)
+ .ToLocalChecked());
+
+ for (int i = 0, length = module->GetModuleRequestsLength(); i < length; ++i) {
+ Local<String> name = module->GetModuleRequest(i);
+ std::string absolute_path = EnsureAbsolutePath(ToSTLString(name), dir_name);
+ if (!module_map->count(absolute_path)) {
+ if (FetchModuleTree(isolate, absolute_path, module_map).IsEmpty()) {
+ return MaybeLocal<Module>();
+ }
+ }
+ }
+
+ return module;
+}
+
+bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
+ HandleScope handle_scope(isolate);
+
+ std::string absolute_path =
+ EnsureAbsolutePath(file_name, GetWorkingDirectory());
+ std::replace(absolute_path.begin(), absolute_path.end(), '\\', '/');
+
+ Local<Module> root_module;
+ std::map<std::string, Global<Module>> module_map;
+ if (!FetchModuleTree(isolate, absolute_path, &module_map)
+ .ToLocal(&root_module)) {
+ return false;
+ }
+
+ TryCatch try_catch(isolate);
+ try_catch.SetVerbose(true);
+
+ MaybeLocal<Value> maybe_result;
+ {
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ Local<Context> realm = data->realms_[data->realm_current_].Get(isolate);
+ Context::Scope context_scope(realm);
+
+ if (root_module->Instantiate(realm, ResolveModuleCallback,
+ External::New(isolate, &module_map))) {
+ maybe_result = root_module->Evaluate(realm);
+ EmptyMessageQueues(isolate);
+ }
+ }
+ Local<Value> result;
+ if (!maybe_result.ToLocal(&result)) {
+ DCHECK(try_catch.HasCaught());
+ // Print errors that happened during execution.
+ ReportException(isolate, &try_catch);
+ return false;
+ }
+ DCHECK(!try_catch.HasCaught());
+ return true;
+}
PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realm_count_ = 1;
@@ -595,7 +704,6 @@ int PerIsolateData::RealmIndexOrThrow(
}
-#ifndef V8_SHARED
// performance.now() returns a time stamp as double, measured in milliseconds.
// When FLAG_verify_predictable mode is enabled it returns result of
// v8::Platform::MonotonicallyIncreasingTime().
@@ -608,7 +716,6 @@ void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(delta.InMillisecondsF());
}
}
-#endif // !V8_SHARED
// Realm.current() returns the index of the currently active realm.
@@ -879,7 +986,6 @@ void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
-#ifndef V8_SHARED
void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
HandleScope handle_scope(isolate);
@@ -1001,16 +1107,13 @@ void Shell::WorkerTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
worker->Terminate();
}
-#endif // !V8_SHARED
void Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args) {
int exit_code = (*args)[0]
->Int32Value(args->GetIsolate()->GetCurrentContext())
.FromMaybe(0);
-#ifndef V8_SHARED
CleanupWorkers();
-#endif // !V8_SHARED
OnExit(args->GetIsolate());
Exit(exit_code);
}
@@ -1031,14 +1134,12 @@ void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
HandleScope handle_scope(isolate);
-#ifndef V8_SHARED
Local<Context> context;
bool enter_context = !isolate->InContext();
if (enter_context) {
context = Local<Context>::New(isolate, evaluation_context_);
context->Enter();
}
-#endif // !V8_SHARED
v8::String::Utf8Value exception(try_catch->Exception());
const char* exception_string = ToCString(exception);
Local<Message> message = try_catch->Message();
@@ -1082,13 +1183,10 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
}
}
printf("\n");
-#ifndef V8_SHARED
if (enter_context) context->Exit();
-#endif // !V8_SHARED
}
-#ifndef V8_SHARED
int32_t* Counter::Bind(const char* name, bool is_histogram) {
int i;
for (i = 0; i < kMaxNameSize - 1 && name[i]; i++)
@@ -1217,7 +1315,6 @@ Local<String> Shell::Stringify(Isolate* isolate, Local<Value> value) {
if (result.IsEmpty()) return String::Empty(isolate);
return result.ToLocalChecked().As<String>();
}
-#endif // !V8_SHARED
Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
@@ -1308,7 +1405,6 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
.ToLocalChecked(),
realm_template);
-#ifndef V8_SHARED
Local<ObjectTemplate> performance_template = ObjectTemplate::New(isolate);
performance_template->Set(
String::NewFromUtf8(isolate, "now", NewStringType::kNormal)
@@ -1347,7 +1443,6 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
String::NewFromUtf8(isolate, "Worker", NewStringType::kNormal)
.ToLocalChecked(),
worker_fun_template);
-#endif // !V8_SHARED
Local<ObjectTemplate> os_templ = ObjectTemplate::New(isolate);
AddOSMethods(isolate, os_templ);
@@ -1365,21 +1460,17 @@ static void EmptyMessageCallback(Local<Message> message, Local<Value> error) {
}
void Shell::Initialize(Isolate* isolate) {
-#ifndef V8_SHARED
// Set up counters
if (i::StrLength(i::FLAG_map_counters) != 0)
MapCounters(isolate, i::FLAG_map_counters);
-#endif // !V8_SHARED
// Disable default message reporting.
isolate->AddMessageListener(EmptyMessageCallback);
}
Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
-#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
base::LockGuard<base::Mutex> lock_guard(context_mutex_.Pointer());
-#endif // !V8_SHARED
// Initialize the global objects
Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
EscapableHandleScope handle_scope(isolate);
@@ -1387,7 +1478,6 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
DCHECK(!context.IsEmpty());
Context::Scope scope(context);
-#ifndef V8_SHARED
i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory();
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
@@ -1405,7 +1495,6 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
.ToLocalChecked(),
Utils::ToLocal(arguments_jsarray))
.FromJust();
-#endif // !V8_SHARED
return handle_scope.Escape(context);
}
@@ -1419,7 +1508,6 @@ void Shell::Exit(int exit_code) {
}
-#ifndef V8_SHARED
struct CounterAndKey {
Counter* counter;
const char* key;
@@ -1444,11 +1532,8 @@ void Shell::WriteIgnitionDispatchCountersFile(v8::Isolate* isolate) {
JSON::Stringify(context, dispatch_counters).ToLocalChecked());
}
-#endif // !V8_SHARED
-
void Shell::OnExit(v8::Isolate* isolate) {
-#ifndef V8_SHARED
if (i::FLAG_dump_counters) {
int number_of_counters = 0;
for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
@@ -1484,7 +1569,6 @@ void Shell::OnExit(v8::Isolate* isolate) {
delete counters_file_;
delete counter_map_;
-#endif // !V8_SHARED
}
@@ -1618,10 +1702,8 @@ void Shell::RunShell(Isolate* isolate) {
SourceGroup::~SourceGroup() {
-#ifndef V8_SHARED
delete thread_;
thread_ = NULL;
-#endif // !V8_SHARED
}
@@ -1629,7 +1711,6 @@ void SourceGroup::Execute(Isolate* isolate) {
bool exception_was_thrown = false;
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
- Shell::SourceType source_type = Shell::SCRIPT;
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
// Execute argument given to -e option directly.
HandleScope handle_scope(isolate);
@@ -1648,8 +1729,13 @@ void SourceGroup::Execute(Isolate* isolate) {
continue;
} else if (strcmp(arg, "--module") == 0 && i + 1 < end_offset_) {
// Treat the next file as a module.
- source_type = Shell::MODULE;
arg = argv_[++i];
+ Shell::options.script_executed = true;
+ if (!Shell::ExecuteModule(isolate, arg)) {
+ exception_was_thrown = true;
+ break;
+ }
+ continue;
} else if (arg[0] == '-') {
// Ignore other options. They have been parsed already.
continue;
@@ -1666,8 +1752,7 @@ void SourceGroup::Execute(Isolate* isolate) {
Shell::Exit(1);
}
Shell::options.script_executed = true;
- if (!Shell::ExecuteString(isolate, source, file_name, false, true,
- source_type)) {
+ if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
exception_was_thrown = true;
break;
}
@@ -1690,7 +1775,6 @@ Local<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
}
-#ifndef V8_SHARED
base::Thread::Options SourceGroup::GetThreadOptions() {
// On some systems (OSX 10.6) the stack size default is 0.5Mb or less
// which is not enough to parse the big literal expressions used in tests.
@@ -2014,7 +2098,6 @@ void Worker::PostMessageOut(const v8::FunctionCallbackInfo<v8::Value>& args) {
delete data;
}
}
-#endif // !V8_SHARED
void SetFlagsFromString(const char* flags) {
@@ -2070,30 +2153,16 @@ bool Shell::SetOptions(int argc, char* argv[]) {
// JavaScript engines.
continue;
} else if (strcmp(argv[i], "--isolate") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#endif // V8_SHARED
options.num_isolates++;
} else if (strcmp(argv[i], "--dump-heap-constants") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support constant dumping\n");
- return false;
-#else
options.dump_heap_constants = true;
argv[i] = NULL;
-#endif // V8_SHARED
} else if (strcmp(argv[i], "--throws") == 0) {
options.expected_to_throw = true;
argv[i] = NULL;
} else if (strncmp(argv[i], "--icu-data-file=", 16) == 0) {
options.icu_data_file = argv[i] + 16;
argv[i] = NULL;
-#ifdef V8_SHARED
- } else if (strcmp(argv[i], "--dump-counters") == 0) {
- printf("D8 with shared library does not include counters\n");
- return false;
-#endif // V8_SHARED
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
} else if (strncmp(argv[i], "--natives_blob=", 15) == 0) {
options.natives_blob = argv[i] + 15;
@@ -2159,11 +2228,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
-#ifndef V8_SHARED
for (int i = 1; i < options.num_isolates; ++i) {
options.isolate_sources[i].StartExecuteInThread();
}
-#endif // !V8_SHARED
{
HandleScope scope(isolate);
Local<Context> context = CreateEvaluationContext(isolate);
@@ -2178,7 +2245,6 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
}
}
CollectGarbage(isolate);
-#ifndef V8_SHARED
for (int i = 1; i < options.num_isolates; ++i) {
if (last_run) {
options.isolate_sources[i].JoinThread();
@@ -2187,7 +2253,6 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
}
}
CleanupWorkers();
-#endif // !V8_SHARED
return 0;
}
@@ -2209,17 +2274,12 @@ void Shell::CollectGarbage(Isolate* isolate) {
void Shell::EmptyMessageQueues(Isolate* isolate) {
-#ifndef V8_SHARED
if (!i::FLAG_verify_predictable) {
-#endif
while (v8::platform::PumpMessageLoop(g_platform, isolate)) continue;
-#ifndef V8_SHARED
}
-#endif
}
-#ifndef V8_SHARED
bool Shell::SerializeValue(Isolate* isolate, Local<Value> value,
const ObjectList& to_transfer,
ObjectList* seen_objects,
@@ -2534,14 +2594,11 @@ static void DumpHeapConstants(i::Isolate* isolate) {
printf("}\n");
#undef ROOT_LIST_CASE
}
-#endif // !V8_SHARED
int Shell::Main(int argc, char* argv[]) {
std::ofstream trace_file;
-#ifndef V8_SHARED
v8::base::debug::EnableInProcessStackDumping();
-#endif
#if (defined(_WIN32) || defined(_WIN64))
UINT new_flags =
SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX;
@@ -2559,13 +2616,9 @@ int Shell::Main(int argc, char* argv[]) {
#endif // defined(_WIN32) || defined(_WIN64)
if (!SetOptions(argc, argv)) return 1;
v8::V8::InitializeICUDefaultLocation(argv[0], options.icu_data_file);
-#ifndef V8_SHARED
g_platform = i::FLAG_verify_predictable
? new PredictablePlatform()
: v8::platform::CreateDefaultPlatform();
-#else
- g_platform = v8::platform::CreateDefaultPlatform();
-#endif // !V8_SHARED
v8::V8::InitializePlatform(g_platform);
v8::V8::Initialize();
@@ -2591,7 +2644,6 @@ int Shell::Main(int argc, char* argv[]) {
#ifdef ENABLE_VTUNE_JIT_INTERFACE
create_params.code_event_handler = vTune::GetVtuneCodeEventHandler();
#endif
-#ifndef V8_SHARED
create_params.constraints.ConfigureDefaults(
base::SysInfo::AmountOfPhysicalMemory(),
base::SysInfo::AmountOfVirtualMemory());
@@ -2602,7 +2654,6 @@ int Shell::Main(int argc, char* argv[]) {
create_params.create_histogram_callback = CreateHistogram;
create_params.add_histogram_sample_callback = AddHistogramSample;
}
-#endif
Isolate* isolate = Isolate::New(create_params);
{
Isolate::Scope scope(isolate);
@@ -2632,21 +2683,15 @@ int Shell::Main(int argc, char* argv[]) {
}
tracing_controller->Initialize(trace_buffer);
tracing_controller->StartTracing(trace_config);
-#ifndef V8_SHARED
if (!i::FLAG_verify_predictable) {
platform::SetTracingController(g_platform, tracing_controller);
}
-#else
- platform::SetTracingController(g_platform, tracing_controller);
-#endif
}
-#ifndef V8_SHARED
if (options.dump_heap_constants) {
DumpHeapConstants(reinterpret_cast<i::Isolate*>(isolate));
return 0;
}
-#endif
if (options.stress_opt || options.stress_deopt) {
Testing::SetStressRunType(options.stress_opt
@@ -2662,7 +2707,6 @@ int Shell::Main(int argc, char* argv[]) {
}
printf("======== Full Deoptimization =======\n");
Testing::DeoptimizeAll(isolate);
-#if !defined(V8_SHARED)
} else if (i::FLAG_stress_runs > 0) {
options.stress_runs = i::FLAG_stress_runs;
for (int i = 0; i < options.stress_runs && result == 0; i++) {
@@ -2671,7 +2715,6 @@ int Shell::Main(int argc, char* argv[]) {
bool last_run = i == options.stress_runs - 1;
result = RunMain(isolate, argc, argv, last_run);
}
-#endif
} else {
bool last_run = true;
result = RunMain(isolate, argc, argv, last_run);
@@ -2683,29 +2726,23 @@ int Shell::Main(int argc, char* argv[]) {
RunShell(isolate);
}
-#ifndef V8_SHARED
if (i::FLAG_ignition && i::FLAG_trace_ignition_dispatches &&
i::FLAG_trace_ignition_dispatches_output_file != nullptr) {
WriteIgnitionDispatchCountersFile(isolate);
}
-#endif
// Shut down contexts and collect garbage.
evaluation_context_.Reset();
-#ifndef V8_SHARED
stringify_function_.Reset();
-#endif // !V8_SHARED
CollectGarbage(isolate);
}
OnExit(isolate);
-#ifndef V8_SHARED
// Dump basic block profiling data.
if (i::BasicBlockProfiler* profiler =
reinterpret_cast<i::Isolate*>(isolate)->basic_block_profiler()) {
i::OFStream os(stdout);
os << *profiler;
}
-#endif // !V8_SHARED
isolate->Dispose();
V8::Dispose();
V8::ShutdownPlatform();
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index cc65a5b75a..e0270f5178 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -49,10 +49,18 @@
'sources': [
'd8.h',
'd8.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
],
'conditions': [
[ 'want_separate_host_toolset==1', {
'toolsets': [ 'target', ],
+ 'dependencies': [
+ 'd8_js2c#host',
+ ],
+ }, {
+ 'dependencies': [
+ 'd8_js2c',
+ ],
}],
['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
or OS=="openbsd" or OS=="solaris" or OS=="android" \
@@ -63,19 +71,7 @@
'sources': [ 'd8-windows.cc', ]
}],
[ 'component!="shared_library"', {
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
- ],
'conditions': [
- [ 'want_separate_host_toolset==1', {
- 'dependencies': [
- 'd8_js2c#host',
- ],
- }, {
- 'dependencies': [
- 'd8_js2c',
- ],
- }],
[ 'v8_postmortem_support=="true"', {
'xcode_settings': {
'OTHER_LDFLAGS': [
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 0e365a52dd..32a7d25c2f 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -5,15 +5,13 @@
#ifndef V8_D8_H_
#define V8_D8_H_
-#ifndef V8_SHARED
+#include <map>
+#include <string>
+
#include "src/allocation.h"
#include "src/base/hashmap.h"
#include "src/base/platform/time.h"
#include "src/list.h"
-#else
-#include "include/v8.h"
-#include "src/base/compiler-specific.h"
-#endif // !V8_SHARED
#include "src/base/once.h"
@@ -21,7 +19,6 @@
namespace v8 {
-#ifndef V8_SHARED
// A single counter in a counter collection.
class Counter {
public:
@@ -81,26 +78,23 @@ class CounterMap {
const char* CurrentKey() { return static_cast<const char*>(entry_->key); }
Counter* CurrentValue() { return static_cast<Counter*>(entry_->value); }
private:
- base::HashMap* map_;
- base::HashMap::Entry* entry_;
+ base::CustomMatcherHashMap* map_;
+ base::CustomMatcherHashMap::Entry* entry_;
};
private:
static int Hash(const char* name);
static bool Match(void* key1, void* key2);
- base::HashMap hash_map_;
+ base::CustomMatcherHashMap hash_map_;
};
-#endif // !V8_SHARED
class SourceGroup {
public:
SourceGroup() :
-#ifndef V8_SHARED
next_semaphore_(0),
done_semaphore_(0),
thread_(NULL),
-#endif // !V8_SHARED
argv_(NULL),
begin_offset_(0),
end_offset_(0) {}
@@ -116,7 +110,6 @@ class SourceGroup {
void Execute(Isolate* isolate);
-#ifndef V8_SHARED
void StartExecuteInThread();
void WaitForThread();
void JoinThread();
@@ -141,7 +134,6 @@ class SourceGroup {
base::Semaphore next_semaphore_;
base::Semaphore done_semaphore_;
base::Thread* thread_;
-#endif // !V8_SHARED
void ExitShell(int exit_code);
Local<String> ReadFile(Isolate* isolate, const char* name);
@@ -151,7 +143,6 @@ class SourceGroup {
int end_offset_;
};
-#ifndef V8_SHARED
enum SerializationTag {
kSerializationTagUndefined,
kSerializationTagNull,
@@ -267,7 +258,6 @@ class Worker {
char* script_;
base::Atomic32 running_;
};
-#endif // !V8_SHARED
class ShellOptions {
@@ -324,23 +314,15 @@ class ShellOptions {
const char* trace_config;
};
-#ifdef V8_SHARED
-class Shell {
-#else
class Shell : public i::AllStatic {
-#endif // V8_SHARED
-
public:
- enum SourceType { SCRIPT, MODULE };
-
static MaybeLocal<Script> CompileString(
Isolate* isolate, Local<String> source, Local<Value> name,
- v8::ScriptCompiler::CompileOptions compile_options,
- SourceType source_type);
+ v8::ScriptCompiler::CompileOptions compile_options);
static bool ExecuteString(Isolate* isolate, Local<String> source,
Local<Value> name, bool print_result,
- bool report_exceptions,
- SourceType source_type = SCRIPT);
+ bool report_exceptions);
+ static bool ExecuteModule(Isolate* isolate, const char* file_name);
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(Isolate* isolate, TryCatch* try_catch);
static Local<String> ReadFile(Isolate* isolate, const char* name);
@@ -352,7 +334,6 @@ class Shell : public i::AllStatic {
static void CollectGarbage(Isolate* isolate);
static void EmptyMessageQueues(Isolate* isolate);
-#ifndef V8_SHARED
// TODO(binji): stupid implementation for now. Is there an easy way to hash an
// object for use in base::HashMap? By pointer?
typedef i::List<Local<Object>> ObjectList;
@@ -373,7 +354,6 @@ class Shell : public i::AllStatic {
static void MapCounters(v8::Isolate* isolate, const char* name);
static void PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args);
-#endif // !V8_SHARED
static void RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -451,7 +431,6 @@ class Shell : public i::AllStatic {
private:
static Global<Context> evaluation_context_;
static base::OnceType quit_once_;
-#ifndef V8_SHARED
static Global<Function> stringify_function_;
static CounterMap* counter_map_;
// We statically allocate a set of local counters to be used if we
@@ -470,13 +449,15 @@ class Shell : public i::AllStatic {
static void WriteIgnitionDispatchCountersFile(v8::Isolate* isolate);
static Counter* GetCounter(const char* name, bool is_histogram);
static Local<String> Stringify(Isolate* isolate, Local<Value> value);
-#endif // !V8_SHARED
static void Initialize(Isolate* isolate);
static void RunShell(Isolate* isolate);
static bool SetOptions(int argc, char* argv[]);
static Local<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
static MaybeLocal<Context> CreateRealm(
const v8::FunctionCallbackInfo<v8::Value>& args);
+ static MaybeLocal<Module> FetchModuleTree(
+ Isolate* isolate, const std::string& file_name,
+ std::map<std::string, Global<Module>>* module_map);
};
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index d7676cbe08..709c1cbaf2 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -7,7 +7,7 @@
#include "src/allocation.h"
#include "src/char-predicates.h"
-#include "src/parsing/scanner.h"
+#include "src/unicode-cache.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/arm/debug-arm.cc b/deps/v8/src/debug/arm/debug-arm.cc
index 29e4827be4..d96ec31bfa 100644
--- a/deps/v8/src/debug/arm/debug-arm.cc
+++ b/deps/v8/src/debug/arm/debug-arm.cc
@@ -4,9 +4,11 @@
#if V8_TARGET_ARCH_ARM
-#include "src/codegen.h"
#include "src/debug/debug.h"
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index bf7964a7be..e344924a61 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -4,9 +4,11 @@
#if V8_TARGET_ARCH_ARM64
+#include "src/debug/debug.h"
+
#include "src/arm64/frames-arm64.h"
#include "src/codegen.h"
-#include "src/debug/debug.h"
+#include "src/debug/liveedit.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index fb2df312b8..8970520edc 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -94,7 +94,13 @@ MaybeHandle<Object> DebugEvaluate::Evaluate(
if (context_extension->IsJSObject()) {
Handle<JSObject> extension = Handle<JSObject>::cast(context_extension);
Handle<JSFunction> closure(context->closure(), isolate);
- context = isolate->factory()->NewWithContext(closure, context, extension);
+ context = isolate->factory()->NewWithContext(
+ closure, context,
+ ScopeInfo::CreateForWithScope(
+ isolate, context->IsNativeContext()
+ ? Handle<ScopeInfo>::null()
+ : Handle<ScopeInfo>(context->scope_info())),
+ extension);
}
Handle<JSFunction> eval_fun;
@@ -203,8 +209,13 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
}
for (int i = context_chain_.length() - 1; i >= 0; i--) {
+ Handle<ScopeInfo> scope_info(ScopeInfo::CreateForWithScope(
+ isolate, evaluation_context_->IsNativeContext()
+ ? Handle<ScopeInfo>::null()
+ : Handle<ScopeInfo>(evaluation_context_->scope_info())));
+ scope_info->SetIsDebugEvaluateScope();
evaluation_context_ = factory->NewDebugEvaluateContext(
- evaluation_context_, context_chain_[i].materialized_object,
+ evaluation_context_, scope_info, context_chain_[i].materialized_object,
context_chain_[i].wrapped_context, context_chain_[i].whitelist);
}
}
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 55108bb96e..c7eb0f75f9 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -7,7 +7,6 @@
#include <memory>
#include "src/ast/scopes.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/frames-inl.h"
#include "src/globals.h"
@@ -100,7 +99,9 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
} else {
DCHECK(scope_info->scope_type() == EVAL_SCOPE);
info->set_eval();
- info->set_context(Handle<Context>(function->context()));
+ if (!function->context()->IsNativeContext()) {
+ info->set_outer_scope_info(handle(function->context()->scope_info()));
+ }
// Language mode may be inherited from the eval caller.
// Retrieve it from shared function info.
info->set_language_mode(shared_info->language_mode());
@@ -115,8 +116,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
CollectNonLocals(info.get(), scope);
}
if (!ignore_nested_scopes) {
- AstNodeFactory ast_node_factory(info.get()->ast_value_factory());
- scope->AllocateVariables(info.get(), &ast_node_factory);
+ DeclarationScope::Analyze(info.get(), AnalyzeMode::kDebugger);
RetrieveScopeChain(scope);
}
} else if (!ignore_nested_scopes) {
@@ -364,7 +364,7 @@ bool ScopeIterator::SetVariableValue(Handle<String> variable_name,
case ScopeIterator::ScopeTypeEval:
return SetInnerScopeVariableValue(variable_name, new_value);
case ScopeIterator::ScopeTypeModule:
- // TODO(2399): should we implement it?
+ // TODO(neis): Implement.
break;
}
return false;
@@ -619,6 +619,8 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeModuleScope() {
// Fill all context locals.
CopyContextLocalsToScopeObject(scope_info, context, module_scope);
+ // TODO(neis): Also collect stack locals as well as imports and exports.
+
return module_scope;
}
@@ -819,11 +821,10 @@ void ScopeIterator::GetNestedScopeChain(Isolate* isolate, Scope* scope,
if (scope->is_hidden()) {
// We need to add this chain element in case the scope has a context
// associated. We need to keep the scope chain and context chain in sync.
- nested_scope_chain_.Add(ExtendedScopeInfo(scope->GetScopeInfo(isolate)));
+ nested_scope_chain_.Add(ExtendedScopeInfo(scope->scope_info()));
} else {
- nested_scope_chain_.Add(ExtendedScopeInfo(scope->GetScopeInfo(isolate),
- scope->start_position(),
- scope->end_position()));
+ nested_scope_chain_.Add(ExtendedScopeInfo(
+ scope->scope_info(), scope->start_position(), scope->end_position()));
}
for (Scope* inner_scope = scope->inner_scope(); inner_scope != nullptr;
inner_scope = inner_scope->sibling()) {
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 0491d73c74..026a1da0f5 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -11,6 +11,8 @@
namespace v8 {
namespace internal {
+class ParseInfo;
+
// Iterate over the actual scopes visible from a stack frame or from a closure.
// The iteration proceeds from the innermost visible nested scope outwards.
// All scopes are backed by an actual context except the local scope,
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index cce167f942..e93dd35661 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -14,6 +14,7 @@
#include "src/compilation-cache.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler.h"
+#include "src/debug/liveedit.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/frames-inl.h"
@@ -935,7 +936,7 @@ void Debug::PrepareStepOnThrow() {
it.Advance();
}
- if (last_step_action() == StepNext) {
+ if (last_step_action() == StepNext || last_step_action() == StepOut) {
while (!it.done()) {
Address current_fp = it.frame()->UnpaddedFP();
if (current_fp >= thread_local_.target_fp_) break;
@@ -1281,7 +1282,7 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
// Make sure we abort incremental marking.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "prepare for break points");
+ GarbageCollectionReason::kDebugger);
DCHECK(shared->is_compiled());
bool baseline_exists = shared->HasBaselineCode();
@@ -1293,7 +1294,8 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
HeapIterator iterator(isolate_->heap());
HeapObject* obj;
// Continuation from old-style generators need to be recomputed.
- bool find_resumables = baseline_exists && shared->is_resumable();
+ bool find_resumables =
+ baseline_exists && IsResumableFunction(shared->kind());
while ((obj = iterator.next())) {
if (obj->IsJSFunction()) {
@@ -1352,7 +1354,7 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
void Debug::RecordAsyncFunction(Handle<JSGeneratorObject> generator_object) {
if (last_step_action() <= StepOut) return;
- if (!generator_object->function()->shared()->is_async()) return;
+ if (!IsAsyncFunction(generator_object->function()->shared()->kind())) return;
DCHECK(!has_suspended_generator());
thread_local_.suspended_generator_ = *generator_object;
ClearStepping();
@@ -1576,10 +1578,9 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
return location.IsReturn() || location.IsTailCall();
}
-
void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- LiveEdit::FrameDropMode mode) {
- if (mode != LiveEdit::CURRENTLY_SET_MODE) {
+ LiveEditFrameDropMode mode) {
+ if (mode != LIVE_EDIT_CURRENTLY_SET_MODE) {
thread_local_.frame_drop_mode_ = mode;
}
thread_local_.break_frame_id_ = new_break_frame_id;
@@ -1599,7 +1600,8 @@ void Debug::ClearMirrorCache() {
Handle<FixedArray> Debug::GetLoadedScripts() {
- isolate_->heap()->CollectAllGarbage();
+ isolate_->heap()->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
+ GarbageCollectionReason::kDebugger);
Factory* factory = isolate_->factory();
if (!factory->script_list()->IsWeakFixedArray()) {
return factory->empty_fixed_array();
@@ -1681,25 +1683,19 @@ void Debug::OnThrow(Handle<Object> exception) {
}
}
-
-void Debug::OnPromiseReject(Handle<JSObject> promise, Handle<Object> value) {
+void Debug::OnPromiseReject(Handle<Object> promise, Handle<Object> value) {
if (in_debug_scope() || ignore_events()) return;
HandleScope scope(isolate_);
// Check whether the promise has been marked as having triggered a message.
Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
- if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate_)) {
+ if (!promise->IsJSObject() ||
+ JSReceiver::GetDataProperty(Handle<JSObject>::cast(promise), key)
+ ->IsUndefined(isolate_)) {
OnException(value, promise);
}
}
-MaybeHandle<Object> Debug::PromiseHasUserDefinedRejectHandler(
- Handle<JSObject> promise) {
- Handle<JSFunction> fun = isolate_->promise_has_user_defined_reject_handler();
- return Execution::Call(isolate_, fun, promise, 0, NULL);
-}
-
-
void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
// We cannot generate debug events when JS execution is disallowed.
// TODO(5530): Reenable debug events within DisallowJSScopes once relevant
@@ -1711,18 +1707,14 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
// Don't notify listener of exceptions that are internal to a desugaring.
if (catch_type == Isolate::CAUGHT_BY_DESUGARING) return;
- bool uncaught = (catch_type == Isolate::NOT_CAUGHT);
+ bool uncaught = catch_type == Isolate::NOT_CAUGHT;
if (promise->IsJSObject()) {
Handle<JSObject> jspromise = Handle<JSObject>::cast(promise);
// Mark the promise as already having triggered a message.
Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
JSObject::SetProperty(jspromise, key, key, STRICT).Assert();
// Check whether the promise reject is considered an uncaught exception.
- Handle<Object> has_reject_handler;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, has_reject_handler,
- PromiseHasUserDefinedRejectHandler(jspromise), /* void */);
- uncaught = has_reject_handler->IsFalse(isolate_);
+ uncaught = !isolate_->PromiseHasUserDefinedRejectHandler(jspromise);
}
// Bail out if exception breaks are not active
if (uncaught) {
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 36f973c500..c4e8c17246 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -11,11 +11,11 @@
#include "src/base/atomicops.h"
#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"
-#include "src/debug/liveedit.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/flags.h"
#include "src/frames.h"
+#include "src/globals.h"
#include "src/runtime/runtime.h"
#include "src/source-position-table.h"
#include "src/string-stream.h"
@@ -413,7 +413,7 @@ class Debug {
void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
void OnThrow(Handle<Object> exception);
- void OnPromiseReject(Handle<JSObject> promise, Handle<Object> value);
+ void OnPromiseReject(Handle<Object> promise, Handle<Object> value);
void OnCompileError(Handle<Script> script);
void OnBeforeCompile(Handle<Script> script);
void OnAfterCompile(Handle<Script> script);
@@ -489,7 +489,7 @@ class Debug {
// Support for LiveEdit
void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- LiveEdit::FrameDropMode mode);
+ LiveEditFrameDropMode mode);
// Threading support.
char* ArchiveDebug(char* to);
@@ -594,9 +594,6 @@ class Debug {
// Mirror cache handling.
void ClearMirrorCache();
- MaybeHandle<Object> PromiseHasUserDefinedRejectHandler(
- Handle<JSObject> promise);
-
void CallEventCallback(v8::DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
@@ -704,7 +701,7 @@ class Debug {
// Stores the way how LiveEdit has patched the stack. It is used when
// debugger returns control back to user script.
- LiveEdit::FrameDropMode frame_drop_mode_;
+ LiveEditFrameDropMode frame_drop_mode_;
// Value of accumulator in interpreter frames. In non-interpreter frames
// this value will be the hole.
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
index 8e4dee7797..47ec69ec5b 100644
--- a/deps/v8/src/debug/ia32/debug-ia32.cc
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -4,8 +4,10 @@
#if V8_TARGET_ARCH_IA32
-#include "src/codegen.h"
#include "src/debug/debug.h"
+
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
#include "src/ia32/frames-ia32.h"
namespace v8 {
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index b51bb1a1c4..b451842f90 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -14,7 +14,6 @@
#include "src/global-handles.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
-#include "src/parsing/parser.h"
#include "src/source-position-table.h"
#include "src/v8.h"
#include "src/v8memory.h"
@@ -655,7 +654,7 @@ Handle<SharedFunctionInfo> SharedInfoWrapper::GetInfo() {
void LiveEdit::InitializeThreadLocal(Debug* debug) {
- debug->thread_local_.frame_drop_mode_ = LiveEdit::FRAMES_UNTOUCHED;
+ debug->thread_local_.frame_drop_mode_ = LIVE_EDIT_FRAMES_UNTOUCHED;
}
@@ -663,20 +662,20 @@ bool LiveEdit::SetAfterBreakTarget(Debug* debug) {
Code* code = NULL;
Isolate* isolate = debug->isolate_;
switch (debug->thread_local_.frame_drop_mode_) {
- case FRAMES_UNTOUCHED:
+ case LIVE_EDIT_FRAMES_UNTOUCHED:
return false;
- case FRAME_DROPPED_IN_DEBUG_SLOT_CALL:
+ case LIVE_EDIT_FRAME_DROPPED_IN_DEBUG_SLOT_CALL:
// Debug break slot stub does not return normally, instead it manually
// cleans the stack and jumps. We should patch the jump address.
code = isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit);
break;
- case FRAME_DROPPED_IN_DIRECT_CALL:
+ case LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL:
// Nothing to do, after_break_target is not used here.
return true;
- case FRAME_DROPPED_IN_RETURN_CALL:
+ case LIVE_EDIT_FRAME_DROPPED_IN_RETURN_CALL:
code = isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit);
break;
- case CURRENTLY_SET_MODE:
+ case LIVE_EDIT_CURRENTLY_SET_MODE:
UNREACHABLE();
break;
}
@@ -1017,6 +1016,7 @@ void LiveEdit::ReplaceFunctionCode(
handle(shared_info->GetDebugInfo()));
}
shared_info->set_scope_info(new_shared_info->scope_info());
+ shared_info->set_outer_scope_info(new_shared_info->outer_scope_info());
shared_info->DisableOptimization(kLiveEdit);
// Update the type feedback vector, if needed.
Handle<TypeFeedbackMetadata> new_feedback_metadata(
@@ -1303,7 +1303,7 @@ static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
// Returns error message or NULL.
static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index,
int bottom_js_frame_index,
- LiveEdit::FrameDropMode* mode) {
+ LiveEditFrameDropMode* mode) {
if (!LiveEdit::kFrameDropperSupported) {
return "Stack manipulations are not supported in this architecture.";
}
@@ -1321,22 +1321,22 @@ static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index,
if (pre_top_frame_code ==
isolate->builtins()->builtin(Builtins::kSlot_DebugBreak)) {
// OK, we can drop debug break slot.
- *mode = LiveEdit::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
+ *mode = LIVE_EDIT_FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
} else if (pre_top_frame_code ==
isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit)) {
// OK, we can drop our own code.
pre_top_frame = frames[top_frame_index - 2];
top_frame = frames[top_frame_index - 1];
- *mode = LiveEdit::CURRENTLY_SET_MODE;
+ *mode = LIVE_EDIT_CURRENTLY_SET_MODE;
frame_has_padding = false;
} else if (pre_top_frame_code ==
isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
- *mode = LiveEdit::FRAME_DROPPED_IN_RETURN_CALL;
+ *mode = LIVE_EDIT_FRAME_DROPPED_IN_RETURN_CALL;
} else if (pre_top_frame_code->kind() == Code::STUB &&
CodeStub::GetMajorKey(pre_top_frame_code) == CodeStub::CEntry) {
// Entry from our unit tests on 'debugger' statement.
// It's fine, we support this case.
- *mode = LiveEdit::FRAME_DROPPED_IN_DIRECT_CALL;
+ *mode = LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL;
// We don't have a padding from 'debugger' statement call.
// Here the stub is CEntry, it's not debug-only and can't be padded.
// If anyone would complain, a proxy padded stub could be added.
@@ -1348,13 +1348,13 @@ static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index,
isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit));
pre_top_frame = frames[top_frame_index - 3];
top_frame = frames[top_frame_index - 2];
- *mode = LiveEdit::CURRENTLY_SET_MODE;
+ *mode = LIVE_EDIT_CURRENTLY_SET_MODE;
frame_has_padding = false;
} else if (pre_top_frame_code->kind() == Code::BYTECODE_HANDLER) {
// Interpreted bytecode takes up two stack frames, one for the bytecode
// handler and one for the interpreter entry trampoline. Therefore we shift
// up by one frame.
- *mode = LiveEdit::FRAME_DROPPED_IN_DIRECT_CALL;
+ *mode = LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL;
pre_top_frame = frames[top_frame_index - 2];
top_frame = frames[top_frame_index - 1];
} else {
@@ -1557,7 +1557,7 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
if (frame->is_java_script()) {
SharedFunctionInfo* shared =
JavaScriptFrame::cast(frame)->function()->shared();
- if (shared->is_resumable()) {
+ if (IsResumableFunction(shared->kind())) {
non_droppable_frame_found = true;
non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR;
break;
@@ -1605,7 +1605,7 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
return target.GetNotFoundMessage();
}
- LiveEdit::FrameDropMode drop_mode = LiveEdit::FRAMES_UNTOUCHED;
+ LiveEditFrameDropMode drop_mode = LIVE_EDIT_FRAMES_UNTOUCHED;
const char* error_message =
DropFrames(frames, top_frame_index, bottom_js_frame_index, &drop_mode);
@@ -1900,25 +1900,19 @@ Handle<Object> LiveEditFunctionTracker::SerializeFunctionScope(Scope* scope) {
Scope* current_scope = scope;
while (current_scope != NULL) {
HandleScope handle_scope(isolate_);
- ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone_);
- ZoneList<Variable*> context_list(current_scope->ContextLocalCount(), zone_);
- ZoneList<Variable*> globals_list(current_scope->ContextGlobalCount(),
- zone_);
- current_scope->CollectStackAndContextLocals(&stack_list, &context_list,
- &globals_list);
- context_list.Sort(&Variable::CompareIndex);
-
- for (int i = 0; i < context_list.length(); i++) {
- SetElementSloppy(scope_info_list, scope_info_length,
- context_list[i]->name());
- scope_info_length++;
- SetElementSloppy(
- scope_info_list, scope_info_length,
- Handle<Smi>(Smi::FromInt(context_list[i]->index()), isolate_));
- scope_info_length++;
+ ZoneList<Variable*>* locals = current_scope->locals();
+ for (int i = 0; i < locals->length(); i++) {
+ Variable* var = locals->at(i);
+ if (!var->IsContextSlot()) continue;
+ int context_index = var->index() - Context::MIN_CONTEXT_SLOTS;
+ int location = scope_info_length + context_index * 2;
+ SetElementSloppy(scope_info_list, location, var->name());
+ SetElementSloppy(scope_info_list, location + 1,
+ handle(Smi::FromInt(var->index()), isolate_));
}
+ scope_info_length += current_scope->ContextLocalCount() * 2;
SetElementSloppy(scope_info_list, scope_info_length,
- Handle<Object>(isolate_->heap()->null_value(), isolate_));
+ isolate_->factory()->null_value());
scope_info_length++;
current_scope = current_scope->outer_scope();
diff --git a/deps/v8/src/debug/liveedit.h b/deps/v8/src/debug/liveedit.h
index 784f828162..2034dcb026 100644
--- a/deps/v8/src/debug/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -72,20 +72,6 @@ class LiveEditFunctionTracker
class LiveEdit : AllStatic {
public:
- // Describes how exactly a frame has been dropped from stack.
- enum FrameDropMode {
- // No frame has been dropped.
- FRAMES_UNTOUCHED,
- // The top JS frame had been calling debug break slot stub. Patch the
- // address this stub jumps to in the end.
- FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
- // The top JS frame had been calling some C++ function. The return address
- // gets patched automatically.
- FRAME_DROPPED_IN_DIRECT_CALL,
- FRAME_DROPPED_IN_RETURN_CALL,
- CURRENTLY_SET_MODE
- };
-
static void InitializeThreadLocal(Debug* debug);
static bool SetAfterBreakTarget(Debug* debug);
diff --git a/deps/v8/src/debug/mips/debug-mips.cc b/deps/v8/src/debug/mips/debug-mips.cc
index 49320d8a81..4d8b54f4b6 100644
--- a/deps/v8/src/debug/mips/debug-mips.cc
+++ b/deps/v8/src/debug/mips/debug-mips.cc
@@ -4,9 +4,11 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/codegen.h"
#include "src/debug/debug.h"
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
index 2e967d7b8e..2a6ce7b5cd 100644
--- a/deps/v8/src/debug/mips64/debug-mips64.cc
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -4,9 +4,11 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/codegen.h"
#include "src/debug/debug.h"
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
index 7facf9526a..e57aa3caa2 100644
--- a/deps/v8/src/debug/ppc/debug-ppc.cc
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -4,9 +4,11 @@
#if V8_TARGET_ARCH_PPC
-#include "src/codegen.h"
#include "src/debug/debug.h"
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/s390/debug-s390.cc b/deps/v8/src/debug/s390/debug-s390.cc
index 9c33b95e3b..b745d5b966 100644
--- a/deps/v8/src/debug/s390/debug-s390.cc
+++ b/deps/v8/src/debug/s390/debug-s390.cc
@@ -6,9 +6,11 @@
#if V8_TARGET_ARCH_S390
-#include "src/codegen.h"
#include "src/debug/debug.h"
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index 910d1ca001..4f80e18c85 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -4,10 +4,11 @@
#if V8_TARGET_ARCH_X64
-#include "src/assembler.h"
-#include "src/codegen.h"
#include "src/debug/debug.h"
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/x87/debug-x87.cc b/deps/v8/src/debug/x87/debug-x87.cc
index 1cbdf45b8c..c29eac19c8 100644
--- a/deps/v8/src/debug/x87/debug-x87.cc
+++ b/deps/v8/src/debug/x87/debug-x87.cc
@@ -4,8 +4,10 @@
#if V8_TARGET_ARCH_X87
-#include "src/codegen.h"
#include "src/debug/debug.h"
+
+#include "src/codegen.h"
+#include "src/debug/liveedit.h"
#include "src/x87/frames-x87.h"
namespace v8 {
diff --git a/deps/v8/src/deoptimize-reason.cc b/deps/v8/src/deoptimize-reason.cc
index 87c8905ff8..b0ee780070 100644
--- a/deps/v8/src/deoptimize-reason.cc
+++ b/deps/v8/src/deoptimize-reason.cc
@@ -23,7 +23,7 @@ size_t hash_value(DeoptimizeReason reason) {
return static_cast<uint8_t>(reason);
}
-char const* const DeoptimizeReasonToString(DeoptimizeReason reason) {
+char const* DeoptimizeReasonToString(DeoptimizeReason reason) {
static char const* kDeoptimizeReasonStrings[] = {
#define DEOPTIMIZE_REASON(Name, message) message,
DEOPTIMIZE_REASON_LIST(DEOPTIMIZE_REASON)
diff --git a/deps/v8/src/deoptimize-reason.h b/deps/v8/src/deoptimize-reason.h
index 60e0a59c5a..d28ec4750b 100644
--- a/deps/v8/src/deoptimize-reason.h
+++ b/deps/v8/src/deoptimize-reason.h
@@ -23,6 +23,7 @@ namespace internal {
V(ForcedDeoptToRuntime, "Forced deopt to runtime") \
V(Hole, "hole") \
V(InstanceMigrationFailed, "instance migration failed") \
+ V(InsufficientTypeFeedbackForCall, "Insufficient type feedback for call") \
V(InsufficientTypeFeedbackForCallWithArguments, \
"Insufficient type feedback for call with arguments") \
V(FastPathFailed, "Falling off the fast path") \
@@ -68,7 +69,6 @@ namespace internal {
"Unexpected cell contents in global store") \
V(UnexpectedObject, "unexpected object") \
V(UnexpectedRHSOfBinaryOperation, "Unexpected RHS of binary operation") \
- V(UninitializedBoilerplateLiterals, "Uninitialized boilerplate literals") \
V(UnknownMapInPolymorphicAccess, "Unknown map in polymorphic access") \
V(UnknownMapInPolymorphicCall, "Unknown map in polymorphic call") \
V(UnknownMapInPolymorphicElementAccess, \
@@ -90,7 +90,7 @@ std::ostream& operator<<(std::ostream&, DeoptimizeReason);
size_t hash_value(DeoptimizeReason reason);
-char const* const DeoptimizeReasonToString(DeoptimizeReason reason);
+char const* DeoptimizeReasonToString(DeoptimizeReason reason);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index d4756ff183..971de9ec77 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -73,13 +73,8 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
Address from,
int fp_to_sp_delta,
Isolate* isolate) {
- Deoptimizer* deoptimizer = new Deoptimizer(isolate,
- function,
- type,
- bailout_id,
- from,
- fp_to_sp_delta,
- NULL);
+ Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, type,
+ bailout_id, from, fp_to_sp_delta);
CHECK(isolate->deoptimizer_data()->current_ == NULL);
isolate->deoptimizer_data()->current_ = deoptimizer;
return deoptimizer;
@@ -108,23 +103,6 @@ Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
return result;
}
-
-int Deoptimizer::ConvertJSFrameIndexToFrameIndex(int jsframe_index) {
- if (jsframe_index == 0) return 0;
-
- int frame_index = 0;
- while (jsframe_index >= 0) {
- FrameDescription* frame = output_[frame_index];
- if (frame->GetFrameType() == StackFrame::JAVA_SCRIPT) {
- jsframe_index--;
- }
- frame_index++;
- }
-
- return frame_index - 1;
-}
-
-
DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
JavaScriptFrame* frame,
int jsframe_index,
@@ -366,8 +344,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::DeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::DeoptimizeCode);
+ TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
@@ -388,8 +365,7 @@ void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::DeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::DeoptimizeCode);
+ TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
@@ -422,8 +398,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::DeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::DeoptimizeCode);
+ TRACE_EVENT0("v8", "V8.DeoptimizeCode");
Code* code = function->code();
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
// Mark the code for deoptimization and unlink any functions that also
@@ -439,19 +414,9 @@ void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
deoptimizer->DoComputeOutputFrames();
}
-
-bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
- StackFrame::Type frame_type) {
- switch (deopt_type) {
- case EAGER:
- case SOFT:
- case LAZY:
- return (frame_type == StackFrame::STUB)
- ? FLAG_trace_stub_failures
- : FLAG_trace_deopt;
- }
- FATAL("Unsupported deopt type");
- return false;
+bool Deoptimizer::TraceEnabledFor(StackFrame::Type frame_type) {
+ return (frame_type == StackFrame::STUB) ? FLAG_trace_stub_failures
+ : FLAG_trace_deopt;
}
@@ -467,7 +432,7 @@ const char* Deoptimizer::MessageFor(BailoutType type) {
Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
BailoutType type, unsigned bailout_id, Address from,
- int fp_to_sp_delta, Code* optimized_code)
+ int fp_to_sp_delta)
: isolate_(isolate),
function_(function),
bailout_id_(bailout_id),
@@ -510,7 +475,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
function->shared()->set_opt_count(opt_count);
}
}
- compiled_code_ = FindOptimizedCode(function, optimized_code);
+ compiled_code_ = FindOptimizedCode(function);
#if DEBUG
DCHECK(compiled_code_ != NULL);
if (type == EAGER || type == SOFT || type == LAZY) {
@@ -521,8 +486,9 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
StackFrame::Type frame_type = function == NULL
? StackFrame::STUB
: StackFrame::JAVA_SCRIPT;
- trace_scope_ = TraceEnabledFor(type, frame_type) ?
- new CodeTracer::Scope(isolate->GetCodeTracer()) : NULL;
+ trace_scope_ = TraceEnabledFor(frame_type)
+ ? new CodeTracer::Scope(isolate->GetCodeTracer())
+ : NULL;
#ifdef DEBUG
CHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
@@ -539,21 +505,11 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
input_->SetFrameType(frame_type);
}
-
-Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
- Code* optimized_code) {
- switch (bailout_type_) {
- case Deoptimizer::SOFT:
- case Deoptimizer::EAGER:
- case Deoptimizer::LAZY: {
- Code* compiled_code = FindDeoptimizingCode(from_);
- return (compiled_code == NULL)
- ? static_cast<Code*>(isolate_->FindCodeObject(from_))
- : compiled_code;
- }
- }
- FATAL("Could not find code for optimized function");
- return NULL;
+Code* Deoptimizer::FindOptimizedCode(JSFunction* function) {
+ Code* compiled_code = FindDeoptimizingCode(from_);
+ return (compiled_code == NULL)
+ ? static_cast<Code*>(isolate_->FindCodeObject(from_))
+ : compiled_code;
}
@@ -912,6 +868,10 @@ void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
output_offset);
}
+ if (trace_scope_ != nullptr) {
+ PrintF(trace_scope_->file(), " -------------------------\n");
+ }
+
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Synthesize their values and set them up
// explicitly.
@@ -969,11 +929,11 @@ void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
// so long as we don't inline functions that need local contexts.
output_offset -= kPointerSize;
- TranslatedFrame::iterator context_pos = value_iterator;
- int context_input_index = input_index;
// When deoptimizing into a catch block, we need to take the context
// from just above the top of the operand stack (we push the context
// at the entry of the try block).
+ TranslatedFrame::iterator context_pos = value_iterator;
+ int context_input_index = input_index;
if (goto_catch_handler) {
for (unsigned i = 0; i < height + 1; ++i) {
context_pos++;
@@ -991,10 +951,6 @@ void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
}
value = reinterpret_cast<intptr_t>(context);
output_frame->SetContext(value);
- if (is_topmost) {
- Register context_reg = JavaScriptFrame::context_register();
- output_frame->SetRegister(context_reg.code(), value);
- }
WriteValueToOutput(context, context_input_index, frame_index, output_offset,
"context ");
if (context == isolate_->heap()->arguments_marker()) {
@@ -1011,6 +967,10 @@ void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
value = reinterpret_cast<intptr_t>(function);
WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
+ if (trace_scope_ != nullptr) {
+ PrintF(trace_scope_->file(), " -------------------------\n");
+ }
+
// Translate the rest of the frame.
for (unsigned i = 0; i < height; ++i) {
output_offset -= kPointerSize;
@@ -1060,6 +1020,15 @@ void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
: FullCodeGenerator::BailoutStateField::decode(pc_and_state);
output_frame->SetState(Smi::FromInt(static_cast<int>(state)));
+ // Clear the context register. The context might be a de-materialized object
+ // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
+ // safety we use Smi(0) instead of the potential {arguments_marker} here.
+ if (is_topmost) {
+ intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+ Register context_reg = JavaScriptFrame::context_register();
+ output_frame->SetRegister(context_reg.code(), context_value);
+ }
+
// Set the continuation for the topmost frame.
if (is_topmost) {
Builtins* builtins = isolate_->builtins();
@@ -1082,11 +1051,20 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
SharedFunctionInfo* shared = translated_frame->raw_shared_info();
TranslatedFrame::iterator value_iterator = translated_frame->begin();
+ bool is_bottommost = (0 == frame_index);
+ bool is_topmost = (output_count_ - 1 == frame_index);
int input_index = 0;
int bytecode_offset = translated_frame->node_id().ToInt();
unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
+
+ // All tranlations for interpreted frames contain the accumulator and hence
+ // are assumed to be in bailout state {BailoutState::TOS_REGISTER}. However
+ // such a state is only supported for the topmost frame. We need to skip
+ // pushing the accumulator for any non-topmost frame.
+ if (!is_topmost) height_in_bytes -= kPointerSize;
+
JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
value_iterator++;
input_index++;
@@ -1113,8 +1091,6 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
FrameDescription(output_frame_size, parameter_count);
output_frame->SetFrameType(StackFrame::INTERPRETED);
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
CHECK(frame_index >= 0 && frame_index < output_count_);
CHECK_NULL(output_[frame_index]);
output_[frame_index] = output_frame;
@@ -1137,6 +1113,10 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
output_offset);
}
+ if (trace_scope_ != nullptr) {
+ PrintF(trace_scope_->file(), " -------------------------\n");
+ }
+
// There are no translation commands for the caller's pc and fp, the
// context, the function, new.target and the bytecode offset. Synthesize
// their values and set them up
@@ -1193,7 +1173,6 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// For the bottommost output frame the context can be gotten from the input
// frame. For all subsequent output frames it can be gotten from the function
// so long as we don't inline functions that need local contexts.
- Register context_reg = InterpretedFrame::context_register();
output_offset -= kPointerSize;
// When deoptimizing into a catch block, we need to take the context
@@ -1210,13 +1189,16 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
}
// Read the context from the translations.
Object* context = context_pos->GetRawValue();
- // The context should not be a placeholder for a materialized object.
- CHECK(context != isolate_->heap()->arguments_marker());
value = reinterpret_cast<intptr_t>(context);
output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
WriteValueToOutput(context, context_input_index, frame_index, output_offset,
"context ");
+ if (context == isolate_->heap()->arguments_marker()) {
+ Address output_address =
+ reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
+ output_offset;
+ values_to_materialize_.push_back({output_address, context_pos});
+ }
value_iterator++;
input_index++;
@@ -1248,6 +1230,10 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
WriteValueToOutput(smi_bytecode_offset, 0, frame_index, output_offset,
"bytecode offset ");
+ if (trace_scope_ != nullptr) {
+ PrintF(trace_scope_->file(), " -------------------------\n");
+ }
+
// Translate the rest of the interpreter registers in the frame.
for (unsigned i = 0; i < height - 1; ++i) {
output_offset -= kPointerSize;
@@ -1255,20 +1241,30 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
output_offset);
}
- // Put the accumulator on the stack. It will be popped by the
- // InterpreterNotifyDeopt builtin (possibly after materialization).
- output_offset -= kPointerSize;
- if (goto_catch_handler) {
- // If we are lazy deopting to a catch handler, we set the accumulator to
- // the exception (which lives in the result register).
- intptr_t accumulator_value =
- input_->GetRegister(FullCodeGenerator::result_register().code());
- WriteValueToOutput(reinterpret_cast<Object*>(accumulator_value), 0,
- frame_index, output_offset, "accumulator ");
- value_iterator++;
+ // Translate the accumulator register (depending on frame position).
+ if (is_topmost) {
+ // For topmost frmae, p ut the accumulator on the stack. The bailout state
+ // for interpreted frames is always set to {BailoutState::TOS_REGISTER} and
+ // the {NotifyDeoptimized} builtin pops it off the topmost frame (possibly
+ // after materialization).
+ output_offset -= kPointerSize;
+ if (goto_catch_handler) {
+ // If we are lazy deopting to a catch handler, we set the accumulator to
+ // the exception (which lives in the result register).
+ intptr_t accumulator_value =
+ input_->GetRegister(FullCodeGenerator::result_register().code());
+ WriteValueToOutput(reinterpret_cast<Object*>(accumulator_value), 0,
+ frame_index, output_offset, "accumulator ");
+ value_iterator++;
+ } else {
+ WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
+ output_offset, "accumulator ");
+ }
} else {
- WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
- output_offset);
+ // For non-topmost frames, skip the accumulator translation. For those
+ // frames, the return value from the callee will become the accumulator.
+ value_iterator++;
+ input_index++;
}
CHECK_EQ(0u, output_offset);
@@ -1292,6 +1288,15 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
}
}
+ // Clear the context register. The context might be a de-materialized object
+ // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
+ // safety we use Smi(0) instead of the potential {arguments_marker} here.
+ if (is_topmost) {
+ intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+ Register context_reg = JavaScriptFrame::context_register();
+ output_frame->SetRegister(context_reg.code(), context_value);
+ }
+
// Set the continuation for the topmost frame.
if (is_topmost) {
Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
@@ -1595,10 +1600,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
- if (is_topmost) {
- Register context_reg = JavaScriptFrame::context_register();
- output_frame->SetRegister(context_reg.code(), value);
- }
DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
// The allocation site.
@@ -1654,6 +1655,15 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
}
}
+ // Clear the context register. The context might be a de-materialized object
+ // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
+ // safety we use Smi(0) instead of the potential {arguments_marker} here.
+ if (is_topmost) {
+ intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+ Register context_reg = JavaScriptFrame::context_register();
+ output_frame->SetRegister(context_reg.code(), context_value);
+ }
+
// Set the continuation for the topmost frame.
if (is_topmost) {
Builtins* builtins = isolate_->builtins();
@@ -1780,10 +1790,6 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
- if (is_topmost) {
- Register context_reg = JavaScriptFrame::context_register();
- output_frame->SetRegister(context_reg.code(), value);
- }
DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
// Skip receiver.
@@ -1833,6 +1839,15 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
}
}
+ // Clear the context register. The context might be a de-materialized object
+ // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
+ // safety we use Smi(0) instead of the potential {arguments_marker} here.
+ if (is_topmost) {
+ intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+ Register context_reg = JavaScriptFrame::context_register();
+ output_frame->SetRegister(context_reg.code(), context_value);
+ }
+
// Set the continuation for the topmost frame.
if (is_topmost) {
Builtins* builtins = isolate_->builtins();
@@ -2214,15 +2229,6 @@ unsigned Deoptimizer::ComputeOutgoingArgumentSize(Code* code,
return height * kPointerSize;
}
-
-Object* Deoptimizer::ComputeLiteral(int index) const {
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
- FixedArray* literals = data->LiteralArray();
- return literals->get(index);
-}
-
-
void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
BailoutType type,
int max_entry_id) {
@@ -2281,33 +2287,6 @@ FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
}
}
-
-int FrameDescription::ComputeFixedSize() {
- if (type_ == StackFrame::INTERPRETED) {
- return InterpreterFrameConstants::kFixedFrameSize +
- parameter_count() * kPointerSize;
- } else {
- return StandardFrameConstants::kFixedFrameSize +
- parameter_count() * kPointerSize;
- }
-}
-
-
-unsigned FrameDescription::GetOffsetFromSlotIndex(int slot_index) {
- if (slot_index >= 0) {
- // Local or spill slots. Skip the fixed part of the frame
- // including all arguments.
- unsigned base = GetFrameSize() - ComputeFixedSize();
- return base - ((slot_index + 1) * kPointerSize);
- } else {
- // Incoming parameter.
- int arg_size = parameter_count() * kPointerSize;
- unsigned base = GetFrameSize() - arg_size;
- return base - ((slot_index + 1) * kPointerSize);
- }
-}
-
-
void TranslationBuffer::Add(int32_t value, Zone* zone) {
// This wouldn't handle kMinInt correctly if it ever encountered it.
DCHECK(value != kMinInt);
@@ -3746,8 +3725,8 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
return object;
}
case JS_ARRAY_TYPE: {
- Handle<JSArray> object =
- isolate_->factory()->NewJSArray(0, map->elements_kind());
+ Handle<JSArray> object = Handle<JSArray>::cast(
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
slot->value_ = object;
Handle<Object> properties = MaterializeAt(frame_index, value_index);
Handle<Object> elements = MaterializeAt(frame_index, value_index);
@@ -3758,14 +3737,15 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
return object;
}
case JS_FUNCTION_TYPE: {
+ Handle<SharedFunctionInfo> temporary_shared =
+ isolate_->factory()->NewSharedFunctionInfo(
+ isolate_->factory()->empty_string(), MaybeHandle<Code>(),
+ false);
Handle<JSFunction> object =
isolate_->factory()->NewFunctionFromSharedFunctionInfo(
- handle(isolate_->object_function()->shared()),
- handle(isolate_->context()));
+ map, temporary_shared, isolate_->factory()->undefined_value(),
+ NOT_TENURED);
slot->value_ = object;
- // We temporarily allocated a JSFunction for the {Object} function
- // within the current context, to break cycles in the object graph.
- // The correct function and context will be set below once available.
Handle<Object> properties = MaterializeAt(frame_index, value_index);
Handle<Object> elements = MaterializeAt(frame_index, value_index);
Handle<Object> prototype = MaterializeAt(frame_index, value_index);
@@ -3786,6 +3766,36 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
CHECK(next_link->IsUndefined(isolate_));
return object;
}
+ case CONS_STRING_TYPE: {
+ Handle<ConsString> object = Handle<ConsString>::cast(
+ isolate_->factory()
+ ->NewConsString(isolate_->factory()->undefined_string(),
+ isolate_->factory()->undefined_string())
+ .ToHandleChecked());
+ slot->value_ = object;
+ Handle<Object> hash = MaterializeAt(frame_index, value_index);
+ Handle<Object> length = MaterializeAt(frame_index, value_index);
+ Handle<Object> first = MaterializeAt(frame_index, value_index);
+ Handle<Object> second = MaterializeAt(frame_index, value_index);
+ object->set_map(*map);
+ object->set_length(Smi::cast(*length)->value());
+ object->set_first(String::cast(*first));
+ object->set_second(String::cast(*second));
+ CHECK(hash->IsNumber()); // The {Name::kEmptyHashField} value.
+ return object;
+ }
+ case CONTEXT_EXTENSION_TYPE: {
+ Handle<ContextExtension> object =
+ isolate_->factory()->NewContextExtension(
+ isolate_->factory()->NewScopeInfo(1),
+ isolate_->factory()->undefined_value());
+ slot->value_ = object;
+ Handle<Object> scope_info = MaterializeAt(frame_index, value_index);
+ Handle<Object> extension = MaterializeAt(frame_index, value_index);
+ object->set_scope_info(ScopeInfo::cast(*scope_info));
+ object->set_extension(*extension);
+ return object;
+ }
case FIXED_ARRAY_TYPE: {
Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
int32_t length = 0;
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 7822d1cf50..4fb7851710 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -383,8 +383,7 @@ class Deoptimizer : public Malloced {
bool needs_frame;
};
- static bool TraceEnabledFor(BailoutType deopt_type,
- StackFrame::Type frame_type);
+ static bool TraceEnabledFor(StackFrame::Type frame_type);
static const char* MessageFor(BailoutType type);
int output_count() const { return output_count_; }
@@ -500,8 +499,6 @@ class Deoptimizer : public Malloced {
int count_;
};
- int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
-
static size_t GetMaxDeoptTableSize();
static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
@@ -514,14 +511,9 @@ class Deoptimizer : public Malloced {
static const int kMinNumberOfEntries = 64;
static const int kMaxNumberOfEntries = 16384;
- Deoptimizer(Isolate* isolate,
- JSFunction* function,
- BailoutType type,
- unsigned bailout_id,
- Address from,
- int fp_to_sp_delta,
- Code* optimized_code);
- Code* FindOptimizedCode(JSFunction* function, Code* optimized_code);
+ Deoptimizer(Isolate* isolate, JSFunction* function, BailoutType type,
+ unsigned bailout_id, Address from, int fp_to_sp_delta);
+ Code* FindOptimizedCode(JSFunction* function);
void PrintFunctionName();
void DeleteFrameDescriptions();
@@ -560,8 +552,6 @@ class Deoptimizer : public Malloced {
static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo* shared);
static unsigned ComputeOutgoingArgumentSize(Code* code, unsigned bailout_id);
- Object* ComputeLiteral(int index) const;
-
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
@@ -711,8 +701,6 @@ class FrameDescription {
return static_cast<uint32_t>(frame_size_);
}
- unsigned GetOffsetFromSlotIndex(int slot_index);
-
intptr_t GetFrameSlot(unsigned offset) {
return *GetFrameSlotPointer(offset);
}
@@ -833,8 +821,6 @@ class FrameDescription {
return reinterpret_cast<intptr_t*>(
reinterpret_cast<Address>(this) + frame_content_offset() + offset);
}
-
- int ComputeFixedSize();
};
diff --git a/deps/v8/src/effects.h b/deps/v8/src/effects.h
index 020471830c..f8b1bd9b2f 100644
--- a/deps/v8/src/effects.h
+++ b/deps/v8/src/effects.h
@@ -5,7 +5,7 @@
#ifndef V8_EFFECTS_H_
#define V8_EFFECTS_H_
-#include "src/types.h"
+#include "src/ast/ast-types.h"
namespace v8 {
namespace internal {
@@ -28,31 +28,31 @@ struct Effect {
enum Modality { POSSIBLE, DEFINITE };
Modality modality;
- Bounds bounds;
+ AstBounds bounds;
Effect() : modality(DEFINITE) {}
- explicit Effect(Bounds b, Modality m = DEFINITE) : modality(m), bounds(b) {}
+ explicit Effect(AstBounds b, Modality m = DEFINITE)
+ : modality(m), bounds(b) {}
// The unknown effect.
static Effect Unknown(Zone* zone) {
- return Effect(Bounds::Unbounded(), POSSIBLE);
+ return Effect(AstBounds::Unbounded(), POSSIBLE);
}
static Effect Forget(Zone* zone) {
- return Effect(Bounds::Unbounded(), DEFINITE);
+ return Effect(AstBounds::Unbounded(), DEFINITE);
}
// Sequential composition, as in 'e1; e2'.
static Effect Seq(Effect e1, Effect e2, Zone* zone) {
if (e2.modality == DEFINITE) return e2;
- return Effect(Bounds::Either(e1.bounds, e2.bounds, zone), e1.modality);
+ return Effect(AstBounds::Either(e1.bounds, e2.bounds, zone), e1.modality);
}
// Alternative composition, as in 'cond ? e1 : e2'.
static Effect Alt(Effect e1, Effect e2, Zone* zone) {
- return Effect(
- Bounds::Either(e1.bounds, e2.bounds, zone),
- e1.modality == POSSIBLE ? POSSIBLE : e2.modality);
+ return Effect(AstBounds::Either(e1.bounds, e2.bounds, zone),
+ e1.modality == POSSIBLE ? POSSIBLE : e2.modality);
}
};
@@ -84,10 +84,10 @@ class EffectsMixin: public Base {
? locator.value() : Effect::Unknown(Base::zone());
}
- Bounds LookupBounds(Var var) {
+ AstBounds LookupBounds(Var var) {
Effect effect = Lookup(var);
- return effect.modality == Effect::DEFINITE
- ? effect.bounds : Bounds::Unbounded();
+ return effect.modality == Effect::DEFINITE ? effect.bounds
+ : AstBounds::Unbounded();
}
// Sequential composition.
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 56d800168d..fb73d6c24c 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -911,6 +911,30 @@ class ElementsAccessorBase : public ElementsAccessor {
Subclass::GrowCapacityAndConvertImpl(object, capacity);
}
+ bool GrowCapacity(Handle<JSObject> object, uint32_t index) final {
+ // This function is intended to be called from optimized code. We don't
+ // want to trigger lazy deopts there, so refuse to handle cases that would.
+ if (object->map()->is_prototype_map() ||
+ object->WouldConvertToSlowElements(index)) {
+ return false;
+ }
+ Handle<FixedArrayBase> old_elements(object->elements());
+ uint32_t new_capacity = JSObject::NewElementsCapacity(index + 1);
+ DCHECK(static_cast<uint32_t>(old_elements->length()) < new_capacity);
+ Handle<FixedArrayBase> elements =
+ ConvertElementsWithCapacity(object, old_elements, kind(), new_capacity);
+
+ DCHECK_EQ(object->GetElementsKind(), kind());
+ // Transition through the allocation site as well if present.
+ if (JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kCheckOnly>(
+ object, kind())) {
+ return false;
+ }
+
+ object->set_elements(*elements);
+ return true;
+ }
+
void Delete(Handle<JSObject> obj, uint32_t entry) final {
Subclass::DeleteImpl(obj, entry);
}
@@ -1165,13 +1189,13 @@ class ElementsAccessorBase : public ElementsAccessor {
static uint32_t GetEntryForIndexImpl(JSObject* holder,
FixedArrayBase* backing_store,
uint32_t index, PropertyFilter filter) {
+ uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
if (IsHoleyElementsKind(kind())) {
- return index < Subclass::GetCapacityImpl(holder, backing_store) &&
+ return index < length &&
!BackingStore::cast(backing_store)->is_the_hole(index)
? index
: kMaxUInt32;
} else {
- uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
return index < length ? index : kMaxUInt32;
}
}
@@ -2922,8 +2946,7 @@ class SloppyArgumentsElementsAccessor
FixedArray* parameter_map = FixedArray::cast(parameters);
uint32_t length = parameter_map->length() - 2;
if (entry < length) {
- return !GetParameterMapArg(parameter_map, entry)
- ->IsTheHole(parameter_map->GetIsolate());
+ return HasParameterMapArg(parameter_map, entry);
}
FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
@@ -2951,8 +2974,7 @@ class SloppyArgumentsElementsAccessor
FixedArrayBase* parameters,
uint32_t index, PropertyFilter filter) {
FixedArray* parameter_map = FixedArray::cast(parameters);
- Object* probe = GetParameterMapArg(parameter_map, index);
- if (!probe->IsTheHole(holder->GetIsolate())) return index;
+ if (HasParameterMapArg(parameter_map, index)) return index;
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(holder, arguments,
@@ -2971,11 +2993,11 @@ class SloppyArgumentsElementsAccessor
return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length);
}
- static Object* GetParameterMapArg(FixedArray* parameter_map, uint32_t index) {
+ static bool HasParameterMapArg(FixedArray* parameter_map, uint32_t index) {
uint32_t length = parameter_map->length() - 2;
- return index < length
- ? parameter_map->get(index + 2)
- : Object::cast(parameter_map->GetHeap()->the_hole_value());
+ if (index >= length) return false;
+ return !parameter_map->get(index + 2)->IsTheHole(
+ parameter_map->GetIsolate());
}
static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
@@ -3012,7 +3034,7 @@ class SloppyArgumentsElementsAccessor
Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
uint32_t insertion_index = 0) {
- FixedArray* parameter_map = FixedArray::cast(*backing_store);
+ Handle<FixedArray> parameter_map(FixedArray::cast(*backing_store), isolate);
uint32_t length = parameter_map->length() - 2;
for (uint32_t i = 0; i < length; ++i) {
@@ -3038,18 +3060,19 @@ class SloppyArgumentsElementsAccessor
uint32_t start_from, uint32_t length) {
DCHECK(JSObject::PrototypeHasNoElements(isolate, *object));
Handle<Map> original_map = handle(object->map(), isolate);
- FixedArray* parameter_map = FixedArray::cast(object->elements());
+ Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()),
+ isolate);
bool search_for_hole = value->IsUndefined(isolate);
for (uint32_t k = start_from; k < length; ++k) {
uint32_t entry =
- GetEntryForIndexImpl(*object, parameter_map, k, ALL_PROPERTIES);
+ GetEntryForIndexImpl(*object, *parameter_map, k, ALL_PROPERTIES);
if (entry == kMaxUInt32) {
if (search_for_hole) return Just(true);
continue;
}
- Handle<Object> element_k = GetImpl(parameter_map, entry);
+ Handle<Object> element_k = GetImpl(*parameter_map, entry);
if (element_k->IsAccessorPair()) {
LookupIterator it(isolate, object, k, LookupIterator::OWN);
@@ -3078,16 +3101,17 @@ class SloppyArgumentsElementsAccessor
uint32_t start_from, uint32_t length) {
DCHECK(JSObject::PrototypeHasNoElements(isolate, *object));
Handle<Map> original_map = handle(object->map(), isolate);
- FixedArray* parameter_map = FixedArray::cast(object->elements());
+ Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()),
+ isolate);
for (uint32_t k = start_from; k < length; ++k) {
uint32_t entry =
- GetEntryForIndexImpl(*object, parameter_map, k, ALL_PROPERTIES);
+ GetEntryForIndexImpl(*object, *parameter_map, k, ALL_PROPERTIES);
if (entry == kMaxUInt32) {
continue;
}
- Handle<Object> element_k = GetImpl(parameter_map, entry);
+ Handle<Object> element_k = GetImpl(*parameter_map, entry);
if (element_k->IsAccessorPair()) {
LookupIterator it(isolate, object, k, LookupIterator::OWN);
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 1ffd4d996f..76e1aa6f39 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -114,6 +114,9 @@ class ElementsAccessor {
Handle<Map> map) = 0;
virtual void GrowCapacityAndConvert(Handle<JSObject> object,
uint32_t capacity) = 0;
+ // Unlike GrowCapacityAndConvert do not attempt to convert the backing store
+ // and simply return false in this case.
+ virtual bool GrowCapacity(Handle<JSObject> object, uint32_t index) = 0;
static void InitializeOncePerProcess();
static void TearDown();
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index c42d164603..59421c70be 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -136,8 +136,6 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
PrintDeserializedCodeInfo(Handle<JSFunction>::cast(target));
}
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::JS_Execution);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::JS_Execution);
value = CALL_GENERATED_CODE(isolate, stub_entry, orig_func, func, recv,
argc, argv);
}
@@ -436,31 +434,6 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
// --- C a l l s t o n a t i v e s ---
-Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
- Handle<JSFunction> fun,
- Handle<Object> pos,
- Handle<Object> is_global) {
- Isolate* isolate = fun->GetIsolate();
- Handle<Object> strict_mode = isolate->factory()->ToBoolean(false);
-
- MaybeHandle<Object> maybe_callsite =
- CallSiteUtils::Construct(isolate, recv, fun, pos, strict_mode);
- if (maybe_callsite.is_null()) {
- isolate->clear_pending_exception();
- return isolate->factory()->empty_string();
- }
-
- MaybeHandle<String> maybe_to_string =
- CallSiteUtils::ToString(isolate, maybe_callsite.ToHandleChecked());
- if (maybe_to_string.is_null()) {
- isolate->clear_pending_exception();
- return isolate->factory()->empty_string();
- }
-
- return maybe_to_string.ToHandleChecked();
-}
-
-
void StackGuard::HandleGCInterrupt() {
if (CheckAndClearInterrupt(GC_REQUEST)) {
isolate_->heap()->HandleGCRequest();
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 52c76280eb..6f4bb331a3 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -21,11 +21,9 @@ class Execution final : public AllStatic {
// When the function called is not in strict mode, receiver is
// converted to an object.
//
- MUST_USE_RESULT static MaybeHandle<Object> Call(Isolate* isolate,
- Handle<Object> callable,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[]);
+ V8_EXPORT_PRIVATE MUST_USE_RESULT static MaybeHandle<Object> Call(
+ Isolate* isolate, Handle<Object> callable, Handle<Object> receiver,
+ int argc, Handle<Object> argv[]);
// Construct object from function, the caller supplies an array of
// arguments.
@@ -48,11 +46,6 @@ class Execution final : public AllStatic {
Handle<Object> receiver, int argc,
Handle<Object> argv[],
MaybeHandle<Object>* exception_out = NULL);
-
- static Handle<String> GetStackTraceLine(Handle<Object> recv,
- Handle<JSFunction> fun,
- Handle<Object> pos,
- Handle<Object> is_global);
};
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 5aafb7a974..da533363af 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -67,7 +67,8 @@ void StatisticsExtension::GetCounters(
args[0]
->BooleanValue(args.GetIsolate()->GetCurrentContext())
.FromMaybe(false)) {
- heap->CollectAllGarbage(Heap::kNoGCFlags, "counters extension");
+ heap->CollectAllGarbage(Heap::kNoGCFlags,
+ GarbageCollectionReason::kCountersExtension);
}
}
@@ -116,19 +117,24 @@ void StatisticsExtension::GetCounters(
};
const StatisticNumber numbers[] = {
- {heap->memory_allocator()->Size(), "total_committed_bytes"},
+ {static_cast<intptr_t>(heap->memory_allocator()->Size()),
+ "total_committed_bytes"},
{heap->new_space()->Size(), "new_space_live_bytes"},
{heap->new_space()->Available(), "new_space_available_bytes"},
- {heap->new_space()->CommittedMemory(), "new_space_commited_bytes"},
+ {static_cast<intptr_t>(heap->new_space()->CommittedMemory()),
+ "new_space_commited_bytes"},
{heap->old_space()->Size(), "old_space_live_bytes"},
{heap->old_space()->Available(), "old_space_available_bytes"},
- {heap->old_space()->CommittedMemory(), "old_space_commited_bytes"},
+ {static_cast<intptr_t>(heap->old_space()->CommittedMemory()),
+ "old_space_commited_bytes"},
{heap->code_space()->Size(), "code_space_live_bytes"},
{heap->code_space()->Available(), "code_space_available_bytes"},
- {heap->code_space()->CommittedMemory(), "code_space_commited_bytes"},
+ {static_cast<intptr_t>(heap->code_space()->CommittedMemory()),
+ "code_space_commited_bytes"},
{heap->lo_space()->Size(), "lo_space_live_bytes"},
{heap->lo_space()->Available(), "lo_space_available_bytes"},
- {heap->lo_space()->CommittedMemory(), "lo_space_commited_bytes"},
+ {static_cast<intptr_t>(heap->lo_space()->CommittedMemory()),
+ "lo_space_commited_bytes"},
};
for (size_t i = 0; i < arraysize(numbers); i++) {
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 5833eef4b7..f908be1e47 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -215,10 +215,6 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"double_constants.minus_one_half");
Add(ExternalReference::stress_deopt_count(isolate).address(),
"Isolate::stress_deopt_count_address()");
- Add(ExternalReference::virtual_handler_register(isolate).address(),
- "Isolate::virtual_handler_register()");
- Add(ExternalReference::virtual_slot_register(isolate).address(),
- "Isolate::virtual_slot_register()");
Add(ExternalReference::runtime_function_table_address(isolate).address(),
"Runtime::runtime_function_table_address()");
Add(ExternalReference::is_tail_call_elimination_enabled_address(isolate)
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index bedcb9b61a..163e86484d 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -37,13 +37,15 @@ namespace internal {
RETURN_OBJECT_UNLESS_RETRY(ISOLATE, TYPE) \
/* Two GCs before panicking. In newspace will almost always succeed. */ \
for (int __i__ = 0; __i__ < 2; __i__++) { \
- (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(), \
- "allocation failure"); \
+ (ISOLATE)->heap()->CollectGarbage( \
+ __allocation__.RetrySpace(), \
+ GarbageCollectionReason::kAllocationFailure); \
__allocation__ = FUNCTION_CALL; \
RETURN_OBJECT_UNLESS_RETRY(ISOLATE, TYPE) \
} \
(ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \
- (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc"); \
+ (ISOLATE)->heap()->CollectAllAvailableGarbage( \
+ GarbageCollectionReason::kLastResort); \
{ \
AlwaysAllocateScope __scope__(ISOLATE); \
__allocation__ = FUNCTION_CALL; \
@@ -54,7 +56,6 @@ namespace internal {
return Handle<TYPE>(); \
} while (false)
-
template<typename T>
Handle<T> Factory::New(Handle<Map> map, AllocationSpace space) {
CALL_HEAP_FUNCTION(
@@ -91,7 +92,6 @@ Handle<Box> Factory::NewBox(Handle<Object> value) {
return result;
}
-
Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<PrototypeInfo> result =
Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE));
@@ -102,14 +102,10 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
return result;
}
-
-Handle<SloppyBlockWithEvalContextExtension>
-Factory::NewSloppyBlockWithEvalContextExtension(
- Handle<ScopeInfo> scope_info, Handle<JSObject> extension) {
- DCHECK(scope_info->is_declaration_scope());
- Handle<SloppyBlockWithEvalContextExtension> result =
- Handle<SloppyBlockWithEvalContextExtension>::cast(
- NewStruct(SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE));
+Handle<ContextExtension> Factory::NewContextExtension(
+ Handle<ScopeInfo> scope_info, Handle<Object> extension) {
+ Handle<ContextExtension> result =
+ Handle<ContextExtension>::cast(NewStruct(CONTEXT_EXTENSION_TYPE));
result->set_scope_info(*scope_info);
result->set_extension(*extension);
return result;
@@ -178,6 +174,14 @@ Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(
return array;
}
+Handle<FrameArray> Factory::NewFrameArray(int number_of_frames,
+ PretenureFlag pretenure) {
+ DCHECK_LE(0, number_of_frames);
+ Handle<FixedArray> result =
+ NewFixedArrayWithHoles(FrameArray::LengthFor(number_of_frames));
+ result->set(FrameArray::kFrameCountIndex, Smi::FromInt(0));
+ return Handle<FrameArray>::cast(result);
+}
Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kMinCapacity);
@@ -595,6 +599,19 @@ MaybeHandle<String> Factory::NewConsString(Handle<String> left,
return result;
}
+Handle<String> Factory::NewSurrogatePairString(uint16_t lead, uint16_t trail) {
+ DCHECK_GE(lead, 0xD800);
+ DCHECK_LE(lead, 0xDBFF);
+ DCHECK_GE(trail, 0xDC00);
+ DCHECK_LE(trail, 0xDFFF);
+
+ Handle<SeqTwoByteString> str =
+ isolate()->factory()->NewRawTwoByteString(2).ToHandleChecked();
+ uc16* dest = str->GetChars();
+ dest[0] = lead;
+ dest[1] = trail;
+ return str;
+}
Handle<String> Factory::NewProperSubString(Handle<String> str,
int begin,
@@ -729,6 +746,17 @@ Handle<ExternalOneByteString> Factory::NewNativeSourceString(
return external_string;
}
+Handle<JSStringIterator> Factory::NewJSStringIterator(Handle<String> string) {
+ Handle<Map> map(isolate()->native_context()->string_iterator_map(),
+ isolate());
+ Handle<String> flat_string = String::Flatten(string);
+ Handle<JSStringIterator> iterator =
+ Handle<JSStringIterator>::cast(NewJSObjectFromMap(map));
+ iterator->set_string(*flat_string);
+ iterator->set_index(0);
+
+ return iterator;
+}
Handle<Symbol> Factory::NewSymbol() {
CALL_HEAP_FUNCTION(
@@ -784,15 +812,19 @@ Handle<ScriptContextTable> Factory::NewScriptContextTable() {
return context_table;
}
-
-Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
+Handle<Context> Factory::NewModuleContext(Handle<Module> module,
+ Handle<JSFunction> function,
+ Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
Handle<FixedArray> array =
NewFixedArray(scope_info->ContextLength(), TENURED);
array->set_map_no_write_barrier(*module_context_map());
- // Instance link will be set later.
Handle<Context> context = Handle<Context>::cast(array);
- context->set_extension(*the_hole_value());
+ context->set_closure(*function);
+ context->set_previous(function->context());
+ context->set_extension(*module);
+ context->set_native_context(function->native_context());
+ DCHECK(context->IsModuleContext());
return context;
}
@@ -811,35 +843,41 @@ Handle<Context> Factory::NewFunctionContext(int length,
return context;
}
-
Handle<Context> Factory::NewCatchContext(Handle<JSFunction> function,
Handle<Context> previous,
+ Handle<ScopeInfo> scope_info,
Handle<String> name,
Handle<Object> thrown_object) {
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
+ Handle<ContextExtension> extension = NewContextExtension(scope_info, name);
Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
array->set_map_no_write_barrier(*catch_context_map());
Handle<Context> context = Handle<Context>::cast(array);
context->set_closure(*function);
context->set_previous(*previous);
- context->set_extension(*name);
+ context->set_extension(*extension);
context->set_native_context(previous->native_context());
context->set(Context::THROWN_OBJECT_INDEX, *thrown_object);
return context;
}
Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
+ Handle<ScopeInfo> scope_info,
Handle<JSReceiver> extension,
Handle<Context> wrapped,
Handle<StringSet> whitelist) {
STATIC_ASSERT(Context::WHITE_LIST_INDEX == Context::MIN_CONTEXT_SLOTS + 1);
+ DCHECK(scope_info->IsDebugEvaluateScope());
+ Handle<ContextExtension> context_extension = NewContextExtension(
+ scope_info, extension.is_null() ? Handle<Object>::cast(undefined_value())
+ : Handle<Object>::cast(extension));
Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS + 2);
array->set_map_no_write_barrier(*debug_evaluate_context_map());
Handle<Context> c = Handle<Context>::cast(array);
c->set_closure(wrapped.is_null() ? previous->closure() : wrapped->closure());
c->set_previous(*previous);
c->set_native_context(previous->native_context());
- if (!extension.is_null()) c->set(Context::EXTENSION_INDEX, *extension);
+ c->set_extension(*context_extension);
if (!wrapped.is_null()) c->set(Context::WRAPPED_CONTEXT_INDEX, *wrapped);
if (!whitelist.is_null()) c->set(Context::WHITE_LIST_INDEX, *whitelist);
return c;
@@ -847,13 +885,16 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
Handle<Context> previous,
+ Handle<ScopeInfo> scope_info,
Handle<JSReceiver> extension) {
+ Handle<ContextExtension> context_extension =
+ NewContextExtension(scope_info, extension);
Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS);
array->set_map_no_write_barrier(*with_context_map());
Handle<Context> context = Handle<Context>::cast(array);
context->set_closure(*function);
context->set_previous(*previous);
- context->set_extension(*extension);
+ context->set_extension(*context_extension);
context->set_native_context(previous->native_context());
return context;
}
@@ -881,6 +922,20 @@ Handle<Struct> Factory::NewStruct(InstanceType type) {
Struct);
}
+Handle<PromiseContainer> Factory::NewPromiseContainer(
+ Handle<JSReceiver> thenable, Handle<JSReceiver> then,
+ Handle<JSFunction> resolve, Handle<JSFunction> reject,
+ Handle<Object> before_debug_event, Handle<Object> after_debug_event) {
+ Handle<PromiseContainer> result =
+ Handle<PromiseContainer>::cast(NewStruct(PROMISE_CONTAINER_TYPE));
+ result->set_thenable(*thenable);
+ result->set_then(*then);
+ result->set_resolve(*resolve);
+ result->set_reject(*reject);
+ result->set_before_debug_event(*before_debug_event);
+ result->set_after_debug_event(*after_debug_event);
+ return result;
+}
Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
int aliased_context_slot) {
@@ -1196,6 +1251,13 @@ Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
return maybe_error.ToHandleChecked();
}
+Handle<Object> Factory::NewInvalidStringLengthError() {
+ // Invalidate the "string length" protector.
+ if (isolate()->IsStringLengthOverflowIntact()) {
+ isolate()->InvalidateStringLengthOverflowProtector();
+ }
+ return NewRangeError(MessageTemplate::kInvalidStringLength);
+}
#define DEFINE_ERROR(NAME, name) \
Handle<Object> Factory::New##NAME(MessageTemplate::Template template_index, \
@@ -1296,7 +1358,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name, Handle<Code> code,
// TODO(littledan): Why do we have this is_generator test when
// NewFunctionPrototype already handles finding an appropriately
// shared prototype?
- if (!function->shared()->is_resumable()) {
+ if (!IsResumableFunction(function->shared()->kind())) {
if (prototype->IsTheHole(isolate())) {
prototype = NewFunctionPrototype(function);
}
@@ -1322,12 +1384,11 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
// can be from a different context.
Handle<Context> native_context(function->context()->native_context());
Handle<Map> new_map;
- if (function->shared()->is_resumable()) {
+ if (IsResumableFunction(function->shared()->kind())) {
// Generator and async function prototypes can share maps since they
// don't have "constructor" properties.
new_map = handle(native_context->generator_object_prototype_map());
} else {
- CHECK(!function->shared()->is_async());
// Each function prototype gets a fresh map to avoid unwanted sharing of
// maps between prototypes of different constructors.
Handle<JSFunction> object_function(native_context->object_function());
@@ -1338,7 +1399,7 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
DCHECK(!new_map->is_prototype_map());
Handle<JSObject> prototype = NewJSObjectFromMap(new_map);
- if (!function->shared()->is_resumable()) {
+ if (!IsResumableFunction(function->shared()->kind())) {
JSObject::AddProperty(prototype, constructor_string(), function, DONT_ENUM);
}
@@ -1385,6 +1446,17 @@ Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
return scope_info;
}
+Handle<ModuleInfoEntry> Factory::NewModuleInfoEntry() {
+ Handle<FixedArray> array = NewFixedArray(ModuleInfoEntry::kLength, TENURED);
+ array->set_map_no_write_barrier(*module_info_entry_map());
+ return Handle<ModuleInfoEntry>::cast(array);
+}
+
+Handle<ModuleInfo> Factory::NewModuleInfo() {
+ Handle<FixedArray> array = NewFixedArray(ModuleInfo::kLength, TENURED);
+ array->set_map_no_write_barrier(*module_info_map());
+ return Handle<ModuleInfo>::cast(array);
+}
Handle<JSObject> Factory::NewExternal(void* value) {
Handle<Foreign> foreign = NewForeign(static_cast<Address>(value));
@@ -1666,7 +1738,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array,
Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
Handle<JSFunction> function) {
- DCHECK(function->shared()->is_resumable());
+ DCHECK(IsResumableFunction(function->shared()->kind()));
JSFunction::EnsureHasInitialMap(function);
Handle<Map> map(function->initial_map());
DCHECK_EQ(JS_GENERATOR_OBJECT_TYPE, map->instance_type());
@@ -1676,6 +1748,29 @@ Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
JSGeneratorObject);
}
+Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
+ Handle<ModuleInfo> module_info(code->scope_info()->ModuleDescriptorInfo(),
+ isolate());
+ Handle<ObjectHashTable> exports =
+ ObjectHashTable::New(isolate(), module_info->regular_exports()->length());
+ int requested_modules_length = module_info->module_requests()->length();
+ Handle<FixedArray> requested_modules =
+ requested_modules_length > 0 ? NewFixedArray(requested_modules_length)
+ : empty_fixed_array();
+
+ // To make it easy to hash Modules, we set a new symbol as the name of
+ // SharedFunctionInfo representing this Module.
+ Handle<Symbol> name_symbol = NewSymbol();
+ code->set_name(*name_symbol);
+
+ Handle<Module> module = Handle<Module>::cast(NewStruct(MODULE_TYPE));
+ module->set_code(*code);
+ module->set_exports(*exports);
+ module->set_requested_modules(*requested_modules);
+ module->set_flags(0);
+ module->set_embedder_data(isolate()->heap()->undefined_value());
+ return module;
+}
Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared,
PretenureFlag pretenure) {
@@ -1698,6 +1793,15 @@ Handle<JSDataView> Factory::NewJSDataView() {
JSDataView);
}
+Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
+ bool done) {
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
+ Handle<JSIteratorResult> js_iter_result =
+ Handle<JSIteratorResult>::cast(NewJSObjectFromMap(map));
+ js_iter_result->set_value(*value);
+ js_iter_result->set_done(*ToBoolean(done));
+ return js_iter_result;
+}
Handle<JSMap> Factory::NewJSMap() {
Handle<Map> map(isolate()->native_context()->js_map_map());
@@ -2066,6 +2170,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
name, code, IsConstructable(kind, scope_info->language_mode()));
shared->set_scope_info(*scope_info);
+ shared->set_outer_scope_info(*the_hole_value());
shared->set_kind(kind);
shared->set_num_literals(number_of_literals);
if (IsGeneratorFunction(kind)) {
@@ -2112,6 +2217,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_code(*code);
share->set_optimized_code_map(*cleared_optimized_code_map());
share->set_scope_info(ScopeInfo::Empty(isolate()));
+ share->set_outer_scope_info(*the_hole_value());
Handle<Code> construct_stub =
is_constructor ? isolate()->builtins()->JSConstructStubGeneric()
: isolate()->builtins()->ConstructedNonConstructable();
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 4908d5fad8..82c2317cc0 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -28,9 +28,8 @@ class Factory final {
byte kind);
// Allocates a fixed array initialized with undefined values.
- Handle<FixedArray> NewFixedArray(
- int size,
- PretenureFlag pretenure = NOT_TENURED);
+ V8_EXPORT_PRIVATE Handle<FixedArray> NewFixedArray(
+ int size, PretenureFlag pretenure = NOT_TENURED);
// Allocate a new fixed array with non-existing entries (the hole).
Handle<FixedArray> NewFixedArrayWithHoles(
@@ -52,19 +51,27 @@ class Factory final {
int size,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<FrameArray> NewFrameArray(int number_of_frames,
+ PretenureFlag pretenure = NOT_TENURED);
+
Handle<OrderedHashSet> NewOrderedHashSet();
Handle<OrderedHashMap> NewOrderedHashMap();
// Create a new boxed value.
Handle<Box> NewBox(Handle<Object> value);
+ // Create a new PromiseContainer struct.
+ Handle<PromiseContainer> NewPromiseContainer(
+ Handle<JSReceiver> thenable, Handle<JSReceiver> then,
+ Handle<JSFunction> resolve, Handle<JSFunction> reject,
+ Handle<Object> before_debug_event, Handle<Object> after_debug_event);
+
// Create a new PrototypeInfo struct.
Handle<PrototypeInfo> NewPrototypeInfo();
- // Create a new SloppyBlockWithEvalContextExtension struct.
- Handle<SloppyBlockWithEvalContextExtension>
- NewSloppyBlockWithEvalContextExtension(Handle<ScopeInfo> scope_info,
- Handle<JSObject> extension);
+ // Create a new ContextExtension struct.
+ Handle<ContextExtension> NewContextExtension(Handle<ScopeInfo> scope_info,
+ Handle<Object> extension);
// Create a pre-tenured empty AccessorPair.
Handle<AccessorPair> NewAccessorPair();
@@ -74,7 +81,8 @@ class Factory final {
// Finds the internalized copy for string in the string table.
// If not found, a new string is added to the table and returned.
- Handle<String> InternalizeUtf8String(Vector<const char> str);
+ V8_EXPORT_PRIVATE Handle<String> InternalizeUtf8String(
+ Vector<const char> str);
Handle<String> InternalizeUtf8String(const char* str) {
return InternalizeUtf8String(CStrVector(str));
}
@@ -119,9 +127,8 @@ class Factory final {
// will be converted to Latin1, otherwise it will be left as two-byte.
//
// One-byte strings are pretenured when used as keys in the SourceCodeCache.
- MUST_USE_RESULT MaybeHandle<String> NewStringFromOneByte(
- Vector<const uint8_t> str,
- PretenureFlag pretenure = NOT_TENURED);
+ V8_EXPORT_PRIVATE MUST_USE_RESULT MaybeHandle<String> NewStringFromOneByte(
+ Vector<const uint8_t> str, PretenureFlag pretenure = NOT_TENURED);
template <size_t N>
inline Handle<String> NewStringFromStaticChars(
@@ -163,17 +170,17 @@ class Factory final {
// UTF8 strings are pretenured when used for regexp literal patterns and
// flags in the parser.
- MUST_USE_RESULT MaybeHandle<String> NewStringFromUtf8(
- Vector<const char> str,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT V8_EXPORT_PRIVATE MaybeHandle<String> NewStringFromUtf8(
+ Vector<const char> str, PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
- Vector<const uc16> str,
- PretenureFlag pretenure = NOT_TENURED);
+ V8_EXPORT_PRIVATE MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
+ Vector<const uc16> str, PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
const ZoneVector<uc16>* str, PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSStringIterator> NewJSStringIterator(Handle<String> string);
+
// Allocates an internalized string in old space based on the character
// stream.
Handle<String> NewInternalizedStringFromUtf8(Vector<const char> str,
@@ -215,6 +222,10 @@ class Factory final {
MUST_USE_RESULT MaybeHandle<String> NewConsString(Handle<String> left,
Handle<String> right);
+ // Create or lookup a single characters tring made up of a utf16 surrogate
+ // pair.
+ Handle<String> NewSurrogatePairString(uint16_t lead, uint16_t trail);
+
// Create a new string object which holds a proper substring of a string.
Handle<String> NewProperSubString(Handle<String> str,
int begin,
@@ -255,7 +266,9 @@ class Factory final {
Handle<ScriptContextTable> NewScriptContextTable();
// Create a module context.
- Handle<Context> NewModuleContext(Handle<ScopeInfo> scope_info);
+ Handle<Context> NewModuleContext(Handle<Module> module,
+ Handle<JSFunction> function,
+ Handle<ScopeInfo> scope_info);
// Create a function context.
Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function);
@@ -263,15 +276,18 @@ class Factory final {
// Create a catch context.
Handle<Context> NewCatchContext(Handle<JSFunction> function,
Handle<Context> previous,
+ Handle<ScopeInfo> scope_info,
Handle<String> name,
Handle<Object> thrown_object);
// Create a 'with' context.
Handle<Context> NewWithContext(Handle<JSFunction> function,
Handle<Context> previous,
+ Handle<ScopeInfo> scope_info,
Handle<JSReceiver> extension);
Handle<Context> NewDebugEvaluateContext(Handle<Context> previous,
+ Handle<ScopeInfo> scope_info,
Handle<JSReceiver> extension,
Handle<Context> wrapped,
Handle<StringSet> whitelist);
@@ -290,7 +306,7 @@ class Factory final {
Handle<AccessorInfo> NewAccessorInfo();
- Handle<Script> NewScript(Handle<String> source);
+ V8_EXPORT_PRIVATE Handle<Script> NewScript(Handle<String> source);
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(Address addr,
@@ -434,7 +450,7 @@ class Factory final {
// Create a JSArray with a specified length and elements initialized
// according to the specified mode.
- Handle<JSArray> NewJSArray(
+ V8_EXPORT_PRIVATE Handle<JSArray> NewJSArray(
ElementsKind elements_kind, int length, int capacity,
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);
@@ -450,11 +466,11 @@ class Factory final {
}
// Create a JSArray with the given elements.
- Handle<JSArray> NewJSArrayWithElements(Handle<FixedArrayBase> elements,
- ElementsKind elements_kind, int length,
- PretenureFlag pretenure = NOT_TENURED);
+ V8_EXPORT_PRIVATE Handle<JSArray> NewJSArrayWithElements(
+ Handle<FixedArrayBase> elements, ElementsKind elements_kind, int length,
+ PretenureFlag pretenure = NOT_TENURED);
- Handle<JSArray> NewJSArrayWithElements(
+ V8_EXPORT_PRIVATE Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
PretenureFlag pretenure = NOT_TENURED) {
@@ -470,6 +486,8 @@ class Factory final {
Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
+ Handle<Module> NewModule(Handle<SharedFunctionInfo> code);
+
Handle<JSArrayBuffer> NewJSArrayBuffer(
SharedFlag shared = SharedFlag::kNotShared,
PretenureFlag pretenure = NOT_TENURED);
@@ -495,6 +513,8 @@ class Factory final {
Handle<JSDataView> NewJSDataView(Handle<JSArrayBuffer> buffer,
size_t byte_offset, size_t byte_length);
+ Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value, bool done);
+
Handle<JSMap> NewJSMap();
Handle<JSSet> NewJSSet();
@@ -554,6 +574,9 @@ class Factory final {
// Create a serialized scope info.
Handle<ScopeInfo> NewScopeInfo(int length);
+ Handle<ModuleInfoEntry> NewModuleInfoEntry();
+ Handle<ModuleInfo> NewModuleInfo();
+
// Create an External object for V8's external API.
Handle<JSObject> NewExternal(void* value);
@@ -576,9 +599,7 @@ class Factory final {
Handle<Object> NewError(Handle<JSFunction> constructor,
Handle<String> message);
- Handle<Object> NewInvalidStringLengthError() {
- return NewRangeError(MessageTemplate::kInvalidStringLength);
- }
+ Handle<Object> NewInvalidStringLengthError();
Handle<Object> NewURIError() {
return NewError(isolate()->uri_error_function(),
diff --git a/deps/v8/src/fast-accessor-assembler.cc b/deps/v8/src/fast-accessor-assembler.cc
index ebaab9a529..a9cde70a53 100644
--- a/deps/v8/src/fast-accessor-assembler.cc
+++ b/deps/v8/src/fast-accessor-assembler.cc
@@ -179,27 +179,35 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::Call(
ExternalReference::DIRECT_API_CALL, isolate());
// Create & call API callback via stub.
- CallApiCallbackStub stub(isolate(), 1, true, true);
- DCHECK_EQ(5, stub.GetCallInterfaceDescriptor().GetParameterCount());
- DCHECK_EQ(1, stub.GetCallInterfaceDescriptor().GetStackParameterCount());
+ const int kJSParameterCount = 1;
+ CallApiCallbackStub stub(isolate(), kJSParameterCount, true, true);
+ CallInterfaceDescriptor descriptor = stub.GetCallInterfaceDescriptor();
+ DCHECK_EQ(4, descriptor.GetParameterCount());
+ DCHECK_EQ(0, descriptor.GetStackParameterCount());
// TODO(vogelheim): There is currently no clean way to retrieve the context
// parameter for a stub and the implementation details are hidden in
// compiler/*. The context_paramter is computed as:
// Linkage::GetJSCallContextParamIndex(descriptor->JSParameterCount())
- const int context_parameter = 3;
- Node* call = assembler_->CallStub(
- stub.GetCallInterfaceDescriptor(),
- assembler_->HeapConstant(stub.GetCode()),
- assembler_->Parameter(context_parameter),
-
- // Stub/register parameters:
- assembler_->UndefinedConstant(), /* callee (there's no JSFunction) */
- assembler_->UndefinedConstant(), /* call_data (undefined) */
- assembler_->Parameter(0), /* receiver (same as holder in this case) */
- assembler_->ExternalConstant(callback), /* API callback function */
-
- // JS arguments, on stack:
- FromId(arg));
+ const int kContextParameter = 3;
+ Node* context = assembler_->Parameter(kContextParameter);
+ Node* target = assembler_->HeapConstant(stub.GetCode());
+
+ int param_count = descriptor.GetParameterCount();
+ Node** args = zone()->NewArray<Node*>(param_count + 1 + kJSParameterCount);
+ // Stub/register parameters:
+ args[0] = assembler_->UndefinedConstant(); // callee (there's no JSFunction)
+ args[1] = assembler_->UndefinedConstant(); // call_data (undefined)
+ args[2] = assembler_->Parameter(0); // receiver (same as holder in this case)
+ args[3] = assembler_->ExternalConstant(callback); // API callback function
+
+ // JS arguments, on stack:
+ args[4] = FromId(arg);
+
+ // Context.
+ args[5] = context;
+
+ Node* call =
+ assembler_->CallStubN(descriptor, kJSParameterCount, target, args);
return FromRaw(call);
}
diff --git a/deps/v8/src/field-type.cc b/deps/v8/src/field-type.cc
index 2e4cbfbedd..b3b24e2c14 100644
--- a/deps/v8/src/field-type.cc
+++ b/deps/v8/src/field-type.cc
@@ -4,9 +4,9 @@
#include "src/field-type.h"
+#include "src/ast/ast-types.h"
#include "src/handles-inl.h"
#include "src/ostreams.h"
-#include "src/types.h"
namespace v8 {
namespace internal {
@@ -71,11 +71,11 @@ bool FieldType::NowIs(FieldType* other) {
bool FieldType::NowIs(Handle<FieldType> other) { return NowIs(*other); }
-Type* FieldType::Convert(Zone* zone) {
- if (IsAny()) return Type::NonInternal();
- if (IsNone()) return Type::None();
+AstType* FieldType::Convert(Zone* zone) {
+ if (IsAny()) return AstType::NonInternal();
+ if (IsNone()) return AstType::None();
DCHECK(IsClass());
- return Type::Class(AsClass(), zone);
+ return AstType::Class(AsClass(), zone);
}
void FieldType::PrintTo(std::ostream& os) {
diff --git a/deps/v8/src/field-type.h b/deps/v8/src/field-type.h
index eb7ffcab47..11e1069c83 100644
--- a/deps/v8/src/field-type.h
+++ b/deps/v8/src/field-type.h
@@ -5,6 +5,7 @@
#ifndef V8_FIELD_TYPE_H_
#define V8_FIELD_TYPE_H_
+#include "src/ast/ast-types.h"
#include "src/handles.h"
#include "src/objects.h"
#include "src/ostreams.h"
@@ -38,7 +39,7 @@ class FieldType : public Object {
bool NowStable();
bool NowIs(FieldType* other);
bool NowIs(Handle<FieldType> other);
- Type* Convert(Zone* zone);
+ AstType* Convert(Zone* zone);
void PrintTo(std::ostream& os);
};
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index e5ddbadd2c..779a58949e 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -23,14 +23,21 @@
// this will just be an extern declaration, but for a readonly flag we let the
// compiler make better optimizations by giving it the value.
#if defined(FLAG_MODE_DECLARE)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) extern ctype FLAG_##nam;
+#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
+ V8_EXPORT_PRIVATE extern ctype FLAG_##nam;
#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
static ctype const FLAG_##nam = def;
// We want to supply the actual storage and value for the flag variable in the
// .cc file. We only do this for writable flags.
#elif defined(FLAG_MODE_DEFINE)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) ctype FLAG_##nam = def;
+#ifdef USING_V8_SHARED
+#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
+ V8_EXPORT_PRIVATE extern ctype FLAG_##nam;
+#else
+#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
+ V8_EXPORT_PRIVATE ctype FLAG_##nam = def;
+#endif
// We need to define all of our default values so that the Flag structure can
// access them by pointer. These are just used internally inside of one .cc,
@@ -119,31 +126,27 @@ struct MaybeBoolFlag {
#else
#define DEBUG_BOOL false
#endif
-#if (defined CAN_USE_VFP3_INSTRUCTIONS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
-#define ENABLE_VFP3_DEFAULT true
-#else
-#define ENABLE_VFP3_DEFAULT false
-#endif
-#if (defined CAN_USE_ARMV7_INSTRUCTIONS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
-#define ENABLE_ARMV7_DEFAULT true
-#else
-#define ENABLE_ARMV7_DEFAULT false
-#endif
-#if (defined CAN_USE_ARMV8_INSTRUCTIONS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
-#define ENABLE_ARMV8_DEFAULT true
-#else
-#define ENABLE_ARMV8_DEFAULT false
-#endif
-#if (defined CAN_USE_VFP32DREGS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
-#define ENABLE_32DREGS_DEFAULT true
-#else
-#define ENABLE_32DREGS_DEFAULT false
-#endif
-#if (defined CAN_USE_NEON) || !(defined ARM_TEST_NO_FEATURE_PROBE)
-# define ENABLE_NEON_DEFAULT true
+
+// Supported ARM configurations are:
+// "armv6": ARMv6 + VFPv2
+// "armv7": ARMv7 + VFPv3-D32 + NEON
+// "armv7+sudiv": ARMv7 + VFPv4-D32 + NEON + SUDIV
+// "armv8": ARMv8 (including all of the above)
+#if !defined(ARM_TEST_NO_FEATURE_PROBE) || \
+ (defined(CAN_USE_ARMV8_INSTRUCTIONS) && \
+ defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \
+ defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS))
+#define ARM_ARCH_DEFAULT "armv8"
+#elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \
+ defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS)
+#define ARM_ARCH_DEFAULT "armv7+sudiv"
+#elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_NEON) && \
+ defined(CAN_USE_VFP3_INSTRUCTIONS)
+#define ARM_ARCH_DEFAULT "armv7"
#else
-# define ENABLE_NEON_DEFAULT false
+#define ARM_ARCH_DEFAULT "armv6"
#endif
+
#ifdef V8_OS_WIN
# define ENABLE_LOG_COLOUR false
#else
@@ -184,9 +187,6 @@ DEFINE_BOOL(harmony, false, "enable all completed harmony features")
DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
-DEFINE_BOOL(intl_extra, false, "additional V8 Intl functions")
-// Removing extra Intl functions is shipped
-DEFINE_NEG_VALUE_IMPLICATION(harmony_shipping, intl_extra, true)
// Activate on ClusterFuzz.
DEFINE_IMPLICATION(es_staging, harmony_regexp_lookbehind)
@@ -198,7 +198,6 @@ DEFINE_IMPLICATION(es_staging, move_object_start)
V(harmony_function_sent, "harmony function.sent") \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_simd, "harmony simd") \
- V(harmony_explicit_tailcalls, "harmony explicit tail calls") \
V(harmony_do_expressions, "harmony do-expressions") \
V(harmony_restrictive_generators, \
"harmony restrictions on generator declarations") \
@@ -206,18 +205,19 @@ DEFINE_IMPLICATION(es_staging, move_object_start)
V(harmony_regexp_property, "harmony unicode regexp property classes") \
V(harmony_for_in, "harmony for-in syntax") \
V(harmony_trailing_commas, \
- "harmony trailing commas in function parameter lists")
+ "harmony trailing commas in function parameter lists") \
+ V(harmony_class_fields, "harmony public fields in class literals")
// Features that are complete (but still behind --harmony/es-staging flag).
#define HARMONY_STAGED_BASE(V) \
V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
V(harmony_tailcalls, "harmony tail calls") \
- V(harmony_async_await, "harmony async-await") \
V(harmony_string_padding, "harmony String-padding methods")
#ifdef V8_I18N_SUPPORT
-#define HARMONY_STAGED(V) \
- HARMONY_STAGED_BASE(V) \
+#define HARMONY_STAGED(V) \
+ HARMONY_STAGED_BASE(V) \
+ V(datetime_format_to_parts, "Intl.DateTimeFormat.formatToParts") \
V(icu_case_mapping, "case mapping with ICU rather than Unibrow")
#else
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
@@ -225,6 +225,7 @@ DEFINE_IMPLICATION(es_staging, move_object_start)
// Features that are shipping (turned on by default, but internal flag remains).
#define HARMONY_SHIPPING(V) \
+ V(harmony_async_await, "harmony async-await") \
V(harmony_restrictive_declarations, \
"harmony limitations on sloppy mode function declarations") \
V(harmony_object_values_entries, "harmony Object.values / Object.entries") \
@@ -335,7 +336,7 @@ DEFINE_BOOL(use_write_barrier_elimination, true,
DEFINE_INT(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_INT(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
-DEFINE_INT(max_inlined_nodes, 196,
+DEFINE_INT(max_inlined_nodes, 200,
"maximum number of AST nodes considered for a single inlining")
DEFINE_INT(max_inlined_nodes_cumulative, 400,
"maximum cumulative number of AST nodes considered for inlining")
@@ -405,6 +406,8 @@ DEFINE_BOOL(flush_optimized_code_cache, false,
DEFINE_BOOL(inline_construct, true, "inline constructor calls")
DEFINE_BOOL(inline_arguments, true, "inline functions with arguments object")
DEFINE_BOOL(inline_accessors, true, "inline JavaScript accessors")
+DEFINE_BOOL(inline_into_try, false, "inline into try blocks")
+DEFINE_IMPLICATION(turbo, inline_into_try)
DEFINE_INT(escape_analysis_iterations, 2,
"maximum number of escape analysis fix-point iterations")
@@ -450,6 +453,8 @@ DEFINE_BOOL(turbo_asm, true, "enable TurboFan for asm.js code")
DEFINE_BOOL(turbo_asm_deoptimization, false,
"enable deoptimization in TurboFan for asm.js code")
DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
+DEFINE_BOOL(turbo_verify_machine_graph, false,
+ "verify TurboFan machine graph before instruction selection")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
DEFINE_BOOL(turbo_stats_nvp, false,
"print TurboFan statistics in machine-readable format")
@@ -487,9 +492,8 @@ DEFINE_BOOL(turbo_instruction_scheduling, false,
"enable instruction scheduling in TurboFan")
DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
"randomly schedule instructions to stress dependency tracking")
-DEFINE_BOOL(turbo_store_elimination, false,
+DEFINE_BOOL(turbo_store_elimination, true,
"enable store-store elimination in TurboFan")
-DEFINE_IMPLICATION(turbo, turbo_store_elimination)
// Flags to help platform porters
DEFINE_BOOL(minimal, false,
@@ -529,6 +533,12 @@ DEFINE_BOOL(wasm_simd_prototype, false,
"enable prototype simd opcodes for wasm")
DEFINE_BOOL(wasm_eh_prototype, false,
"enable prototype exception handling opcodes for wasm")
+DEFINE_BOOL(wasm_mv_prototype, false,
+ "enable prototype multi-value support for wasm")
+
+DEFINE_BOOL(wasm_trap_handler, false,
+ "use signal handlers to catch out of bounds memory access in wasm"
+ " (currently Linux x86_64 only)")
// Profiler flags.
DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
@@ -548,6 +558,7 @@ DEFINE_IMPLICATION(trace_opt_verbose, trace_opt)
DEFINE_BOOL(debug_code, false, "generate extra code (assertions) for debugging")
DEFINE_BOOL(code_comments, false, "emit comments in code disassembly")
DEFINE_BOOL(enable_sse3, true, "enable use of SSE3 instructions if available")
+DEFINE_BOOL(enable_ssse3, true, "enable use of SSSE3 instructions if available")
DEFINE_BOOL(enable_sse4_1, true,
"enable use of SSE4.1 instructions if available")
DEFINE_BOOL(enable_sahf, true,
@@ -559,35 +570,29 @@ DEFINE_BOOL(enable_bmi2, true, "enable use of BMI2 instructions if available")
DEFINE_BOOL(enable_lzcnt, true, "enable use of LZCNT instruction if available")
DEFINE_BOOL(enable_popcnt, true,
"enable use of POPCNT instruction if available")
-DEFINE_BOOL(enable_vfp3, ENABLE_VFP3_DEFAULT,
- "enable use of VFP3 instructions if available")
-DEFINE_BOOL(enable_armv7, ENABLE_ARMV7_DEFAULT,
- "enable use of ARMv7 instructions if available (ARM only)")
-DEFINE_BOOL(enable_armv8, ENABLE_ARMV8_DEFAULT,
- "enable use of ARMv8 instructions if available (ARM 32-bit only)")
-DEFINE_BOOL(enable_neon, ENABLE_NEON_DEFAULT,
- "enable use of NEON instructions if available (ARM only)")
-DEFINE_BOOL(enable_sudiv, true,
- "enable use of SDIV and UDIV instructions if available (ARM only)")
-DEFINE_BOOL(enable_movw_movt, false,
- "enable loading 32-bit constant by means of movw/movt "
- "instruction pairs (ARM only)")
-DEFINE_BOOL(enable_32dregs, ENABLE_32DREGS_DEFAULT,
- "enable use of d16-d31 registers on ARM - this requires VFP3")
+DEFINE_STRING(arm_arch, ARM_ARCH_DEFAULT,
+ "generate instructions for the selected ARM architecture if "
+ "available: armv6, armv7, armv7+sudiv or armv8")
DEFINE_BOOL(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
DEFINE_BOOL(force_long_branches, false,
"force all emitted branches to be in long mode (MIPS/PPC only)")
DEFINE_STRING(mcpu, "auto", "enable optimization for specific cpu")
+// Deprecated ARM flags (replaced by arm_arch).
+DEFINE_MAYBE_BOOL(enable_armv7, "deprecated (use --arm_arch instead)")
+DEFINE_MAYBE_BOOL(enable_vfp3, "deprecated (use --arm_arch instead)")
+DEFINE_MAYBE_BOOL(enable_32dregs, "deprecated (use --arm_arch instead)")
+DEFINE_MAYBE_BOOL(enable_neon, "deprecated (use --arm_arch instead)")
+DEFINE_MAYBE_BOOL(enable_sudiv, "deprecated (use --arm_arch instead)")
+DEFINE_MAYBE_BOOL(enable_armv8, "deprecated (use --arm_arch instead)")
+
// regexp-macro-assembler-*.cc
DEFINE_BOOL(enable_regexp_unaligned_accesses, true,
"enable unaligned accesses for the regexp engine")
-DEFINE_IMPLICATION(enable_armv8, enable_vfp3)
-DEFINE_IMPLICATION(enable_armv8, enable_neon)
-DEFINE_IMPLICATION(enable_armv8, enable_32dregs)
-DEFINE_IMPLICATION(enable_armv8, enable_sudiv)
+// api.cc
+DEFINE_BOOL(script_streaming, true, "enable parsing on background")
// bootstrapper.cc
DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
@@ -711,8 +716,6 @@ DEFINE_BOOL(trace_idle_notification, false,
"print one trace line following each idle notification")
DEFINE_BOOL(trace_idle_notification_verbose, false,
"prints the heap state used by the idle notification")
-DEFINE_BOOL(print_cumulative_gc_stat, false,
- "print cumulative GC statistics in name=value format on exit")
DEFINE_BOOL(print_max_heap_committed, false,
"print statistics of the maximum memory committed for the heap "
"in name=value format on exit")
@@ -736,7 +739,7 @@ DEFINE_BOOL(age_code, true,
"track un-executed functions to age code and flush only "
"old code (required for code flushing)")
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
-DEFINE_BOOL(incremental_marking_wrappers, true,
+DEFINE_BOOL(incremental_marking_wrappers, false,
"use incremental marking for marking wrappers")
DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
"keep finalizing incremental marking as long as we discover at "
@@ -800,6 +803,7 @@ DEFINE_BOOL(use_idle_notification, true,
DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
DEFINE_BOOL(tf_load_ic_stub, true, "use TF LoadIC stub")
+DEFINE_BOOL(tf_store_ic_stub, true, "use TF StoreIC stub")
// macro-assembler-ia32.cc
DEFINE_BOOL(native_code_counters, false,
@@ -835,6 +839,7 @@ DEFINE_BOOL(trace_maps, false, "trace map creation")
// parser.cc
DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
DEFINE_BOOL(trace_parse, false, "trace parsing and preparsing")
+DEFINE_BOOL(lazy_inner_functions, false, "enable lazy parsing inner functions")
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
@@ -868,6 +873,10 @@ DEFINE_BOOL(stack_trace_on_illegal, false,
"print stack trace when an illegal exception is thrown")
DEFINE_BOOL(abort_on_uncaught_exception, false,
"abort program (dump core) when an uncaught exception is thrown")
+DEFINE_BOOL(abort_on_stack_overflow, false,
+ "Abort program when stack overflow (as opposed to throwing "
+ "RangeError). This is useful for fuzzing where the spec behaviour "
+ "would introduce nondeterminism.")
DEFINE_BOOL(randomize_hashes, true,
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
@@ -925,11 +934,6 @@ DEFINE_BOOL(manual_evacuation_candidates_selection, false,
"Test mode only flag. It allows an unit test to select evacuation "
"candidates pages (requires --stress_compaction).")
-// api.cc
-DEFINE_INT(external_allocation_limit_incremental_time, 1,
- "Time spent in incremental marking steps (in ms) once the external "
- "allocation limit is reached")
-
DEFINE_BOOL(disable_old_api_accessors, false,
"Disable old-style API accessors whose setters trigger through the "
"prototype chain")
@@ -1036,6 +1040,10 @@ DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing")
// Debugger
DEFINE_BOOL(print_break_location, false, "print source location on debug break")
+// wasm instance management
+DEFINE_BOOL(trace_wasm_instances, false,
+ "trace creation and collection of wasm instances")
+
//
// Logging and profiling flags
//
@@ -1126,6 +1134,7 @@ DEFINE_BOOL(test_primary_stub_cache, false,
// codegen-ia32.cc / codegen-arm.cc
DEFINE_BOOL(print_code, false, "print generated code")
DEFINE_BOOL(print_opt_code, false, "print optimized code")
+DEFINE_STRING(print_opt_code_filter, "*", "filter for printing optimized code")
DEFINE_BOOL(print_unopt_code, false,
"print unoptimized code before "
"printing optimized code based on it")
@@ -1188,8 +1197,6 @@ DEFINE_BOOL(unbox_double_fields, V8_DOUBLE_FIELDS_UNBOXING,
"enable in-object double fields unboxing (64-bit only)")
DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
-DEFINE_BOOL(global_var_shortcuts, false, "use ic-less global loads and stores")
-
// Cleanup...
#undef FLAG_FULL
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 77784b8234..61d0dcd663 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -63,6 +63,8 @@ inline StackHandler* StackFrame::top_handler() const {
inline Code* StackFrame::LookupCode() const {
+ // TODO(jgruber): This should really check that pc is within the returned
+ // code's instruction range [instruction_start(), instruction_end()[.
return GetContainingCode(isolate(), pc());
}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index f0fa58d27b..c67fdc2d94 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -1461,9 +1461,9 @@ Address WasmFrame::GetCallerStackPointer() const {
}
Object* WasmFrame::wasm_obj() const {
- FixedArray* deopt_data = LookupCode()->deoptimization_data();
- DCHECK(deopt_data->length() == 2);
- return deopt_data->get(0);
+ Object* ret = wasm::GetOwningWasmInstance(LookupCode());
+ if (ret == nullptr) ret = *(isolate()->factory()->undefined_value());
+ return ret;
}
uint32_t WasmFrame::function_index() const {
@@ -1478,6 +1478,15 @@ Script* WasmFrame::script() const {
return wasm::WasmDebugInfo::GetFunctionScript(debug_info, function_index());
}
+int WasmFrame::LookupExceptionHandlerInTable(int* stack_slots) {
+ DCHECK_NOT_NULL(stack_slots);
+ Code* code = LookupCode();
+ HandlerTable* table = HandlerTable::cast(code->handler_table());
+ int pc_offset = static_cast<int>(pc() - code->entry());
+ *stack_slots = code->stack_slots();
+ return table->LookupReturn(pc_offset);
+}
+
namespace {
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 12770231cf..373f4de92c 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -525,6 +525,8 @@ class StackFrame BASE_EMBEDDED {
Isolate* isolate() const { return isolate_; }
+ void operator=(const StackFrame& original) = delete;
+
protected:
inline explicit StackFrame(StackFrameIteratorBase* iterator);
virtual ~StackFrame() { }
@@ -563,9 +565,6 @@ class StackFrame BASE_EMBEDDED {
friend class StackFrameIteratorBase;
friend class StackHandlerIterator;
friend class SafeStackFrameIterator;
-
- private:
- void operator=(const StackFrame& original);
};
@@ -1057,6 +1056,10 @@ class WasmFrame : public StandardFrame {
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
+ // Lookup exception handler for current {pc}, returns -1 if none found. Also
+ // returns the stack slot count of the entire frame.
+ int LookupExceptionHandlerInTable(int* data);
+
// Determine the code for the frame.
Code* unchecked_code() const override;
diff --git a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
index 7887d32bdb..e8eeb8ecac 100644
--- a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
+++ b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -4,14 +4,16 @@
#if V8_TARGET_ARCH_ARM
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
#include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parsing/parser.h"
#include "src/arm/code-stubs-arm.h"
#include "src/arm/macro-assembler-arm.h"
@@ -126,6 +128,20 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->GeneratePreagedPrologue());
+ // Increment invocation count for the function.
+ {
+ Comment cmnt(masm_, "[ Increment invocation count");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+ __ ldr(r2, FieldMemOperand(r2, LiteralsArray::kFeedbackVectorOffset));
+ __ ldr(r9, FieldMemOperand(r2, TypeFeedbackVector::kInvocationCountIndex *
+ kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ __ add(r9, r9, Operand(Smi::FromInt(1)));
+ __ str(r9, FieldMemOperand(r2, TypeFeedbackVector::kInvocationCountIndex *
+ kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ }
+
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
@@ -167,14 +183,14 @@ void FullCodeGenerator::Generate() {
bool function_in_register_r1 = true;
// Possibly allocate a local context.
- if (info->scope()->num_heap_slots() > 0) {
+ if (info->scope()->NeedsContext()) {
// Argument to NewContext is the function, which is still in r1.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (info->scope()->is_script_scope()) {
__ push(r1);
- __ Push(info->scope()->GetScopeInfo(info->isolate()));
+ __ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(),
BailoutState::TOS_REGISTER);
@@ -259,9 +275,8 @@ void FullCodeGenerator::Generate() {
}
// Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = info->scope()->rest_parameter(&rest_index);
- if (rest_param) {
+ Variable* rest_param = info->scope()->rest_parameter();
+ if (rest_param != nullptr) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register_r1) {
__ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -770,7 +785,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -821,7 +835,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
@@ -1138,6 +1151,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for the going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
+ PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ pop(r0);
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);
@@ -1160,12 +1174,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- __ mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
__ ldr(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1174,12 +1185,9 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), r0);
- __ mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
__ ldr(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1219,7 +1227,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Register temp = r4;
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
+ if (s->NeedsContext()) {
if (s->calls_sloppy_eval()) {
// Check that extension is "the hole".
__ ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
@@ -1268,20 +1276,6 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
}
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofMode typeof_mode) {
-#ifdef DEBUG
- Variable* var = proxy->var();
- DCHECK(var->IsUnallocatedOrGlobalSlot() ||
- (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
- __ mov(LoadGlobalDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadGlobalIC(typeof_mode);
-}
-
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1292,7 +1286,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1415,10 +1408,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(r0));
- __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
+ CallStoreIC(property->GetSlot(0), key->value());
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1590,6 +1581,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
+ RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1599,8 +1591,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- int array_index = 0;
- for (; array_index < length; array_index++) {
+ for (int array_index = 0; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
DCHECK(!subexpr->IsSpread());
@@ -1616,31 +1607,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
- }
-
- // In case the array literal contains spread expressions it has two parts. The
- // first part is the "static" array which has a literal index is handled
- // above. The second part is the part after the first spread expression
- // (inclusive) and these elements gets appended to the array. Note that the
- // number elements an iterable produces is unknown ahead of time.
- if (array_index < length && result_saved) {
- PopOperand(r0);
- result_saved = false;
- }
- for (; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
-
- PushOperand(r0);
- DCHECK(!subexpr->IsSpread());
- VisitForStackValue(subexpr);
- CallRuntimeWithOperands(Runtime::kAppendElement);
+ CallKeyedStoreIC(expr->LiteralFeedbackSlot());
PrepareForBailoutForId(expr->GetIdForElement(array_index),
BailoutState::NO_REGISTERS);
@@ -1992,7 +1959,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
- ObjectLiteral::Property* property = lit->properties()->at(i);
+ ClassLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
Register scratch = r1;
@@ -2019,26 +1986,23 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- case ObjectLiteral::Property::COMPUTED:
+ case ClassLiteral::Property::METHOD:
PushOperand(Smi::FromInt(DONT_ENUM));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
- case ObjectLiteral::Property::GETTER:
+ case ClassLiteral::Property::GETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
- case ObjectLiteral::Property::SETTER:
+ case ClassLiteral::Property::SETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
+ case ClassLiteral::Property::FIELD:
default:
UNREACHABLE();
}
@@ -2075,10 +2039,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
VisitForAccumulatorValue(prop->obj());
__ Move(StoreDescriptor::ReceiverRegister(), r0);
PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- __ mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2125,10 +2086,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), r0);
PopOperands(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(slot);
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
+ CallKeyedStoreIC(slot);
break;
}
}
@@ -2153,10 +2111,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
__ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, var->name());
} else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
@@ -2173,10 +2129,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
}
- if (var->mode() == CONST) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- } else {
+ if (var->mode() != CONST) {
EmitStoreToStackLocalOrContextSlot(var, location);
+ } else if (var->throw_on_const_assignment(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
@@ -2192,7 +2148,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT) {
+ } else {
+ DCHECK(var->mode() != CONST || op == Token::INIT);
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(var->name());
@@ -2213,13 +2170,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
-
- } else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
- if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- }
- // Silently ignore store in sloppy mode.
}
}
@@ -2230,11 +2180,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- __ mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
+ CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r0);
@@ -2276,10 +2223,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(r0));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r0);
@@ -2839,24 +2783,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(r0, r1);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(r1);
-}
-
-
void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3048,7 +2974,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// "delete this" is allowed.
bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
__ LoadGlobalObject(r2);
__ mov(r1, Operand(var->name()));
__ Push(r2, r1);
@@ -3333,11 +3259,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
+ CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3375,10 +3298,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
diff --git a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index a4f32da2ef..1854f102be 100644
--- a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -4,14 +4,16 @@
#if V8_TARGET_ARCH_ARM64
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
#include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parsing/parser.h"
#include "src/arm64/code-stubs-arm64.h"
#include "src/arm64/frames-arm64.h"
@@ -130,6 +132,20 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->GeneratePreagedPrologue());
+ // Increment invocation count for the function.
+ {
+ Comment cmnt(masm_, "[ Increment invocation count");
+ __ Ldr(x11, FieldMemOperand(x1, JSFunction::kLiteralsOffset));
+ __ Ldr(x11, FieldMemOperand(x11, LiteralsArray::kFeedbackVectorOffset));
+ __ Ldr(x10, FieldMemOperand(x11, TypeFeedbackVector::kInvocationCountIndex *
+ kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ __ Add(x10, x10, Operand(Smi::FromInt(1)));
+ __ Str(x10, FieldMemOperand(x11, TypeFeedbackVector::kInvocationCountIndex *
+ kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ }
+
// Reserve space on the stack for locals.
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -170,13 +186,13 @@ void FullCodeGenerator::Generate() {
bool function_in_register_x1 = true;
- if (info->scope()->num_heap_slots() > 0) {
+ if (info->scope()->NeedsContext()) {
// Argument to NewContext is the function, which is still in x1.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (info->scope()->is_script_scope()) {
- __ Mov(x10, Operand(info->scope()->GetScopeInfo(info->isolate())));
+ __ Mov(x10, Operand(info->scope()->scope_info()));
__ Push(x1, x10);
__ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(),
@@ -261,9 +277,8 @@ void FullCodeGenerator::Generate() {
}
// Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = info->scope()->rest_parameter(&rest_index);
- if (rest_param) {
+ Variable* rest_param = info->scope()->rest_parameter();
+ if (rest_param != nullptr) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register_x1) {
__ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -765,7 +780,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -816,7 +830,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
@@ -1126,6 +1139,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ Bind(loop_statement.continue_label());
+ PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
// TODO(all): We could use a callee saved register to avoid popping.
__ Pop(x0);
__ Add(x0, x0, Smi::FromInt(1));
@@ -1149,11 +1163,8 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
- __ Mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
__ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1162,11 +1173,8 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), x0);
- __ Mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
__ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1206,7 +1214,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Register temp = x11;
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
+ if (s->NeedsContext()) {
if (s->calls_sloppy_eval()) {
// Check that extension is "the hole".
__ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
@@ -1254,20 +1262,6 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
}
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofMode typeof_mode) {
-#ifdef DEBUG
- Variable* var = proxy->var();
- DCHECK(var->IsUnallocatedOrGlobalSlot() ||
- (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
- __ Mov(LoadGlobalDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- CallLoadGlobalIC(typeof_mode);
-}
-
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1278,7 +1272,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "Global variable");
EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1401,10 +1394,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(x0));
- __ Mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
+ CallStoreIC(property->GetSlot(0), key->value());
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1572,6 +1563,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
+ RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1581,8 +1573,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- int array_index = 0;
- for (; array_index < length; array_index++) {
+ for (int array_index = 0; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
DCHECK(!subexpr->IsSpread());
@@ -1598,31 +1589,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Mov(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
- }
-
- // In case the array literal contains spread expressions it has two parts. The
- // first part is the "static" array which has a literal index is handled
- // above. The second part is the part after the first spread expression
- // (inclusive) and these elements gets appended to the array. Note that the
- // number elements an iterable produces is unknown ahead of time.
- if (array_index < length && result_saved) {
- PopOperand(x0);
- result_saved = false;
- }
- for (; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
-
- PushOperand(x0);
- DCHECK(!subexpr->IsSpread());
- VisitForStackValue(subexpr);
- CallRuntimeWithOperands(Runtime::kAppendElement);
+ CallKeyedStoreIC(expr->LiteralFeedbackSlot());
PrepareForBailoutForId(expr->GetIdForElement(array_index),
BailoutState::NO_REGISTERS);
@@ -1892,7 +1859,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
- ObjectLiteral::Property* property = lit->properties()->at(i);
+ ClassLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
Register scratch = x1;
@@ -1919,26 +1886,23 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- case ObjectLiteral::Property::COMPUTED:
+ case ClassLiteral::Property::METHOD:
PushOperand(Smi::FromInt(DONT_ENUM));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
- case ObjectLiteral::Property::GETTER:
+ case ClassLiteral::Property::GETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
- case ObjectLiteral::Property::SETTER:
+ case ClassLiteral::Property::SETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
+ case ClassLiteral::Property::FIELD:
default:
UNREACHABLE();
}
@@ -1967,10 +1931,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
// this copy.
__ Mov(StoreDescriptor::ReceiverRegister(), x0);
PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- __ Mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2017,10 +1978,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Mov(StoreDescriptor::NameRegister(), x0);
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::ValueRegister());
- EmitLoadStoreICSlot(slot);
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
+ CallKeyedStoreIC(slot);
break;
}
}
@@ -2046,10 +2004,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ Mov(StoreDescriptor::NameRegister(), Operand(var->name()));
__ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, var->name());
} else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
@@ -2065,10 +2021,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CallRuntime(Runtime::kThrowReferenceError);
__ Bind(&assign);
}
- if (var->mode() == CONST) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- } else {
+ if (var->mode() != CONST) {
EmitStoreToStackLocalOrContextSlot(var, location);
+ } else if (var->throw_on_const_assignment(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
@@ -2083,7 +2039,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT) {
+ } else {
+ DCHECK(var->mode() != CONST || op == Token::INIT);
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(var->name());
@@ -2103,13 +2060,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
-
- } else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
- if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- }
- // Silently ignore store in sloppy mode.
}
}
@@ -2121,11 +2071,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- __ Mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
+ CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(x0);
@@ -2170,10 +2117,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(x0));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(x0);
@@ -2746,28 +2690,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- Register code = x0;
- Register result = x1;
-
- StringCharFromCodeGenerator generator(code, result);
- generator.GenerateFast(masm_);
- __ B(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ Bind(&done);
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -2971,7 +2893,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// "delete this" is allowed.
bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
__ LoadGlobalObject(x12);
__ Mov(x11, Operand(var->name()));
__ Push(x12, x11);
@@ -3254,11 +3176,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ Mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
+ CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3296,10 +3215,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
PopOperand(StoreDescriptor::NameRegister());
PopOperand(StoreDescriptor::ReceiverRegister());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
diff --git a/deps/v8/src/full-codegen/full-codegen.cc b/deps/v8/src/full-codegen/full-codegen.cc
index d83a23b3f8..25d7f920f1 100644
--- a/deps/v8/src/full-codegen/full-codegen.cc
+++ b/deps/v8/src/full-codegen/full-codegen.cc
@@ -10,6 +10,7 @@
#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/codegen.h"
+#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
@@ -25,15 +26,69 @@ namespace internal {
#define __ ACCESS_MASM(masm())
+class FullCodegenCompilationJob final : public CompilationJob {
+ public:
+ explicit FullCodegenCompilationJob(CompilationInfo* info)
+ : CompilationJob(info->isolate(), info, "Full-Codegen") {}
+
+ bool can_execute_on_background_thread() const override { return false; }
+
+ CompilationJob::Status PrepareJobImpl() final { return SUCCEEDED; }
+
+ CompilationJob::Status ExecuteJobImpl() final {
+ DCHECK(ThreadId::Current().Equals(isolate()->thread_id()));
+ return FullCodeGenerator::MakeCode(info(), stack_limit()) ? SUCCEEDED
+ : FAILED;
+ }
+
+ CompilationJob::Status FinalizeJobImpl() final { return SUCCEEDED; }
+};
+
+FullCodeGenerator::FullCodeGenerator(MacroAssembler* masm,
+ CompilationInfo* info,
+ uintptr_t stack_limit)
+ : masm_(masm),
+ info_(info),
+ isolate_(info->isolate()),
+ zone_(info->zone()),
+ scope_(info->scope()),
+ nesting_stack_(NULL),
+ loop_depth_(0),
+ operand_stack_depth_(0),
+ globals_(NULL),
+ context_(NULL),
+ bailout_entries_(info->HasDeoptimizationSupport()
+ ? info->literal()->ast_node_count()
+ : 0,
+ info->zone()),
+ back_edges_(2, info->zone()),
+ handler_table_(info->zone()),
+ source_position_table_builder_(info->zone(),
+ info->SourcePositionRecordingMode()),
+ ic_total_count_(0) {
+ DCHECK(!info->IsStub());
+ Initialize(stack_limit);
+}
+
+// static
+CompilationJob* FullCodeGenerator::NewCompilationJob(CompilationInfo* info) {
+ return new FullCodegenCompilationJob(info);
+}
+
+// static
bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
+ return MakeCode(info, info->isolate()->stack_guard()->real_climit());
+}
+
+// static
+bool FullCodeGenerator::MakeCode(CompilationInfo* info, uintptr_t stack_limit) {
Isolate* isolate = info->isolate();
DCHECK(!FLAG_minimal);
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::CompileFullCode);
TimerEventScope<TimerEventCompileFullCode> timer(info->isolate());
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::CompileFullCode);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileFullCode");
Handle<Script> script = info->script();
if (!script->IsUndefined(isolate) &&
@@ -47,7 +102,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
CodeObjectRequired::kYes);
if (info->will_serialize()) masm.enable_serializer();
- FullCodeGenerator cgen(&masm, info);
+ FullCodeGenerator cgen(&masm, info, stack_limit);
cgen.Generate();
if (cgen.HasStackOverflow()) {
DCHECK(!isolate->has_pending_exception());
@@ -157,9 +212,8 @@ bool FullCodeGenerator::MustCreateArrayLiteralWithRuntime(
expr->values()->length() > JSArray::kInitialMaxFastElementArray;
}
-
-void FullCodeGenerator::Initialize() {
- InitializeAstVisitor(info_->isolate());
+void FullCodeGenerator::Initialize(uintptr_t stack_limit) {
+ InitializeAstVisitor(stack_limit);
masm_->set_emit_debug_code(FLAG_debug_code);
masm_->set_predictable_code_size(true);
}
@@ -169,23 +223,52 @@ void FullCodeGenerator::PrepareForBailout(Expression* node,
PrepareForBailoutForId(node->id(), state);
}
-void FullCodeGenerator::CallLoadIC(TypeFeedbackId id) {
+void FullCodeGenerator::CallLoadIC(FeedbackVectorSlot slot, Handle<Object> name,
+ TypeFeedbackId id) {
+ DCHECK(name->IsName());
+ __ Move(LoadDescriptor::NameRegister(), name);
+
+ EmitLoadSlot(LoadDescriptor::SlotRegister(), slot);
+
Handle<Code> ic = CodeFactory::LoadIC(isolate()).code();
CallIC(ic, id);
if (FLAG_tf_load_ic_stub) RestoreContext();
}
-void FullCodeGenerator::CallLoadGlobalIC(TypeofMode typeof_mode,
- TypeFeedbackId id) {
- Handle<Code> ic = CodeFactory::LoadGlobalIC(isolate(), typeof_mode).code();
- CallIC(ic, id);
-}
+void FullCodeGenerator::CallStoreIC(FeedbackVectorSlot slot,
+ Handle<Object> name, TypeFeedbackId id) {
+ DCHECK(name->IsName());
+ __ Move(StoreDescriptor::NameRegister(), name);
+
+ STATIC_ASSERT(!StoreDescriptor::kPassLastArgsOnStack ||
+ StoreDescriptor::kStackArgumentsCount == 2);
+ if (StoreDescriptor::kPassLastArgsOnStack) {
+ __ Push(StoreDescriptor::ValueRegister());
+ EmitPushSlot(slot);
+ } else {
+ EmitLoadSlot(StoreDescriptor::SlotRegister(), slot);
+ }
-void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
Handle<Code> ic = CodeFactory::StoreIC(isolate(), language_mode()).code();
CallIC(ic, id);
+ RestoreContext();
}
+void FullCodeGenerator::CallKeyedStoreIC(FeedbackVectorSlot slot) {
+ STATIC_ASSERT(!StoreDescriptor::kPassLastArgsOnStack ||
+ StoreDescriptor::kStackArgumentsCount == 2);
+ if (StoreDescriptor::kPassLastArgsOnStack) {
+ __ Push(StoreDescriptor::ValueRegister());
+ EmitPushSlot(slot);
+ } else {
+ EmitLoadSlot(StoreDescriptor::SlotRegister(), slot);
+ }
+
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
+ RestoreContext();
+}
void FullCodeGenerator::RecordJSReturnSite(Call* call) {
// We record the offset of the function return so we can rebuild the frame
@@ -411,6 +494,18 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
EmitVariableLoad(expr);
}
+void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
+ TypeofMode typeof_mode) {
+#ifdef DEBUG
+ Variable* var = proxy->var();
+ DCHECK(var->IsUnallocated() ||
+ (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
+#endif
+ EmitLoadSlot(LoadGlobalDescriptor::SlotRegister(),
+ proxy->VariableFeedbackSlot());
+ Handle<Code> ic = CodeFactory::LoadGlobalIC(isolate(), typeof_mode).code();
+ CallIC(ic);
+}
void FullCodeGenerator::VisitSloppyBlockFunctionStatement(
SloppyBlockFunctionStatement* declaration) {
@@ -473,6 +568,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallStub(&stub);
+ RestoreContext();
OperandStackDepthDecrement(3);
context()->Plug(result_register());
}
@@ -816,8 +912,8 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
DCHECK(!context()->IsEffect());
DCHECK(!context()->IsTest());
- if (proxy != NULL && (proxy->var()->IsUnallocatedOrGlobalSlot() ||
- proxy->var()->IsLookupSlot())) {
+ if (proxy != NULL &&
+ (proxy->var()->IsUnallocated() || proxy->var()->IsLookupSlot())) {
EmitVariableLoad(proxy, INSIDE_TYPEOF);
PrepareForBailout(proxy, BailoutState::TOS_REGISTER);
} else {
@@ -896,6 +992,7 @@ void FullCodeGenerator::EmitContinue(Statement* target) {
// accumulator on the stack.
ClearAccumulator();
while (!current->IsContinueTarget(target)) {
+ if (HasStackOverflow()) return;
if (current->IsTryFinally()) {
Comment cmnt(masm(), "[ Deferred continue through finally");
current->Exit(&context_length);
@@ -936,6 +1033,7 @@ void FullCodeGenerator::EmitBreak(Statement* target) {
// accumulator on the stack.
ClearAccumulator();
while (!current->IsBreakTarget(target)) {
+ if (HasStackOverflow()) return;
if (current->IsTryFinally()) {
Comment cmnt(masm(), "[ Deferred break through finally");
current->Exit(&context_length);
@@ -971,6 +1069,7 @@ void FullCodeGenerator::EmitUnwindAndReturn() {
NestedStatement* current = nesting_stack_;
int context_length = 0;
while (current != NULL) {
+ if (HasStackOverflow()) return;
if (current->IsTryFinally()) {
Comment cmnt(masm(), "[ Deferred return through finally");
current->Exit(&context_length);
@@ -1008,10 +1107,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
DCHECK(!key->value()->IsSmi());
DCHECK(!prop->IsSuperAccess());
- __ Move(LoadDescriptor::NameRegister(), key->value());
- __ Move(LoadDescriptor::SlotRegister(),
- SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallLoadIC();
+ CallLoadIC(prop->PropertyFeedbackSlot(), key->value());
}
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
@@ -1027,11 +1123,12 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetExpressionPosition(prop);
+
+ EmitLoadSlot(LoadDescriptor::SlotRegister(), prop->PropertyFeedbackSlot());
+
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
- __ Move(LoadDescriptor::SlotRegister(),
- SmiFromSlot(prop->PropertyFeedbackSlot()));
CallIC(ic);
- if (FLAG_tf_load_ic_stub) RestoreContext();
+ RestoreContext();
}
void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
@@ -1040,7 +1137,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
}
-void FullCodeGenerator::EmitPropertyKey(ObjectLiteralProperty* property,
+void FullCodeGenerator::EmitPropertyKey(LiteralProperty* property,
BailoutId bailout_id) {
VisitForStackValue(property->key());
CallRuntimeWithOperands(Runtime::kToName);
@@ -1048,9 +1145,14 @@ void FullCodeGenerator::EmitPropertyKey(ObjectLiteralProperty* property,
PushOperand(result_register());
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitLoadSlot(Register destination,
+ FeedbackVectorSlot slot) {
DCHECK(!slot.IsInvalid());
- __ Move(StoreDescriptor::SlotRegister(), SmiFromSlot(slot));
+ __ Move(destination, SmiFromSlot(slot));
+}
+
+void FullCodeGenerator::EmitPushSlot(FeedbackVectorSlot slot) {
+ __ Push(SmiFromSlot(slot));
}
void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
@@ -1073,6 +1175,7 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
RestoreContext();
PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
PushOperand(result_register());
+ PushOperand(stmt->scope()->scope_info());
PushFunctionArgumentForContextAllocation();
CallRuntimeWithOperands(Runtime::kPushWithContext);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
@@ -1274,6 +1377,7 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
{ Comment cmnt(masm_, "[ Extend catch context");
PushOperand(stmt->variable()->name());
PushOperand(result_register());
+ PushOperand(stmt->scope()->scope_info());
PushFunctionArgumentForContextAllocation();
CallRuntimeWithOperands(Runtime::kPushCatchContext);
StoreToFrameField(StandardFrameConstants::kContextOffset,
@@ -1466,9 +1570,7 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
// Load the "prototype" from the constructor.
__ Move(LoadDescriptor::ReceiverRegister(), result_register());
- __ LoadRoot(LoadDescriptor::NameRegister(), Heap::kprototype_stringRootIndex);
- __ Move(LoadDescriptor::SlotRegister(), SmiFromSlot(lit->PrototypeSlot()));
- CallLoadIC();
+ CallLoadIC(lit->PrototypeSlot(), isolate()->factory()->prototype_string());
PrepareForBailoutForId(lit->PrototypeId(), BailoutState::TOS_REGISTER);
PushOperand(result_register());
@@ -1847,7 +1949,7 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::EnterBlockScopeIfNeeded(
{
if (needs_block_context_) {
Comment cmnt(masm(), "[ Extend block context");
- codegen_->PushOperand(scope->GetScopeInfo(codegen->isolate()));
+ codegen_->PushOperand(scope->scope_info());
codegen_->PushFunctionArgumentForContextAllocation();
codegen_->CallRuntimeWithOperands(Runtime::kPushBlockContext);
@@ -1939,6 +2041,17 @@ bool FullCodeGenerator::NeedsHoleCheckForLoad(VariableProxy* proxy) {
var->initializer_position() >= proxy->position();
}
+Handle<Script> FullCodeGenerator::script() { return info_->script(); }
+
+LanguageMode FullCodeGenerator::language_mode() {
+ return scope()->language_mode();
+}
+
+bool FullCodeGenerator::has_simple_parameters() {
+ return info_->has_simple_parameters();
+}
+
+FunctionLiteral* FullCodeGenerator::literal() const { return info_->literal(); }
#undef __
diff --git a/deps/v8/src/full-codegen/full-codegen.h b/deps/v8/src/full-codegen/full-codegen.h
index 71f065b092..2a4eb9dd3b 100644
--- a/deps/v8/src/full-codegen/full-codegen.h
+++ b/deps/v8/src/full-codegen/full-codegen.h
@@ -13,7 +13,6 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/deoptimizer.h"
#include "src/globals.h"
#include "src/objects.h"
@@ -22,39 +21,24 @@ namespace v8 {
namespace internal {
// Forward declarations.
+class CompilationInfo;
+class CompilationJob;
class JumpPatchSite;
+class Scope;
// -----------------------------------------------------------------------------
// Full code generator.
class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
public:
- FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
- : masm_(masm),
- info_(info),
- isolate_(info->isolate()),
- zone_(info->zone()),
- scope_(info->scope()),
- nesting_stack_(NULL),
- loop_depth_(0),
- operand_stack_depth_(0),
- globals_(NULL),
- context_(NULL),
- bailout_entries_(info->HasDeoptimizationSupport()
- ? info->literal()->ast_node_count()
- : 0,
- info->zone()),
- back_edges_(2, info->zone()),
- handler_table_(info->zone()),
- source_position_table_builder_(info->zone(),
- info->SourcePositionRecordingMode()),
- ic_total_count_(0) {
- DCHECK(!info->IsStub());
- Initialize();
- }
+ FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info,
+ uintptr_t stack_limit);
+
+ void Initialize(uintptr_t stack_limit);
- void Initialize();
+ static CompilationJob* NewCompilationJob(CompilationInfo* info);
+ static bool MakeCode(CompilationInfo* info, uintptr_t stack_limit);
static bool MakeCode(CompilationInfo* info);
// Encode bailout state and pc-offset as a BitField<type, start, size>.
@@ -493,7 +477,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
F(IsJSProxy) \
F(Call) \
F(NewObject) \
- F(StringCharFromCode) \
F(IsJSReceiver) \
F(HasCachedArrayIndex) \
F(GetCachedArrayIndex) \
@@ -572,7 +555,7 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
void EmitClassDefineProperties(ClassLiteral* lit);
// Pushes the property key as a Name on the stack.
- void EmitPropertyKey(ObjectLiteralProperty* property, BailoutId bailout_id);
+ void EmitPropertyKey(LiteralProperty* property, BailoutId bailout_id);
// Apply the compound assignment operator. Expects the left operand on top
// of the stack and the right one in the accumulator.
@@ -629,16 +612,19 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
void EmitSetHomeObjectAccumulator(Expression* initializer, int offset,
FeedbackVectorSlot slot);
- void EmitLoadStoreICSlot(FeedbackVectorSlot slot);
+ // Platform-specific code for loading a slot to a register.
+ void EmitLoadSlot(Register destination, FeedbackVectorSlot slot);
+ // Platform-specific code for pushing a slot to the stack.
+ void EmitPushSlot(FeedbackVectorSlot slot);
void CallIC(Handle<Code> code,
TypeFeedbackId id = TypeFeedbackId::None());
- void CallLoadIC(TypeFeedbackId id = TypeFeedbackId::None());
- // Inside typeof reference errors are never thrown.
- void CallLoadGlobalIC(TypeofMode typeof_mode,
- TypeFeedbackId id = TypeFeedbackId::None());
- void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
+ void CallLoadIC(FeedbackVectorSlot slot, Handle<Object> name,
+ TypeFeedbackId id = TypeFeedbackId::None());
+ void CallStoreIC(FeedbackVectorSlot slot, Handle<Object> name,
+ TypeFeedbackId id = TypeFeedbackId::None());
+ void CallKeyedStoreIC(FeedbackVectorSlot slot);
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
@@ -695,10 +681,10 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
Isolate* isolate() const { return isolate_; }
Zone* zone() const { return zone_; }
- Handle<Script> script() { return info_->script(); }
- LanguageMode language_mode() { return scope()->language_mode(); }
- bool has_simple_parameters() { return info_->has_simple_parameters(); }
- FunctionLiteral* literal() const { return info_->literal(); }
+ Handle<Script> script();
+ LanguageMode language_mode();
+ bool has_simple_parameters();
+ FunctionLiteral* literal() const;
Scope* scope() { return scope_; }
static Register context_register();
diff --git a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index 3571948216..e5f66cd1d4 100644
--- a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -4,15 +4,17 @@
#if V8_TARGET_ARCH_IA32
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
#include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/ia32/frames-ia32.h"
#include "src/ic/ic.h"
-#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -115,6 +117,17 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->GeneratePreagedPrologue());
+ // Increment invocation count for the function.
+ {
+ Comment cmnt(masm_, "[ Increment invocation count");
+ __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ mov(ecx, FieldOperand(ecx, LiteralsArray::kFeedbackVectorOffset));
+ __ add(FieldOperand(
+ ecx, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize),
+ Immediate(Smi::FromInt(1)));
+ }
+
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
@@ -159,14 +172,14 @@ void FullCodeGenerator::Generate() {
bool function_in_register = true;
// Possibly allocate a local context.
- if (info->scope()->num_heap_slots() > 0) {
+ if (info->scope()->NeedsContext()) {
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
// Argument to NewContext is the function, which is still in edi.
if (info->scope()->is_script_scope()) {
__ push(edi);
- __ Push(info->scope()->GetScopeInfo(info->isolate()));
+ __ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(),
BailoutState::TOS_REGISTER);
@@ -254,9 +267,8 @@ void FullCodeGenerator::Generate() {
}
// Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = info->scope()->rest_parameter(&rest_index);
- if (rest_param) {
+ Variable* rest_param = info->scope()->rest_parameter();
+ if (rest_param != nullptr) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -717,7 +729,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -767,7 +778,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
@@ -1066,6 +1076,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for going to the next element by incrementing the
// index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
+ PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
EmitBackEdgeBookkeeping(stmt, &loop);
@@ -1086,11 +1097,8 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- __ mov(StoreDescriptor::NameRegister(),
- Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1099,11 +1107,8 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), eax);
- __ mov(StoreDescriptor::NameRegister(),
- Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1141,7 +1146,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Register temp = ebx;
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
+ if (s->NeedsContext()) {
if (s->calls_sloppy_eval()) {
// Check that extension is "the hole".
__ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
@@ -1189,20 +1194,6 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
}
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofMode typeof_mode) {
-#ifdef DEBUG
- Variable* var = proxy->var();
- DCHECK(var->IsUnallocatedOrGlobalSlot() ||
- (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
- __ mov(LoadGlobalDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadGlobalIC(typeof_mode);
-}
-
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
SetExpressionPosition(proxy);
@@ -1212,7 +1203,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1339,10 +1329,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(eax));
- __ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
+ CallStoreIC(property->GetSlot(0), key->value());
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1506,6 +1494,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
+ RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1515,8 +1504,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- int array_index = 0;
- for (; array_index < length; array_index++) {
+ for (int array_index = 0; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
DCHECK(!subexpr->IsSpread());
@@ -1533,31 +1521,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(StoreDescriptor::NameRegister(),
Immediate(Smi::FromInt(array_index)));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
- }
-
- // In case the array literal contains spread expressions it has two parts. The
- // first part is the "static" array which has a literal index is handled
- // above. The second part is the part after the first spread expression
- // (inclusive) and these elements gets appended to the array. Note that the
- // number elements an iterable produces is unknown ahead of time.
- if (array_index < length && result_saved) {
- PopOperand(eax);
- result_saved = false;
- }
- for (; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
-
- PushOperand(eax);
- DCHECK(!subexpr->IsSpread());
- VisitForStackValue(subexpr);
- CallRuntimeWithOperands(Runtime::kAppendElement);
-
+ CallKeyedStoreIC(expr->LiteralFeedbackSlot());
PrepareForBailoutForId(expr->GetIdForElement(array_index),
BailoutState::NO_REGISTERS);
}
@@ -1902,7 +1866,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
- ObjectLiteral::Property* property = lit->properties()->at(i);
+ ClassLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
if (property->is_static()) {
@@ -1927,25 +1891,25 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- case ObjectLiteral::Property::COMPUTED:
+ case ClassLiteral::Property::METHOD:
PushOperand(Smi::FromInt(DONT_ENUM));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
- case ObjectLiteral::Property::GETTER:
+ case ClassLiteral::Property::GETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
- case ObjectLiteral::Property::SETTER:
+ case ClassLiteral::Property::SETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
+
+ case ClassLiteral::Property::FIELD:
+ UNREACHABLE();
+ break;
}
}
}
@@ -1980,10 +1944,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
VisitForAccumulatorValue(prop->obj());
__ Move(StoreDescriptor::ReceiverRegister(), eax);
PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- __ mov(StoreDescriptor::NameRegister(),
- prop->key()->AsLiteral()->value());
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2030,10 +1991,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), eax);
PopOperand(StoreDescriptor::ReceiverRegister()); // Receiver.
PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- EmitLoadStoreICSlot(slot);
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
+ CallKeyedStoreIC(slot);
break;
}
}
@@ -2056,13 +2014,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ mov(StoreDescriptor::NameRegister(), var->name());
__ mov(StoreDescriptor::ReceiverRegister(), NativeContextOperand());
__ mov(StoreDescriptor::ReceiverRegister(),
ContextOperand(StoreDescriptor::ReceiverRegister(),
Context::EXTENSION_INDEX));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, var->name());
} else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
@@ -2078,10 +2034,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
}
- if (var->mode() == CONST) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- } else {
+ if (var->mode() != CONST) {
EmitStoreToStackLocalOrContextSlot(var, location);
+ } else if (var->throw_on_const_assignment(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
@@ -2096,7 +2052,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT) {
+ } else {
+ DCHECK(var->mode() != CONST || op == Token::INIT);
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(Immediate(var->name()));
@@ -2117,13 +2074,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
-
- } else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
- if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- }
- // Silently ignore store in sloppy mode.
}
}
@@ -2136,10 +2086,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- __ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
+ CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
@@ -2182,10 +2130,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::NameRegister()); // Key.
PopOperand(StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(eax));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
@@ -2723,25 +2668,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(eax, ebx);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(ebx);
-}
-
-
void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -2936,7 +2862,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// "delete this" is allowed.
bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
__ mov(eax, NativeContextOperand());
__ push(ContextOperand(eax, Context::EXTENSION_INDEX));
__ push(Immediate(var->name()));
@@ -3230,11 +3156,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(StoreDescriptor::NameRegister(),
- prop->key()->AsLiteral()->value());
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
+ CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3272,10 +3195,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
PopOperand(StoreDescriptor::NameRegister());
PopOperand(StoreDescriptor::ReceiverRegister());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
// Result is on the stack
diff --git a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
index 67598d0a25..7f976866a1 100644
--- a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
+++ b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -12,14 +12,16 @@
// places where we have to move a previous result in v0 to a0 for the
// next call: mov(a0, v0). This is not needed on the other architectures.
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
#include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parsing/parser.h"
#include "src/mips/code-stubs-mips.h"
#include "src/mips/macro-assembler-mips.h"
@@ -135,6 +137,20 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->GeneratePreagedPrologue());
+ // Increment invocation count for the function.
+ {
+ Comment cmnt(masm_, "[ Increment invocation count");
+ __ lw(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
+ __ lw(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+ __ lw(t0, FieldMemOperand(
+ a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ __ Addu(t0, t0, Operand(Smi::FromInt(1)));
+ __ sw(t0, FieldMemOperand(
+ a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ }
+
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
@@ -177,14 +193,14 @@ void FullCodeGenerator::Generate() {
bool function_in_register_a1 = true;
// Possibly allocate a local context.
- if (info->scope()->num_heap_slots() > 0) {
+ if (info->scope()->NeedsContext()) {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in a1.
bool need_write_barrier = true;
int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (info->scope()->is_script_scope()) {
__ push(a1);
- __ Push(info->scope()->GetScopeInfo(info->isolate()));
+ __ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(),
BailoutState::TOS_REGISTER);
@@ -269,9 +285,8 @@ void FullCodeGenerator::Generate() {
}
// Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = info->scope()->rest_parameter(&rest_index);
- if (rest_param) {
+ Variable* rest_param = info->scope()->rest_parameter();
+ if (rest_param != nullptr) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register_a1) {
__ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -765,7 +780,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -816,7 +830,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
@@ -1133,6 +1146,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for the going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
+ PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ pop(a0);
__ Addu(a0, a0, Operand(Smi::FromInt(1)));
__ push(a0);
@@ -1155,12 +1169,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- __ li(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
__ lw(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1169,12 +1180,9 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), v0);
- __ li(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
__ lw(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1214,7 +1222,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Register temp = t0;
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
+ if (s->NeedsContext()) {
if (s->calls_sloppy_eval()) {
// Check that extension is "the hole".
__ lw(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
@@ -1264,20 +1272,6 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
}
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofMode typeof_mode) {
-#ifdef DEBUG
- Variable* var = proxy->var();
- DCHECK(var->IsUnallocatedOrGlobalSlot() ||
- (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
- __ li(LoadGlobalDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadGlobalIC(typeof_mode);
-}
-
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1288,7 +1282,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1412,10 +1405,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(StoreDescriptor::ValueRegister(), result_register());
DCHECK(StoreDescriptor::ValueRegister().is(a0));
- __ li(StoreDescriptor::NameRegister(), Operand(key->value()));
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
+ CallStoreIC(property->GetSlot(0), key->value());
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1585,6 +1576,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
+ RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1594,8 +1586,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- int array_index = 0;
- for (; array_index < length; array_index++) {
+ for (int array_index = 0; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
DCHECK(!subexpr->IsSpread());
@@ -1613,31 +1604,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ li(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ mov(StoreDescriptor::ValueRegister(), result_register());
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
- }
-
- // In case the array literal contains spread expressions it has two parts. The
- // first part is the "static" array which has a literal index is handled
- // above. The second part is the part after the first spread expression
- // (inclusive) and these elements gets appended to the array. Note that the
- // number elements an iterable produces is unknown ahead of time.
- if (array_index < length && result_saved) {
- PopOperand(v0);
- result_saved = false;
- }
- for (; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
-
- PushOperand(v0);
- DCHECK(!subexpr->IsSpread());
- VisitForStackValue(subexpr);
- CallRuntimeWithOperands(Runtime::kAppendElement);
+ CallKeyedStoreIC(expr->LiteralFeedbackSlot());
PrepareForBailoutForId(expr->GetIdForElement(array_index),
BailoutState::NO_REGISTERS);
@@ -1995,7 +1962,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
- ObjectLiteral::Property* property = lit->properties()->at(i);
+ ClassLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
Register scratch = a1;
@@ -2022,26 +1989,23 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- case ObjectLiteral::Property::COMPUTED:
+ case ClassLiteral::Property::METHOD:
PushOperand(Smi::FromInt(DONT_ENUM));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
- case ObjectLiteral::Property::GETTER:
+ case ClassLiteral::Property::GETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
- case ObjectLiteral::Property::SETTER:
+ case ClassLiteral::Property::SETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
+ case ClassLiteral::Property::FIELD:
default:
UNREACHABLE();
}
@@ -2079,10 +2043,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
VisitForAccumulatorValue(prop->obj());
__ mov(StoreDescriptor::ReceiverRegister(), result_register());
PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- __ li(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2129,10 +2090,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ mov(StoreDescriptor::NameRegister(), result_register());
PopOperands(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(slot);
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
+ CallKeyedStoreIC(slot);
break;
}
}
@@ -2158,10 +2116,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
- __ li(StoreDescriptor::NameRegister(), Operand(var->name()));
__ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, var->name());
} else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
@@ -2178,10 +2134,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
}
- if (var->mode() == CONST) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- } else {
+ if (var->mode() != CONST) {
EmitStoreToStackLocalOrContextSlot(var, location);
+ } else if (var->throw_on_const_assignment(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
@@ -2197,7 +2153,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT) {
+ } else {
+ DCHECK(var->mode() != CONST || op == Token::INIT);
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(var->name());
@@ -2218,13 +2175,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
-
- } else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
- if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- }
- // Silently ignore store in sloppy mode.
}
}
@@ -2236,11 +2186,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop->key()->IsLiteral());
__ mov(StoreDescriptor::ValueRegister(), result_register());
- __ li(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
+ CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
@@ -2288,10 +2235,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(a0));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
@@ -2844,25 +2788,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(v0, a1);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(a1);
-}
-
-
void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3056,7 +2981,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// "delete this" is allowed.
bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
__ LoadGlobalObject(a2);
__ li(a1, Operand(var->name()));
__ Push(a2, a1);
@@ -3339,11 +3264,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case NAMED_PROPERTY: {
__ mov(StoreDescriptor::ValueRegister(), result_register());
- __ li(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
+ CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3382,10 +3304,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::ValueRegister(), result_register());
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
diff --git a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index c149f137cf..660adb1aa6 100644
--- a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -12,14 +12,16 @@
// places where we have to move a previous result in v0 to a0 for the
// next call: mov(a0, v0). This is not needed on the other architectures.
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
#include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parsing/parser.h"
#include "src/mips64/code-stubs-mips64.h"
#include "src/mips64/macro-assembler-mips64.h"
@@ -134,6 +136,20 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->GeneratePreagedPrologue());
+ // Increment invocation count for the function.
+ {
+ Comment cmnt(masm_, "[ Increment invocation count");
+ __ ld(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
+ __ ld(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+ __ ld(a4, FieldMemOperand(
+ a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
+ __ sd(a4, FieldMemOperand(
+ a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ }
+
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
@@ -176,14 +192,14 @@ void FullCodeGenerator::Generate() {
bool function_in_register_a1 = true;
// Possibly allocate a local context.
- if (info->scope()->num_heap_slots() > 0) {
+ if (info->scope()->NeedsContext()) {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in a1.
bool need_write_barrier = true;
int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (info->scope()->is_script_scope()) {
__ push(a1);
- __ Push(info->scope()->GetScopeInfo(info->isolate()));
+ __ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(),
BailoutState::TOS_REGISTER);
@@ -267,9 +283,8 @@ void FullCodeGenerator::Generate() {
}
// Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = info->scope()->rest_parameter(&rest_index);
- if (rest_param) {
+ Variable* rest_param = info->scope()->rest_parameter();
+ if (rest_param != nullptr) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register_a1) {
__ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -764,7 +779,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -815,7 +829,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
@@ -1134,6 +1147,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for the going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
+ PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ pop(a0);
__ Daddu(a0, a0, Operand(Smi::FromInt(1)));
__ push(a0);
@@ -1156,12 +1170,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- __ li(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
__ ld(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1170,12 +1181,9 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), v0);
- __ li(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
__ ld(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1215,7 +1223,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Register temp = a4;
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
+ if (s->NeedsContext()) {
if (s->calls_sloppy_eval()) {
// Check that extension is "the hole".
__ ld(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
@@ -1265,20 +1273,6 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
}
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofMode typeof_mode) {
-#ifdef DEBUG
- Variable* var = proxy->var();
- DCHECK(var->IsUnallocatedOrGlobalSlot() ||
- (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
- __ li(LoadGlobalDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadGlobalIC(typeof_mode);
-}
-
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1289,7 +1283,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1413,10 +1406,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(StoreDescriptor::ValueRegister(), result_register());
DCHECK(StoreDescriptor::ValueRegister().is(a0));
- __ li(StoreDescriptor::NameRegister(), Operand(key->value()));
__ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
+ CallStoreIC(property->GetSlot(0), key->value());
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1586,6 +1577,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
+ RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1595,8 +1587,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- int array_index = 0;
- for (; array_index < length; array_index++) {
+ for (int array_index = 0; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
DCHECK(!subexpr->IsSpread());
@@ -1614,31 +1605,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ li(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
__ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ mov(StoreDescriptor::ValueRegister(), result_register());
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
- }
-
- // In case the array literal contains spread expressions it has two parts. The
- // first part is the "static" array which has a literal index is handled
- // above. The second part is the part after the first spread expression
- // (inclusive) and these elements gets appended to the array. Note that the
- // number elements an iterable produces is unknown ahead of time.
- if (array_index < length && result_saved) {
- PopOperand(v0);
- result_saved = false;
- }
- for (; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
-
- PushOperand(v0);
- DCHECK(!subexpr->IsSpread());
- VisitForStackValue(subexpr);
- CallRuntimeWithOperands(Runtime::kAppendElement);
+ CallKeyedStoreIC(expr->LiteralFeedbackSlot());
PrepareForBailoutForId(expr->GetIdForElement(array_index),
BailoutState::NO_REGISTERS);
@@ -1995,7 +1962,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
- ObjectLiteral::Property* property = lit->properties()->at(i);
+ ClassLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
Register scratch = a1;
@@ -2022,26 +1989,23 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- case ObjectLiteral::Property::COMPUTED:
+ case ClassLiteral::Property::METHOD:
PushOperand(Smi::FromInt(DONT_ENUM));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
- case ObjectLiteral::Property::GETTER:
+ case ClassLiteral::Property::GETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
- case ObjectLiteral::Property::SETTER:
+ case ClassLiteral::Property::SETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
+ case ClassLiteral::Property::FIELD:
default:
UNREACHABLE();
}
@@ -2079,10 +2043,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
VisitForAccumulatorValue(prop->obj());
__ mov(StoreDescriptor::ReceiverRegister(), result_register());
PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- __ li(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2129,10 +2090,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), result_register());
PopOperands(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(slot);
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
+ CallKeyedStoreIC(slot);
break;
}
}
@@ -2158,10 +2116,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
- __ li(StoreDescriptor::NameRegister(), Operand(var->name()));
__ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, var->name());
} else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
@@ -2178,10 +2134,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
}
- if (var->mode() == CONST) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- } else {
+ if (var->mode() != CONST) {
EmitStoreToStackLocalOrContextSlot(var, location);
+ } else if (var->throw_on_const_assignment(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
@@ -2197,7 +2153,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT) {
+ } else {
+ DCHECK(var->mode() != CONST || op == Token::INIT);
if (var->IsLookupSlot()) {
__ Push(var->name());
__ Push(v0);
@@ -2217,13 +2174,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
-
- } else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
- if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- }
- // Silently ignore store in sloppy mode.
}
}
@@ -2235,11 +2185,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop->key()->IsLiteral());
__ mov(StoreDescriptor::ValueRegister(), result_register());
- __ li(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
+ CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
@@ -2287,10 +2234,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(a0));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
@@ -2843,25 +2787,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(v0, a1);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(a1);
-}
-
-
void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3055,7 +2980,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// "delete this" is allowed.
bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
__ LoadGlobalObject(a2);
__ li(a1, Operand(var->name()));
__ Push(a2, a1);
@@ -3339,11 +3264,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case NAMED_PROPERTY: {
__ mov(StoreDescriptor::ValueRegister(), result_register());
- __ li(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
+ CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3382,10 +3304,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::ValueRegister(), result_register());
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
diff --git a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index 6813069d40..de9a8f46cf 100644
--- a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -4,14 +4,16 @@
#if V8_TARGET_ARCH_PPC
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
#include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parsing/parser.h"
#include "src/ppc/code-stubs-ppc.h"
#include "src/ppc/macro-assembler-ppc.h"
@@ -131,6 +133,22 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(prologue_offset);
__ Prologue(info->GeneratePreagedPrologue(), ip, prologue_offset);
+ // Increment invocation count for the function.
+ {
+ Comment cmnt(masm_, "[ Increment invocation count");
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
+ __ LoadP(r7, FieldMemOperand(r7, LiteralsArray::kFeedbackVectorOffset));
+ __ LoadP(r8, FieldMemOperand(r7, TypeFeedbackVector::kInvocationCountIndex *
+ kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ __ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
+ __ StoreP(r8,
+ FieldMemOperand(
+ r7, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize),
+ r0);
+ }
+
{
Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -173,14 +191,14 @@ void FullCodeGenerator::Generate() {
bool function_in_register_r4 = true;
// Possibly allocate a local context.
- if (info->scope()->num_heap_slots() > 0) {
+ if (info->scope()->NeedsContext()) {
// Argument to NewContext is the function, which is still in r4.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (info->scope()->is_script_scope()) {
__ push(r4);
- __ Push(info->scope()->GetScopeInfo(info->isolate()));
+ __ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(),
BailoutState::TOS_REGISTER);
@@ -265,9 +283,8 @@ void FullCodeGenerator::Generate() {
}
// Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = info->scope()->rest_parameter(&rest_index);
- if (rest_param) {
+ Variable* rest_param = info->scope()->rest_parameter();
+ if (rest_param != nullptr) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register_r4) {
__ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -732,7 +749,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -783,7 +799,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
@@ -1102,6 +1117,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for the going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
+ PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ pop(r3);
__ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0);
__ push(r3);
@@ -1124,12 +1140,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- __ mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
__ LoadP(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1138,12 +1151,9 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), r3);
- __ mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
__ LoadP(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1183,7 +1193,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Register temp = r7;
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
+ if (s->NeedsContext()) {
if (s->calls_sloppy_eval()) {
// Check that extension is "the hole".
__ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
@@ -1232,20 +1242,6 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
}
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofMode typeof_mode) {
-#ifdef DEBUG
- Variable* var = proxy->var();
- DCHECK(var->IsUnallocatedOrGlobalSlot() ||
- (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
- __ mov(LoadGlobalDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadGlobalIC(typeof_mode);
-}
-
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1256,7 +1252,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1379,10 +1374,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(r3));
- __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
+ CallStoreIC(property->GetSlot(0), key->value());
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1552,6 +1545,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
+ RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1561,8 +1555,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- int array_index = 0;
- for (; array_index < length; array_index++) {
+ for (int array_index = 0; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
DCHECK(!subexpr->IsSpread());
// If the subexpression is a literal or a simple materialized literal it
@@ -1578,31 +1571,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ LoadSmiLiteral(StoreDescriptor::NameRegister(),
Smi::FromInt(array_index));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
- }
-
- // In case the array literal contains spread expressions it has two parts. The
- // first part is the "static" array which has a literal index is handled
- // above. The second part is the part after the first spread expression
- // (inclusive) and these elements gets appended to the array. Note that the
- // number elements an iterable produces is unknown ahead of time.
- if (array_index < length && result_saved) {
- PopOperand(r3);
- result_saved = false;
- }
- for (; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
-
- PushOperand(r3);
- DCHECK(!subexpr->IsSpread());
- VisitForStackValue(subexpr);
- CallRuntimeWithOperands(Runtime::kAppendElement);
+ CallKeyedStoreIC(expr->LiteralFeedbackSlot());
PrepareForBailoutForId(expr->GetIdForElement(array_index),
BailoutState::NO_REGISTERS);
@@ -1998,7 +1967,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
- ObjectLiteral::Property* property = lit->properties()->at(i);
+ ClassLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
Register scratch = r4;
@@ -2025,26 +1994,23 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- case ObjectLiteral::Property::COMPUTED:
+ case ClassLiteral::Property::METHOD:
PushOperand(Smi::FromInt(DONT_ENUM));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
- case ObjectLiteral::Property::GETTER:
+ case ClassLiteral::Property::GETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
- case ObjectLiteral::Property::SETTER:
+ case ClassLiteral::Property::SETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
+ case ClassLiteral::Property::FIELD:
default:
UNREACHABLE();
}
@@ -2081,10 +2047,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
VisitForAccumulatorValue(prop->obj());
__ Move(StoreDescriptor::ReceiverRegister(), r3);
PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- __ mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2131,10 +2094,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), r3);
PopOperands(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(slot);
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
+ CallKeyedStoreIC(slot);
break;
}
}
@@ -2159,10 +2119,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
__ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, var->name());
} else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
@@ -2179,10 +2137,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
}
- if (var->mode() == CONST) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- } else {
+ if (var->mode() != CONST) {
EmitStoreToStackLocalOrContextSlot(var, location);
+ } else if (var->throw_on_const_assignment(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
@@ -2198,7 +2156,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT) {
+ } else {
+ DCHECK(var->mode() != CONST || op == Token::INIT);
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(var->name());
@@ -2219,12 +2178,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
- if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- }
- // Silently ignore store in sloppy mode.
}
}
@@ -2235,11 +2188,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- __ mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
+ CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r3);
@@ -2281,10 +2231,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(r3));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r3);
@@ -2838,24 +2785,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(r3, r4);
- generator.GenerateFast(masm_);
- __ b(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(r4);
-}
-
-
void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3048,7 +2977,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// "delete this" is allowed.
bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
__ LoadGlobalObject(r5);
__ mov(r4, Operand(var->name()));
__ Push(r5, r4);
@@ -3328,11 +3257,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
+ CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3370,10 +3296,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
diff --git a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
index bd1509b77d..dfe652755a 100644
--- a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
+++ b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
@@ -4,14 +4,16 @@
#if V8_TARGET_ARCH_S390
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
#include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parsing/parser.h"
#include "src/s390/code-stubs-s390.h"
#include "src/s390/macro-assembler-s390.h"
@@ -131,6 +133,21 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(prologue_offset);
__ Prologue(info->GeneratePreagedPrologue(), ip, prologue_offset);
+ // Increment invocation count for the function.
+ {
+ Comment cmnt(masm_, "[ Increment invocation count");
+ __ LoadP(r6, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+ __ LoadP(r6, FieldMemOperand(r6, LiteralsArray::kFeedbackVectorOffset));
+ __ LoadP(r1, FieldMemOperand(r6, TypeFeedbackVector::kInvocationCountIndex *
+ kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
+ __ StoreP(r1,
+ FieldMemOperand(
+ r6, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize));
+ }
+
{
Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -178,14 +195,14 @@ void FullCodeGenerator::Generate() {
bool function_in_register_r3 = true;
// Possibly allocate a local context.
- if (info->scope()->num_heap_slots() > 0) {
+ if (info->scope()->NeedsContext()) {
// Argument to NewContext is the function, which is still in r3.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (info->scope()->is_script_scope()) {
__ push(r3);
- __ Push(info->scope()->GetScopeInfo(info->isolate()));
+ __ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(),
BailoutState::TOS_REGISTER);
@@ -270,9 +287,8 @@ void FullCodeGenerator::Generate() {
}
// Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = info->scope()->rest_parameter(&rest_index);
- if (rest_param) {
+ Variable* rest_param = info->scope()->rest_parameter();
+ if (rest_param != nullptr) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register_r3) {
@@ -708,7 +724,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -758,7 +773,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
@@ -1072,6 +1086,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for the going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
+ PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ pop(r2);
__ AddSmiLiteral(r2, r2, Smi::FromInt(1), r0);
__ push(r2);
@@ -1093,12 +1108,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- __ mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
__ LoadP(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
@@ -1106,12 +1118,9 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), r2);
- __ mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
__ LoadP(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
@@ -1149,7 +1158,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Register temp = r6;
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
+ if (s->NeedsContext()) {
if (s->calls_sloppy_eval()) {
// Check that extension is "the hole".
__ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
@@ -1197,18 +1206,6 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
}
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofMode typeof_mode) {
-#ifdef DEBUG
- Variable* var = proxy->var();
- DCHECK(var->IsUnallocatedOrGlobalSlot() ||
- (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
- __ mov(LoadGlobalDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadGlobalIC(typeof_mode);
-}
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1219,7 +1216,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1340,10 +1336,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(r2));
- __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
+ CallStoreIC(property->GetSlot(0), key->value());
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1512,6 +1506,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
+ RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1521,8 +1516,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- int array_index = 0;
- for (; array_index < length; array_index++) {
+ for (int array_index = 0; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
DCHECK(!subexpr->IsSpread());
// If the subexpression is a literal or a simple materialized literal it
@@ -1538,31 +1532,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ LoadSmiLiteral(StoreDescriptor::NameRegister(),
Smi::FromInt(array_index));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
- }
-
- // In case the array literal contains spread expressions it has two parts. The
- // first part is the "static" array which has a literal index is handled
- // above. The second part is the part after the first spread expression
- // (inclusive) and these elements gets appended to the array. Note that the
- // number elements an iterable produces is unknown ahead of time.
- if (array_index < length && result_saved) {
- PopOperand(r2);
- result_saved = false;
- }
- for (; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
-
- PushOperand(r2);
- DCHECK(!subexpr->IsSpread());
- VisitForStackValue(subexpr);
- CallRuntimeWithOperands(Runtime::kAppendElement);
+ CallKeyedStoreIC(expr->LiteralFeedbackSlot());
PrepareForBailoutForId(expr->GetIdForElement(array_index),
BailoutState::NO_REGISTERS);
@@ -1956,7 +1926,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
- ObjectLiteral::Property* property = lit->properties()->at(i);
+ ClassLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
Register scratch = r3;
@@ -1983,26 +1953,23 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- case ObjectLiteral::Property::COMPUTED:
+ case ClassLiteral::Property::METHOD:
PushOperand(Smi::FromInt(DONT_ENUM));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
- case ObjectLiteral::Property::GETTER:
+ case ClassLiteral::Property::GETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
- case ObjectLiteral::Property::SETTER:
+ case ClassLiteral::Property::SETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
+ case ClassLiteral::Property::FIELD:
default:
UNREACHABLE();
}
@@ -2037,10 +2004,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
VisitForAccumulatorValue(prop->obj());
__ Move(StoreDescriptor::ReceiverRegister(), r2);
PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- __ mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2087,10 +2051,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), r2);
PopOperands(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(slot);
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
+ CallKeyedStoreIC(slot);
break;
}
}
@@ -2113,10 +2074,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
__ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, var->name());
} else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
@@ -2134,10 +2093,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
}
- if (var->mode() == CONST) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- } else {
+ if (var->mode() != CONST) {
EmitStoreToStackLocalOrContextSlot(var, location);
+ } else if (var->throw_on_const_assignment(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
@@ -2152,8 +2111,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
-
- } else if (!var->is_const_mode() || op == Token::INIT) {
+ } else {
+ DCHECK(var->mode() != CONST || op == Token::INIT);
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(var->name());
@@ -2174,12 +2133,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
- if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- }
- // Silently ignore store in sloppy mode.
}
}
@@ -2189,11 +2142,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- __ mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
+ CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r2);
@@ -2232,10 +2182,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(r2));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r2);
@@ -2770,23 +2717,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
context()->Plug(r2);
}
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(r2, r3);
- generator.GenerateFast(masm_);
- __ b(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(r3);
-}
-
void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -2969,7 +2899,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// "delete this" is allowed.
bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
__ LoadGlobalObject(r4);
__ mov(r3, Operand(var->name()));
__ Push(r4, r3);
@@ -3248,11 +3178,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(StoreDescriptor::NameRegister(),
- Operand(prop->key()->AsLiteral()->value()));
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
+ CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3290,10 +3217,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
diff --git a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
index ce94a990d5..525319fe70 100644
--- a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
+++ b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -4,14 +4,16 @@
#if V8_TARGET_ARCH_X64
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
#include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -115,6 +117,18 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->GeneratePreagedPrologue());
+ // Increment invocation count for the function.
+ {
+ Comment cmnt(masm_, "[ Increment invocation count");
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rcx, FieldOperand(rcx, LiteralsArray::kFeedbackVectorOffset));
+ __ SmiAddConstant(
+ FieldOperand(rcx,
+ TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize),
+ Smi::FromInt(1));
+ }
+
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
@@ -158,14 +172,14 @@ void FullCodeGenerator::Generate() {
bool function_in_register = true;
// Possibly allocate a local context.
- if (info->scope()->num_heap_slots() > 0) {
+ if (info->scope()->NeedsContext()) {
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
// Argument to NewContext is the function, which is still in rdi.
if (info->scope()->is_script_scope()) {
__ Push(rdi);
- __ Push(info->scope()->GetScopeInfo(info->isolate()));
+ __ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(),
BailoutState::TOS_REGISTER);
@@ -249,9 +263,8 @@ void FullCodeGenerator::Generate() {
}
// Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = info->scope()->rest_parameter(&rest_index);
- if (rest_param) {
+ Variable* rest_param = info->scope()->rest_parameter();
+ if (rest_param != nullptr) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register) {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
@@ -730,7 +743,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -780,7 +792,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
@@ -1092,6 +1103,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for going to the next element by incrementing the
// index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
+ PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
EmitBackEdgeBookkeeping(stmt, &loop);
@@ -1112,12 +1124,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
- __ Move(StoreDescriptor::NameRegister(),
- isolate()->factory()->home_object_symbol());
__ movp(StoreDescriptor::ValueRegister(),
Operand(rsp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1126,12 +1135,9 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ movp(StoreDescriptor::ReceiverRegister(), rax);
- __ Move(StoreDescriptor::NameRegister(),
- isolate()->factory()->home_object_symbol());
__ movp(StoreDescriptor::ValueRegister(),
Operand(rsp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1169,7 +1175,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Register temp = rbx;
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
+ if (s->NeedsContext()) {
if (s->calls_sloppy_eval()) {
// Check that extension is "the hole".
__ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
@@ -1217,20 +1223,6 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
}
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofMode typeof_mode) {
-#ifdef DEBUG
- Variable* var = proxy->var();
- DCHECK(var->IsUnallocatedOrGlobalSlot() ||
- (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
- __ Move(LoadGlobalDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- CallLoadGlobalIC(typeof_mode);
-}
-
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1241,7 +1233,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1367,10 +1358,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(rax));
- __ Move(StoreDescriptor::NameRegister(), key->value());
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
+ CallStoreIC(property->GetSlot(0), key->value());
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1533,6 +1522,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
+ RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1542,8 +1532,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- int array_index = 0;
- for (; array_index < length; array_index++) {
+ for (int array_index = 0; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
DCHECK(!subexpr->IsSpread());
@@ -1559,31 +1548,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Move(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
- }
-
- // In case the array literal contains spread expressions it has two parts. The
- // first part is the "static" array which has a literal index is handled
- // above. The second part is the part after the first spread expression
- // (inclusive) and these elements gets appended to the array. Note that the
- // number elements an iterable produces is unknown ahead of time.
- if (array_index < length && result_saved) {
- PopOperand(rax);
- result_saved = false;
- }
- for (; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
-
- PushOperand(rax);
- DCHECK(!subexpr->IsSpread());
- VisitForStackValue(subexpr);
- CallRuntimeWithOperands(Runtime::kAppendElement);
+ CallKeyedStoreIC(expr->LiteralFeedbackSlot());
PrepareForBailoutForId(expr->GetIdForElement(array_index),
BailoutState::NO_REGISTERS);
@@ -1893,7 +1858,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
- ObjectLiteral::Property* property = lit->properties()->at(i);
+ ClassLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
if (property->is_static()) {
@@ -1918,26 +1883,23 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- case ObjectLiteral::Property::COMPUTED:
+ case ClassLiteral::Property::METHOD:
PushOperand(Smi::FromInt(DONT_ENUM));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
- case ObjectLiteral::Property::GETTER:
+ case ClassLiteral::Property::GETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
- case ObjectLiteral::Property::SETTER:
+ case ClassLiteral::Property::SETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
+ case ClassLiteral::Property::FIELD:
default:
UNREACHABLE();
}
@@ -1974,10 +1936,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
VisitForAccumulatorValue(prop->obj());
__ Move(StoreDescriptor::ReceiverRegister(), rax);
PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- __ Move(StoreDescriptor::NameRegister(),
- prop->key()->AsLiteral()->value());
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2024,10 +1983,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), rax);
PopOperand(StoreDescriptor::ReceiverRegister());
PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- EmitLoadStoreICSlot(slot);
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
+ CallKeyedStoreIC(slot);
break;
}
}
@@ -2050,10 +2006,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ Move(StoreDescriptor::NameRegister(), var->name());
__ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, var->name());
} else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
@@ -2069,10 +2023,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
}
- if (var->mode() == CONST) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- } else {
+ if (var->mode() != CONST) {
EmitStoreToStackLocalOrContextSlot(var, location);
+ } else if (var->throw_on_const_assignment(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
@@ -2088,7 +2042,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT) {
+ } else {
+ DCHECK(var->mode() != CONST || op == Token::INIT);
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(var->name());
@@ -2109,13 +2064,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
-
- } else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
- if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- }
- // Silently ignore store in sloppy mode.
}
}
@@ -2126,10 +2074,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- __ Move(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
+ CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(rax);
@@ -2170,10 +2116,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::NameRegister()); // Key.
PopOperand(StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(rax));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(rax);
@@ -2716,25 +2659,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(rax, rbx);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(rbx);
-}
-
-
void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -2929,7 +2853,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// "delete this" is allowed.
bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
__ movp(rax, NativeContextOperand());
__ Push(ContextOperand(rax, Context::EXTENSION_INDEX));
__ Push(var->name());
@@ -3221,11 +3145,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ Move(StoreDescriptor::NameRegister(),
- prop->key()->AsLiteral()->value());
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
+ CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3263,10 +3184,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
PopOperand(StoreDescriptor::NameRegister());
PopOperand(StoreDescriptor::ReceiverRegister());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
diff --git a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
index 28c8960c4b..47be8b0616 100644
--- a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
+++ b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
@@ -4,14 +4,16 @@
#if V8_TARGET_ARCH_X87
+#include "src/full-codegen/full-codegen.h"
+#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
#include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parsing/parser.h"
#include "src/x87/frames-x87.h"
namespace v8 {
@@ -115,6 +117,17 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->GeneratePreagedPrologue());
+ // Increment invocation count for the function.
+ {
+ Comment cmnt(masm_, "[ Increment invocation count");
+ __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ mov(ecx, FieldOperand(ecx, LiteralsArray::kFeedbackVectorOffset));
+ __ add(FieldOperand(
+ ecx, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
+ TypeFeedbackVector::kHeaderSize),
+ Immediate(Smi::FromInt(1)));
+ }
+
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
@@ -159,14 +172,14 @@ void FullCodeGenerator::Generate() {
bool function_in_register = true;
// Possibly allocate a local context.
- if (info->scope()->num_heap_slots() > 0) {
+ if (info->scope()->NeedsContext()) {
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
// Argument to NewContext is the function, which is still in edi.
if (info->scope()->is_script_scope()) {
__ push(edi);
- __ Push(info->scope()->GetScopeInfo(info->isolate()));
+ __ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(),
BailoutState::TOS_REGISTER);
@@ -251,9 +264,8 @@ void FullCodeGenerator::Generate() {
}
// Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = info->scope()->rest_parameter(&rest_index);
- if (rest_param) {
+ Variable* rest_param = info->scope()->rest_parameter();
+ if (rest_param != nullptr) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -714,7 +726,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
@@ -763,7 +774,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
@@ -1058,6 +1068,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for going to the next element by incrementing the
// index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
+ PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
EmitBackEdgeBookkeeping(stmt, &loop);
@@ -1078,11 +1089,8 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- __ mov(StoreDescriptor::NameRegister(),
- Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1091,11 +1099,8 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), eax);
- __ mov(StoreDescriptor::NameRegister(),
- Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1133,7 +1138,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Register temp = ebx;
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
+ if (s->NeedsContext()) {
if (s->calls_sloppy_eval()) {
// Check that extension is "the hole".
__ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
@@ -1181,20 +1186,6 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
}
-
-void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofMode typeof_mode) {
-#ifdef DEBUG
- Variable* var = proxy->var();
- DCHECK(var->IsUnallocatedOrGlobalSlot() ||
- (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
- __ mov(LoadGlobalDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadGlobalIC(typeof_mode);
-}
-
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
SetExpressionPosition(proxy);
@@ -1204,7 +1195,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
EmitGlobalVariableLoad(proxy, typeof_mode);
@@ -1331,10 +1321,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(eax));
- __ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
+ CallStoreIC(property->GetSlot(0), key->value());
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1498,6 +1486,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
+ RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1507,8 +1496,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- int array_index = 0;
- for (; array_index < length; array_index++) {
+ for (int array_index = 0; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
DCHECK(!subexpr->IsSpread());
@@ -1525,31 +1513,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(StoreDescriptor::NameRegister(),
Immediate(Smi::FromInt(array_index)));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
- }
-
- // In case the array literal contains spread expressions it has two parts. The
- // first part is the "static" array which has a literal index is handled
- // above. The second part is the part after the first spread expression
- // (inclusive) and these elements gets appended to the array. Note that the
- // number elements an iterable produces is unknown ahead of time.
- if (array_index < length && result_saved) {
- PopOperand(eax);
- result_saved = false;
- }
- for (; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
-
- PushOperand(eax);
- DCHECK(!subexpr->IsSpread());
- VisitForStackValue(subexpr);
- CallRuntimeWithOperands(Runtime::kAppendElement);
-
+ CallKeyedStoreIC(expr->LiteralFeedbackSlot());
PrepareForBailoutForId(expr->GetIdForElement(array_index),
BailoutState::NO_REGISTERS);
}
@@ -1894,7 +1858,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
- ObjectLiteral::Property* property = lit->properties()->at(i);
+ ClassLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
if (property->is_static()) {
@@ -1919,25 +1883,25 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- case ObjectLiteral::Property::COMPUTED:
+ case ClassLiteral::Property::METHOD:
PushOperand(Smi::FromInt(DONT_ENUM));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
- case ObjectLiteral::Property::GETTER:
+ case ClassLiteral::Property::GETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
- case ObjectLiteral::Property::SETTER:
+ case ClassLiteral::Property::SETTER:
PushOperand(Smi::FromInt(DONT_ENUM));
CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
+
+ case ClassLiteral::Property::FIELD:
+ UNREACHABLE();
+ break;
}
}
}
@@ -1972,10 +1936,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
VisitForAccumulatorValue(prop->obj());
__ Move(StoreDescriptor::ReceiverRegister(), eax);
PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- __ mov(StoreDescriptor::NameRegister(),
- prop->key()->AsLiteral()->value());
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2022,10 +1983,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), eax);
PopOperand(StoreDescriptor::ReceiverRegister()); // Receiver.
PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- EmitLoadStoreICSlot(slot);
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
+ CallKeyedStoreIC(slot);
break;
}
}
@@ -2048,13 +2006,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
- __ mov(StoreDescriptor::NameRegister(), var->name());
__ mov(StoreDescriptor::ReceiverRegister(), NativeContextOperand());
__ mov(StoreDescriptor::ReceiverRegister(),
ContextOperand(StoreDescriptor::ReceiverRegister(),
Context::EXTENSION_INDEX));
- EmitLoadStoreICSlot(slot);
- CallStoreIC();
+ CallStoreIC(slot, var->name());
} else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
@@ -2070,10 +2026,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
}
- if (var->mode() == CONST) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- } else {
+ if (var->mode() != CONST) {
EmitStoreToStackLocalOrContextSlot(var, location);
+ } else if (var->throw_on_const_assignment(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
@@ -2088,7 +2044,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT) {
+ } else {
+ DCHECK(var->mode() != CONST || op == Token::INIT);
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(Immediate(var->name()));
@@ -2109,13 +2066,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
-
- } else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
- if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- }
- // Silently ignore store in sloppy mode.
}
}
@@ -2128,10 +2078,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- __ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
+ CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
@@ -2174,10 +2122,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::NameRegister()); // Key.
PopOperand(StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(eax));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
@@ -2715,25 +2660,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(eax, ebx);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(ebx);
-}
-
-
void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -2928,7 +2854,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// "delete this" is allowed.
bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
__ mov(eax, NativeContextOperand());
__ push(ContextOperand(eax, Context::EXTENSION_INDEX));
__ push(Immediate(var->name()));
@@ -3222,11 +3148,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
case NAMED_PROPERTY: {
- __ mov(StoreDescriptor::NameRegister(),
- prop->key()->AsLiteral()->value());
PopOperand(StoreDescriptor::ReceiverRegister());
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
+ CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3264,10 +3187,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
PopOperand(StoreDescriptor::NameRegister());
PopOperand(StoreDescriptor::ReceiverRegister());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
+ CallKeyedStoreIC(expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
// Result is on the stack
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index a3af1846db..4e73981593 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -9,7 +9,6 @@
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
-#include "src/compiler.h"
#include "src/frames-inl.h"
#include "src/frames.h"
#include "src/global-handles.h"
@@ -2017,7 +2016,7 @@ static uint32_t HashCodeAddress(Address addr) {
static base::HashMap* GetLineMap() {
static base::HashMap* line_map = NULL;
if (line_map == NULL) {
- line_map = new base::HashMap(&base::HashMap::PointersMatch);
+ line_map = new base::HashMap();
}
return line_map;
}
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 0d02f77fd6..03c5b1dc1a 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -14,6 +14,32 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
+#ifdef V8_OS_WIN
+
+// Setup for Windows shared library export.
+#ifdef BUILDING_V8_SHARED
+#define V8_EXPORT_PRIVATE __declspec(dllexport)
+#elif USING_V8_SHARED
+#define V8_EXPORT_PRIVATE __declspec(dllimport)
+#else
+#define V8_EXPORT_PRIVATE
+#endif // BUILDING_V8_SHARED
+
+#else // V8_OS_WIN
+
+// Setup for Linux shared library export.
+#if V8_HAS_ATTRIBUTE_VISIBILITY
+#ifdef BUILDING_V8_SHARED
+#define V8_EXPORT_PRIVATE __attribute__((visibility("default")))
+#else
+#define V8_EXPORT_PRIVATE
+#endif
+#else
+#define V8_EXPORT_PRIVATE
+#endif
+
+#endif // V8_OS_WIN
+
// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
// warning flag and certain versions of GCC due to a bug:
// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
@@ -161,10 +187,6 @@ const size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
#if V8_OS_WIN
const size_t kMinimumCodeRangeSize = 4 * MB;
const size_t kReservedCodeRangePages = 1;
-// On PPC Linux PageSize is 4MB
-#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
-const size_t kMinimumCodeRangeSize = 12 * MB;
-const size_t kReservedCodeRangePages = 0;
#else
const size_t kMinimumCodeRangeSize = 3 * MB;
const size_t kReservedCodeRangePages = 0;
@@ -193,9 +215,17 @@ const size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
const size_t kReservedCodeRangePages = 0;
#endif
-// The external allocation limit should be below 256 MB on all architectures
-// to avoid that resource-constrained embedders run low on memory.
-const int kExternalAllocationLimit = 192 * 1024 * 1024;
+// Trigger an incremental GCs once the external memory reaches this limit.
+const int kExternalAllocationSoftLimit = 64 * MB;
+
+// Maximum object size that gets allocated into regular pages. Objects larger
+// than that size are allocated in large object space and are never moved in
+// memory. This also applies to new space allocation, since objects are never
+// migrated from new space to large object space. Takes double alignment into
+// account.
+//
+// Current value: Page::kAllocatableMemory (on 32-bit arch) - 512 (slack).
+const int kMaxRegularHeapObjectSize = 507136;
STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
@@ -722,6 +752,7 @@ struct AccessorDescriptor {
enum CpuFeature {
// x86
SSE4_1,
+ SSSE3,
SSE3,
SAHF,
AVX,
@@ -732,13 +763,10 @@ enum CpuFeature {
POPCNT,
ATOM,
// ARM
- VFP3,
- ARMv7,
- ARMv8,
- SUDIV,
- MOVW_MOVT_IMMEDIATE_LOADS,
- VFP32DREGS,
- NEON,
+ // - Standard configurations. The baseline is ARMv6+VFPv2.
+ ARMv7, // ARMv7-A + VFPv3-D32 + NEON
+ ARMv7_SUDIV, // ARMv7-A + VFPv4-D32 + NEON + SUDIV
+ ARMv8, // ARMv8-A (+ all of the above)
// MIPS, MIPS64
FPU,
FP64FPU,
@@ -755,10 +783,14 @@ enum CpuFeature {
DISTINCT_OPS,
GENERAL_INSTR_EXT,
FLOATING_POINT_EXT,
- // PPC/S390
- UNALIGNED_ACCESSES,
- NUMBER_OF_CPU_FEATURES
+ NUMBER_OF_CPU_FEATURES,
+
+ // ARM feature aliases (based on the standard configurations above).
+ VFPv3 = ARMv7,
+ NEON = ARMv7,
+ VFP32DREGS = ARMv7,
+ SUDIV = ARMv7_SUDIV
};
// Defines hints about receiver values based on structural knowledge.
@@ -840,8 +872,7 @@ enum SmiCheckType {
DO_SMI_CHECK
};
-
-enum ScopeType {
+enum ScopeType : uint8_t {
EVAL_SCOPE, // The top-level scope for an eval source.
FUNCTION_SCOPE, // The top-level scope for a function.
MODULE_SCOPE, // The scope introduced by a module literal
@@ -878,12 +909,10 @@ const double kMaxSafeInteger = 9007199254740991.0; // 2^53-1
// The order of this enum has to be kept in sync with the predicates below.
-enum VariableMode {
+enum VariableMode : uint8_t {
// User declared variables:
VAR, // declared via 'var', and 'function' declarations
- CONST_LEGACY, // declared via legacy 'const' declarations
-
LET, // declared via 'let' declarations (first lexical)
CONST, // declared via 'const' declarations (last lexical)
@@ -899,10 +928,44 @@ enum VariableMode {
// variable is global unless it has been shadowed
// by an eval-introduced variable
- DYNAMIC_LOCAL // requires dynamic lookup, but we know that the
- // variable is local and where it is unless it
- // has been shadowed by an eval-introduced
- // variable
+ DYNAMIC_LOCAL, // requires dynamic lookup, but we know that the
+ // variable is local and where it is unless it
+ // has been shadowed by an eval-introduced
+ // variable
+
+ kLastVariableMode = DYNAMIC_LOCAL
+};
+
+// Printing support
+#ifdef DEBUG
+inline const char* VariableMode2String(VariableMode mode) {
+ switch (mode) {
+ case VAR:
+ return "VAR";
+ case LET:
+ return "LET";
+ case CONST:
+ return "CONST";
+ case DYNAMIC:
+ return "DYNAMIC";
+ case DYNAMIC_GLOBAL:
+ return "DYNAMIC_GLOBAL";
+ case DYNAMIC_LOCAL:
+ return "DYNAMIC_LOCAL";
+ case TEMPORARY:
+ return "TEMPORARY";
+ }
+ UNREACHABLE();
+ return NULL;
+}
+#endif
+
+enum VariableKind : uint8_t {
+ NORMAL_VARIABLE,
+ FUNCTION_VARIABLE,
+ THIS_VARIABLE,
+ SLOPPY_FUNCTION_NAME_VARIABLE,
+ kLastKind = SLOPPY_FUNCTION_NAME_VARIABLE
};
inline bool IsDynamicVariableMode(VariableMode mode) {
@@ -911,7 +974,8 @@ inline bool IsDynamicVariableMode(VariableMode mode) {
inline bool IsDeclaredVariableMode(VariableMode mode) {
- return mode >= VAR && mode <= CONST;
+ STATIC_ASSERT(VAR == 0); // Implies that mode >= VAR.
+ return mode <= CONST;
}
@@ -919,12 +983,7 @@ inline bool IsLexicalVariableMode(VariableMode mode) {
return mode >= LET && mode <= CONST;
}
-
-inline bool IsImmutableVariableMode(VariableMode mode) {
- return mode == CONST || mode == CONST_LEGACY;
-}
-
-enum class VariableLocation {
+enum VariableLocation : uint8_t {
// Before and during variable allocation, a variable whose location is
// not yet determined. After allocation, a variable looked up as a
// property on the global object (and possibly absent). name() is the
@@ -945,19 +1004,15 @@ enum class VariableLocation {
// corresponding scope.
CONTEXT,
- // An indexed slot in a script context that contains a respective global
- // property cell. name() is the variable name, index() is the variable
- // index in the context object on the heap, starting at 0. scope() is the
- // corresponding script scope.
- GLOBAL,
-
// A named slot in a heap context. name() is the variable name in the
// context object on the heap, with lookup starting at the current
// context. index() is invalid.
LOOKUP,
// A named slot in a module's export table.
- MODULE
+ MODULE,
+
+ kLastVariableLocation = MODULE
};
// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
@@ -991,14 +1046,9 @@ enum class VariableLocation {
// The following enum specifies a flag that indicates if the binding needs a
// distinct initialization step (kNeedsInitialization) or if the binding is
// immediately initialized upon creation (kCreatedInitialized).
-enum InitializationFlag {
- kNeedsInitialization,
- kCreatedInitialized
-};
-
-
-enum MaybeAssignedFlag { kNotAssigned, kMaybeAssigned };
+enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized };
+enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
// Serialized in PreparseData, so numeric values should not be changed.
enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };
@@ -1024,6 +1074,7 @@ enum FunctionKind : uint16_t {
kGetterFunction = 1 << 6,
kSetterFunction = 1 << 7,
kAsyncFunction = 1 << 8,
+ kModule = 1 << 9,
kAccessorFunction = kGetterFunction | kSetterFunction,
kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
@@ -1037,6 +1088,7 @@ inline bool IsValidFunctionKind(FunctionKind kind) {
return kind == FunctionKind::kNormalFunction ||
kind == FunctionKind::kArrowFunction ||
kind == FunctionKind::kGeneratorFunction ||
+ kind == FunctionKind::kModule ||
kind == FunctionKind::kConciseMethod ||
kind == FunctionKind::kConciseGeneratorMethod ||
kind == FunctionKind::kGetterFunction ||
@@ -1063,13 +1115,18 @@ inline bool IsGeneratorFunction(FunctionKind kind) {
return kind & FunctionKind::kGeneratorFunction;
}
+inline bool IsModule(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind & FunctionKind::kModule;
+}
+
inline bool IsAsyncFunction(FunctionKind kind) {
DCHECK(IsValidFunctionKind(kind));
return kind & FunctionKind::kAsyncFunction;
}
inline bool IsResumableFunction(FunctionKind kind) {
- return IsGeneratorFunction(kind) || IsAsyncFunction(kind);
+ return IsGeneratorFunction(kind) || IsAsyncFunction(kind) || IsModule(kind);
}
inline bool IsConciseMethod(FunctionKind kind) {
@@ -1152,11 +1209,59 @@ inline uint32_t ObjectHash(Address address) {
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
// kSignedSmall -> kNumber -> kAny
+// kString -> kAny
class BinaryOperationFeedback {
public:
+ enum {
+ kNone = 0x0,
+ kSignedSmall = 0x1,
+ kNumber = 0x3,
+ kString = 0x4,
+ kAny = 0xF
+ };
+};
+
+// TODO(epertoso): consider unifying this with BinaryOperationFeedback.
+class CompareOperationFeedback {
+ public:
enum { kNone = 0x00, kSignedSmall = 0x01, kNumber = 0x3, kAny = 0x7 };
};
+// Describes how exactly a frame has been dropped from stack.
+enum LiveEditFrameDropMode {
+ // No frame has been dropped.
+ LIVE_EDIT_FRAMES_UNTOUCHED,
+ // The top JS frame had been calling debug break slot stub. Patch the
+ // address this stub jumps to in the end.
+ LIVE_EDIT_FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
+ // The top JS frame had been calling some C++ function. The return address
+ // gets patched automatically.
+ LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL,
+ LIVE_EDIT_FRAME_DROPPED_IN_RETURN_CALL,
+ LIVE_EDIT_CURRENTLY_SET_MODE
+};
+
+enum class UnicodeEncoding : uint8_t {
+ // Different unicode encodings in a |word32|:
+ UTF16, // hi 16bits -> trailing surrogate or 0, low 16bits -> lead surrogate
+ UTF32, // full UTF32 code unit / Unicode codepoint
+};
+
+inline size_t hash_value(UnicodeEncoding encoding) {
+ return static_cast<uint8_t>(encoding);
+}
+
+inline std::ostream& operator<<(std::ostream& os, UnicodeEncoding encoding) {
+ switch (encoding) {
+ case UnicodeEncoding::UTF16:
+ return os << "UTF16";
+ case UnicodeEncoding::UTF32:
+ return os << "UTF32";
+ }
+ UNREACHABLE();
+ return os;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index a7cd0e2497..3587d853c6 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -10,7 +10,7 @@
#include "src/base/macros.h"
#include "src/checks.h"
#include "src/globals.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -63,10 +63,12 @@ class HandleBase {
enum DereferenceCheckMode { INCLUDE_DEFERRED_CHECK, NO_DEFERRED_CHECK };
#ifdef DEBUG
- bool IsDereferenceAllowed(DereferenceCheckMode mode) const;
+ bool V8_EXPORT_PRIVATE IsDereferenceAllowed(DereferenceCheckMode mode) const;
#else
V8_INLINE
- bool IsDereferenceAllowed(DereferenceCheckMode mode) const { return true; }
+ bool V8_EXPORT_PRIVATE IsDereferenceAllowed(DereferenceCheckMode mode) const {
+ return true;
+ }
#endif // DEBUG
Object** location_;
@@ -206,6 +208,10 @@ class MaybeHandle final {
USE(a);
}
+ template <typename S>
+ V8_INLINE MaybeHandle(S* object, Isolate* isolate)
+ : MaybeHandle(handle(object, isolate)) {}
+
V8_INLINE void Assert() const { DCHECK_NOT_NULL(location_); }
V8_INLINE void Check() const { CHECK_NOT_NULL(location_); }
@@ -262,7 +268,7 @@ class HandleScope {
inline ~HandleScope();
// Counts the number of allocated handles.
- static int NumberOfHandles(Isolate* isolate);
+ V8_EXPORT_PRIVATE static int NumberOfHandles(Isolate* isolate);
// Create a new handle or lookup a canonical handle.
V8_INLINE static Object** GetHandle(Isolate* isolate, Object* value);
@@ -271,7 +277,7 @@ class HandleScope {
V8_INLINE static Object** CreateHandle(Isolate* isolate, Object* value);
// Deallocates any extensions used by the current scope.
- static void DeleteExtensions(Isolate* isolate);
+ V8_EXPORT_PRIVATE static void DeleteExtensions(Isolate* isolate);
static Address current_next_address(Isolate* isolate);
static Address current_limit_address(Isolate* isolate);
@@ -293,8 +299,6 @@ class HandleScope {
private:
// Prevent heap allocation or illegal handle scopes.
- HandleScope(const HandleScope&);
- void operator=(const HandleScope&);
void* operator new(size_t size);
void operator delete(void* size_t);
@@ -308,11 +312,11 @@ class HandleScope {
Object** prev_limit);
// Extend the handle scope making room for more handles.
- static Object** Extend(Isolate* isolate);
+ V8_EXPORT_PRIVATE static Object** Extend(Isolate* isolate);
#ifdef ENABLE_HANDLE_ZAPPING
// Zaps the handles in the half-open interval [start, end).
- static void ZapRange(Object** start, Object** end);
+ V8_EXPORT_PRIVATE static void ZapRange(Object** start, Object** end);
#endif
friend class v8::HandleScope;
@@ -320,6 +324,8 @@ class HandleScope {
friend class DeferredHandleScope;
friend class HandleScopeImplementer;
friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(HandleScope);
};
@@ -340,7 +346,7 @@ class CanonicalHandleScope final {
~CanonicalHandleScope();
private:
- Object** Lookup(Object* object);
+ V8_EXPORT_PRIVATE Object** Lookup(Object* object);
Isolate* isolate_;
Zone zone_;
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index d83f63fdbe..c7b3370dce 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -8,11 +8,11 @@
#define INTERNALIZED_STRING_LIST(V) \
V(anonymous_string, "anonymous") \
V(apply_string, "apply") \
- V(assign_string, "assign") \
V(arguments_string, "arguments") \
V(Arguments_string, "Arguments") \
- V(Array_string, "Array") \
V(arguments_to_string, "[object Arguments]") \
+ V(Array_string, "Array") \
+ V(assign_string, "assign") \
V(array_to_string, "[object Array]") \
V(boolean_to_string, "[object Boolean]") \
V(date_to_string, "[object Date]") \
@@ -48,6 +48,8 @@
V(construct_string, "construct") \
V(create_string, "create") \
V(Date_string, "Date") \
+ V(dayperiod_string, "dayperiod") \
+ V(day_string, "day") \
V(default_string, "default") \
V(defineProperty_string, "defineProperty") \
V(deleteProperty_string, "deleteProperty") \
@@ -57,10 +59,12 @@
V(dot_string, ".") \
V(entries_string, "entries") \
V(enumerable_string, "enumerable") \
+ V(era_string, "era") \
V(Error_string, "Error") \
V(eval_string, "eval") \
V(EvalError_string, "EvalError") \
V(false_string, "false") \
+ V(flags_string, "flags") \
V(float32x4_string, "float32x4") \
V(Float32x4_string, "Float32x4") \
V(for_api_string, "for_api") \
@@ -74,6 +78,8 @@
V(get_string, "get") \
V(global_string, "global") \
V(has_string, "has") \
+ V(hour_string, "hour") \
+ V(ignoreCase_string, "ignoreCase") \
V(illegal_access_string, "illegal access") \
V(illegal_argument_string, "illegal argument") \
V(index_string, "index") \
@@ -92,10 +98,14 @@
V(last_index_string, "lastIndex") \
V(length_string, "length") \
V(line_string, "line") \
+ V(literal_string, "literal") \
V(Map_string, "Map") \
V(message_string, "message") \
V(minus_infinity_string, "-Infinity") \
V(minus_zero_string, "-0") \
+ V(minute_string, "minute") \
+ V(month_string, "month") \
+ V(multiline_string, "multiline") \
V(name_string, "name") \
V(nan_string, "NaN") \
V(next_string, "next") \
@@ -120,6 +130,7 @@
V(ReferenceError_string, "ReferenceError") \
V(RegExp_string, "RegExp") \
V(script_string, "script") \
+ V(second_string, "second") \
V(setPrototypeOf_string, "setPrototypeOf") \
V(set_string, "set") \
V(Set_string, "Set") \
@@ -128,6 +139,7 @@
V(sourceText_string, "sourceText") \
V(source_url_string, "source_url") \
V(stack_string, "stack") \
+ V(stackTraceLimit_string, "stackTraceLimit") \
V(strict_compare_ic_string, "===") \
V(string_string, "string") \
V(String_string, "String") \
@@ -137,10 +149,12 @@
V(this_string, "this") \
V(throw_string, "throw") \
V(timed_out, "timed-out") \
+ V(timeZoneName_string, "timeZoneName") \
V(toJSON_string, "toJSON") \
V(toString_string, "toString") \
V(true_string, "true") \
V(TypeError_string, "TypeError") \
+ V(type_string, "type") \
V(uint16x8_string, "uint16x8") \
V(Uint16x8_string, "Uint16x8") \
V(uint32x4_string, "uint32x4") \
@@ -155,19 +169,16 @@
V(value_string, "value") \
V(WeakMap_string, "WeakMap") \
V(WeakSet_string, "WeakSet") \
- V(writable_string, "writable")
+ V(weekday_string, "weekday") \
+ V(writable_string, "writable") \
+ V(year_string, "year")
#define PRIVATE_SYMBOL_LIST(V) \
V(array_iteration_kind_symbol) \
V(array_iterator_next_symbol) \
V(array_iterator_object_symbol) \
- V(call_site_constructor_symbol) \
- V(call_site_function_symbol) \
- V(call_site_position_symbol) \
- V(call_site_receiver_symbol) \
- V(call_site_strict_symbol) \
- V(call_site_wasm_obj_symbol) \
- V(call_site_wasm_func_index_symbol) \
+ V(call_site_frame_array_symbol) \
+ V(call_site_frame_index_symbol) \
V(class_end_position_symbol) \
V(class_start_position_symbol) \
V(detailed_stack_trace_symbol) \
@@ -189,10 +200,13 @@
V(normal_ic_symbol) \
V(not_mapped_symbol) \
V(premonomorphic_symbol) \
- V(promise_combined_deferred_symbol) \
+ V(promise_async_stack_id_symbol) \
V(promise_debug_marker_symbol) \
V(promise_deferred_reactions_symbol) \
+ V(promise_forwarding_handler_symbol) \
V(promise_fulfill_reactions_symbol) \
+ V(promise_handled_by_symbol) \
+ V(promise_handled_hint_symbol) \
V(promise_has_handler_symbol) \
V(promise_raw_symbol) \
V(promise_reject_reactions_symbol) \
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 695a259884..8049ce498b 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -23,11 +23,16 @@ static intptr_t CountTotalHolesSize(Heap* heap) {
GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
: tracer_(tracer), scope_(scope) {
+ // All accesses to incremental_marking_scope assume that incremental marking
+ // scopes come first.
+ STATIC_ASSERT(FIRST_INCREMENTAL_SCOPE == 0);
start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (FLAG_runtime_call_stats) {
- RuntimeCallStats::Enter(tracer_->heap_->isolate(), &timer_,
- &RuntimeCallStats::GC);
+ if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
+ FLAG_runtime_call_stats) {
+ RuntimeCallStats::Enter(
+ tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_,
+ &RuntimeCallStats::GC);
}
}
@@ -35,8 +40,10 @@ GCTracer::Scope::~Scope() {
tracer_->AddScopeSample(
scope_, tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_);
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (FLAG_runtime_call_stats) {
- RuntimeCallStats::Leave(tracer_->heap_->isolate(), &timer_);
+ if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
+ FLAG_runtime_call_stats) {
+ RuntimeCallStats::Leave(
+ tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_);
}
}
@@ -53,7 +60,7 @@ const char* GCTracer::Scope::Name(ScopeId id) {
return "(unknown)";
}
-GCTracer::Event::Event(Type type, const char* gc_reason,
+GCTracer::Event::Event(Type type, GarbageCollectionReason gc_reason,
const char* collector_reason)
: type(type),
gc_reason(gc_reason),
@@ -69,10 +76,8 @@ GCTracer::Event::Event(Type type, const char* gc_reason,
end_holes_size(0),
new_space_object_size(0),
survived_new_space_object_size(0),
- cumulative_incremental_marking_bytes(0),
incremental_marking_bytes(0),
- cumulative_pure_incremental_marking_duration(0.0),
- pure_incremental_marking_duration(0.0) {
+ incremental_marking_duration(0.0) {
for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
scopes[i] = 0;
}
@@ -106,14 +111,11 @@ const char* GCTracer::Event::TypeName(bool short_name) const {
GCTracer::GCTracer(Heap* heap)
: heap_(heap),
- current_(Event::START, nullptr, nullptr),
+ current_(Event::START, GarbageCollectionReason::kUnknown, nullptr),
previous_(current_),
- previous_incremental_mark_compactor_event_(current_),
- cumulative_incremental_marking_bytes_(0),
- cumulative_incremental_marking_duration_(0.0),
- cumulative_pure_incremental_marking_duration_(0.0),
- cumulative_marking_duration_(0.0),
- cumulative_sweeping_duration_(0.0),
+ incremental_marking_bytes_(0),
+ incremental_marking_duration_(0.0),
+ recorded_incremental_marking_speed_(0.0),
allocation_time_ms_(0.0),
new_space_allocation_counter_bytes_(0),
old_generation_allocation_counter_bytes_(0),
@@ -126,19 +128,10 @@ GCTracer::GCTracer(Heap* heap)
}
void GCTracer::ResetForTesting() {
- current_ = Event(Event::START, NULL, NULL);
+ current_ = Event(Event::START, GarbageCollectionReason::kTesting, nullptr);
current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
- previous_ = previous_incremental_mark_compactor_event_ = current_;
- cumulative_incremental_marking_bytes_ = 0.0;
- cumulative_incremental_marking_duration_ = 0.0;
- cumulative_pure_incremental_marking_duration_ = 0.0;
- cumulative_marking_duration_ = 0.0;
- for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
- incremental_marking_scopes_[i].cumulative_duration = 0.0;
- incremental_marking_scopes_[i].steps = 0;
- incremental_marking_scopes_[i].longest_step = 0.0;
- }
- cumulative_sweeping_duration_ = 0.0;
+ previous_ = current_;
+ ResetIncrementalMarkingCounters();
allocation_time_ms_ = 0.0;
new_space_allocation_counter_bytes_ = 0.0;
old_generation_allocation_counter_bytes_ = 0.0;
@@ -158,7 +151,8 @@ void GCTracer::ResetForTesting() {
start_counter_ = 0;
}
-void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
+void GCTracer::Start(GarbageCollector collector,
+ GarbageCollectionReason gc_reason,
const char* collector_reason) {
start_counter_++;
if (start_counter_ != 1) return;
@@ -167,8 +161,6 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
double start_time = heap_->MonotonicallyIncreasingTimeInMs();
SampleAllocation(start_time, heap_->NewSpaceAllocationCounter(),
heap_->OldGenerationAllocationCounter());
- if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR)
- previous_incremental_mark_compactor_event_ = current_;
if (collector == SCAVENGER) {
current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
@@ -189,10 +181,8 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
current_.new_space_object_size =
heap_->new_space()->top() - heap_->new_space()->bottom();
- current_.cumulative_incremental_marking_bytes =
- cumulative_incremental_marking_bytes_;
- current_.cumulative_pure_incremental_marking_duration =
- cumulative_pure_incremental_marking_duration_;
+ current_.incremental_marking_bytes = 0;
+ current_.incremental_marking_duration = 0;
for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
current_.scopes[i] = 0;
@@ -200,37 +190,40 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
int committed_memory = static_cast<int>(heap_->CommittedMemory() / KB);
int used_memory = static_cast<int>(current_.start_object_size / KB);
- heap_->isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
- start_time, committed_memory);
- heap_->isolate()->counters()->aggregated_memory_heap_used()->AddSample(
- start_time, used_memory);
+
+ Counters* counters = heap_->isolate()->counters();
+
+ if (collector == SCAVENGER) {
+ counters->scavenge_reason()->AddSample(static_cast<int>(gc_reason));
+ } else {
+ counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
+ }
+ counters->aggregated_memory_heap_committed()->AddSample(start_time,
+ committed_memory);
+ counters->aggregated_memory_heap_used()->AddSample(start_time, used_memory);
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (FLAG_runtime_call_stats) {
- RuntimeCallStats::Enter(heap_->isolate(), &timer_, &RuntimeCallStats::GC);
+ if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
+ FLAG_runtime_call_stats) {
+ RuntimeCallStats::Enter(heap_->isolate()->counters()->runtime_call_stats(),
+ &timer_, &RuntimeCallStats::GC);
}
}
-void GCTracer::MergeBaseline(const Event& baseline) {
- current_.incremental_marking_bytes =
- current_.cumulative_incremental_marking_bytes -
- baseline.cumulative_incremental_marking_bytes;
- current_.pure_incremental_marking_duration =
- current_.cumulative_pure_incremental_marking_duration -
- baseline.cumulative_pure_incremental_marking_duration;
- for (int i = Scope::FIRST_INCREMENTAL_SCOPE;
- i <= Scope::LAST_INCREMENTAL_SCOPE; i++) {
- current_.scopes[i] =
- current_.incremental_marking_scopes[i].cumulative_duration -
- baseline.incremental_marking_scopes[i].cumulative_duration;
+void GCTracer::ResetIncrementalMarkingCounters() {
+ incremental_marking_bytes_ = 0;
+ incremental_marking_duration_ = 0;
+ for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
+ incremental_marking_scopes_[i].ResetCurrentCycle();
}
}
void GCTracer::Stop(GarbageCollector collector) {
start_counter_--;
if (start_counter_ != 0) {
- PrintIsolate(heap_->isolate(), "[Finished reentrant %s during %s.]\n",
- collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
- current_.TypeName(false));
+ heap_->isolate()->PrintWithTimestamp(
+ "[Finished reentrant %s during %s.]\n",
+ collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
+ current_.TypeName(false));
return;
}
@@ -240,11 +233,6 @@ void GCTracer::Stop(GarbageCollector collector) {
(current_.type == Event::MARK_COMPACTOR ||
current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
- for (int i = Scope::FIRST_INCREMENTAL_SCOPE;
- i <= Scope::LAST_INCREMENTAL_SCOPE; i++) {
- current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
- }
-
current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
current_.end_object_size = heap_->SizeOfObjects();
current_.end_memory_size = heap_->memory_allocator()->Size();
@@ -263,36 +251,33 @@ void GCTracer::Stop(GarbageCollector collector) {
double duration = current_.end_time - current_.start_time;
if (current_.type == Event::SCAVENGER) {
- MergeBaseline(previous_);
recorded_scavenges_total_.Push(
MakeBytesAndDuration(current_.new_space_object_size, duration));
recorded_scavenges_survived_.Push(MakeBytesAndDuration(
current_.survived_new_space_object_size, duration));
} else if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
- MergeBaseline(previous_incremental_mark_compactor_event_);
- recorded_incremental_marking_steps_.Push(
- MakeBytesAndDuration(current_.incremental_marking_bytes,
- current_.pure_incremental_marking_duration));
+ current_.incremental_marking_bytes = incremental_marking_bytes_;
+ current_.incremental_marking_duration = incremental_marking_duration_;
+ for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
+ current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
+ current_.scopes[i] = incremental_marking_scopes_[i].duration;
+ }
+ RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
+ current_.incremental_marking_duration);
recorded_incremental_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
+ ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
- for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
- incremental_marking_scopes_[i].ResetCurrentCycle();
- }
} else {
- DCHECK(current_.incremental_marking_bytes == 0);
- DCHECK(current_.pure_incremental_marking_duration == 0);
+ DCHECK_EQ(0, current_.incremental_marking_bytes);
+ DCHECK_EQ(0, current_.incremental_marking_duration);
recorded_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
+ ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
- for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
- incremental_marking_scopes_[i].ResetCurrentCycle();
- }
}
- double spent_in_mutator = Max(current_.start_time - previous_.end_time, 0.0);
- heap_->UpdateCumulativeGCStatistics(duration, spent_in_mutator,
- current_.scopes[Scope::MC_MARK]);
+ heap_->UpdateTotalGCTime(duration);
if (current_.type == Event::SCAVENGER && FLAG_trace_gc_ignore_scavenger)
return;
@@ -308,8 +293,10 @@ void GCTracer::Stop(GarbageCollector collector) {
}
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (FLAG_runtime_call_stats) {
- RuntimeCallStats::Leave(heap_->isolate(), &timer_);
+ if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
+ FLAG_runtime_call_stats) {
+ RuntimeCallStats::Leave(heap_->isolate()->counters()->runtime_call_stats(),
+ &timer_);
}
}
@@ -375,11 +362,9 @@ void GCTracer::AddSurvivalRatio(double promotion_ratio) {
void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
- cumulative_incremental_marking_bytes_ += bytes;
- cumulative_incremental_marking_duration_ += duration;
- cumulative_marking_duration_ += duration;
if (bytes > 0) {
- cumulative_pure_incremental_marking_duration_ += duration;
+ incremental_marking_bytes_ += bytes;
+ incremental_marking_duration_ += duration;
}
}
@@ -402,29 +387,20 @@ void GCTracer::Output(const char* format, ...) const {
heap_->AddToRingBuffer(buffer.start());
}
-
void GCTracer::Print() const {
double duration = current_.end_time - current_.start_time;
const size_t kIncrementalStatsSize = 128;
char incremental_buffer[kIncrementalStatsSize] = {0};
- if (current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps > 0) {
- if (current_.type == Event::SCAVENGER) {
- base::OS::SNPrintF(
- incremental_buffer, kIncrementalStatsSize,
- " (+ %.1f ms in %d steps since last GC)",
- current_.scopes[Scope::MC_INCREMENTAL],
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps);
- } else {
- base::OS::SNPrintF(
- incremental_buffer, kIncrementalStatsSize,
- " (+ %.1f ms in %d steps since start of marking, "
- "biggest step %.1f ms)",
- current_.scopes[Scope::MC_INCREMENTAL],
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps,
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL]
- .longest_step);
- }
+ if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
+ base::OS::SNPrintF(
+ incremental_buffer, kIncrementalStatsSize,
+ " (+ %.1f ms in %d steps since start of marking, "
+ "biggest step %.1f ms, walltime since start of marking %.f ms)",
+ current_.scopes[Scope::MC_INCREMENTAL],
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps,
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].longest_step,
+ current_.end_time - incremental_marking_start_time_);
}
// Avoid PrintF as Output also appends the string to the tracing ring buffer
@@ -442,7 +418,7 @@ void GCTracer::Print() const {
static_cast<double>(current_.end_object_size) / MB,
static_cast<double>(current_.end_memory_size) / MB, duration,
TotalExternalTime(), incremental_buffer,
- current_.gc_reason != nullptr ? current_.gc_reason : "",
+ Heap::GarbageCollectionReasonToString(current_.gc_reason),
current_.collector_reason != nullptr ? current_.collector_reason : "");
}
@@ -453,11 +429,16 @@ void GCTracer::PrintNVP() const {
intptr_t allocated_since_last_gc =
current_.start_object_size - previous_.end_object_size;
+ double incremental_walltime_duration = 0;
+
+ if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
+ incremental_walltime_duration =
+ current_.end_time - incremental_marking_start_time_;
+ }
+
switch (current_.type) {
case Event::SCAVENGER:
- PrintIsolate(
- heap_->isolate(),
- "%8.0f ms: "
+ heap_->isolate()->PrintWithTimestamp(
"pause=%.1f "
"mutator=%.1f "
"gc=%s "
@@ -498,9 +479,8 @@ void GCTracer::PrintNVP() const {
"semi_space_copy_rate=%.1f%% "
"new_space_allocation_throughput=%.1f "
"context_disposal_rate=%.1f\n",
- heap_->isolate()->time_millis_since_init(), duration,
- spent_in_mutator, current_.TypeName(true), current_.reduce_memory,
- current_.scopes[Scope::SCAVENGER_SCAVENGE],
+ duration, spent_in_mutator, current_.TypeName(true),
+ current_.reduce_memory, current_.scopes[Scope::SCAVENGER_SCAVENGE],
current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS],
current_.scopes[Scope::SCAVENGER_WEAK],
current_.scopes[Scope::SCAVENGER_ROOTS],
@@ -527,9 +507,7 @@ void GCTracer::PrintNVP() const {
break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
- PrintIsolate(
- heap_->isolate(),
- "%8.0f ms: "
+ heap_->isolate()->PrintWithTimestamp(
"pause=%.1f "
"mutator=%.1f "
"gc=%s "
@@ -580,6 +558,7 @@ void GCTracer::PrintNVP() const {
"incremental.finalize.external.prologue=%.1f "
"incremental.finalize.external.epilogue=%.1f "
"incremental.finalize.object_grouping=%.1f "
+ "incremental.sweeping=%.1f "
"incremental.wrapper_prologue=%.1f "
"incremental.wrapper_tracing=%.1f "
"incremental_wrapper_tracing_longest_step=%.1f "
@@ -588,6 +567,7 @@ void GCTracer::PrintNVP() const {
"incremental_longest_step=%.1f "
"incremental_steps_count=%d "
"incremental_marking_throughput=%.f "
+ "incremental_walltime_duration=%.f "
"total_size_before=%" V8PRIdPTR
" "
"total_size_after=%" V8PRIdPTR
@@ -612,9 +592,8 @@ void GCTracer::PrintNVP() const {
"new_space_allocation_throughput=%.1f "
"context_disposal_rate=%.1f "
"compaction_speed=%.f\n",
- heap_->isolate()->time_millis_since_init(), duration,
- spent_in_mutator, current_.TypeName(true), current_.reduce_memory,
- current_.scopes[Scope::MC_CLEAR],
+ duration, spent_in_mutator, current_.TypeName(true),
+ current_.reduce_memory, current_.scopes[Scope::MC_CLEAR],
current_.scopes[Scope::MC_CLEAR_CODE_FLUSH],
current_.scopes[Scope::MC_CLEAR_DEPENDENT_CODE],
current_.scopes[Scope::MC_CLEAR_GLOBAL_HANDLES],
@@ -659,6 +638,7 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE],
current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE],
current_.scopes[Scope::MC_INCREMENTAL_FINALIZE_OBJECT_GROUPING],
+ current_.scopes[Scope::MC_INCREMENTAL_SWEEPING],
current_.scopes[Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE],
current_.scopes[Scope::MC_INCREMENTAL_WRAPPER_TRACING],
current_
@@ -674,9 +654,10 @@ void GCTracer::PrintNVP() const {
.longest_step,
current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps,
IncrementalMarkingSpeedInBytesPerMillisecond(),
- current_.start_object_size, current_.end_object_size,
- current_.start_holes_size, current_.end_holes_size,
- allocated_since_last_gc, heap_->promoted_objects_size(),
+ incremental_walltime_duration, current_.start_object_size,
+ current_.end_object_size, current_.start_holes_size,
+ current_.end_holes_size, allocated_since_last_gc,
+ heap_->promoted_objects_size(),
heap_->semi_space_copied_object_size(),
heap_->nodes_died_in_new_space_, heap_->nodes_copied_in_new_space_,
heap_->nodes_promoted_, heap_->promotion_ratio_,
@@ -716,15 +697,26 @@ double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer) {
return AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 0);
}
+void GCTracer::RecordIncrementalMarkingSpeed(intptr_t bytes, double duration) {
+ if (duration == 0 || bytes == 0) return;
+ double current_speed = bytes / duration;
+ if (recorded_incremental_marking_speed_ == 0) {
+ recorded_incremental_marking_speed_ = current_speed;
+ } else {
+ recorded_incremental_marking_speed_ =
+ (recorded_incremental_marking_speed_ + current_speed) / 2;
+ }
+}
+
double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
- if (cumulative_incremental_marking_duration_ == 0.0) return 0;
- // We haven't completed an entire round of incremental marking, yet.
- // Use data from GCTracer instead of data from event buffers.
- if (recorded_incremental_marking_steps_.Count() == 0) {
- return cumulative_incremental_marking_bytes_ /
- cumulative_pure_incremental_marking_duration_;
+ const int kConservativeSpeedInBytesPerMillisecond = 128 * KB;
+ if (recorded_incremental_marking_speed_ != 0) {
+ return recorded_incremental_marking_speed_;
+ }
+ if (incremental_marking_duration_ != 0.0) {
+ return incremental_marking_bytes_ / incremental_marking_duration_;
}
- return AverageSpeed(recorded_incremental_marking_steps_);
+ return kConservativeSpeedInBytesPerMillisecond;
}
double GCTracer::ScavengeSpeedInBytesPerMillisecond(
@@ -821,5 +813,10 @@ bool GCTracer::SurvivalEventsRecorded() const {
}
void GCTracer::ResetSurvivalEvents() { recorded_survival_ratios_.Reset(); }
+
+void GCTracer::NotifyIncrementalMarkingStart() {
+ incremental_marking_start_time_ = heap_->MonotonicallyIncreasingTimeInMs();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index a11823e984..e8c72c1e2c 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -63,6 +63,7 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
#define INCREMENTAL_SCOPES(F) \
/* MC_INCREMENTAL is the top-level incremental marking scope. */ \
F(MC_INCREMENTAL) \
+ F(MC_INCREMENTAL_SWEEPING) \
F(MC_INCREMENTAL_WRAPPER_PROLOGUE) \
F(MC_INCREMENTAL_WRAPPER_TRACING) \
F(MC_INCREMENTAL_FINALIZE) \
@@ -134,23 +135,23 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
class GCTracer {
public:
struct IncrementalMarkingInfos {
- IncrementalMarkingInfos()
- : cumulative_duration(0), longest_step(0), steps(0) {}
+ IncrementalMarkingInfos() : duration(0), longest_step(0), steps(0) {}
void Update(double duration) {
steps++;
- cumulative_duration += duration;
+ this->duration += duration;
if (duration > longest_step) {
longest_step = duration;
}
}
void ResetCurrentCycle() {
+ duration = 0;
longest_step = 0;
steps = 0;
}
- double cumulative_duration;
+ double duration;
double longest_step;
int steps;
};
@@ -192,7 +193,8 @@ class GCTracer {
START = 3
};
- Event(Type type, const char* gc_reason, const char* collector_reason);
+ Event(Type type, GarbageCollectionReason gc_reason,
+ const char* collector_reason);
// Returns a string describing the event type.
const char* TypeName(bool short_name) const;
@@ -200,7 +202,7 @@ class GCTracer {
// Type of event
Type type;
- const char* gc_reason;
+ GarbageCollectionReason gc_reason;
const char* collector_reason;
// Timestamp set in the constructor.
@@ -219,10 +221,10 @@ class GCTracer {
intptr_t end_object_size;
// Size of memory allocated from OS set in constructor.
- intptr_t start_memory_size;
+ size_t start_memory_size;
// Size of memory allocated from OS set in destructor.
- intptr_t end_memory_size;
+ size_t end_memory_size;
// Total amount of space either wasted or contained in one of free lists
// before the current GC.
@@ -241,21 +243,11 @@ class GCTracer {
// Bytes marked since creation of tracer (value at start of event).
intptr_t cumulative_incremental_marking_bytes;
- // Bytes marked since
- // - last event for SCAVENGER events
- // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
- // events
+ // Bytes marked incrementally for INCREMENTAL_MARK_COMPACTOR
intptr_t incremental_marking_bytes;
- // Cumulative pure duration of incremental marking steps since creation of
- // tracer. (value at start of event)
- double cumulative_pure_incremental_marking_duration;
-
- // Duration of pure incremental marking steps since
- // - last event for SCAVENGER events
- // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
- // events
- double pure_incremental_marking_duration;
+ // Duration of incremental marking steps for INCREMENTAL_MARK_COMPACTOR.
+ double incremental_marking_duration;
// Amounts of time spent in different scopes during GC.
double scopes[Scope::NUMBER_OF_SCOPES];
@@ -270,7 +262,7 @@ class GCTracer {
explicit GCTracer(Heap* heap);
// Start collecting data.
- void Start(GarbageCollector collector, const char* gc_reason,
+ void Start(GarbageCollector collector, GarbageCollectionReason gc_reason,
const char* collector_reason);
// Stop collecting data and print results.
@@ -292,26 +284,6 @@ class GCTracer {
// Log an incremental marking step.
void AddIncrementalMarkingStep(double duration, intptr_t bytes);
- // Log time spent in marking.
- void AddMarkingTime(double duration) {
- cumulative_marking_duration_ += duration;
- }
-
- // Time spent in marking.
- double cumulative_marking_duration() const {
- return cumulative_marking_duration_;
- }
-
- // Log time spent in sweeping on main thread.
- void AddSweepingTime(double duration) {
- cumulative_sweeping_duration_ += duration;
- }
-
- // Time spent in sweeping on main thread.
- double cumulative_sweeping_duration() const {
- return cumulative_sweeping_duration_;
- }
-
// Compute the average incremental marking speed in bytes/millisecond.
// Returns 0 if no events have been recorded.
double IncrementalMarkingSpeedInBytesPerMillisecond() const;
@@ -381,11 +353,14 @@ class GCTracer {
// Discard all recorded survival events.
void ResetSurvivalEvents();
+ void NotifyIncrementalMarkingStart();
+
V8_INLINE void AddScopeSample(Scope::ScopeId scope, double duration) {
DCHECK(scope < Scope::NUMBER_OF_SCOPES);
if (scope >= Scope::FIRST_INCREMENTAL_SCOPE &&
scope <= Scope::LAST_INCREMENTAL_SCOPE) {
- incremental_marking_scopes_[scope].Update(duration);
+ incremental_marking_scopes_[scope - Scope::FIRST_INCREMENTAL_SCOPE]
+ .Update(duration);
} else {
current_.scopes[scope] += duration;
}
@@ -400,6 +375,7 @@ class GCTracer {
FRIEND_TEST(GCTracerTest, RegularScope);
FRIEND_TEST(GCTracerTest, IncrementalMarkingDetails);
FRIEND_TEST(GCTracerTest, IncrementalScope);
+ FRIEND_TEST(GCTracerTest, IncrementalMarkingSpeed);
// Returns the average speed of the events in the buffer.
// If the buffer is empty, the result is 0.
@@ -408,9 +384,9 @@ class GCTracer {
static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
const BytesAndDuration& initial, double time_ms);
- void MergeBaseline(const Event& baseline);
-
void ResetForTesting();
+ void ResetIncrementalMarkingCounters();
+ void RecordIncrementalMarkingSpeed(intptr_t bytes, double duration);
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
@@ -444,37 +420,23 @@ class GCTracer {
// Previous tracer event.
Event previous_;
- // Previous INCREMENTAL_MARK_COMPACTOR event.
- Event previous_incremental_mark_compactor_event_;
+ // Size of incremental marking steps (in bytes) accumulated since the end of
+ // the last mark compact GC.
+ intptr_t incremental_marking_bytes_;
- // Cumulative size of incremental marking steps (in bytes) since creation of
- // tracer.
- intptr_t cumulative_incremental_marking_bytes_;
+ // Duration of incremental marking steps since the end of the last mark-
+ // compact event.
+ double incremental_marking_duration_;
- // Cumulative duration of incremental marking steps since creation of tracer.
- double cumulative_incremental_marking_duration_;
+ double incremental_marking_start_time_;
- // Cumulative duration of pure incremental marking steps since creation of
- // tracer.
- double cumulative_pure_incremental_marking_duration_;
-
- // Total marking time.
- // This timer is precise when run with --print-cumulative-gc-stat
- double cumulative_marking_duration_;
+ double recorded_incremental_marking_speed_;
// Incremental scopes carry more information than just the duration. The infos
// here are merged back upon starting/stopping the GC tracer.
IncrementalMarkingInfos
incremental_marking_scopes_[Scope::NUMBER_OF_INCREMENTAL_SCOPES];
- // Total sweeping time on the main thread.
- // This timer is precise when run with --print-cumulative-gc-stat
- // TODO(hpayer): Account for sweeping time on sweeper threads. Add a
- // different field for that.
- // TODO(hpayer): This timer right now just holds the sweeping time
- // of the initial atomic sweeping pause. Make sure that it accumulates
- // all sweeping operations performed on the main thread.
- double cumulative_sweeping_duration_;
// Timestamp and allocation counter at the last sampled allocation event.
double allocation_time_ms_;
@@ -494,12 +456,11 @@ class GCTracer {
// Separate timer used for --runtime_call_stats
RuntimeCallTimer timer_;
- RingBuffer<BytesAndDuration> recorded_incremental_marking_steps_;
RingBuffer<BytesAndDuration> recorded_scavenges_total_;
RingBuffer<BytesAndDuration> recorded_scavenges_survived_;
RingBuffer<BytesAndDuration> recorded_compactions_;
- RingBuffer<BytesAndDuration> recorded_mark_compacts_;
RingBuffer<BytesAndDuration> recorded_incremental_mark_compacts_;
+ RingBuffer<BytesAndDuration> recorded_mark_compacts_;
RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
RingBuffer<double> recorded_context_disposal_times_;
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 21f465fe78..23e171232d 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -8,7 +8,7 @@
#include <cmath>
#include "src/base/platform/platform.h"
-#include "src/counters.h"
+#include "src/counters-inl.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact.h"
@@ -25,6 +25,16 @@
namespace v8 {
namespace internal {
+AllocationSpace AllocationResult::RetrySpace() {
+ DCHECK(IsRetry());
+ return static_cast<AllocationSpace>(Smi::cast(object_)->value());
+}
+
+HeapObject* AllocationResult::ToObjectChecked() {
+ CHECK(!IsRetry());
+ return HeapObject::cast(object_);
+}
+
void PromotionQueue::insert(HeapObject* target, int32_t size,
bool was_marked_black) {
if (emergency_stack_ != NULL) {
@@ -50,6 +60,62 @@ void PromotionQueue::insert(HeapObject* target, int32_t size,
#endif
}
+void PromotionQueue::remove(HeapObject** target, int32_t* size,
+ bool* was_marked_black) {
+ DCHECK(!is_empty());
+ if (front_ == rear_) {
+ Entry e = emergency_stack_->RemoveLast();
+ *target = e.obj_;
+ *size = e.size_;
+ *was_marked_black = e.was_marked_black_;
+ return;
+ }
+
+ struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
+ *target = entry->obj_;
+ *size = entry->size_;
+ *was_marked_black = entry->was_marked_black_;
+
+ // Assert no underflow.
+ SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
+ reinterpret_cast<Address>(front_));
+}
+
+Page* PromotionQueue::GetHeadPage() {
+ return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
+}
+
+void PromotionQueue::SetNewLimit(Address limit) {
+ // If we are already using an emergency stack, we can ignore it.
+ if (emergency_stack_) return;
+
+ // If the limit is not on the same page, we can ignore it.
+ if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
+
+ limit_ = reinterpret_cast<struct Entry*>(limit);
+
+ if (limit_ <= rear_) {
+ return;
+ }
+
+ RelocateQueueHead();
+}
+
+bool PromotionQueue::IsBelowPromotionQueue(Address to_space_top) {
+ // If an emergency stack is used, the to-space address cannot interfere
+ // with the promotion queue.
+ if (emergency_stack_) return true;
+
+ // If the given to-space top pointer and the head of the promotion queue
+ // are not on the same page, then the to-space objects are below the
+ // promotion queue.
+ if (GetHeadPage() != Page::FromAddress(to_space_top)) {
+ return true;
+ }
+ // If the to space top pointer is smaller or equal than the promotion
+ // queue head, then the to-space objects are below the promotion queue.
+ return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
+}
#define ROOT_ACCESSOR(type, name, camel_name) \
type* Heap::name() { return type::cast(roots_[k##camel_name##RootIndex]); }
@@ -89,6 +155,37 @@ WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
+PagedSpace* Heap::paged_space(int idx) {
+ DCHECK_NE(idx, LO_SPACE);
+ DCHECK_NE(idx, NEW_SPACE);
+ return static_cast<PagedSpace*>(space_[idx]);
+}
+
+Space* Heap::space(int idx) { return space_[idx]; }
+
+Address* Heap::NewSpaceAllocationTopAddress() {
+ return new_space_->allocation_top_address();
+}
+
+Address* Heap::NewSpaceAllocationLimitAddress() {
+ return new_space_->allocation_limit_address();
+}
+
+Address* Heap::OldSpaceAllocationTopAddress() {
+ return old_space_->allocation_top_address();
+}
+
+Address* Heap::OldSpaceAllocationLimitAddress() {
+ return old_space_->allocation_limit_address();
+}
+
+void Heap::UpdateNewSpaceAllocationCounter() {
+ new_space_allocation_counter_ = NewSpaceAllocationCounter();
+}
+
+size_t Heap::NewSpaceAllocationCounter() {
+ return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
+}
template <>
bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
@@ -209,14 +306,14 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
isolate_->counters()->objs_since_last_young()->Increment();
#endif
- bool large_object = size_in_bytes > Page::kMaxRegularHeapObjectSize;
+ bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
HeapObject* object = nullptr;
AllocationResult allocation;
if (NEW_SPACE == space) {
if (large_object) {
space = LO_SPACE;
} else {
- allocation = new_space_.AllocateRaw(size_in_bytes, alignment);
+ allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
if (allocation.To(&object)) {
OnAllocationEvent(object, size_in_bytes);
}
@@ -248,8 +345,6 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
}
if (allocation.To(&object)) {
OnAllocationEvent(object, size_in_bytes);
- } else {
- old_gen_exhausted_ = true;
}
return allocation;
@@ -355,9 +450,17 @@ void Heap::FinalizeExternalString(String* string) {
}
}
+Address Heap::NewSpaceTop() { return new_space_->top(); }
+
+bool Heap::DeoptMaybeTenuredAllocationSites() {
+ return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
+}
bool Heap::InNewSpace(Object* object) {
- bool result = new_space_.Contains(object);
+ // Inlined check from NewSpace::Contains.
+ bool result =
+ object->IsHeapObject() &&
+ Page::FromAddress(HeapObject::cast(object)->address())->InNewSpace();
DCHECK(!result || // Either not in new space
gc_state_ != NOT_IN_GC || // ... or in the middle of GC
InToSpace(object)); // ... or in to-space (where we allocate).
@@ -365,35 +468,32 @@ bool Heap::InNewSpace(Object* object) {
}
bool Heap::InFromSpace(Object* object) {
- return new_space_.FromSpaceContains(object);
+ return object->IsHeapObject() &&
+ MemoryChunk::FromAddress(HeapObject::cast(object)->address())
+ ->IsFlagSet(Page::IN_FROM_SPACE);
}
bool Heap::InToSpace(Object* object) {
- return new_space_.ToSpaceContains(object);
+ return object->IsHeapObject() &&
+ MemoryChunk::FromAddress(HeapObject::cast(object)->address())
+ ->IsFlagSet(Page::IN_TO_SPACE);
}
bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }
bool Heap::InNewSpaceSlow(Address address) {
- return new_space_.ContainsSlow(address);
+ return new_space_->ContainsSlow(address);
}
bool Heap::InOldSpaceSlow(Address address) {
return old_space_->ContainsSlow(address);
}
-bool Heap::OldGenerationAllocationLimitReached() {
- if (!incremental_marking()->IsStopped() && !ShouldOptimizeForMemoryUsage()) {
- return false;
- }
- return OldGenerationSpaceAvailable() < 0;
-}
-
template <PromotionMode promotion_mode>
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
Page* page = Page::FromAddress(old_address);
- Address age_mark = new_space_.age_mark();
+ Address age_mark = new_space_->age_mark();
if (promotion_mode == PROMOTE_MARKED) {
MarkBit mark_bit = ObjectMarking::MarkBitFrom(old_address);
@@ -587,8 +687,8 @@ void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
}
-
-bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
+bool Heap::CollectGarbage(AllocationSpace space,
+ GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags callbackFlags) {
const char* collector_reason = NULL;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
@@ -659,35 +759,6 @@ void Heap::ExternalStringTable::ShrinkNewStrings(int position) {
#endif
}
-// static
-int DescriptorLookupCache::Hash(Object* source, Name* name) {
- DCHECK(name->IsUniqueName());
- // Uses only lower 32 bits if pointers are larger.
- uint32_t source_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
- kPointerSizeLog2;
- uint32_t name_hash = name->hash_field();
- return (source_hash ^ name_hash) % kLength;
-}
-
-int DescriptorLookupCache::Lookup(Map* source, Name* name) {
- int index = Hash(source, name);
- Key& key = keys_[index];
- if ((key.source == source) && (key.name == name)) return results_[index];
- return kAbsent;
-}
-
-
-void DescriptorLookupCache::Update(Map* source, Name* name, int result) {
- DCHECK(result != kAbsent);
- int index = Hash(source, name);
- Key& key = keys_[index];
- key.source = source;
- key.name = name;
- results_[index] = result;
-}
-
-
void Heap::ClearInstanceofCache() {
set_instanceof_cache_function(Smi::FromInt(0));
}
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 7eb5af3b6a..d823232ac7 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -71,14 +71,14 @@ class IdleScavengeObserver : public AllocationObserver {
Heap::Heap()
: external_memory_(0),
- external_memory_limit_(kExternalAllocationLimit),
+ external_memory_limit_(kExternalAllocationSoftLimit),
external_memory_at_last_mark_compact_(0),
isolate_(nullptr),
code_range_size_(0),
// semispace_size_ should be a power of 2 and old_generation_size_ should
// be a multiple of Page::kPageSize.
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
- initial_semispace_size_(Page::kPageSize),
+ initial_semispace_size_(MB),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
initial_old_generation_size_(max_old_generation_size_ /
kInitalOldGenerationLimitFactor),
@@ -96,7 +96,7 @@ Heap::Heap()
contexts_disposed_(0),
number_of_disposed_maps_(0),
global_ic_age_(0),
- new_space_(this),
+ new_space_(nullptr),
old_space_(NULL),
code_space_(NULL),
map_space_(NULL),
@@ -112,11 +112,9 @@ Heap::Heap()
allocation_timeout_(0),
#endif // DEBUG
old_generation_allocation_limit_(initial_old_generation_size_),
- old_gen_exhausted_(false),
inline_allocation_disabled_(false),
total_regexp_code_generated_(0),
tracer_(nullptr),
- high_survival_rate_period_length_(0),
promoted_objects_size_(0),
promotion_ratio_(0),
semi_space_copied_object_size_(0),
@@ -126,12 +124,6 @@ Heap::Heap()
nodes_copied_in_new_space_(0),
nodes_promoted_(0),
maximum_size_scavenges_(0),
- max_gc_pause_(0.0),
- total_gc_time_ms_(0.0),
- max_alive_after_gc_(0),
- min_in_mutator_(kMaxInt),
- marking_time_(0.0),
- sweeping_time_(0.0),
last_idle_notification_time_(0.0),
last_gc_time_(0.0),
scavenge_collector_(nullptr),
@@ -148,7 +140,7 @@ Heap::Heap()
full_codegen_bytes_generated_(0),
crankshaft_codegen_bytes_generated_(0),
new_space_allocation_counter_(0),
- old_generation_allocation_counter_(0),
+ old_generation_allocation_counter_at_last_gc_(0),
old_generation_size_at_last_gc_(0),
gcs_since_last_deopt_(0),
global_pretenuring_feedback_(nullptr),
@@ -163,6 +155,8 @@ Heap::Heap()
deserialization_complete_(false),
strong_roots_list_(NULL),
heap_iterator_depth_(0),
+ embedder_heap_tracer_(nullptr),
+ embedder_reference_reporter_(new TracePossibleWrapperReporter(this)),
force_oom_(false) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
@@ -189,7 +183,7 @@ Heap::Heap()
intptr_t Heap::Capacity() {
if (!HasBeenSetUp()) return 0;
- return new_space_.Capacity() + OldGenerationCapacity();
+ return new_space_->Capacity() + OldGenerationCapacity();
}
intptr_t Heap::OldGenerationCapacity() {
@@ -199,44 +193,41 @@ intptr_t Heap::OldGenerationCapacity() {
map_space_->Capacity() + lo_space_->SizeOfObjects();
}
-
-intptr_t Heap::CommittedOldGenerationMemory() {
+size_t Heap::CommittedOldGenerationMemory() {
if (!HasBeenSetUp()) return 0;
return old_space_->CommittedMemory() + code_space_->CommittedMemory() +
map_space_->CommittedMemory() + lo_space_->Size();
}
-
-intptr_t Heap::CommittedMemory() {
+size_t Heap::CommittedMemory() {
if (!HasBeenSetUp()) return 0;
- return new_space_.CommittedMemory() + CommittedOldGenerationMemory();
+ return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
}
size_t Heap::CommittedPhysicalMemory() {
if (!HasBeenSetUp()) return 0;
- return new_space_.CommittedPhysicalMemory() +
+ return new_space_->CommittedPhysicalMemory() +
old_space_->CommittedPhysicalMemory() +
code_space_->CommittedPhysicalMemory() +
map_space_->CommittedPhysicalMemory() +
lo_space_->CommittedPhysicalMemory();
}
-
-intptr_t Heap::CommittedMemoryExecutable() {
+size_t Heap::CommittedMemoryExecutable() {
if (!HasBeenSetUp()) return 0;
- return memory_allocator()->SizeExecutable();
+ return static_cast<size_t>(memory_allocator()->SizeExecutable());
}
void Heap::UpdateMaximumCommitted() {
if (!HasBeenSetUp()) return;
- intptr_t current_committed_memory = CommittedMemory();
+ const size_t current_committed_memory = CommittedMemory();
if (current_committed_memory > maximum_committed_) {
maximum_committed_ = current_committed_memory;
}
@@ -275,22 +266,6 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
return MARK_COMPACTOR;
}
- // Is enough data promoted to justify a global GC?
- if (OldGenerationAllocationLimitReached()) {
- isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
- *reason = "promotion limit reached";
- return MARK_COMPACTOR;
- }
-
- // Have allocation in OLD and LO failed?
- if (old_gen_exhausted_) {
- isolate_->counters()
- ->gc_compactor_caused_by_oldspace_exhaustion()
- ->Increment();
- *reason = "old generations exhausted";
- return MARK_COMPACTOR;
- }
-
// Is there enough space left in OLD to guarantee that a scavenge can
// succeed?
//
@@ -300,7 +275,8 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
// and does not count available bytes already in the old space or code
// space. Undercounting is safe---we may get an unrequested full GC when
// a scavenge would have succeeded.
- if (memory_allocator()->MaxAvailable() <= new_space_.Size()) {
+ if (static_cast<intptr_t>(memory_allocator()->MaxAvailable()) <=
+ new_space_->Size()) {
isolate_->counters()
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
@@ -321,18 +297,18 @@ void Heap::ReportStatisticsBeforeGC() {
// compiled --log-gc is set. The following logic is used to avoid
// double logging.
#ifdef DEBUG
- if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
+ if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
if (FLAG_heap_stats) {
ReportHeapStatistics("Before GC");
} else if (FLAG_log_gc) {
- new_space_.ReportStatistics();
+ new_space_->ReportStatistics();
}
- if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
+ if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
#else
if (FLAG_log_gc) {
- new_space_.CollectStatistics();
- new_space_.ReportStatistics();
- new_space_.ClearHistograms();
+ new_space_->CollectStatistics();
+ new_space_->ReportStatistics();
+ new_space_->ClearHistograms();
}
#endif // DEBUG
}
@@ -340,50 +316,51 @@ void Heap::ReportStatisticsBeforeGC() {
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
- PrintIsolate(isolate_, "Memory allocator, used: %6" V8PRIdPTR
- " KB, available: %6" V8PRIdPTR " KB\n",
+ PrintIsolate(isolate_,
+ "Memory allocator, used: %6zu KB,"
+ " available: %6zu KB\n",
memory_allocator()->Size() / KB,
memory_allocator()->Available() / KB);
PrintIsolate(isolate_, "New space, used: %6" V8PRIdPTR
" KB"
", available: %6" V8PRIdPTR
" KB"
- ", committed: %6" V8PRIdPTR " KB\n",
- new_space_.Size() / KB, new_space_.Available() / KB,
- new_space_.CommittedMemory() / KB);
+ ", committed: %6zu KB\n",
+ new_space_->Size() / KB, new_space_->Available() / KB,
+ new_space_->CommittedMemory() / KB);
PrintIsolate(isolate_, "Old space, used: %6" V8PRIdPTR
" KB"
", available: %6" V8PRIdPTR
" KB"
- ", committed: %6" V8PRIdPTR " KB\n",
+ ", committed: %6zu KB\n",
old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
old_space_->CommittedMemory() / KB);
PrintIsolate(isolate_, "Code space, used: %6" V8PRIdPTR
" KB"
", available: %6" V8PRIdPTR
" KB"
- ", committed: %6" V8PRIdPTR " KB\n",
+ ", committed: %6zu KB\n",
code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
code_space_->CommittedMemory() / KB);
PrintIsolate(isolate_, "Map space, used: %6" V8PRIdPTR
" KB"
", available: %6" V8PRIdPTR
" KB"
- ", committed: %6" V8PRIdPTR " KB\n",
+ ", committed: %6zu KB\n",
map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
map_space_->CommittedMemory() / KB);
PrintIsolate(isolate_, "Large object space, used: %6" V8PRIdPTR
" KB"
", available: %6" V8PRIdPTR
" KB"
- ", committed: %6" V8PRIdPTR " KB\n",
+ ", committed: %6zu KB\n",
lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
lo_space_->CommittedMemory() / KB);
PrintIsolate(isolate_, "All spaces, used: %6" V8PRIdPTR
" KB"
", available: %6" V8PRIdPTR
" KB"
- ", committed: %6" V8PRIdPTR " KB\n",
+ ", committed: %6zu KB\n",
this->SizeOfObjects() / KB, this->Available() / KB,
this->CommittedMemory() / KB);
PrintIsolate(isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
@@ -399,13 +376,13 @@ void Heap::ReportStatisticsAfterGC() {
// NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG)
if (FLAG_heap_stats) {
- new_space_.CollectStatistics();
+ new_space_->CollectStatistics();
ReportHeapStatistics("After GC");
} else if (FLAG_log_gc) {
- new_space_.ReportStatistics();
+ new_space_->ReportStatistics();
}
#else
- if (FLAG_log_gc) new_space_.ReportStatistics();
+ if (FLAG_log_gc) new_space_->ReportStatistics();
#endif // DEBUG
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
++i) {
@@ -423,6 +400,7 @@ void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
deferred_counters_[feature]++;
}
+bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
void Heap::GarbageCollectionPrologue() {
{
@@ -454,7 +432,7 @@ void Heap::GarbageCollectionPrologue() {
ReportStatisticsBeforeGC();
#endif // DEBUG
- if (new_space_.IsAtMaximumCapacity()) {
+ if (new_space_->IsAtMaximumCapacity()) {
maximum_size_scavenges_++;
} else {
maximum_size_scavenges_ = 0;
@@ -534,8 +512,8 @@ void Heap::MergeAllocationSitePretenuringFeedback(
class Heap::PretenuringScope {
public:
explicit PretenuringScope(Heap* heap) : heap_(heap) {
- heap_->global_pretenuring_feedback_ = new base::HashMap(
- base::HashMap::PointersMatch, kInitialFeedbackCapacity);
+ heap_->global_pretenuring_feedback_ =
+ new base::HashMap(kInitialFeedbackCapacity);
}
~PretenuringScope() {
@@ -789,14 +767,16 @@ void Heap::HandleGCRequest() {
} else if (incremental_marking()->request_type() ==
IncrementalMarking::COMPLETE_MARKING) {
incremental_marking()->reset_request_type();
- CollectAllGarbage(current_gc_flags_, "GC interrupt",
+ CollectAllGarbage(current_gc_flags_,
+ GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
current_gc_callback_flags_);
} else if (incremental_marking()->request_type() ==
IncrementalMarking::FINALIZATION &&
incremental_marking()->IsMarking() &&
!incremental_marking()->finalize_marking_completed()) {
incremental_marking()->reset_request_type();
- FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
+ FinalizeIncrementalMarking(
+ GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
}
}
@@ -805,10 +785,11 @@ void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
}
-
-void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
+void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] (%s).\n", gc_reason);
+ isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] (%s).\n",
+ Heap::GarbageCollectionReasonToString(gc_reason));
}
HistogramTimerScope incremental_marking_scope(
@@ -856,7 +837,7 @@ HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
}
}
-void Heap::CollectAllGarbage(int flags, const char* gc_reason,
+void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
@@ -866,8 +847,7 @@ void Heap::CollectAllGarbage(int flags, const char* gc_reason,
set_current_gc_flags(kNoGCFlags);
}
-
-void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
+void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
@@ -897,36 +877,46 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
}
}
set_current_gc_flags(kNoGCFlags);
- new_space_.Shrink();
+ new_space_->Shrink();
UncommitFromSpace();
}
-
-void Heap::ReportExternalMemoryPressure(const char* gc_reason) {
+void Heap::ReportExternalMemoryPressure() {
+ if (external_memory_ >
+ (external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
+ CollectAllGarbage(
+ kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
+ GarbageCollectionReason::kExternalMemoryPressure,
+ static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
+ kGCCallbackFlagCollectAllExternalMemory));
+ return;
+ }
if (incremental_marking()->IsStopped()) {
if (incremental_marking()->CanBeActivated()) {
StartIncrementalMarking(
- i::Heap::kNoGCFlags,
+ i::Heap::kNoGCFlags, GarbageCollectionReason::kExternalMemoryPressure,
static_cast<GCCallbackFlags>(
kGCCallbackFlagSynchronousPhantomCallbackProcessing |
- kGCCallbackFlagCollectAllExternalMemory),
- gc_reason);
+ kGCCallbackFlagCollectAllExternalMemory));
} else {
- CollectAllGarbage(i::Heap::kNoGCFlags, gc_reason,
+ CollectAllGarbage(i::Heap::kNoGCFlags,
+ GarbageCollectionReason::kExternalMemoryPressure,
kGCCallbackFlagSynchronousPhantomCallbackProcessing);
}
} else {
// Incremental marking is turned on an has already been started.
-
- // TODO(mlippautz): Compute the time slice for incremental marking based on
- // memory pressure.
- double deadline = MonotonicallyIncreasingTimeInMs() +
- FLAG_external_allocation_limit_incremental_time;
+ const double pressure =
+ static_cast<double>(external_memory_ -
+ external_memory_at_last_mark_compact_ -
+ kExternalAllocationSoftLimit) /
+ external_memory_hard_limit();
+ DCHECK_GE(1, pressure);
+ const double kMaxStepSizeOnExternalLimit = 25;
+ const double deadline = MonotonicallyIncreasingTimeInMs() +
+ pressure * kMaxStepSizeOnExternalLimit;
incremental_marking()->AdvanceIncrementalMarking(
- deadline,
- IncrementalMarking::StepActions(IncrementalMarking::GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_MARKING,
- IncrementalMarking::FORCE_COMPLETION));
+ deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
}
}
@@ -936,7 +926,7 @@ void Heap::EnsureFillerObjectAtTop() {
// evacuation of a non-full new space (or if we are on the last page) there
// may be uninitialized memory behind top. We fill the remainder of the page
// with a filler.
- Address to_top = new_space_.top();
+ Address to_top = new_space_->top();
Page* page = Page::FromAddress(to_top - kPointerSize);
if (page->Contains(to_top)) {
int remaining_in_page = static_cast<int>(page->area_end() - to_top);
@@ -944,8 +934,8 @@ void Heap::EnsureFillerObjectAtTop() {
}
}
-
-bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
+bool Heap::CollectGarbage(GarbageCollector collector,
+ GarbageCollectionReason gc_reason,
const char* collector_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// The VM is in the GC state until exiting this function.
@@ -964,19 +954,22 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Scavenge during marking.\n");
+ isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Scavenge during marking.\n");
}
}
- if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
- !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
- !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
- OldGenerationAllocationLimitReached()) {
+ if (collector == MARK_COMPACTOR && FLAG_incremental_marking &&
+ !ShouldFinalizeIncrementalMarking() && !ShouldAbortIncrementalMarking() &&
+ !incremental_marking()->IsStopped() &&
+ !incremental_marking()->should_hurry() &&
+ !IsCloseToOutOfMemory(new_space_->Capacity())) {
if (!incremental_marking()->IsComplete() &&
- !mark_compact_collector()->marking_deque_.IsEmpty() &&
+ !mark_compact_collector()->marking_deque()->IsEmpty() &&
!FLAG_gc_global) {
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+ isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Delaying MarkSweep.\n");
}
collector = SCAVENGER;
collector_reason = "incremental marking delaying mark-sweep";
@@ -1041,9 +1034,11 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
// Start incremental marking for the next cycle. The heap snapshot
// generator needs incremental marking to stay off after it aborted.
- if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
- incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
- StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
+ // We do this only for scavenger to avoid a loop where mark-compact
+ // causes another mark-compact.
+ if (collector == SCAVENGER && !ShouldAbortIncrementalMarking()) {
+ StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags,
+ kNoGCCallbackFlags);
}
return next_gc_likely_to_collect_more;
@@ -1069,21 +1064,33 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
return ++contexts_disposed_;
}
-
void Heap::StartIncrementalMarking(int gc_flags,
- const GCCallbackFlags gc_callback_flags,
- const char* reason) {
+ GarbageCollectionReason gc_reason,
+ GCCallbackFlags gc_callback_flags) {
DCHECK(incremental_marking()->IsStopped());
set_current_gc_flags(gc_flags);
current_gc_callback_flags_ = gc_callback_flags;
- incremental_marking()->Start(reason);
+ incremental_marking()->Start(gc_reason);
}
+void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
+ int gc_flags, const GCCallbackFlags gc_callback_flags) {
+ if (incremental_marking()->IsStopped()) {
+ IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
+ if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
+ incremental_marking()->incremental_marking_job()->ScheduleTask(this);
+ } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
+ StartIncrementalMarking(gc_flags,
+ GarbageCollectionReason::kAllocationLimit,
+ gc_callback_flags);
+ }
+ }
+}
-void Heap::StartIdleIncrementalMarking() {
+void Heap::StartIdleIncrementalMarking(GarbageCollectionReason gc_reason) {
gc_idle_time_handler_->ResetNoProgressCounter();
- StartIncrementalMarking(kReduceMemoryFootprintMask, kNoGCCallbackFlags,
- "idle");
+ StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
+ kNoGCCallbackFlags);
}
@@ -1192,17 +1199,15 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
}
if (perform_gc) {
if (space == NEW_SPACE) {
- CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
+ CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
} else {
if (counter > 1) {
CollectAllGarbage(
kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
- "failed to reserve space in paged or large "
- "object space, trying to reduce memory footprint");
+ GarbageCollectionReason::kDeserializer);
} else {
- CollectAllGarbage(
- kAbortIncrementalMarkingMask,
- "failed to reserve space in paged or large object space");
+ CollectAllGarbage(kAbortIncrementalMarkingMask,
+ GarbageCollectionReason::kDeserializer);
}
}
gc_performed = true;
@@ -1216,7 +1221,7 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
void Heap::EnsureFromSpaceIsCommitted() {
- if (new_space_.CommitFromSpaceIfNeeded()) return;
+ if (new_space_->CommitFromSpaceIfNeeded()) return;
// Committing memory to from space failed.
// Memory is exhausted and we will die.
@@ -1264,11 +1269,6 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
tracer()->AddSurvivalRatio(survival_rate);
- if (survival_rate > kYoungSurvivalRateHighThreshold) {
- high_survival_rate_period_length_++;
- } else {
- high_survival_rate_period_length_ = 0;
- }
}
bool Heap::PerformGarbageCollection(
@@ -1303,14 +1303,7 @@ bool Heap::PerformGarbageCollection(
EnsureFromSpaceIsCommitted();
- int start_new_space_size = Heap::new_space()->SizeAsInt();
-
- if (IsHighSurvivalRate()) {
- // We speed up the incremental marker if it is running so that it
- // does not fall behind the rate of promotion, which would cause a
- // constantly growing old space.
- incremental_marking()->NotifyOfHighPromotionRate();
- }
+ int start_new_space_size = static_cast<int>(Heap::new_space()->Size());
{
Heap::PretenuringScope pretenuring_scope(this);
@@ -1319,11 +1312,10 @@ bool Heap::PerformGarbageCollection(
UpdateOldGenerationAllocationCounter();
// Perform mark-sweep with optional compaction.
MarkCompact();
- old_gen_exhausted_ = false;
old_generation_size_configured_ = true;
// This should be updated before PostGarbageCollectionProcessing, which
// can cause another GC. Take into account the objects promoted during GC.
- old_generation_allocation_counter_ +=
+ old_generation_allocation_counter_at_last_gc_ +=
static_cast<size_t>(promoted_objects_size_);
old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
} else {
@@ -1360,7 +1352,7 @@ bool Heap::PerformGarbageCollection(
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
external_memory_at_last_mark_compact_ = external_memory_;
- external_memory_limit_ = external_memory_ + kExternalAllocationLimit;
+ external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
} else if (HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_) {
@@ -1491,18 +1483,18 @@ void Heap::MarkCompactPrologue() {
void Heap::CheckNewSpaceExpansionCriteria() {
if (FLAG_experimental_new_space_growth_heuristic) {
- if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
- survived_last_scavenge_ * 100 / new_space_.TotalCapacity() >= 10) {
+ if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
+ survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
// Grow the size of new space if there is room to grow, and more than 10%
// have survived the last scavenge.
- new_space_.Grow();
+ new_space_->Grow();
survived_since_last_expansion_ = 0;
}
- } else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
- survived_since_last_expansion_ > new_space_.TotalCapacity()) {
+ } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
+ survived_since_last_expansion_ > new_space_->TotalCapacity()) {
// Grow the size of new space if there is room to grow, and enough data
// has survived scavenge since the last expansion.
- new_space_.Grow();
+ new_space_->Grow();
survived_since_last_expansion_ = 0;
}
}
@@ -1541,6 +1533,11 @@ void PromotionQueue::Initialize() {
emergency_stack_ = NULL;
}
+void PromotionQueue::Destroy() {
+ DCHECK(is_empty());
+ delete emergency_stack_;
+ emergency_stack_ = NULL;
+}
void PromotionQueue::RelocateQueueHead() {
DCHECK(emergency_stack_ == NULL);
@@ -1615,13 +1612,13 @@ void Heap::Scavenge() {
// Register found wrappers with embedder so it can add them to its marking
// deque and correctly manage the case when v8 scavenger collects the
// wrappers by either keeping wrappables alive, or cleaning marking deque.
- mark_compact_collector()->RegisterWrappersWithEmbedderHeapTracer();
+ RegisterWrappersWithEmbedderHeapTracer();
}
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
- new_space_.Flip();
- new_space_.ResetAllocationInfo();
+ new_space_->Flip();
+ new_space_->ResetAllocationInfo();
// We need to sweep newly copied objects which can be either in the
// to space or promoted to the old generation. For to-space
@@ -1640,7 +1637,7 @@ void Heap::Scavenge() {
// for the addresses of promoted objects: every object promoted
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
- Address new_space_front = new_space_.ToSpaceStart();
+ Address new_space_front = new_space_->ToSpaceStart();
promotion_queue_.Initialize();
PromotionMode promotion_mode = CurrentPromotionMode();
@@ -1737,16 +1734,17 @@ void Heap::Scavenge() {
ScavengeWeakObjectRetainer weak_object_retainer(this);
ProcessYoungWeakReferences(&weak_object_retainer);
- DCHECK(new_space_front == new_space_.top());
+ DCHECK(new_space_front == new_space_->top());
// Set age mark.
- new_space_.set_age_mark(new_space_.top());
+ new_space_->set_age_mark(new_space_->top());
ArrayBufferTracker::FreeDeadInNewSpace(this);
// Update how much has survived scavenge.
- IncrementYoungSurvivorsCounter(static_cast<int>(
- (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
+ IncrementYoungSurvivorsCounter(
+ static_cast<int>((PromotedSpaceSizeOfObjects() - survived_watermark) +
+ new_space_->Size()));
LOG(isolate_, ResourceEvent("scavenge", "end"));
@@ -1910,11 +1908,11 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
Address new_space_front,
PromotionMode promotion_mode) {
do {
- SemiSpace::AssertValidRange(new_space_front, new_space_.top());
+ SemiSpace::AssertValidRange(new_space_front, new_space_->top());
// The addresses new_space_front and new_space_.top() define a
// queue of unprocessed copied objects. Process them until the
// queue is empty.
- while (new_space_front != new_space_.top()) {
+ while (new_space_front != new_space_->top()) {
if (!Page::IsAlignedToPageSize(new_space_front)) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
if (promotion_mode == PROMOTE_MARKED) {
@@ -1953,7 +1951,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// Take another spin if there are now unswept objects in new space
// (there are currently no more unswept promoted objects).
- } while (new_space_front != new_space_.top());
+ } while (new_space_front != new_space_->top());
return new_space_front;
}
@@ -2283,6 +2281,8 @@ bool Heap::CreateInitialMaps() {
DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info_entry)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
Context::NUMBER_FUNCTION_INDEX)
ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
@@ -2391,6 +2391,12 @@ bool Heap::CreateInitialMaps() {
}
{
+ AllocationResult allocation = AllocateEmptyScopeInfo();
+ if (!allocation.To(&obj)) return false;
+ }
+
+ set_empty_scope_info(ScopeInfo::cast(obj));
+ {
AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
if (!allocation.To(&obj)) return false;
}
@@ -2432,7 +2438,7 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
int size = HeapNumber::kSize;
- STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
+ STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
AllocationSpace space = SelectSpace(pretenure);
@@ -2452,7 +2458,7 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count], \
PretenureFlag pretenure) { \
int size = Type::kSize; \
- STATIC_ASSERT(Type::kSize <= Page::kMaxRegularHeapObjectSize); \
+ STATIC_ASSERT(Type::kSize <= kMaxRegularHeapObjectSize); \
\
AllocationSpace space = SelectSpace(pretenure); \
\
@@ -2476,7 +2482,7 @@ SIMD128_TYPES(SIMD_ALLOCATE_DEFINITION)
AllocationResult Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
- STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
+ STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
HeapObject* result = nullptr;
{
@@ -2488,10 +2494,9 @@ AllocationResult Heap::AllocateCell(Object* value) {
return result;
}
-
AllocationResult Heap::AllocatePropertyCell() {
int size = PropertyCell::kSize;
- STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
+ STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
HeapObject* result = nullptr;
AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
@@ -2509,7 +2514,7 @@ AllocationResult Heap::AllocatePropertyCell() {
AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
int size = WeakCell::kSize;
- STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
+ STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);
HeapObject* result = nullptr;
{
AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
@@ -2729,12 +2734,6 @@ void Heap::CreateInitialObjects() {
#undef SYMBOL_INIT
}
- // Allocate the dictionary of intrinsic function names.
- Handle<NameDictionary> intrinsic_names =
- NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
- Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
- set_intrinsic_function_names(*intrinsic_names);
-
Handle<NameDictionary> empty_properties_dictionary =
NameDictionary::New(isolate(), 0, TENURED);
empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
@@ -2777,18 +2776,18 @@ void Heap::CreateInitialObjects() {
{
StaticFeedbackVectorSpec spec;
- FeedbackVectorSlot load_ic_slot = spec.AddLoadICSlot();
- FeedbackVectorSlot keyed_load_ic_slot = spec.AddKeyedLoadICSlot();
- FeedbackVectorSlot store_ic_slot = spec.AddStoreICSlot();
- FeedbackVectorSlot keyed_store_ic_slot = spec.AddKeyedStoreICSlot();
-
- DCHECK_EQ(load_ic_slot,
- FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
- DCHECK_EQ(keyed_load_ic_slot,
+ FeedbackVectorSlot slot = spec.AddLoadICSlot();
+ DCHECK_EQ(slot, FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
+
+ slot = spec.AddKeyedLoadICSlot();
+ DCHECK_EQ(slot,
FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- DCHECK_EQ(store_ic_slot,
- FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
- DCHECK_EQ(keyed_store_ic_slot,
+
+ slot = spec.AddStoreICSlot();
+ DCHECK_EQ(slot, FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
+
+ slot = spec.AddKeyedStoreICSlot();
+ DCHECK_EQ(slot,
FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
Handle<TypeFeedbackMetadata> dummy_metadata =
@@ -2796,19 +2795,36 @@ void Heap::CreateInitialObjects() {
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::New(isolate(), dummy_metadata);
- Object* megamorphic = *TypeFeedbackVector::MegamorphicSentinel(isolate());
- dummy_vector->Set(load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
- dummy_vector->Set(keyed_load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
- dummy_vector->Set(store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
- dummy_vector->Set(keyed_store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
-
set_dummy_vector(*dummy_vector);
+
+ // Now initialize dummy vector's entries.
+ LoadICNexus(isolate()).ConfigureMegamorphic();
+ StoreICNexus(isolate()).ConfigureMegamorphic();
+ KeyedLoadICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
+ KeyedStoreICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
}
{
+ // Create a canonical empty TypeFeedbackVector, which is shared by all
+ // functions that don't need actual type feedback slots. Note however
+ // that all these functions will share the same invocation count, but
+ // that shouldn't matter since we only use the invocation count to
+ // relativize the absolute call counts, but we can only have call counts
+ // if we have actual feedback slots.
+ Handle<FixedArray> empty_type_feedback_vector = factory->NewFixedArray(
+ TypeFeedbackVector::kReservedIndexCount, TENURED);
+ empty_type_feedback_vector->set(TypeFeedbackVector::kMetadataIndex,
+ empty_fixed_array());
+ empty_type_feedback_vector->set(TypeFeedbackVector::kInvocationCountIndex,
+ Smi::FromInt(0));
+ set_empty_type_feedback_vector(*empty_type_feedback_vector);
+
+ // We use a canonical empty LiteralsArray for all functions that neither
+ // have literals nor need a TypeFeedbackVector (besides the invocation
+ // count special slot).
Handle<FixedArray> empty_literals_array =
factory->NewFixedArray(1, TENURED);
- empty_literals_array->set(0, *factory->empty_fixed_array());
+ empty_literals_array->set(0, *empty_type_feedback_vector);
set_empty_literals_array(*empty_literals_array);
}
@@ -2882,6 +2898,10 @@ void Heap::CreateInitialObjects() {
handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
set_species_protector(*species_cell);
+ cell = factory->NewPropertyCell();
+ cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+ set_string_length_protector(*cell);
+
set_serialized_templates(empty_fixed_array());
set_weak_stack_trace_list(Smi::FromInt(0));
@@ -3009,7 +3029,7 @@ FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
AllocationResult Heap::AllocateForeign(Address address,
PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
- STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
+ STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
Foreign* result = nullptr;
AllocationResult allocation = Allocate(foreign_map(), space);
@@ -3776,6 +3796,18 @@ AllocationResult Heap::AllocateEmptyFixedArray() {
return result;
}
+AllocationResult Heap::AllocateEmptyScopeInfo() {
+ int size = FixedArray::SizeFor(0);
+ HeapObject* result = nullptr;
+ {
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ if (!allocation.To(&result)) return allocation;
+ }
+ // Initialize the object.
+ result->set_map_no_write_barrier(scope_info_map());
+ FixedArray::cast(result)->set_length(0);
+ return result;
+}
AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
if (!InNewSpace(src)) {
@@ -3908,7 +3940,14 @@ AllocationResult Heap::AllocateRawFixedArray(int length,
int size = FixedArray::SizeFor(length);
AllocationSpace space = SelectSpace(pretenure);
- return AllocateRaw(size, space);
+ AllocationResult result = AllocateRaw(size, space);
+ if (!result.IsRetry() && size > kMaxRegularHeapObjectSize &&
+ FLAG_use_marking_progress_bar) {
+ MemoryChunk* chunk =
+ MemoryChunk::FromAddress(result.ToObjectChecked()->address());
+ chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
+ }
+ return result;
}
@@ -3988,7 +4027,7 @@ AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
AllocationResult Heap::AllocateSymbol() {
// Statically ensure that it is safe to allocate symbols in paged spaces.
- STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
+ STATIC_ASSERT(Symbol::kSize <= kMaxRegularHeapObjectSize);
HeapObject* result = nullptr;
AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
@@ -4049,7 +4088,8 @@ bool Heap::IsHeapIterable() {
void Heap::MakeHeapIterable() {
DCHECK(AllowHeapAllocation::IsAllowed());
if (!IsHeapIterable()) {
- CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
+ CollectAllGarbage(kMakeHeapIterableMask,
+ GarbageCollectionReason::kMakeHeapIterable);
}
if (mark_compact_collector()->sweeping_in_progress()) {
mark_compact_collector()->EnsureSweepingCompleted();
@@ -4081,10 +4121,10 @@ double Heap::YoungGenerationMutatorUtilization() {
tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
if (FLAG_trace_mutator_utilization) {
- PrintIsolate(isolate(),
- "Young generation mutator utilization = %.3f ("
- "mutator_speed=%.f, gc_speed=%.f)\n",
- result, mutator_speed, gc_speed);
+ isolate()->PrintWithTimestamp(
+ "Young generation mutator utilization = %.3f ("
+ "mutator_speed=%.f, gc_speed=%.f)\n",
+ result, mutator_speed, gc_speed);
}
return result;
}
@@ -4097,10 +4137,10 @@ double Heap::OldGenerationMutatorUtilization() {
tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
if (FLAG_trace_mutator_utilization) {
- PrintIsolate(isolate(),
- "Old generation mutator utilization = %.3f ("
- "mutator_speed=%.f, gc_speed=%.f)\n",
- result, mutator_speed, gc_speed);
+ isolate()->PrintWithTimestamp(
+ "Old generation mutator utilization = %.3f ("
+ "mutator_speed=%.f, gc_speed=%.f)\n",
+ result, mutator_speed, gc_speed);
}
return result;
}
@@ -4170,44 +4210,49 @@ void Heap::ReduceNewSpaceSize() {
if (ShouldReduceMemory() ||
((allocation_throughput != 0) &&
(allocation_throughput < kLowAllocationThroughput))) {
- new_space_.Shrink();
+ new_space_->Shrink();
UncommitFromSpace();
}
}
+bool Heap::MarkingDequesAreEmpty() {
+ return mark_compact_collector()->marking_deque()->IsEmpty() &&
+ (!UsingEmbedderHeapTracer() ||
+ (wrappers_to_trace() == 0 &&
+ embedder_heap_tracer()->NumberOfWrappersToTrace() == 0));
+}
-void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
+void Heap::FinalizeIncrementalMarkingIfComplete(
+ GarbageCollectionReason gc_reason) {
if (incremental_marking()->IsMarking() &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->finalize_marking_completed() &&
- mark_compact_collector()->marking_deque()->IsEmpty()))) {
- FinalizeIncrementalMarking(comment);
+ MarkingDequesAreEmpty()))) {
+ FinalizeIncrementalMarking(gc_reason);
} else if (incremental_marking()->IsComplete() ||
(mark_compact_collector()->marking_deque()->IsEmpty())) {
- CollectAllGarbage(current_gc_flags_, comment);
+ CollectAllGarbage(current_gc_flags_, gc_reason);
}
}
-
-bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
+bool Heap::TryFinalizeIdleIncrementalMarking(
+ double idle_time_in_ms, GarbageCollectionReason gc_reason) {
size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
double final_incremental_mark_compact_speed_in_bytes_per_ms =
tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->finalize_marking_completed() &&
- mark_compact_collector()->marking_deque()->IsEmpty() &&
+ MarkingDequesAreEmpty() &&
gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
idle_time_in_ms))) {
- FinalizeIncrementalMarking(
- "Idle notification: finalize incremental marking");
+ FinalizeIncrementalMarking(gc_reason);
return true;
} else if (incremental_marking()->IsComplete() ||
- (mark_compact_collector()->marking_deque()->IsEmpty() &&
+ (MarkingDequesAreEmpty() &&
gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
idle_time_in_ms, size_of_objects,
final_incremental_mark_compact_speed_in_bytes_per_ms))) {
- CollectAllGarbage(current_gc_flags_,
- "idle notification: finalize incremental marking");
+ CollectAllGarbage(current_gc_flags_, gc_reason);
return true;
}
return false;
@@ -4267,22 +4312,23 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
result = true;
break;
case DO_INCREMENTAL_STEP: {
- if (incremental_marking()->incremental_marking_job()->IdleTaskPending()) {
- result = true;
- } else {
- incremental_marking()
- ->incremental_marking_job()
- ->NotifyIdleTaskProgress();
- result = IncrementalMarkingJob::IdleTask::Step(this, deadline_in_ms) ==
- IncrementalMarkingJob::IdleTask::kDone;
+ const double remaining_idle_time_in_ms =
+ incremental_marking()->AdvanceIncrementalMarking(
+ deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
+ if (remaining_idle_time_in_ms > 0.0) {
+ TryFinalizeIdleIncrementalMarking(
+ remaining_idle_time_in_ms,
+ GarbageCollectionReason::kFinalizeMarkingViaTask);
}
+ result = incremental_marking()->IsStopped();
break;
}
case DO_FULL_GC: {
DCHECK(contexts_disposed_ > 0);
HistogramTimerScope scope(isolate_->counters()->gc_context());
TRACE_EVENT0("v8", "V8.GCContext");
- CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
+ CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
break;
}
case DO_NOTHING:
@@ -4328,8 +4374,7 @@ void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
FLAG_trace_idle_notification_verbose) {
- PrintIsolate(isolate_, "%8.0f ms: ", isolate()->time_millis_since_init());
- PrintF(
+ isolate_->PrintWithTimestamp(
"Idle notification: requested idle time %.2f ms, used idle time %.2f "
"ms, deadline usage %.2f ms [",
idle_time_in_ms, idle_time_in_ms - deadline_difference,
@@ -4416,10 +4461,11 @@ void Heap::CheckMemoryPressure() {
}
}
if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
- CollectGarbageOnMemoryPressure("memory pressure");
+ CollectGarbageOnMemoryPressure();
} else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
- StartIdleIncrementalMarking();
+ StartIncrementalMarking(kReduceMemoryFootprintMask,
+ GarbageCollectionReason::kMemoryPressure);
}
}
MemoryReducer::Event event;
@@ -4428,7 +4474,7 @@ void Heap::CheckMemoryPressure() {
memory_reducer_->NotifyPossibleGarbage(event);
}
-void Heap::CollectGarbageOnMemoryPressure(const char* source) {
+void Heap::CollectGarbageOnMemoryPressure() {
const int kGarbageThresholdInBytes = 8 * MB;
const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
// This constant is the maximum response time in RAIL performance model.
@@ -4436,7 +4482,8 @@ void Heap::CollectGarbageOnMemoryPressure(const char* source) {
double start = MonotonicallyIncreasingTimeInMs();
CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
- source, kGCCallbackFlagCollectAllAvailableGarbage);
+ GarbageCollectionReason::kMemoryPressure,
+ kGCCallbackFlagCollectAllAvailableGarbage);
double end = MonotonicallyIncreasingTimeInMs();
// Estimate how much memory we can free.
@@ -4451,11 +4498,13 @@ void Heap::CollectGarbageOnMemoryPressure(const char* source) {
// Otherwise, start incremental marking.
if (end - start < kMaxMemoryPressurePauseMs / 2) {
CollectAllGarbage(
- kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask, source,
+ kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
+ GarbageCollectionReason::kMemoryPressure,
kGCCallbackFlagCollectAllAvailableGarbage);
} else {
if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
- StartIdleIncrementalMarking();
+ StartIncrementalMarking(kReduceMemoryFootprintMask,
+ GarbageCollectionReason::kMemoryPressure);
}
}
}
@@ -4527,7 +4576,7 @@ void Heap::ReportHeapStatistics(const char* title) {
PrintF("Heap statistics : ");
memory_allocator()->ReportStatistics();
PrintF("To space : ");
- new_space_.ReportStatistics();
+ new_space_->ReportStatistics();
PrintF("Old space : ");
old_space_->ReportStatistics();
PrintF("Code space : ");
@@ -4541,12 +4590,64 @@ void Heap::ReportHeapStatistics(const char* title) {
#endif // DEBUG
+const char* Heap::GarbageCollectionReasonToString(
+ GarbageCollectionReason gc_reason) {
+ switch (gc_reason) {
+ case GarbageCollectionReason::kAllocationFailure:
+ return "allocation failure";
+ case GarbageCollectionReason::kAllocationLimit:
+ return "allocation limit";
+ case GarbageCollectionReason::kContextDisposal:
+ return "context disposal";
+ case GarbageCollectionReason::kCountersExtension:
+ return "counters extension";
+ case GarbageCollectionReason::kDebugger:
+ return "debugger";
+ case GarbageCollectionReason::kDeserializer:
+ return "deserialize";
+ case GarbageCollectionReason::kExternalMemoryPressure:
+ return "external memory pressure";
+ case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
+ return "finalize incremental marking via stack guard";
+ case GarbageCollectionReason::kFinalizeMarkingViaTask:
+ return "finalize incremental marking via task";
+ case GarbageCollectionReason::kFullHashtable:
+ return "full hash-table";
+ case GarbageCollectionReason::kHeapProfiler:
+ return "heap profiler";
+ case GarbageCollectionReason::kIdleTask:
+ return "idle task";
+ case GarbageCollectionReason::kLastResort:
+ return "last resort";
+ case GarbageCollectionReason::kLowMemoryNotification:
+ return "low memory notification";
+ case GarbageCollectionReason::kMakeHeapIterable:
+ return "make heap iterable";
+ case GarbageCollectionReason::kMemoryPressure:
+ return "memory pressure";
+ case GarbageCollectionReason::kMemoryReducer:
+ return "memory reducer";
+ case GarbageCollectionReason::kRuntime:
+ return "runtime";
+ case GarbageCollectionReason::kSamplingProfiler:
+ return "sampling profiler";
+ case GarbageCollectionReason::kSnapshotCreator:
+ return "snapshot creator";
+ case GarbageCollectionReason::kTesting:
+ return "testing";
+ case GarbageCollectionReason::kUnknown:
+ return "unknown";
+ }
+ UNREACHABLE();
+ return "";
+}
+
bool Heap::Contains(HeapObject* value) {
if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
return false;
}
return HasBeenSetUp() &&
- (new_space_.ToSpaceContains(value) || old_space_->Contains(value) ||
+ (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
code_space_->Contains(value) || map_space_->Contains(value) ||
lo_space_->Contains(value));
}
@@ -4556,7 +4657,7 @@ bool Heap::ContainsSlow(Address addr) {
return false;
}
return HasBeenSetUp() &&
- (new_space_.ToSpaceContainsSlow(addr) ||
+ (new_space_->ToSpaceContainsSlow(addr) ||
old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
}
@@ -4569,7 +4670,7 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
switch (space) {
case NEW_SPACE:
- return new_space_.ToSpaceContains(value);
+ return new_space_->ToSpaceContains(value);
case OLD_SPACE:
return old_space_->Contains(value);
case CODE_SPACE:
@@ -4591,7 +4692,7 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
switch (space) {
case NEW_SPACE:
- return new_space_.ToSpaceContainsSlow(addr);
+ return new_space_->ToSpaceContainsSlow(addr);
case OLD_SPACE:
return old_space_->ContainsSlow(addr);
case CODE_SPACE:
@@ -4654,7 +4755,7 @@ void Heap::Verify() {
VerifySmisVisitor smis_visitor;
IterateSmiRoots(&smis_visitor);
- new_space_.Verify();
+ new_space_->Verify();
old_space_->Verify(&visitor);
map_space_->Verify(&visitor);
@@ -4673,9 +4774,9 @@ void Heap::Verify() {
void Heap::ZapFromSpace() {
- if (!new_space_.IsFromSpaceCommitted()) return;
- for (Page* page : NewSpacePageRange(new_space_.FromSpaceStart(),
- new_space_.FromSpaceEnd())) {
+ if (!new_space_->IsFromSpaceCommitted()) return;
+ for (Page* page : NewSpacePageRange(new_space_->FromSpaceStart(),
+ new_space_->FromSpaceEnd())) {
for (Address cursor = page->area_start(), limit = page->area_end();
cursor < limit; cursor += kPointerSize) {
Memory::Address_at(cursor) = kFromSpaceZapValue;
@@ -4967,7 +5068,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
if (FLAG_stress_compaction) {
// This will cause more frequent GCs when stressing.
- max_semi_space_size_ = Page::kPageSize;
+ max_semi_space_size_ = MB;
}
// The new space size must be a power of two to support single-bit testing
@@ -5018,7 +5119,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
old_generation_allocation_limit_ = initial_old_generation_size_;
// We rely on being able to allocate new arrays in paged spaces.
- DCHECK(Page::kMaxRegularHeapObjectSize >=
+ DCHECK(kMaxRegularHeapObjectSize >=
(JSArray::kSize +
FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
AllocationMemento::kSize));
@@ -5060,8 +5161,8 @@ bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->start_marker = HeapStats::kStartMarker;
*stats->end_marker = HeapStats::kEndMarker;
- *stats->new_space_size = new_space_.SizeAsInt();
- *stats->new_space_capacity = new_space_.Capacity();
+ *stats->new_space_size = new_space_->Size();
+ *stats->new_space_capacity = new_space_->Capacity();
*stats->old_space_size = old_space_->SizeOfObjects();
*stats->old_space_capacity = old_space_->Capacity();
*stats->code_space_size = code_space_->SizeOfObjects();
@@ -5183,11 +5284,19 @@ intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
CHECK(old_gen_size > 0);
intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
limit = Max(limit, old_gen_size + MinimumAllocationLimitGrowingStep());
- limit += new_space_.Capacity();
+ limit += new_space_->Capacity();
intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
return Min(limit, halfway_to_the_max);
}
+intptr_t Heap::MinimumAllocationLimitGrowingStep() {
+ const double kRegularAllocationLimitGrowingStep = 8;
+ const double kLowMemoryAllocationLimitGrowingStep = 2;
+ intptr_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
+ return limit * (ShouldOptimizeForMemoryUsage()
+ ? kLowMemoryAllocationLimitGrowingStep
+ : kRegularAllocationLimitGrowingStep);
+}
void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
double gc_speed,
@@ -5195,11 +5304,11 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
double factor = HeapGrowingFactor(gc_speed, mutator_speed);
if (FLAG_trace_gc_verbose) {
- PrintIsolate(isolate_,
- "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
- "(gc=%.f, mutator=%.f)\n",
- factor, kTargetMutatorUtilization, gc_speed / mutator_speed,
- gc_speed, mutator_speed);
+ isolate_->PrintWithTimestamp(
+ "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
+ "(gc=%.f, mutator=%.f)\n",
+ factor, kTargetMutatorUtilization, gc_speed / mutator_speed, gc_speed,
+ mutator_speed);
}
if (IsMemoryConstrainedDevice()) {
@@ -5223,14 +5332,13 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
CalculateOldGenerationAllocationLimit(factor, old_gen_size);
if (FLAG_trace_gc_verbose) {
- PrintIsolate(isolate_, "Grow: old size: %" V8PRIdPTR
- " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
- old_gen_size / KB, old_generation_allocation_limit_ / KB,
- factor);
+ isolate_->PrintWithTimestamp("Grow: old size: %" V8PRIdPTR
+ " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
+ old_gen_size / KB,
+ old_generation_allocation_limit_ / KB, factor);
}
}
-
void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
double gc_speed,
double mutator_speed) {
@@ -5238,17 +5346,64 @@ void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
if (limit < old_generation_allocation_limit_) {
if (FLAG_trace_gc_verbose) {
- PrintIsolate(isolate_,
- "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
- " KB, "
- "new limit: %" V8PRIdPTR " KB (%.1f)\n",
- old_gen_size / KB, old_generation_allocation_limit_ / KB,
- limit / KB, factor);
+ isolate_->PrintWithTimestamp(
+ "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
+ " KB, "
+ "new limit: %" V8PRIdPTR " KB (%.1f)\n",
+ old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
+ factor);
}
old_generation_allocation_limit_ = limit;
}
}
+// This predicate is called when an old generation space cannot allocated from
+// the free list and is about to add a new page. Returning false will cause a
+// major GC. It happens when the old generation allocation limit is reached and
+// - either we need to optimize for memory usage,
+// - or the incremental marking is not in progress and we cannot start it.
+bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
+ if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
+ // We reached the old generation allocation limit.
+
+ if (ShouldOptimizeForMemoryUsage()) return false;
+
+ if (incremental_marking()->IsStopped() &&
+ IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
+ // We cannot start incremental marking.
+ return false;
+ }
+ return true;
+}
+
+// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
+// The kNoLimit means that either incremental marking is disabled or it is too
+// early to start incremental marking.
+// The kSoftLimit means that incremental marking should be started soon.
+// The kHardLimit means that incremental marking should be started immediately.
+Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
+ if (!incremental_marking()->CanBeActivated() ||
+ PromotedSpaceSizeOfObjects() < IncrementalMarking::kActivationThreshold) {
+ // Incremental marking is disabled or it is too early to start.
+ return IncrementalMarkingLimit::kNoLimit;
+ }
+ if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
+ HighMemoryPressure()) {
+ // If there is high memory pressure or stress testing is enabled, then
+ // start marking immediately.
+ return IncrementalMarkingLimit::kHardLimit;
+ }
+ intptr_t old_generation_space_available = OldGenerationSpaceAvailable();
+ if (old_generation_space_available > new_space_->Capacity()) {
+ return IncrementalMarkingLimit::kNoLimit;
+ }
+ // We are close to the allocation limit.
+ // Choose between the hard and the soft limits.
+ if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) {
+ return IncrementalMarkingLimit::kHardLimit;
+ }
+ return IncrementalMarkingLimit::kSoftLimit;
+}
void Heap::EnableInlineAllocation() {
if (!inline_allocation_disabled_) return;
@@ -5316,33 +5471,30 @@ bool Heap::SetUp() {
// Initialize incremental marking.
incremental_marking_ = new IncrementalMarking(this);
- // Set up new space.
- if (!new_space_.SetUp(initial_semispace_size_, max_semi_space_size_)) {
+ for (int i = 0; i <= LAST_SPACE; i++) {
+ space_[i] = nullptr;
+ }
+
+ space_[NEW_SPACE] = new_space_ = new NewSpace(this);
+ if (!new_space_->SetUp(initial_semispace_size_, max_semi_space_size_)) {
return false;
}
new_space_top_after_last_gc_ = new_space()->top();
- // Initialize old space.
- old_space_ = new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
- if (old_space_ == NULL) return false;
+ space_[OLD_SPACE] = old_space_ =
+ new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
if (!old_space_->SetUp()) return false;
- // Initialize the code space, set its maximum capacity to the old
- // generation size. It needs executable memory.
- code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
- if (code_space_ == NULL) return false;
+ space_[CODE_SPACE] = code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
if (!code_space_->SetUp()) return false;
- // Initialize map space.
- map_space_ = new MapSpace(this, MAP_SPACE);
- if (map_space_ == NULL) return false;
+ space_[MAP_SPACE] = map_space_ = new MapSpace(this, MAP_SPACE);
if (!map_space_->SetUp()) return false;
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
- lo_space_ = new LargeObjectSpace(this, LO_SPACE);
- if (lo_space_ == NULL) return false;
+ space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this, LO_SPACE);
if (!lo_space_->SetUp()) return false;
// Set up the seed that is used to randomize the string hash function.
@@ -5362,20 +5514,14 @@ bool Heap::SetUp() {
}
tracer_ = new GCTracer(this);
-
scavenge_collector_ = new Scavenger(this);
-
mark_compact_collector_ = new MarkCompactCollector(this);
-
gc_idle_time_handler_ = new GCIdleTimeHandler();
-
memory_reducer_ = new MemoryReducer(this);
-
if (FLAG_track_gc_object_stats) {
live_object_stats_ = new ObjectStats(this);
dead_object_stats_ = new ObjectStats(this);
}
-
scavenge_job_ = new ScavengeJob();
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
@@ -5435,28 +5581,52 @@ void Heap::PrintAlloctionsHash() {
void Heap::NotifyDeserializationComplete() {
- deserialization_complete_ = true;
-#ifdef DEBUG
- // All pages right after bootstrapping must be marked as never-evacuate.
+ DCHECK_EQ(0, gc_count());
PagedSpaces spaces(this);
for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
+ if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
+#ifdef DEBUG
+ // All pages right after bootstrapping must be marked as never-evacuate.
for (Page* p : *s) {
CHECK(p->NeverEvacuate());
}
- }
#endif // DEBUG
+ }
+
+ deserialization_complete_ = true;
}
void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
- mark_compact_collector()->SetEmbedderHeapTracer(tracer);
+ DCHECK_NOT_NULL(tracer);
+ CHECK_NULL(embedder_heap_tracer_);
+ embedder_heap_tracer_ = tracer;
}
-bool Heap::UsingEmbedderHeapTracer() {
- return mark_compact_collector()->UsingEmbedderHeapTracer();
+void Heap::RegisterWrappersWithEmbedderHeapTracer() {
+ DCHECK(UsingEmbedderHeapTracer());
+ if (wrappers_to_trace_.empty()) {
+ return;
+ }
+ embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
+ wrappers_to_trace_.clear();
}
void Heap::TracePossibleWrapper(JSObject* js_object) {
- mark_compact_collector()->TracePossibleWrapper(js_object);
+ DCHECK(js_object->WasConstructedFromApiFunction());
+ if (js_object->GetInternalFieldCount() >= 2 &&
+ js_object->GetInternalField(0) &&
+ js_object->GetInternalField(0) != undefined_value() &&
+ js_object->GetInternalField(1) != undefined_value()) {
+ DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
+ wrappers_to_trace_.push_back(std::pair<void*, void*>(
+ reinterpret_cast<void*>(js_object->GetInternalField(0)),
+ reinterpret_cast<void*>(js_object->GetInternalField(1))));
+ }
+}
+
+bool Heap::RequiresImmediateWrapperProcessing() {
+ const size_t kTooManyWrappers = 16000;
+ return wrappers_to_trace_.size() > kTooManyWrappers;
}
void Heap::RegisterExternallyReferencedObject(Object** object) {
@@ -5480,33 +5650,18 @@ void Heap::TearDown() {
UpdateMaximumCommitted();
- if (FLAG_print_cumulative_gc_stat) {
- PrintF("\n");
- PrintF("gc_count=%d ", gc_count_);
- PrintF("mark_sweep_count=%d ", ms_count_);
- PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
- PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
- PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
- PrintF("max_alive_after_gc=%" V8PRIdPTR " ", get_max_alive_after_gc());
- PrintF("total_marking_time=%.1f ", tracer()->cumulative_marking_duration());
- PrintF("total_sweeping_time=%.1f ",
- tracer()->cumulative_sweeping_duration());
- PrintF("\n\n");
- }
-
if (FLAG_print_max_heap_committed) {
PrintF("\n");
- PrintF("maximum_committed_by_heap=%" V8PRIdPTR " ",
- MaximumCommittedMemory());
- PrintF("maximum_committed_by_new_space=%" V8PRIdPTR " ",
- new_space_.MaximumCommittedMemory());
- PrintF("maximum_committed_by_old_space=%" V8PRIdPTR " ",
+ PrintF("maximum_committed_by_heap=%" PRIuS " ", MaximumCommittedMemory());
+ PrintF("maximum_committed_by_new_space=%" PRIuS " ",
+ new_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_old_space=%" PRIuS " ",
old_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_code_space=%" V8PRIdPTR " ",
+ PrintF("maximum_committed_by_code_space=%" PRIuS " ",
code_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_map_space=%" V8PRIdPTR " ",
+ PrintF("maximum_committed_by_map_space=%" PRIuS " ",
map_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_lo_space=%" V8PRIdPTR " ",
+ PrintF("maximum_committed_by_lo_space=%" PRIuS " ",
lo_space_->MaximumCommittedMemory());
PrintF("\n\n");
}
@@ -5560,7 +5715,9 @@ void Heap::TearDown() {
delete tracer_;
tracer_ = nullptr;
- new_space_.TearDown();
+ new_space_->TearDown();
+ delete new_space_;
+ new_space_ = nullptr;
if (old_space_ != NULL) {
delete old_space_;
@@ -5599,6 +5756,9 @@ void Heap::TearDown() {
delete memory_allocator_;
memory_allocator_ = nullptr;
+
+ delete embedder_reference_reporter_;
+ embedder_reference_reporter_ = nullptr;
}
@@ -5879,14 +6039,10 @@ OldSpace* OldSpaces::next() {
}
}
-
SpaceIterator::SpaceIterator(Heap* heap)
- : heap_(heap), current_space_(FIRST_SPACE), iterator_(NULL) {}
-
+ : heap_(heap), current_space_(FIRST_SPACE - 1) {}
SpaceIterator::~SpaceIterator() {
- // Delete active iterator if any.
- delete iterator_;
}
@@ -5895,48 +6051,9 @@ bool SpaceIterator::has_next() {
return current_space_ != LAST_SPACE;
}
-
-ObjectIterator* SpaceIterator::next() {
- if (iterator_ != NULL) {
- delete iterator_;
- iterator_ = NULL;
- // Move to the next space
- current_space_++;
- if (current_space_ > LAST_SPACE) {
- return NULL;
- }
- }
-
- // Return iterator for the new current space.
- return CreateIterator();
-}
-
-
-// Create an iterator for the space to iterate.
-ObjectIterator* SpaceIterator::CreateIterator() {
- DCHECK(iterator_ == NULL);
-
- switch (current_space_) {
- case NEW_SPACE:
- iterator_ = new SemiSpaceIterator(heap_->new_space());
- break;
- case OLD_SPACE:
- iterator_ = new HeapObjectIterator(heap_->old_space());
- break;
- case CODE_SPACE:
- iterator_ = new HeapObjectIterator(heap_->code_space());
- break;
- case MAP_SPACE:
- iterator_ = new HeapObjectIterator(heap_->map_space());
- break;
- case LO_SPACE:
- iterator_ = new LargeObjectIterator(heap_->lo_space());
- break;
- }
-
- // Return the newly allocated iterator;
- DCHECK(iterator_ != NULL);
- return iterator_;
+Space* SpaceIterator::next() {
+ DCHECK(has_next());
+ return heap_->space(++current_space_);
}
@@ -6021,7 +6138,7 @@ HeapIterator::HeapIterator(Heap* heap,
default:
break;
}
- object_iterator_ = space_iterator_->next();
+ object_iterator_ = space_iterator_->next()->GetObjectIterator();
}
@@ -6034,8 +6151,6 @@ HeapIterator::~HeapIterator() {
DCHECK(object_iterator_ == nullptr);
}
#endif
- // Make sure the last iterator is deallocated.
- delete object_iterator_;
delete space_iterator_;
delete filter_;
}
@@ -6052,22 +6167,22 @@ HeapObject* HeapIterator::next() {
HeapObject* HeapIterator::NextObject() {
// No iterator means we are done.
- if (object_iterator_ == nullptr) return nullptr;
+ if (object_iterator_.get() == nullptr) return nullptr;
- if (HeapObject* obj = object_iterator_->Next()) {
+ if (HeapObject* obj = object_iterator_.get()->Next()) {
// If the current iterator has more objects we are fine.
return obj;
} else {
// Go though the spaces looking for one that has objects.
while (space_iterator_->has_next()) {
- object_iterator_ = space_iterator_->next();
- if (HeapObject* obj = object_iterator_->Next()) {
+ object_iterator_ = space_iterator_->next()->GetObjectIterator();
+ if (HeapObject* obj = object_iterator_.get()->Next()) {
return obj;
}
}
}
// Done with the last space.
- object_iterator_ = nullptr;
+ object_iterator_.reset(nullptr);
return nullptr;
}
@@ -6260,95 +6375,10 @@ void Heap::TracePathToGlobal() {
}
#endif
-
-void Heap::UpdateCumulativeGCStatistics(double duration,
- double spent_in_mutator,
- double marking_time) {
- if (FLAG_print_cumulative_gc_stat) {
- total_gc_time_ms_ += duration;
- max_gc_pause_ = Max(max_gc_pause_, duration);
- max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
- min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
- } else if (FLAG_trace_gc_verbose) {
+void Heap::UpdateTotalGCTime(double duration) {
+ if (FLAG_trace_gc_verbose) {
total_gc_time_ms_ += duration;
}
-
- marking_time_ += marking_time;
-}
-
-
-int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
- DisallowHeapAllocation no_gc;
- // Uses only lower 32 bits if pointers are larger.
- uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
- return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
-}
-
-
-int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
- DisallowHeapAllocation no_gc;
- int index = (Hash(map, name) & kHashMask);
- for (int i = 0; i < kEntriesPerBucket; i++) {
- Key& key = keys_[index + i];
- if ((key.map == *map) && key.name->Equals(*name)) {
- return field_offsets_[index + i];
- }
- }
- return kNotFound;
-}
-
-
-void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
- int field_offset) {
- DisallowHeapAllocation no_gc;
- if (!name->IsUniqueName()) {
- if (!StringTable::InternalizeStringIfExists(
- name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
- return;
- }
- }
- // This cache is cleared only between mark compact passes, so we expect the
- // cache to only contain old space names.
- DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
-
- int index = (Hash(map, name) & kHashMask);
- // After a GC there will be free slots, so we use them in order (this may
- // help to get the most frequently used one in position 0).
- for (int i = 0; i < kEntriesPerBucket; i++) {
- Key& key = keys_[index];
- Object* free_entry_indicator = NULL;
- if (key.map == free_entry_indicator) {
- key.map = *map;
- key.name = *name;
- field_offsets_[index + i] = field_offset;
- return;
- }
- }
- // No free entry found in this bucket, so we move them all down one and
- // put the new entry at position zero.
- for (int i = kEntriesPerBucket - 1; i > 0; i--) {
- Key& key = keys_[index + i];
- Key& key2 = keys_[index + i - 1];
- key = key2;
- field_offsets_[index + i] = field_offsets_[index + i - 1];
- }
-
- // Write the new first entry.
- Key& key = keys_[index];
- key.map = *map;
- key.name = *name;
- field_offsets_[index] = field_offset;
-}
-
-
-void KeyedLookupCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
-}
-
-
-void DescriptorLookupCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}
void Heap::ExternalStringTable::CleanUp() {
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index b9b058c1cd..ba89686672 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -16,9 +16,8 @@
#include "src/base/atomic-utils.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
-// TODO(mstarzinger): One more include to kill!
-#include "src/heap/spaces.h"
#include "src/list.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
@@ -49,6 +48,8 @@ using v8::MemoryPressureLevel;
V(Map, one_byte_string_map, OneByteStringMap) \
V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
V(Map, scope_info_map, ScopeInfoMap) \
+ V(Map, module_info_entry_map, ModuleInfoEntryMap) \
+ V(Map, module_info_map, ModuleInfoMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, code_map, CodeMap) \
V(Map, function_context_map, FunctionContextMap) \
@@ -59,7 +60,9 @@ using v8::MemoryPressureLevel;
V(Map, heap_number_map, HeapNumberMap) \
V(Map, transition_array_map, TransitionArrayMap) \
V(FixedArray, empty_literals_array, EmptyLiteralsArray) \
+ V(FixedArray, empty_type_feedback_vector, EmptyTypeFeedbackVector) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
+ V(ScopeInfo, empty_scope_info, EmptyScopeInfo) \
V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
/* Entries beyond the first 32 */ \
@@ -164,6 +167,7 @@ using v8::MemoryPressureLevel;
V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
V(PropertyCell, has_instance_protector, HasInstanceProtector) \
V(Cell, species_protector, SpeciesProtector) \
+ V(PropertyCell, string_length_protector, StringLengthProtector) \
/* Special numbers */ \
V(HeapNumber, nan_value, NanValue) \
V(HeapNumber, hole_nan_value, HoleNanValue) \
@@ -185,7 +189,6 @@ using v8::MemoryPressureLevel;
V(FixedArray, experimental_extra_natives_source_cache, \
ExperimentalExtraNativesSourceCache) \
/* Lists and dictionaries */ \
- V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary) \
V(Object, symbol_registry, SymbolRegistry) \
V(Object, script_list, ScriptList) \
@@ -275,6 +278,8 @@ using v8::MemoryPressureLevel;
V(FixedArrayMap) \
V(CodeMap) \
V(ScopeInfoMap) \
+ V(ModuleInfoEntryMap) \
+ V(ModuleInfoMap) \
V(FixedCOWArrayMap) \
V(FixedDoubleArrayMap) \
V(WeakCellMap) \
@@ -322,100 +327,87 @@ class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
+class MemoryAllocator;
class MemoryReducer;
+class ObjectIterator;
class ObjectStats;
+class Page;
+class PagedSpace;
class Scavenger;
class ScavengeJob;
+class Space;
class StoreBuffer;
+class TracePossibleWrapperReporter;
class WeakObjectRetainer;
+typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
+
enum PromotionMode { PROMOTE_MARKED, DEFAULT_PROMOTION };
-typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
+enum ArrayStorageAllocationMode {
+ DONT_INITIALIZE_ARRAY_ELEMENTS,
+ INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+};
+
+enum class ClearRecordedSlots { kYes, kNo };
+
+enum class ClearBlackArea { kYes, kNo };
+
+enum class GarbageCollectionReason {
+ kUnknown = 0,
+ kAllocationFailure = 1,
+ kAllocationLimit = 2,
+ kContextDisposal = 3,
+ kCountersExtension = 4,
+ kDebugger = 5,
+ kDeserializer = 6,
+ kExternalMemoryPressure = 7,
+ kFinalizeMarkingViaStackGuard = 8,
+ kFinalizeMarkingViaTask = 9,
+ kFullHashtable = 10,
+ kHeapProfiler = 11,
+ kIdleTask = 12,
+ kLastResort = 13,
+ kLowMemoryNotification = 14,
+ kMakeHeapIterable = 15,
+ kMemoryPressure = 16,
+ kMemoryReducer = 17,
+ kRuntime = 18,
+ kSamplingProfiler = 19,
+ kSnapshotCreator = 20,
+ kTesting = 21
+ // If you add new items here, then update the incremental_marking_reason,
+ // mark_compact_reason, and scavenge_reason counters in counters.h.
+ // Also update src/tools/metrics/histograms/histograms.xml in chromium.
+};
-// A queue of objects promoted during scavenge. Each object is accompanied
-// by it's size to avoid dereferencing a map pointer for scanning.
-// The last page in to-space is used for the promotion queue. On conflict
-// during scavenge, the promotion queue is allocated externally and all
-// entries are copied to the external queue.
+// A queue of objects promoted during scavenge. Each object is accompanied by
+// its size to avoid dereferencing a map pointer for scanning. The last page in
+// to-space is used for the promotion queue. On conflict during scavenge, the
+// promotion queue is allocated externally and all entries are copied to the
+// external queue.
class PromotionQueue {
public:
explicit PromotionQueue(Heap* heap)
- : front_(NULL),
- rear_(NULL),
- limit_(NULL),
- emergency_stack_(0),
+ : front_(nullptr),
+ rear_(nullptr),
+ limit_(nullptr),
+ emergency_stack_(nullptr),
heap_(heap) {}
void Initialize();
+ void Destroy();
- void Destroy() {
- DCHECK(is_empty());
- delete emergency_stack_;
- emergency_stack_ = NULL;
- }
-
- Page* GetHeadPage() {
- return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
- }
-
- void SetNewLimit(Address limit) {
- // If we are already using an emergency stack, we can ignore it.
- if (emergency_stack_) return;
+ inline void SetNewLimit(Address limit);
+ inline bool IsBelowPromotionQueue(Address to_space_top);
- // If the limit is not on the same page, we can ignore it.
- if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
-
- limit_ = reinterpret_cast<struct Entry*>(limit);
-
- if (limit_ <= rear_) {
- return;
- }
-
- RelocateQueueHead();
- }
-
- bool IsBelowPromotionQueue(Address to_space_top) {
- // If an emergency stack is used, the to-space address cannot interfere
- // with the promotion queue.
- if (emergency_stack_) return true;
-
- // If the given to-space top pointer and the head of the promotion queue
- // are not on the same page, then the to-space objects are below the
- // promotion queue.
- if (GetHeadPage() != Page::FromAddress(to_space_top)) {
- return true;
- }
- // If the to space top pointer is smaller or equal than the promotion
- // queue head, then the to-space objects are below the promotion queue.
- return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
- }
+ inline void insert(HeapObject* target, int32_t size, bool was_marked_black);
+ inline void remove(HeapObject** target, int32_t* size,
+ bool* was_marked_black);
bool is_empty() {
return (front_ == rear_) &&
- (emergency_stack_ == NULL || emergency_stack_->length() == 0);
- }
-
- inline void insert(HeapObject* target, int32_t size, bool was_marked_black);
-
- void remove(HeapObject** target, int32_t* size, bool* was_marked_black) {
- DCHECK(!is_empty());
- if (front_ == rear_) {
- Entry e = emergency_stack_->RemoveLast();
- *target = e.obj_;
- *size = e.size_;
- *was_marked_black = e.was_marked_black_;
- return;
- }
-
- struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
- *target = entry->obj_;
- *size = entry->size_;
- *was_marked_black = entry->was_marked_black_;
-
- // Assert no underflow.
- SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
- reinterpret_cast<Address>(front_));
+ (emergency_stack_ == nullptr || emergency_stack_->length() == 0);
}
private:
@@ -428,6 +420,8 @@ class PromotionQueue {
bool was_marked_black_ : 1;
};
+ inline Page* GetHeadPage();
+
void RelocateQueueHead();
// The front of the queue is higher in the memory page chain than the rear.
@@ -436,21 +430,94 @@ class PromotionQueue {
struct Entry* limit_;
List<Entry>* emergency_stack_;
-
Heap* heap_;
DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
+class AllocationResult {
+ public:
+ static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
+ return AllocationResult(space);
+ }
-enum ArrayStorageAllocationMode {
- DONT_INITIALIZE_ARRAY_ELEMENTS,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+ // Implicit constructor from Object*.
+ AllocationResult(Object* object) // NOLINT
+ : object_(object) {
+ // AllocationResults can't return Smis, which are used to represent
+ // failure and the space to retry in.
+ CHECK(!object->IsSmi());
+ }
+
+ AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
+
+ inline bool IsRetry() { return object_->IsSmi(); }
+ inline HeapObject* ToObjectChecked();
+ inline AllocationSpace RetrySpace();
+
+ template <typename T>
+ bool To(T** obj) {
+ if (IsRetry()) return false;
+ *obj = T::cast(object_);
+ return true;
+ }
+
+ private:
+ explicit AllocationResult(AllocationSpace space)
+ : object_(Smi::FromInt(static_cast<int>(space))) {}
+
+ Object* object_;
};
-enum class ClearRecordedSlots { kYes, kNo };
+STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
-enum class ClearBlackArea { kYes, kNo };
+#ifdef DEBUG
+struct CommentStatistic {
+ const char* comment;
+ int size;
+ int count;
+ void Clear() {
+ comment = NULL;
+ size = 0;
+ count = 0;
+ }
+ // Must be small, since an iteration is used for lookup.
+ static const int kMaxComments = 64;
+};
+#endif
+
+class NumberAndSizeInfo BASE_EMBEDDED {
+ public:
+ NumberAndSizeInfo() : number_(0), bytes_(0) {}
+
+ int number() const { return number_; }
+ void increment_number(int num) { number_ += num; }
+
+ int bytes() const { return bytes_; }
+ void increment_bytes(int size) { bytes_ += size; }
+
+ void clear() {
+ number_ = 0;
+ bytes_ = 0;
+ }
+
+ private:
+ int number_;
+ int bytes_;
+};
+
+// HistogramInfo class for recording a single "bar" of a histogram. This
+// class is used for collecting statistics to print to the log file.
+class HistogramInfo : public NumberAndSizeInfo {
+ public:
+ HistogramInfo() : NumberAndSizeInfo(), name_(nullptr) {}
+
+ const char* name() { return name_; }
+ void set_name(const char* name) { name_ = name; }
+
+ private:
+ const char* name_;
+};
class Heap {
public:
@@ -637,30 +704,10 @@ class Heap {
// should not happen during deserialization.
void NotifyDeserializationComplete();
- intptr_t old_generation_allocation_limit() const {
- return old_generation_allocation_limit_;
- }
-
- bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
-
- Address* NewSpaceAllocationTopAddress() {
- return new_space_.allocation_top_address();
- }
- Address* NewSpaceAllocationLimitAddress() {
- return new_space_.allocation_limit_address();
- }
-
- Address* OldSpaceAllocationTopAddress() {
- return old_space_->allocation_top_address();
- }
- Address* OldSpaceAllocationLimitAddress() {
- return old_space_->allocation_limit_address();
- }
-
- bool CanExpandOldGeneration(int size) {
- if (force_oom_) return false;
- return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
- }
+ inline Address* NewSpaceAllocationTopAddress();
+ inline Address* NewSpaceAllocationLimitAddress();
+ inline Address* OldSpaceAllocationTopAddress();
+ inline Address* OldSpaceAllocationLimitAddress();
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
@@ -763,14 +810,6 @@ class Heap {
// Returns false if not able to reserve.
bool ReserveSpace(Reservation* reservations, List<Address>* maps);
- void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
-
- bool UsingEmbedderHeapTracer();
-
- void TracePossibleWrapper(JSObject* js_object);
-
- void RegisterExternallyReferencedObject(Object** object);
-
//
// Support for the API.
//
@@ -792,18 +831,6 @@ class Heap {
// Check new space expansion criteria and expand semispaces if it was hit.
void CheckNewSpaceExpansionCriteria();
- inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
- if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
-
- intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();
-
- if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
-
- if (HighMemoryPressure()) return true;
-
- return false;
- }
-
void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
// An object should be promoted if the object has survived a
@@ -817,8 +844,6 @@ class Heap {
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
- inline bool OldGenerationAllocationLimitReached();
-
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
inline void CompletelyClearInstanceofCache();
@@ -847,6 +872,8 @@ class Heap {
global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
}
+ int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }
+
int64_t external_memory() { return external_memory_; }
void update_external_memory(int64_t delta) { external_memory_ += delta; }
@@ -861,9 +888,7 @@ class Heap {
void DeoptMarkedAllocationSites();
- bool DeoptMaybeTenuredAllocationSites() {
- return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
- }
+ inline bool DeoptMaybeTenuredAllocationSites();
void AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
Handle<WeakCell> code);
@@ -937,39 +962,16 @@ class Heap {
// Getters for spaces. =======================================================
// ===========================================================================
- Address NewSpaceTop() { return new_space_.top(); }
+ inline Address NewSpaceTop();
- NewSpace* new_space() { return &new_space_; }
+ NewSpace* new_space() { return new_space_; }
OldSpace* old_space() { return old_space_; }
OldSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
LargeObjectSpace* lo_space() { return lo_space_; }
- PagedSpace* paged_space(int idx) {
- switch (idx) {
- case OLD_SPACE:
- return old_space();
- case MAP_SPACE:
- return map_space();
- case CODE_SPACE:
- return code_space();
- case NEW_SPACE:
- case LO_SPACE:
- UNREACHABLE();
- }
- return NULL;
- }
-
- Space* space(int idx) {
- switch (idx) {
- case NEW_SPACE:
- return new_space();
- case LO_SPACE:
- return lo_space();
- default:
- return paged_space(idx);
- }
- }
+ inline PagedSpace* paged_space(int idx);
+ inline Space* space(int idx);
// Returns name of the space.
const char* GetSpaceName(int idx);
@@ -1090,22 +1092,22 @@ class Heap {
// Returns whether there is a chance that another major GC could
// collect more garbage.
inline bool CollectGarbage(
- AllocationSpace space, const char* gc_reason = NULL,
+ AllocationSpace space, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
// non-zero, then the slower precise sweeper is used, which leaves the heap
// in a state where we can iterate over the heap visiting all objects.
void CollectAllGarbage(
- int flags = kFinalizeIncrementalMarkingMask, const char* gc_reason = NULL,
+ int flags, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Last hope GC, should try to squeeze as much as possible.
- void CollectAllAvailableGarbage(const char* gc_reason = NULL);
+ void CollectAllAvailableGarbage(GarbageCollectionReason gc_reason);
// Reports and external memory pressure event, either performs a major GC or
// completes incremental marking in order to free external resources.
- void ReportExternalMemoryPressure(const char* gc_reason = NULL);
+ void ReportExternalMemoryPressure();
// Invoked when GC was requested via the stack guard.
void HandleGCRequest();
@@ -1156,24 +1158,54 @@ class Heap {
// Start incremental marking and ensure that idle time handler can perform
// incremental steps.
- void StartIdleIncrementalMarking();
+ void StartIdleIncrementalMarking(GarbageCollectionReason gc_reason);
// Starts incremental marking assuming incremental marking is currently
// stopped.
- void StartIncrementalMarking(int gc_flags = kNoGCFlags,
- const GCCallbackFlags gc_callback_flags =
- GCCallbackFlags::kNoGCCallbackFlags,
- const char* reason = nullptr);
+ void StartIncrementalMarking(
+ int gc_flags, GarbageCollectionReason gc_reason,
+ GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
+
+ void StartIncrementalMarkingIfAllocationLimitIsReached(
+ int gc_flags,
+ GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
- void FinalizeIncrementalMarkingIfComplete(const char* comment);
+ void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
- bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms);
+ bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms,
+ GarbageCollectionReason gc_reason);
void RegisterReservationsForBlackAllocation(Reservation* reservations);
IncrementalMarking* incremental_marking() { return incremental_marking_; }
// ===========================================================================
+ // Embedder heap tracer support. =============================================
+ // ===========================================================================
+
+ void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+
+ bool UsingEmbedderHeapTracer() { return embedder_heap_tracer() != nullptr; }
+
+ void TracePossibleWrapper(JSObject* js_object);
+
+ void RegisterExternallyReferencedObject(Object** object);
+
+ void RegisterWrappersWithEmbedderHeapTracer();
+
+ // In order to avoid running out of memory we force tracing wrappers if there
+ // are too many of them.
+ bool RequiresImmediateWrapperProcessing();
+
+ EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
+
+ EmbedderReachableReferenceReporter* embedder_reachable_reference_reporter() {
+ return embedder_reference_reporter_;
+ }
+
+ size_t wrappers_to_trace() { return wrappers_to_trace_.size(); }
+
+ // ===========================================================================
// External string table API. ================================================
// ===========================================================================
@@ -1258,19 +1290,19 @@ class Heap {
intptr_t OldGenerationCapacity();
// Returns the amount of memory currently committed for the heap.
- intptr_t CommittedMemory();
+ size_t CommittedMemory();
// Returns the amount of memory currently committed for the old space.
- intptr_t CommittedOldGenerationMemory();
+ size_t CommittedOldGenerationMemory();
// Returns the amount of executable memory currently committed for the heap.
- intptr_t CommittedMemoryExecutable();
+ size_t CommittedMemoryExecutable();
// Returns the amount of phyical memory currently committed for the heap.
size_t CommittedPhysicalMemory();
// Returns the maximum amount of memory ever committed for the heap.
- intptr_t MaximumCommittedMemory() { return maximum_committed_; }
+ size_t MaximumCommittedMemory() { return maximum_committed_; }
// Updates the maximum committed memory for the heap. Should be called
// whenever a space grows.
@@ -1326,13 +1358,9 @@ class Heap {
return static_cast<intptr_t>(total);
}
- void UpdateNewSpaceAllocationCounter() {
- new_space_allocation_counter_ = NewSpaceAllocationCounter();
- }
+ inline void UpdateNewSpaceAllocationCounter();
- size_t NewSpaceAllocationCounter() {
- return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
- }
+ inline size_t NewSpaceAllocationCounter();
// This should be used only for testing.
void set_new_space_allocation_counter(size_t new_value) {
@@ -1340,16 +1368,18 @@ class Heap {
}
void UpdateOldGenerationAllocationCounter() {
- old_generation_allocation_counter_ = OldGenerationAllocationCounter();
+ old_generation_allocation_counter_at_last_gc_ =
+ OldGenerationAllocationCounter();
}
size_t OldGenerationAllocationCounter() {
- return old_generation_allocation_counter_ + PromotedSinceLastGC();
+ return old_generation_allocation_counter_at_last_gc_ +
+ PromotedSinceLastGC();
}
// This should be used only for testing.
- void set_old_generation_allocation_counter(size_t new_value) {
- old_generation_allocation_counter_ = new_value;
+ void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
+ old_generation_allocation_counter_at_last_gc_ = new_value;
}
size_t PromotedSinceLastGC() {
@@ -1456,6 +1486,9 @@ class Heap {
void ReportCodeStatistics(const char* title);
#endif
+ static const char* GarbageCollectionReasonToString(
+ GarbageCollectionReason gc_reason);
+
private:
class PretenuringScope;
@@ -1588,6 +1621,10 @@ class Heap {
return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
}
+ // Checks whether both, the internal marking deque, and the embedder provided
+ // one are empty. Avoid in fast path as it potentially calls through the API.
+ bool MarkingDequesAreEmpty();
+
void PreprocessStackTraces();
// Checks whether a global GC is necessary
@@ -1607,7 +1644,7 @@ class Heap {
// Returns whether there is a chance that another major GC could
// collect more garbage.
bool CollectGarbage(
- GarbageCollector collector, const char* gc_reason,
+ GarbageCollector collector, GarbageCollectionReason gc_reason,
const char* collector_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
@@ -1646,7 +1683,7 @@ class Heap {
void EnsureFromSpaceIsCommitted();
// Uncommit unused semi space.
- bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+ bool UncommitFromSpace();
// Fill in bogus values in from space
void ZapFromSpace();
@@ -1669,10 +1706,6 @@ class Heap {
// Flush the number to string cache.
void FlushNumberStringCache();
- // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
- // Re-visit incremental marking heuristics.
- bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
-
void ConfigureInitialOldGenerationSize();
bool HasLowYoungGenerationAllocationRate();
@@ -1682,10 +1715,6 @@ class Heap {
void ReduceNewSpaceSize();
- bool TryFinalizeIdleIncrementalMarking(
- double idle_time_in_ms, size_t size_of_objects,
- size_t mark_compact_speed_in_bytes_per_ms);
-
GCIdleTimeHeapState ComputeHeapState();
bool PerformIdleTimeAction(GCIdleTimeAction action,
@@ -1705,13 +1734,13 @@ class Heap {
void CompactRetainedMaps(ArrayList* retained_maps);
- void CollectGarbageOnMemoryPressure(const char* source);
+ void CollectGarbageOnMemoryPressure();
// Attempt to over-approximate the weak closure by marking object groups and
// implicit references from global handles, but don't atomically complete
// marking. If we continue to mark incrementally, we might have marked
// objects that die later.
- void FinalizeIncrementalMarking(const char* gc_reason);
+ void FinalizeIncrementalMarking(GarbageCollectionReason gc_reason);
// Returns the timer used for a given GC type.
// - GCScavenger: young generation GC
@@ -1772,18 +1801,7 @@ class Heap {
return old_generation_allocation_limit_ - PromotedTotalSize();
}
- // Returns maximum GC pause.
- double get_max_gc_pause() { return max_gc_pause_; }
-
- // Returns maximum size of objects alive after GC.
- intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
-
- // Returns minimal interval between two subsequent collections.
- double get_min_in_mutator() { return min_in_mutator_; }
-
- // Update GC statistics that are tracked on the Heap.
- void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
- double marking_time);
+ void UpdateTotalGCTime(double duration);
bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
@@ -1807,15 +1825,28 @@ class Heap {
void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed,
double mutator_speed);
- intptr_t MinimumAllocationLimitGrowingStep() {
- const double kRegularAllocationLimitGrowingStep = 8;
- const double kLowMemoryAllocationLimitGrowingStep = 2;
- intptr_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
- return limit * (ShouldOptimizeForMemoryUsage()
- ? kLowMemoryAllocationLimitGrowingStep
- : kRegularAllocationLimitGrowingStep);
+ intptr_t MinimumAllocationLimitGrowingStep();
+
+ intptr_t old_generation_allocation_limit() const {
+ return old_generation_allocation_limit_;
+ }
+
+ bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
+
+ bool CanExpandOldGeneration(int size) {
+ if (force_oom_) return false;
+ return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
+ }
+
+ bool IsCloseToOutOfMemory(size_t slack) {
+ return OldGenerationCapacity() + slack >= MaxOldGenerationSize();
}
+ bool ShouldExpandOldGenerationOnAllocationFailure();
+
+ enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
+ IncrementalMarkingLimit IncrementalMarkingLimitReached();
+
// ===========================================================================
// Idle notification. ========================================================
// ===========================================================================
@@ -2011,6 +2042,9 @@ class Heap {
// Allocate empty fixed array.
MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
+ // Allocate empty scope info.
+ MUST_USE_RESULT AllocationResult AllocateEmptyScopeInfo();
+
// Allocate empty fixed typed array of given type.
MUST_USE_RESULT AllocationResult
AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
@@ -2068,7 +2102,7 @@ class Heap {
intptr_t initial_old_generation_size_;
bool old_generation_size_configured_;
intptr_t max_executable_size_;
- intptr_t maximum_committed_;
+ size_t maximum_committed_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
@@ -2095,11 +2129,13 @@ class Heap {
int global_ic_age_;
- NewSpace new_space_;
+ NewSpace* new_space_;
OldSpace* old_space_;
OldSpace* code_space_;
MapSpace* map_space_;
LargeObjectSpace* lo_space_;
+ // Map from the space id to the space.
+ Space* space_[LAST_SPACE + 1];
HeapState gc_state_;
int gc_post_processing_depth_;
Address new_space_top_after_last_gc_;
@@ -2136,10 +2172,6 @@ class Heap {
// generation and on every allocation in large object space.
intptr_t old_generation_allocation_limit_;
- // Indicates that an allocation has failed in the old generation since the
- // last GC.
- bool old_gen_exhausted_;
-
// Indicates that inline bump-pointer allocation has been globally disabled
// for all spaces. This is used to disable allocations in generated code.
bool inline_allocation_disabled_;
@@ -2168,7 +2200,6 @@ class Heap {
GCTracer* tracer_;
- int high_survival_rate_period_length_;
intptr_t promoted_objects_size_;
double promotion_ratio_;
double promotion_rate_;
@@ -2185,24 +2216,9 @@ class Heap {
// of the allocation site.
unsigned int maximum_size_scavenges_;
- // Maximum GC pause.
- double max_gc_pause_;
-
// Total time spent in GC.
double total_gc_time_ms_;
- // Maximum size of objects alive after GC.
- intptr_t max_alive_after_gc_;
-
- // Minimal interval between two subsequent collections.
- double min_in_mutator_;
-
- // Cumulative GC time spent in marking.
- double marking_time_;
-
- // Cumulative GC time spent in sweeping.
- double sweeping_time_;
-
// Last time an idle notification happened.
double last_idle_notification_time_;
@@ -2242,7 +2258,7 @@ class Heap {
// This counter is increased before each GC and never reset. To
// account for the bytes allocated since the last GC, use the
// OldGenerationAllocationCounter() function.
- size_t old_generation_allocation_counter_;
+ size_t old_generation_allocation_counter_at_last_gc_;
// The size of objects in old generation after the last MarkCompact GC.
size_t old_generation_size_at_last_gc_;
@@ -2293,6 +2309,10 @@ class Heap {
// The depth of HeapIterator nestings.
int heap_iterator_depth_;
+ EmbedderHeapTracer* embedder_heap_tracer_;
+ EmbedderReachableReferenceReporter* embedder_reference_reporter_;
+ std::vector<std::pair<void*, void*>> wrappers_to_trace_;
+
// Used for testing purposes.
bool force_oom_;
@@ -2303,12 +2323,15 @@ class Heap {
friend class HeapIterator;
friend class IdleScavengeObserver;
friend class IncrementalMarking;
+ friend class IncrementalMarkingJob;
friend class IteratePromotedObjectsVisitor;
+ friend class LargeObjectSpace;
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
friend class NewSpace;
friend class ObjectStatsCollector;
friend class Page;
+ friend class PagedSpace;
friend class Scavenger;
friend class StoreBuffer;
friend class TestMemoryAllocatorScope;
@@ -2402,7 +2425,7 @@ class AllSpaces BASE_EMBEDDED {
// Space iterator for iterating over all old spaces of the heap: Old space
// and code space. Returns each space in turn, and null when it is done.
-class OldSpaces BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE OldSpaces BASE_EMBEDDED {
public:
explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
OldSpace* next();
@@ -2427,23 +2450,17 @@ class PagedSpaces BASE_EMBEDDED {
};
-// Space iterator for iterating over all spaces of the heap.
-// For each space an object iterator is provided. The deallocation of the
-// returned object iterators is handled by the space iterator.
class SpaceIterator : public Malloced {
public:
explicit SpaceIterator(Heap* heap);
virtual ~SpaceIterator();
bool has_next();
- ObjectIterator* next();
+ Space* next();
private:
- ObjectIterator* CreateIterator();
-
Heap* heap_;
int current_space_; // from enum AllocationSpace.
- ObjectIterator* iterator_; // object iterator for the current space.
};
@@ -2489,113 +2506,9 @@ class HeapIterator BASE_EMBEDDED {
// Space iterator for iterating all the spaces.
SpaceIterator* space_iterator_;
// Object iterator for the space currently being iterated.
- ObjectIterator* object_iterator_;
-};
-
-
-// Cache for mapping (map, property name) into field offset.
-// Cleared at startup and prior to mark sweep collection.
-class KeyedLookupCache {
- public:
- // Lookup field offset for (map, name). If absent, -1 is returned.
- int Lookup(Handle<Map> map, Handle<Name> name);
-
- // Update an element in the cache.
- void Update(Handle<Map> map, Handle<Name> name, int field_offset);
-
- // Clear the cache.
- void Clear();
-
- static const int kLength = 256;
- static const int kCapacityMask = kLength - 1;
- static const int kMapHashShift = 5;
- static const int kHashMask = -4; // Zero the last two bits.
- static const int kEntriesPerBucket = 4;
- static const int kEntryLength = 2;
- static const int kMapIndex = 0;
- static const int kKeyIndex = 1;
- static const int kNotFound = -1;
-
- // kEntriesPerBucket should be a power of 2.
- STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
- STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
-
- private:
- KeyedLookupCache() {
- for (int i = 0; i < kLength; ++i) {
- keys_[i].map = NULL;
- keys_[i].name = NULL;
- field_offsets_[i] = kNotFound;
- }
- }
-
- static inline int Hash(Handle<Map> map, Handle<Name> name);
-
- // Get the address of the keys and field_offsets arrays. Used in
- // generated code to perform cache lookups.
- Address keys_address() { return reinterpret_cast<Address>(&keys_); }
-
- Address field_offsets_address() {
- return reinterpret_cast<Address>(&field_offsets_);
- }
-
- struct Key {
- Map* map;
- Name* name;
- };
-
- Key keys_[kLength];
- int field_offsets_[kLength];
-
- friend class ExternalReference;
- friend class Isolate;
- DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
+ std::unique_ptr<ObjectIterator> object_iterator_;
};
-
-// Cache for mapping (map, property name) into descriptor index.
-// The cache contains both positive and negative results.
-// Descriptor index equals kNotFound means the property is absent.
-// Cleared at startup and prior to any gc.
-class DescriptorLookupCache {
- public:
- // Lookup descriptor index for (map, name).
- // If absent, kAbsent is returned.
- inline int Lookup(Map* source, Name* name);
-
- // Update an element in the cache.
- inline void Update(Map* source, Name* name, int result);
-
- // Clear the cache.
- void Clear();
-
- static const int kAbsent = -2;
-
- private:
- DescriptorLookupCache() {
- for (int i = 0; i < kLength; ++i) {
- keys_[i].source = NULL;
- keys_[i].name = NULL;
- results_[i] = kAbsent;
- }
- }
-
- static inline int Hash(Object* source, Name* name);
-
- static const int kLength = 64;
- struct Key {
- Map* source;
- Name* name;
- };
-
- Key keys_[kLength];
- int results_[kLength];
-
- friend class Isolate;
- DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
-};
-
-
// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
public:
@@ -2720,6 +2633,18 @@ class AllocationObserver {
DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
+class TracePossibleWrapperReporter : public EmbedderReachableReferenceReporter {
+ public:
+ explicit TracePossibleWrapperReporter(Heap* heap) : heap_(heap) {}
+ void ReportExternalReference(Value* object) override {
+ heap_->RegisterExternallyReferencedObject(
+ reinterpret_cast<Object**>(object));
+ }
+
+ private:
+ Heap* heap_;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index fa22da6d41..ee594b2aee 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -6,6 +6,7 @@
#define V8_HEAP_INCREMENTAL_MARKING_INL_H_
#include "src/heap/incremental-marking.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
@@ -33,6 +34,15 @@ void IncrementalMarking::RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
}
}
+void IncrementalMarking::RestartIfNotMarking() {
+ if (state_ == COMPLETE) {
+ state_ = MARKING;
+ if (FLAG_trace_incremental_marking) {
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Restarting (new grey objects)\n");
+ }
+ }
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index fe14dd04c6..393b9cce7e 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -14,131 +14,49 @@
namespace v8 {
namespace internal {
-const double IncrementalMarkingJob::kLongDelayInSeconds = 5;
-const double IncrementalMarkingJob::kShortDelayInSeconds = 0.5;
-
void IncrementalMarkingJob::Start(Heap* heap) {
DCHECK(!heap->incremental_marking()->IsStopped());
- // We don't need to reset the flags because tasks from the previous job
- // can still be pending. We just want to ensure that tasks are posted
- // if they are not pending.
- // If delayed task is pending and made_progress_since_last_delayed_task_ is
- // true, then the delayed task will clear that flag when it is rescheduled.
- ScheduleIdleTask(heap);
- ScheduleDelayedTask(heap);
-}
-
-
-void IncrementalMarkingJob::NotifyIdleTask() { idle_task_pending_ = false; }
-
-
-void IncrementalMarkingJob::NotifyDelayedTask() {
- delayed_task_pending_ = false;
-}
-
-
-void IncrementalMarkingJob::NotifyIdleTaskProgress() {
- made_progress_since_last_delayed_task_ = true;
+ ScheduleTask(heap);
}
+void IncrementalMarkingJob::NotifyTask() { task_pending_ = false; }
-void IncrementalMarkingJob::ScheduleIdleTask(Heap* heap) {
- if (!idle_task_pending_) {
+void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
+ if (!task_pending_) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
- if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
- idle_task_pending_ = true;
- auto task = new IdleTask(heap->isolate(), this);
- V8::GetCurrentPlatform()->CallIdleOnForegroundThread(isolate, task);
- }
+ task_pending_ = true;
+ auto task = new Task(heap->isolate(), this);
+ V8::GetCurrentPlatform()->CallOnForegroundThread(isolate, task);
}
}
-
-void IncrementalMarkingJob::ScheduleDelayedTask(Heap* heap) {
- if (!delayed_task_pending_ && FLAG_memory_reducer) {
- v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
- delayed_task_pending_ = true;
- made_progress_since_last_delayed_task_ = false;
- auto task = new DelayedTask(heap->isolate(), this);
- double delay =
- heap->HighMemoryPressure() ? kShortDelayInSeconds : kLongDelayInSeconds;
- V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(isolate, task,
- delay);
- }
-}
-
-
-IncrementalMarkingJob::IdleTask::Progress IncrementalMarkingJob::IdleTask::Step(
- Heap* heap, double deadline_in_ms) {
- IncrementalMarking* incremental_marking = heap->incremental_marking();
- if (incremental_marking->IsStopped()) {
- return kDone;
- }
- if (incremental_marking->IsSweeping()) {
- incremental_marking->FinalizeSweeping();
- // TODO(hpayer): We can continue here if enough idle time is left.
- return kMoreWork;
- }
- const double remaining_idle_time_in_ms =
- incremental_marking->AdvanceIncrementalMarking(
- deadline_in_ms, IncrementalMarking::IdleStepActions());
- if (remaining_idle_time_in_ms > 0.0) {
- heap->TryFinalizeIdleIncrementalMarking(remaining_idle_time_in_ms);
- }
- return incremental_marking->IsStopped() ? kDone : kMoreWork;
-}
-
-
-void IncrementalMarkingJob::IdleTask::RunInternal(double deadline_in_seconds) {
- double deadline_in_ms =
- deadline_in_seconds *
- static_cast<double>(base::Time::kMillisecondsPerSecond);
- Heap* heap = isolate()->heap();
- double start_ms = heap->MonotonicallyIncreasingTimeInMs();
- job_->NotifyIdleTask();
- job_->NotifyIdleTaskProgress();
- if (Step(heap, deadline_in_ms) == kMoreWork) {
- job_->ScheduleIdleTask(heap);
- }
- if (FLAG_trace_idle_notification) {
- double current_time_ms = heap->MonotonicallyIncreasingTimeInMs();
- double idle_time_in_ms = deadline_in_ms - start_ms;
- double deadline_difference = deadline_in_ms - current_time_ms;
- PrintIsolate(isolate(), "%8.0f ms: ", isolate()->time_millis_since_init());
- PrintF(
- "Idle task: requested idle time %.2f ms, used idle time %.2f "
- "ms, deadline usage %.2f ms\n",
- idle_time_in_ms, idle_time_in_ms - deadline_difference,
- deadline_difference);
- }
-}
-
-
-void IncrementalMarkingJob::DelayedTask::Step(Heap* heap) {
- const int kIncrementalMarkingDelayMs = 50;
+void IncrementalMarkingJob::Task::Step(Heap* heap) {
+ const int kIncrementalMarkingDelayMs = 1;
double deadline =
heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
heap->incremental_marking()->AdvanceIncrementalMarking(
- deadline, i::IncrementalMarking::StepActions(
- i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- i::IncrementalMarking::FORCE_MARKING,
- i::IncrementalMarking::FORCE_COMPLETION));
+ deadline, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ i::IncrementalMarking::FORCE_COMPLETION, i::StepOrigin::kTask);
heap->FinalizeIncrementalMarkingIfComplete(
- "Incremental marking task: finalize incremental marking");
+ GarbageCollectionReason::kFinalizeMarkingViaTask);
}
-
-void IncrementalMarkingJob::DelayedTask::RunInternal() {
+void IncrementalMarkingJob::Task::RunInternal() {
Heap* heap = isolate()->heap();
- job_->NotifyDelayedTask();
+ job_->NotifyTask();
IncrementalMarking* incremental_marking = heap->incremental_marking();
- if (!incremental_marking->IsStopped()) {
- if (job_->ShouldForceMarkingStep()) {
- Step(heap);
+ if (incremental_marking->IsStopped()) {
+ if (heap->IncrementalMarkingLimitReached() !=
+ Heap::IncrementalMarkingLimit::kNoLimit) {
+ heap->StartIncrementalMarking(Heap::kNoGCFlags,
+ GarbageCollectionReason::kIdleTask,
+ kNoGCCallbackFlags);
}
- // The Step() above could have finished incremental marking.
+ }
+ if (!incremental_marking->IsStopped()) {
+ Step(heap);
if (!incremental_marking->IsStopped()) {
- job_->ScheduleDelayedTask(heap);
+ job_->ScheduleTask(heap);
}
}
}
diff --git a/deps/v8/src/heap/incremental-marking-job.h b/deps/v8/src/heap/incremental-marking-job.h
index 9c78182f2e..ccc60c55cb 100644
--- a/deps/v8/src/heap/incremental-marking-job.h
+++ b/deps/v8/src/heap/incremental-marking-job.h
@@ -14,31 +14,13 @@ class Heap;
class Isolate;
// The incremental marking job uses platform tasks to perform incremental
-// marking steps. The job posts an idle and a delayed task with a large delay.
-// The delayed task performs steps only if the idle task is not making progress.
-// We expect this to be a rare event since incremental marking should finish
-// quickly with the help of the mutator and the idle task.
-// The delayed task guarantees that we eventually finish incremental marking
-// even if the mutator becomes idle and the platform stops running idle tasks,
-// which can happen for background tabs in Chrome.
+// marking steps. The job posts a foreground task that makes a small (~1ms)
+// step and posts another task until the marking is completed.
class IncrementalMarkingJob {
public:
- class IdleTask : public CancelableIdleTask {
+ class Task : public CancelableTask {
public:
- explicit IdleTask(Isolate* isolate, IncrementalMarkingJob* job)
- : CancelableIdleTask(isolate), job_(job) {}
- enum Progress { kDone, kMoreWork };
- static Progress Step(Heap* heap, double deadline_in_ms);
- // CancelableIdleTask overrides.
- void RunInternal(double deadline_in_seconds) override;
-
- private:
- IncrementalMarkingJob* job_;
- };
-
- class DelayedTask : public CancelableTask {
- public:
- explicit DelayedTask(Isolate* isolate, IncrementalMarkingJob* job)
+ explicit Task(Isolate* isolate, IncrementalMarkingJob* job)
: CancelableTask(isolate), job_(job) {}
static void Step(Heap* heap);
// CancelableTask overrides.
@@ -48,33 +30,18 @@ class IncrementalMarkingJob {
IncrementalMarkingJob* job_;
};
- // Delay of the delayed task.
- static const double kLongDelayInSeconds;
- static const double kShortDelayInSeconds;
-
- IncrementalMarkingJob()
- : idle_task_pending_(false),
- delayed_task_pending_(false),
- made_progress_since_last_delayed_task_(false) {}
+ IncrementalMarkingJob() : task_pending_(false) {}
- bool ShouldForceMarkingStep() {
- return !made_progress_since_last_delayed_task_;
- }
-
- bool IdleTaskPending() { return idle_task_pending_; }
+ bool TaskPending() { return task_pending_; }
void Start(Heap* heap);
- void NotifyIdleTask();
- void NotifyDelayedTask();
- void NotifyIdleTaskProgress();
- void ScheduleIdleTask(Heap* heap);
- void ScheduleDelayedTask(Heap* heap);
+ void NotifyTask();
+
+ void ScheduleTask(Heap* heap);
private:
- bool idle_task_pending_;
- bool delayed_task_pending_;
- bool made_progress_since_last_delayed_task_;
+ bool task_pending_;
};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index b9e7c61ba0..99be9d0123 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -19,33 +19,22 @@
namespace v8 {
namespace internal {
-IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
- return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_MARKING,
- IncrementalMarking::DO_NOT_FORCE_COMPLETION);
-}
-
IncrementalMarking::IncrementalMarking(Heap* heap)
: heap_(heap),
- observer_(*this, kAllocatedThreshold),
state_(STOPPED),
+ initial_old_generation_size_(0),
+ bytes_marked_ahead_of_schedule_(0),
+ unscanned_bytes_of_large_object_(0),
+ idle_marking_delay_counter_(0),
+ incremental_marking_finalization_rounds_(0),
is_compacting_(false),
- steps_count_(0),
- old_generation_space_available_at_start_of_incremental_(0),
- old_generation_space_used_at_start_of_incremental_(0),
- bytes_rescanned_(0),
should_hurry_(false),
- marking_speed_(0),
- bytes_scanned_(0),
- allocated_(0),
- write_barriers_invoked_since_last_step_(0),
- idle_marking_delay_counter_(0),
- unscanned_bytes_of_large_object_(0),
was_activated_(false),
black_allocation_(false),
finalize_marking_completed_(false),
- incremental_marking_finalization_rounds_(0),
- request_type_(NONE) {}
+ request_type_(NONE),
+ new_generation_observer_(*this, kAllocatedThreshold),
+ old_generation_observer_(*this, kAllocatedThreshold) {}
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
HeapObject* value_heap_obj = HeapObject::cast(value);
@@ -76,19 +65,7 @@ void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
Isolate* isolate) {
DCHECK(obj->IsHeapObject());
- IncrementalMarking* marking = isolate->heap()->incremental_marking();
-
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- int counter = chunk->write_barrier_counter();
- if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
- marking->write_barriers_invoked_since_last_step_ +=
- MemoryChunk::kWriteBarrierCounterGranularity -
- chunk->write_barrier_counter();
- chunk->set_write_barrier_counter(
- MemoryChunk::kWriteBarrierCounterGranularity);
- }
-
- marking->RecordWrite(obj, slot, *slot);
+ isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
}
// static
@@ -202,20 +179,15 @@ class IncrementalMarkingMarkingVisitor
StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
- table_.Register(kVisitJSRegExp, &VisitJSRegExp);
}
static const int kProgressBarScanningChunk = 32 * 1024;
static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- // TODO(mstarzinger): Move setting of the flag to the allocation site of
- // the array. The visitor should just check the flag.
- if (FLAG_use_marking_progress_bar &&
- chunk->owner()->identity() == LO_SPACE) {
- chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
- }
if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ DCHECK(!FLAG_use_marking_progress_bar ||
+ chunk->owner()->identity() == LO_SPACE);
Heap* heap = map->GetHeap();
// When using a progress bar for large fixed arrays, scan only a chunk of
// the array and try to push it onto the marking deque again until it is
@@ -423,22 +395,6 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
}
-bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
-#ifndef DEBUG
- static const intptr_t kActivationThreshold = 8 * MB;
-#else
- // TODO(gc) consider setting this to some low level so that some
- // debug tests run with incremental marking and some without.
- static const intptr_t kActivationThreshold = 0;
-#endif
- // Don't switch on for very small heaps.
- return CanBeActivated() &&
- heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
- heap_->HeapIsFullEnoughToStartIncrementalMarking(
- heap_->old_generation_allocation_limit());
-}
-
-
bool IncrementalMarking::WasActivated() { return was_activated_; }
@@ -467,21 +423,6 @@ void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
}
-void IncrementalMarking::NotifyOfHighPromotionRate() {
- if (IsMarking()) {
- if (marking_speed_ < kFastMarking) {
- if (FLAG_trace_gc) {
- PrintIsolate(heap()->isolate(),
- "Increasing marking speed to %d "
- "due to high promotion rate\n",
- static_cast<int>(kFastMarking));
- }
- marking_speed_ = kFastMarking;
- }
- }
-}
-
-
static void PatchIncrementalMarkingRecordWriteStubs(
Heap* heap, RecordWriteStub::Mode mode) {
UnseededNumberDictionary* stubs = heap->code_stubs();
@@ -503,34 +444,60 @@ static void PatchIncrementalMarkingRecordWriteStubs(
}
}
-
-void IncrementalMarking::Start(const char* reason) {
+void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start (%s)\n",
- (reason == nullptr) ? "unknown reason" : reason);
+ int old_generation_size_mb =
+ static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB);
+ int old_generation_limit_mb =
+ static_cast<int>(heap()->old_generation_allocation_limit() / MB);
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
+ "slack %dMB\n",
+ Heap::GarbageCollectionReasonToString(gc_reason),
+ old_generation_size_mb, old_generation_limit_mb,
+ Max(0, old_generation_limit_mb - old_generation_size_mb));
}
DCHECK(FLAG_incremental_marking);
DCHECK(state_ == STOPPED);
DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
DCHECK(!heap_->isolate()->serializer_enabled());
+ Counters* counters = heap_->isolate()->counters();
+
+ counters->incremental_marking_reason()->AddSample(
+ static_cast<int>(gc_reason));
HistogramTimerScope incremental_marking_scope(
- heap_->isolate()->counters()->gc_incremental_marking_start());
+ counters->gc_incremental_marking_start());
TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
- ResetStepCounters();
-
+ heap_->tracer()->NotifyIncrementalMarkingStart();
+
+ start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
+ initial_old_generation_size_ = heap_->PromotedSpaceSizeOfObjects();
+ old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
+ bytes_allocated_ = 0;
+ bytes_marked_ahead_of_schedule_ = 0;
+ should_hurry_ = false;
was_activated_ = true;
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
StartMarking();
} else {
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start sweeping.\n");
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Start sweeping.\n");
}
state_ = SWEEPING;
}
- heap_->new_space()->AddAllocationObserver(&observer_);
+ SpaceIterator it(heap_);
+ while (it.has_next()) {
+ Space* space = it.next();
+ if (space == heap_->new_space()) {
+ space->AddAllocationObserver(&new_generation_observer_);
+ } else {
+ space->AddAllocationObserver(&old_generation_observer_);
+ }
+ }
incremental_marking_job()->Start(heap_);
}
@@ -542,12 +509,14 @@ void IncrementalMarking::StartMarking() {
// but we cannot enable black allocation while deserializing. Hence, we
// have to delay the start of incremental marking in that case.
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start delayed - serializer\n");
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Start delayed - serializer\n");
}
return;
}
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start marking\n");
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Start marking\n");
}
is_compacting_ = !FLAG_never_compact &&
@@ -559,7 +528,8 @@ void IncrementalMarking::StartMarking() {
if (heap_->UsingEmbedderHeapTracer()) {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
- heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
+ heap_->embedder_heap_tracer()->TracePrologue(
+ heap_->embedder_reachable_reference_reporter());
}
RecordWriteStub::Mode mode = is_compacting_
@@ -589,7 +559,7 @@ void IncrementalMarking::StartMarking() {
// Ready to start incremental marking.
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Running\n");
+ heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
}
}
@@ -601,7 +571,8 @@ void IncrementalMarking::StartBlackAllocation() {
heap()->map_space()->MarkAllocationInfoBlack();
heap()->code_space()->MarkAllocationInfoBlack();
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Black allocation started\n");
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Black allocation started\n");
}
}
@@ -609,11 +580,22 @@ void IncrementalMarking::FinishBlackAllocation() {
if (black_allocation_) {
black_allocation_ = false;
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Black allocation finished\n");
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Black allocation finished\n");
}
}
}
+void IncrementalMarking::AbortBlackAllocation() {
+ for (Page* page : *heap()->old_space()) {
+ page->ReleaseBlackAreaEndMarkerMap();
+ }
+ if (FLAG_trace_incremental_marking) {
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Black allocation aborted\n");
+ }
+}
+
void IncrementalMarking::MarkRoots() {
DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
@@ -742,7 +724,6 @@ void IncrementalMarking::RetainMaps() {
}
}
-
void IncrementalMarking::FinalizeIncrementally() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
DCHECK(!finalize_marking_completed_);
@@ -775,11 +756,12 @@ void IncrementalMarking::FinalizeIncrementally() {
abs(old_marking_deque_top -
heap_->mark_compact_collector()->marking_deque()->top());
+ marking_progress += static_cast<int>(heap_->wrappers_to_trace());
+
double end = heap_->MonotonicallyIncreasingTimeInMs();
double delta = end - start;
- heap_->tracer()->AddMarkingTime(delta);
if (FLAG_trace_incremental_marking) {
- PrintF(
+ heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Finalize incrementally round %d, "
"spent %d ms, marking progress %d.\n",
static_cast<int>(delta), incremental_marking_finalization_rounds_,
@@ -926,23 +908,23 @@ void IncrementalMarking::Hurry() {
// because should_hurry_ will force a full GC.
if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
double start = 0.0;
- if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
+ if (FLAG_trace_incremental_marking) {
start = heap_->MonotonicallyIncreasingTimeInMs();
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Hurry\n");
+ heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
}
}
// TODO(gc) hurry can mark objects it encounters black as mutator
// was stopped.
ProcessMarkingDeque(0, FORCE_COMPLETION);
state_ = COMPLETE;
- if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
+ if (FLAG_trace_incremental_marking) {
double end = heap_->MonotonicallyIncreasingTimeInMs();
double delta = end - start;
- heap_->tracer()->AddMarkingTime(delta);
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
- static_cast<int>(delta));
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+ static_cast<int>(delta));
}
}
}
@@ -968,12 +950,28 @@ void IncrementalMarking::Hurry() {
void IncrementalMarking::Stop() {
if (IsStopped()) return;
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Stopping.\n");
+ int old_generation_size_mb =
+ static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB);
+ int old_generation_limit_mb =
+ static_cast<int>(heap()->old_generation_allocation_limit() / MB);
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
+ "overshoot %dMB\n",
+ old_generation_size_mb, old_generation_limit_mb,
+ Max(0, old_generation_size_mb - old_generation_limit_mb));
+ }
+
+ SpaceIterator it(heap_);
+ while (it.has_next()) {
+ Space* space = it.next();
+ if (space == heap_->new_space()) {
+ space->RemoveAllocationObserver(&new_generation_observer_);
+ } else {
+ space->RemoveAllocationObserver(&old_generation_observer_);
+ }
}
- heap_->new_space()->RemoveAllocationObserver(&observer_);
IncrementalMarking::set_should_hurry(false);
- ResetStepCounters();
if (IsMarking()) {
PatchIncrementalMarkingRecordWriteStubs(heap_,
RecordWriteStub::STORE_BUFFER_ONLY);
@@ -995,7 +993,7 @@ void IncrementalMarking::Finalize() {
void IncrementalMarking::FinalizeMarking(CompletionAction action) {
DCHECK(!finalize_marking_completed_);
if (FLAG_trace_incremental_marking) {
- PrintF(
+ heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] requesting finalization of incremental "
"marking.\n");
}
@@ -1015,7 +1013,8 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
// the should-hurry flag to indicate that there can't be much work left to do.
set_should_hurry(true);
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Complete (normal).\n");
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Complete (normal).\n");
}
request_type_ = COMPLETE_MARKING;
if (action == GC_VIA_STACK_GUARD) {
@@ -1031,246 +1030,170 @@ void IncrementalMarking::Epilogue() {
}
double IncrementalMarking::AdvanceIncrementalMarking(
- double deadline_in_ms, IncrementalMarking::StepActions step_actions) {
+ double deadline_in_ms, CompletionAction completion_action,
+ ForceCompletionAction force_completion, StepOrigin step_origin) {
DCHECK(!IsStopped());
- intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
- GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs,
- heap()
- ->tracer()
- ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
double remaining_time_in_ms = 0.0;
- intptr_t bytes_processed = 0;
+ intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
+ kStepSizeInMs,
+ heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
do {
- bytes_processed =
- Step(step_size_in_bytes, step_actions.completion_action,
- step_actions.force_marking, step_actions.force_completion);
+ Step(step_size_in_bytes, completion_action, force_completion, step_origin);
remaining_time_in_ms =
deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
- } while (bytes_processed > 0 &&
- remaining_time_in_ms >=
- 2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
- !IsComplete() &&
+ } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
!heap()->mark_compact_collector()->marking_deque()->IsEmpty());
return remaining_time_in_ms;
}
-void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
- if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
- heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
- "old space step");
- } else {
- Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
- }
-}
-
-
-void IncrementalMarking::SpeedUp() {
- bool speed_up = false;
-
- if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
- static_cast<int>(kMarkingSpeedAccellerationInterval));
- }
- speed_up = true;
- }
-
- bool space_left_is_very_small =
- (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
-
- bool only_1_nth_of_space_that_was_available_still_left =
- (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
- old_generation_space_available_at_start_of_incremental_);
-
- if (space_left_is_very_small ||
- only_1_nth_of_space_that_was_available_still_left) {
- if (FLAG_trace_incremental_marking)
- PrintIsolate(heap()->isolate(),
- "Speed up marking because of low space left\n");
- speed_up = true;
- }
-
- bool size_of_old_space_multiplied_by_n_during_marking =
- (heap_->PromotedTotalSize() >
- (marking_speed_ + 1) *
- old_generation_space_used_at_start_of_incremental_);
- if (size_of_old_space_multiplied_by_n_during_marking) {
- speed_up = true;
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(heap()->isolate(),
- "Speed up marking because of heap size increase\n");
- }
- }
-
- int64_t promoted_during_marking =
- heap_->PromotedTotalSize() -
- old_generation_space_used_at_start_of_incremental_;
- intptr_t delay = marking_speed_ * MB;
- intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
-
- // We try to scan at at least twice the speed that we are allocating.
- if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(heap()->isolate(),
- "Speed up marking because marker was not keeping up\n");
- }
- speed_up = true;
- }
-
- if (speed_up) {
- if (state_ != MARKING) {
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(heap()->isolate(),
- "Postponing speeding up marking until marking starts\n");
- }
- } else {
- marking_speed_ += kMarkingSpeedAccelleration;
- marking_speed_ = static_cast<int>(
- Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
- marking_speed_);
- }
- }
- }
-}
-
void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
- (heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() ||
- !FLAG_concurrent_sweeping)) {
+ (!FLAG_concurrent_sweeping ||
+ heap_->mark_compact_collector()->sweeper().IsSweepingCompleted())) {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
- bytes_scanned_ = 0;
StartMarking();
}
}
-intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
- CompletionAction action,
- ForceMarkingAction marking,
- ForceCompletionAction completion) {
- DCHECK(allocated_bytes >= 0);
+size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
+ // Update bytes_allocated_ based on the allocation counter.
+ size_t current_counter = heap_->OldGenerationAllocationCounter();
+ bytes_allocated_ += current_counter - old_generation_allocation_counter_;
+ old_generation_allocation_counter_ = current_counter;
+ return bytes_allocated_;
+}
+
+size_t IncrementalMarking::StepSizeToMakeProgress() {
+ // We increase step size gradually based on the time passed in order to
+ // leave marking work to standalone tasks. The ramp up duration and the
+ // target step count are chosen based on benchmarks.
+ const int kRampUpIntervalMs = 300;
+ const size_t kTargetStepCount = 128;
+ const size_t kTargetStepCountAtOOM = 16;
+ size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
+
+ if (heap()->IsCloseToOutOfMemory(oom_slack)) {
+ return heap()->PromotedSpaceSizeOfObjects() / kTargetStepCountAtOOM;
+ }
+
+ size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
+ IncrementalMarking::kAllocatedThreshold);
+ double time_passed_ms =
+ heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
+ double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
+ return static_cast<size_t>(factor * step_size);
+}
+void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
(state_ != SWEEPING && state_ != MARKING)) {
- return 0;
- }
-
- allocated_ += allocated_bytes;
-
- if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
- write_barriers_invoked_since_last_step_ <
- kWriteBarriersInvokedThreshold) {
- return 0;
+ return;
}
- // If an idle notification happened recently, we delay marking steps.
- if (marking == DO_NOT_FORCE_MARKING &&
- heap_->RecentIdleNotificationHappened()) {
- return 0;
+ size_t bytes_to_process =
+ StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
+
+ if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
+ // The first step after Scavenge will see many allocated bytes.
+ // Cap the step size to distribute the marking work more uniformly.
+ size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+ kMaxStepSizeInMs,
+ heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+ bytes_to_process = Min(bytes_to_process, max_step_size);
+
+ size_t bytes_processed = 0;
+ if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
+ // Steps performed in tasks have put us ahead of schedule.
+ // We skip processing of marking dequeue here and thus
+ // shift marking time from inside V8 to standalone tasks.
+ bytes_marked_ahead_of_schedule_ -= bytes_to_process;
+ bytes_processed = bytes_to_process;
+ } else {
+ bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
+ FORCE_COMPLETION, StepOrigin::kV8);
+ }
+ bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
}
+}
- intptr_t bytes_processed = 0;
- {
- HistogramTimerScope incremental_marking_scope(
- heap_->isolate()->counters()->gc_incremental_marking());
- TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
- TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
- double start = heap_->MonotonicallyIncreasingTimeInMs();
-
- // The marking speed is driven either by the allocation rate or by the rate
- // at which we are having to check the color of objects in the write
- // barrier.
- // It is possible for a tight non-allocating loop to run a lot of write
- // barriers before we get here and check them (marking can only take place
- // on
- // allocation), so to reduce the lumpiness we don't use the write barriers
- // invoked since last step directly to determine the amount of work to do.
- intptr_t bytes_to_process =
- marking_speed_ *
- Max(allocated_, write_barriers_invoked_since_last_step_);
- allocated_ = 0;
- write_barriers_invoked_since_last_step_ = 0;
-
- bytes_scanned_ += bytes_to_process;
-
- // TODO(hpayer): Do not account for sweeping finalization while marking.
- if (state_ == SWEEPING) {
- FinalizeSweeping();
- }
+size_t IncrementalMarking::Step(size_t bytes_to_process,
+ CompletionAction action,
+ ForceCompletionAction completion,
+ StepOrigin step_origin) {
+ HistogramTimerScope incremental_marking_scope(
+ heap_->isolate()->counters()->gc_incremental_marking());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
+ double start = heap_->MonotonicallyIncreasingTimeInMs();
- if (state_ == MARKING) {
+ if (state_ == SWEEPING) {
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
+ FinalizeSweeping();
+ }
+
+ size_t bytes_processed = 0;
+ if (state_ == MARKING) {
+ const bool incremental_wrapper_tracing =
+ FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer();
+ const bool process_wrappers =
+ incremental_wrapper_tracing &&
+ (heap_->RequiresImmediateWrapperProcessing() ||
+ heap_->mark_compact_collector()->marking_deque()->IsEmpty());
+ bool wrapper_work_left = incremental_wrapper_tracing;
+ if (!process_wrappers) {
bytes_processed = ProcessMarkingDeque(bytes_to_process);
- if (FLAG_incremental_marking_wrappers &&
- heap_->UsingEmbedderHeapTracer()) {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
- // This currently marks through all registered wrappers and does not
- // respect bytes_to_process.
- // TODO(hpayer): Integrate incremental marking of wrappers into
- // bytes_to_process logic.
- heap_->mark_compact_collector()
- ->RegisterWrappersWithEmbedderHeapTracer();
- heap_->mark_compact_collector()->embedder_heap_tracer()->AdvanceTracing(
- 0,
- EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
+ if (step_origin == StepOrigin::kTask) {
+ bytes_marked_ahead_of_schedule_ += bytes_processed;
}
- if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
- if (completion == FORCE_COMPLETION ||
- IsIdleMarkingDelayCounterLimitReached()) {
- if (!finalize_marking_completed_) {
- FinalizeMarking(action);
- } else {
- MarkingComplete(action);
- }
+ } else {
+ const double wrapper_deadline =
+ heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
+ heap_->RegisterWrappersWithEmbedderHeapTracer();
+ wrapper_work_left = heap_->embedder_heap_tracer()->AdvanceTracing(
+ wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
+ EmbedderHeapTracer::ForceCompletionAction::
+ DO_NOT_FORCE_COMPLETION));
+ }
+
+ if (heap_->mark_compact_collector()->marking_deque()->IsEmpty() &&
+ !wrapper_work_left) {
+ if (completion == FORCE_COMPLETION ||
+ IsIdleMarkingDelayCounterLimitReached()) {
+ if (!finalize_marking_completed_) {
+ FinalizeMarking(action);
} else {
- IncrementIdleMarkingDelayCounter();
+ MarkingComplete(action);
}
+ } else {
+ IncrementIdleMarkingDelayCounter();
}
}
+ }
- steps_count_++;
-
- // Speed up marking if we are marking too slow or if we are almost done
- // with marking.
- SpeedUp();
-
- double end = heap_->MonotonicallyIncreasingTimeInMs();
- double duration = (end - start);
- // Note that we report zero bytes here when sweeping was in progress or
- // when we just started incremental marking. In these cases we did not
- // process the marking deque.
- heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
+ double end = heap_->MonotonicallyIncreasingTimeInMs();
+ double duration = (end - start);
+ // Note that we report zero bytes here when sweeping was in progress or
+ // when we just started incremental marking. In these cases we did not
+ // process the marking deque.
+ heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
+ if (FLAG_trace_incremental_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Step %s %zu bytes (%zu) in %.1f\n",
+ step_origin == StepOrigin::kV8 ? "in v8" : "in task", bytes_processed,
+ bytes_to_process, duration);
}
return bytes_processed;
}
-void IncrementalMarking::ResetStepCounters() {
- steps_count_ = 0;
- old_generation_space_available_at_start_of_incremental_ =
- SpaceLeftInOldSpace();
- old_generation_space_used_at_start_of_incremental_ =
- heap_->PromotedTotalSize();
- bytes_rescanned_ = 0;
- marking_speed_ = kInitialMarkingSpeed;
- bytes_scanned_ = 0;
- write_barriers_invoked_since_last_step_ = 0;
-}
-
-
-int64_t IncrementalMarking::SpaceLeftInOldSpace() {
- return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
-}
-
-
bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}
@@ -1284,5 +1207,6 @@ void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
void IncrementalMarking::ClearIdleMarkingDelayCounter() {
idle_marking_delay_counter_ = 0;
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 877f05e0e0..c2290c4d82 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -20,33 +20,18 @@ namespace internal {
class MarkBit;
class PagedSpace;
+enum class StepOrigin { kV8, kTask };
+
class IncrementalMarking {
public:
enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
- enum ForceMarkingAction { FORCE_MARKING, DO_NOT_FORCE_MARKING };
-
enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
- struct StepActions {
- StepActions(CompletionAction complete_action_,
- ForceMarkingAction force_marking_,
- ForceCompletionAction force_completion_)
- : completion_action(complete_action_),
- force_marking(force_marking_),
- force_completion(force_completion_) {}
-
- CompletionAction completion_action;
- ForceMarkingAction force_marking;
- ForceCompletionAction force_completion;
- };
-
- static StepActions IdleStepActions();
-
explicit IncrementalMarking(Heap* heap);
static void Initialize();
@@ -87,11 +72,9 @@ class IncrementalMarking {
bool CanBeActivated();
- bool ShouldActivateEvenWithoutIdleNotification();
-
bool WasActivated();
- void Start(const char* reason = nullptr);
+ void Start(GarbageCollectionReason gc_reason);
void FinalizeIncrementally();
@@ -113,7 +96,9 @@ class IncrementalMarking {
// returns the remaining time that cannot be used for incremental marking
// anymore because a single step would exceed the deadline.
double AdvanceIncrementalMarking(double deadline_in_ms,
- StepActions step_actions);
+ CompletionAction completion_action,
+ ForceCompletionAction force_completion,
+ StepOrigin step_origin);
// It's hard to know how much work the incremental marker should do to make
// progress in the face of the mutator creating new work for it. We start
@@ -121,39 +106,27 @@ class IncrementalMarking {
// incremental marker until it completes.
// Do some marking every time this much memory has been allocated or that many
// heavy (color-checking) write barriers have been invoked.
- static const intptr_t kAllocatedThreshold = 65536;
- static const intptr_t kWriteBarriersInvokedThreshold = 32768;
- // Start off by marking this many times more memory than has been allocated.
- static const intptr_t kInitialMarkingSpeed = 1;
- // But if we are promoting a lot of data we need to mark faster to keep up
- // with the data that is entering the old space through promotion.
- static const intptr_t kFastMarking = 3;
- // After this many steps we increase the marking/allocating factor.
- static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
- // This is how much we increase the marking/allocating factor by.
- static const intptr_t kMarkingSpeedAccelleration = 2;
- static const intptr_t kMaxMarkingSpeed = 1000;
+ static const size_t kAllocatedThreshold = 64 * KB;
+
+ static const int kStepSizeInMs = 1;
+ static const int kMaxStepSizeInMs = 5;
// This is the upper bound for how many times we allow finalization of
// incremental marking to be postponed.
- static const size_t kMaxIdleMarkingDelayCounter = 3;
+ static const int kMaxIdleMarkingDelayCounter = 3;
- void FinalizeSweeping();
+#ifndef DEBUG
+ static const intptr_t kActivationThreshold = 8 * MB;
+#else
+ static const intptr_t kActivationThreshold = 0;
+#endif
- void OldSpaceStep(intptr_t allocated);
+ void FinalizeSweeping();
- intptr_t Step(intptr_t allocated, CompletionAction action,
- ForceMarkingAction marking = DO_NOT_FORCE_MARKING,
- ForceCompletionAction completion = FORCE_COMPLETION);
+ size_t Step(size_t bytes_to_process, CompletionAction action,
+ ForceCompletionAction completion, StepOrigin step_origin);
- inline void RestartIfNotMarking() {
- if (state_ == COMPLETE) {
- state_ = MARKING;
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
- }
- }
- }
+ inline void RestartIfNotMarking();
static void RecordWriteFromCode(HeapObject* obj, Object** slot,
Isolate* isolate);
@@ -173,8 +146,8 @@ class IncrementalMarking {
INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value));
-
- void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
+ V8_EXPORT_PRIVATE void RecordWriteSlow(HeapObject* obj, Object** slot,
+ Object* value);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
@@ -194,8 +167,6 @@ class IncrementalMarking {
void ActivateGeneratedStub(Code* stub);
- void NotifyOfHighPromotionRate();
-
void NotifyIncompleteScanOfObject(int unscanned_bytes) {
unscanned_bytes_of_large_object_ = unscanned_bytes;
}
@@ -244,6 +215,8 @@ class IncrementalMarking {
void StartBlackAllocationForTesting() { StartBlackAllocation(); }
+ void AbortBlackAllocation();
+
private:
class Observer : public AllocationObserver {
public:
@@ -252,8 +225,7 @@ class IncrementalMarking {
incremental_marking_(incremental_marking) {}
void Step(int bytes_allocated, Address, size_t) override {
- incremental_marking_.Step(bytes_allocated,
- IncrementalMarking::GC_VIA_STACK_GUARD);
+ incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
}
private:
@@ -262,10 +234,6 @@ class IncrementalMarking {
int64_t SpaceLeftInOldSpace();
- void SpeedUp();
-
- void ResetStepCounters();
-
void StartMarking();
void StartBlackAllocation();
@@ -301,37 +269,36 @@ class IncrementalMarking {
void IncrementIdleMarkingDelayCounter();
- Heap* heap_;
+ void AdvanceIncrementalMarkingOnAllocation();
- Observer observer_;
+ size_t StepSizeToKeepUpWithAllocations();
+ size_t StepSizeToMakeProgress();
+
+ Heap* heap_;
State state_;
- bool is_compacting_;
- int steps_count_;
- int64_t old_generation_space_available_at_start_of_incremental_;
- int64_t old_generation_space_used_at_start_of_incremental_;
- int64_t bytes_rescanned_;
- bool should_hurry_;
- int marking_speed_;
- intptr_t bytes_scanned_;
- intptr_t allocated_;
- intptr_t write_barriers_invoked_since_last_step_;
- size_t idle_marking_delay_counter_;
+ double start_time_ms_;
+ size_t initial_old_generation_size_;
+ size_t old_generation_allocation_counter_;
+ size_t bytes_allocated_;
+ size_t bytes_marked_ahead_of_schedule_;
+ size_t unscanned_bytes_of_large_object_;
- int unscanned_bytes_of_large_object_;
+ int idle_marking_delay_counter_;
+ int incremental_marking_finalization_rounds_;
+ bool is_compacting_;
+ bool should_hurry_;
bool was_activated_;
-
bool black_allocation_;
-
bool finalize_marking_completed_;
- int incremental_marking_finalization_rounds_;
-
GCRequestType request_type_;
IncrementalMarkingJob incremental_marking_job_;
+ Observer new_generation_observer_;
+ Observer old_generation_observer_;
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 7ead42150b..fe71fb1177 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -14,7 +14,7 @@ namespace internal {
void MarkCompactCollector::PushBlack(HeapObject* obj) {
DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
- if (marking_deque_.Push(obj)) {
+ if (marking_deque()->Push(obj)) {
MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
} else {
MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
@@ -25,7 +25,7 @@ void MarkCompactCollector::PushBlack(HeapObject* obj) {
void MarkCompactCollector::UnshiftBlack(HeapObject* obj) {
DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
- if (!marking_deque_.Unshift(obj)) {
+ if (!marking_deque()->Unshift(obj)) {
MemoryChunk::IncrementLiveBytesFromGC(obj, -obj->Size());
MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
Marking::BlackToGrey(mark_bit);
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 78b4ea8f0e..7e5ef96fc9 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -61,7 +61,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
marking_deque_memory_(NULL),
marking_deque_memory_committed_(0),
code_flusher_(nullptr),
- embedder_heap_tracer_(nullptr),
sweeper_(heap) {
}
@@ -567,6 +566,7 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
}
bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
+ DCHECK(FLAG_concurrent_sweeping);
while (pending_sweeper_tasks_semaphore_.WaitFor(
base::TimeDelta::FromSeconds(0))) {
num_sweeping_tasks_.Increment(-1);
@@ -600,7 +600,7 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
// For memory reducing and optimize for memory mode we directly define both
// constants.
const int kTargetFragmentationPercentForReduceMemory = 20;
- const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
+ const int kMaxEvacuatedBytesForReduceMemory = 12 * MB;
const int kTargetFragmentationPercentForOptimizeMemory = 20;
const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
@@ -608,10 +608,10 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
// defaults to start and switch to a trace-based (using compaction speed)
// approach as soon as we have enough samples.
const int kTargetFragmentationPercent = 70;
- const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
+ const int kMaxEvacuatedBytes = 4 * MB;
// Time to take for a single area (=payload of page). Used as soon as there
// exist enough compaction speed samples.
- const int kTargetMsPerArea = 1;
+ const float kTargetMsPerArea = .5;
if (heap()->ShouldReduceMemory()) {
*target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
@@ -801,13 +801,14 @@ void MarkCompactCollector::Prepare() {
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
heap()->incremental_marking()->Stop();
+ heap()->incremental_marking()->AbortBlackAllocation();
ClearMarkbits();
AbortWeakCollections();
AbortWeakCells();
AbortTransitionArrays();
AbortCompaction();
if (heap_->UsingEmbedderHeapTracer()) {
- heap_->mark_compact_collector()->embedder_heap_tracer()->AbortTracing();
+ heap_->embedder_heap_tracer()->AbortTracing();
}
was_marked_incrementally_ = false;
}
@@ -815,12 +816,13 @@ void MarkCompactCollector::Prepare() {
if (!was_marked_incrementally_) {
if (heap_->UsingEmbedderHeapTracer()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
- heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
+ heap_->embedder_heap_tracer()->TracePrologue(
+ heap_->embedder_reachable_reference_reporter());
}
}
- if (UsingEmbedderHeapTracer()) {
- embedder_heap_tracer()->EnterFinalPause();
+ if (heap_->UsingEmbedderHeapTracer()) {
+ heap_->embedder_heap_tracer()->EnterFinalPause();
}
// Don't start compaction if we are in the middle of incremental
@@ -1244,7 +1246,7 @@ class MarkCompactMarkingVisitor
Heap* heap = map->GetHeap();
MarkCompactCollector* collector = heap->mark_compact_collector();
if (!collector->is_code_flushing_enabled()) {
- VisitJSRegExp(map, object);
+ JSObjectVisitor::Visit(map, object);
return;
}
JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
@@ -1252,7 +1254,7 @@ class MarkCompactMarkingVisitor
UpdateRegExpCodeAgeAndFlush(heap, re, true);
UpdateRegExpCodeAgeAndFlush(heap, re, false);
// Visit the fields of the RegExp, including the updated FixedArray.
- VisitJSRegExp(map, object);
+ JSObjectVisitor::Visit(map, object);
}
};
@@ -1975,7 +1977,7 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
MarkStringTable(visitor);
// There may be overflowed objects in the heap. Visit them now.
- while (marking_deque_.overflowed()) {
+ while (marking_deque()->overflowed()) {
RefillMarkingDeque();
EmptyMarkingDeque();
}
@@ -2018,8 +2020,8 @@ void MarkCompactCollector::MarkImplicitRefGroups(
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingDeque() {
- while (!marking_deque_.IsEmpty()) {
- HeapObject* object = marking_deque_.Pop();
+ while (!marking_deque()->IsEmpty()) {
+ HeapObject* object = marking_deque()->Pop();
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
@@ -2042,25 +2044,25 @@ void MarkCompactCollector::EmptyMarkingDeque() {
// is cleared.
void MarkCompactCollector::RefillMarkingDeque() {
isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
- DCHECK(marking_deque_.overflowed());
+ DCHECK(marking_deque()->overflowed());
DiscoverGreyObjectsInNewSpace();
- if (marking_deque_.IsFull()) return;
+ if (marking_deque()->IsFull()) return;
DiscoverGreyObjectsInSpace(heap()->old_space());
- if (marking_deque_.IsFull()) return;
+ if (marking_deque()->IsFull()) return;
DiscoverGreyObjectsInSpace(heap()->code_space());
- if (marking_deque_.IsFull()) return;
+ if (marking_deque()->IsFull()) return;
DiscoverGreyObjectsInSpace(heap()->map_space());
- if (marking_deque_.IsFull()) return;
+ if (marking_deque()->IsFull()) return;
LargeObjectIterator lo_it(heap()->lo_space());
DiscoverGreyObjectsWithIterator(&lo_it);
- if (marking_deque_.IsFull()) return;
+ if (marking_deque()->IsFull()) return;
- marking_deque_.ClearOverflowed();
+ marking_deque()->ClearOverflowed();
}
@@ -2070,7 +2072,7 @@ void MarkCompactCollector::RefillMarkingDeque() {
// objects in the heap.
void MarkCompactCollector::ProcessMarkingDeque() {
EmptyMarkingDeque();
- while (marking_deque_.overflowed()) {
+ while (marking_deque()->overflowed()) {
RefillMarkingDeque();
EmptyMarkingDeque();
}
@@ -2080,13 +2082,13 @@ void MarkCompactCollector::ProcessMarkingDeque() {
// stack including references only considered in the atomic marking pause.
void MarkCompactCollector::ProcessEphemeralMarking(
ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
- DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
+ DCHECK(marking_deque()->IsEmpty() && !marking_deque()->overflowed());
bool work_to_do = true;
while (work_to_do) {
- if (UsingEmbedderHeapTracer()) {
+ if (heap_->UsingEmbedderHeapTracer()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
- RegisterWrappersWithEmbedderHeapTracer();
- embedder_heap_tracer()->AdvanceTracing(
+ heap_->RegisterWrappersWithEmbedderHeapTracer();
+ heap_->embedder_heap_tracer()->AdvanceTracing(
0, EmbedderHeapTracer::AdvanceTracingActions(
EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
}
@@ -2097,7 +2099,7 @@ void MarkCompactCollector::ProcessEphemeralMarking(
MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
}
ProcessWeakCollections();
- work_to_do = !marking_deque_.IsEmpty();
+ work_to_do = !marking_deque()->IsEmpty();
ProcessMarkingDeque();
}
}
@@ -2121,7 +2123,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
- DCHECK(!marking_deque_.in_use());
+ DCHECK(!marking_deque()->in_use());
if (marking_deque_memory_ == NULL) {
marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
marking_deque_memory_committed_ = 0;
@@ -2135,7 +2137,7 @@ void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
// If the marking deque is too small, we try to allocate a bigger one.
// If that fails, make do with a smaller one.
- CHECK(!marking_deque_.in_use());
+ CHECK(!marking_deque()->in_use());
for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
base::VirtualMemory* memory = marking_deque_memory_;
size_t currently_committed = marking_deque_memory_committed_;
@@ -2167,12 +2169,12 @@ void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
void MarkCompactCollector::InitializeMarkingDeque() {
- DCHECK(!marking_deque_.in_use());
+ DCHECK(!marking_deque()->in_use());
DCHECK(marking_deque_memory_committed_ > 0);
Address addr = static_cast<Address>(marking_deque_memory_->address());
size_t size = marking_deque_memory_committed_;
if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
- marking_deque_.Initialize(addr, addr + size);
+ marking_deque()->Initialize(addr, addr + size);
}
@@ -2200,34 +2202,6 @@ void MarkingDeque::Uninitialize(bool aborting) {
in_use_ = false;
}
-void MarkCompactCollector::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
- DCHECK_NOT_NULL(tracer);
- CHECK_NULL(embedder_heap_tracer_);
- embedder_heap_tracer_ = tracer;
-}
-
-void MarkCompactCollector::RegisterWrappersWithEmbedderHeapTracer() {
- DCHECK(UsingEmbedderHeapTracer());
- if (wrappers_to_trace_.empty()) {
- return;
- }
- embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
- wrappers_to_trace_.clear();
-}
-
-void MarkCompactCollector::TracePossibleWrapper(JSObject* js_object) {
- DCHECK(js_object->WasConstructedFromApiFunction());
- if (js_object->GetInternalFieldCount() >= 2 &&
- js_object->GetInternalField(0) &&
- js_object->GetInternalField(0) != heap_->undefined_value() &&
- js_object->GetInternalField(1) != heap_->undefined_value()) {
- DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
- wrappers_to_trace_.push_back(std::pair<void*, void*>(
- reinterpret_cast<void*>(js_object->GetInternalField(0)),
- reinterpret_cast<void*>(js_object->GetInternalField(1))));
- }
-}
-
class MarkCompactCollector::ObjectStatsVisitor
: public MarkCompactCollector::HeapObjectVisitor {
public:
@@ -2259,8 +2233,9 @@ void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
SpaceIterator space_it(heap());
HeapObject* obj = nullptr;
while (space_it.has_next()) {
- ObjectIterator* it = space_it.next();
- while ((obj = it->Next()) != nullptr) {
+ std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
+ ObjectIterator* obj_it = it.get();
+ while ((obj = obj_it->Next()) != nullptr) {
visitor->Visit(obj);
}
}
@@ -2271,6 +2246,13 @@ void MarkCompactCollector::RecordObjectStats() {
ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
heap()->dead_object_stats_);
VisitAllObjects(&visitor);
+ std::stringstream live, dead;
+ heap()->live_object_stats_->Dump(live);
+ heap()->dead_object_stats_->Dump(dead);
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
+ "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
+ "live", TRACE_STR_COPY(live.str().c_str()), "dead",
+ TRACE_STR_COPY(dead.str().c_str()));
if (FLAG_trace_gc_object_stats) {
heap()->live_object_stats_->PrintJSON("live");
heap()->dead_object_stats_->PrintJSON("dead");
@@ -2282,10 +2264,6 @@ void MarkCompactCollector::RecordObjectStats() {
void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
- double start_time = 0.0;
- if (FLAG_print_cumulative_gc_stat) {
- start_time = heap_->MonotonicallyIncreasingTimeInMs();
- }
// The recursive GC marker detects when it is nearing stack overflow,
// and switches to a different marking system. JS interrupts interfere
// with the C stack limit check.
@@ -2299,8 +2277,8 @@ void MarkCompactCollector::MarkLiveObjects() {
} else {
// Abort any pending incremental activities e.g. incremental sweeping.
incremental_marking->Stop();
- if (marking_deque_.in_use()) {
- marking_deque_.Uninitialize(true);
+ if (marking_deque()->in_use()) {
+ marking_deque()->Uninitialize(true);
}
}
}
@@ -2369,17 +2347,12 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeralMarking(&root_visitor, true);
- if (UsingEmbedderHeapTracer()) {
+ if (heap_->UsingEmbedderHeapTracer()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
- embedder_heap_tracer()->TraceEpilogue();
+ heap()->embedder_heap_tracer()->TraceEpilogue();
}
}
}
-
- if (FLAG_print_cumulative_gc_stat) {
- heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
- start_time);
- }
}
@@ -3079,8 +3052,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
explicit Evacuator(MarkCompactCollector* collector)
: collector_(collector),
compaction_spaces_(collector->heap()),
- local_pretenuring_feedback_(base::HashMap::PointersMatch,
- kInitialLocalPretenuringFeedbackCapacity),
+ local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(collector->heap(), &compaction_spaces_,
&local_pretenuring_feedback_),
new_space_page_visitor(collector->heap()),
@@ -3221,7 +3193,7 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
// The number of parallel compaction tasks is limited by:
// - #evacuation pages
// - (#cores - 1)
- const double kTargetCompactionTimeInMs = 1;
+ const double kTargetCompactionTimeInMs = .5;
const int kNumSweepingTasks = 3;
double compaction_speed =
@@ -3299,10 +3271,11 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
job.AddPage(page, &abandoned_pages);
}
+ const bool reduce_memory = heap()->ShouldReduceMemory();
const Address age_mark = heap()->new_space()->age_mark();
for (Page* page : newspace_evacuation_candidates_) {
live_bytes += page->LiveBytes();
- if (!page->NeverEvacuate() &&
+ if (!reduce_memory && !page->NeverEvacuate() &&
(page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
!page->Contains(age_mark)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
@@ -3700,20 +3673,10 @@ int NumberOfPointerUpdateTasks(int pages) {
template <PointerDirection direction>
void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
- // Work-around bug in clang-3.4
- // https://github.com/nodejs/node/issues/8323
- struct MemoryChunkVisitor {
- PageParallelJob<PointerUpdateJobTraits<direction> >& job_;
- MemoryChunkVisitor(PageParallelJob<PointerUpdateJobTraits<direction> >& job)
- : job_(job) {}
- void operator()(MemoryChunk* chunk) {
- job_.AddPage(chunk, 0);
- }
- };
-
PageParallelJob<PointerUpdateJobTraits<direction> > job(
heap, heap->isolate()->cancelable_task_manager(), semaphore);
- RememberedSet<direction>::IterateMemoryChunks(heap, MemoryChunkVisitor(job));
+ RememberedSet<direction>::IterateMemoryChunks(
+ heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
int num_pages = job.NumberOfPages();
int num_tasks = NumberOfPointerUpdateTasks(num_pages);
job.Run(num_tasks, [](int i) { return 0; });
@@ -3868,6 +3831,15 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
} else {
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
}
+
+ // After finishing sweeping of a page we clean up its remembered set.
+ if (page->typed_old_to_new_slots()) {
+ page->typed_old_to_new_slots()->FreeToBeFreedChunks();
+ }
+ if (page->old_to_new_slots()) {
+ page->old_to_new_slots()->FreeToBeFreedBuckets();
+ }
+
{
base::LockGuard<base::Mutex> guard(&mutex_);
swept_list_[identity].Add(page);
@@ -3974,11 +3946,6 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
void MarkCompactCollector::SweepSpaces() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
- double start_time = 0.0;
- if (FLAG_print_cumulative_gc_stat) {
- start_time = heap_->MonotonicallyIncreasingTimeInMs();
- }
-
#ifdef DEBUG
state_ = SWEEP_SPACES;
#endif
@@ -4004,11 +3971,6 @@ void MarkCompactCollector::SweepSpaces() {
// Deallocate unmarked large objects.
heap_->lo_space()->FreeUnmarkedObjects();
-
- if (FLAG_print_cumulative_gc_stat) {
- heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
- start_time);
- }
}
Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index b2c637bc63..2cbb369f76 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -467,7 +467,7 @@ class MarkCompactCollector {
static const size_t kMinMarkingDequeSize = 256 * KB;
void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
- if (!marking_deque_.in_use()) {
+ if (!marking_deque()->in_use()) {
EnsureMarkingDequeIsCommitted(max_size);
InitializeMarkingDeque();
}
@@ -490,16 +490,6 @@ class MarkCompactCollector {
Sweeper& sweeper() { return sweeper_; }
- void RegisterWrappersWithEmbedderHeapTracer();
-
- void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
-
- EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
-
- bool UsingEmbedderHeapTracer() { return embedder_heap_tracer(); }
-
- void TracePossibleWrapper(JSObject* js_object);
-
private:
class EvacuateNewSpacePageVisitor;
class EvacuateNewSpaceVisitor;
@@ -739,12 +729,9 @@ class MarkCompactCollector {
base::VirtualMemory* marking_deque_memory_;
size_t marking_deque_memory_committed_;
MarkingDeque marking_deque_;
- std::vector<std::pair<void*, void*>> wrappers_to_trace_;
CodeFlusher* code_flusher_;
- EmbedderHeapTracer* embedder_heap_tracer_;
-
List<Page*> evacuation_candidates_;
List<Page*> newspace_evacuation_candidates_;
@@ -768,8 +755,7 @@ class EvacuationScope BASE_EMBEDDED {
MarkCompactCollector* collector_;
};
-
-const char* AllocationSpaceName(AllocationSpace space);
+V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 699e10e603..ba9010e7bc 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -73,7 +73,8 @@ void MemoryReducer::NotifyTimer(const Event& event) {
PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n",
state_.started_gcs);
}
- heap()->StartIdleIncrementalMarking();
+ heap()->StartIdleIncrementalMarking(
+ GarbageCollectionReason::kMemoryReducer);
} else if (state_.action == kWait) {
if (!heap()->incremental_marking()->IsStopped() &&
heap()->ShouldOptimizeForMemoryUsage()) {
@@ -84,12 +85,10 @@ void MemoryReducer::NotifyTimer(const Event& event) {
double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
kIncrementalMarkingDelayMs;
heap()->incremental_marking()->AdvanceIncrementalMarking(
- deadline, i::IncrementalMarking::StepActions(
- i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- i::IncrementalMarking::FORCE_MARKING,
- i::IncrementalMarking::FORCE_COMPLETION));
+ deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
heap()->FinalizeIncrementalMarkingIfComplete(
- "Memory reducer: finalize incremental marking");
+ GarbageCollectionReason::kFinalizeMarkingViaTask);
}
// Re-schedule the timer.
ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 3f43212151..6e4b50ec24 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -42,6 +42,16 @@ V8_NOINLINE static void PrintJSONArray(size_t* array, const int len) {
PrintF(" ]");
}
+V8_NOINLINE static void DumpJSONArray(std::stringstream& stream, size_t* array,
+ const int len) {
+ stream << "[";
+ for (int i = 0; i < len; i++) {
+ stream << array[i];
+ if (i != (len - 1)) stream << ",";
+ }
+ stream << "]";
+}
+
void ObjectStats::PrintJSON(const char* key) {
double time = isolate()->time_millis_since_init();
int gc_count = heap()->gc_count();
@@ -102,6 +112,60 @@ void ObjectStats::PrintJSON(const char* key) {
#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
#undef CODE_AGE_WRAPPER
#undef PRINT_INSTANCE_TYPE_DATA
+#undef PRINT_KEY_AND_ID
+}
+
+void ObjectStats::Dump(std::stringstream& stream) {
+ double time = isolate()->time_millis_since_init();
+ int gc_count = heap()->gc_count();
+
+ stream << "{";
+ stream << "\"isolate\":\"" << reinterpret_cast<void*>(isolate()) << "\",";
+ stream << "\"id\":" << gc_count << ",";
+ stream << "\"time\":" << time << ",";
+ stream << "\"bucket_sizes\":[";
+ for (int i = 0; i < kNumberOfBuckets; i++) {
+ stream << (1 << (kFirstBucketShift + i));
+ if (i != (kNumberOfBuckets - 1)) stream << ",";
+ }
+ stream << "],";
+ stream << "\"type_data\":{";
+
+#define PRINT_INSTANCE_TYPE_DATA(name, index) \
+ stream << "\"" << name << "\":{"; \
+ stream << "\"type\":" << static_cast<int>(index) << ","; \
+ stream << "\"overall\":" << object_sizes_[index] << ","; \
+ stream << "\"count\":" << object_counts_[index] << ","; \
+ stream << "\"over_allocated\":" << over_allocated_[index] << ","; \
+ stream << "\"histogram\":"; \
+ DumpJSONArray(stream, size_histogram_[index], kNumberOfBuckets); \
+ stream << ",\"over_allocated_histogram\":"; \
+ DumpJSONArray(stream, over_allocated_histogram_[index], kNumberOfBuckets); \
+ stream << "},";
+
+#define INSTANCE_TYPE_WRAPPER(name) PRINT_INSTANCE_TYPE_DATA(#name, name)
+#define CODE_KIND_WRAPPER(name) \
+ PRINT_INSTANCE_TYPE_DATA("*CODE_" #name, \
+ FIRST_CODE_KIND_SUB_TYPE + Code::name)
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
+ PRINT_INSTANCE_TYPE_DATA("*FIXED_ARRAY_" #name, \
+ FIRST_FIXED_ARRAY_SUB_TYPE + name)
+#define CODE_AGE_WRAPPER(name) \
+ PRINT_INSTANCE_TYPE_DATA( \
+ "*CODE_AGE_" #name, \
+ FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge)
+
+ INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER);
+ CODE_KIND_LIST(CODE_KIND_WRAPPER);
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER);
+ CODE_AGE_LIST_COMPLETE(CODE_AGE_WRAPPER);
+ stream << "\"END\":{}}}";
+
+#undef INSTANCE_TYPE_WRAPPER
+#undef CODE_KIND_WRAPPER
+#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
+#undef CODE_AGE_WRAPPER
+#undef PRINT_INSTANCE_TYPE_DATA
}
void ObjectStats::CheckpointObjectStats() {
@@ -246,8 +310,6 @@ void ObjectStatsCollector::CollectGlobalStatistics() {
OBJECT_TO_CODE_SUB_TYPE);
RecordHashTableHelper(nullptr, heap_->code_stubs(),
CODE_STUBS_TABLE_SUB_TYPE);
- RecordHashTableHelper(nullptr, heap_->intrinsic_function_names(),
- INTRINSIC_FUNCTION_NAMES_SUB_TYPE);
RecordHashTableHelper(nullptr, heap_->empty_properties_dictionary(),
EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE);
CompilationCache* compilation_cache = heap_->isolate()->compilation_cache();
@@ -447,9 +509,11 @@ void ObjectStatsCollector::RecordCodeDetails(Code* code) {
if (code->kind() == Code::Kind::OPTIMIZED_FUNCTION) {
DeoptimizationInputData* input_data =
DeoptimizationInputData::cast(code->deoptimization_data());
- RecordFixedArrayHelper(code->deoptimization_data(),
- input_data->LiteralArray(),
- OPTIMIZED_CODE_LITERALS_SUB_TYPE, 0);
+ if (input_data->length() > 0) {
+ RecordFixedArrayHelper(code->deoptimization_data(),
+ input_data->LiteralArray(),
+ OPTIMIZED_CODE_LITERALS_SUB_TYPE, 0);
+ }
}
RecordFixedArrayHelper(code, code->handler_table(), HANDLER_TABLE_SUB_TYPE,
0);
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 4780696952..add5a12b04 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -35,6 +35,7 @@ class ObjectStats {
void CheckpointObjectStats();
void PrintJSON(const char* key);
+ void Dump(std::stringstream& stream);
void RecordObjectStats(InstanceType type, size_t size) {
DCHECK(type <= LAST_TYPE);
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 148975f630..252b2fe5e2 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -147,11 +147,17 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitNativeContext, &VisitNativeContext);
- table_.Register(kVisitAllocationSite, &VisitAllocationSite);
+ table_.Register(
+ kVisitAllocationSite,
+ &FixedBodyVisitor<StaticVisitor, AllocationSite::MarkingBodyDescriptor,
+ void>::Visit);
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
- table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
+ table_.Register(
+ kVisitBytecodeArray,
+ &FixedBodyVisitor<StaticVisitor, BytecodeArray::MarkingBodyDescriptor,
+ void>::Visit);
table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
@@ -178,13 +184,15 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
&FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
void>::Visit);
- // Registration for kVisitJSRegExp is done by StaticVisitor.
+ table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
table_.Register(
kVisitCell,
&FixedBodyVisitor<StaticVisitor, Cell::BodyDescriptor, void>::Visit);
- table_.Register(kVisitPropertyCell, &VisitPropertyCell);
+ table_.Register(kVisitPropertyCell,
+ &FixedBodyVisitor<StaticVisitor, PropertyCell::BodyDescriptor,
+ void>::Visit);
table_.Register(kVisitWeakCell, &VisitWeakCell);
@@ -319,19 +327,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitMap(Map* map,
}
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell(
- Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
-
- StaticVisitor::VisitPointers(
- heap, object,
- HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset),
- HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset));
-}
-
-
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitWeakCell(Map* map,
HeapObject* object) {
@@ -384,19 +379,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitTransitionArray(
}
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
- Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
-
- StaticVisitor::VisitPointers(
- heap, object,
- HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset),
- HeapObject::RawField(object, AllocationSite::kPointerFieldsEndOffset));
-}
-
-
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
Map* map, HeapObject* object) {
@@ -467,11 +449,11 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
// optimized code.
collector->code_flusher()->AddCandidate(shared);
// Treat the reference to the code object weakly.
- VisitSharedFunctionInfoWeakCode(heap, object);
+ VisitSharedFunctionInfoWeakCode(map, object);
return;
}
}
- VisitSharedFunctionInfoStrongCode(heap, object);
+ VisitSharedFunctionInfoStrongCode(map, object);
}
@@ -504,23 +486,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
VisitJSFunctionStrongCode(map, object);
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(Map* map,
- HeapObject* object) {
- JSObjectVisitor::Visit(map, object);
-}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
- Map* map, HeapObject* object) {
- StaticVisitor::VisitPointers(
- map->GetHeap(), object,
- HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
- HeapObject::RawField(object, BytecodeArray::kFrameSizeOffset));
-}
-
-
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
Map* map) {
@@ -623,7 +588,7 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
// We do not (yet?) flush code for generator functions, or async functions,
// because we don't know if there are still live activations
// (generator objects) on the heap.
- if (shared_info->is_resumable()) {
+ if (IsResumableFunction(shared_info->kind())) {
return false;
}
@@ -656,39 +621,23 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
return true;
}
-
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
- Heap* heap, HeapObject* object) {
- Object** start_slot = HeapObject::RawField(
- object, SharedFunctionInfo::BodyDescriptor::kStartOffset);
- Object** end_slot = HeapObject::RawField(
- object, SharedFunctionInfo::BodyDescriptor::kEndOffset);
- StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
+ Map* map, HeapObject* object) {
+ FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptor,
+ void>::Visit(map, object);
}
-
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
- Heap* heap, HeapObject* object) {
- Object** name_slot =
- HeapObject::RawField(object, SharedFunctionInfo::kNameOffset);
- StaticVisitor::VisitPointer(heap, object, name_slot);
-
+ Map* map, HeapObject* object) {
// Skip visiting kCodeOffset as it is treated weakly here.
- STATIC_ASSERT(SharedFunctionInfo::kNameOffset + kPointerSize ==
- SharedFunctionInfo::kCodeOffset);
- STATIC_ASSERT(SharedFunctionInfo::kCodeOffset + kPointerSize ==
- SharedFunctionInfo::kOptimizedCodeMapOffset);
-
- Object** start_slot =
- HeapObject::RawField(object, SharedFunctionInfo::kOptimizedCodeMapOffset);
- Object** end_slot = HeapObject::RawField(
- object, SharedFunctionInfo::BodyDescriptor::kEndOffset);
- StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
+ STATIC_ASSERT(SharedFunctionInfo::kCodeOffset <
+ SharedFunctionInfo::BodyDescriptorWeakCode::kStartOffset);
+ FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptorWeakCode,
+ void>::Visit(map, object);
}
-
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
Map* map, HeapObject* object) {
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 83e2e1c820..9393fcc615 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -107,7 +107,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_ARGUMENTS_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
- case JS_MODULE_TYPE:
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
case JS_ARRAY_TYPE:
@@ -120,6 +119,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_MAP_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
+ case JS_STRING_ITERATOR_TYPE:
case JS_PROMISE_TYPE:
case JS_BOUND_FUNCTION_TYPE:
return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 303db0eb07..633c277eb0 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -132,7 +132,7 @@ class StaticVisitorBase : public AllStatic {
(base == kVisitJSObject) || (base == kVisitJSApiObject));
DCHECK(IsAligned(object_size, kPointerSize));
DCHECK(Heap::kMinObjectSizeInWords * kPointerSize <= object_size);
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK(!has_unboxed_fields || (base == kVisitJSObject) ||
(base == kVisitJSApiObject));
@@ -354,7 +354,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
table_.GetVisitor(map)(map, obj);
}
- INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
INLINE(static void VisitWeakCell(Map* map, HeapObject* object));
INLINE(static void VisitTransitionArray(Map* map, HeapObject* object));
INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
@@ -374,12 +373,9 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitMap(Map* map, HeapObject* object));
INLINE(static void VisitCode(Map* map, HeapObject* object));
INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
- INLINE(static void VisitAllocationSite(Map* map, HeapObject* object));
INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
- INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
- INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
// Mark pointers in a Map treating some elements of the descriptor array weak.
static void MarkMapContents(Heap* heap, Map* map);
@@ -390,8 +386,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Helpers used by code flushing support that visit pointer fields and treat
// references to code objects either strongly or weakly.
- static void VisitSharedFunctionInfoStrongCode(Heap* heap, HeapObject* object);
- static void VisitSharedFunctionInfoWeakCode(Heap* heap, HeapObject* object);
+ static void VisitSharedFunctionInfoStrongCode(Map* map, HeapObject* object);
+ static void VisitSharedFunctionInfoWeakCode(Map* map, HeapObject* object);
static void VisitJSFunctionStrongCode(Map* map, HeapObject* object);
static void VisitJSFunctionWeakCode(Map* map, HeapObject* object);
diff --git a/deps/v8/src/heap/remembered-set.cc b/deps/v8/src/heap/remembered-set.cc
index 6575d55d52..c5dab90515 100644
--- a/deps/v8/src/heap/remembered-set.cc
+++ b/deps/v8/src/heap/remembered-set.cc
@@ -20,10 +20,12 @@ void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
for (MemoryChunk* chunk : *heap->old_space()) {
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
- slots->Iterate([heap, chunk](Address addr) {
- Object** slot = reinterpret_cast<Object**>(addr);
- return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
- });
+ slots->Iterate(
+ [heap, chunk](Address addr) {
+ Object** slot = reinterpret_cast<Object**>(addr);
+ return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
}
}
for (MemoryChunk* chunk : *heap->code_space()) {
@@ -36,20 +38,24 @@ void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
} else {
return REMOVE_SLOT;
}
- });
+ },
+ TypedSlotSet::PREFREE_EMPTY_CHUNKS);
}
}
for (MemoryChunk* chunk : *heap->map_space()) {
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
- slots->Iterate([heap, chunk](Address addr) {
- Object** slot = reinterpret_cast<Object**>(addr);
- // TODO(mlippautz): In map space all allocations would ideally be map
- // aligned. After establishing this invariant IsValidSlot could just
- // refer to the containing object using alignment and check the mark
- // bits.
- return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
- });
+ slots->Iterate(
+ [heap, chunk](Address addr) {
+ Object** slot = reinterpret_cast<Object**>(addr);
+ // TODO(mlippautz): In map space all allocations would ideally be
+ // map
+ // aligned. After establishing this invariant IsValidSlot could just
+ // refer to the containing object using alignment and check the mark
+ // bits.
+ return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
}
}
}
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 8022d52775..74791b926b 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -116,10 +116,13 @@ class RememberedSet {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
int new_count = 0;
for (size_t page = 0; page < pages; page++) {
- new_count += slots[page].Iterate(callback);
+ new_count +=
+ slots[page].Iterate(callback, SlotSet::PREFREE_EMPTY_BUCKETS);
}
- if (new_count == 0) {
- ReleaseSlotSet(chunk);
+ // Only old-to-old slot sets are released eagerly. Old-new-slot sets are
+ // released by the sweeper threads.
+ if (direction == OLD_TO_OLD && new_count == 0) {
+ chunk->ReleaseOldToOldSlots();
}
}
}
@@ -149,10 +152,13 @@ class RememberedSet {
static void RemoveRangeTyped(MemoryChunk* page, Address start, Address end) {
TypedSlotSet* slots = GetTypedSlotSet(page);
if (slots != nullptr) {
- slots->Iterate([start, end](SlotType slot_type, Address host_addr,
- Address slot_addr) {
- return start <= slot_addr && slot_addr < end ? REMOVE_SLOT : KEEP_SLOT;
- });
+ slots->Iterate(
+ [start, end](SlotType slot_type, Address host_addr,
+ Address slot_addr) {
+ return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
+ : KEEP_SLOT;
+ },
+ TypedSlotSet::PREFREE_EMPTY_CHUNKS);
}
}
@@ -173,7 +179,7 @@ class RememberedSet {
static void IterateTyped(MemoryChunk* chunk, Callback callback) {
TypedSlotSet* slots = GetTypedSlotSet(chunk);
if (slots != nullptr) {
- int new_count = slots->Iterate(callback);
+ int new_count = slots->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS);
if (new_count == 0) {
ReleaseTypedSlotSet(chunk);
}
@@ -216,19 +222,9 @@ class RememberedSet {
}
}
- static void ReleaseSlotSet(MemoryChunk* chunk) {
- if (direction == OLD_TO_OLD) {
- chunk->ReleaseOldToOldSlots();
- } else {
- chunk->ReleaseOldToNewSlots();
- }
- }
-
static void ReleaseTypedSlotSet(MemoryChunk* chunk) {
if (direction == OLD_TO_OLD) {
chunk->ReleaseTypedOldToOldSlots();
- } else {
- chunk->ReleaseTypedOldToNewSlots();
}
}
@@ -363,7 +359,7 @@ class UpdateTypedSlotHelper {
case OBJECT_SLOT: {
return callback(reinterpret_cast<Object**>(addr));
}
- case NUMBER_OF_SLOT_TYPES:
+ case CLEARED_SLOT:
break;
}
UNREACHABLE();
@@ -382,7 +378,7 @@ inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
return DEBUG_TARGET_SLOT;
}
UNREACHABLE();
- return NUMBER_OF_SLOT_TYPES;
+ return CLEARED_SLOT;
}
} // namespace internal
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index d89c9453c5..66d4307b6c 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -34,7 +34,7 @@ void ScavengeJob::IdleTask::RunInternal(double deadline_in_seconds) {
new_space_capacity)) {
if (EnoughIdleTimeForScavenge(
idle_time_in_ms, scavenge_speed_in_bytes_per_ms, new_space_size)) {
- heap->CollectGarbage(NEW_SPACE, "idle task: scavenge");
+ heap->CollectGarbage(NEW_SPACE, GarbageCollectionReason::kIdleTask);
} else {
// Immediately request another idle task that can get larger idle time.
job_->RescheduleIdleTask(heap);
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index 651af88bf8..017667b482 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -5,7 +5,10 @@
#ifndef V8_SLOT_SET_H
#define V8_SLOT_SET_H
+#include <stack>
+
#include "src/allocation.h"
+#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/utils.h"
@@ -22,9 +25,11 @@ enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet : public Malloced {
public:
+ enum IterationMode { PREFREE_EMPTY_BUCKETS, KEEP_EMPTY_BUCKETS };
+
SlotSet() {
for (int i = 0; i < kBuckets; i++) {
- bucket[i] = nullptr;
+ bucket[i].SetValue(nullptr);
}
}
@@ -32,30 +37,38 @@ class SlotSet : public Malloced {
for (int i = 0; i < kBuckets; i++) {
ReleaseBucket(i);
}
+ FreeToBeFreedBuckets();
}
void SetPageStart(Address page_start) { page_start_ = page_start; }
// The slot offset specifies a slot at address page_start_ + slot_offset.
+ // This method should only be called on the main thread because concurrent
+ // allocation of the bucket is not thread-safe.
void Insert(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- if (bucket[bucket_index] == nullptr) {
- bucket[bucket_index] = AllocateBucket();
+ base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
+ if (current_bucket == nullptr) {
+ current_bucket = AllocateBucket();
+ bucket[bucket_index].SetValue(current_bucket);
+ }
+ if (!(current_bucket[cell_index].Value() & (1u << bit_index))) {
+ current_bucket[cell_index].SetBit(bit_index);
}
- bucket[bucket_index][cell_index] |= 1u << bit_index;
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
void Remove(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- if (bucket[bucket_index] != nullptr) {
- uint32_t cell = bucket[bucket_index][cell_index];
+ base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
+ if (current_bucket != nullptr) {
+ uint32_t cell = current_bucket[cell_index].Value();
if (cell) {
uint32_t bit_mask = 1u << bit_index;
if (cell & bit_mask) {
- bucket[bucket_index][cell_index] ^= bit_mask;
+ current_bucket[cell_index].ClearBit(bit_index);
}
}
}
@@ -73,17 +86,17 @@ class SlotSet : public Malloced {
uint32_t start_mask = (1u << start_bit) - 1;
uint32_t end_mask = ~((1u << end_bit) - 1);
if (start_bucket == end_bucket && start_cell == end_cell) {
- MaskCell(start_bucket, start_cell, start_mask | end_mask);
+ ClearCell(start_bucket, start_cell, ~(start_mask | end_mask));
return;
}
int current_bucket = start_bucket;
int current_cell = start_cell;
- MaskCell(current_bucket, current_cell, start_mask);
+ ClearCell(current_bucket, current_cell, ~start_mask);
current_cell++;
if (current_bucket < end_bucket) {
- if (bucket[current_bucket] != nullptr) {
+ if (bucket[current_bucket].Value() != nullptr) {
while (current_cell < kCellsPerBucket) {
- bucket[current_bucket][current_cell] = 0;
+ bucket[current_bucket].Value()[current_cell].SetValue(0);
current_cell++;
}
}
@@ -100,24 +113,25 @@ class SlotSet : public Malloced {
}
// All buckets between start_bucket and end_bucket are cleared.
DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
- if (current_bucket == kBuckets || bucket[current_bucket] == nullptr) {
+ if (current_bucket == kBuckets ||
+ bucket[current_bucket].Value() == nullptr) {
return;
}
while (current_cell < end_cell) {
- bucket[current_bucket][current_cell] = 0;
+ bucket[current_bucket].Value()[current_cell].SetValue(0);
current_cell++;
}
// All cells between start_cell and end_cell are cleared.
DCHECK(current_bucket == end_bucket && current_cell == end_cell);
- MaskCell(end_bucket, end_cell, end_mask);
+ ClearCell(end_bucket, end_cell, ~end_mask);
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
bool Lookup(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- if (bucket[bucket_index] != nullptr) {
- uint32_t cell = bucket[bucket_index][cell_index];
+ if (bucket[bucket_index].Value() != nullptr) {
+ uint32_t cell = bucket[bucket_index].Value()[cell_index].Value();
return (cell & (1u << bit_index)) != 0;
}
return false;
@@ -126,6 +140,7 @@ class SlotSet : public Malloced {
// Iterate over all slots in the set and for each slot invoke the callback.
// If the callback returns REMOVE_SLOT then the slot is removed from the set.
// Returns the new number of slots.
+ // This method should only be called on the main thread.
//
// Sample usage:
// Iterate([](Address slot_address) {
@@ -133,16 +148,17 @@ class SlotSet : public Malloced {
// else return REMOVE_SLOT;
// });
template <typename Callback>
- int Iterate(Callback callback) {
+ int Iterate(Callback callback, IterationMode mode) {
int new_count = 0;
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
- if (bucket[bucket_index] != nullptr) {
+ if (bucket[bucket_index].Value() != nullptr) {
int in_bucket_count = 0;
- uint32_t* current_bucket = bucket[bucket_index];
+ base::AtomicValue<uint32_t>* current_bucket =
+ bucket[bucket_index].Value();
int cell_offset = bucket_index * kBitsPerBucket;
for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
- if (current_bucket[i]) {
- uint32_t cell = current_bucket[i];
+ if (current_bucket[i].Value()) {
+ uint32_t cell = current_bucket[i].Value();
uint32_t old_cell = cell;
uint32_t new_cell = cell;
while (cell) {
@@ -157,12 +173,24 @@ class SlotSet : public Malloced {
cell ^= bit_mask;
}
if (old_cell != new_cell) {
- current_bucket[i] = new_cell;
+ while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
+ // If TrySetValue fails, the cell must have changed. We just
+ // have to read the current value of the cell, & it with the
+ // computed value, and retry. We can do this, because this
+ // method will only be called on the main thread and filtering
+ // threads will only remove slots.
+ old_cell = current_bucket[i].Value();
+ new_cell &= old_cell;
+ }
}
}
}
- if (in_bucket_count == 0) {
- ReleaseBucket(bucket_index);
+ if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
+ base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+ base::AtomicValue<uint32_t>* bucket_ptr =
+ bucket[bucket_index].Value();
+ to_be_freed_buckets_.push(bucket_ptr);
+ bucket[bucket_index].SetValue(nullptr);
}
new_count += in_bucket_count;
}
@@ -170,6 +198,15 @@ class SlotSet : public Malloced {
return new_count;
}
+ void FreeToBeFreedBuckets() {
+ base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+ while (!to_be_freed_buckets_.empty()) {
+ base::AtomicValue<uint32_t>* top = to_be_freed_buckets_.top();
+ to_be_freed_buckets_.pop();
+ DeleteArray<base::AtomicValue<uint32_t>>(top);
+ }
+ }
+
private:
static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
static const int kCellsPerBucket = 32;
@@ -180,24 +217,26 @@ class SlotSet : public Malloced {
static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
- uint32_t* AllocateBucket() {
- uint32_t* result = NewArray<uint32_t>(kCellsPerBucket);
+ base::AtomicValue<uint32_t>* AllocateBucket() {
+ base::AtomicValue<uint32_t>* result =
+ NewArray<base::AtomicValue<uint32_t>>(kCellsPerBucket);
for (int i = 0; i < kCellsPerBucket; i++) {
- result[i] = 0;
+ result[i].SetValue(0);
}
return result;
}
void ReleaseBucket(int bucket_index) {
- DeleteArray<uint32_t>(bucket[bucket_index]);
- bucket[bucket_index] = nullptr;
+ DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value());
+ bucket[bucket_index].SetValue(nullptr);
}
- void MaskCell(int bucket_index, int cell_index, uint32_t mask) {
+ void ClearCell(int bucket_index, int cell_index, uint32_t mask) {
if (bucket_index < kBuckets) {
- uint32_t* cells = bucket[bucket_index];
- if (cells != nullptr && cells[cell_index] != 0) {
- cells[cell_index] &= mask;
+ base::AtomicValue<uint32_t>* cells = bucket[bucket_index].Value();
+ if (cells != nullptr) {
+ uint32_t cell = cells[cell_index].Value();
+ if (cell) cells[cell_index].SetBits(0, mask);
}
} else {
// GCC bug 59124: Emits wrong warnings
@@ -217,8 +256,10 @@ class SlotSet : public Malloced {
*bit_index = slot & (kBitsPerCell - 1);
}
- uint32_t* bucket[kBuckets];
+ base::AtomicValue<base::AtomicValue<uint32_t>*> bucket[kBuckets];
Address page_start_;
+ base::Mutex to_be_freed_buckets_mutex_;
+ std::stack<base::AtomicValue<uint32_t>*> to_be_freed_buckets_;
};
enum SlotType {
@@ -228,7 +269,7 @@ enum SlotType {
CODE_TARGET_SLOT,
CODE_ENTRY_SLOT,
DEBUG_TARGET_SLOT,
- NUMBER_OF_SLOT_TYPES
+ CLEARED_SLOT
};
// Data structure for maintaining a multiset of typed slots in a page.
@@ -240,51 +281,85 @@ enum SlotType {
// typed slots contain V8 internal pointers that are not directly exposed to JS.
class TypedSlotSet {
public:
+ enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
+
+ typedef std::pair<SlotType, uint32_t> TypeAndOffset;
+
struct TypedSlot {
- TypedSlot() : type_and_offset_(0), host_offset_(0) {}
+ TypedSlot() {
+ type_and_offset_.SetValue(0);
+ host_offset_.SetValue(0);
+ }
- TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset)
- : type_and_offset_(TypeField::encode(type) |
- OffsetField::encode(offset)),
- host_offset_(host_offset) {}
+ TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset) {
+ type_and_offset_.SetValue(TypeField::encode(type) |
+ OffsetField::encode(offset));
+ host_offset_.SetValue(host_offset);
+ }
bool operator==(const TypedSlot other) {
- return type_and_offset_ == other.type_and_offset_ &&
- host_offset_ == other.host_offset_;
+ return type_and_offset_.Value() == other.type_and_offset_.Value() &&
+ host_offset_.Value() == other.host_offset_.Value();
}
bool operator!=(const TypedSlot other) { return !(*this == other); }
- SlotType type() { return TypeField::decode(type_and_offset_); }
+ SlotType type() { return TypeField::decode(type_and_offset_.Value()); }
+
+ uint32_t offset() { return OffsetField::decode(type_and_offset_.Value()); }
- uint32_t offset() { return OffsetField::decode(type_and_offset_); }
+ TypeAndOffset GetTypeAndOffset() {
+ uint32_t type_and_offset = type_and_offset_.Value();
+ return std::make_pair(TypeField::decode(type_and_offset),
+ OffsetField::decode(type_and_offset));
+ }
- uint32_t host_offset() { return host_offset_; }
+ uint32_t host_offset() { return host_offset_.Value(); }
- uint32_t type_and_offset_;
- uint32_t host_offset_;
+ void Set(TypedSlot slot) {
+ type_and_offset_.SetValue(slot.type_and_offset_.Value());
+ host_offset_.SetValue(slot.host_offset_.Value());
+ }
+
+ void Clear() {
+ type_and_offset_.SetValue(TypeField::encode(CLEARED_SLOT) |
+ OffsetField::encode(0));
+ host_offset_.SetValue(0);
+ }
+
+ base::AtomicValue<uint32_t> type_and_offset_;
+ base::AtomicValue<uint32_t> host_offset_;
};
static const int kMaxOffset = 1 << 29;
explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
- chunk_ = new Chunk(nullptr, kInitialBufferSize);
+ chunk_.SetValue(new Chunk(nullptr, kInitialBufferSize));
}
~TypedSlotSet() {
- Chunk* chunk = chunk_;
+ Chunk* chunk = chunk_.Value();
while (chunk != nullptr) {
- Chunk* next = chunk->next;
+ Chunk* next = chunk->next.Value();
delete chunk;
chunk = next;
}
+ FreeToBeFreedChunks();
}
// The slot offset specifies a slot at address page_start_ + offset.
+ // This method can only be called on the main thread.
void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
TypedSlot slot(type, host_offset, offset);
- if (!chunk_->AddSlot(slot)) {
- chunk_ = new Chunk(chunk_, NextCapacity(chunk_->capacity));
- bool added = chunk_->AddSlot(slot);
+ Chunk* top_chunk = chunk_.Value();
+ if (!top_chunk) {
+ top_chunk = new Chunk(nullptr, kInitialBufferSize);
+ chunk_.SetValue(top_chunk);
+ }
+ if (!top_chunk->AddSlot(slot)) {
+ Chunk* new_top_chunk =
+ new Chunk(top_chunk, NextCapacity(top_chunk->capacity.Value()));
+ bool added = new_top_chunk->AddSlot(slot);
+ chunk_.SetValue(new_top_chunk);
DCHECK(added);
USE(added);
}
@@ -300,32 +375,60 @@ class TypedSlotSet {
// else return REMOVE_SLOT;
// });
template <typename Callback>
- int Iterate(Callback callback) {
- STATIC_ASSERT(NUMBER_OF_SLOT_TYPES < 8);
- const TypedSlot kRemovedSlot(NUMBER_OF_SLOT_TYPES, 0, 0);
- Chunk* chunk = chunk_;
+ int Iterate(Callback callback, IterationMode mode) {
+ STATIC_ASSERT(CLEARED_SLOT < 8);
+ Chunk* chunk = chunk_.Value();
+ Chunk* previous = nullptr;
int new_count = 0;
while (chunk != nullptr) {
- TypedSlot* buffer = chunk->buffer;
- int count = chunk->count;
+ TypedSlot* buffer = chunk->buffer.Value();
+ int count = chunk->count.Value();
+ bool empty = true;
for (int i = 0; i < count; i++) {
- TypedSlot slot = buffer[i];
- if (slot != kRemovedSlot) {
- SlotType type = slot.type();
- Address addr = page_start_ + slot.offset();
- Address host_addr = page_start_ + slot.host_offset();
+ // Order is important here. We have to read out the slot type last to
+ // observe the concurrent removal case consistently.
+ Address host_addr = page_start_ + buffer[i].host_offset();
+ TypeAndOffset type_and_offset = buffer[i].GetTypeAndOffset();
+ SlotType type = type_and_offset.first;
+ if (type != CLEARED_SLOT) {
+ Address addr = page_start_ + type_and_offset.second;
if (callback(type, host_addr, addr) == KEEP_SLOT) {
new_count++;
+ empty = false;
} else {
- buffer[i] = kRemovedSlot;
+ buffer[i].Clear();
}
}
}
- chunk = chunk->next;
+
+ Chunk* next = chunk->next.Value();
+ if (mode == PREFREE_EMPTY_CHUNKS && empty) {
+ // We remove the chunk from the list but let it still point its next
+ // chunk to allow concurrent iteration.
+ if (previous) {
+ previous->next.SetValue(next);
+ } else {
+ chunk_.SetValue(next);
+ }
+ base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
+ to_be_freed_chunks_.push(chunk);
+ } else {
+ previous = chunk;
+ }
+ chunk = next;
}
return new_count;
}
+ void FreeToBeFreedChunks() {
+ base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
+ while (!to_be_freed_chunks_.empty()) {
+ Chunk* top = to_be_freed_chunks_.top();
+ to_be_freed_chunks_.pop();
+ delete top;
+ }
+ }
+
private:
static const int kInitialBufferSize = 100;
static const int kMaxBufferSize = 16 * KB;
@@ -338,24 +441,34 @@ class TypedSlotSet {
class TypeField : public BitField<SlotType, 29, 3> {};
struct Chunk : Malloced {
- explicit Chunk(Chunk* next_chunk, int capacity)
- : next(next_chunk), count(0), capacity(capacity) {
- buffer = NewArray<TypedSlot>(capacity);
+ explicit Chunk(Chunk* next_chunk, int chunk_capacity) {
+ count.SetValue(0);
+ capacity.SetValue(chunk_capacity);
+ buffer.SetValue(NewArray<TypedSlot>(chunk_capacity));
+ next.SetValue(next_chunk);
}
bool AddSlot(TypedSlot slot) {
- if (count == capacity) return false;
- buffer[count++] = slot;
+ int current_count = count.Value();
+ if (current_count == capacity.Value()) return false;
+ TypedSlot* current_buffer = buffer.Value();
+ // Order is important here. We have to write the slot first before
+ // increasing the counter to guarantee that a consistent state is
+ // observed by concurrent threads.
+ current_buffer[current_count].Set(slot);
+ count.SetValue(current_count + 1);
return true;
}
- ~Chunk() { DeleteArray(buffer); }
- Chunk* next;
- int count;
- int capacity;
- TypedSlot* buffer;
+ ~Chunk() { DeleteArray(buffer.Value()); }
+ base::AtomicValue<Chunk*> next;
+ base::AtomicValue<int> count;
+ base::AtomicValue<int> capacity;
+ base::AtomicValue<TypedSlot*> buffer;
};
Address page_start_;
- Chunk* chunk_;
+ base::AtomicValue<Chunk*> chunk_;
+ base::Mutex to_be_freed_chunks_mutex_;
+ std::stack<Chunk*> to_be_freed_chunks_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 0fd69dacfe..314d22f9a6 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -165,14 +165,6 @@ bool NewSpace::FromSpaceContainsSlow(Address a) {
bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
-// --------------------------------------------------------------------------
-// AllocationResult
-
-AllocationSpace AllocationResult::RetrySpace() {
- DCHECK(IsRetry());
- return static_cast<AllocationSpace>(Smi::cast(object_)->value());
-}
-
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
SemiSpace* owner) {
DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 95d5687a8f..c2043ed902 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -398,7 +398,7 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
// We cannot free memory chunks in new space while the sweeper is running
// since a sweeper thread might be stuck right before trying to lock the
// corresponding page.
- return !chunk->InNewSpace() || (mc == nullptr) ||
+ return !chunk->InNewSpace() || (mc == nullptr) || !FLAG_concurrent_sweeping ||
mc->sweeper().IsSweepingCompleted();
}
@@ -446,7 +446,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
base::VirtualMemory reservation(size, alignment);
if (!reservation.IsReserved()) return NULL;
- size_.Increment(static_cast<intptr_t>(reservation.size()));
+ size_.Increment(reservation.size());
Address base =
RoundUp(static_cast<Address>(reservation.address()), alignment);
controller->TakeControl(&reservation);
@@ -505,12 +505,12 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->size_ = size;
chunk->area_start_ = area_start;
chunk->area_end_ = area_end;
- chunk->flags_ = 0;
+ chunk->flags_ = Flags(NO_FLAGS);
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
- chunk->old_to_new_slots_ = nullptr;
+ chunk->old_to_new_slots_.SetValue(nullptr);
chunk->old_to_old_slots_ = nullptr;
- chunk->typed_old_to_new_slots_ = nullptr;
+ chunk->typed_old_to_new_slots_.SetValue(nullptr);
chunk->typed_old_to_old_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
@@ -528,7 +528,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->black_area_end_marker_map_ = nullptr;
DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
- DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
if (executable == EXECUTABLE) {
chunk->SetFlag(IS_EXECUTABLE);
@@ -617,6 +616,21 @@ void MemoryChunk::Unlink() {
set_next_chunk(NULL);
}
+void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
+ DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
+ DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
+ Address free_start = chunk->area_end_ - bytes_to_shrink;
+ // Don't adjust the size of the page. The area is just uncomitted but not
+ // released.
+ chunk->area_end_ -= bytes_to_shrink;
+ UncommitBlock(free_start, bytes_to_shrink);
+ if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ if (chunk->reservation_.IsReserved())
+ chunk->reservation_.Guard(chunk->area_end_);
+ else
+ base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
+ }
+}
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
intptr_t commit_area_size,
@@ -667,8 +681,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
CodePageGuardSize();
// Check executable memory limit.
- if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
- capacity_executable_) {
+ if ((size_executable_.Value() + chunk_size) > capacity_executable_) {
LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
"V8 Executable Allocation capacity exceeded"));
return NULL;
@@ -691,16 +704,16 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
DCHECK(
IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
if (base == NULL) return NULL;
- size_.Increment(static_cast<intptr_t>(chunk_size));
+ size_.Increment(chunk_size);
// Update executable memory size.
- size_executable_.Increment(static_cast<intptr_t>(chunk_size));
+ size_executable_.Increment(chunk_size);
} else {
base = AllocateAlignedMemory(chunk_size, commit_size,
MemoryChunk::kAlignment, executable,
&reservation);
if (base == NULL) return NULL;
// Update executable memory size.
- size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
+ size_executable_.Increment(reservation.size());
}
if (Heap::ShouldZapGarbage()) {
@@ -745,9 +758,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
last_chunk_.TakeControl(&reservation);
UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()),
last_chunk_.size());
- size_.Increment(-static_cast<intptr_t>(chunk_size));
+ size_.Decrement(chunk_size);
if (executable == EXECUTABLE) {
- size_executable_.Increment(-static_cast<intptr_t>(chunk_size));
+ size_executable_.Decrement(chunk_size);
}
CHECK(last_chunk_.IsReserved());
return AllocateChunk(reserve_area_size, commit_area_size, executable,
@@ -764,6 +777,53 @@ void Page::ResetFreeListStatistics() {
available_in_free_list_ = 0;
}
+size_t Page::ShrinkToHighWaterMark() {
+ // Shrink pages to high water mark. The water mark points either to a filler
+ // or the area_end.
+ HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
+ if (filler->address() == area_end()) return 0;
+ CHECK(filler->IsFiller());
+ if (!filler->IsFreeSpace()) return 0;
+
+#ifdef DEBUG
+ // Check the the filler is indeed the last filler on the page.
+ HeapObjectIterator it(this);
+ HeapObject* filler2 = nullptr;
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+ filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
+ }
+ if (filler2 == nullptr || filler2->address() == area_end()) return 0;
+ DCHECK(filler2->IsFiller());
+ // The deserializer might leave behind fillers. In this case we need to
+ // iterate even further.
+ while ((filler2->address() + filler2->Size()) != area_end()) {
+ filler2 = HeapObject::FromAddress(filler2->address() + filler2->Size());
+ DCHECK(filler2->IsFiller());
+ }
+ DCHECK_EQ(filler->address(), filler2->address());
+#endif // DEBUG
+
+ size_t unused = RoundDown(
+ static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
+ base::OS::CommitPageSize());
+ if (unused > 0) {
+ if (FLAG_trace_gc_verbose) {
+ PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
+ reinterpret_cast<void*>(this),
+ reinterpret_cast<void*>(area_end()),
+ reinterpret_cast<void*>(area_end() - unused));
+ }
+ heap()->CreateFillerObjectAt(
+ filler->address(),
+ static_cast<int>(area_end() - filler->address() - unused),
+ ClearRecordedSlots::kNo);
+ heap()->memory_allocator()->ShrinkChunk(this, unused);
+ CHECK(filler->IsFiller());
+ CHECK_EQ(filler->address() + filler->Size(), area_end());
+ }
+ return unused;
+}
+
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
Address start_free) {
// We do not allow partial shrink for code.
@@ -776,8 +836,8 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
size_t to_free_size = size - (start_free - chunk->address());
- DCHECK(size_.Value() >= static_cast<intptr_t>(to_free_size));
- size_.Increment(-static_cast<intptr_t>(to_free_size));
+ DCHECK(size_.Value() >= to_free_size);
+ size_.Decrement(to_free_size);
isolate_->counters()->memory_allocated()->Decrement(
static_cast<int>(to_free_size));
chunk->set_size(size - to_free_size);
@@ -792,20 +852,15 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
- intptr_t size;
base::VirtualMemory* reservation = chunk->reserved_memory();
- if (reservation->IsReserved()) {
- size = static_cast<intptr_t>(reservation->size());
- } else {
- size = static_cast<intptr_t>(chunk->size());
- }
- DCHECK(size_.Value() >= size);
- size_.Increment(-size);
+ const size_t size =
+ reservation->IsReserved() ? reservation->size() : chunk->size();
+ DCHECK_GE(size_.Value(), static_cast<size_t>(size));
+ size_.Decrement(size);
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
if (chunk->executable() == EXECUTABLE) {
- DCHECK(size_executable_.Value() >= size);
- size_executable_.Increment(-size);
+ DCHECK_GE(size_executable_.Value(), size);
+ size_executable_.Decrement(size);
}
chunk->SetFlag(MemoryChunk::PRE_FREED);
@@ -938,10 +993,9 @@ void MemoryAllocator::ZapBlock(Address start, size_t size) {
#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
- intptr_t size = Size();
+ size_t size = Size();
float pct = static_cast<float>(capacity_ - size) / capacity_;
- PrintF(" capacity: %" V8PRIdPTR ", used: %" V8PRIdPTR
- ", available: %%%d\n\n",
+ PrintF(" capacity: %zu , used: %" V8PRIdPTR ", available: %%%d\n\n",
capacity_, size, static_cast<int>(pct * 100));
}
#endif
@@ -1014,9 +1068,9 @@ void MemoryChunk::ReleaseAllocatedMemory() {
delete mutex_;
mutex_ = nullptr;
}
- if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
+ if (old_to_new_slots_.Value() != nullptr) ReleaseOldToNewSlots();
if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
- if (typed_old_to_new_slots_ != nullptr) ReleaseTypedOldToNewSlots();
+ if (typed_old_to_new_slots_.Value() != nullptr) ReleaseTypedOldToNewSlots();
if (typed_old_to_old_slots_ != nullptr) ReleaseTypedOldToOldSlots();
if (local_tracker_ != nullptr) ReleaseLocalTracker();
}
@@ -1032,13 +1086,14 @@ static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
}
void MemoryChunk::AllocateOldToNewSlots() {
- DCHECK(nullptr == old_to_new_slots_);
- old_to_new_slots_ = AllocateSlotSet(size_, address());
+ DCHECK(nullptr == old_to_new_slots_.Value());
+ old_to_new_slots_.SetValue(AllocateSlotSet(size_, address()));
}
void MemoryChunk::ReleaseOldToNewSlots() {
- delete[] old_to_new_slots_;
- old_to_new_slots_ = nullptr;
+ SlotSet* old_to_new_slots = old_to_new_slots_.Value();
+ delete[] old_to_new_slots;
+ old_to_new_slots_.SetValue(nullptr);
}
void MemoryChunk::AllocateOldToOldSlots() {
@@ -1052,13 +1107,14 @@ void MemoryChunk::ReleaseOldToOldSlots() {
}
void MemoryChunk::AllocateTypedOldToNewSlots() {
- DCHECK(nullptr == typed_old_to_new_slots_);
- typed_old_to_new_slots_ = new TypedSlotSet(address());
+ DCHECK(nullptr == typed_old_to_new_slots_.Value());
+ typed_old_to_new_slots_.SetValue(new TypedSlotSet(address()));
}
void MemoryChunk::ReleaseTypedOldToNewSlots() {
- delete typed_old_to_new_slots_;
- typed_old_to_new_slots_ = nullptr;
+ TypedSlotSet* typed_old_to_new_slots = typed_old_to_new_slots_.Value();
+ delete typed_old_to_new_slots;
+ typed_old_to_new_slots_.SetValue(nullptr);
}
void MemoryChunk::AllocateTypedOldToOldSlots() {
@@ -1235,18 +1291,29 @@ Object* PagedSpace::FindObject(Address addr) {
return Smi::FromInt(0);
}
-bool PagedSpace::Expand() {
- int size = AreaSize();
- if (snapshotable() && !HasPages()) {
- size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
+void PagedSpace::ShrinkImmortalImmovablePages() {
+ DCHECK(!heap()->deserialization_complete());
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ EmptyAllocationInfo();
+ ResetFreeList();
+
+ for (Page* page : *this) {
+ DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
+ size_t unused = page->ShrinkToHighWaterMark();
+ accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
+ AccountUncommitted(unused);
}
+}
+
+bool PagedSpace::Expand() {
+ const int size = AreaSize();
if (!heap()->CanExpandOldGeneration(size)) return false;
Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
if (p == nullptr) return false;
- AccountCommitted(static_cast<intptr_t>(p->size()));
+ AccountCommitted(p->size());
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
@@ -1336,7 +1403,6 @@ void PagedSpace::IncreaseCapacity(size_t bytes) {
void PagedSpace::ReleasePage(Page* page) {
DCHECK_EQ(page->LiveBytes(), 0);
- DCHECK_EQ(AreaSize(), page->area_size());
DCHECK_EQ(page->owner(), this);
free_list_.EvictFreeListItems(page);
@@ -1354,11 +1420,13 @@ void PagedSpace::ReleasePage(Page* page) {
page->Unlink();
}
- AccountUncommitted(static_cast<intptr_t>(page->size()));
+ AccountUncommitted(page->size());
+ accounting_stats_.ShrinkSpace(page->area_size());
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+}
- DCHECK(Capacity() > 0);
- accounting_stats_.ShrinkSpace(AreaSize());
+std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
+ return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
}
#ifdef DEBUG
@@ -1481,7 +1549,7 @@ void NewSpace::Grow() {
void NewSpace::Shrink() {
- int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
+ int new_capacity = Max(InitialTotalCapacity(), 2 * static_cast<int>(Size()));
int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
if (rounded_new_capacity < TotalCapacity() &&
to_space_.ShrinkTo(rounded_new_capacity)) {
@@ -1747,6 +1815,10 @@ void NewSpace::InlineAllocationStep(Address top, Address new_top,
}
}
+std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
+ return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
+}
+
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
@@ -1903,7 +1975,7 @@ bool SemiSpace::GrowTo(int new_capacity) {
new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
last_page = new_page;
}
- AccountCommitted(static_cast<intptr_t>(delta));
+ AccountCommitted(delta);
current_capacity_ = new_capacity;
return true;
}
@@ -1940,7 +2012,7 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
last_page);
delta_pages--;
}
- AccountUncommitted(static_cast<intptr_t>(delta));
+ AccountUncommitted(delta);
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
current_capacity_ = new_capacity;
@@ -2010,7 +2082,6 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
from->FixPagesFlags(0, 0);
}
-
void SemiSpace::set_age_mark(Address mark) {
DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
age_mark_ = mark;
@@ -2020,6 +2091,11 @@ void SemiSpace::set_age_mark(Address mark) {
}
}
+std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
+ // Use the NewSpace::NewObjectIterator to iterate the ToSpace.
+ UNREACHABLE();
+ return std::unique_ptr<ObjectIterator>();
+}
#ifdef DEBUG
void SemiSpace::Print() {}
@@ -2490,14 +2566,13 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// Don't free list allocate if there is linear space available.
DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
- int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
owner_->EmptyAllocationInfo();
- owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
- old_linear_size);
+ owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+ Heap::kNoGCFlags, kNoGCCallbackFlags);
int new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
@@ -2778,19 +2853,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
}
}
- // Free list allocation failed and there is no next page. Fail if we have
- // hit the old generation size limit that should cause a garbage
- // collection.
- if (!heap()->always_allocate() &&
- heap()->OldGenerationAllocationLimitReached()) {
- // If sweeper threads are active, wait for them at that point and steal
- // elements form their free-lists.
- HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
- return object;
- }
-
- // Try to expand the space and allocate in the new next page.
- if (Expand()) {
+ if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) {
DCHECK((CountTotalPages() > 1) ||
(size_in_bytes <= free_list_.Available()));
return free_list_.Allocate(size_in_bytes);
@@ -2874,7 +2937,7 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
size_(0),
page_count_(0),
objects_size_(0),
- chunk_map_(base::HashMap::PointersMatch, 1024) {}
+ chunk_map_(1024) {}
LargeObjectSpace::~LargeObjectSpace() {}
@@ -2914,7 +2977,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
DCHECK(page->area_size() >= object_size);
size_ += static_cast<int>(page->size());
- AccountCommitted(static_cast<intptr_t>(page->size()));
+ AccountCommitted(page->size());
objects_size_ += object_size;
page_count_++;
page->set_next_page(first_page_);
@@ -2933,7 +2996,8 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
}
- heap()->incremental_marking()->OldSpaceStep(object_size);
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
+ kNoGCCallbackFlags);
AllocationStep(object->address(), object_size);
if (heap()->incremental_marking()->black_allocation()) {
@@ -3050,7 +3114,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
// Free the chunk.
size_ -= static_cast<int>(page->size());
- AccountUncommitted(static_cast<intptr_t>(page->size()));
+ AccountUncommitted(page->size());
objects_size_ -= object->Size();
page_count_--;
@@ -3072,6 +3136,9 @@ bool LargeObjectSpace::Contains(HeapObject* object) {
return owned;
}
+std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
+ return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
+}
#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index de5ea1b16a..732ba7ead5 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -16,6 +16,8 @@
#include "src/base/hashmap.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
+#include "src/globals.h"
+#include "src/heap/heap.h"
#include "src/heap/marking.h"
#include "src/list.h"
#include "src/objects.h"
@@ -57,7 +59,7 @@ class Space;
// area.
//
// There is a separate large object space for objects larger than
-// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
+// kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
@@ -105,7 +107,7 @@ class Space;
DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
#define DCHECK_OBJECT_SIZE(size) \
- DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
+ DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
DCHECK((0 < size) && (size <= code_space->AreaSize()))
@@ -227,62 +229,75 @@ class FreeListCategory {
// any heap object.
class MemoryChunk {
public:
- enum MemoryChunkFlags {
- IS_EXECUTABLE,
- POINTERS_TO_HERE_ARE_INTERESTING,
- POINTERS_FROM_HERE_ARE_INTERESTING,
- IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
- IN_TO_SPACE, // All pages in new space has one of these two set.
- NEW_SPACE_BELOW_AGE_MARK,
- EVACUATION_CANDIDATE,
- NEVER_EVACUATE, // May contain immortal immutables.
+ enum Flag {
+ NO_FLAGS = 0u,
+ IS_EXECUTABLE = 1u << 0,
+ POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
+ POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
+ // A page in new space has one of the next to flags set.
+ IN_FROM_SPACE = 1u << 3,
+ IN_TO_SPACE = 1u << 4,
+ NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
+ EVACUATION_CANDIDATE = 1u << 6,
+ NEVER_EVACUATE = 1u << 7,
// Large objects can have a progress bar in their page header. These object
// are scanned in increments and will be kept black while being scanned.
// Even if the mutator writes to them they will be kept black and a white
// to grey transition is performed in the value.
- HAS_PROGRESS_BAR,
+ HAS_PROGRESS_BAR = 1u << 8,
// |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
// from new to old space during evacuation.
- PAGE_NEW_OLD_PROMOTION,
+ PAGE_NEW_OLD_PROMOTION = 1u << 9,
// |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
// within the new space during evacuation.
- PAGE_NEW_NEW_PROMOTION,
+ PAGE_NEW_NEW_PROMOTION = 1u << 10,
// This flag is intended to be used for testing. Works only when both
// FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
// are set. It forces the page to become an evacuation candidate at next
// candidates selection cycle.
- FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
+ FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
// This flag is intended to be used for testing.
- NEVER_ALLOCATE_ON_PAGE,
+ NEVER_ALLOCATE_ON_PAGE = 1u << 12,
// The memory chunk is already logically freed, however the actual freeing
// still has to be performed.
- PRE_FREED,
+ PRE_FREED = 1u << 13,
// |POOLED|: When actually freeing this chunk, only uncommit and do not
// give up the reservation as we still reuse the chunk at some point.
- POOLED,
+ POOLED = 1u << 14,
// |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
// has been aborted and needs special handling by the sweeper.
- COMPACTION_WAS_ABORTED,
+ COMPACTION_WAS_ABORTED = 1u << 15,
// |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
// on pages is sometimes aborted. The flag is used to avoid repeatedly
// triggering on the same page.
- COMPACTION_WAS_ABORTED_FOR_TESTING,
+ COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
// |ANCHOR|: Flag is set if page is an anchor.
- ANCHOR,
-
- // Last flag, keep at bottom.
- NUM_MEMORY_CHUNK_FLAGS
+ ANCHOR = 1u << 17,
};
+ typedef base::Flags<Flag, uintptr_t> Flags;
+
+ static const int kPointersToHereAreInterestingMask =
+ POINTERS_TO_HERE_ARE_INTERESTING;
+
+ static const int kPointersFromHereAreInterestingMask =
+ POINTERS_FROM_HERE_ARE_INTERESTING;
+
+ static const int kEvacuationCandidateMask = EVACUATION_CANDIDATE;
+
+ static const int kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;
+
+ static const int kSkipEvacuationSlotsRecordingMask =
+ kEvacuationCandidateMask | kIsInNewSpaceMask;
// |kSweepingDone|: The page state when sweeping is complete or sweeping must
// not be performed on that page. Sweeper threads that are done with their
@@ -300,17 +315,6 @@ class MemoryChunk {
// whether we have hit the limit and should do some more marking.
static const int kWriteBarrierCounterGranularity = 500;
- static const int kPointersToHereAreInterestingMask =
- 1 << POINTERS_TO_HERE_ARE_INTERESTING;
-
- static const int kPointersFromHereAreInterestingMask =
- 1 << POINTERS_FROM_HERE_ARE_INTERESTING;
-
- static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
-
- static const int kSkipEvacuationSlotsRecordingMask =
- (1 << EVACUATION_CANDIDATE) | (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
-
static const intptr_t kAlignment =
(static_cast<uintptr_t>(1) << kPageSizeBits);
@@ -320,25 +324,21 @@ class MemoryChunk {
static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
- static const intptr_t kLiveBytesOffset =
+ static const size_t kWriteBarrierCounterOffset =
kSizeOffset + kPointerSize // size_t size
- + kIntptrSize // intptr_t flags_
+ + kIntptrSize // Flags flags_
+ kPointerSize // Address area_start_
+ kPointerSize // Address area_end_
+ 2 * kPointerSize // base::VirtualMemory reservation_
+ kPointerSize // Address owner_
+ kPointerSize // Heap* heap_
- + kIntSize; // int progress_bar_
-
- static const size_t kOldToNewSlotsOffset =
- kLiveBytesOffset + kIntSize; // int live_byte_count_
-
- static const size_t kWriteBarrierCounterOffset =
- kOldToNewSlotsOffset + kPointerSize // SlotSet* old_to_new_slots_;
- + kPointerSize // SlotSet* old_to_old_slots_;
- + kPointerSize // TypedSlotSet* typed_old_to_new_slots_;
- + kPointerSize // TypedSlotSet* typed_old_to_old_slots_;
- + kPointerSize; // SkipList* skip_list_;
+ + kIntSize // int progress_bar_
+ + kIntSize // int live_bytes_count_
+ + kPointerSize // SlotSet* old_to_new_slots_;
+ + kPointerSize // SlotSet* old_to_old_slots_;
+ + kPointerSize // TypedSlotSet* typed_old_to_new_slots_;
+ + kPointerSize // TypedSlotSet* typed_old_to_old_slots_;
+ + kPointerSize; // SkipList* skip_list_;
static const size_t kMinHeaderSize =
kWriteBarrierCounterOffset +
@@ -351,7 +351,7 @@ class MemoryChunk {
+ kPointerSize // AtomicValue prev_chunk_
// FreeListCategory categories_[kNumberOfCategories]
+ FreeListCategory::kSize * kNumberOfCategories +
- kPointerSize // LocalArrayBufferTracker* local_tracker_;
+ kPointerSize // LocalArrayBufferTracker* local_tracker_
// std::unordered_set<Address>* black_area_end_marker_map_
+ kPointerSize;
@@ -453,17 +453,17 @@ class MemoryChunk {
inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
- inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
+ inline SlotSet* old_to_new_slots() { return old_to_new_slots_.Value(); }
inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
inline TypedSlotSet* typed_old_to_new_slots() {
- return typed_old_to_new_slots_;
+ return typed_old_to_new_slots_.Value();
}
inline TypedSlotSet* typed_old_to_old_slots() {
return typed_old_to_old_slots_;
}
inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
- void AllocateOldToNewSlots();
+ V8_EXPORT_PRIVATE void AllocateOldToNewSlots();
void ReleaseOldToNewSlots();
void AllocateOldToOldSlots();
void ReleaseOldToOldSlots();
@@ -498,7 +498,6 @@ class MemoryChunk {
void ResetProgressBar() {
if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
set_progress_bar(0);
- ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
}
}
@@ -518,22 +517,18 @@ class MemoryChunk {
void PrintMarkbits() { markbits()->Print(); }
- void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
-
- void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
-
- bool IsFlagSet(int flag) {
- return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
- }
+ void SetFlag(Flag flag) { flags_ |= flag; }
+ void ClearFlag(Flag flag) { flags_ &= ~Flags(flag); }
+ bool IsFlagSet(Flag flag) { return flags_ & flag; }
// Set or clear multiple flags at a time. The flags in the mask are set to
// the value in "flags", the rest retain the current value in |flags_|.
- void SetFlags(intptr_t flags, intptr_t mask) {
- flags_ = (flags_ & ~mask) | (flags & mask);
+ void SetFlags(uintptr_t flags, uintptr_t mask) {
+ flags_ = (flags_ & ~Flags(mask)) | (Flags(flags) & Flags(mask));
}
// Return all current flags.
- intptr_t GetFlags() { return flags_; }
+ uintptr_t GetFlags() { return flags_; }
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
@@ -557,9 +552,7 @@ class MemoryChunk {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
- bool InNewSpace() {
- return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
- }
+ bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; }
bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
@@ -634,7 +627,7 @@ class MemoryChunk {
base::VirtualMemory* reserved_memory() { return &reservation_; }
size_t size_;
- intptr_t flags_;
+ Flags flags_;
// Start and end of allocatable memory on this chunk.
Address area_start_;
@@ -660,9 +653,9 @@ class MemoryChunk {
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
- SlotSet* old_to_new_slots_;
+ base::AtomicValue<SlotSet*> old_to_new_slots_;
SlotSet* old_to_old_slots_;
- TypedSlotSet* typed_old_to_new_slots_;
+ base::AtomicValue<TypedSlotSet*> typed_old_to_new_slots_;
TypedSlotSet* typed_old_to_old_slots_;
SkipList* skip_list_;
@@ -700,6 +693,11 @@ class MemoryChunk {
friend class MemoryChunkValidator;
};
+DEFINE_OPERATORS_FOR_FLAGS(MemoryChunk::Flags)
+
+static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
+ "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
+
// -----------------------------------------------------------------------------
// A page is a memory chunk of a size 1MB. Large object pages may be larger.
//
@@ -712,17 +710,8 @@ class Page : public MemoryChunk {
// Page flags copied from from-space to to-space when flipping semispaces.
static const intptr_t kCopyOnFlipFlagsMask =
- (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
- // Maximum object size that gets allocated into regular pages. Objects larger
- // than that size are allocated in large object space and are never moved in
- // memory. This also applies to new space allocation, since objects are never
- // migrated from new space to large object space. Takes double alignment into
- // account.
- // TODO(hpayer): This limit should be way smaller but we currently have
- // short living objects >256K.
- static const int kMaxRegularHeapObjectSize = 600 * KB;
+ static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+ static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);
@@ -823,6 +812,8 @@ class Page : public MemoryChunk {
available_in_free_list_.Increment(available);
}
+ size_t ShrinkToHighWaterMark();
+
#ifdef DEBUG
void Print();
#endif // DEBUG
@@ -918,9 +909,9 @@ class Space : public Malloced {
// Return the total amount committed memory for this space, i.e., allocatable
// memory and page headers.
- virtual intptr_t CommittedMemory() { return committed_; }
+ virtual size_t CommittedMemory() { return committed_; }
- virtual intptr_t MaximumCommittedMemory() { return max_committed_; }
+ virtual size_t MaximumCommittedMemory() { return max_committed_; }
// Returns allocated size.
virtual intptr_t Size() = 0;
@@ -943,18 +934,19 @@ class Space : public Malloced {
}
}
- void AccountCommitted(intptr_t bytes) {
- DCHECK_GE(bytes, 0);
+ virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
+
+ void AccountCommitted(size_t bytes) {
+ DCHECK_GE(committed_ + bytes, committed_);
committed_ += bytes;
if (committed_ > max_committed_) {
max_committed_ = committed_;
}
}
- void AccountUncommitted(intptr_t bytes) {
- DCHECK_GE(bytes, 0);
+ void AccountUncommitted(size_t bytes) {
+ DCHECK_GE(committed_, committed_ - bytes);
committed_ -= bytes;
- DCHECK_GE(committed_, 0);
}
#ifdef DEBUG
@@ -971,8 +963,8 @@ class Space : public Malloced {
Executability executable_;
// Keeps track of committed memory in a space.
- intptr_t committed_;
- intptr_t max_committed_;
+ size_t committed_;
+ size_t max_committed_;
DISALLOW_COPY_AND_ASSIGN(Space);
};
@@ -981,10 +973,6 @@ class Space : public Malloced {
class MemoryChunkValidator {
// Computed offsets should match the compiler generated ones.
STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
- STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
- offsetof(MemoryChunk, live_byte_count_));
- STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset ==
- offsetof(MemoryChunk, old_to_new_slots_));
STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
offsetof(MemoryChunk, write_barrier_counter_));
@@ -1242,12 +1230,31 @@ class MemoryAllocator {
kRegular,
kPooled,
};
+
enum FreeMode {
kFull,
kPreFreeAndQueue,
kPooledAndQueue,
};
+ static int CodePageGuardStartOffset();
+
+ static int CodePageGuardSize();
+
+ static int CodePageAreaStartOffset();
+
+ static int CodePageAreaEndOffset();
+
+ static int CodePageAreaSize() {
+ return CodePageAreaEndOffset() - CodePageAreaStartOffset();
+ }
+
+ static int PageAreaSize(AllocationSpace space) {
+ DCHECK_NE(LO_SPACE, space);
+ return (space == CODE_SPACE) ? CodePageAreaSize()
+ : Page::kAllocatableMemory;
+ }
+
explicit MemoryAllocator(Isolate* isolate);
// Initializes its internal bookkeeping structures.
@@ -1273,26 +1280,26 @@ class MemoryAllocator {
bool CanFreeMemoryChunk(MemoryChunk* chunk);
// Returns allocated spaces in bytes.
- intptr_t Size() { return size_.Value(); }
+ size_t Size() { return size_.Value(); }
// Returns allocated executable spaces in bytes.
- intptr_t SizeExecutable() { return size_executable_.Value(); }
+ size_t SizeExecutable() { return size_executable_.Value(); }
// Returns the maximum available bytes of heaps.
- intptr_t Available() {
- intptr_t size = Size();
+ size_t Available() {
+ const size_t size = Size();
return capacity_ < size ? 0 : capacity_ - size;
}
// Returns the maximum available executable bytes of heaps.
- intptr_t AvailableExecutable() {
- intptr_t executable_size = SizeExecutable();
+ size_t AvailableExecutable() {
+ const size_t executable_size = SizeExecutable();
if (capacity_executable_ < executable_size) return 0;
return capacity_executable_ - executable_size;
}
// Returns maximum available bytes that the old space can have.
- intptr_t MaxAvailable() {
+ size_t MaxAvailable() {
return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
}
@@ -1303,11 +1310,6 @@ class MemoryAllocator {
address >= highest_ever_allocated_.Value();
}
-#ifdef DEBUG
- // Reports statistic info of the space.
- void ReportStatistics();
-#endif
-
// Returns a MemoryChunk in which the memory region from commit_area_size to
// reserve_area_size of the chunk area is reserved but not committed, it
// could be committed later by calling MemoryChunk::CommitArea.
@@ -1315,6 +1317,8 @@ class MemoryAllocator {
intptr_t commit_area_size,
Executability executable, Space* space);
+ void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
+
Address ReserveAlignedMemory(size_t requested, size_t alignment,
base::VirtualMemory* controller);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
@@ -1343,24 +1347,6 @@ class MemoryAllocator {
// filling it up with a recognizable non-NULL bit pattern.
void ZapBlock(Address start, size_t size);
- static int CodePageGuardStartOffset();
-
- static int CodePageGuardSize();
-
- static int CodePageAreaStartOffset();
-
- static int CodePageAreaEndOffset();
-
- static int CodePageAreaSize() {
- return CodePageAreaEndOffset() - CodePageAreaStartOffset();
- }
-
- static int PageAreaSize(AllocationSpace space) {
- DCHECK_NE(LO_SPACE, space);
- return (space == CODE_SPACE) ? CodePageAreaSize()
- : Page::kAllocatableMemory;
- }
-
MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
Address start, size_t commit_size,
size_t reserved_size);
@@ -1368,6 +1354,11 @@ class MemoryAllocator {
CodeRange* code_range() { return code_range_; }
Unmapper* unmapper() { return &unmapper_; }
+#ifdef DEBUG
+ // Reports statistic info of the space.
+ void ReportStatistics();
+#endif
+
private:
// PreFree logically frees the object, i.e., it takes care of the size
// bookkeeping and calls the allocation callback.
@@ -1381,28 +1372,6 @@ class MemoryAllocator {
template <typename SpaceType>
MemoryChunk* AllocatePagePooled(SpaceType* owner);
- Isolate* isolate_;
-
- CodeRange* code_range_;
-
- // Maximum space size in bytes.
- intptr_t capacity_;
- // Maximum subset of capacity_ that can be executable
- intptr_t capacity_executable_;
-
- // Allocated space size in bytes.
- base::AtomicNumber<intptr_t> size_;
- // Allocated executable space size in bytes.
- base::AtomicNumber<intptr_t> size_executable_;
-
- // We keep the lowest and highest addresses allocated as a quick way
- // of determining that pointers are outside the heap. The estimate is
- // conservative, i.e. not all addrsses in 'allocated' space are allocated
- // to our heap. The range is [lowest, highest[, inclusive on the low end
- // and exclusive on the high end.
- base::AtomicValue<void*> lowest_ever_allocated_;
- base::AtomicValue<void*> highest_ever_allocated_;
-
// Initializes pages in a chunk. Returns the first page address.
// This function and GetChunkId() are provided for the mark-compact
// collector to rebuild page headers in the from space, which is
@@ -1423,6 +1392,27 @@ class MemoryAllocator {
} while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
}
+ Isolate* isolate_;
+ CodeRange* code_range_;
+
+ // Maximum space size in bytes.
+ size_t capacity_;
+ // Maximum subset of capacity_ that can be executable
+ size_t capacity_executable_;
+
+ // Allocated space size in bytes.
+ base::AtomicNumber<size_t> size_;
+ // Allocated executable space size in bytes.
+ base::AtomicNumber<size_t> size_executable_;
+
+ // We keep the lowest and highest addresses allocated as a quick way
+ // of determining that pointers are outside the heap. The estimate is
+ // conservative, i.e. not all addresses in 'allocated' space are allocated
+ // to our heap. The range is [lowest, highest[, inclusive on the low end
+ // and exclusive on the high end.
+ base::AtomicValue<void*> lowest_ever_allocated_;
+ base::AtomicValue<void*> highest_ever_allocated_;
+
base::VirtualMemory last_chunk_;
Unmapper unmapper_;
@@ -1440,7 +1430,7 @@ class MemoryAllocator {
// method which is used to avoid using virtual functions
// iterating a specific space.
-class ObjectIterator : public Malloced {
+class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
public:
virtual ~ObjectIterator() {}
virtual HeapObject* Next() = 0;
@@ -1491,7 +1481,7 @@ class PageRange {
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
-class HeapObjectIterator : public ObjectIterator {
+class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
public:
// Creates a new object iterator in a given space.
explicit HeapObjectIterator(PagedSpace* space);
@@ -1880,50 +1870,6 @@ class FreeList {
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
-
-class AllocationResult {
- public:
- // Implicit constructor from Object*.
- AllocationResult(Object* object) // NOLINT
- : object_(object) {
- // AllocationResults can't return Smis, which are used to represent
- // failure and the space to retry in.
- CHECK(!object->IsSmi());
- }
-
- AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
-
- static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
- return AllocationResult(space);
- }
-
- inline bool IsRetry() { return object_->IsSmi(); }
-
- template <typename T>
- bool To(T** obj) {
- if (IsRetry()) return false;
- *obj = T::cast(object_);
- return true;
- }
-
- Object* ToObjectChecked() {
- CHECK(!IsRetry());
- return object_;
- }
-
- inline AllocationSpace RetrySpace();
-
- private:
- explicit AllocationResult(AllocationSpace space)
- : object_(Smi::FromInt(static_cast<int>(space))) {}
-
- Object* object_;
-};
-
-
-STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
-
-
// LocalAllocationBuffer represents a linear allocation area that is created
// from a given {AllocationResult} and can be used to allocate memory without
// synchronization.
@@ -2196,6 +2142,12 @@ class PagedSpace : public Space {
iterator begin() { return iterator(anchor_.next_page()); }
iterator end() { return iterator(&anchor_); }
+ // Shrink immortal immovable pages of the space to be exactly the size needed
+ // using the high water mark.
+ void ShrinkImmortalImmovablePages();
+
+ std::unique_ptr<ObjectIterator> GetObjectIterator() override;
+
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
@@ -2255,41 +2207,6 @@ class PagedSpace : public Space {
friend class HeapTester;
};
-
-class NumberAndSizeInfo BASE_EMBEDDED {
- public:
- NumberAndSizeInfo() : number_(0), bytes_(0) {}
-
- int number() const { return number_; }
- void increment_number(int num) { number_ += num; }
-
- int bytes() const { return bytes_; }
- void increment_bytes(int size) { bytes_ += size; }
-
- void clear() {
- number_ = 0;
- bytes_ = 0;
- }
-
- private:
- int number_;
- int bytes_;
-};
-
-
-// HistogramInfo class for recording a single "bar" of a histogram. This
-// class is used for collecting statistics to print to the log file.
-class HistogramInfo : public NumberAndSizeInfo {
- public:
- HistogramInfo() : NumberAndSizeInfo() {}
-
- const char* name() { return name_; }
- void set_name(const char* name) { name_ = name; }
-
- private:
- const char* name_;
-};
-
enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
// -----------------------------------------------------------------------------
@@ -2411,6 +2328,11 @@ class SemiSpace : public Space {
return 0;
}
+ iterator begin() { return iterator(anchor_.next_page()); }
+ iterator end() { return iterator(anchor()); }
+
+ std::unique_ptr<ObjectIterator> GetObjectIterator() override;
+
#ifdef DEBUG
void Print() override;
// Validate a range of of addresses in a SemiSpace.
@@ -2426,9 +2348,6 @@ class SemiSpace : public Space {
virtual void Verify();
#endif
- iterator begin() { return iterator(anchor_.next_page()); }
- iterator end() { return iterator(anchor()); }
-
private:
void RewindPages(Page* start, int num_pages);
@@ -2534,10 +2453,7 @@ class NewSpace : public Space {
static_cast<int>(top() - to_space_.page_low());
}
- // The same, but returning an int. We have to have the one that returns
- // intptr_t because it is inherited, but if we know we are dealing with the
- // new space, which can't get as big as the other spaces then this is useful:
- int SizeAsInt() { return static_cast<int>(Size()); }
+ intptr_t SizeOfObjects() override { return Size(); }
// Return the allocatable capacity of a semispace.
intptr_t Capacity() {
@@ -2555,11 +2471,11 @@ class NewSpace : public Space {
// Committed memory for NewSpace is the committed memory of both semi-spaces
// combined.
- intptr_t CommittedMemory() override {
+ size_t CommittedMemory() override {
return from_space_.CommittedMemory() + to_space_.CommittedMemory();
}
- intptr_t MaximumCommittedMemory() override {
+ size_t MaximumCommittedMemory() override {
return from_space_.MaximumCommittedMemory() +
to_space_.MaximumCommittedMemory();
}
@@ -2760,6 +2676,8 @@ class NewSpace : public Space {
iterator begin() { return to_space_.begin(); }
iterator end() { return to_space_.end(); }
+ std::unique_ptr<ObjectIterator> GetObjectIterator() override;
+
private:
// Update allocation info to match the current to-space page.
void UpdateAllocationInfo();
@@ -2895,7 +2813,7 @@ class MapSpace : public PagedSpace {
// -----------------------------------------------------------------------------
-// Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and
+// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
// managed by the large object space. A large object is allocated from OS
// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset to a page.
@@ -2973,6 +2891,8 @@ class LargeObjectSpace : public Space {
iterator begin() { return iterator(first_page_); }
iterator end() { return iterator(nullptr); }
+ std::unique_ptr<ObjectIterator> GetObjectIterator() override;
+
#ifdef VERIFY_HEAP
virtual void Verify();
#endif
@@ -3030,20 +2950,6 @@ class MemoryChunkIterator BASE_EMBEDDED {
LargePageIterator lo_iterator_;
};
-#ifdef DEBUG
-struct CommentStatistic {
- const char* comment;
- int size;
- int count;
- void Clear() {
- comment = NULL;
- size = 0;
- count = 0;
- }
- // Must be small, since an iteration is used for lookup.
- static const int kMaxComments = 64;
-};
-#endif
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 3418ae79f1..58b8a8dc5c 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -5,6 +5,8 @@
#include "src/i18n.h"
+#include <memory>
+
#include "src/api.h"
#include "src/factory.h"
#include "src/isolate.h"
@@ -115,13 +117,11 @@ icu::SimpleDateFormat* CreateICUDateFormat(
icu::SimpleDateFormat* date_format = NULL;
icu::UnicodeString skeleton;
if (ExtractStringSetting(isolate, options, "skeleton", &skeleton)) {
- icu::DateTimePatternGenerator* generator =
- icu::DateTimePatternGenerator::createInstance(icu_locale, status);
+ std::unique_ptr<icu::DateTimePatternGenerator> generator(
+ icu::DateTimePatternGenerator::createInstance(icu_locale, status));
icu::UnicodeString pattern;
- if (U_SUCCESS(status)) {
+ if (U_SUCCESS(status))
pattern = generator->getBestPattern(skeleton, status);
- delete generator;
- }
date_format = new icu::SimpleDateFormat(pattern, icu_locale, status);
if (U_SUCCESS(status)) {
@@ -132,7 +132,7 @@ icu::SimpleDateFormat* CreateICUDateFormat(
if (U_FAILURE(status)) {
delete calendar;
delete date_format;
- date_format = NULL;
+ date_format = nullptr;
}
return date_format;
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 6f2fb97908..edab277fbe 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -1301,7 +1301,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// edi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
- Label done_increment_count, done_initialize_count;
// Load the cache state into ecx.
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
@@ -1314,7 +1313,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// type-feedback-vector.h).
Label check_allocation_site;
__ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
- __ j(equal, &done_increment_count, Label::kFar);
+ __ j(equal, &done, Label::kFar);
__ CompareRoot(ecx, Heap::kmegamorphic_symbolRootIndex);
__ j(equal, &done, Label::kFar);
__ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
@@ -1337,7 +1336,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
__ j(not_equal, &megamorphic);
- __ jmp(&done_increment_count, Label::kFar);
+ __ jmp(&done, Label::kFar);
__ bind(&miss);
@@ -1366,26 +1365,17 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(isolate);
CallStubInRecordCallTarget(masm, &create_stub);
- __ jmp(&done_initialize_count);
+ __ jmp(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
CallStubInRecordCallTarget(masm, &weak_cell_stub);
- __ bind(&done_initialize_count);
-
- // Initialize the call counter.
- __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(1)));
- __ jmp(&done);
- __ bind(&done_increment_count);
- // Increment the call count for monomorphic function calls.
+ __ bind(&done);
+ // Increment the call count for all function calls.
__ add(FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
Immediate(Smi::FromInt(1)));
-
- __ bind(&done);
}
@@ -1431,6 +1421,12 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+ Register slot) {
+ __ add(FieldOperand(feedback_vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(1)));
+}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// edi - function
@@ -1446,9 +1442,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
FixedArray::kHeaderSize));
// Increment the call count for monomorphic function calls.
- __ add(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(1)));
+ IncrementCallCount(masm, ebx, edx);
__ mov(ebx, ecx);
__ mov(edx, edi);
@@ -1464,7 +1458,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// edx - slot id
// ebx - vector
Isolate* isolate = masm->isolate();
- Label extra_checks_or_miss, call, call_function;
+ Label extra_checks_or_miss, call, call_function, call_count_incremented;
int argc = arg_count();
ParameterCount actual(argc);
@@ -1493,12 +1487,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(edi, &extra_checks_or_miss);
+ __ bind(&call_function);
+
// Increment the call count for monomorphic function calls.
- __ add(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(1)));
+ IncrementCallCount(masm, ebx, edx);
- __ bind(&call_function);
__ Set(eax, argc);
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
@@ -1539,6 +1532,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ bind(&call);
+
+ // Increment the call count for megamorphic function calls.
+ IncrementCallCount(masm, ebx, edx);
+
+ __ bind(&call_count_incremented);
+
__ Set(eax, argc);
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1564,11 +1563,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ cmp(ecx, NativeContextOperand());
__ j(not_equal, &miss);
- // Initialize the call counter.
- __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(1)));
-
// Store the function. Use a stub since we need a frame for allocation.
// ebx - vector
// edx - slot
@@ -1576,11 +1570,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(isolate);
+ __ push(ebx);
+ __ push(edx);
__ push(edi);
__ push(esi);
__ CallStub(&create_stub);
__ pop(esi);
__ pop(edi);
+ __ pop(edx);
+ __ pop(ebx);
}
__ jmp(&call_function);
@@ -1590,7 +1588,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
GenerateMiss(masm);
- __ jmp(&call);
+ __ jmp(&call_count_incremented);
// Unreachable
__ int3();
@@ -2068,297 +2066,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
}
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: to
- // esp[8]: from
- // esp[12]: string
-
- // Make sure first argument is a string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
-
- // eax: string
- // ebx: instance type
-
- // Calculate length of sub string using the smi values.
- __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
- __ JumpIfNotSmi(ecx, &runtime);
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
- __ JumpIfNotSmi(edx, &runtime);
- __ sub(ecx, edx);
- __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
- Label not_original_string;
- // Shorter than original string's length: an actual substring.
- __ j(below, &not_original_string, Label::kNear);
- // Longer than original string's length or negative: unsafe arguments.
- __ j(above, &runtime);
- // Return original string.
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
- __ bind(&not_original_string);
-
- Label single_char;
- __ cmp(ecx, Immediate(Smi::FromInt(1)));
- __ j(equal, &single_char);
-
- // eax: string
- // ebx: instance type
- // ecx: sub string length (smi)
- // edx: from index (smi)
- // Deal with different string types: update the index if necessary
- // and put the underlying string into edi.
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ test(ebx, Immediate(kIsIndirectStringMask));
- __ j(zero, &seq_or_external_string, Label::kNear);
-
- Factory* factory = isolate()->factory();
- __ test(ebx, Immediate(kSlicedNotConsMask));
- __ j(not_zero, &sliced_string, Label::kNear);
- // Cons string. Check whether it is flat, then fetch first part.
- // Flat cons strings have an empty second part.
- __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
- factory->empty_string());
- __ j(not_equal, &runtime);
- __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
- // Update instance type.
- __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and adjust start index by offset.
- __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
- __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
- // Update instance type.
- __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the expected register.
- __ mov(edi, eax);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // edi: underlying subject string
- // ebx: instance type of underlying subject string
- // edx: adjusted start index (smi)
- // ecx: length (smi)
- __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
- // Short slice. Copy instead of slicing.
- __ j(less, &copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(ebx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_slice, Label::kNear);
- __ AllocateOneByteSlicedString(eax, ebx, no_reg, &runtime);
- __ jmp(&set_slice_header, Label::kNear);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
- __ bind(&set_slice_header);
- __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
- __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
- __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&copy_routine);
- }
-
- // edi: underlying subject string
- // ebx: instance type of underlying subject string
- // edx: adjusted start index (smi)
- // ecx: length (smi)
- // The subject string can only be external or sequential string of either
- // encoding at this point.
- Label two_byte_sequential, runtime_drop_two, sequential_string;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(ebx, Immediate(kExternalStringTag));
- __ j(zero, &sequential_string);
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(ebx, Immediate(kShortExternalStringMask));
- __ j(not_zero, &runtime);
- __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&sequential_string);
- // Stash away (adjusted) index and (underlying) string.
- __ push(edx);
- __ push(edi);
- __ SmiUntag(ecx);
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ test_b(ebx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_sequential);
-
- // Sequential one byte string. Allocate the result.
- __ AllocateOneByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
-
- // eax: result string
- // ecx: result string length
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ pop(edx);
- __ pop(ebx);
- __ SmiUntag(ebx);
- __ lea(edx, FieldOperand(edx, ebx, times_1, SeqOneByteString::kHeaderSize));
-
- // eax: result string
- // ecx: result length
- // edi: first character of result
- // edx: character of sub string start
- StringHelper::GenerateCopyCharacters(
- masm, edi, edx, ecx, ebx, String::ONE_BYTE_ENCODING);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&two_byte_sequential);
- // Sequential two-byte string. Allocate the result.
- __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
-
- // eax: result string
- // ecx: result string length
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(edi,
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ pop(edx);
- __ pop(ebx);
- // As from is a smi it is 2 times the value which matches the size of a two
- // byte character.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ lea(edx, FieldOperand(edx, ebx, times_1, SeqTwoByteString::kHeaderSize));
-
- // eax: result string
- // ecx: result length
- // edi: first character of result
- // edx: character of sub string start
- StringHelper::GenerateCopyCharacters(
- masm, edi, edx, ecx, ebx, String::TWO_BYTE_ENCODING);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- // Drop pushed values on the stack before tail call.
- __ bind(&runtime_drop_two);
- __ Drop(2);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString);
-
- __ bind(&single_char);
- // eax: string
- // ebx: instance type
- // ecx: sub string length (smi)
- // edx: from index (smi)
- StringCharAtGenerator generator(eax, edx, ecx, eax, &runtime, &runtime,
- &runtime, RECEIVER_IS_STRING);
- generator.GenerateFast(masm);
- __ ret(3 * kPointerSize);
- generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
- // The ToString stub takes one argument in eax.
- Label is_number;
- __ JumpIfSmi(eax, &is_number, Label::kNear);
-
- Label not_string;
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
- // eax: receiver
- // edi: receiver map
- __ j(above_equal, &not_string, Label::kNear);
- __ Ret();
- __ bind(&not_string);
-
- Label not_heap_number;
- __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ CmpInstanceType(edi, ODDBALL_TYPE);
- __ j(not_equal, &not_oddball, Label::kNear);
- __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ pop(ecx); // Pop return address.
- __ push(eax); // Push argument.
- __ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
- // The ToName stub takes one argument in eax.
- Label is_number;
- __ JumpIfSmi(eax, &is_number, Label::kNear);
-
- Label not_name;
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ CmpObjectType(eax, LAST_NAME_TYPE, edi);
- // eax: receiver
- // edi: receiver map
- __ j(above, &not_name, Label::kNear);
- __ Ret();
- __ bind(&not_name);
-
- Label not_heap_number;
- __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ CmpInstanceType(edi, ODDBALL_TYPE);
- __ j(not_equal, &not_oddball, Label::kNear);
- __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ pop(ecx); // Pop return address.
- __ push(eax); // Push argument.
- __ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToName);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -3228,17 +2935,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Mode mode) {
Label object_is_black, need_incremental, need_incremental_pop_object;
- __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
- __ and_(regs_.scratch0(), regs_.object());
- __ mov(regs_.scratch1(),
- Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ sub(regs_.scratch1(), Immediate(1));
- __ mov(Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset),
- regs_.scratch1());
- __ j(negative, &need_incremental);
-
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(),
@@ -3580,11 +3276,10 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
Label load_smi_map, compare_map;
Label start_polymorphic;
Label pop_and_miss;
- ExternalReference virtual_register =
- ExternalReference::virtual_handler_register(masm->isolate());
__ push(receiver);
- __ push(vector);
+ // Value, vector and slot are passed on the stack, so no need to save/restore
+ // them.
Register receiver_map = receiver;
Register cached_map = vector;
@@ -3605,12 +3300,9 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
Register handler = feedback;
DCHECK(handler.is(StoreWithVectorDescriptor::ValueRegister()));
__ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ pop(vector);
__ pop(receiver);
__ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ mov(Operand::StaticVariable(virtual_register), handler);
- __ pop(handler); // Pop "value".
- __ jmp(Operand::StaticVariable(virtual_register));
+ __ jmp(handler);
// Polymorphic, we have to loop from 2 to N
__ bind(&start_polymorphic);
@@ -3634,11 +3326,8 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
FixedArray::kHeaderSize + kPointerSize));
__ lea(handler, FieldOperand(handler, Code::kHeaderSize));
__ pop(key);
- __ pop(vector);
__ pop(receiver);
- __ mov(Operand::StaticVariable(virtual_register), handler);
- __ pop(handler); // Pop "value".
- __ jmp(Operand::StaticVariable(virtual_register));
+ __ jmp(handler);
__ bind(&prepare_next);
__ add(counter, Immediate(Smi::FromInt(2)));
@@ -3648,7 +3337,6 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
// We exhausted our array of map handler pairs.
__ bind(&pop_and_miss);
__ pop(key);
- __ pop(vector);
__ pop(receiver);
__ jmp(miss);
@@ -3664,8 +3352,6 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
Label* miss) {
// The store ic value is on the stack.
DCHECK(weak_cell.is(StoreWithVectorDescriptor::ValueRegister()));
- ExternalReference virtual_register =
- ExternalReference::virtual_handler_register(masm->isolate());
// feedback initially contains the feedback array
Label compare_smi_map;
@@ -3681,11 +3367,8 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
__ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
__ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
- // Put the store ic value back in it's register.
- __ mov(Operand::StaticVariable(virtual_register), weak_cell);
- __ pop(weak_cell); // Pop "value".
// jump to the handler.
- __ jmp(Operand::StaticVariable(virtual_register));
+ __ jmp(weak_cell);
// In microbenchmarks, it made sense to unroll this code so that the call to
// the handler is duplicated for a HeapObject receiver and a Smi receiver.
@@ -3695,10 +3378,8 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
__ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
__ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
- __ mov(Operand::StaticVariable(virtual_register), weak_cell);
- __ pop(weak_cell); // Pop "value".
// jump to the handler.
- __ jmp(Operand::StaticVariable(virtual_register));
+ __ jmp(weak_cell);
}
void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
@@ -3709,7 +3390,26 @@ void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register slot = StoreWithVectorDescriptor::SlotRegister(); // edi
Label miss;
- __ push(value);
+ if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
+ // Current stack layout:
+ // - esp[8] -- value
+ // - esp[4] -- slot
+ // - esp[0] -- return address
+ STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+ if (in_frame) {
+ __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
+ // If the vector is not on the stack, then insert the vector beneath
+ // return address in order to prepare for calling handler with
+ // StoreWithVector calling convention.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
+ __ RecordComment("]");
+ } else {
+ __ mov(vector, Operand(esp, 1 * kPointerSize));
+ }
+ __ mov(slot, Operand(esp, 2 * kPointerSize));
+ }
Register scratch = value;
__ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
@@ -3733,19 +3433,9 @@ void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &miss);
- __ pop(value);
- __ push(slot);
- __ push(vector);
masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, slot,
no_reg);
- __ pop(vector);
- __ pop(slot);
- Label no_pop_miss;
- __ jmp(&no_pop_miss);
-
__ bind(&miss);
- __ pop(value);
- __ bind(&no_pop_miss);
StoreIC::GenerateMiss(masm);
}
@@ -3767,17 +3457,13 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
Label load_smi_map, compare_map;
Label transition_call;
Label pop_and_miss;
- ExternalReference virtual_register =
- ExternalReference::virtual_handler_register(masm->isolate());
- ExternalReference virtual_slot =
- ExternalReference::virtual_slot_register(masm->isolate());
__ push(receiver);
- __ push(vector);
+ // Value, vector and slot are passed on the stack, so no need to save/restore
+ // them.
Register receiver_map = receiver;
Register cached_map = vector;
- Register value = StoreDescriptor::ValueRegister();
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &load_smi_map);
@@ -3788,15 +3474,18 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
__ push(key);
// Current stack layout:
// - esp[0] -- key
- // - esp[4] -- vector
- // - esp[8] -- receiver
- // - esp[12] -- value
- // - esp[16] -- return address
+ // - esp[4] -- receiver
+ // - esp[8] -- return address
+ // - esp[12] -- vector
+ // - esp[16] -- slot
+ // - esp[20] -- value
//
- // Required stack layout for handler call:
+ // Required stack layout for handler call (see StoreWithVectorDescriptor):
// - esp[0] -- return address
- // - receiver, key, value, vector, slot in registers.
- // - handler in virtual register.
+ // - esp[4] -- vector
+ // - esp[8] -- slot
+ // - esp[12] -- value
+ // - receiver, key, handler in registers.
Register counter = key;
__ mov(counter, Immediate(Smi::FromInt(0)));
__ bind(&next_loop);
@@ -3811,43 +3500,57 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
__ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize + 2 * kPointerSize));
__ pop(key);
- __ pop(vector);
__ pop(receiver);
__ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
- __ mov(Operand::StaticVariable(virtual_register), feedback);
- __ pop(value);
- __ jmp(Operand::StaticVariable(virtual_register));
+ __ jmp(feedback);
__ bind(&transition_call);
// Current stack layout:
// - esp[0] -- key
- // - esp[4] -- vector
- // - esp[8] -- receiver
- // - esp[12] -- value
- // - esp[16] -- return address
+ // - esp[4] -- receiver
+ // - esp[8] -- return address
+ // - esp[12] -- vector
+ // - esp[16] -- slot
+ // - esp[20] -- value
//
- // Required stack layout for handler call:
+ // Required stack layout for handler call (see StoreTransitionDescriptor):
// - esp[0] -- return address
- // - receiver, key, value, map, vector in registers.
- // - handler and slot in virtual registers.
- __ mov(Operand::StaticVariable(virtual_slot), slot);
+ // - esp[4] -- vector
+ // - esp[8] -- slot
+ // - esp[12] -- value
+ // - receiver, key, map, handler in registers.
__ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize + 2 * kPointerSize));
__ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
- __ mov(Operand::StaticVariable(virtual_register), feedback);
__ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
// The weak cell may have been cleared.
__ JumpIfSmi(cached_map, &pop_and_miss);
- DCHECK(!cached_map.is(VectorStoreTransitionDescriptor::MapRegister()));
- __ mov(VectorStoreTransitionDescriptor::MapRegister(), cached_map);
+ DCHECK(!cached_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ mov(StoreTransitionDescriptor::MapRegister(), cached_map);
- // Pop key into place.
+ // Call store transition handler using StoreTransitionDescriptor calling
+ // convention.
__ pop(key);
- __ pop(vector);
__ pop(receiver);
- __ pop(value);
- __ jmp(Operand::StaticVariable(virtual_register));
+ // Ensure that the transition handler we are going to call has the same
+ // number of stack arguments which means that we don't have to adapt them
+ // before the call.
+ STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+ STATIC_ASSERT(StoreTransitionDescriptor::kStackArgumentsCount == 3);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
+ StoreWithVectorDescriptor::kValue ==
+ StoreTransitionDescriptor::kParameterCount -
+ StoreTransitionDescriptor::kValue);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
+ StoreWithVectorDescriptor::kSlot ==
+ StoreTransitionDescriptor::kParameterCount -
+ StoreTransitionDescriptor::kSlot);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
+ StoreWithVectorDescriptor::kVector ==
+ StoreTransitionDescriptor::kParameterCount -
+ StoreTransitionDescriptor::kVector);
+ __ jmp(feedback);
__ bind(&prepare_next);
__ add(counter, Immediate(Smi::FromInt(3)));
@@ -3857,7 +3560,6 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
// We exhausted our array of map handler pairs.
__ bind(&pop_and_miss);
__ pop(key);
- __ pop(vector);
__ pop(receiver);
__ jmp(miss);
@@ -3874,7 +3576,26 @@ void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register slot = StoreWithVectorDescriptor::SlotRegister(); // edi
Label miss;
- __ push(value);
+ if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
+ // Current stack layout:
+ // - esp[8] -- value
+ // - esp[4] -- slot
+ // - esp[0] -- return address
+ STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+ if (in_frame) {
+ __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
+ // If the vector is not on the stack, then insert the vector beneath
+ // return address in order to prepare for calling handler with
+ // StoreWithVector calling convention.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
+ __ RecordComment("]");
+ } else {
+ __ mov(vector, Operand(esp, 1 * kPointerSize));
+ }
+ __ mov(slot, Operand(esp, 2 * kPointerSize));
+ }
Register scratch = value;
__ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
@@ -3899,8 +3620,6 @@ void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &try_poly_name);
- __ pop(value);
-
Handle<Code> megamorphic_stub =
KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
@@ -3917,7 +3636,6 @@ void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
&miss);
__ bind(&miss);
- __ pop(value);
KeyedStoreIC::GenerateMiss(masm);
}
@@ -4564,7 +4282,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
__ j(greater, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -4953,7 +4671,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
__ j(greater, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index f1972b9561..220484c1bf 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -39,19 +39,11 @@ const Register StoreDescriptor::SlotRegister() { return edi; }
const Register StoreWithVectorDescriptor::VectorRegister() { return ebx; }
-const Register VectorStoreTransitionDescriptor::SlotRegister() {
- return no_reg;
-}
-
-
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return ebx; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return edi; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
+const Register StoreTransitionDescriptor::VectorRegister() { return ebx; }
+const Register StoreTransitionDescriptor::MapRegister() { return edi; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
@@ -365,7 +357,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
edi, // callee
@@ -400,7 +392,19 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
eax, // argument count (not including receiver)
edx, // new target
edi, // constructor
- ebx, // address of first argument
+ ebx, // allocation site feedback
+ ecx, // address of first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ eax, // argument count (not including receiver)
+ edx, // target to the call. It is checked to be Array function.
+ ebx, // allocation site feedback
+ ecx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 83c7ce8917..2bd8760c3a 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -173,9 +173,8 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
Label* condition_met,
Label::Distance distance) {
- const int mask =
- (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
- CheckPageFlag(object, scratch, mask, cc, condition_met, distance);
+ CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc,
+ condition_met, distance);
}
@@ -1545,7 +1544,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 08cc7ceb64..2220ca7c4f 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -798,6 +798,24 @@ class MacroAssembler: public Assembler {
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+ // below the caller's sp (on ia32 it's at least return address).
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 1) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ DCHECK_LT(parameter_index, Descriptor::kParameterCount);
+ DCHECK_LE(Descriptor::kParameterCount - Descriptor::kStackArgumentsCount,
+ parameter_index);
+ int offset = (Descriptor::kParameterCount - parameter_index - 1 +
+ sp_to_ra_offset_in_words) *
+ kPointerSize;
+ mov(reg, Operand(esp, offset));
+ }
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the esp register.
void Drop(int element_count);
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 4ed765e73f..691fe3d23d 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -111,15 +111,21 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
- __ push(vector);
+ STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+ LoadWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+ StoreWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+ StoreTransitionDescriptor::kVector);
__ push(slot);
+ __ push(vector);
}
void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
- __ pop(slot);
__ pop(vector);
+ __ pop(slot);
}
@@ -129,6 +135,13 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ add(sp, sp, Operand(2 * kPointerSize));
}
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+ // No-op. Return address is in lr register.
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+ // No-op. Return address is in lr register.
+}
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -330,24 +343,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ TailCallStub(&stub);
}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(),
- StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister());
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm())
@@ -366,12 +361,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
- Register current_map, Register destination_map) {
- DCHECK(false); // Not implemented.
-}
-
-
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
Register map_reg,
Register scratch,
@@ -629,6 +618,9 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+ STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index fee6ebf259..10ec578f7b 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -441,10 +441,11 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(),
+ __ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister());
+ StoreWithVectorDescriptor::VectorRegister(),
+ StoreWithVectorDescriptor::ReceiverRegister(),
+ StoreWithVectorDescriptor::NameRegister());
}
@@ -454,6 +455,13 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ StoreIC_PushArgs(masm);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 277b4e7117..3f97fddcd5 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -20,15 +20,21 @@ namespace internal {
void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
- __ Push(vector);
+ STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+ LoadWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+ StoreWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+ StoreTransitionDescriptor::kVector);
__ Push(slot);
+ __ Push(vector);
}
void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
- __ Pop(slot);
__ Pop(vector);
+ __ Pop(slot);
}
@@ -38,6 +44,13 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ Drop(2);
}
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+ // No-op. Return address is in lr register.
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+ // No-op. Return address is in lr register.
+}
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -323,25 +336,6 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
__ Ret();
}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(),
- StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister());
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- ASM_LOCATION("ElementHandlerCompiler::GenerateStoreSlow");
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm())
@@ -398,12 +392,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
- Register current_map, Register destination_map) {
- DCHECK(false); // Not implemented.
-}
-
-
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
Register map_reg,
Register scratch,
@@ -664,6 +652,9 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+ STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index 9d66eb2495..fa9d7c16b7 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -445,10 +445,11 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(),
+ __ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister());
+ StoreWithVectorDescriptor::VectorRegister(),
+ StoreWithVectorDescriptor::ReceiverRegister(),
+ StoreWithVectorDescriptor::NameRegister());
}
@@ -458,6 +459,14 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateSlow");
+ StoreIC_PushArgs(masm);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index b6b81def54..3b2e115b4f 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -129,13 +129,13 @@ Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg,
Register PropertyHandlerCompiler::Frontend(Handle<Name> name) {
Label miss;
- if (IC::ICUseVector(kind())) {
+ if (IC::ShouldPushPopSlotAndVector(kind())) {
PushVectorAndSlot();
}
Register reg = FrontendHeader(receiver(), name, &miss, RETURN_HOLDER);
FrontendFooter(name, &miss);
// The footer consumes the vector and slot from the stack if miss occurs.
- if (IC::ICUseVector(kind())) {
+ if (IC::ShouldPushPopSlotAndVector(kind())) {
DiscardVectorAndSlot();
}
return reg;
@@ -209,12 +209,12 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
Handle<Name> name) {
Label miss;
- if (IC::ICUseVector(kind())) {
+ if (IC::ShouldPushPopSlotAndVector(kind())) {
DCHECK(kind() == Code::LOAD_IC);
PushVectorAndSlot();
}
NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
- if (IC::ICUseVector(kind())) {
+ if (IC::ShouldPushPopSlotAndVector(kind())) {
DiscardVectorAndSlot();
}
GenerateLoadConstant(isolate()->factory()->undefined_value());
@@ -247,7 +247,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
void NamedLoadHandlerCompiler::InterceptorVectorSlotPush(Register holder_reg) {
- if (IC::ICUseVector(kind())) {
+ if (IC::ShouldPushPopSlotAndVector(kind())) {
if (holder_reg.is(receiver())) {
PushVectorAndSlot();
} else {
@@ -260,7 +260,7 @@ void NamedLoadHandlerCompiler::InterceptorVectorSlotPush(Register holder_reg) {
void NamedLoadHandlerCompiler::InterceptorVectorSlotPop(Register holder_reg,
PopMode mode) {
- if (IC::ICUseVector(kind())) {
+ if (IC::ShouldPushPopSlotAndVector(kind())) {
if (mode == DISCARD) {
DiscardVectorAndSlot();
} else {
@@ -438,7 +438,31 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
Handle<Map> transition, Handle<Name> name) {
Label miss;
- PushVectorAndSlot();
+ // Ensure that the StoreTransitionStub we are going to call has the same
+ // number of stack arguments. This means that we don't have to adapt them
+ // if we decide to call the transition or miss stub.
+ STATIC_ASSERT(Descriptor::kStackArgumentsCount ==
+ StoreTransitionDescriptor::kStackArgumentsCount);
+ STATIC_ASSERT(Descriptor::kStackArgumentsCount == 0 ||
+ Descriptor::kStackArgumentsCount == 3);
+ STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kValue ==
+ StoreTransitionDescriptor::kParameterCount -
+ StoreTransitionDescriptor::kValue);
+ STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kSlot ==
+ StoreTransitionDescriptor::kParameterCount -
+ StoreTransitionDescriptor::kSlot);
+ STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kVector ==
+ StoreTransitionDescriptor::kParameterCount -
+ StoreTransitionDescriptor::kVector);
+
+ if (Descriptor::kPassLastArgsOnStack) {
+ __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
+ }
+
+ bool need_save_restore = IC::ShouldPushPopSlotAndVector(kind());
+ if (need_save_restore) {
+ PushVectorAndSlot();
+ }
// Check that we are allowed to write this.
bool is_nonexistent = holder()->map() == transition->GetBackPointer();
@@ -470,23 +494,17 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
DCHECK(!transition->is_access_check_needed());
// Call to respective StoreTransitionStub.
- bool virtual_args = StoreTransitionHelper::HasVirtualSlotArg();
- Register map_reg = StoreTransitionHelper::MapRegister();
+ Register map_reg = StoreTransitionDescriptor::MapRegister();
if (details.type() == DATA_CONSTANT) {
DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
- Register tmp =
- virtual_args ? StoreWithVectorDescriptor::VectorRegister() : map_reg;
- GenerateRestoreMap(transition, tmp, scratch2(), &miss);
- GenerateConstantCheck(tmp, descriptor, value(), scratch2(), &miss);
- if (virtual_args) {
- // This will move the map from tmp into map_reg.
- RearrangeVectorAndSlot(tmp, map_reg);
- } else {
+ GenerateRestoreMap(transition, map_reg, scratch1(), &miss);
+ GenerateConstantCheck(map_reg, descriptor, value(), scratch1(), &miss);
+ if (need_save_restore) {
PopVectorAndSlot();
}
GenerateRestoreName(name);
- StoreTransitionStub stub(isolate());
+ StoreMapStub stub(isolate());
GenerateTailCall(masm(), stub.GetCode());
} else {
@@ -498,24 +516,29 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
Map::cast(transition->GetBackPointer())->unused_property_fields() == 0
? StoreTransitionStub::ExtendStorageAndStoreMapAndValue
: StoreTransitionStub::StoreMapAndValue;
-
- Register tmp =
- virtual_args ? StoreWithVectorDescriptor::VectorRegister() : map_reg;
- GenerateRestoreMap(transition, tmp, scratch2(), &miss);
- if (virtual_args) {
- RearrangeVectorAndSlot(tmp, map_reg);
- } else {
+ GenerateRestoreMap(transition, map_reg, scratch1(), &miss);
+ if (need_save_restore) {
PopVectorAndSlot();
}
- GenerateRestoreName(name);
- StoreTransitionStub stub(isolate(),
- FieldIndex::ForDescriptor(*transition, descriptor),
- representation, store_mode);
+ // We need to pass name on the stack.
+ PopReturnAddress(this->name());
+ __ Push(name);
+ PushReturnAddress(this->name());
+
+ FieldIndex index = FieldIndex::ForDescriptor(*transition, descriptor);
+ __ Move(StoreNamedTransitionDescriptor::FieldOffsetRegister(),
+ Smi::FromInt(index.index() << kPointerSizeLog2));
+
+ StoreTransitionStub stub(isolate(), index.is_inobject(), representation,
+ store_mode);
GenerateTailCall(masm(), stub.GetCode());
}
- GenerateRestoreName(&miss, name);
- PopVectorAndSlot();
+ __ bind(&miss);
+ if (need_save_restore) {
+ PopVectorAndSlot();
+ }
+ GenerateRestoreName(name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
return GetCode(kind(), name);
@@ -534,7 +557,10 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
FieldType* field_type = *it->GetFieldType();
bool need_save_restore = false;
if (RequiresFieldTypeChecks(field_type)) {
- need_save_restore = IC::ICUseVector(kind());
+ need_save_restore = IC::ShouldPushPopSlotAndVector(kind());
+ if (Descriptor::kPassLastArgsOnStack) {
+ __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
+ }
if (need_save_restore) PushVectorAndSlot();
GenerateFieldTypeChecks(field_type, value(), &miss);
if (need_save_restore) PopVectorAndSlot();
@@ -568,6 +594,9 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
GenerateTailCall(masm(), slow_stub);
}
Register holder = Frontend(name);
+ if (Descriptor::kPassLastArgsOnStack) {
+ __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
+ }
GenerateApiAccessorCall(masm(), call_optimization, handle(object->map()),
receiver(), scratch2(), true, value(), holder,
accessor_index);
@@ -601,13 +630,21 @@ Handle<Object> ElementHandlerCompiler::GetKeyedLoadHandler(
TRACE_HANDLER_STATS(isolate, KeyedLoadIC_KeyedLoadSloppyArgumentsStub);
return KeyedLoadSloppyArgumentsStub(isolate).GetCode();
}
+ bool is_js_array = instance_type == JS_ARRAY_TYPE;
if (elements_kind == DICTIONARY_ELEMENTS) {
+ if (FLAG_tf_load_ic_stub) {
+ int config = KeyedLoadElementsKind::encode(elements_kind) |
+ KeyedLoadConvertHole::encode(false) |
+ KeyedLoadIsJsArray::encode(is_js_array) |
+ LoadHandlerTypeBit::encode(kLoadICHandlerForElements);
+ return handle(Smi::FromInt(config), isolate);
+ }
TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadDictionaryElementStub);
return LoadDictionaryElementStub(isolate).GetCode();
}
DCHECK(IsFastElementsKind(elements_kind) ||
IsFixedTypedArrayElementsKind(elements_kind));
- bool is_js_array = instance_type == JS_ARRAY_TYPE;
+ // TODO(jkummerow): Use IsHoleyElementsKind(elements_kind).
bool convert_hole_to_undefined =
is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
*receiver_map == isolate->get_initial_js_array_map(elements_kind);
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 525889b80b..63ca050ca2 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -53,6 +53,9 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
void DiscardVectorAndSlot();
+ void PushReturnAddress(Register tmp);
+ void PopReturnAddress(Register tmp);
+
// TODO(verwaest): Make non-static.
static void GenerateApiAccessorCall(MacroAssembler* masm,
const CallOptimization& optimization,
@@ -212,13 +215,24 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
public:
+ // All store handlers use StoreWithVectorDescriptor calling convention.
+ typedef StoreWithVectorDescriptor Descriptor;
+
explicit NamedStoreHandlerCompiler(Isolate* isolate, Handle<Map> map,
Handle<JSObject> holder)
: PropertyHandlerCompiler(isolate, Code::STORE_IC, map, holder,
- kCacheOnReceiver) {}
+ kCacheOnReceiver) {
+#ifdef DEBUG
+ if (Descriptor::kPassLastArgsOnStack) {
+ ZapStackArgumentsRegisterAliases();
+ }
+#endif
+ }
virtual ~NamedStoreHandlerCompiler() {}
+ void ZapStackArgumentsRegisterAliases();
+
Handle<Code> CompileStoreTransition(Handle<Map> transition,
Handle<Name> name);
Handle<Code> CompileStoreField(LookupIterator* it);
@@ -249,10 +263,6 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
virtual void FrontendFooter(Handle<Name> name, Label* miss);
void GenerateRestoreName(Label* label, Handle<Name> name);
- // Pop the vector and slot into appropriate registers, moving the map in
- // the process. (This is an accomodation for register pressure on ia32).
- void RearrangeVectorAndSlot(Register current_map, Register destination_map);
-
private:
void GenerateRestoreName(Handle<Name> name);
void GenerateRestoreMap(Handle<Map> transition, Register map_reg,
@@ -283,8 +293,6 @@ class ElementHandlerCompiler : public PropertyHandlerCompiler {
Isolate* isolate);
void CompileElementHandlers(MapHandleList* receiver_maps,
List<Handle<Object>>* handlers);
-
- static void GenerateStoreSlow(MacroAssembler* masm);
};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index b332f117b8..06c58b8aae 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -59,15 +59,21 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
- __ push(vector);
+ STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+ LoadWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+ StoreWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+ StoreTransitionDescriptor::kVector);
__ push(slot);
+ __ push(vector);
}
void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
- __ pop(slot);
__ pop(vector);
+ __ pop(slot);
}
@@ -77,6 +83,15 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ add(esp, Immediate(2 * kPointerSize));
}
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+ MacroAssembler* masm = this->masm();
+ __ push(tmp);
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+ MacroAssembler* masm = this->masm();
+ __ pop(tmp);
+}
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -150,12 +165,16 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
DCHECK(!accessor_holder.is(scratch));
// Copy return value.
__ pop(scratch);
- // receiver
+
+ if (is_store) {
+ // Discard stack arguments.
+ __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
+ kPointerSize));
+ }
+ // Write the receiver and arguments to stack frame.
__ push(receiver);
- // Write the arguments to stack frame.
if (is_store) {
- DCHECK(!receiver.is(store_parameter));
- DCHECK(!scratch.is(store_parameter));
+ DCHECK(!AreAliased(receiver, scratch, store_parameter));
__ push(store_parameter);
}
__ push(scratch);
@@ -252,8 +271,13 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
- // -- esp[0] : return address
+ // -- esp[12] : value
+ // -- esp[8] : slot
+ // -- esp[4] : vector
+ // -- esp[0] : return address
// -----------------------------------
+ __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
+
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -290,7 +314,14 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// Restore context register.
__ pop(esi);
}
- __ ret(0);
+ if (accessor_index >= 0) {
+ __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
+ } else {
+ // If we generate a global code snippet for deoptimization only, don't try
+ // to drop stack arguments for the StoreIC because they are not a part of
+ // expression stack and deoptimizer does not reconstruct them.
+ __ ret(0);
+ }
}
@@ -316,32 +347,6 @@ static void CompileCallLoadPropertyWithInterceptor(
__ CallRuntime(id);
}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
- Register vector = StoreWithVectorDescriptor::VectorRegister();
-
- __ xchg(receiver, Operand(esp, 0));
- __ push(name);
- __ push(value);
- __ push(slot);
- __ push(vector);
- __ push(receiver); // which contains the return address.
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm())
@@ -360,19 +365,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
- Register current_map, Register destination_map) {
- DCHECK(destination_map.is(StoreTransitionHelper::MapRegister()));
- DCHECK(current_map.is(StoreTransitionHelper::VectorRegister()));
- ExternalReference virtual_slot =
- ExternalReference::virtual_slot_register(isolate());
- __ mov(destination_map, current_map);
- __ pop(current_map);
- __ mov(Operand::StaticVariable(virtual_slot), current_map);
- __ pop(current_map); // put vector in place.
-}
-
-
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
Register map_reg,
Register scratch,
@@ -532,7 +524,7 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
__ bind(miss);
- if (IC::ICUseVector(kind())) {
+ if (IC::ShouldPushPopSlotAndVector(kind())) {
DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
}
@@ -547,7 +539,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
GenerateRestoreName(miss, name);
- if (IC::ICUseVector(kind())) PopVectorAndSlot();
+ DCHECK(!IC::ShouldPushPopSlotAndVector(kind()));
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
@@ -641,13 +633,26 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+ // Zap register aliases of the arguments passed on the stack to ensure they
+ // are properly loaded by the handler (debug-only).
+ STATIC_ASSERT(Descriptor::kPassLastArgsOnStack);
+ STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
+ __ mov(Descriptor::ValueRegister(), Immediate(kDebugZapValue));
+ __ mov(Descriptor::SlotRegister(), Immediate(kDebugZapValue));
+ __ mov(Descriptor::VectorRegister(), Immediate(kDebugZapValue));
+}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
LanguageMode language_mode) {
Register holder_reg = Frontend(name);
+ __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
__ pop(scratch1()); // remove the return address
+ // Discard stack arguments.
+ __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
+ kPointerSize));
__ push(receiver());
__ push(holder_reg);
// If the callback cannot leak, then push the callback directly,
@@ -679,7 +684,7 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
- if (IC::ICUseVector(kind())) {
+ if (IC::ShouldPushPopSlotAndVector(kind())) {
PushVectorAndSlot();
}
FrontendHeader(receiver(), name, &miss, DONT_RETURN_ANYTHING);
@@ -701,7 +706,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->ic_named_load_global_stub(), 1);
// The code above already loads the result into the return register.
- if (IC::ICUseVector(kind())) {
+ if (IC::ShouldPushPopSlotAndVector(kind())) {
DiscardVectorAndSlot();
}
__ ret(0);
diff --git a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
index d93b67bffc..a52f04689a 100644
--- a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
@@ -15,14 +15,21 @@ namespace internal {
void PropertyICCompiler::GenerateRuntimeSetProperty(
MacroAssembler* masm, LanguageMode language_mode) {
- // Return address is on the stack.
- DCHECK(!ebx.is(StoreDescriptor::ReceiverRegister()) &&
- !ebx.is(StoreDescriptor::NameRegister()) &&
- !ebx.is(StoreDescriptor::ValueRegister()));
+ typedef StoreWithVectorDescriptor Descriptor;
+ STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
+ // ----------- S t a t e -------------
+ // -- esp[12] : value
+ // -- esp[8] : slot
+ // -- esp[4] : vector
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
+ Descriptor::kValue);
+
+ __ mov(Operand(esp, 12), Descriptor::ReceiverRegister());
+ __ mov(Operand(esp, 8), Descriptor::NameRegister());
+ __ mov(Operand(esp, 4), Descriptor::ValueRegister());
__ pop(ebx);
- __ push(StoreDescriptor::ReceiverRegister());
- __ push(StoreDescriptor::NameRegister());
- __ push(StoreDescriptor::ValueRegister());
__ push(Immediate(Smi::FromInt(language_mode)));
__ push(ebx); // return address
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index 0550d92e91..b7496d4624 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -409,7 +409,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
}
// It's irrelevant whether array is smi-only or not when writing a smi.
__ mov(FixedArrayElementOperand(ebx, key), value);
- __ ret(0);
+ __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
__ bind(&non_smi_value);
// Escape to elements kind transition case.
@@ -428,7 +428,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ mov(edx, value); // Preserve the value which is returned.
__ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- __ ret(0);
+ __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
__ bind(fast_double);
if (check_map == kCheckMap) {
@@ -457,7 +457,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ add(FieldOperand(receiver, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
}
- __ ret(0);
+ __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
__ bind(&transition_smi_elements);
__ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -504,12 +504,13 @@ static void KeyedStoreGenerateMegamorphicHelper(
void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
LanguageMode language_mode) {
+ typedef StoreWithVectorDescriptor Descriptor;
// Return address is on the stack.
Label slow, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
Label array, extra, check_if_double_array, maybe_name_key, miss;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register key = StoreDescriptor::NameRegister();
+ Register receiver = Descriptor::ReceiverRegister();
+ Register key = Descriptor::NameRegister();
DCHECK(receiver.is(edx));
DCHECK(key.is(ecx));
@@ -522,6 +523,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
+
+ __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
+ Descriptor::kValue);
+
// Check that the key is a smi.
__ JumpIfNotSmi(key, &maybe_name_key);
__ CmpInstanceType(edi, JS_ARRAY_TYPE);
@@ -551,22 +556,9 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ push(Immediate(Smi::FromInt(slot)));
- __ push(Immediate(dummy_vector));
-
masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
no_reg);
- __ pop(StoreWithVectorDescriptor::VectorRegister());
- __ pop(StoreWithVectorDescriptor::SlotRegister());
-
// Cache miss.
__ jmp(&miss);
@@ -705,18 +697,21 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
- Register vector = StoreWithVectorDescriptor::VectorRegister();
-
- __ xchg(receiver, Operand(esp, 0));
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
+ Register name = StoreWithVectorDescriptor::NameRegister();
+
+ STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+ // Current stack layout:
+ // - esp[12] -- value
+ // - esp[8] -- slot
+ // - esp[4] -- vector
+ // - esp[0] -- return address
+
+ Register return_address = StoreWithVectorDescriptor::SlotRegister();
+ __ pop(return_address);
+ __ push(receiver);
__ push(name);
- __ push(value);
- __ push(slot);
- __ push(vector);
- __ push(receiver); // Contains the return address.
+ __ push(return_address);
}
@@ -730,32 +725,33 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ typedef StoreWithVectorDescriptor Descriptor;
Label restore_miss;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Register vector = StoreWithVectorDescriptor::VectorRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
-
- // A lot of registers are needed for storing to slow case
- // objects. Push and restore receiver but rely on
- // GenerateDictionaryStore preserving the value and name.
+ Register receiver = Descriptor::ReceiverRegister();
+ Register name = Descriptor::NameRegister();
+ Register value = Descriptor::ValueRegister();
+ // Since the slot and vector values are passed on the stack we can use
+ // respective registers as scratch registers.
+ Register scratch1 = Descriptor::VectorRegister();
+ Register scratch2 = Descriptor::SlotRegister();
+
+ __ LoadParameterFromStack<Descriptor>(value, Descriptor::kValue);
+
+ // A lot of registers are needed for storing to slow case objects.
+ // Push and restore receiver but rely on GenerateDictionaryStore preserving
+ // the value and name.
__ push(receiver);
- __ push(vector);
- __ push(slot);
- Register dictionary = ebx;
+ Register dictionary = receiver;
__ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
- receiver, edi);
- __ Drop(3);
+ scratch1, scratch2);
+ __ Drop(1);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->ic_store_normal_hit(), 1);
- __ ret(0);
+ __ ret(Descriptor::kStackArgumentsCount * kPointerSize);
__ bind(&restore_miss);
- __ pop(slot);
- __ pop(vector);
__ pop(receiver);
__ IncrementCounter(counters->ic_store_normal_miss(), 1);
GenerateMiss(masm);
@@ -770,6 +766,13 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
#undef __
diff --git a/deps/v8/src/ic/ia32/stub-cache-ia32.cc b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
index 939e7fc0fd..82700d34a7 100644
--- a/deps/v8/src/ic/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
@@ -22,8 +22,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
ExternalReference key_offset(stub_cache->key_reference(table));
ExternalReference value_offset(stub_cache->value_reference(table));
ExternalReference map_offset(stub_cache->map_reference(table));
- ExternalReference virtual_register =
- ExternalReference::virtual_handler_register(masm->isolate());
Label miss;
Code::Kind ic_kind = stub_cache->ic_kind();
@@ -55,19 +53,15 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
}
#endif
- // The vector and slot were pushed onto the stack before starting the
- // probe, and need to be dropped before calling the handler.
if (is_vector_store) {
- // The overlap here is rather embarrassing. One does what one must.
- Register vector = StoreWithVectorDescriptor::VectorRegister();
+ // The value, vector and slot were passed to the IC on the stack and
+ // they are still there. So we can just jump to the handler.
DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
__ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ pop(vector);
- __ mov(Operand::StaticVariable(virtual_register), extra);
- __ pop(extra); // Pop "slot".
- // Jump to the first instruction in the code stub.
- __ jmp(Operand::StaticVariable(virtual_register));
+ __ jmp(extra);
} else {
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
__ pop(LoadWithVectorDescriptor::VectorRegister());
__ pop(LoadDescriptor::SlotRegister());
__ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -110,19 +104,10 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
// Jump to the first instruction in the code stub.
if (is_vector_store) {
- // The vector and slot were pushed onto the stack before starting the
- // probe, and need to be dropped before calling the handler.
- Register vector = StoreWithVectorDescriptor::VectorRegister();
DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ mov(Operand::StaticVariable(virtual_register), offset);
- __ pop(vector);
- __ pop(offset); // Pop "slot".
- __ jmp(Operand::StaticVariable(virtual_register));
- } else {
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
}
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(offset);
// Pop at miss.
__ bind(&miss);
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index f77c40a396..4fc8ada8df 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -7,7 +7,6 @@
#include "src/ic/ic.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/macro-assembler.h"
#include "src/prototype.h"
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index d157c926dd..ea1f16c824 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -189,15 +189,14 @@ void BinaryOpICState::GenerateAheadOfTime(
#undef GENERATE
}
-
-Type* BinaryOpICState::GetResultType() const {
+AstType* BinaryOpICState::GetResultType() const {
Kind result_kind = result_kind_;
if (HasSideEffects()) {
result_kind = NONE;
} else if (result_kind == GENERIC && op_ == Token::ADD) {
- return Type::NumberOrString();
+ return AstType::NumberOrString();
} else if (result_kind == NUMBER && op_ == Token::SHR) {
- return Type::Unsigned32();
+ return AstType::Unsigned32();
}
DCHECK_NE(GENERIC, result_kind);
return KindToType(result_kind);
@@ -318,20 +317,20 @@ const char* BinaryOpICState::KindToString(Kind kind) {
// static
-Type* BinaryOpICState::KindToType(Kind kind) {
+AstType* BinaryOpICState::KindToType(Kind kind) {
switch (kind) {
case NONE:
- return Type::None();
+ return AstType::None();
case SMI:
- return Type::SignedSmall();
+ return AstType::SignedSmall();
case INT32:
- return Type::Signed32();
+ return AstType::Signed32();
case NUMBER:
- return Type::Number();
+ return AstType::Number();
case STRING:
- return Type::String();
+ return AstType::String();
case GENERIC:
- return Type::Any();
+ return AstType::Any();
}
UNREACHABLE();
return NULL;
@@ -365,29 +364,28 @@ const char* CompareICState::GetStateName(State state) {
return NULL;
}
-
-Type* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
+AstType* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
switch (state) {
case UNINITIALIZED:
- return Type::None();
+ return AstType::None();
case BOOLEAN:
- return Type::Boolean();
+ return AstType::Boolean();
case SMI:
- return Type::SignedSmall();
+ return AstType::SignedSmall();
case NUMBER:
- return Type::Number();
+ return AstType::Number();
case STRING:
- return Type::String();
+ return AstType::String();
case INTERNALIZED_STRING:
- return Type::InternalizedString();
+ return AstType::InternalizedString();
case UNIQUE_NAME:
- return Type::UniqueName();
+ return AstType::UniqueName();
case RECEIVER:
- return Type::Receiver();
+ return AstType::Receiver();
case KNOWN_RECEIVER:
- return map.is_null() ? Type::Receiver() : Type::Class(map, zone);
+ return map.is_null() ? AstType::Receiver() : AstType::Class(map, zone);
case GENERIC:
- return Type::Any();
+ return AstType::Any();
}
UNREACHABLE();
return NULL;
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index 6888a7ab5c..38be57ac04 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -6,6 +6,7 @@
#define V8_IC_STATE_H_
#include "src/macro-assembler.h"
+#include "src/parsing/token.h"
namespace v8 {
namespace internal {
@@ -120,9 +121,9 @@ class BinaryOpICState final BASE_EMBEDDED {
Token::Value op() const { return op_; }
Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
- Type* GetLeftType() const { return KindToType(left_kind_); }
- Type* GetRightType() const { return KindToType(right_kind_); }
- Type* GetResultType() const;
+ AstType* GetLeftType() const { return KindToType(left_kind_); }
+ AstType* GetRightType() const { return KindToType(right_kind_); }
+ AstType* GetResultType() const;
void Update(Handle<Object> left, Handle<Object> right, Handle<Object> result);
@@ -140,7 +141,7 @@ class BinaryOpICState final BASE_EMBEDDED {
Kind UpdateKind(Handle<Object> object, Kind kind) const;
static const char* KindToString(Kind kind);
- static Type* KindToType(Kind kind);
+ static AstType* KindToType(Kind kind);
static bool KindMaybeSmi(Kind kind) {
return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
}
@@ -202,8 +203,8 @@ class CompareICState {
GENERIC
};
- static Type* StateToType(Zone* zone, State state,
- Handle<Map> map = Handle<Map>());
+ static AstType* StateToType(Zone* zone, State state,
+ Handle<Map> map = Handle<Map>());
static State NewInputState(State old_state, Handle<Object> value);
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index b72791aa9e..0e751bd358 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -183,6 +183,19 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
extra_ic_state_ = target->extra_ic_state();
}
+// The ICs that don't pass slot and vector through the stack have to
+// save/restore them in the dispatcher.
+bool IC::ShouldPushPopSlotAndVector(Code::Kind kind) {
+ if (kind == Code::LOAD_IC || kind == Code::LOAD_GLOBAL_IC ||
+ kind == Code::KEYED_LOAD_IC || kind == Code::CALL_IC) {
+ return true;
+ }
+ if (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC) {
+ return !StoreWithVectorDescriptor::kPassLastArgsOnStack;
+ }
+ return false;
+}
+
InlineCacheState IC::StateFromCode(Code* code) {
Isolate* isolate = code->GetIsolate();
switch (code->kind()) {
@@ -231,13 +244,6 @@ Code* IC::GetCode() const {
return code;
}
-
-bool IC::AddressIsOptimizedCode() const {
- Code* host =
- isolate()->inner_pointer_to_code_cache()->GetCacheEntry(address())->code;
- return host->kind() == Code::OPTIMIZED_FUNCTION;
-}
-
static void LookupForRead(LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -270,7 +276,7 @@ static void LookupForRead(LookupIterator* it) {
}
}
-bool IC::ShouldRecomputeHandler(Handle<Object> receiver, Handle<String> name) {
+bool IC::ShouldRecomputeHandler(Handle<String> name) {
if (!RecomputeHandlerForName(name)) return false;
DCHECK(UseVector());
@@ -320,7 +326,7 @@ void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
// Remove the target from the code cache if it became invalid
// because of changes in the prototype chain to avoid hitting it
// again.
- if (ShouldRecomputeHandler(receiver, Handle<String>::cast(name))) {
+ if (ShouldRecomputeHandler(Handle<String>::cast(name))) {
MarkRecomputeHandler(name);
}
}
@@ -728,7 +734,6 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> code) {
number_of_valid_maps++;
if (number_of_valid_maps > 1 && is_keyed()) return false;
- Handle<Code> ic;
if (number_of_valid_maps == 1) {
ConfigureVectorState(name, receiver_map(), code);
} else {
@@ -1413,17 +1418,18 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
Object);
} else if (FLAG_use_ic && !object->IsAccessCheckNeeded() &&
!object->IsJSValue()) {
- if (object->IsJSObject() || (object->IsString() && key->IsNumber())) {
- Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
- if (object->IsString() || key->IsSmi()) UpdateLoadElement(receiver);
+ if ((object->IsJSObject() && key->IsSmi()) ||
+ (object->IsString() && key->IsNumber())) {
+ UpdateLoadElement(Handle<HeapObject>::cast(object));
+ TRACE_IC("LoadIC", key);
}
}
if (!is_vector_set()) {
ConfigureVectorState(MEGAMORPHIC, key);
TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
+ TRACE_IC("LoadIC", key);
}
- TRACE_IC("LoadIC", key);
if (!load_handle.is_null()) return load_handle;
@@ -2237,7 +2243,8 @@ void CallIC::HandleMiss(Handle<Object> function) {
RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
+ // Runtime functions don't follow the IC's calling convention.
Handle<Object> function = args.at<Object>(0);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
Handle<Smi> slot = args.at<Smi>(2);
@@ -2253,9 +2260,9 @@ RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- Handle<Object> receiver = args.at<Object>(0);
-
DCHECK_EQ(4, args.length());
+ // Runtime functions don't follow the IC's calling convention.
+ Handle<Object> receiver = args.at<Object>(0);
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
@@ -2294,6 +2301,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
+ // Runtime functions don't follow the IC's calling convention.
Handle<JSGlobalObject> global = isolate->global_object();
Handle<Smi> slot = args.at<Smi>(0);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
@@ -2364,10 +2372,10 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ // Runtime functions don't follow the IC's calling convention.
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
-
- DCHECK(args.length() == 4);
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
@@ -2381,8 +2389,8 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
typedef LoadWithVectorDescriptor Descriptor;
+ DCHECK_EQ(Descriptor::kParameterCount, args.length());
Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
Handle<Object> key = args.at<Object>(Descriptor::kName);
Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
@@ -2400,13 +2408,13 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Name> key = args.at<Name>(1);
- Handle<Object> value = args.at<Object>(2);
-
- DCHECK(args.length() == 5 || args.length() == 6);
- Handle<Smi> slot = args.at<Smi>(3);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+ DCHECK_EQ(5, args.length());
+ // Runtime functions don't follow the IC's calling convention.
+ Handle<Object> value = args.at<Object>(0);
+ Handle<Smi> slot = args.at<Smi>(1);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
+ Handle<Object> receiver = args.at<Object>(3);
+ Handle<Name> key = args.at<Name>(4);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
StoreICNexus nexus(vector, vector_slot);
@@ -2424,88 +2432,17 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
}
-RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK_EQ(5, args.length());
- typedef StoreWithVectorDescriptor Descriptor;
- Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
- Handle<Name> key = args.at<Name>(Descriptor::kName);
- Handle<Object> value = args.at<Object>(Descriptor::kValue);
- Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
- Handle<TypeFeedbackVector> vector =
- args.at<TypeFeedbackVector>(Descriptor::kVector);
-
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
- if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
- StoreICNexus nexus(vector, vector_slot);
- StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
- } else {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
- vector->GetKind(vector_slot));
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
- }
-}
-
-RUNTIME_FUNCTION(Runtime_TransitionStoreIC_MissFromStubFailure) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Name> key = args.at<Name>(1);
- Handle<Object> value = args.at<Object>(2);
-
- int length = args.length();
- DCHECK(length == 5 || length == 6);
- // TODO(ishell): use VectorStoreTransitionDescriptor indices here and update
- // this comment:
- //
- // We might have slot and vector, for a normal miss (slot(3), vector(4)).
- // Or, map and vector for a transitioning store miss (map(3), vector(4)).
- // In this case, we need to recover the slot from a virtual register.
- // If length == 6, then a map is included (map(3), slot(4), vector(5)).
- Handle<Smi> slot;
- Handle<TypeFeedbackVector> vector;
- if (length == 5) {
- vector = args.at<TypeFeedbackVector>(4);
- slot = handle(
- *reinterpret_cast<Smi**>(isolate->virtual_slot_register_address()),
- isolate);
- } else {
- vector = args.at<TypeFeedbackVector>(5);
- slot = args.at<Smi>(4);
- }
-
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
- if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
- StoreICNexus nexus(vector, vector_slot);
- StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
- } else {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
- vector->GetKind(vector_slot));
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
- }
-}
-
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> value = args.at<Object>(2);
- Handle<Smi> slot = args.at<Smi>(3);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+ // Runtime functions don't follow the IC's calling convention.
+ Handle<Object> value = args.at<Object>(0);
+ Handle<Smi> slot = args.at<Smi>(1);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
+ Handle<Object> receiver = args.at<Object>(3);
+ Handle<Object> key = args.at<Object>(4);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
KeyedStoreICNexus nexus(vector, vector_slot);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2514,31 +2451,14 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
}
-RUNTIME_FUNCTION(Runtime_KeyedStoreIC_MissFromStubFailure) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK_EQ(5, args.length());
- typedef StoreWithVectorDescriptor Descriptor;
- Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
- Handle<Object> key = args.at<Object>(Descriptor::kName);
- Handle<Object> value = args.at<Object>(Descriptor::kValue);
- Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
- Handle<TypeFeedbackVector> vector =
- args.at<TypeFeedbackVector>(Descriptor::kVector);
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
-}
-
-
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> value = args.at<Object>(2);
+ // Runtime functions don't follow the IC's calling convention.
+ Handle<Object> value = args.at<Object>(0);
+ // slot and vector parameters are not used.
+ Handle<Object> object = args.at<Object>(3);
+ Handle<Object> key = args.at<Object>(4);
LanguageMode language_mode;
KeyedStoreICNexus nexus(isolate);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2552,16 +2472,14 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- // Length == 5 or 6, depending on whether the vector slot
- // is passed in a virtual register or not.
- DCHECK(args.length() == 5 || args.length() == 6);
+ // Runtime functions don't follow the IC's calling convention.
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
Handle<Map> map = args.at<Map>(3);
LanguageMode language_mode;
KeyedStoreICNexus nexus(isolate);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
language_mode = ic.language_mode();
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
@@ -3000,35 +2918,5 @@ RUNTIME_FUNCTION(Runtime_LoadElementWithInterceptor) {
return *result;
}
-
-
-RUNTIME_FUNCTION(Runtime_LoadIC_MissFromStubFailure) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- typedef LoadWithVectorDescriptor Descriptor;
- Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
- Handle<Name> key = args.at<Name>(Descriptor::kName);
- Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
- Handle<TypeFeedbackVector> vector =
- args.at<TypeFeedbackVector>(Descriptor::kVector);
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
- // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
- // LoadIC miss handler if the handler misses. Since the vector Nexus is
- // set up outside the IC, handle that here.
- if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::LOAD_IC) {
- LoadICNexus nexus(vector, vector_slot);
- LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
- } else {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC,
- vector->GetKind(vector_slot));
- KeyedLoadICNexus nexus(vector, vector_slot);
- KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
- }
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 35f3844464..bf395f1f2a 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -75,6 +75,10 @@ class IC {
kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC;
}
+ // The ICs that don't pass slot and vector through the stack have to
+ // save/restore them in the dispatcher.
+ static bool ShouldPushPopSlotAndVector(Code::Kind kind);
+
static InlineCacheState StateFromCode(Code* code);
protected:
@@ -87,7 +91,6 @@ class IC {
// Get the code object of the caller.
Code* GetCode() const;
- bool AddressIsOptimizedCode() const;
inline bool AddressIsDeoptimizedCode() const;
inline static bool AddressIsDeoptimizedCode(Isolate* isolate,
Address address);
@@ -168,7 +171,7 @@ class IC {
kind_ == Code::KEYED_STORE_IC);
return kind_;
}
- bool ShouldRecomputeHandler(Handle<Object> receiver, Handle<String> name);
+ bool ShouldRecomputeHandler(Handle<String> name);
ExtraICState extra_ic_state() const { return extra_ic_state_; }
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index f4e0f0baba..df7a0df175 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -107,13 +107,19 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
- __ Push(vector, slot);
+ STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+ LoadWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+ StoreWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+ StoreTransitionDescriptor::kVector);
+ __ Push(slot, vector);
}
void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
- __ Pop(vector, slot);
+ __ Pop(slot, vector);
}
@@ -123,6 +129,13 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ Addu(sp, sp, Operand(2 * kPointerSize));
}
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+ // No-op. Return address is in ra register.
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+ // No-op. Return address is in ra register.
+}
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -317,24 +330,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ TailCallStub(&stub);
}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(),
- StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister());
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm())
@@ -353,12 +348,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
- Register current_map, Register destination_map) {
- DCHECK(false); // Not implemented.
-}
-
-
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
Register map_reg,
Register scratch,
@@ -615,6 +604,9 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+ STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
@@ -675,7 +667,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
DiscardVectorAndSlot();
}
__ Ret(USE_DELAY_SLOT);
- __ mov(v0, result);
+ __ Move(v0, result); // Ensure the stub returns correct value.
FrontendFooter(name, &miss);
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index 3a28b13bd8..ce9e3d9403 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -494,7 +494,8 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
__ sw(value, MemOperand(address));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ Move(v0, value); // Ensure the stub returns correct value.
__ bind(&non_smi_value);
// Escape to elements kind transition case.
@@ -514,7 +515,8 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ mov(scratch, value); // Preserve the value which is returned.
__ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ Move(v0, value); // Ensure the stub returns correct value.
__ bind(fast_double);
if (check_map == kCheckMap) {
@@ -543,7 +545,8 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ Addu(scratch, key, Operand(Smi::FromInt(1)));
__ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ Move(v0, value); // Ensure the stub returns correct value.
__ bind(&transition_smi_elements);
// Transition the array appropriately depending on the value type.
@@ -710,10 +713,11 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(),
+ __ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister());
+ StoreWithVectorDescriptor::VectorRegister(),
+ StoreWithVectorDescriptor::ReceiverRegister(),
+ StoreWithVectorDescriptor::NameRegister());
}
@@ -723,6 +727,14 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ StoreIC_PushArgs(masm);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
+
void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
@@ -748,7 +760,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->ic_store_normal_hit(), 1, t2, t5);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ Move(v0, value); // Ensure the stub returns correct value.
__ bind(&miss);
__ IncrementCounter(counters->ic_store_normal_miss(), 1, t2, t5);
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 53b097f8ce..2190f6d63e 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -107,13 +107,19 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
- __ Push(vector, slot);
+ STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+ LoadWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+ StoreWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+ StoreTransitionDescriptor::kVector);
+ __ Push(slot, vector);
}
void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
- __ Pop(vector, slot);
+ __ Pop(slot, vector);
}
@@ -123,6 +129,13 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ Daddu(sp, sp, Operand(2 * kPointerSize));
}
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+ // No-op. Return address is in ra register.
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+ // No-op. Return address is in ra register.
+}
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -317,24 +330,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ TailCallStub(&stub);
}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(),
- StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister());
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm())
@@ -353,12 +348,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
- Register current_map, Register destination_map) {
- DCHECK(false); // Not implemented.
-}
-
-
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
Register map_reg,
Register scratch,
@@ -615,6 +604,9 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+ STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
@@ -675,7 +667,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
DiscardVectorAndSlot();
}
__ Ret(USE_DELAY_SLOT);
- __ mov(v0, result);
+ __ Move(v0, result); // Ensure the stub returns correct value.
FrontendFooter(name, &miss);
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index b551bc70f6..c2f3cb6024 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -496,7 +496,8 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ SmiScale(scratch, key, kPointerSizeLog2);
__ Daddu(address, address, scratch);
__ sd(value, MemOperand(address));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ Move(v0, value); // Ensure the stub returns correct value.
__ bind(&non_smi_value);
// Escape to elements kind transition case.
@@ -518,7 +519,8 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ mov(scratch, value); // Preserve the value which is returned.
__ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ Move(v0, value); // Ensure the stub returns correct value.
__ bind(fast_double);
if (check_map == kCheckMap) {
@@ -549,7 +551,8 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ Daddu(scratch, key, Operand(Smi::FromInt(1)));
__ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ Move(v0, value); // Ensure the stub returns correct value.
__ bind(&transition_smi_elements);
// Transition the array appropriately depending on the value type.
@@ -714,10 +717,11 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(),
+ __ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister());
+ StoreWithVectorDescriptor::VectorRegister(),
+ StoreWithVectorDescriptor::ReceiverRegister(),
+ StoreWithVectorDescriptor::NameRegister());
}
@@ -727,6 +731,14 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ StoreIC_PushArgs(masm);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
+
void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
@@ -750,7 +762,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
GenerateDictionaryStore(masm, &miss, dictionary, name, value, a6, a7);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->ic_store_normal_hit(), 1, a6, a7);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ Move(v0, value); // Ensure the stub returns correct value.
__ bind(&miss);
__ IncrementCounter(counters->ic_store_normal_miss(), 1, a6, a7);
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 22c0608c97..aafdc77c9b 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -108,13 +108,19 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
- __ Push(vector, slot);
+ STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+ LoadWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+ StoreWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+ StoreTransitionDescriptor::kVector);
+ __ Push(slot, vector);
}
void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
- __ Pop(vector, slot);
+ __ Pop(slot, vector);
}
@@ -124,6 +130,13 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ addi(sp, sp, Operand(2 * kPointerSize));
}
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+ // No-op. Return address is in lr register.
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+ // No-op. Return address is in lr register.
+}
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -325,24 +338,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ TailCallStub(&stub);
}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(),
- StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister());
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm())
@@ -361,12 +356,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
- Register current_map, Register destination_map) {
- DCHECK(false); // Not implemented.
-}
-
-
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
Register map_reg,
Register scratch,
@@ -624,6 +613,9 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+ STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index fd2962d0fa..6dd788146b 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -451,10 +451,11 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(),
+ __ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister());
+ StoreWithVectorDescriptor::VectorRegister(),
+ StoreWithVectorDescriptor::ReceiverRegister(),
+ StoreWithVectorDescriptor::NameRegister());
}
@@ -464,6 +465,13 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ StoreIC_PushArgs(masm);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
diff --git a/deps/v8/src/ic/s390/handler-compiler-s390.cc b/deps/v8/src/ic/s390/handler-compiler-s390.cc
index b399c5a601..504bacebaf 100644
--- a/deps/v8/src/ic/s390/handler-compiler-s390.cc
+++ b/deps/v8/src/ic/s390/handler-compiler-s390.cc
@@ -105,12 +105,18 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
- __ Push(vector, slot);
+ STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+ LoadWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+ StoreWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+ StoreTransitionDescriptor::kVector);
+ __ Push(slot, vector);
}
void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
- __ Pop(vector, slot);
+ __ Pop(slot, vector);
}
void PropertyHandlerCompiler::DiscardVectorAndSlot() {
@@ -119,6 +125,14 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ la(sp, MemOperand(sp, 2 * kPointerSize));
}
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+ // No-op. Return address is in lr register.
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+ // No-op. Return address is in lr register.
+}
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -310,21 +324,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ TailCallStub(&stub);
}
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(),
- StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister());
-}
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
#undef __
#define __ ACCESS_MASM(masm())
@@ -340,11 +339,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
__ mov(this->name(), Operand(name));
}
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
- Register current_map, Register destination_map) {
- DCHECK(false); // Not implemented.
-}
-
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
Register map_reg,
Register scratch,
@@ -593,6 +587,10 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+ STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
+
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
LanguageMode language_mode) {
diff --git a/deps/v8/src/ic/s390/ic-s390.cc b/deps/v8/src/ic/s390/ic-s390.cc
index 6bb484a2fd..08eb3e4ff1 100644
--- a/deps/v8/src/ic/s390/ic-s390.cc
+++ b/deps/v8/src/ic/s390/ic-s390.cc
@@ -437,10 +437,11 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
}
static void StoreIC_PushArgs(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(),
+ __ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister());
+ StoreWithVectorDescriptor::VectorRegister(),
+ StoreWithVectorDescriptor::ReceiverRegister(),
+ StoreWithVectorDescriptor::NameRegister());
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
@@ -449,6 +450,14 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ StoreIC_PushArgs(masm);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
+
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index 31d7e2e0a8..fe1adaaadb 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -4,6 +4,7 @@
#include "src/ic/stub-cache.h"
+#include "src/ast/ast.h"
#include "src/base/bits.h"
#include "src/type-info.h"
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index a053555d9f..ebcff448ad 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -10,6 +10,7 @@
namespace v8 {
namespace internal {
+class SmallMapList;
// The stub cache is used for megamorphic property accesses.
// It maps (map, name, type) to property access handlers. The cache does not
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index ba4daed32c..f386fc5b65 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -20,15 +20,21 @@ namespace internal {
void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
- __ Push(vector);
+ STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+ LoadWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+ StoreWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+ StoreTransitionDescriptor::kVector);
__ Push(slot);
+ __ Push(vector);
}
void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
- __ Pop(slot);
__ Pop(vector);
+ __ Pop(slot);
}
@@ -38,6 +44,15 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ addp(rsp, Immediate(2 * kPointerSize));
}
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+ MacroAssembler* masm = this->masm();
+ __ Push(tmp);
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+ MacroAssembler* masm = this->masm();
+ __ Pop(tmp);
+}
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -321,34 +336,6 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
__ ret(0);
}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
-
- Register slot = StoreWithVectorDescriptor::SlotRegister();
- Register vector = StoreWithVectorDescriptor::VectorRegister();
-
- __ PopReturnAddressTo(r11);
- __ Push(receiver);
- __ Push(name);
- __ Push(value);
- __ Push(slot);
- __ Push(vector);
- __ PushReturnAddressFrom(r11);
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
#undef __
#define __ ACCESS_MASM((masm()))
@@ -367,12 +354,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
- Register current_map, Register destination_map) {
- DCHECK(false); // Not implemented.
-}
-
-
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
Register map_reg,
Register scratch,
@@ -638,6 +619,9 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+ STATIC_ASSERT(!StoreWithVectorDescriptor::kPassLastArgsOnStack);
+}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index 21a114830f..d0445a229a 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -706,21 +706,20 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
+ Register name = StoreWithVectorDescriptor::NameRegister();
+ Register value = StoreWithVectorDescriptor::ValueRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
Register temp = r11;
- DCHECK(!temp.is(receiver) && !temp.is(name) && !temp.is(value));
+ DCHECK(!AreAliased(receiver, name, value, slot, vector, temp));
__ PopReturnAddressTo(temp);
- __ Push(receiver);
- __ Push(name);
__ Push(value);
- Register slot = StoreWithVectorDescriptor::SlotRegister();
- Register vector = StoreWithVectorDescriptor::VectorRegister();
- DCHECK(!temp.is(slot) && !temp.is(vector));
__ Push(slot);
__ Push(vector);
+ __ Push(receiver);
+ __ Push(name);
__ PushReturnAddressFrom(temp);
}
@@ -764,6 +763,13 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
#undef __
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index 4bf0af2569..5eca3dc0cb 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -59,15 +59,21 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
- __ push(vector);
+ STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
+ LoadWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
+ StoreWithVectorDescriptor::kVector);
+ STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
+ StoreTransitionDescriptor::kVector);
__ push(slot);
+ __ push(vector);
}
void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
- __ pop(slot);
__ pop(vector);
+ __ pop(slot);
}
@@ -77,6 +83,15 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ add(esp, Immediate(2 * kPointerSize));
}
+void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
+ MacroAssembler* masm = this->masm();
+ __ push(tmp);
+}
+
+void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
+ MacroAssembler* masm = this->masm();
+ __ pop(tmp);
+}
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -150,12 +165,16 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
DCHECK(!accessor_holder.is(scratch));
// Copy return value.
__ pop(scratch);
- // receiver
+
+ if (is_store) {
+ // Discard stack arguments.
+ __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
+ kPointerSize));
+ }
+ // Write the receiver and arguments to stack frame.
__ push(receiver);
- // Write the arguments to stack frame.
if (is_store) {
- DCHECK(!receiver.is(store_parameter));
- DCHECK(!scratch.is(store_parameter));
+ DCHECK(!AreAliased(receiver, scratch, store_parameter));
__ push(store_parameter);
}
__ push(scratch);
@@ -252,8 +271,13 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
- // -- esp[0] : return address
+ // -- esp[12] : value
+ // -- esp[8] : slot
+ // -- esp[4] : vector
+ // -- esp[0] : return address
// -----------------------------------
+ __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
+
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -290,7 +314,14 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// Restore context register.
__ pop(esi);
}
- __ ret(0);
+ if (accessor_index >= 0) {
+ __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
+ } else {
+ // If we generate a global code snippet for deoptimization only, don't try
+ // to drop stack arguments for the StoreIC because they are not a part of
+ // expression stack and deoptimizer does not reconstruct them.
+ __ ret(0);
+ }
}
@@ -316,32 +347,6 @@ static void CompileCallLoadPropertyWithInterceptor(
__ CallRuntime(id);
}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
- Register vector = StoreWithVectorDescriptor::VectorRegister();
-
- __ xchg(receiver, Operand(esp, 0));
- __ push(name);
- __ push(value);
- __ push(slot);
- __ push(vector);
- __ push(receiver); // which contains the return address.
-}
-
-
-void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm())
@@ -360,19 +365,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
- Register current_map, Register destination_map) {
- DCHECK(destination_map.is(StoreTransitionHelper::MapRegister()));
- DCHECK(current_map.is(StoreTransitionHelper::VectorRegister()));
- ExternalReference virtual_slot =
- ExternalReference::virtual_slot_register(isolate());
- __ mov(destination_map, current_map);
- __ pop(current_map);
- __ mov(Operand::StaticVariable(virtual_slot), current_map);
- __ pop(current_map); // put vector in place.
-}
-
-
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
Register map_reg,
Register scratch,
@@ -532,7 +524,7 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
__ bind(miss);
- if (IC::ICUseVector(kind())) {
+ if (IC::ShouldPushPopSlotAndVector(kind())) {
DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
}
@@ -547,7 +539,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
GenerateRestoreName(miss, name);
- if (IC::ICUseVector(kind())) PopVectorAndSlot();
+ DCHECK(!IC::ShouldPushPopSlotAndVector(kind()));
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
@@ -641,13 +633,26 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
+void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
+ // Zap register aliases of the arguments passed on the stack to ensure they
+ // are properly loaded by the handler (debug-only).
+ STATIC_ASSERT(Descriptor::kPassLastArgsOnStack);
+ STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
+ __ mov(Descriptor::ValueRegister(), Immediate(kDebugZapValue));
+ __ mov(Descriptor::SlotRegister(), Immediate(kDebugZapValue));
+ __ mov(Descriptor::VectorRegister(), Immediate(kDebugZapValue));
+}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
LanguageMode language_mode) {
Register holder_reg = Frontend(name);
+ __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
__ pop(scratch1()); // remove the return address
+ // Discard stack arguments.
+ __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
+ kPointerSize));
__ push(receiver());
__ push(holder_reg);
// If the callback cannot leak, then push the callback directly,
@@ -679,7 +684,7 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
- if (IC::ICUseVector(kind())) {
+ if (IC::ShouldPushPopSlotAndVector(kind())) {
PushVectorAndSlot();
}
FrontendHeader(receiver(), name, &miss, DONT_RETURN_ANYTHING);
@@ -701,7 +706,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->ic_named_load_global_stub(), 1);
// The code above already loads the result into the return register.
- if (IC::ICUseVector(kind())) {
+ if (IC::ShouldPushPopSlotAndVector(kind())) {
DiscardVectorAndSlot();
}
__ ret(0);
diff --git a/deps/v8/src/ic/x87/ic-compiler-x87.cc b/deps/v8/src/ic/x87/ic-compiler-x87.cc
index 9edf63b722..11a8cdcd34 100644
--- a/deps/v8/src/ic/x87/ic-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/ic-compiler-x87.cc
@@ -15,14 +15,21 @@ namespace internal {
void PropertyICCompiler::GenerateRuntimeSetProperty(
MacroAssembler* masm, LanguageMode language_mode) {
- // Return address is on the stack.
- DCHECK(!ebx.is(StoreDescriptor::ReceiverRegister()) &&
- !ebx.is(StoreDescriptor::NameRegister()) &&
- !ebx.is(StoreDescriptor::ValueRegister()));
+ typedef StoreWithVectorDescriptor Descriptor;
+ STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
+ // ----------- S t a t e -------------
+ // -- esp[12] : value
+ // -- esp[8] : slot
+ // -- esp[4] : vector
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
+ Descriptor::kValue);
+
+ __ mov(Operand(esp, 12), Descriptor::ReceiverRegister());
+ __ mov(Operand(esp, 8), Descriptor::NameRegister());
+ __ mov(Operand(esp, 4), Descriptor::ValueRegister());
__ pop(ebx);
- __ push(StoreDescriptor::ReceiverRegister());
- __ push(StoreDescriptor::NameRegister());
- __ push(StoreDescriptor::ValueRegister());
__ push(Immediate(Smi::FromInt(language_mode)));
__ push(ebx); // return address
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index 76933f01bb..baf435e0f2 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -409,7 +409,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
}
// It's irrelevant whether array is smi-only or not when writing a smi.
__ mov(FixedArrayElementOperand(ebx, key), value);
- __ ret(0);
+ __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
__ bind(&non_smi_value);
// Escape to elements kind transition case.
@@ -428,7 +428,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ mov(edx, value); // Preserve the value which is returned.
__ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- __ ret(0);
+ __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
__ bind(fast_double);
if (check_map == kCheckMap) {
@@ -457,7 +457,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ add(FieldOperand(receiver, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
}
- __ ret(0);
+ __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
__ bind(&transition_smi_elements);
__ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -504,12 +504,13 @@ static void KeyedStoreGenerateMegamorphicHelper(
void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
LanguageMode language_mode) {
+ typedef StoreWithVectorDescriptor Descriptor;
// Return address is on the stack.
Label slow, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
Label array, extra, check_if_double_array, maybe_name_key, miss;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register key = StoreDescriptor::NameRegister();
+ Register receiver = Descriptor::ReceiverRegister();
+ Register key = Descriptor::NameRegister();
DCHECK(receiver.is(edx));
DCHECK(key.is(ecx));
@@ -522,6 +523,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
+
+ __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
+ Descriptor::kValue);
+
// Check that the key is a smi.
__ JumpIfNotSmi(key, &maybe_name_key);
__ CmpInstanceType(edi, JS_ARRAY_TYPE);
@@ -551,22 +556,9 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ push(Immediate(Smi::FromInt(slot)));
- __ push(Immediate(dummy_vector));
-
masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
no_reg);
- __ pop(StoreWithVectorDescriptor::VectorRegister());
- __ pop(StoreWithVectorDescriptor::SlotRegister());
-
// Cache miss.
__ jmp(&miss);
@@ -705,18 +697,21 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
- Register vector = StoreWithVectorDescriptor::VectorRegister();
-
- __ xchg(receiver, Operand(esp, 0));
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
+ Register name = StoreWithVectorDescriptor::NameRegister();
+
+ STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+ // Current stack layout:
+ // - esp[12] -- value
+ // - esp[8] -- slot
+ // - esp[4] -- vector
+ // - esp[0] -- return address
+
+ Register return_address = StoreWithVectorDescriptor::SlotRegister();
+ __ pop(return_address);
+ __ push(receiver);
__ push(name);
- __ push(value);
- __ push(slot);
- __ push(vector);
- __ push(receiver); // Contains the return address.
+ __ push(return_address);
}
@@ -730,32 +725,33 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ typedef StoreWithVectorDescriptor Descriptor;
Label restore_miss;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Register vector = StoreWithVectorDescriptor::VectorRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
-
- // A lot of registers are needed for storing to slow case
- // objects. Push and restore receiver but rely on
- // GenerateDictionaryStore preserving the value and name.
+ Register receiver = Descriptor::ReceiverRegister();
+ Register name = Descriptor::NameRegister();
+ Register value = Descriptor::ValueRegister();
+ // Since the slot and vector values are passed on the stack we can use
+ // respective registers as scratch registers.
+ Register scratch1 = Descriptor::VectorRegister();
+ Register scratch2 = Descriptor::SlotRegister();
+
+ __ LoadParameterFromStack<Descriptor>(value, Descriptor::kValue);
+
+ // A lot of registers are needed for storing to slow case objects.
+ // Push and restore receiver but rely on GenerateDictionaryStore preserving
+ // the value and name.
__ push(receiver);
- __ push(vector);
- __ push(slot);
- Register dictionary = ebx;
+ Register dictionary = receiver;
__ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
- receiver, edi);
- __ Drop(3);
+ scratch1, scratch2);
+ __ Drop(1);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->ic_store_normal_hit(), 1);
- __ ret(0);
+ __ ret(Descriptor::kStackArgumentsCount * kPointerSize);
__ bind(&restore_miss);
- __ pop(slot);
- __ pop(vector);
__ pop(receiver);
__ IncrementCounter(counters->ic_store_normal_miss(), 1);
GenerateMiss(masm);
@@ -770,6 +766,13 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
#undef __
diff --git a/deps/v8/src/ic/x87/stub-cache-x87.cc b/deps/v8/src/ic/x87/stub-cache-x87.cc
index e0656f7cff..68fa615420 100644
--- a/deps/v8/src/ic/x87/stub-cache-x87.cc
+++ b/deps/v8/src/ic/x87/stub-cache-x87.cc
@@ -22,8 +22,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
ExternalReference key_offset(stub_cache->key_reference(table));
ExternalReference value_offset(stub_cache->value_reference(table));
ExternalReference map_offset(stub_cache->map_reference(table));
- ExternalReference virtual_register =
- ExternalReference::virtual_handler_register(masm->isolate());
Label miss;
Code::Kind ic_kind = stub_cache->ic_kind();
@@ -55,19 +53,15 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
}
#endif
- // The vector and slot were pushed onto the stack before starting the
- // probe, and need to be dropped before calling the handler.
if (is_vector_store) {
- // The overlap here is rather embarrassing. One does what one must.
- Register vector = StoreWithVectorDescriptor::VectorRegister();
+ // The value, vector and slot were passed to the IC on the stack and
+ // they are still there. So we can just jump to the handler.
DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
__ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ pop(vector);
- __ mov(Operand::StaticVariable(virtual_register), extra);
- __ pop(extra); // Pop "slot".
- // Jump to the first instruction in the code stub.
- __ jmp(Operand::StaticVariable(virtual_register));
+ __ jmp(extra);
} else {
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
__ pop(LoadWithVectorDescriptor::VectorRegister());
__ pop(LoadDescriptor::SlotRegister());
__ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -110,19 +104,10 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
// Jump to the first instruction in the code stub.
if (is_vector_store) {
- // The vector and slot were pushed onto the stack before starting the
- // probe, and need to be dropped before calling the handler.
- Register vector = StoreWithVectorDescriptor::VectorRegister();
DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ mov(Operand::StaticVariable(virtual_register), offset);
- __ pop(vector);
- __ pop(offset); // Pop "slot".
- __ jmp(Operand::StaticVariable(virtual_register));
- } else {
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
}
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(offset);
// Pop at miss.
__ bind(&miss);
diff --git a/deps/v8/src/identity-map.cc b/deps/v8/src/identity-map.cc
index 97b70ae2fd..58dbf6b1cc 100644
--- a/deps/v8/src/identity-map.cc
+++ b/deps/v8/src/identity-map.cc
@@ -6,7 +6,7 @@
#include "src/base/functional.h"
#include "src/heap/heap-inl.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index 56b96e1cd6..15c090ff22 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -2,57 +2,98 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-protocol_path = "//third_party/WebKit/Source/platform/inspector_protocol"
-protocol_sources = [
- "$target_gen_dir/Console.cpp",
- "$target_gen_dir/Console.h",
- "$target_gen_dir/Debugger.cpp",
- "$target_gen_dir/Debugger.h",
- "$target_gen_dir/HeapProfiler.cpp",
- "$target_gen_dir/HeapProfiler.h",
- "$target_gen_dir/Profiler.cpp",
- "$target_gen_dir/Profiler.h",
- "$target_gen_dir/public/Debugger.h",
- "$target_gen_dir/public/Runtime.h",
- "$target_gen_dir/Runtime.cpp",
- "$target_gen_dir/Runtime.h",
+import("../../gni/v8.gni")
+
+_inspector_protocol = "//third_party/WebKit/Source/platform/inspector_protocol"
+import("$_inspector_protocol/inspector_protocol.gni")
+
+_protocol_generated = [
+ "protocol/Forward.h",
+ "protocol/Protocol.cpp",
+ "protocol/Protocol.h",
+ "protocol/Console.cpp",
+ "protocol/Console.h",
+ "protocol/Debugger.cpp",
+ "protocol/Debugger.h",
+ "protocol/HeapProfiler.cpp",
+ "protocol/HeapProfiler.h",
+ "protocol/Profiler.cpp",
+ "protocol/Profiler.h",
+ "protocol/Runtime.cpp",
+ "protocol/Runtime.h",
+ "protocol/Schema.cpp",
+ "protocol/Schema.h",
+ "../../include/inspector/Debugger.h",
+ "../../include/inspector/Runtime.h",
+ "../../include/inspector/Schema.h",
]
-action("inspector_protocol_sources") {
+action("protocol_compatibility") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
- script = "$protocol_path/CodeGenerator.py"
- sources = [
- "$protocol_path/CodeGenerator.py",
- "$protocol_path/Exported_h.template",
- "$protocol_path/Imported_h.template",
- "$protocol_path/TypeBuilder_cpp.template",
- "$protocol_path/TypeBuilder_h.template",
- ]
+ script = "$_inspector_protocol/CheckProtocolCompatibility.py"
inputs = [
"js_protocol.json",
]
- outputs = protocol_sources
+ _stamp = "$target_gen_dir/js_protocol.stamp"
+ outputs = [
+ _stamp,
+ ]
args = [
- "--protocol",
+ "--stamp",
+ rebase_path(_stamp, root_build_dir),
rebase_path("js_protocol.json", root_build_dir),
- "--string_type",
- "String16",
- "--export_macro",
- "PLATFORM_EXPORT",
- "--output_dir",
- rebase_path(target_gen_dir, root_build_dir),
- "--output_package",
- "inspector",
- "--exported_dir",
- rebase_path("$target_gen_dir/public", root_build_dir),
- "--exported_package",
- "inspector/public",
]
}
-config("inspector_protocol_config") {
- include_dirs = [ "$protocol_path/../.." ]
- defines = [ "V8_INSPECTOR_USE_STL" ]
+inspector_protocol_generate("protocol_generated_sources") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+ deps = [
+ ":protocol_compatibility",
+ ]
+
+ out_dir = target_gen_dir
+ config_file = "inspector_protocol_config.json"
+ inputs = [
+ "js_protocol.json",
+ "inspector_protocol_config.json",
+ ]
+ outputs = _protocol_generated
+}
+
+action("inspector_injected_script") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+ script = "build/xxd.py"
+ inputs = [
+ "injected-script-source.js",
+ ]
+ outputs = [
+ "$target_gen_dir/injected-script-source.h",
+ ]
+ args = [
+ "InjectedScriptSource_js",
+ rebase_path("injected-script-source.js", root_build_dir),
+ rebase_path("$target_gen_dir/injected-script-source.h", root_build_dir),
+ ]
+}
+
+action("inspector_debugger_script") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+ script = "build/xxd.py"
+ inputs = [
+ "debugger-script.js",
+ ]
+ outputs = [
+ "$target_gen_dir/debugger-script.h",
+ ]
+ args = [
+ "DebuggerScript_js",
+ rebase_path("debugger-script.js", root_build_dir),
+ rebase_path("$target_gen_dir/debugger-script.h", root_build_dir),
+ ]
+}
+
+config("inspector_config") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
cflags = []
if (is_win) {
cflags += [
@@ -64,38 +105,85 @@ config("inspector_protocol_config") {
"/wd4996", # Deprecated function call.
]
}
+ if (is_component_build) {
+ defines = [ "BUILDING_V8_SHARED" ]
+ }
}
-source_set("inspector_protocol") {
+v8_source_set("inspector") {
deps = [
- ":inspector_protocol_sources",
+ ":inspector_debugger_script",
+ ":inspector_injected_script",
+ ":protocol_generated_sources",
+ ]
+ configs = [ ":inspector_config" ]
+ include_dirs = [
+ "../..",
+ "../../include",
+ "$target_gen_dir/../..",
+ "$target_gen_dir/../../include",
+ ]
+ sources = rebase_path(_protocol_generated, ".", target_gen_dir)
+ sources += [
+ "../../include/v8-inspector-protocol.h",
+ "../../include/v8-inspector.h",
+ ]
+ sources += get_target_outputs(":inspector_injected_script")
+ sources += get_target_outputs(":inspector_debugger_script")
+ sources += [
+ "injected-script-native.cc",
+ "injected-script-native.h",
+ "injected-script.cc",
+ "injected-script.h",
+ "inspected-context.cc",
+ "inspected-context.h",
+ "java-script-call-frame.cc",
+ "java-script-call-frame.h",
+ "protocol-platform.h",
+ "remote-object-id.cc",
+ "remote-object-id.h",
+ "script-breakpoint.h",
+ "search-util.cc",
+ "search-util.h",
+ "string-16.cc",
+ "string-16.h",
+ "string-util.cc",
+ "string-util.h",
+ "v8-console-agent-impl.cc",
+ "v8-console-agent-impl.h",
+ "v8-console-message.cc",
+ "v8-console-message.h",
+ "v8-console.cc",
+ "v8-console.h",
+ "v8-debugger-agent-impl.cc",
+ "v8-debugger-agent-impl.h",
+ "v8-debugger-script.cc",
+ "v8-debugger-script.h",
+ "v8-debugger.cc",
+ "v8-debugger.h",
+ "v8-function-call.cc",
+ "v8-function-call.h",
+ "v8-heap-profiler-agent-impl.cc",
+ "v8-heap-profiler-agent-impl.h",
+ "v8-injected-script-host.cc",
+ "v8-injected-script-host.h",
+ "v8-inspector-impl.cc",
+ "v8-inspector-impl.h",
+ "v8-inspector-session-impl.cc",
+ "v8-inspector-session-impl.h",
+ "v8-internal-value-type.cc",
+ "v8-internal-value-type.h",
+ "v8-profiler-agent-impl.cc",
+ "v8-profiler-agent-impl.h",
+ "v8-regex.cc",
+ "v8-regex.h",
+ "v8-runtime-agent-impl.cc",
+ "v8-runtime-agent-impl.h",
+ "v8-schema-agent-impl.cc",
+ "v8-schema-agent-impl.h",
+ "v8-stack-trace-impl.cc",
+ "v8-stack-trace-impl.h",
+ "v8-value-copier.cc",
+ "v8-value-copier.h",
]
- configs += [ ":inspector_protocol_config" ]
- include_dirs = [ "$target_gen_dir/.." ]
- sources = protocol_sources + [
- "$protocol_path/Allocator.h",
- "$protocol_path/Array.h",
- "$protocol_path/BackendCallback.h",
- "$protocol_path/CodeGenerator.py",
- "$protocol_path/Collections.h",
- "$protocol_path/DispatcherBase.cpp",
- "$protocol_path/DispatcherBase.h",
- "$protocol_path/ErrorSupport.cpp",
- "$protocol_path/ErrorSupport.h",
- "$protocol_path/FrontendChannel.h",
- "$protocol_path/Maybe.h",
- "$protocol_path/Object.cpp",
- "$protocol_path/Object.h",
- "$protocol_path/Parser.cpp",
- "$protocol_path/Parser.h",
- "$protocol_path/Platform.h",
- "$protocol_path/PlatformSTL.h",
- "$protocol_path/String16.cpp",
- "$protocol_path/String16.h",
- "$protocol_path/String16STL.cpp",
- "$protocol_path/String16STL.h",
- "$protocol_path/ValueConversions.h",
- "$protocol_path/Values.cpp",
- "$protocol_path/Values.h",
- ]
}
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
new file mode 100644
index 0000000000..4486204d74
--- /dev/null
+++ b/deps/v8/src/inspector/DEPS
@@ -0,0 +1,8 @@
+include_rules = [
+ "-src",
+ "+src/inspector",
+ "+src/base/atomicops.h",
+ "+src/base/macros.h",
+ "+src/base/logging.h",
+ "+src/base/platform/platform.h",
+]
diff --git a/deps/v8/src/inspector/OWNERS b/deps/v8/src/inspector/OWNERS
new file mode 100644
index 0000000000..2c4bd8d24b
--- /dev/null
+++ b/deps/v8/src/inspector/OWNERS
@@ -0,0 +1,15 @@
+set noparent
+
+alph@chromium.org
+caseq@chromium.org
+dgozman@chromium.org
+jochen@chromium.org
+kozyatinskiy@chromium.org
+pfeldman@chromium.org
+yangguo@chromium.org
+
+# Changes to remote debugging protocol require devtools review to
+# ensure backwards compatibility and committment to maintain.
+per-file js_protocol.json=set noparent
+per-file js_protocol.json=dgozman@chromium.org
+per-file js_protocol.json=pfeldman@chromium.org
diff --git a/deps/v8/src/inspector/PRESUBMIT.py b/deps/v8/src/inspector/PRESUBMIT.py
new file mode 100644
index 0000000000..491564b2d9
--- /dev/null
+++ b/deps/v8/src/inspector/PRESUBMIT.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""v8_inspect presubmit script
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details about the presubmit API built into gcl.
+"""
+
+compile_note = "Be sure to run your patch by the compile-scripts.py script prior to committing!"
+
+
+def _CompileScripts(input_api, output_api):
+ local_paths = [f.LocalPath() for f in input_api.AffectedFiles()]
+
+ compilation_related_files = [
+ "js_protocol.json"
+ "compile-scripts.js",
+ "injected-script-source.js",
+ "debugger_script_externs.js",
+ "injected_script_externs.js",
+ "check_injected_script_source.js",
+ "debugger-script.js"
+ ]
+
+ for file in compilation_related_files:
+ if (any(file in path for path in local_paths)):
+ script_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
+ "build", "compile-scripts.py")
+ proc = input_api.subprocess.Popen(
+ [input_api.python_executable, script_path],
+ stdout=input_api.subprocess.PIPE,
+ stderr=input_api.subprocess.STDOUT)
+ out, _ = proc.communicate()
+ if "ERROR" in out or "WARNING" in out or proc.returncode:
+ return [output_api.PresubmitError(out)]
+ if "NOTE" in out:
+ return [output_api.PresubmitPromptWarning(out + compile_note)]
+ return []
+ return []
+
+
+def CheckChangeOnUpload(input_api, output_api):
+ results = []
+ results.extend(_CompileScripts(input_api, output_api))
+ return results
+
+
+def CheckChangeOnCommit(input_api, output_api):
+ results = []
+ results.extend(_CompileScripts(input_api, output_api))
+ return results
diff --git a/deps/v8/src/inspector/build/check_injected_script_source.py b/deps/v8/src/inspector/build/check_injected_script_source.py
new file mode 100644
index 0000000000..0f2509cd8c
--- /dev/null
+++ b/deps/v8/src/inspector/build/check_injected_script_source.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+# Copyright (c) 2014 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Copied from blink:
+# WebKit/Source/devtools/scripts/check_injected_script_source.py
+#
+
+import re
+import sys
+import os
+
+
+def validate_injected_script(fileName):
+ f = open(fileName, "r")
+ lines = f.readlines()
+ f.close()
+
+ proto_functions = "|".join([
+ # Array.prototype.*
+ "concat", "every", "filter", "forEach", "indexOf", "join", "lastIndexOf", "map", "pop",
+ "push", "reduce", "reduceRight", "reverse", "shift", "slice", "some", "sort", "splice", "toLocaleString", "toString", "unshift",
+ # Function.prototype.*
+ "apply", "bind", "call", "isGenerator", "toSource",
+ # Object.prototype.*
+ "toString",
+ ])
+
+ global_functions = "|".join([
+ "eval", "uneval", "isFinite", "isNaN", "parseFloat", "parseInt", "decodeURI", "decodeURIComponent",
+ "encodeURI", "encodeURIComponent", "escape", "unescape", "Map", "Set"
+ ])
+
+ # Black list:
+ # - instanceof, since e.g. "obj instanceof Error" may throw if Error is overridden and is not a function
+ # - Object.prototype.toString()
+ # - Array.prototype.*
+ # - Function.prototype.*
+ # - Math.*
+ # - Global functions
+ black_list_call_regex = re.compile(r"\sinstanceof\s+\w*|\bMath\.\w+\(|(?<!InjectedScriptHost)\.(" + proto_functions + r")\(|[^\.]\b(" + global_functions + r")\(")
+
+ errors_found = False
+ for i, line in enumerate(lines):
+ if line.find("suppressBlacklist") != -1:
+ continue
+ for match in re.finditer(black_list_call_regex, line):
+ errors_found = True
+ print "ERROR: Black listed expression in %s at line %02d column %02d: %s" % (os.path.basename(fileName), i + 1, match.start(), match.group(0))
+
+ if not errors_found:
+ print "OK"
+
+
+def main(argv):
+ if len(argv) < 2:
+ print('ERROR: Usage: %s path/to/injected-script-source.js' % argv[0])
+ return 1
+
+ validate_injected_script(argv[1])
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/deps/v8/src/inspector/build/closure-compiler.tar.gz.sha1 b/deps/v8/src/inspector/build/closure-compiler.tar.gz.sha1
new file mode 100644
index 0000000000..5366f51b21
--- /dev/null
+++ b/deps/v8/src/inspector/build/closure-compiler.tar.gz.sha1
@@ -0,0 +1 @@
+69937d3c239ca63e4c9045718886ddd096ffc054 \ No newline at end of file
diff --git a/deps/v8/src/inspector/build/compile-scripts.py b/deps/v8/src/inspector/build/compile-scripts.py
new file mode 100755
index 0000000000..abe167af2d
--- /dev/null
+++ b/deps/v8/src/inspector/build/compile-scripts.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import os.path as path
+import generate_protocol_externs
+import re
+import subprocess
+import sys
+
+if len(sys.argv) == 2 and sys.argv[1] == '--help':
+ print("Usage: %s" % path.basename(sys.argv[0]))
+ sys.exit(0)
+
+java_required_major = 1
+java_required_minor = 7
+
+v8_inspector_path = path.dirname(path.dirname(path.abspath(__file__)))
+
+protocol_externs_file = path.join(v8_inspector_path, 'protocol_externs.js')
+injected_script_source_name = path.join(v8_inspector_path,
+ 'injected-script-source.js')
+injected_script_externs_file = path.join(v8_inspector_path,
+ 'injected_script_externs.js')
+debugger_script_source_name = path.join(v8_inspector_path,
+ 'debugger-script.js')
+debugger_script_externs_file = path.join(v8_inspector_path,
+ 'debugger_script_externs.js')
+
+generate_protocol_externs.generate_protocol_externs(protocol_externs_file,
+ path.join(v8_inspector_path, 'js_protocol.json'))
+
+error_warning_regex = re.compile(r'WARNING|ERROR')
+
+closure_compiler_jar = path.join(v8_inspector_path, 'build',
+ 'closure-compiler', 'closure-compiler.jar')
+
+common_closure_args = [
+ '--checks_only',
+ '--warning_level', 'VERBOSE'
+]
+
+# Error reporting and checking.
+errors_found = False
+
+def popen(arguments):
+ return subprocess.Popen(arguments, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+
+def error_excepthook(exctype, value, traceback):
+ print 'ERROR:'
+ sys.__excepthook__(exctype, value, traceback)
+sys.excepthook = error_excepthook
+
+def has_errors(output):
+ return re.search(error_warning_regex, output) != None
+
+# Find java. Based on
+# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python.
+def which(program):
+ def is_exe(fpath):
+ return path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+ fpath, fname = path.split(program)
+ if fpath:
+ if is_exe(program):
+ return program
+ else:
+ for part in os.environ['PATH'].split(os.pathsep):
+ part = part.strip('"')
+ exe_file = path.join(part, program)
+ if is_exe(exe_file):
+ return exe_file
+ return None
+
+def find_java():
+ exec_command = None
+ has_server_jvm = True
+ java_path = which('java')
+ if not java_path:
+ java_path = which('java.exe')
+
+ if not java_path:
+ print 'NOTE: No Java executable found in $PATH.'
+ sys.exit(0)
+
+ is_ok = False
+ java_version_out, _ = popen([java_path, '-version']).communicate()
+ java_build_regex = re.compile(r'^\w+ version "(\d+)\.(\d+)')
+ # pylint: disable=E1103
+ match = re.search(java_build_regex, java_version_out)
+ if match:
+ major = int(match.group(1))
+ minor = int(match.group(2))
+ is_ok = major >= java_required_major and minor >= java_required_minor
+ if is_ok:
+ exec_command = [java_path, '-Xms1024m', '-server',
+ '-XX:+TieredCompilation']
+ check_server_proc = popen(exec_command + ['-version'])
+ check_server_proc.communicate()
+ if check_server_proc.returncode != 0:
+ # Not all Java installs have server JVMs.
+ exec_command = exec_command.remove('-server')
+ has_server_jvm = False
+
+ if not is_ok:
+ print 'NOTE: Java executable version %d.%d or above not found in $PATH.' % (java_required_major, java_required_minor)
+ sys.exit(0)
+ print 'Java executable: %s%s' % (java_path, '' if has_server_jvm else ' (no server JVM)')
+ return exec_command
+
+java_exec = find_java()
+
+spawned_compiler_command = java_exec + [
+ '-jar',
+ closure_compiler_jar
+] + common_closure_args
+
+print 'Compiling injected-script-source.js...'
+
+command = spawned_compiler_command + [
+ '--externs', injected_script_externs_file,
+ '--externs', protocol_externs_file,
+ '--js', injected_script_source_name
+]
+
+injected_script_compile_proc = popen(command)
+
+print 'Compiling debugger-script.js...'
+
+command = spawned_compiler_command + [
+ '--externs', debugger_script_externs_file,
+ '--js', debugger_script_source_name,
+ '--new_type_inf'
+]
+
+debugger_script_compile_proc = popen(command)
+
+print 'Validating injected-script-source.js...'
+injectedscript_check_script_path = path.join(v8_inspector_path, 'build',
+ 'check_injected_script_source.py')
+validate_injected_script_proc = popen([sys.executable,
+ injectedscript_check_script_path, injected_script_source_name])
+
+print
+
+(injected_script_compile_out, _) = injected_script_compile_proc.communicate()
+print 'injected-script-source.js compilation output:%s' % os.linesep
+print injected_script_compile_out
+errors_found |= has_errors(injected_script_compile_out)
+
+(debugger_script_compiler_out, _) = debugger_script_compile_proc.communicate()
+print 'debugger-script.js compilation output:%s' % os.linesep
+print debugger_script_compiler_out
+errors_found |= has_errors(debugger_script_compiler_out)
+
+(validate_injected_script_out, _) = validate_injected_script_proc.communicate()
+print 'Validate injected-script-source.js output:%s' % os.linesep
+print validate_injected_script_out if validate_injected_script_out else '<empty>'
+errors_found |= has_errors(validate_injected_script_out)
+
+os.remove(protocol_externs_file)
+
+if errors_found:
+ print 'ERRORS DETECTED'
+ sys.exit(1)
diff --git a/deps/v8/src/inspector/build/generate_protocol_externs.py b/deps/v8/src/inspector/build/generate_protocol_externs.py
new file mode 100755
index 0000000000..c2ba2c5b84
--- /dev/null
+++ b/deps/v8/src/inspector/build/generate_protocol_externs.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+import json
+
+type_traits = {
+ "any": "*",
+ "string": "string",
+ "integer": "number",
+ "number": "number",
+ "boolean": "boolean",
+ "array": "!Array.<*>",
+ "object": "!Object",
+}
+
+promisified_domains = {
+ "Accessibility",
+ "Animation",
+ "CSS",
+ "Emulation",
+ "Profiler"
+}
+
+ref_types = {}
+
+def full_qualified_type_id(domain_name, type_id):
+ if type_id.find(".") == -1:
+ return "%s.%s" % (domain_name, type_id)
+ return type_id
+
+
+def fix_camel_case(name):
+ prefix = ""
+ if name[0] == "-":
+ prefix = "Negative"
+ name = name[1:]
+ refined = re.sub(r'-(\w)', lambda pat: pat.group(1).upper(), name)
+ refined = to_title_case(refined)
+ return prefix + re.sub(r'(?i)HTML|XML|WML|API', lambda pat: pat.group(0).upper(), refined)
+
+
+def to_title_case(name):
+ return name[:1].upper() + name[1:]
+
+
+def generate_enum(name, json):
+ enum_members = []
+ for member in json["enum"]:
+ enum_members.append(" %s: \"%s\"" % (fix_camel_case(member), member))
+ return "\n/** @enum {string} */\n%s = {\n%s\n};\n" % (name, (",\n".join(enum_members)))
+
+
+def param_type(domain_name, param):
+ if "type" in param:
+ if param["type"] == "array":
+ items = param["items"]
+ return "!Array.<%s>" % param_type(domain_name, items)
+ else:
+ return type_traits[param["type"]]
+ if "$ref" in param:
+ type_id = full_qualified_type_id(domain_name, param["$ref"])
+ if type_id in ref_types:
+ return ref_types[type_id]
+ else:
+ print "Type not found: " + type_id
+ return "!! Type not found: " + type_id
+
+
+def load_schema(file, domains):
+ input_file = open(file, "r")
+ json_string = input_file.read()
+ parsed_json = json.loads(json_string)
+ domains.extend(parsed_json["domains"])
+
+
+def generate_protocol_externs(output_path, file1):
+ domains = []
+ load_schema(file1, domains)
+ output_file = open(output_path, "w")
+
+ output_file.write(
+"""
+var InspectorBackend = {}
+
+var Protocol = {};
+/** @typedef {string}*/
+Protocol.Error;
+""")
+
+ for domain in domains:
+ domain_name = domain["domain"]
+ if "types" in domain:
+ for type in domain["types"]:
+ type_id = full_qualified_type_id(domain_name, type["id"])
+ ref_types[type_id] = "%sAgent.%s" % (domain_name, type["id"])
+
+ for domain in domains:
+ domain_name = domain["domain"]
+ promisified = domain_name in promisified_domains
+
+ output_file.write("\n\n/**\n * @constructor\n*/\n")
+ output_file.write("Protocol.%sAgent = function(){};\n" % domain_name)
+
+ if "commands" in domain:
+ for command in domain["commands"]:
+ output_file.write("\n/**\n")
+ params = []
+ has_return_value = "returns" in command
+ explicit_parameters = promisified and has_return_value
+ if ("parameters" in command):
+ for in_param in command["parameters"]:
+ # All parameters are not optional in case of promisified domain with return value.
+ if (not explicit_parameters and "optional" in in_param):
+ params.append("opt_%s" % in_param["name"])
+ output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, in_param), in_param["name"]))
+ else:
+ params.append(in_param["name"])
+ output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, in_param), in_param["name"]))
+ returns = []
+ returns.append("?Protocol.Error")
+ if ("error" in command):
+ returns.append("%s=" % param_type(domain_name, command["error"]))
+ if (has_return_value):
+ for out_param in command["returns"]:
+ if ("optional" in out_param):
+ returns.append("%s=" % param_type(domain_name, out_param))
+ else:
+ returns.append("%s" % param_type(domain_name, out_param))
+ callback_return_type = "void="
+ if explicit_parameters:
+ callback_return_type = "T"
+ elif promisified:
+ callback_return_type = "T="
+ output_file.write(" * @param {function(%s):%s} opt_callback\n" % (", ".join(returns), callback_return_type))
+ if (promisified):
+ output_file.write(" * @return {!Promise.<T>}\n")
+ output_file.write(" * @template T\n")
+ params.append("opt_callback")
+
+ output_file.write(" */\n")
+ output_file.write("Protocol.%sAgent.prototype.%s = function(%s) {}\n" % (domain_name, command["name"], ", ".join(params)))
+ output_file.write("/** @param {function(%s):void=} opt_callback */\n" % ", ".join(returns))
+ output_file.write("Protocol.%sAgent.prototype.invoke_%s = function(obj, opt_callback) {}\n" % (domain_name, command["name"]))
+
+ output_file.write("\n\n\nvar %sAgent = function(){};\n" % domain_name)
+
+ if "types" in domain:
+ for type in domain["types"]:
+ if type["type"] == "object":
+ typedef_args = []
+ if "properties" in type:
+ for property in type["properties"]:
+ suffix = ""
+ if ("optional" in property):
+ suffix = "|undefined"
+ if "enum" in property:
+ enum_name = "%sAgent.%s%s" % (domain_name, type["id"], to_title_case(property["name"]))
+ output_file.write(generate_enum(enum_name, property))
+ typedef_args.append("%s:(%s%s)" % (property["name"], enum_name, suffix))
+ else:
+ typedef_args.append("%s:(%s%s)" % (property["name"], param_type(domain_name, property), suffix))
+ if (typedef_args):
+ output_file.write("\n/** @typedef {!{%s}} */\n%sAgent.%s;\n" % (", ".join(typedef_args), domain_name, type["id"]))
+ else:
+ output_file.write("\n/** @typedef {!Object} */\n%sAgent.%s;\n" % (domain_name, type["id"]))
+ elif type["type"] == "string" and "enum" in type:
+ output_file.write(generate_enum("%sAgent.%s" % (domain_name, type["id"]), type))
+ elif type["type"] == "array":
+ output_file.write("\n/** @typedef {!Array.<!%s>} */\n%sAgent.%s;\n" % (param_type(domain_name, type["items"]), domain_name, type["id"]))
+ else:
+ output_file.write("\n/** @typedef {%s} */\n%sAgent.%s;\n" % (type_traits[type["type"]], domain_name, type["id"]))
+
+ output_file.write("/** @interface */\n")
+ output_file.write("%sAgent.Dispatcher = function() {};\n" % domain_name)
+ if "events" in domain:
+ for event in domain["events"]:
+ params = []
+ if ("parameters" in event):
+ output_file.write("/**\n")
+ for param in event["parameters"]:
+ if ("optional" in param):
+ params.append("opt_%s" % param["name"])
+ output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, param), param["name"]))
+ else:
+ params.append(param["name"])
+ output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, param), param["name"]))
+ output_file.write(" */\n")
+ output_file.write("%sAgent.Dispatcher.prototype.%s = function(%s) {};\n" % (domain_name, event["name"], ", ".join(params)))
+
+ output_file.write("\n/** @constructor\n * @param {!Object.<string, !Object>} agentsMap\n */\n")
+ output_file.write("Protocol.Agents = function(agentsMap){this._agentsMap;};\n")
+ output_file.write("/**\n * @param {string} domain\n * @param {!Object} dispatcher\n */\n")
+ output_file.write("Protocol.Agents.prototype.registerDispatcher = function(domain, dispatcher){};\n")
+ for domain in domains:
+ domain_name = domain["domain"]
+ uppercase_length = 0
+ while uppercase_length < len(domain_name) and domain_name[uppercase_length].isupper():
+ uppercase_length += 1
+
+ output_file.write("/** @return {!Protocol.%sAgent}*/\n" % domain_name)
+ output_file.write("Protocol.Agents.prototype.%s = function(){};\n" % (domain_name[:uppercase_length].lower() + domain_name[uppercase_length:] + "Agent"))
+
+ output_file.write("/**\n * @param {!%sAgent.Dispatcher} dispatcher\n */\n" % domain_name)
+ output_file.write("Protocol.Agents.prototype.register%sDispatcher = function(dispatcher) {}\n" % domain_name)
+
+
+ output_file.close()
+
+if __name__ == "__main__":
+ import sys
+ import os.path
+ program_name = os.path.basename(__file__)
+ if len(sys.argv) < 4 or sys.argv[1] != "-o":
+ sys.stderr.write("Usage: %s -o OUTPUT_FILE INPUT_FILE\n" % program_name)
+ exit(1)
+ output_path = sys.argv[2]
+ input_path = sys.argv[3]
+ generate_protocol_externs(output_path, input_path)
diff --git a/deps/v8/src/inspector/build/rjsmin.py b/deps/v8/src/inspector/build/rjsmin.py
new file mode 100755
index 0000000000..8357a6dcc1
--- /dev/null
+++ b/deps/v8/src/inspector/build/rjsmin.py
@@ -0,0 +1,295 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 - 2013
+# Andr\xe9 Malo or his licensors, as applicable
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+r"""
+=====================
+ Javascript Minifier
+=====================
+
+rJSmin is a javascript minifier written in python.
+
+The minifier is based on the semantics of `jsmin.c by Douglas Crockford`_\.
+
+The module is a re-implementation aiming for speed, so it can be used at
+runtime (rather than during a preprocessing step). Usually it produces the
+same results as the original ``jsmin.c``. It differs in the following ways:
+
+- there is no error detection: unterminated string, regex and comment
+ literals are treated as regular javascript code and minified as such.
+- Control characters inside string and regex literals are left untouched; they
+ are not converted to spaces (nor to \n)
+- Newline characters are not allowed inside string and regex literals, except
+ for line continuations in string literals (ECMA-5).
+- "return /regex/" is recognized correctly.
+- "+ +" and "- -" sequences are not collapsed to '++' or '--'
+- Newlines before ! operators are removed more sensibly
+- rJSmin does not handle streams, but only complete strings. (However, the
+ module provides a "streamy" interface).
+
+Since most parts of the logic are handled by the regex engine it's way
+faster than the original python port of ``jsmin.c`` by Baruch Even. The speed
+factor varies between about 6 and 55 depending on input and python version
+(it gets faster the more compressed the input already is). Compared to the
+speed-refactored python port by Dave St.Germain the performance gain is less
+dramatic but still between 1.2 and 7. See the docs/BENCHMARKS file for
+details.
+
+rjsmin.c is a reimplementation of rjsmin.py in C and speeds it up even more.
+
+Both python 2 and python 3 are supported.
+
+.. _jsmin.c by Douglas Crockford:
+ http://www.crockford.com/javascript/jsmin.c
+"""
+__author__ = "Andr\xe9 Malo"
+__author__ = getattr(__author__, 'decode', lambda x: __author__)('latin-1')
+__docformat__ = "restructuredtext en"
+__license__ = "Apache License, Version 2.0"
+__version__ = '1.0.7'
+__all__ = ['jsmin']
+
+import re as _re
+
+
+def _make_jsmin(python_only=False):
+ """
+ Generate JS minifier based on `jsmin.c by Douglas Crockford`_
+
+ .. _jsmin.c by Douglas Crockford:
+ http://www.crockford.com/javascript/jsmin.c
+
+ :Parameters:
+ `python_only` : ``bool``
+ Use only the python variant. If true, the c extension is not even
+ tried to be loaded.
+
+ :Return: Minifier
+ :Rtype: ``callable``
+ """
+ # pylint: disable = R0912, R0914, W0612
+ if not python_only:
+ try:
+ import _rjsmin
+ except ImportError:
+ pass
+ else:
+ return _rjsmin.jsmin
+ try:
+ xrange
+ except NameError:
+ xrange = range # pylint: disable = W0622
+
+ space_chars = r'[\000-\011\013\014\016-\040]'
+
+ line_comment = r'(?://[^\r\n]*)'
+ space_comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
+ string1 = \
+ r'(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)'
+ string2 = r'(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*")'
+ strings = r'(?:%s|%s)' % (string1, string2)
+
+ charclass = r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\])'
+ nospecial = r'[^/\\\[\r\n]'
+ regex = r'(?:/(?![\r\n/*])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)' % (
+ nospecial, charclass, nospecial)
+ space = r'(?:%s|%s)' % (space_chars, space_comment)
+ newline = r'(?:%s?[\r\n])' % line_comment
+
+ def fix_charclass(result):
+ """ Fixup string of chars to fit into a regex char class """
+ pos = result.find('-')
+ if pos >= 0:
+ result = r'%s%s-' % (result[:pos], result[pos + 1:])
+
+ def sequentize(string):
+ """
+ Notate consecutive characters as sequence
+
+ (1-4 instead of 1234)
+ """
+ first, last, result = None, None, []
+ for char in map(ord, string):
+ if last is None:
+ first = last = char
+ elif last + 1 == char:
+ last = char
+ else:
+ result.append((first, last))
+ first = last = char
+ if last is not None:
+ result.append((first, last))
+ return ''.join(['%s%s%s' % (
+ chr(first),
+ last > first + 1 and '-' or '',
+ last != first and chr(last) or '') for first, last in result])
+
+ return _re.sub(r'([\000-\040\047])', # for better portability
+ lambda m: '\\%03o' % ord(m.group(1)), (sequentize(result)
+ .replace('\\', '\\\\')
+ .replace('[', '\\[')
+ .replace(']', '\\]')))
+
+ def id_literal_(what):
+ """ Make id_literal like char class """
+ match = _re.compile(what).match
+ result = ''.join([chr(c) for c in xrange(127) if not match(chr(c))])
+ return '[^%s]' % fix_charclass(result)
+
+ def not_id_literal_(keep):
+ """ Make negated id_literal like char class """
+ match = _re.compile(id_literal_(keep)).match
+ result = ''.join([chr(c) for c in xrange(127) if not match(chr(c))])
+ return r'[%s]' % fix_charclass(result)
+
+ not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]')
+ preregex1 = r'[(,=:\[!&|?{};\r\n]'
+ preregex2 = r'%(not_id_literal)sreturn' % locals()
+
+ id_literal = id_literal_(r'[a-zA-Z0-9_$]')
+ id_literal_open = id_literal_(r'[a-zA-Z0-9_${\[(!+-]')
+ id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')
+
+ dull = r'[^\047"/\000-\040]'
+
+ space_sub = _re.compile((
+ r'(%(dull)s+)'
+ r'|(%(strings)s%(dull)s*)'
+ r'|(?<=%(preregex1)s)'
+ r'%(space)s*(?:%(newline)s%(space)s*)*'
+ r'(%(regex)s%(dull)s*)'
+ r'|(?<=%(preregex2)s)'
+ r'%(space)s*(?:%(newline)s%(space)s)*'
+ r'(%(regex)s%(dull)s*)'
+ r'|(?<=%(id_literal_close)s)'
+ r'%(space)s*(?:(%(newline)s)%(space)s*)+'
+ r'(?=%(id_literal_open)s)'
+ r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)'
+ r'|(?<=\+)(%(space)s)+(?=\+)'
+ r'|(?<=-)(%(space)s)+(?=-)'
+ r'|%(space)s+'
+ r'|(?:%(newline)s%(space)s*)+') % locals()).sub
+ #print space_sub.__self__.pattern
+
+ def space_subber(match):
+ """ Substitution callback """
+ # pylint: disable = C0321, R0911
+ groups = match.groups()
+ if groups[0]:
+ return groups[0]
+ elif groups[1]:
+ return groups[1]
+ elif groups[2]:
+ return groups[2]
+ elif groups[3]:
+ return groups[3]
+ elif groups[4]:
+ return '\n'
+ elif groups[5] or groups[6] or groups[7]:
+ return ' '
+ else:
+ return ''
+
+ def jsmin(script): # pylint: disable = W0621
+ r"""
+ Minify javascript based on `jsmin.c by Douglas Crockford`_\.
+
+ Instead of parsing the stream char by char, it uses a regular
+ expression approach which minifies the whole script with one big
+ substitution regex.
+
+ .. _jsmin.c by Douglas Crockford:
+ http://www.crockford.com/javascript/jsmin.c
+
+ :Parameters:
+ `script` : ``str``
+ Script to minify
+
+ :Return: Minified script
+ :Rtype: ``str``
+ """
+ return space_sub(space_subber, '\n%s\n' % script).strip()
+
+ return jsmin
+
+jsmin = _make_jsmin()
+
+
+def jsmin_for_posers(script):
+ r"""
+ Minify javascript based on `jsmin.c by Douglas Crockford`_\.
+
+ Instead of parsing the stream char by char, it uses a regular
+ expression approach which minifies the whole script with one big
+ substitution regex.
+
+ .. _jsmin.c by Douglas Crockford:
+ http://www.crockford.com/javascript/jsmin.c
+
+ :Warning: This function is the digest of a _make_jsmin() call. It just
+ utilizes the resulting regex. It's just for fun here and may
+ vanish any time. Use the `jsmin` function instead.
+
+ :Parameters:
+ `script` : ``str``
+ Script to minify
+
+ :Return: Minified script
+ :Rtype: ``str``
+ """
+ def subber(match):
+ """ Substitution callback """
+ groups = match.groups()
+ return (
+ groups[0] or
+ groups[1] or
+ groups[2] or
+ groups[3] or
+ (groups[4] and '\n') or
+ (groups[5] and ' ') or
+ (groups[6] and ' ') or
+ (groups[7] and ' ') or
+ '')
+
+ return _re.sub(
+ r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
+ r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
+ r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?<=[(,=:\[!&|?{};\r\n])(?'
+ r':[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*'
+ r'(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*'
+ r'[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:('
+ r'?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\['
+ r'\r\n]*)*/)[^\047"/\000-\040]*)|(?<=[\000-#%-,./:-@\[-^`{-~-]return'
+ r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
+ r'))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:'
+ r'/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?'
+ r':(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/'
+ r'\\\[\r\n]*)*/)[^\047"/\000-\040]*)|(?<=[^\000-!#%&(*,./:-@\[\\^`{|'
+ r'~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)'
+ r'*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\014\016-\040]'
+ r'|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#%-\047)*,./'
+ r':-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-\011\013\01'
+ r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^\000-#%-,./:'
+ r'-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*'
+ r'\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\013\014\016-'
+ r'\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\000-\011\013'
+ r'\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:(?:(?://[^'
+ r'\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^'
+ r'/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script).strip()
+
+
+if __name__ == '__main__':
+ import sys as _sys
+ _sys.stdout.write(jsmin(_sys.stdin.read()))
diff --git a/deps/v8/src/inspector/build/xxd.py b/deps/v8/src/inspector/build/xxd.py
new file mode 100644
index 0000000000..5a63a7cb8d
--- /dev/null
+++ b/deps/v8/src/inspector/build/xxd.py
@@ -0,0 +1,28 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Represent a file as a C++ constant string.
+
+Usage:
+python xxd.py VAR SOURCE DEST
+"""
+
+
+import sys
+import rjsmin
+
+
+def main():
+ variable_name, input_filename, output_filename = sys.argv[1:]
+ with open(input_filename) as input_file:
+ input_text = input_file.read()
+ input_text = rjsmin.jsmin(input_text)
+ hex_values = ['0x{0:02x}'.format(ord(char)) for char in input_text]
+ const_declaration = 'const char %s[] = {\n%s\n};\n' % (
+ variable_name, ', '.join(hex_values))
+ with open(output_filename, 'w') as output_file:
+ output_file.write(const_declaration)
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/deps/v8/src/inspector/debugger-script.js b/deps/v8/src/inspector/debugger-script.js
new file mode 100644
index 0000000000..98910d69df
--- /dev/null
+++ b/deps/v8/src/inspector/debugger-script.js
@@ -0,0 +1,712 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+"use strict";
+
+(function () {
+
+var DebuggerScript = {};
+
+/** @enum */
+const PauseOnExceptionsState = {
+ DontPauseOnExceptions: 0,
+ PauseOnAllExceptions: 1,
+ PauseOnUncaughtExceptions: 2
+};
+DebuggerScript.PauseOnExceptionsState = PauseOnExceptionsState;
+
+DebuggerScript._pauseOnExceptionsState = DebuggerScript.PauseOnExceptionsState.DontPauseOnExceptions;
+Debug.clearBreakOnException();
+Debug.clearBreakOnUncaughtException();
+
+/**
+ * @param {?CompileEvent} eventData
+ */
+DebuggerScript.getAfterCompileScript = function(eventData)
+{
+ var script = eventData.script().value();
+ if (!script.is_debugger_script)
+ return DebuggerScript._formatScript(eventData.script().value());
+ return null;
+}
+
+/** @type {!Map<!ScopeType, string>} */
+DebuggerScript._scopeTypeNames = new Map();
+DebuggerScript._scopeTypeNames.set(ScopeType.Global, "global");
+DebuggerScript._scopeTypeNames.set(ScopeType.Local, "local");
+DebuggerScript._scopeTypeNames.set(ScopeType.With, "with");
+DebuggerScript._scopeTypeNames.set(ScopeType.Closure, "closure");
+DebuggerScript._scopeTypeNames.set(ScopeType.Catch, "catch");
+DebuggerScript._scopeTypeNames.set(ScopeType.Block, "block");
+DebuggerScript._scopeTypeNames.set(ScopeType.Script, "script");
+
+/**
+ * @param {function()} fun
+ * @return {?Array<!Scope>}
+ */
+DebuggerScript.getFunctionScopes = function(fun)
+{
+ var mirror = MakeMirror(fun);
+ if (!mirror.isFunction())
+ return null;
+ var functionMirror = /** @type {!FunctionMirror} */(mirror);
+ var count = functionMirror.scopeCount();
+ if (count == 0)
+ return null;
+ var result = [];
+ for (var i = 0; i < count; i++) {
+ var scopeDetails = functionMirror.scope(i).details();
+ var scopeObject = DebuggerScript._buildScopeObject(scopeDetails.type(), scopeDetails.object());
+ if (!scopeObject)
+ continue;
+ result.push({
+ type: /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeDetails.type())),
+ object: scopeObject,
+ name: scopeDetails.name() || ""
+ });
+ }
+ return result;
+}
+
+/**
+ * @param {Object} object
+ * @return {?RawLocation}
+ */
+DebuggerScript.getGeneratorObjectLocation = function(object)
+{
+ var mirror = MakeMirror(object, true /* transient */);
+ if (!mirror.isGenerator())
+ return null;
+ var generatorMirror = /** @type {!GeneratorMirror} */(mirror);
+ var funcMirror = generatorMirror.func();
+ if (!funcMirror.resolved())
+ return null;
+ var location = generatorMirror.sourceLocation() || funcMirror.sourceLocation();
+ var script = funcMirror.script();
+ if (script && location) {
+ return {
+ scriptId: "" + script.id(),
+ lineNumber: location.line,
+ columnNumber: location.column
+ };
+ }
+ return null;
+}
+
+/**
+ * @param {Object} object
+ * @return {!Array<!{value: *}>|undefined}
+ */
+DebuggerScript.getCollectionEntries = function(object)
+{
+ var mirror = MakeMirror(object, true /* transient */);
+ if (mirror.isMap())
+ return /** @type {!MapMirror} */(mirror).entries();
+ if (mirror.isSet() || mirror.isIterator()) {
+ var result = [];
+ var values = mirror.isSet() ? /** @type {!SetMirror} */(mirror).values() : /** @type {!IteratorMirror} */(mirror).preview();
+ for (var i = 0; i < values.length; ++i)
+ result.push({ value: values[i] });
+ return result;
+ }
+}
+
+/**
+ * @param {string|undefined} contextData
+ * @return {number}
+ */
+DebuggerScript._executionContextId = function(contextData)
+{
+ if (!contextData)
+ return 0;
+ var match = contextData.match(/^[^,]*,([^,]*),.*$/);
+ if (!match)
+ return 0;
+ return parseInt(match[1], 10) || 0;
+}
+
+/**
+ * @param {string|undefined} contextData
+ * @return {string}
+ */
+DebuggerScript._executionContextAuxData = function(contextData)
+{
+ if (!contextData)
+ return "";
+ var match = contextData.match(/^[^,]*,[^,]*,(.*)$/);
+ return match ? match[1] : "";
+}
+
+/**
+ * @param {string} contextGroupId
+ * @return {!Array<!FormattedScript>}
+ */
+DebuggerScript.getScripts = function(contextGroupId)
+{
+ var result = [];
+ var scripts = Debug.scripts();
+ var contextDataPrefix = null;
+ if (contextGroupId)
+ contextDataPrefix = contextGroupId + ",";
+ for (var i = 0; i < scripts.length; ++i) {
+ var script = scripts[i];
+ if (contextDataPrefix) {
+ if (!script.context_data)
+ continue;
+ // Context data is a string in the following format:
+ // <contextGroupId>,<contextId>,<auxData>
+ if (script.context_data.indexOf(contextDataPrefix) !== 0)
+ continue;
+ }
+ if (script.is_debugger_script)
+ continue;
+ result.push(DebuggerScript._formatScript(script));
+ }
+ return result;
+}
+
+/**
+ * @param {!Script} script
+ * @return {!FormattedScript}
+ */
+DebuggerScript._formatScript = function(script)
+{
+ var lineEnds = script.line_ends;
+ var lineCount = lineEnds.length;
+ var endLine = script.line_offset + lineCount - 1;
+ var endColumn;
+ // V8 will not count last line if script source ends with \n.
+ if (script.source[script.source.length - 1] === '\n') {
+ endLine += 1;
+ endColumn = 0;
+ } else {
+ if (lineCount === 1)
+ endColumn = script.source.length + script.column_offset;
+ else
+ endColumn = script.source.length - (lineEnds[lineCount - 2] + 1);
+ }
+ return {
+ id: script.id,
+ name: script.nameOrSourceURL(),
+ sourceURL: script.source_url,
+ sourceMappingURL: script.source_mapping_url,
+ source: script.source,
+ startLine: script.line_offset,
+ startColumn: script.column_offset,
+ endLine: endLine,
+ endColumn: endColumn,
+ executionContextId: DebuggerScript._executionContextId(script.context_data),
+ // Note that we cannot derive aux data from context id because of compilation cache.
+ executionContextAuxData: DebuggerScript._executionContextAuxData(script.context_data)
+ };
+}
+
+/**
+ * @param {!ExecutionState} execState
+ * @param {!BreakpointInfo} info
+ * @return {string|undefined}
+ */
+DebuggerScript.setBreakpoint = function(execState, info)
+{
+ var breakId = Debug.setScriptBreakPointById(info.sourceID, info.lineNumber, info.columnNumber, info.condition, undefined, Debug.BreakPositionAlignment.Statement);
+ var locations = Debug.findBreakPointActualLocations(breakId);
+ if (!locations.length)
+ return undefined;
+ info.lineNumber = locations[0].line;
+ info.columnNumber = locations[0].column;
+ return breakId.toString();
+}
+
+/**
+ * @param {!ExecutionState} execState
+ * @param {!{breakpointId: number}} info
+ */
+DebuggerScript.removeBreakpoint = function(execState, info)
+{
+ Debug.findBreakPoint(info.breakpointId, true);
+}
+
+/**
+ * @return {number}
+ */
+DebuggerScript.pauseOnExceptionsState = function()
+{
+ return DebuggerScript._pauseOnExceptionsState;
+}
+
+/**
+ * @param {number} newState
+ */
+DebuggerScript.setPauseOnExceptionsState = function(newState)
+{
+ DebuggerScript._pauseOnExceptionsState = newState;
+
+ if (DebuggerScript.PauseOnExceptionsState.PauseOnAllExceptions === newState)
+ Debug.setBreakOnException();
+ else
+ Debug.clearBreakOnException();
+
+ if (DebuggerScript.PauseOnExceptionsState.PauseOnUncaughtExceptions === newState)
+ Debug.setBreakOnUncaughtException();
+ else
+ Debug.clearBreakOnUncaughtException();
+}
+
+/**
+ * @param {!ExecutionState} execState
+ * @param {number} limit
+ * @return {!Array<!JavaScriptCallFrame>}
+ */
+DebuggerScript.currentCallFrames = function(execState, limit)
+{
+ var frames = [];
+ for (var i = 0; i < execState.frameCount() && (!limit || i < limit); ++i)
+ frames.push(DebuggerScript._frameMirrorToJSCallFrame(execState.frame(i)));
+ return frames;
+}
+
+/**
+ * @param {!ExecutionState} execState
+ */
+DebuggerScript.stepIntoStatement = function(execState)
+{
+ execState.prepareStep(Debug.StepAction.StepIn);
+}
+
+/**
+ * @param {!ExecutionState} execState
+ */
+DebuggerScript.stepFrameStatement = function(execState)
+{
+ execState.prepareStep(Debug.StepAction.StepFrame);
+}
+
+/**
+ * @param {!ExecutionState} execState
+ */
+DebuggerScript.stepOverStatement = function(execState)
+{
+ execState.prepareStep(Debug.StepAction.StepNext);
+}
+
+/**
+ * @param {!ExecutionState} execState
+ */
+DebuggerScript.stepOutOfFunction = function(execState)
+{
+ execState.prepareStep(Debug.StepAction.StepOut);
+}
+
+DebuggerScript.clearStepping = function()
+{
+ Debug.clearStepping();
+}
+
+// Returns array in form:
+// [ 0, <v8_result_report> ] in case of success
+// or [ 1, <general_error_message>, <compiler_message>, <line_number>, <column_number> ] in case of compile error, numbers are 1-based.
+// or throws exception with message.
+/**
+ * @param {number} scriptId
+ * @param {string} newSource
+ * @param {boolean} preview
+ * @return {!Array<*>}
+ */
+DebuggerScript.liveEditScriptSource = function(scriptId, newSource, preview)
+{
+ var scripts = Debug.scripts();
+ var scriptToEdit = null;
+ for (var i = 0; i < scripts.length; i++) {
+ if (scripts[i].id == scriptId) {
+ scriptToEdit = scripts[i];
+ break;
+ }
+ }
+ if (!scriptToEdit)
+ throw("Script not found");
+
+ var changeLog = [];
+ try {
+ var result = Debug.LiveEdit.SetScriptSource(scriptToEdit, newSource, preview, changeLog);
+ return [0, result.stack_modified];
+ } catch (e) {
+ if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
+ var details = /** @type {!LiveEditErrorDetails} */(e.details);
+ if (details.type === "liveedit_compile_error") {
+ var startPosition = details.position.start;
+ return [1, String(e), String(details.syntaxErrorMessage), Number(startPosition.line), Number(startPosition.column)];
+ }
+ }
+ throw e;
+ }
+}
+
+/**
+ * @param {!ExecutionState} execState
+ */
+DebuggerScript.clearBreakpoints = function(execState)
+{
+ Debug.clearAllBreakPoints();
+}
+
+/**
+ * @param {!ExecutionState} execState
+ * @param {!{enabled: boolean}} info
+ */
+DebuggerScript.setBreakpointsActivated = function(execState, info)
+{
+ Debug.debuggerFlags().breakPointsActive.setValue(info.enabled);
+}
+
+/**
+ * @param {!BreakEvent} eventData
+ */
+DebuggerScript.getBreakpointNumbers = function(eventData)
+{
+ var breakpoints = eventData.breakPointsHit();
+ var numbers = [];
+ if (!breakpoints)
+ return numbers;
+
+ for (var i = 0; i < breakpoints.length; i++) {
+ var breakpoint = breakpoints[i];
+ var scriptBreakPoint = breakpoint.script_break_point();
+ numbers.push(scriptBreakPoint ? scriptBreakPoint.number() : breakpoint.number());
+ }
+ return numbers;
+}
+
+// NOTE: This function is performance critical, as it can be run on every
+// statement that generates an async event (like addEventListener) to support
+// asynchronous call stacks. Thus, when possible, initialize the data lazily.
+/**
+ * @param {!FrameMirror} frameMirror
+ * @return {!JavaScriptCallFrame}
+ */
+DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
+{
+ // Stuff that can not be initialized lazily (i.e. valid while paused with a valid break_id).
+ // The frameMirror and scopeMirror can be accessed only while paused on the debugger.
+ var frameDetails = frameMirror.details();
+
+ var funcObject = frameDetails.func();
+ var sourcePosition = frameDetails.sourcePosition();
+ var thisObject = frameDetails.receiver();
+
+ var isAtReturn = !!frameDetails.isAtReturn();
+ var returnValue = isAtReturn ? frameDetails.returnValue() : undefined;
+
+ var scopeMirrors = frameMirror.allScopes(false);
+ /** @type {!Array<number>} */
+ var scopeTypes = new Array(scopeMirrors.length);
+ /** @type {?Array<!Object>} */
+ var scopeObjects = new Array(scopeMirrors.length);
+ /** @type {!Array<string|undefined>} */
+ var scopeNames = new Array(scopeMirrors.length);
+ /** @type {?Array<number>} */
+ var scopeStartPositions = new Array(scopeMirrors.length);
+ /** @type {?Array<number>} */
+ var scopeEndPositions = new Array(scopeMirrors.length);
+ /** @type {?Array<function()|null>} */
+ var scopeFunctions = new Array(scopeMirrors.length);
+ for (var i = 0; i < scopeMirrors.length; ++i) {
+ var scopeDetails = scopeMirrors[i].details();
+ scopeTypes[i] = scopeDetails.type();
+ scopeObjects[i] = scopeDetails.object();
+ scopeNames[i] = scopeDetails.name();
+ scopeStartPositions[i] = scopeDetails.startPosition ? scopeDetails.startPosition() : 0;
+ scopeEndPositions[i] = scopeDetails.endPosition ? scopeDetails.endPosition() : 0;
+ scopeFunctions[i] = scopeDetails.func ? scopeDetails.func() : null;
+ }
+
+ // Calculated lazily.
+ var scopeChain;
+ var funcMirror;
+ var location;
+ /** @type {!Array<?RawLocation>} */
+ var scopeStartLocations;
+ /** @type {!Array<?RawLocation>} */
+ var scopeEndLocations;
+ var details;
+
+ /**
+ * @param {!ScriptMirror|undefined} script
+ * @param {number} pos
+ * @return {?RawLocation}
+ */
+ function createLocation(script, pos)
+ {
+ if (!script)
+ return null;
+
+ var location = script.locationFromPosition(pos, true);
+ return {
+ "lineNumber": location.line,
+ "columnNumber": location.column,
+ "scriptId": String(script.id())
+ }
+ }
+
+ /**
+ * @return {!Array<!Object>}
+ */
+ function ensureScopeChain()
+ {
+ if (!scopeChain) {
+ scopeChain = [];
+ scopeStartLocations = [];
+ scopeEndLocations = [];
+ for (var i = 0, j = 0; i < scopeObjects.length; ++i) {
+ var scopeObject = DebuggerScript._buildScopeObject(scopeTypes[i], scopeObjects[i]);
+ if (scopeObject) {
+ scopeTypes[j] = scopeTypes[i];
+ scopeNames[j] = scopeNames[i];
+ scopeChain[j] = scopeObject;
+
+ var funcMirror = scopeFunctions ? MakeMirror(scopeFunctions[i]) : null;
+ if (!funcMirror || !funcMirror.isFunction())
+ funcMirror = new UnresolvedFunctionMirror(funcObject);
+
+ var script = /** @type {!FunctionMirror} */(funcMirror).script();
+ scopeStartLocations[j] = createLocation(script, scopeStartPositions[i]);
+ scopeEndLocations[j] = createLocation(script, scopeEndPositions[i]);
+ ++j;
+ }
+ }
+ scopeTypes.length = scopeChain.length;
+ scopeNames.length = scopeChain.length;
+ scopeObjects = null; // Free for GC.
+ scopeFunctions = null;
+ scopeStartPositions = null;
+ scopeEndPositions = null;
+ }
+ return scopeChain;
+ }
+
+ /**
+ * @return {!JavaScriptCallFrameDetails}
+ */
+ function lazyDetails()
+ {
+ if (!details) {
+ var scopeObjects = ensureScopeChain();
+ var script = ensureFuncMirror().script();
+ /** @type {!Array<Scope>} */
+ var scopes = [];
+ for (var i = 0; i < scopeObjects.length; ++i) {
+ var scope = {
+ "type": /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeTypes[i])),
+ "object": scopeObjects[i],
+ };
+ if (scopeNames[i])
+ scope.name = scopeNames[i];
+ if (scopeStartLocations[i])
+ scope.startLocation = /** @type {!RawLocation} */(scopeStartLocations[i]);
+ if (scopeEndLocations[i])
+ scope.endLocation = /** @type {!RawLocation} */(scopeEndLocations[i]);
+ scopes.push(scope);
+ }
+ details = {
+ "functionName": ensureFuncMirror().debugName(),
+ "location": {
+ "lineNumber": line(),
+ "columnNumber": column(),
+ "scriptId": String(script.id())
+ },
+ "this": thisObject,
+ "scopeChain": scopes
+ };
+ var functionLocation = ensureFuncMirror().sourceLocation();
+ if (functionLocation) {
+ details.functionLocation = {
+ "lineNumber": functionLocation.line,
+ "columnNumber": functionLocation.column,
+ "scriptId": String(script.id())
+ };
+ }
+ if (isAtReturn)
+ details.returnValue = returnValue;
+ }
+ return details;
+ }
+
+ /**
+ * @return {!FunctionMirror}
+ */
+ function ensureFuncMirror()
+ {
+ if (!funcMirror) {
+ funcMirror = MakeMirror(funcObject);
+ if (!funcMirror.isFunction())
+ funcMirror = new UnresolvedFunctionMirror(funcObject);
+ }
+ return /** @type {!FunctionMirror} */(funcMirror);
+ }
+
+ /**
+ * @return {!{line: number, column: number}}
+ */
+ function ensureLocation()
+ {
+ if (!location) {
+ var script = ensureFuncMirror().script();
+ if (script)
+ location = script.locationFromPosition(sourcePosition, true);
+ if (!location)
+ location = { line: 0, column: 0 };
+ }
+ return location;
+ }
+
+ /**
+ * @return {number}
+ */
+ function line()
+ {
+ return ensureLocation().line;
+ }
+
+ /**
+ * @return {number}
+ */
+ function column()
+ {
+ return ensureLocation().column;
+ }
+
+ /**
+ * @return {number}
+ */
+ function contextId()
+ {
+ var mirror = ensureFuncMirror();
+ // Old V8 do not have context() function on these objects
+ if (!mirror.context)
+ return DebuggerScript._executionContextId(mirror.script().value().context_data);
+ var context = mirror.context();
+ if (context)
+ return DebuggerScript._executionContextId(context.data());
+ return 0;
+ }
+
+ /**
+ * @return {number|undefined}
+ */
+ function sourceID()
+ {
+ var script = ensureFuncMirror().script();
+ return script && script.id();
+ }
+
+ /**
+ * @param {string} expression
+ * @return {*}
+ */
+ function evaluate(expression)
+ {
+ return frameMirror.evaluate(expression, false).value();
+ }
+
+ /** @return {undefined} */
+ function restart()
+ {
+ return frameMirror.restart();
+ }
+
+ /**
+ * @param {number} scopeNumber
+ * @param {string} variableName
+ * @param {*} newValue
+ */
+ function setVariableValue(scopeNumber, variableName, newValue)
+ {
+ var scopeMirror = frameMirror.scope(scopeNumber);
+ if (!scopeMirror)
+ throw new Error("Incorrect scope index");
+ scopeMirror.setVariableValue(variableName, newValue);
+ }
+
+ return {
+ "sourceID": sourceID,
+ "line": line,
+ "column": column,
+ "contextId": contextId,
+ "thisObject": thisObject,
+ "evaluate": evaluate,
+ "restart": restart,
+ "setVariableValue": setVariableValue,
+ "isAtReturn": isAtReturn,
+ "details": lazyDetails
+ };
+}
+
+/**
+ * @param {number} scopeType
+ * @param {!Object} scopeObject
+ * @return {!Object|undefined}
+ */
+DebuggerScript._buildScopeObject = function(scopeType, scopeObject)
+{
+ var result;
+ switch (scopeType) {
+ case ScopeType.Local:
+ case ScopeType.Closure:
+ case ScopeType.Catch:
+ case ScopeType.Block:
+ case ScopeType.Script:
+ // For transient objects we create a "persistent" copy that contains
+ // the same properties.
+ // Reset scope object prototype to null so that the proto properties
+ // don't appear in the local scope section.
+ var properties = /** @type {!ObjectMirror} */(MakeMirror(scopeObject, true /* transient */)).properties();
+ // Almost always Script scope will be empty, so just filter out that noise.
+ // Also drop empty Block scopes, should we get any.
+ if (!properties.length && (scopeType === ScopeType.Script || scopeType === ScopeType.Block))
+ break;
+ result = { __proto__: null };
+ for (var j = 0; j < properties.length; j++) {
+ var name = properties[j].name();
+ if (name.length === 0 || name.charAt(0) === ".")
+ continue; // Skip internal variables like ".arguments" and variables with empty name
+ result[name] = properties[j].value_;
+ }
+ break;
+ case ScopeType.Global:
+ case ScopeType.With:
+ result = scopeObject;
+ break;
+ }
+ return result;
+}
+
+// We never resolve Mirror by its handle so to avoid memory leaks caused by Mirrors in the cache we disable it.
+ToggleMirrorCache(false);
+
+return DebuggerScript;
+})();
diff --git a/deps/v8/src/inspector/debugger_script_externs.js b/deps/v8/src/inspector/debugger_script_externs.js
new file mode 100644
index 0000000000..c7df61f3f4
--- /dev/null
+++ b/deps/v8/src/inspector/debugger_script_externs.js
@@ -0,0 +1,522 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/** @typedef {{
+ type: string,
+ object: !Object,
+ name: (string|undefined),
+ startLocation: (!RawLocation|undefined),
+ endLocation: (!RawLocation|undefined)
+ }} */
+var Scope;
+
+/** @typedef {{
+ scriptId: string,
+ lineNumber: number,
+ columnNumber: number
+ }} */
+var RawLocation;
+
+/** @typedef {{
+ id: number,
+ name: string,
+ sourceURL: (string|undefined),
+ sourceMappingURL: (string|undefined),
+ source: string,
+ startLine: number,
+ endLine: number,
+ startColumn: number,
+ endColumn: number,
+ executionContextId: number,
+ executionContextAuxData: string
+ }} */
+var FormattedScript;
+
+/** @typedef {{
+ functionName: string,
+ location: !RawLocation,
+ this: !Object,
+ scopeChain: !Array<!Scope>,
+ functionLocation: (RawLocation|undefined),
+ returnValue: (*|undefined)
+ }} */
+var JavaScriptCallFrameDetails;
+
+/** @typedef {{
+ sourceID: function():(number|undefined),
+ line: function():number,
+ column: function():number,
+ thisObject: !Object,
+ evaluate: function(string):*,
+ restart: function():undefined,
+ setVariableValue: function(number, string, *):undefined,
+ isAtReturn: boolean,
+ details: function():!JavaScriptCallFrameDetails
+ }} */
+var JavaScriptCallFrame;
+
+/**
+ * @const
+ */
+var Debug = {};
+
+Debug.setBreakOnException = function() {}
+
+Debug.clearBreakOnException = function() {}
+
+Debug.setBreakOnUncaughtException = function() {}
+
+/**
+ * @return {undefined}
+ */
+Debug.clearBreakOnUncaughtException = function() {}
+
+Debug.clearStepping = function() {}
+
+Debug.clearAllBreakPoints = function() {}
+
+/** @return {!Array<!Script>} */
+Debug.scripts = function() {}
+
+/**
+ * @param {number} scriptId
+ * @param {number=} line
+ * @param {number=} column
+ * @param {string=} condition
+ * @param {string=} groupId
+ * @param {Debug.BreakPositionAlignment=} positionAlignment
+ */
+Debug.setScriptBreakPointById = function(scriptId, line, column, condition, groupId, positionAlignment) {}
+
+/**
+ * @param {number} breakId
+ * @return {!Array<!SourceLocation>}
+ */
+Debug.findBreakPointActualLocations = function(breakId) {}
+
+/**
+ * @param {number} breakId
+ * @param {boolean} remove
+ * @return {!BreakPoint|undefined}
+ */
+Debug.findBreakPoint = function(breakId, remove) {}
+
+/** @return {!DebuggerFlags} */
+Debug.debuggerFlags = function() {}
+
+
+/** @enum */
+const BreakPositionAlignment = {
+ Statement: 0,
+ BreakPosition: 1
+};
+Debug.BreakPositionAlignment = BreakPositionAlignment;
+
+/** @enum */
+Debug.StepAction = { StepOut: 0,
+ StepNext: 1,
+ StepIn: 2,
+ StepFrame: 3 };
+
+/** @enum */
+const ScriptCompilationType = { Host: 0,
+ Eval: 1,
+ JSON: 2 };
+Debug.ScriptCompilationType = ScriptCompilationType;
+
+
+/** @interface */
+function DebuggerFlag() {}
+
+/** @param {boolean} value */
+DebuggerFlag.prototype.setValue = function(value) {}
+
+
+/** @typedef {{
+ * breakPointsActive: !DebuggerFlag
+ * }}
+ */
+var DebuggerFlags;
+
+/** @const */
+var LiveEdit = {}
+
+/**
+ * @param {!Script} script
+ * @param {string} newSource
+ * @param {boolean} previewOnly
+ * @return {!{stack_modified: (boolean|undefined)}}
+ */
+LiveEdit.SetScriptSource = function(script, newSource, previewOnly, change_log) {}
+
+/** @constructor */
+function Failure() {}
+LiveEdit.Failure = Failure;
+
+Debug.LiveEdit = LiveEdit;
+
+/** @typedef {{
+ * type: string,
+ * syntaxErrorMessage: string,
+ * position: !{start: !{line: number, column: number}},
+ * }}
+ */
+var LiveEditErrorDetails;
+
+/** @typedef {{
+ * breakpointId: number,
+ * sourceID: number,
+ * lineNumber: (number|undefined),
+ * columnNumber: (number|undefined),
+ * condition: (string|undefined),
+ * interstatementLocation: (boolean|undefined),
+ * }}
+ */
+var BreakpointInfo;
+
+
+/** @interface */
+function BreakPoint() {}
+
+/** @return {!BreakPoint|undefined} */
+BreakPoint.prototype.script_break_point = function() {}
+
+/** @return {number} */
+BreakPoint.prototype.number = function() {}
+
+
+/** @interface */
+function CompileEvent() {}
+
+/** @return {!ScriptMirror} */
+CompileEvent.prototype.script = function() {}
+
+
+/** @interface */
+function BreakEvent() {}
+
+/** @return {!Array<!BreakPoint>|undefined} */
+BreakEvent.prototype.breakPointsHit = function() {}
+
+
+/** @interface */
+function ExecutionState() {}
+
+/** @param {!Debug.StepAction} action */
+ExecutionState.prototype.prepareStep = function(action) {}
+
+/**
+ * @param {string} source
+ * @param {boolean} disableBreak
+ * @param {*=} additionalContext
+ */
+ExecutionState.prototype.evaluateGlobal = function(source, disableBreak, additionalContext) {}
+
+/** @return {number} */
+ExecutionState.prototype.frameCount = function() {}
+
+/**
+ * @param {number} index
+ * @return {!FrameMirror}
+ */
+ExecutionState.prototype.frame = function(index) {}
+
+/** @param {number} index */
+ExecutionState.prototype.setSelectedFrame = function(index) {}
+
+/** @return {number} */
+ExecutionState.prototype.selectedFrame = function() {}
+
+
+/** @enum */
+var ScopeType = { Global: 0,
+ Local: 1,
+ With: 2,
+ Closure: 3,
+ Catch: 4,
+ Block: 5,
+ Script: 6 };
+
+
+/** @typedef {{
+ * script: number,
+ * position: number,
+ * line: number,
+ * column:number,
+ * start: number,
+ * end: number,
+ * }}
+ */
+var SourceLocation;
+
+/** @typedef{{
+ * id: number,
+ * context_data: (string|undefined),
+ * source_url: (string|undefined),
+ * source_mapping_url: (string|undefined),
+ * is_debugger_script: boolean,
+ * source: string,
+ * line_ends: !Array<number>,
+ * line_offset: number,
+ * column_offset: number,
+ * nameOrSourceURL: function():string,
+ * compilationType: function():!ScriptCompilationType,
+ * }}
+ */
+var Script;
+
+/** @interface */
+function ScopeDetails() {}
+
+/** @return {!Object} */
+ScopeDetails.prototype.object = function() {}
+
+/** @return {string|undefined} */
+ScopeDetails.prototype.name = function() {}
+
+/** @return {number} */
+ScopeDetails.prototype.type = function() {}
+
+
+/** @interface */
+function FrameDetails() {}
+
+/** @return {!Object} */
+FrameDetails.prototype.receiver = function() {}
+
+/** @return {function()} */
+FrameDetails.prototype.func = function() {}
+
+/** @return {boolean} */
+FrameDetails.prototype.isAtReturn = function() {}
+
+/** @return {number} */
+FrameDetails.prototype.sourcePosition = function() {}
+
+/** @return {*} */
+FrameDetails.prototype.returnValue = function() {}
+
+/** @return {number} */
+FrameDetails.prototype.scopeCount = function() {}
+
+
+/** @param {boolean} value */
+function ToggleMirrorCache(value) {}
+
+/**
+ * @param {*} value
+ * @param {boolean=} transient
+ * @return {!Mirror}
+ */
+function MakeMirror(value, transient) {}
+
+
+/** @interface */
+function Mirror() {}
+
+/** @return {boolean} */
+Mirror.prototype.isFunction = function() {}
+
+/** @return {boolean} */
+Mirror.prototype.isGenerator = function() {}
+
+/** @return {boolean} */
+Mirror.prototype.isMap = function() {}
+
+/** @return {boolean} */
+Mirror.prototype.isSet = function() {}
+
+/** @return {boolean} */
+Mirror.prototype.isIterator = function() {}
+
+
+/**
+ * @interface
+ * @extends {Mirror}
+ */
+function ObjectMirror() {}
+
+/** @return {!Array<!PropertyMirror>} */
+ObjectMirror.prototype.properties = function() {}
+
+
+/**
+ * @interface
+ * @extends {ObjectMirror}
+ */
+function FunctionMirror () {}
+
+/** @return {number} */
+FunctionMirror.prototype.scopeCount = function() {}
+
+/**
+ * @param {number} index
+ * @return {!ScopeMirror|undefined}
+ */
+FunctionMirror.prototype.scope = function(index) {}
+
+/** @return {boolean} */
+FunctionMirror.prototype.resolved = function() {}
+
+/** @return {function()} */
+FunctionMirror.prototype.value = function() {}
+
+/** @return {string} */
+FunctionMirror.prototype.debugName = function() {}
+
+/** @return {!ScriptMirror|undefined} */
+FunctionMirror.prototype.script = function() {}
+
+/** @return {!SourceLocation|undefined} */
+FunctionMirror.prototype.sourceLocation = function() {}
+
+/** @return {!ContextMirror|undefined} */
+FunctionMirror.prototype.context = function() {}
+
+/**
+ * @constructor
+ * @param {*} value
+ */
+function UnresolvedFunctionMirror(value) {}
+
+
+/**
+ * @interface
+ * @extends {ObjectMirror}
+ */
+function MapMirror () {}
+
+/**
+ * @param {number=} limit
+ * @return {!Array<!{key: *, value: *}>}
+ */
+MapMirror.prototype.entries = function(limit) {}
+
+
+/**
+ * @interface
+ * @extends {ObjectMirror}
+ */
+function SetMirror () {}
+
+/**
+ * @param {number=} limit
+ * @return {!Array<*>}
+ */
+SetMirror.prototype.values = function(limit) {}
+
+
+/**
+ * @interface
+ * @extends {ObjectMirror}
+ */
+function IteratorMirror () {}
+
+/**
+ * @param {number=} limit
+ * @return {!Array<*>}
+ */
+IteratorMirror.prototype.preview = function(limit) {}
+
+
+/**
+ * @interface
+ * @extends {ObjectMirror}
+ */
+function GeneratorMirror () {}
+
+/** @return {string} */
+GeneratorMirror.prototype.status = function() {}
+
+/** @return {!SourceLocation|undefined} */
+GeneratorMirror.prototype.sourceLocation = function() {}
+
+/** @return {!FunctionMirror} */
+GeneratorMirror.prototype.func = function() {}
+
+
+/**
+ * @interface
+ * @extends {Mirror}
+ */
+function PropertyMirror() {}
+
+/** @return {!Mirror} */
+PropertyMirror.prototype.value = function() {}
+
+/** @return {string} */
+PropertyMirror.prototype.name = function() {}
+
+/** @type {*} */
+PropertyMirror.prototype.value_;
+
+/**
+ * @interface
+ * @extends {Mirror}
+ */
+function FrameMirror() {}
+
+/**
+ * @param {boolean=} ignoreNestedScopes
+ * @return {!Array<!ScopeMirror>}
+ */
+FrameMirror.prototype.allScopes = function(ignoreNestedScopes) {}
+
+/** @return {!FrameDetails} */
+FrameMirror.prototype.details = function() {}
+
+/**
+ * @param {string} source
+ * @param {boolean} disableBreak
+ */
+FrameMirror.prototype.evaluate = function(source, disableBreak) {}
+
+FrameMirror.prototype.restart = function() {}
+
+/** @param {number} index */
+FrameMirror.prototype.scope = function(index) {}
+
+
+/**
+ * @interface
+ * @extends {Mirror}
+ */
+function ScriptMirror() {}
+
+/** @return {!Script} */
+ScriptMirror.prototype.value = function() {}
+
+/** @return {number} */
+ScriptMirror.prototype.id = function() {}
+
+/**
+ * @param {number} position
+ * @param {boolean=} includeResourceOffset
+ */
+ScriptMirror.prototype.locationFromPosition = function(position, includeResourceOffset) {}
+
+
+/**
+ * @interface
+ * @extends {Mirror}
+ */
+function ScopeMirror() {}
+
+/** @return {!ScopeDetails} */
+ScopeMirror.prototype.details = function() {}
+
+/**
+ * @param {string} name
+ * @param {*} newValue
+ */
+ScopeMirror.prototype.setVariableValue = function(name, newValue) {}
+
+/**
+ * @interface
+ * @extends {Mirror}
+ */
+function ContextMirror() {}
+
+/** @return {string|undefined} */
+ContextMirror.prototype.data = function() {}
diff --git a/deps/v8/src/inspector/injected-script-native.cc b/deps/v8/src/inspector/injected-script-native.cc
new file mode 100644
index 0000000000..fcf2ead94b
--- /dev/null
+++ b/deps/v8/src/inspector/injected-script-native.cc
@@ -0,0 +1,89 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/injected-script-native.h"
+
+namespace v8_inspector {
+
+InjectedScriptNative::InjectedScriptNative(v8::Isolate* isolate)
+ : m_lastBoundObjectId(1), m_isolate(isolate) {}
+
+static const char privateKeyName[] = "v8-inspector#injectedScript";
+
+InjectedScriptNative::~InjectedScriptNative() {}
+
+void InjectedScriptNative::setOnInjectedScriptHost(
+ v8::Local<v8::Object> injectedScriptHost) {
+ v8::HandleScope handleScope(m_isolate);
+ v8::Local<v8::External> external = v8::External::New(m_isolate, this);
+ v8::Local<v8::Private> privateKey = v8::Private::ForApi(
+ m_isolate, v8::String::NewFromUtf8(m_isolate, privateKeyName,
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked());
+ injectedScriptHost->SetPrivate(m_isolate->GetCurrentContext(), privateKey,
+ external);
+}
+
+InjectedScriptNative* InjectedScriptNative::fromInjectedScriptHost(
+ v8::Isolate* isolate, v8::Local<v8::Object> injectedScriptObject) {
+ v8::HandleScope handleScope(isolate);
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Private> privateKey = v8::Private::ForApi(
+ isolate, v8::String::NewFromUtf8(isolate, privateKeyName,
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked());
+ v8::Local<v8::Value> value =
+ injectedScriptObject->GetPrivate(context, privateKey).ToLocalChecked();
+ DCHECK(value->IsExternal());
+ v8::Local<v8::External> external = value.As<v8::External>();
+ return static_cast<InjectedScriptNative*>(external->Value());
+}
+
+int InjectedScriptNative::bind(v8::Local<v8::Value> value,
+ const String16& groupName) {
+ if (m_lastBoundObjectId <= 0) m_lastBoundObjectId = 1;
+ int id = m_lastBoundObjectId++;
+ m_idToWrappedObject[id] =
+ wrapUnique(new v8::Global<v8::Value>(m_isolate, value));
+ addObjectToGroup(id, groupName);
+ return id;
+}
+
+void InjectedScriptNative::unbind(int id) {
+ m_idToWrappedObject.erase(id);
+ m_idToObjectGroupName.erase(id);
+}
+
+v8::Local<v8::Value> InjectedScriptNative::objectForId(int id) {
+ auto iter = m_idToWrappedObject.find(id);
+ return iter != m_idToWrappedObject.end() ? iter->second->Get(m_isolate)
+ : v8::Local<v8::Value>();
+}
+
+void InjectedScriptNative::addObjectToGroup(int objectId,
+ const String16& groupName) {
+ if (groupName.isEmpty()) return;
+ if (objectId <= 0) return;
+ m_idToObjectGroupName[objectId] = groupName;
+ m_nameToObjectGroup[groupName].push_back(
+ objectId); // Creates an empty vector if key is not there
+}
+
+void InjectedScriptNative::releaseObjectGroup(const String16& groupName) {
+ if (groupName.isEmpty()) return;
+ NameToObjectGroup::iterator groupIt = m_nameToObjectGroup.find(groupName);
+ if (groupIt == m_nameToObjectGroup.end()) return;
+ for (int id : groupIt->second) unbind(id);
+ m_nameToObjectGroup.erase(groupIt);
+}
+
+String16 InjectedScriptNative::groupName(int objectId) const {
+ if (objectId <= 0) return String16();
+ IdToObjectGroupName::const_iterator iterator =
+ m_idToObjectGroupName.find(objectId);
+ return iterator != m_idToObjectGroupName.end() ? iterator->second
+ : String16();
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/injected-script-native.h b/deps/v8/src/inspector/injected-script-native.h
new file mode 100644
index 0000000000..3bdf24709d
--- /dev/null
+++ b/deps/v8/src/inspector/injected-script-native.h
@@ -0,0 +1,47 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_INJECTEDSCRIPTNATIVE_H_
+#define V8_INSPECTOR_INJECTEDSCRIPTNATIVE_H_
+
+#include <vector>
+
+#include "src/inspector/protocol/Protocol.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class InjectedScriptNative final {
+ public:
+ explicit InjectedScriptNative(v8::Isolate*);
+ ~InjectedScriptNative();
+
+ void setOnInjectedScriptHost(v8::Local<v8::Object>);
+ static InjectedScriptNative* fromInjectedScriptHost(v8::Isolate* isolate,
+ v8::Local<v8::Object>);
+
+ int bind(v8::Local<v8::Value>, const String16& groupName);
+ void unbind(int id);
+ v8::Local<v8::Value> objectForId(int id);
+
+ void releaseObjectGroup(const String16& groupName);
+ String16 groupName(int objectId) const;
+
+ private:
+ void addObjectToGroup(int objectId, const String16& groupName);
+
+ int m_lastBoundObjectId;
+ v8::Isolate* m_isolate;
+ protocol::HashMap<int, std::unique_ptr<v8::Global<v8::Value>>>
+ m_idToWrappedObject;
+ typedef protocol::HashMap<int, String16> IdToObjectGroupName;
+ IdToObjectGroupName m_idToObjectGroupName;
+ typedef protocol::HashMap<String16, std::vector<int>> NameToObjectGroup;
+ NameToObjectGroup m_nameToObjectGroup;
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_INJECTEDSCRIPTNATIVE_H_
diff --git a/deps/v8/src/inspector/injected-script-source.js b/deps/v8/src/inspector/injected-script-source.js
new file mode 100644
index 0000000000..39c6c9c1e8
--- /dev/null
+++ b/deps/v8/src/inspector/injected-script-source.js
@@ -0,0 +1,1076 @@
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+"use strict";
+
+/**
+ * @param {!InjectedScriptHostClass} InjectedScriptHost
+ * @param {!Window|!WorkerGlobalScope} inspectedGlobalObject
+ * @param {number} injectedScriptId
+ * @suppress {uselessCode}
+ */
+(function (InjectedScriptHost, inspectedGlobalObject, injectedScriptId) {
+
+/**
+ * Protect against Object overwritten by the user code.
+ * @suppress {duplicate}
+ */
+var Object = /** @type {function(new:Object, *=)} */ ({}.constructor);
+
+/**
+ * @param {!Array.<T>} array
+ * @param {...} var_args
+ * @template T
+ */
+function push(array, var_args)
+{
+ for (var i = 1; i < arguments.length; ++i)
+ array[array.length] = arguments[i];
+}
+
+/**
+ * @param {*} obj
+ * @return {string}
+ * @suppress {uselessCode}
+ */
+function toString(obj)
+{
+ // We don't use String(obj) because String could be overridden.
+ // Also the ("" + obj) expression may throw.
+ try {
+ return "" + obj;
+ } catch (e) {
+ var name = InjectedScriptHost.internalConstructorName(obj) || InjectedScriptHost.subtype(obj) || (typeof obj);
+ return "#<" + name + ">";
+ }
+}
+
+/**
+ * @param {*} obj
+ * @return {string}
+ */
+function toStringDescription(obj)
+{
+ if (typeof obj === "number" && obj === 0 && 1 / obj < 0)
+ return "-0"; // Negative zero.
+ return toString(obj);
+}
+
+/**
+ * @param {T} obj
+ * @return {T}
+ * @template T
+ */
+function nullifyObjectProto(obj)
+{
+ if (obj && typeof obj === "object")
+ obj.__proto__ = null;
+ return obj;
+}
+
+/**
+ * @param {number|string} obj
+ * @return {boolean}
+ */
+function isUInt32(obj)
+{
+ if (typeof obj === "number")
+ return obj >>> 0 === obj && (obj > 0 || 1 / obj > 0);
+ return "" + (obj >>> 0) === obj;
+}
+
+/**
+ * FireBug's array detection.
+ * @param {*} obj
+ * @return {boolean}
+ */
+function isArrayLike(obj)
+{
+ if (typeof obj !== "object")
+ return false;
+ try {
+ if (typeof obj.splice === "function") {
+ if (!InjectedScriptHost.objectHasOwnProperty(/** @type {!Object} */ (obj), "length"))
+ return false;
+ var len = obj.length;
+ return typeof len === "number" && isUInt32(len);
+ }
+ } catch (e) {
+ }
+ return false;
+}
+
+/**
+ * @param {number} a
+ * @param {number} b
+ * @return {number}
+ */
+function max(a, b)
+{
+ return a > b ? a : b;
+}
+
+/**
+ * FIXME: Remove once ES6 is supported natively by JS compiler.
+ * @param {*} obj
+ * @return {boolean}
+ */
+function isSymbol(obj)
+{
+ var type = typeof obj;
+ return (type === "symbol");
+}
+
+/**
+ * DOM Attributes which have observable side effect on getter, in the form of
+ * {interfaceName1: {attributeName1: true,
+ * attributeName2: true,
+ * ...},
+ * interfaceName2: {...},
+ * ...}
+ * @type {!Object<string, !Object<string, boolean>>}
+ * @const
+ */
+var domAttributesWithObservableSideEffectOnGet = nullifyObjectProto({});
+domAttributesWithObservableSideEffectOnGet["Request"] = nullifyObjectProto({});
+domAttributesWithObservableSideEffectOnGet["Request"]["body"] = true;
+domAttributesWithObservableSideEffectOnGet["Response"] = nullifyObjectProto({});
+domAttributesWithObservableSideEffectOnGet["Response"]["body"] = true;
+
+/**
+ * @param {!Object} object
+ * @param {string} attribute
+ * @return {boolean}
+ */
+function doesAttributeHaveObservableSideEffectOnGet(object, attribute)
+{
+ for (var interfaceName in domAttributesWithObservableSideEffectOnGet) {
+ var interfaceFunction = inspectedGlobalObject[interfaceName];
+ // Call to instanceOf looks safe after typeof check.
+ var isInstance = typeof interfaceFunction === "function" && /* suppressBlacklist */ object instanceof interfaceFunction;
+ if (isInstance)
+ return attribute in domAttributesWithObservableSideEffectOnGet[interfaceName];
+ }
+ return false;
+}
+
+/**
+ * @constructor
+ */
+var InjectedScript = function()
+{
+}
+
+/**
+ * @type {!Object.<string, boolean>}
+ * @const
+ */
+InjectedScript.primitiveTypes = {
+ "undefined": true,
+ "boolean": true,
+ "number": true,
+ "string": true,
+ __proto__: null
+}
+
+/**
+ * @type {!Object<string, string>}
+ * @const
+ */
+InjectedScript.closureTypes = { __proto__: null };
+InjectedScript.closureTypes["local"] = "Local";
+InjectedScript.closureTypes["closure"] = "Closure";
+InjectedScript.closureTypes["catch"] = "Catch";
+InjectedScript.closureTypes["block"] = "Block";
+InjectedScript.closureTypes["script"] = "Script";
+InjectedScript.closureTypes["with"] = "With Block";
+InjectedScript.closureTypes["global"] = "Global";
+
+InjectedScript.prototype = {
+ /**
+ * @param {*} object
+ * @return {boolean}
+ */
+ isPrimitiveValue: function(object)
+ {
+ // FIXME(33716): typeof document.all is always 'undefined'.
+ return InjectedScript.primitiveTypes[typeof object] && !this._isHTMLAllCollection(object);
+ },
+
+ /**
+ * @param {*} object
+ * @return {boolean}
+ */
+ _shouldPassByValue: function(object)
+ {
+ return typeof object === "object" && InjectedScriptHost.subtype(object) === "internal#location";
+ },
+
+ /**
+ * @param {*} object
+ * @param {string} groupName
+ * @param {boolean} forceValueType
+ * @param {boolean} generatePreview
+ * @return {!RuntimeAgent.RemoteObject}
+ */
+ wrapObject: function(object, groupName, forceValueType, generatePreview)
+ {
+ return this._wrapObject(object, groupName, forceValueType, generatePreview);
+ },
+
+ /**
+ * @param {!Array<!Object>} array
+ * @param {string} property
+ * @param {string} groupName
+ * @param {boolean} forceValueType
+ * @param {boolean} generatePreview
+ */
+ wrapPropertyInArray: function(array, property, groupName, forceValueType, generatePreview)
+ {
+ for (var i = 0; i < array.length; ++i) {
+ if (typeof array[i] === "object" && property in array[i])
+ array[i][property] = this.wrapObject(array[i][property], groupName, forceValueType, generatePreview);
+ }
+ },
+
+ /**
+ * @param {!Array<*>} array
+ * @param {string} groupName
+ * @param {boolean} forceValueType
+ * @param {boolean} generatePreview
+ */
+ wrapObjectsInArray: function(array, groupName, forceValueType, generatePreview)
+ {
+ for (var i = 0; i < array.length; ++i)
+ array[i] = this.wrapObject(array[i], groupName, forceValueType, generatePreview);
+ },
+
+ /**
+ * @param {!Object} table
+ * @param {!Array.<string>|string|boolean} columns
+ * @return {!RuntimeAgent.RemoteObject}
+ */
+ wrapTable: function(table, columns)
+ {
+ var columnNames = null;
+ if (typeof columns === "string")
+ columns = [columns];
+ if (InjectedScriptHost.subtype(columns) === "array") {
+ columnNames = [];
+ for (var i = 0; i < columns.length; ++i)
+ columnNames[i] = toString(columns[i]);
+ }
+ return this._wrapObject(table, "console", false, true, columnNames, true);
+ },
+
+ /**
+ * This method cannot throw.
+ * @param {*} object
+ * @param {string=} objectGroupName
+ * @param {boolean=} forceValueType
+ * @param {boolean=} generatePreview
+ * @param {?Array.<string>=} columnNames
+ * @param {boolean=} isTable
+ * @param {boolean=} doNotBind
+ * @param {*=} customObjectConfig
+ * @return {!RuntimeAgent.RemoteObject}
+ * @suppress {checkTypes}
+ */
+ _wrapObject: function(object, objectGroupName, forceValueType, generatePreview, columnNames, isTable, doNotBind, customObjectConfig)
+ {
+ try {
+ return new InjectedScript.RemoteObject(object, objectGroupName, doNotBind, forceValueType, generatePreview, columnNames, isTable, undefined, customObjectConfig);
+ } catch (e) {
+ try {
+ var description = injectedScript._describe(e);
+ } catch (ex) {
+ var description = "<failed to convert exception to string>";
+ }
+ return new InjectedScript.RemoteObject(description);
+ }
+ },
+
+ /**
+ * @param {!Object|symbol} object
+ * @param {string=} objectGroupName
+ * @return {string}
+ */
+ _bind: function(object, objectGroupName)
+ {
+ var id = InjectedScriptHost.bind(object, objectGroupName || "");
+ return "{\"injectedScriptId\":" + injectedScriptId + ",\"id\":" + id + "}";
+ },
+
+ /**
+ * @param {!Object} object
+ * @param {string} objectGroupName
+ * @param {boolean} ownProperties
+ * @param {boolean} accessorPropertiesOnly
+ * @param {boolean} generatePreview
+ * @return {!Array<!RuntimeAgent.PropertyDescriptor>|boolean}
+ */
+ getProperties: function(object, objectGroupName, ownProperties, accessorPropertiesOnly, generatePreview)
+ {
+ var subtype = this._subtype(object);
+ if (subtype === "internal#scope") {
+ // Internally, scope contains object with scope variables and additional information like type,
+ // we use additional information for preview and would like to report variables as scope
+ // properties.
+ object = object.object;
+ }
+
+ var descriptors = [];
+ var iter = this._propertyDescriptors(object, ownProperties, accessorPropertiesOnly, undefined);
+ // Go over properties, wrap object values.
+ for (var descriptor of iter) {
+ if (subtype === "internal#scopeList" && descriptor.name === "length")
+ continue;
+ if ("get" in descriptor)
+ descriptor.get = this._wrapObject(descriptor.get, objectGroupName);
+ if ("set" in descriptor)
+ descriptor.set = this._wrapObject(descriptor.set, objectGroupName);
+ if ("value" in descriptor)
+ descriptor.value = this._wrapObject(descriptor.value, objectGroupName, false, generatePreview);
+ if (!("configurable" in descriptor))
+ descriptor.configurable = false;
+ if (!("enumerable" in descriptor))
+ descriptor.enumerable = false;
+ if ("symbol" in descriptor)
+ descriptor.symbol = this._wrapObject(descriptor.symbol, objectGroupName);
+ push(descriptors, descriptor);
+ }
+ return descriptors;
+ },
+
+ /**
+ * @param {!Object} object
+ * @return {?Object}
+ */
+ _objectPrototype: function(object)
+ {
+ if (InjectedScriptHost.subtype(object) === "proxy")
+ return null;
+ try {
+ return Object.getPrototypeOf(object);
+ } catch (e) {
+ return null;
+ }
+ },
+
+ /**
+ * @param {!Object} object
+ * @param {boolean=} ownProperties
+ * @param {boolean=} accessorPropertiesOnly
+ * @param {?Array.<string>=} propertyNamesOnly
+ */
+ _propertyDescriptors: function*(object, ownProperties, accessorPropertiesOnly, propertyNamesOnly)
+ {
+ var propertyProcessed = { __proto__: null };
+
+ /**
+ * @param {?Object} o
+ * @param {!Iterable<string|symbol|number>|!Array<string|number|symbol>} properties
+ */
+ function* process(o, properties)
+ {
+ for (var property of properties) {
+ var name;
+ if (isSymbol(property))
+ name = /** @type {string} */ (injectedScript._describe(property));
+ else
+ name = typeof property === "number" ? ("" + property) : /** @type {string} */(property);
+
+ if (propertyProcessed[property])
+ continue;
+
+ try {
+ propertyProcessed[property] = true;
+ var descriptor = nullifyObjectProto(Object.getOwnPropertyDescriptor(o, property));
+ if (descriptor) {
+ if (accessorPropertiesOnly && !("get" in descriptor || "set" in descriptor))
+ continue;
+ if ("get" in descriptor && "set" in descriptor && name != "__proto__" && InjectedScriptHost.formatAccessorsAsProperties(object, descriptor.get) && !doesAttributeHaveObservableSideEffectOnGet(object, name)) {
+ descriptor.value = object[property];
+ descriptor.isOwn = true;
+ delete descriptor.get;
+ delete descriptor.set;
+ }
+ } else {
+ // Not all bindings provide proper descriptors. Fall back to the writable, configurable property.
+ if (accessorPropertiesOnly)
+ continue;
+ try {
+ descriptor = { name: name, value: o[property], writable: false, configurable: false, enumerable: false, __proto__: null };
+ if (o === object)
+ descriptor.isOwn = true;
+ yield descriptor;
+ } catch (e) {
+ // Silent catch.
+ }
+ continue;
+ }
+ } catch (e) {
+ if (accessorPropertiesOnly)
+ continue;
+ var descriptor = { __proto__: null };
+ descriptor.value = e;
+ descriptor.wasThrown = true;
+ }
+
+ descriptor.name = name;
+ if (o === object)
+ descriptor.isOwn = true;
+ if (isSymbol(property))
+ descriptor.symbol = property;
+ yield descriptor;
+ }
+ }
+
+ if (propertyNamesOnly) {
+ for (var i = 0; i < propertyNamesOnly.length; ++i) {
+ var name = propertyNamesOnly[i];
+ for (var o = object; this._isDefined(o); o = this._objectPrototype(o)) {
+ if (InjectedScriptHost.objectHasOwnProperty(o, name)) {
+ for (var descriptor of process(o, [name]))
+ yield descriptor;
+ break;
+ }
+ if (ownProperties)
+ break;
+ }
+ }
+ return;
+ }
+
+ /**
+ * @param {number} length
+ */
+ function* arrayIndexNames(length)
+ {
+ for (var i = 0; i < length; ++i)
+ yield "" + i;
+ }
+
+ var skipGetOwnPropertyNames;
+ try {
+ skipGetOwnPropertyNames = InjectedScriptHost.subtype(object) === "typedarray" && object.length > 500000;
+ } catch (e) {
+ }
+
+ for (var o = object; this._isDefined(o); o = this._objectPrototype(o)) {
+ if (InjectedScriptHost.subtype(o) === "proxy")
+ continue;
+ if (skipGetOwnPropertyNames && o === object) {
+ // Avoid OOM crashes from getting all own property names of a large TypedArray.
+ for (var descriptor of process(o, arrayIndexNames(o.length)))
+ yield descriptor;
+ } else {
+ // First call Object.keys() to enforce ordering of the property descriptors.
+ for (var descriptor of process(o, Object.keys(/** @type {!Object} */ (o))))
+ yield descriptor;
+ for (var descriptor of process(o, Object.getOwnPropertyNames(/** @type {!Object} */ (o))))
+ yield descriptor;
+ }
+ if (Object.getOwnPropertySymbols) {
+ for (var descriptor of process(o, Object.getOwnPropertySymbols(/** @type {!Object} */ (o))))
+ yield descriptor;
+ }
+ if (ownProperties) {
+ var proto = this._objectPrototype(o);
+ if (proto && !accessorPropertiesOnly)
+ yield { name: "__proto__", value: proto, writable: true, configurable: true, enumerable: false, isOwn: true, __proto__: null };
+ break;
+ }
+ }
+ },
+
+ /**
+ * @param {string|undefined} objectGroupName
+ * @param {*} jsonMLObject
+ * @throws {string} error message
+ */
+ _substituteObjectTagsInCustomPreview: function(objectGroupName, jsonMLObject)
+ {
+ var maxCustomPreviewRecursionDepth = 20;
+ this._customPreviewRecursionDepth = (this._customPreviewRecursionDepth || 0) + 1
+ try {
+ if (this._customPreviewRecursionDepth >= maxCustomPreviewRecursionDepth)
+ throw new Error("Too deep hierarchy of inlined custom previews");
+
+ if (!isArrayLike(jsonMLObject))
+ return;
+
+ if (jsonMLObject[0] === "object") {
+ var attributes = jsonMLObject[1];
+ var originObject = attributes["object"];
+ var config = attributes["config"];
+ if (typeof originObject === "undefined")
+ throw new Error("Illegal format: obligatory attribute \"object\" isn't specified");
+
+ jsonMLObject[1] = this._wrapObject(originObject, objectGroupName, false, false, null, false, false, config);
+ return;
+ }
+
+ for (var i = 0; i < jsonMLObject.length; ++i)
+ this._substituteObjectTagsInCustomPreview(objectGroupName, jsonMLObject[i]);
+ } finally {
+ this._customPreviewRecursionDepth--;
+ }
+ },
+
+ /**
+ * @param {*} object
+ * @return {boolean}
+ */
+ _isDefined: function(object)
+ {
+ return !!object || this._isHTMLAllCollection(object);
+ },
+
+ /**
+ * @param {*} object
+ * @return {boolean}
+ */
+ _isHTMLAllCollection: function(object)
+ {
+ // document.all is reported as undefined, but we still want to process it.
+ return (typeof object === "undefined") && !!InjectedScriptHost.subtype(object);
+ },
+
+ /**
+ * @param {*} obj
+ * @return {?string}
+ */
+ _subtype: function(obj)
+ {
+ if (obj === null)
+ return "null";
+
+ if (this.isPrimitiveValue(obj))
+ return null;
+
+ var subtype = InjectedScriptHost.subtype(obj);
+ if (subtype)
+ return subtype;
+
+ if (isArrayLike(obj))
+ return "array";
+
+ // If owning frame has navigated to somewhere else window properties will be undefined.
+ return null;
+ },
+
+ /**
+ * @param {*} obj
+ * @return {?string}
+ */
+ _describe: function(obj)
+ {
+ if (this.isPrimitiveValue(obj))
+ return null;
+
+ var subtype = this._subtype(obj);
+
+ if (subtype === "regexp")
+ return toString(obj);
+
+ if (subtype === "date")
+ return toString(obj);
+
+ if (subtype === "node") {
+ var description = "";
+ if (obj.nodeName)
+ description = obj.nodeName.toLowerCase();
+ else if (obj.constructor)
+ description = obj.constructor.name.toLowerCase();
+
+ switch (obj.nodeType) {
+ case 1 /* Node.ELEMENT_NODE */:
+ description += obj.id ? "#" + obj.id : "";
+ var className = obj.className;
+ description += (className && typeof className === "string") ? "." + className.trim().replace(/\s+/g, ".") : "";
+ break;
+ case 10 /*Node.DOCUMENT_TYPE_NODE */:
+ description = "<!DOCTYPE " + description + ">";
+ break;
+ }
+ return description;
+ }
+
+ if (subtype === "proxy")
+ return "Proxy";
+
+ var className = InjectedScriptHost.internalConstructorName(obj);
+ if (subtype === "array" || subtype === "typedarray") {
+ if (typeof obj.length === "number")
+ className += "[" + obj.length + "]";
+ return className;
+ }
+
+ if (typeof obj === "function")
+ return toString(obj);
+
+ if (isSymbol(obj)) {
+ try {
+ // It isn't safe, because Symbol.prototype.toString can be overriden.
+ return /* suppressBlacklist */ obj.toString() || "Symbol";
+ } catch (e) {
+ return "Symbol";
+ }
+ }
+
+ if (InjectedScriptHost.subtype(obj) === "error") {
+ try {
+ var stack = obj.stack;
+ var message = obj.message && obj.message.length ? ": " + obj.message : "";
+ var firstCallFrame = /^\s+at\s/m.exec(stack);
+ var stackMessageEnd = firstCallFrame ? firstCallFrame.index : -1;
+ if (stackMessageEnd !== -1) {
+ var stackTrace = stack.substr(stackMessageEnd);
+ return className + message + "\n" + stackTrace;
+ }
+ return className + message;
+ } catch(e) {
+ }
+ }
+
+ if (subtype === "internal#entry") {
+ if ("key" in obj)
+ return "{" + this._describeIncludingPrimitives(obj.key) + " => " + this._describeIncludingPrimitives(obj.value) + "}";
+ return this._describeIncludingPrimitives(obj.value);
+ }
+
+ if (subtype === "internal#scopeList")
+ return "Scopes[" + obj.length + "]";
+
+ if (subtype === "internal#scope")
+ return (InjectedScript.closureTypes[obj.type] || "Unknown") + (obj.name ? " (" + obj.name + ")" : "");
+
+ return className;
+ },
+
+ /**
+ * @param {*} value
+ * @return {string}
+ */
+ _describeIncludingPrimitives: function(value)
+ {
+ if (typeof value === "string")
+ return "\"" + value.replace(/\n/g, "\u21B5") + "\"";
+ if (value === null)
+ return "" + value;
+ return this.isPrimitiveValue(value) ? toStringDescription(value) : (this._describe(value) || "");
+ },
+
+ /**
+ * @param {boolean} enabled
+ */
+ setCustomObjectFormatterEnabled: function(enabled)
+ {
+ this._customObjectFormatterEnabled = enabled;
+ }
+}
+
+/**
+ * @type {!InjectedScript}
+ * @const
+ */
+var injectedScript = new InjectedScript();
+
+/**
+ * @constructor
+ * @param {*} object
+ * @param {string=} objectGroupName
+ * @param {boolean=} doNotBind
+ * @param {boolean=} forceValueType
+ * @param {boolean=} generatePreview
+ * @param {?Array.<string>=} columnNames
+ * @param {boolean=} isTable
+ * @param {boolean=} skipEntriesPreview
+ * @param {*=} customObjectConfig
+ */
+InjectedScript.RemoteObject = function(object, objectGroupName, doNotBind, forceValueType, generatePreview, columnNames, isTable, skipEntriesPreview, customObjectConfig)
+{
+ this.type = typeof object;
+ if (this.type === "undefined" && injectedScript._isHTMLAllCollection(object))
+ this.type = "object";
+
+ if (injectedScript.isPrimitiveValue(object) || object === null || forceValueType) {
+ // We don't send undefined values over JSON.
+ if (this.type !== "undefined")
+ this.value = object;
+
+ // Null object is object with 'null' subtype.
+ if (object === null)
+ this.subtype = "null";
+
+ // Provide user-friendly number values.
+ if (this.type === "number") {
+ this.description = toStringDescription(object);
+ switch (this.description) {
+ case "NaN":
+ case "Infinity":
+ case "-Infinity":
+ case "-0":
+ delete this.value;
+ this.unserializableValue = this.description;
+ break;
+ }
+ }
+
+ return;
+ }
+
+ if (injectedScript._shouldPassByValue(object)) {
+ this.value = object;
+ this.subtype = injectedScript._subtype(object);
+ this.description = injectedScript._describeIncludingPrimitives(object);
+ return;
+ }
+
+ object = /** @type {!Object} */ (object);
+
+ if (!doNotBind)
+ this.objectId = injectedScript._bind(object, objectGroupName);
+ var subtype = injectedScript._subtype(object);
+ if (subtype)
+ this.subtype = subtype;
+ var className = InjectedScriptHost.internalConstructorName(object);
+ if (className)
+ this.className = className;
+ this.description = injectedScript._describe(object);
+
+ if (generatePreview && this.type === "object") {
+ if (this.subtype === "proxy")
+ this.preview = this._generatePreview(InjectedScriptHost.proxyTargetValue(object), undefined, columnNames, isTable, skipEntriesPreview);
+ else if (this.subtype !== "node")
+ this.preview = this._generatePreview(object, undefined, columnNames, isTable, skipEntriesPreview);
+ }
+
+ if (injectedScript._customObjectFormatterEnabled) {
+ var customPreview = this._customPreview(object, objectGroupName, customObjectConfig);
+ if (customPreview)
+ this.customPreview = customPreview;
+ }
+}
+
+InjectedScript.RemoteObject.prototype = {
+
+ /**
+ * @param {*} object
+ * @param {string=} objectGroupName
+ * @param {*=} customObjectConfig
+ * @return {?RuntimeAgent.CustomPreview}
+ */
+ _customPreview: function(object, objectGroupName, customObjectConfig)
+ {
+ /**
+ * @param {!Error} error
+ */
+ function logError(error)
+ {
+ // We use user code to generate custom output for object, we can use user code for reporting error too.
+ Promise.resolve().then(/* suppressBlacklist */ inspectedGlobalObject.console.error.bind(inspectedGlobalObject.console, "Custom Formatter Failed: " + error.message));
+ }
+
+ /**
+ * @param {*} object
+ * @param {*=} customObjectConfig
+ * @return {*}
+ */
+ function wrap(object, customObjectConfig)
+ {
+ return injectedScript._wrapObject(object, objectGroupName, false, false, null, false, false, customObjectConfig);
+ }
+
+ try {
+ var formatters = inspectedGlobalObject["devtoolsFormatters"];
+ if (!formatters || !isArrayLike(formatters))
+ return null;
+
+ for (var i = 0; i < formatters.length; ++i) {
+ try {
+ var formatted = formatters[i].header(object, customObjectConfig);
+ if (!formatted)
+ continue;
+
+ var hasBody = formatters[i].hasBody(object, customObjectConfig);
+ injectedScript._substituteObjectTagsInCustomPreview(objectGroupName, formatted);
+ var formatterObjectId = injectedScript._bind(formatters[i], objectGroupName);
+ var bindRemoteObjectFunctionId = injectedScript._bind(wrap, objectGroupName);
+ var result = {header: JSON.stringify(formatted), hasBody: !!hasBody, formatterObjectId: formatterObjectId, bindRemoteObjectFunctionId: bindRemoteObjectFunctionId};
+ if (customObjectConfig)
+ result["configObjectId"] = injectedScript._bind(customObjectConfig, objectGroupName);
+ return result;
+ } catch (e) {
+ logError(e);
+ }
+ }
+ } catch (e) {
+ logError(e);
+ }
+ return null;
+ },
+
+ /**
+ * @return {!RuntimeAgent.ObjectPreview} preview
+ */
+ _createEmptyPreview: function()
+ {
+ var preview = {
+ type: /** @type {!RuntimeAgent.ObjectPreviewType.<string>} */ (this.type),
+ description: this.description || toStringDescription(this.value),
+ overflow: false,
+ properties: [],
+ __proto__: null
+ };
+ if (this.subtype)
+ preview.subtype = /** @type {!RuntimeAgent.ObjectPreviewSubtype.<string>} */ (this.subtype);
+ return preview;
+ },
+
+ /**
+ * @param {!Object} object
+ * @param {?Array.<string>=} firstLevelKeys
+ * @param {?Array.<string>=} secondLevelKeys
+ * @param {boolean=} isTable
+ * @param {boolean=} skipEntriesPreview
+ * @return {!RuntimeAgent.ObjectPreview} preview
+ */
+ _generatePreview: function(object, firstLevelKeys, secondLevelKeys, isTable, skipEntriesPreview)
+ {
+ var preview = this._createEmptyPreview();
+ var firstLevelKeysCount = firstLevelKeys ? firstLevelKeys.length : 0;
+
+ var propertiesThreshold = {
+ properties: isTable ? 1000 : max(5, firstLevelKeysCount),
+ indexes: isTable ? 1000 : max(100, firstLevelKeysCount),
+ __proto__: null
+ };
+
+ try {
+ var descriptors = injectedScript._propertyDescriptors(object, undefined, undefined, firstLevelKeys);
+
+ this._appendPropertyDescriptors(preview, descriptors, propertiesThreshold, secondLevelKeys, isTable);
+ if (propertiesThreshold.indexes < 0 || propertiesThreshold.properties < 0)
+ return preview;
+
+ // Add internal properties to preview.
+ var rawInternalProperties = InjectedScriptHost.getInternalProperties(object) || [];
+ var internalProperties = [];
+ var entries = null;
+ for (var i = 0; i < rawInternalProperties.length; i += 2) {
+ if (rawInternalProperties[i] === "[[Entries]]") {
+ entries = /** @type {!Array<*>} */(rawInternalProperties[i + 1]);
+ continue;
+ }
+ push(internalProperties, {
+ name: rawInternalProperties[i],
+ value: rawInternalProperties[i + 1],
+ isOwn: true,
+ enumerable: true,
+ __proto__: null
+ });
+ }
+ this._appendPropertyDescriptors(preview, internalProperties, propertiesThreshold, secondLevelKeys, isTable);
+
+ if (this.subtype === "map" || this.subtype === "set" || this.subtype === "iterator")
+ this._appendEntriesPreview(entries, preview, skipEntriesPreview);
+
+ } catch (e) {}
+
+ return preview;
+ },
+
+ /**
+ * @param {!RuntimeAgent.ObjectPreview} preview
+ * @param {!Array.<*>|!Iterable.<*>} descriptors
+ * @param {!Object} propertiesThreshold
+ * @param {?Array.<string>=} secondLevelKeys
+ * @param {boolean=} isTable
+ */
+ _appendPropertyDescriptors: function(preview, descriptors, propertiesThreshold, secondLevelKeys, isTable)
+ {
+ for (var descriptor of descriptors) {
+ if (propertiesThreshold.indexes < 0 || propertiesThreshold.properties < 0)
+ break;
+ if (!descriptor || descriptor.wasThrown)
+ continue;
+
+ var name = descriptor.name;
+
+ // Ignore __proto__ property.
+ if (name === "__proto__")
+ continue;
+
+ // Ignore length property of array.
+ if ((this.subtype === "array" || this.subtype === "typedarray") && name === "length")
+ continue;
+
+ // Ignore size property of map, set.
+ if ((this.subtype === "map" || this.subtype === "set") && name === "size")
+ continue;
+
+ // Never preview prototype properties.
+ if (!descriptor.isOwn)
+ continue;
+
+ // Ignore computed properties.
+ if (!("value" in descriptor))
+ continue;
+
+ var value = descriptor.value;
+ var type = typeof value;
+
+ // Never render functions in object preview.
+ if (type === "function" && (this.subtype !== "array" || !isUInt32(name)))
+ continue;
+
+ // Special-case HTMLAll.
+ if (type === "undefined" && injectedScript._isHTMLAllCollection(value))
+ type = "object";
+
+ // Render own properties.
+ if (value === null) {
+ this._appendPropertyPreview(preview, { name: name, type: "object", subtype: "null", value: "null", __proto__: null }, propertiesThreshold);
+ continue;
+ }
+
+ var maxLength = 100;
+ if (InjectedScript.primitiveTypes[type]) {
+ if (type === "string" && value.length > maxLength)
+ value = this._abbreviateString(value, maxLength, true);
+ this._appendPropertyPreview(preview, { name: name, type: type, value: toStringDescription(value), __proto__: null }, propertiesThreshold);
+ continue;
+ }
+
+ var property = { name: name, type: type, __proto__: null };
+ var subtype = injectedScript._subtype(value);
+ if (subtype)
+ property.subtype = subtype;
+
+ if (secondLevelKeys === null || secondLevelKeys) {
+ var subPreview = this._generatePreview(value, secondLevelKeys || undefined, undefined, isTable);
+ property.valuePreview = subPreview;
+ if (subPreview.overflow)
+ preview.overflow = true;
+ } else {
+ var description = "";
+ if (type !== "function")
+ description = this._abbreviateString(/** @type {string} */ (injectedScript._describe(value)), maxLength, subtype === "regexp");
+ property.value = description;
+ }
+ this._appendPropertyPreview(preview, property, propertiesThreshold);
+ }
+ },
+
+ /**
+ * @param {!RuntimeAgent.ObjectPreview} preview
+ * @param {!Object} property
+ * @param {!Object} propertiesThreshold
+ */
+ _appendPropertyPreview: function(preview, property, propertiesThreshold)
+ {
+ if (toString(property.name >>> 0) === property.name)
+ propertiesThreshold.indexes--;
+ else
+ propertiesThreshold.properties--;
+ if (propertiesThreshold.indexes < 0 || propertiesThreshold.properties < 0) {
+ preview.overflow = true;
+ } else {
+ push(preview.properties, property);
+ }
+ },
+
+ /**
+ * @param {?Array<*>} entries
+ * @param {!RuntimeAgent.ObjectPreview} preview
+ * @param {boolean=} skipEntriesPreview
+ */
+ _appendEntriesPreview: function(entries, preview, skipEntriesPreview)
+ {
+ if (!entries)
+ return;
+ if (skipEntriesPreview) {
+ if (entries.length)
+ preview.overflow = true;
+ return;
+ }
+ preview.entries = [];
+ var entriesThreshold = 5;
+ for (var i = 0; i < entries.length; ++i) {
+ if (preview.entries.length >= entriesThreshold) {
+ preview.overflow = true;
+ break;
+ }
+ var entry = nullifyObjectProto(entries[i]);
+ var previewEntry = {
+ value: generateValuePreview(entry.value),
+ __proto__: null
+ };
+ if ("key" in entry)
+ previewEntry.key = generateValuePreview(entry.key);
+ push(preview.entries, previewEntry);
+ }
+
+ /**
+ * @param {*} value
+ * @return {!RuntimeAgent.ObjectPreview}
+ */
+ function generateValuePreview(value)
+ {
+ var remoteObject = new InjectedScript.RemoteObject(value, undefined, true, undefined, true, undefined, undefined, true);
+ var valuePreview = remoteObject.preview || remoteObject._createEmptyPreview();
+ return valuePreview;
+ }
+ },
+
+ /**
+ * @param {string} string
+ * @param {number} maxLength
+ * @param {boolean=} middle
+ * @return {string}
+ */
+ _abbreviateString: function(string, maxLength, middle)
+ {
+ if (string.length <= maxLength)
+ return string;
+ if (middle) {
+ var leftHalf = maxLength >> 1;
+ var rightHalf = maxLength - leftHalf - 1;
+ return string.substr(0, leftHalf) + "\u2026" + string.substr(string.length - rightHalf, rightHalf);
+ }
+ return string.substr(0, maxLength) + "\u2026";
+ },
+
+ __proto__: null
+}
+
+return injectedScript;
+})
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
new file mode 100644
index 0000000000..a100dea2e1
--- /dev/null
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -0,0 +1,581 @@
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/inspector/injected-script.h"
+
+#include "src/inspector/injected-script-native.h"
+#include "src/inspector/injected-script-source.h"
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/remote-object-id.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console.h"
+#include "src/inspector/v8-function-call.h"
+#include "src/inspector/v8-injected-script-host.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+#include "src/inspector/v8-value-copier.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+using protocol::Array;
+using protocol::Runtime::PropertyDescriptor;
+using protocol::Runtime::InternalPropertyDescriptor;
+using protocol::Runtime::RemoteObject;
+using protocol::Maybe;
+
+static bool hasInternalError(ErrorString* errorString, bool hasError) {
+ if (hasError) *errorString = "Internal error";
+ return hasError;
+}
+
+std::unique_ptr<InjectedScript> InjectedScript::create(
+ InspectedContext* inspectedContext) {
+ v8::Isolate* isolate = inspectedContext->isolate();
+ v8::HandleScope handles(isolate);
+ v8::Local<v8::Context> context = inspectedContext->context();
+ v8::Context::Scope scope(context);
+
+ std::unique_ptr<InjectedScriptNative> injectedScriptNative(
+ new InjectedScriptNative(isolate));
+ v8::Local<v8::Object> scriptHostWrapper =
+ V8InjectedScriptHost::create(context, inspectedContext->inspector());
+ injectedScriptNative->setOnInjectedScriptHost(scriptHostWrapper);
+
+ // Inject javascript into the context. The compiled script is supposed to
+ // evaluate into
+ // a single anonymous function(it's anonymous to avoid cluttering the global
+ // object with
+ // inspector's stuff) the function is called a few lines below with
+ // InjectedScriptHost wrapper,
+ // injected script id and explicit reference to the inspected global object.
+ // The function is expected
+ // to create and configure InjectedScript instance that is going to be used by
+ // the inspector.
+ String16 injectedScriptSource(
+ reinterpret_cast<const char*>(InjectedScriptSource_js),
+ sizeof(InjectedScriptSource_js));
+ v8::Local<v8::Value> value;
+ if (!inspectedContext->inspector()
+ ->compileAndRunInternalScript(
+ context, toV8String(isolate, injectedScriptSource))
+ .ToLocal(&value))
+ return nullptr;
+ DCHECK(value->IsFunction());
+ v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(value);
+ v8::Local<v8::Object> windowGlobal = context->Global();
+ v8::Local<v8::Value> info[] = {
+ scriptHostWrapper, windowGlobal,
+ v8::Number::New(isolate, inspectedContext->contextId())};
+ v8::MicrotasksScope microtasksScope(isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+
+ int contextGroupId = inspectedContext->contextGroupId();
+ int contextId = inspectedContext->contextId();
+ V8InspectorImpl* inspector = inspectedContext->inspector();
+ v8::Local<v8::Value> injectedScriptValue;
+ if (!function->Call(context, windowGlobal, arraysize(info), info)
+ .ToLocal(&injectedScriptValue))
+ return nullptr;
+ if (inspector->getContext(contextGroupId, contextId) != inspectedContext)
+ return nullptr;
+ if (!injectedScriptValue->IsObject()) return nullptr;
+ return wrapUnique(new InjectedScript(inspectedContext,
+ injectedScriptValue.As<v8::Object>(),
+ std::move(injectedScriptNative)));
+}
+
+InjectedScript::InjectedScript(
+ InspectedContext* context, v8::Local<v8::Object> object,
+ std::unique_ptr<InjectedScriptNative> injectedScriptNative)
+ : m_context(context),
+ m_value(context->isolate(), object),
+ m_native(std::move(injectedScriptNative)) {}
+
+InjectedScript::~InjectedScript() {}
+
+void InjectedScript::getProperties(
+ ErrorString* errorString, v8::Local<v8::Object> object,
+ const String16& groupName, bool ownProperties, bool accessorPropertiesOnly,
+ bool generatePreview,
+ std::unique_ptr<Array<PropertyDescriptor>>* properties,
+ Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
+ v8::HandleScope handles(m_context->isolate());
+ v8::Local<v8::Context> context = m_context->context();
+ V8FunctionCall function(m_context->inspector(), m_context->context(),
+ v8Value(), "getProperties");
+ function.appendArgument(object);
+ function.appendArgument(groupName);
+ function.appendArgument(ownProperties);
+ function.appendArgument(accessorPropertiesOnly);
+ function.appendArgument(generatePreview);
+
+ v8::TryCatch tryCatch(m_context->isolate());
+ v8::Local<v8::Value> resultValue = function.callWithoutExceptionHandling();
+ if (tryCatch.HasCaught()) {
+ *exceptionDetails = createExceptionDetails(errorString, tryCatch, groupName,
+ generatePreview);
+ // FIXME: make properties optional
+ *properties = Array<PropertyDescriptor>::create();
+ return;
+ }
+ if (hasInternalError(errorString, resultValue.IsEmpty())) return;
+ std::unique_ptr<protocol::Value> protocolValue =
+ toProtocolValue(errorString, context, resultValue);
+ if (!protocolValue) return;
+ protocol::ErrorSupport errors(errorString);
+ std::unique_ptr<Array<PropertyDescriptor>> result =
+ Array<PropertyDescriptor>::parse(protocolValue.get(), &errors);
+ if (!hasInternalError(errorString, errors.hasErrors()))
+ *properties = std::move(result);
+}
+
+void InjectedScript::releaseObject(const String16& objectId) {
+ std::unique_ptr<protocol::Value> parsedObjectId =
+ protocol::parseJSON(objectId);
+ if (!parsedObjectId) return;
+ protocol::DictionaryValue* object =
+ protocol::DictionaryValue::cast(parsedObjectId.get());
+ if (!object) return;
+ int boundId = 0;
+ if (!object->getInteger("id", &boundId)) return;
+ m_native->unbind(boundId);
+}
+
+std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapObject(
+ ErrorString* errorString, v8::Local<v8::Value> value,
+ const String16& groupName, bool forceValueType,
+ bool generatePreview) const {
+ v8::HandleScope handles(m_context->isolate());
+ v8::Local<v8::Value> wrappedObject;
+ v8::Local<v8::Context> context = m_context->context();
+ if (!wrapValue(errorString, value, groupName, forceValueType, generatePreview)
+ .ToLocal(&wrappedObject))
+ return nullptr;
+ protocol::ErrorSupport errors;
+ std::unique_ptr<protocol::Value> protocolValue =
+ toProtocolValue(errorString, context, wrappedObject);
+ if (!protocolValue) return nullptr;
+ std::unique_ptr<protocol::Runtime::RemoteObject> remoteObject =
+ protocol::Runtime::RemoteObject::parse(protocolValue.get(), &errors);
+ if (!remoteObject) *errorString = errors.errors();
+ return remoteObject;
+}
+
+bool InjectedScript::wrapObjectProperty(ErrorString* errorString,
+ v8::Local<v8::Object> object,
+ v8::Local<v8::Name> key,
+ const String16& groupName,
+ bool forceValueType,
+ bool generatePreview) const {
+ v8::Local<v8::Value> property;
+ v8::Local<v8::Context> context = m_context->context();
+ if (hasInternalError(errorString,
+ !object->Get(context, key).ToLocal(&property)))
+ return false;
+ v8::Local<v8::Value> wrappedProperty;
+ if (!wrapValue(errorString, property, groupName, forceValueType,
+ generatePreview)
+ .ToLocal(&wrappedProperty))
+ return false;
+ v8::Maybe<bool> success =
+ createDataProperty(context, object, key, wrappedProperty);
+ if (hasInternalError(errorString, success.IsNothing() || !success.FromJust()))
+ return false;
+ return true;
+}
+
+bool InjectedScript::wrapPropertyInArray(ErrorString* errorString,
+ v8::Local<v8::Array> array,
+ v8::Local<v8::String> property,
+ const String16& groupName,
+ bool forceValueType,
+ bool generatePreview) const {
+ V8FunctionCall function(m_context->inspector(), m_context->context(),
+ v8Value(), "wrapPropertyInArray");
+ function.appendArgument(array);
+ function.appendArgument(property);
+ function.appendArgument(groupName);
+ function.appendArgument(forceValueType);
+ function.appendArgument(generatePreview);
+ bool hadException = false;
+ function.call(hadException);
+ return !hasInternalError(errorString, hadException);
+}
+
+bool InjectedScript::wrapObjectsInArray(ErrorString* errorString,
+ v8::Local<v8::Array> array,
+ const String16& groupName,
+ bool forceValueType,
+ bool generatePreview) const {
+ V8FunctionCall function(m_context->inspector(), m_context->context(),
+ v8Value(), "wrapObjectsInArray");
+ function.appendArgument(array);
+ function.appendArgument(groupName);
+ function.appendArgument(forceValueType);
+ function.appendArgument(generatePreview);
+ bool hadException = false;
+ function.call(hadException);
+ return !hasInternalError(errorString, hadException);
+}
+
+v8::MaybeLocal<v8::Value> InjectedScript::wrapValue(
+ ErrorString* errorString, v8::Local<v8::Value> value,
+ const String16& groupName, bool forceValueType,
+ bool generatePreview) const {
+ V8FunctionCall function(m_context->inspector(), m_context->context(),
+ v8Value(), "wrapObject");
+ function.appendArgument(value);
+ function.appendArgument(groupName);
+ function.appendArgument(forceValueType);
+ function.appendArgument(generatePreview);
+ bool hadException = false;
+ v8::Local<v8::Value> r = function.call(hadException);
+ if (hasInternalError(errorString, hadException || r.IsEmpty()))
+ return v8::MaybeLocal<v8::Value>();
+ return r;
+}
+
+std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
+ v8::Local<v8::Value> table, v8::Local<v8::Value> columns) const {
+ v8::HandleScope handles(m_context->isolate());
+ v8::Local<v8::Context> context = m_context->context();
+ V8FunctionCall function(m_context->inspector(), context, v8Value(),
+ "wrapTable");
+ function.appendArgument(table);
+ if (columns.IsEmpty())
+ function.appendArgument(false);
+ else
+ function.appendArgument(columns);
+ bool hadException = false;
+ v8::Local<v8::Value> r = function.call(hadException);
+ if (hadException || r.IsEmpty()) return nullptr;
+ protocol::ErrorString errorString;
+ std::unique_ptr<protocol::Value> protocolValue =
+ toProtocolValue(&errorString, context, r);
+ if (!protocolValue) return nullptr;
+ protocol::ErrorSupport errors;
+ return protocol::Runtime::RemoteObject::parse(protocolValue.get(), &errors);
+}
+
+bool InjectedScript::findObject(ErrorString* errorString,
+ const RemoteObjectId& objectId,
+ v8::Local<v8::Value>* outObject) const {
+ *outObject = m_native->objectForId(objectId.id());
+ if (outObject->IsEmpty())
+ *errorString = "Could not find object with given id";
+ return !outObject->IsEmpty();
+}
+
+String16 InjectedScript::objectGroupName(const RemoteObjectId& objectId) const {
+ return m_native->groupName(objectId.id());
+}
+
+void InjectedScript::releaseObjectGroup(const String16& objectGroup) {
+ m_native->releaseObjectGroup(objectGroup);
+ if (objectGroup == "console") m_lastEvaluationResult.Reset();
+}
+
+void InjectedScript::setCustomObjectFormatterEnabled(bool enabled) {
+ v8::HandleScope handles(m_context->isolate());
+ V8FunctionCall function(m_context->inspector(), m_context->context(),
+ v8Value(), "setCustomObjectFormatterEnabled");
+ function.appendArgument(enabled);
+ bool hadException = false;
+ function.call(hadException);
+ DCHECK(!hadException);
+}
+
+v8::Local<v8::Value> InjectedScript::v8Value() const {
+ return m_value.Get(m_context->isolate());
+}
+
+v8::Local<v8::Value> InjectedScript::lastEvaluationResult() const {
+ if (m_lastEvaluationResult.IsEmpty())
+ return v8::Undefined(m_context->isolate());
+ return m_lastEvaluationResult.Get(m_context->isolate());
+}
+
+v8::MaybeLocal<v8::Value> InjectedScript::resolveCallArgument(
+ ErrorString* errorString, protocol::Runtime::CallArgument* callArgument) {
+ if (callArgument->hasObjectId()) {
+ std::unique_ptr<RemoteObjectId> remoteObjectId =
+ RemoteObjectId::parse(errorString, callArgument->getObjectId(""));
+ if (!remoteObjectId) return v8::MaybeLocal<v8::Value>();
+ if (remoteObjectId->contextId() != m_context->contextId()) {
+ *errorString =
+ "Argument should belong to the same JavaScript world as target "
+ "object";
+ return v8::MaybeLocal<v8::Value>();
+ }
+ v8::Local<v8::Value> object;
+ if (!findObject(errorString, *remoteObjectId, &object))
+ return v8::MaybeLocal<v8::Value>();
+ return object;
+ }
+ if (callArgument->hasValue() || callArgument->hasUnserializableValue()) {
+ String16 value =
+ callArgument->hasValue()
+ ? callArgument->getValue(nullptr)->toJSONString()
+ : "Number(\"" + callArgument->getUnserializableValue("") + "\")";
+ v8::Local<v8::Value> object;
+ if (!m_context->inspector()
+ ->compileAndRunInternalScript(
+ m_context->context(), toV8String(m_context->isolate(), value))
+ .ToLocal(&object)) {
+ *errorString = "Couldn't parse value object in call argument";
+ return v8::MaybeLocal<v8::Value>();
+ }
+ return object;
+ }
+ return v8::Undefined(m_context->isolate());
+}
+
+std::unique_ptr<protocol::Runtime::ExceptionDetails>
+InjectedScript::createExceptionDetails(ErrorString* errorString,
+ const v8::TryCatch& tryCatch,
+ const String16& objectGroup,
+ bool generatePreview) {
+ if (!tryCatch.HasCaught()) return nullptr;
+ v8::Local<v8::Message> message = tryCatch.Message();
+ v8::Local<v8::Value> exception = tryCatch.Exception();
+ String16 messageText =
+ message.IsEmpty() ? String16() : toProtocolString(message->Get());
+ std::unique_ptr<protocol::Runtime::ExceptionDetails> exceptionDetails =
+ protocol::Runtime::ExceptionDetails::create()
+ .setExceptionId(m_context->inspector()->nextExceptionId())
+ .setText(exception.IsEmpty() ? messageText : String16("Uncaught"))
+ .setLineNumber(
+ message.IsEmpty()
+ ? 0
+ : message->GetLineNumber(m_context->context()).FromMaybe(1) -
+ 1)
+ .setColumnNumber(
+ message.IsEmpty()
+ ? 0
+ : message->GetStartColumn(m_context->context()).FromMaybe(0))
+ .build();
+ if (!message.IsEmpty()) {
+ exceptionDetails->setScriptId(String16::fromInteger(
+ static_cast<int>(message->GetScriptOrigin().ScriptID()->Value())));
+ v8::Local<v8::StackTrace> stackTrace = message->GetStackTrace();
+ if (!stackTrace.IsEmpty() && stackTrace->GetFrameCount() > 0)
+ exceptionDetails->setStackTrace(m_context->inspector()
+ ->debugger()
+ ->createStackTrace(stackTrace)
+ ->buildInspectorObjectImpl());
+ }
+ if (!exception.IsEmpty()) {
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrapped = wrapObject(
+ errorString, exception, objectGroup, false /* forceValueType */,
+ generatePreview && !exception->IsNativeError());
+ if (!wrapped) return nullptr;
+ exceptionDetails->setException(std::move(wrapped));
+ }
+ return exceptionDetails;
+}
+
+void InjectedScript::wrapEvaluateResult(
+ ErrorString* errorString, v8::MaybeLocal<v8::Value> maybeResultValue,
+ const v8::TryCatch& tryCatch, const String16& objectGroup,
+ bool returnByValue, bool generatePreview,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result,
+ Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
+ v8::Local<v8::Value> resultValue;
+ if (!tryCatch.HasCaught()) {
+ if (hasInternalError(errorString, !maybeResultValue.ToLocal(&resultValue)))
+ return;
+ std::unique_ptr<RemoteObject> remoteObject = wrapObject(
+ errorString, resultValue, objectGroup, returnByValue, generatePreview);
+ if (!remoteObject) return;
+ if (objectGroup == "console")
+ m_lastEvaluationResult.Reset(m_context->isolate(), resultValue);
+ *result = std::move(remoteObject);
+ } else {
+ v8::Local<v8::Value> exception = tryCatch.Exception();
+ std::unique_ptr<RemoteObject> remoteObject =
+ wrapObject(errorString, exception, objectGroup, false,
+ generatePreview && !exception->IsNativeError());
+ if (!remoteObject) return;
+ // We send exception in result for compatibility reasons, even though it's
+ // accessible through exceptionDetails.exception.
+ *result = std::move(remoteObject);
+ *exceptionDetails = createExceptionDetails(errorString, tryCatch,
+ objectGroup, generatePreview);
+ }
+}
+
+v8::Local<v8::Object> InjectedScript::commandLineAPI() {
+ if (m_commandLineAPI.IsEmpty())
+ m_commandLineAPI.Reset(m_context->isolate(),
+ V8Console::createCommandLineAPI(m_context));
+ return m_commandLineAPI.Get(m_context->isolate());
+}
+
+InjectedScript::Scope::Scope(ErrorString* errorString,
+ V8InspectorImpl* inspector, int contextGroupId)
+ : m_errorString(errorString),
+ m_inspector(inspector),
+ m_contextGroupId(contextGroupId),
+ m_injectedScript(nullptr),
+ m_handleScope(inspector->isolate()),
+ m_tryCatch(inspector->isolate()),
+ m_ignoreExceptionsAndMuteConsole(false),
+ m_previousPauseOnExceptionsState(V8Debugger::DontPauseOnExceptions),
+ m_userGesture(false) {}
+
+bool InjectedScript::Scope::initialize() {
+ cleanup();
+ // TODO(dgozman): what if we reattach to the same context group during
+ // evaluate? Introduce a session id?
+ V8InspectorSessionImpl* session =
+ m_inspector->sessionForContextGroup(m_contextGroupId);
+ if (!session) {
+ *m_errorString = "Internal error";
+ return false;
+ }
+ findInjectedScript(session);
+ if (!m_injectedScript) return false;
+ m_context = m_injectedScript->context()->context();
+ m_context->Enter();
+ return true;
+}
+
+bool InjectedScript::Scope::installCommandLineAPI() {
+ DCHECK(m_injectedScript && !m_context.IsEmpty() &&
+ !m_commandLineAPIScope.get());
+ m_commandLineAPIScope.reset(new V8Console::CommandLineAPIScope(
+ m_context, m_injectedScript->commandLineAPI(), m_context->Global()));
+ return true;
+}
+
+void InjectedScript::Scope::ignoreExceptionsAndMuteConsole() {
+ DCHECK(!m_ignoreExceptionsAndMuteConsole);
+ m_ignoreExceptionsAndMuteConsole = true;
+ m_inspector->client()->muteMetrics(m_contextGroupId);
+ m_inspector->muteExceptions(m_contextGroupId);
+ m_previousPauseOnExceptionsState =
+ setPauseOnExceptionsState(V8Debugger::DontPauseOnExceptions);
+}
+
+V8Debugger::PauseOnExceptionsState
+InjectedScript::Scope::setPauseOnExceptionsState(
+ V8Debugger::PauseOnExceptionsState newState) {
+ if (!m_inspector->debugger()->enabled()) return newState;
+ V8Debugger::PauseOnExceptionsState presentState =
+ m_inspector->debugger()->getPauseOnExceptionsState();
+ if (presentState != newState)
+ m_inspector->debugger()->setPauseOnExceptionsState(newState);
+ return presentState;
+}
+
+void InjectedScript::Scope::pretendUserGesture() {
+ DCHECK(!m_userGesture);
+ m_userGesture = true;
+ m_inspector->client()->beginUserGesture();
+}
+
+void InjectedScript::Scope::cleanup() {
+ m_commandLineAPIScope.reset();
+ if (!m_context.IsEmpty()) {
+ m_context->Exit();
+ m_context.Clear();
+ }
+}
+
+InjectedScript::Scope::~Scope() {
+ if (m_ignoreExceptionsAndMuteConsole) {
+ setPauseOnExceptionsState(m_previousPauseOnExceptionsState);
+ m_inspector->client()->unmuteMetrics(m_contextGroupId);
+ m_inspector->unmuteExceptions(m_contextGroupId);
+ }
+ if (m_userGesture) m_inspector->client()->endUserGesture();
+ cleanup();
+}
+
+InjectedScript::ContextScope::ContextScope(ErrorString* errorString,
+ V8InspectorImpl* inspector,
+ int contextGroupId,
+ int executionContextId)
+ : InjectedScript::Scope(errorString, inspector, contextGroupId),
+ m_executionContextId(executionContextId) {}
+
+InjectedScript::ContextScope::~ContextScope() {}
+
+void InjectedScript::ContextScope::findInjectedScript(
+ V8InspectorSessionImpl* session) {
+ m_injectedScript =
+ session->findInjectedScript(m_errorString, m_executionContextId);
+}
+
+InjectedScript::ObjectScope::ObjectScope(ErrorString* errorString,
+ V8InspectorImpl* inspector,
+ int contextGroupId,
+ const String16& remoteObjectId)
+ : InjectedScript::Scope(errorString, inspector, contextGroupId),
+ m_remoteObjectId(remoteObjectId) {}
+
+InjectedScript::ObjectScope::~ObjectScope() {}
+
+void InjectedScript::ObjectScope::findInjectedScript(
+ V8InspectorSessionImpl* session) {
+ std::unique_ptr<RemoteObjectId> remoteId =
+ RemoteObjectId::parse(m_errorString, m_remoteObjectId);
+ if (!remoteId) return;
+ InjectedScript* injectedScript =
+ session->findInjectedScript(m_errorString, remoteId.get());
+ if (!injectedScript) return;
+ m_objectGroupName = injectedScript->objectGroupName(*remoteId);
+ if (!injectedScript->findObject(m_errorString, *remoteId, &m_object)) return;
+ m_injectedScript = injectedScript;
+}
+
+InjectedScript::CallFrameScope::CallFrameScope(ErrorString* errorString,
+ V8InspectorImpl* inspector,
+ int contextGroupId,
+ const String16& remoteObjectId)
+ : InjectedScript::Scope(errorString, inspector, contextGroupId),
+ m_remoteCallFrameId(remoteObjectId) {}
+
+InjectedScript::CallFrameScope::~CallFrameScope() {}
+
+void InjectedScript::CallFrameScope::findInjectedScript(
+ V8InspectorSessionImpl* session) {
+ std::unique_ptr<RemoteCallFrameId> remoteId =
+ RemoteCallFrameId::parse(m_errorString, m_remoteCallFrameId);
+ if (!remoteId) return;
+ m_frameOrdinal = static_cast<size_t>(remoteId->frameOrdinal());
+ m_injectedScript = session->findInjectedScript(m_errorString, remoteId.get());
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
new file mode 100644
index 0000000000..9b324c948d
--- /dev/null
+++ b/deps/v8/src/inspector/injected-script.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_INJECTEDSCRIPT_H_
+#define V8_INSPECTOR_INJECTEDSCRIPT_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/injected-script-native.h"
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Runtime.h"
+#include "src/inspector/v8-console.h"
+#include "src/inspector/v8-debugger.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class RemoteObjectId;
+class V8FunctionCall;
+class V8InspectorImpl;
+class V8InspectorSessionImpl;
+
+using protocol::ErrorString;
+using protocol::Maybe;
+
+class InjectedScript final {
+ public:
+ static std::unique_ptr<InjectedScript> create(InspectedContext*);
+ ~InjectedScript();
+
+ InspectedContext* context() const { return m_context; }
+
+ void getProperties(
+ ErrorString*, v8::Local<v8::Object>, const String16& groupName,
+ bool ownProperties, bool accessorPropertiesOnly, bool generatePreview,
+ std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
+ result,
+ Maybe<protocol::Runtime::ExceptionDetails>*);
+ void releaseObject(const String16& objectId);
+
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrapObject(
+ ErrorString*, v8::Local<v8::Value>, const String16& groupName,
+ bool forceValueType = false, bool generatePreview = false) const;
+ bool wrapObjectProperty(ErrorString*, v8::Local<v8::Object>,
+ v8::Local<v8::Name> key, const String16& groupName,
+ bool forceValueType = false,
+ bool generatePreview = false) const;
+ bool wrapPropertyInArray(ErrorString*, v8::Local<v8::Array>,
+ v8::Local<v8::String> property,
+ const String16& groupName,
+ bool forceValueType = false,
+ bool generatePreview = false) const;
+ bool wrapObjectsInArray(ErrorString*, v8::Local<v8::Array>,
+ const String16& groupName,
+ bool forceValueType = false,
+ bool generatePreview = false) const;
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrapTable(
+ v8::Local<v8::Value> table, v8::Local<v8::Value> columns) const;
+
+ bool findObject(ErrorString*, const RemoteObjectId&,
+ v8::Local<v8::Value>*) const;
+ String16 objectGroupName(const RemoteObjectId&) const;
+ void releaseObjectGroup(const String16&);
+ void setCustomObjectFormatterEnabled(bool);
+ v8::MaybeLocal<v8::Value> resolveCallArgument(
+ ErrorString*, protocol::Runtime::CallArgument*);
+
+ std::unique_ptr<protocol::Runtime::ExceptionDetails> createExceptionDetails(
+ ErrorString*, const v8::TryCatch&, const String16& groupName,
+ bool generatePreview);
+ void wrapEvaluateResult(
+ ErrorString*, v8::MaybeLocal<v8::Value> maybeResultValue,
+ const v8::TryCatch&, const String16& objectGroup, bool returnByValue,
+ bool generatePreview,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result,
+ Maybe<protocol::Runtime::ExceptionDetails>*);
+ v8::Local<v8::Value> lastEvaluationResult() const;
+
+ class Scope {
+ public:
+ bool initialize();
+ bool installCommandLineAPI();
+ void ignoreExceptionsAndMuteConsole();
+ void pretendUserGesture();
+ v8::Local<v8::Context> context() const { return m_context; }
+ InjectedScript* injectedScript() const { return m_injectedScript; }
+ const v8::TryCatch& tryCatch() const { return m_tryCatch; }
+
+ protected:
+ Scope(ErrorString*, V8InspectorImpl*, int contextGroupId);
+ virtual ~Scope();
+ virtual void findInjectedScript(V8InspectorSessionImpl*) = 0;
+
+ ErrorString* m_errorString;
+ V8InspectorImpl* m_inspector;
+ int m_contextGroupId;
+ InjectedScript* m_injectedScript;
+
+ private:
+ void cleanup();
+ V8Debugger::PauseOnExceptionsState setPauseOnExceptionsState(
+ V8Debugger::PauseOnExceptionsState);
+
+ v8::HandleScope m_handleScope;
+ v8::TryCatch m_tryCatch;
+ v8::Local<v8::Context> m_context;
+ std::unique_ptr<V8Console::CommandLineAPIScope> m_commandLineAPIScope;
+ bool m_ignoreExceptionsAndMuteConsole;
+ V8Debugger::PauseOnExceptionsState m_previousPauseOnExceptionsState;
+ bool m_userGesture;
+ };
+
+ class ContextScope : public Scope {
+ public:
+ ContextScope(ErrorString*, V8InspectorImpl*, int contextGroupId,
+ int executionContextId);
+ ~ContextScope();
+
+ private:
+ void findInjectedScript(V8InspectorSessionImpl*) override;
+ int m_executionContextId;
+
+ DISALLOW_COPY_AND_ASSIGN(ContextScope);
+ };
+
+ class ObjectScope : public Scope {
+ public:
+ ObjectScope(ErrorString*, V8InspectorImpl*, int contextGroupId,
+ const String16& remoteObjectId);
+ ~ObjectScope();
+ const String16& objectGroupName() const { return m_objectGroupName; }
+ v8::Local<v8::Value> object() const { return m_object; }
+
+ private:
+ void findInjectedScript(V8InspectorSessionImpl*) override;
+ String16 m_remoteObjectId;
+ String16 m_objectGroupName;
+ v8::Local<v8::Value> m_object;
+
+ DISALLOW_COPY_AND_ASSIGN(ObjectScope);
+ };
+
+ class CallFrameScope : public Scope {
+ public:
+ CallFrameScope(ErrorString*, V8InspectorImpl*, int contextGroupId,
+ const String16& remoteCallFrameId);
+ ~CallFrameScope();
+ size_t frameOrdinal() const { return m_frameOrdinal; }
+
+ private:
+ void findInjectedScript(V8InspectorSessionImpl*) override;
+ String16 m_remoteCallFrameId;
+ size_t m_frameOrdinal;
+
+ DISALLOW_COPY_AND_ASSIGN(CallFrameScope);
+ };
+
+ private:
+ InjectedScript(InspectedContext*, v8::Local<v8::Object>,
+ std::unique_ptr<InjectedScriptNative>);
+ v8::Local<v8::Value> v8Value() const;
+ v8::MaybeLocal<v8::Value> wrapValue(ErrorString*, v8::Local<v8::Value>,
+ const String16& groupName,
+ bool forceValueType,
+ bool generatePreview) const;
+ v8::Local<v8::Object> commandLineAPI();
+
+ InspectedContext* m_context;
+ v8::Global<v8::Value> m_value;
+ v8::Global<v8::Value> m_lastEvaluationResult;
+ std::unique_ptr<InjectedScriptNative> m_native;
+ v8::Global<v8::Object> m_commandLineAPI;
+
+ DISALLOW_COPY_AND_ASSIGN(InjectedScript);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_INJECTEDSCRIPT_H_
diff --git a/deps/v8/src/inspector/injected_script_externs.js b/deps/v8/src/inspector/injected_script_externs.js
new file mode 100644
index 0000000000..b6339c6eb0
--- /dev/null
+++ b/deps/v8/src/inspector/injected_script_externs.js
@@ -0,0 +1,66 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/** @interface */
+function InjectedScriptHostClass()
+{
+}
+
+/**
+ * @param {*} obj
+ * @return {string}
+ */
+InjectedScriptHostClass.prototype.internalConstructorName = function(obj) {}
+
+/**
+ * @param {*} obj
+ * @param {function()|undefined} func
+ * @return {boolean}
+ */
+InjectedScriptHostClass.prototype.formatAccessorsAsProperties = function(obj, func) {}
+
+/**
+ * @param {*} obj
+ * @return {string}
+ */
+InjectedScriptHostClass.prototype.subtype = function(obj) {}
+
+/**
+ * @param {*} obj
+ * @return {boolean}
+ */
+InjectedScriptHostClass.prototype.isTypedArray = function(obj) {}
+
+/**
+ * @param {*} obj
+ * @return {!Array.<*>}
+ */
+InjectedScriptHostClass.prototype.getInternalProperties = function(obj) {}
+
+/**
+ * @param {!Object} object
+ * @param {string} propertyName
+ * @return {boolean}
+ */
+InjectedScriptHostClass.prototype.objectHasOwnProperty = function(object, propertyName) {}
+
+/**
+ * @param {*} value
+ * @param {string} groupName
+ * @return {number}
+ */
+InjectedScriptHostClass.prototype.bind = function(value, groupName) {}
+
+/**
+ * @param {!Object} object
+ * @return {!Object}
+ */
+InjectedScriptHostClass.prototype.proxyTargetValue = function(object) {}
+
+/** @type {!InjectedScriptHostClass} */
+var InjectedScriptHost;
+/** @type {!Window} */
+var inspectedGlobalObject;
+/** @type {number} */
+var injectedScriptId;
diff --git a/deps/v8/src/inspector/inspected-context.cc b/deps/v8/src/inspector/inspected-context.cc
new file mode 100644
index 0000000000..9100f64b2a
--- /dev/null
+++ b/deps/v8/src/inspector/inspected-context.cc
@@ -0,0 +1,88 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/inspected-context.h"
+
+#include "src/inspector/injected-script.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-value-copier.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+void InspectedContext::weakCallback(
+ const v8::WeakCallbackInfo<InspectedContext>& data) {
+ InspectedContext* context = data.GetParameter();
+ if (!context->m_context.IsEmpty()) {
+ context->m_context.Reset();
+ data.SetSecondPassCallback(&InspectedContext::weakCallback);
+ } else {
+ context->m_inspector->discardInspectedContext(context->m_contextGroupId,
+ context->m_contextId);
+ }
+}
+
+void InspectedContext::consoleWeakCallback(
+ const v8::WeakCallbackInfo<InspectedContext>& data) {
+ data.GetParameter()->m_console.Reset();
+}
+
+InspectedContext::InspectedContext(V8InspectorImpl* inspector,
+ const V8ContextInfo& info, int contextId)
+ : m_inspector(inspector),
+ m_context(info.context->GetIsolate(), info.context),
+ m_contextId(contextId),
+ m_contextGroupId(info.contextGroupId),
+ m_origin(toString16(info.origin)),
+ m_humanReadableName(toString16(info.humanReadableName)),
+ m_auxData(toString16(info.auxData)),
+ m_reported(false) {
+ m_context.SetWeak(this, &InspectedContext::weakCallback,
+ v8::WeakCallbackType::kParameter);
+
+ v8::Isolate* isolate = m_inspector->isolate();
+ v8::Local<v8::Object> global = info.context->Global();
+ v8::Local<v8::Object> console =
+ V8Console::createConsole(this, info.hasMemoryOnConsole);
+ if (!global
+ ->Set(info.context, toV8StringInternalized(isolate, "console"),
+ console)
+ .FromMaybe(false))
+ return;
+ m_console.Reset(isolate, console);
+ m_console.SetWeak(this, &InspectedContext::consoleWeakCallback,
+ v8::WeakCallbackType::kParameter);
+}
+
+InspectedContext::~InspectedContext() {
+ if (!m_context.IsEmpty() && !m_console.IsEmpty()) {
+ v8::HandleScope scope(isolate());
+ V8Console::clearInspectedContextIfNeeded(context(),
+ m_console.Get(isolate()));
+ }
+}
+
+v8::Local<v8::Context> InspectedContext::context() const {
+ return m_context.Get(isolate());
+}
+
+v8::Isolate* InspectedContext::isolate() const {
+ return m_inspector->isolate();
+}
+
+bool InspectedContext::createInjectedScript() {
+ DCHECK(!m_injectedScript);
+ std::unique_ptr<InjectedScript> injectedScript = InjectedScript::create(this);
+ // InjectedScript::create can destroy |this|.
+ if (!injectedScript) return false;
+ m_injectedScript = std::move(injectedScript);
+ return true;
+}
+
+void InspectedContext::discardInjectedScript() { m_injectedScript.reset(); }
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/inspected-context.h b/deps/v8/src/inspector/inspected-context.h
new file mode 100644
index 0000000000..d8e72cc353
--- /dev/null
+++ b/deps/v8/src/inspector/inspected-context.h
@@ -0,0 +1,64 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_INSPECTEDCONTEXT_H_
+#define V8_INSPECTOR_INSPECTEDCONTEXT_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/string-16.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class InjectedScript;
+class InjectedScriptHost;
+class V8ContextInfo;
+class V8InspectorImpl;
+
+class InspectedContext {
+ public:
+ ~InspectedContext();
+
+ v8::Local<v8::Context> context() const;
+ int contextId() const { return m_contextId; }
+ int contextGroupId() const { return m_contextGroupId; }
+ String16 origin() const { return m_origin; }
+ String16 humanReadableName() const { return m_humanReadableName; }
+ String16 auxData() const { return m_auxData; }
+
+ bool isReported() const { return m_reported; }
+ void setReported(bool reported) { m_reported = reported; }
+
+ v8::Isolate* isolate() const;
+ V8InspectorImpl* inspector() const { return m_inspector; }
+
+ InjectedScript* getInjectedScript() { return m_injectedScript.get(); }
+ bool createInjectedScript();
+ void discardInjectedScript();
+
+ private:
+ friend class V8InspectorImpl;
+ InspectedContext(V8InspectorImpl*, const V8ContextInfo&, int contextId);
+ static void weakCallback(const v8::WeakCallbackInfo<InspectedContext>&);
+ static void consoleWeakCallback(
+ const v8::WeakCallbackInfo<InspectedContext>&);
+
+ V8InspectorImpl* m_inspector;
+ v8::Global<v8::Context> m_context;
+ int m_contextId;
+ int m_contextGroupId;
+ const String16 m_origin;
+ const String16 m_humanReadableName;
+ const String16 m_auxData;
+ bool m_reported;
+ std::unique_ptr<InjectedScript> m_injectedScript;
+ v8::Global<v8::Object> m_console;
+
+ DISALLOW_COPY_AND_ASSIGN(InspectedContext);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_INSPECTEDCONTEXT_H_
diff --git a/deps/v8/src/inspector/inspector.gyp b/deps/v8/src/inspector/inspector.gyp
index 5fc49b15ea..2d5c7a5153 100644
--- a/deps/v8/src/inspector/inspector.gyp
+++ b/deps/v8/src/inspector/inspector.gyp
@@ -2,111 +2,107 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-{ 'variables': {
- 'protocol_path': '../../third_party/WebKit/Source/platform/inspector_protocol',
- 'protocol_sources': [
- '<(SHARED_INTERMEDIATE_DIR)/inspector/Console.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/inspector/Console.h',
- '<(SHARED_INTERMEDIATE_DIR)/inspector/Debugger.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/inspector/Debugger.h',
- '<(SHARED_INTERMEDIATE_DIR)/inspector/HeapProfiler.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/inspector/HeapProfiler.h',
- '<(SHARED_INTERMEDIATE_DIR)/inspector/Profiler.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/inspector/Profiler.h',
- '<(SHARED_INTERMEDIATE_DIR)/inspector/public/Debugger.h',
- '<(SHARED_INTERMEDIATE_DIR)/inspector/public/Runtime.h',
- '<(SHARED_INTERMEDIATE_DIR)/inspector/Runtime.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/inspector/Runtime.h',
- ]
+{
+ 'variables': {
+ 'protocol_path': '<(PRODUCT_DIR)/../../third_party/WebKit/Source/platform/inspector_protocol',
},
+ 'includes': [
+ 'inspector.gypi',
+ '<(PRODUCT_DIR)/../../../third_party/WebKit/Source/platform/inspector_protocol/inspector_protocol.gypi',
+ ],
'targets': [
- { 'target_name': 'inspector_protocol_sources',
+ { 'target_name': 'inspector_injected_script',
'type': 'none',
- 'variables': {
- 'jinja_module_files': [
- # jinja2/__init__.py contains version string, so sufficient for package
- '../third_party/jinja2/__init__.py',
- '../third_party/markupsafe/__init__.py', # jinja2 dep
- ]
- },
'actions': [
{
- 'action_name': 'generate_inspector_protocol_sources',
+ 'action_name': 'convert_js_to_cpp_char_array',
+ 'inputs': [
+ 'build/xxd.py',
+ '<(inspector_injected_script_source)',
+ ],
+ 'outputs': [
+ '<(inspector_generated_injected_script)',
+ ],
+ 'action': [
+ 'python',
+ 'build/xxd.py',
+ 'InjectedScriptSource_js',
+ 'injected-script-source.js',
+ '<@(_outputs)'
+ ],
+ },
+ ],
+ # Since this target generates header files, it needs to be a hard dependency.
+ 'hard_dependency': 1,
+ },
+ { 'target_name': 'inspector_debugger_script',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'convert_js_to_cpp_char_array',
+ 'inputs': [
+ 'build/xxd.py',
+ '<(inspector_debugger_script_source)',
+ ],
+ 'outputs': [
+ '<(inspector_generated_debugger_script)',
+ ],
+ 'action': [
+ 'python',
+ 'build/xxd.py',
+ 'DebuggerScript_js',
+ 'debugger-script.js',
+ '<@(_outputs)'
+ ],
+ },
+ ],
+ # Since this target generates header files, it needs to be a hard dependency.
+ 'hard_dependency': 1,
+ },
+ { 'target_name': 'protocol_compatibility',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'protocol_compatibility',
'inputs': [
- # Source generator script.
- '<(protocol_path)/CodeGenerator.py',
- # Source code templates.
- '<(protocol_path)/Exported_h.template',
- '<(protocol_path)/Imported_h.template',
- '<(protocol_path)/TypeBuilder_h.template',
- '<(protocol_path)/TypeBuilder_cpp.template',
- # Protocol definition.
'js_protocol.json',
],
'outputs': [
- '<@(protocol_sources)',
+ '<@(SHARED_INTERMEDIATE_DIR)/src/js_protocol.stamp',
],
'action': [
'python',
- '<(protocol_path)/CodeGenerator.py',
- '--protocol', 'js_protocol.json',
- '--string_type', 'String16',
- '--export_macro', 'PLATFORM_EXPORT',
- '--output_dir', '<(SHARED_INTERMEDIATE_DIR)/inspector',
- '--output_package', 'inspector',
- '--exported_dir', '<(SHARED_INTERMEDIATE_DIR)/inspector/public',
- '--exported_package', 'inspector/public',
+ '<(protocol_path)/CheckProtocolCompatibility.py',
+ '--stamp', '<@(_outputs)',
+ 'js_protocol.json',
],
- 'message': 'Generating Inspector protocol backend sources from json definitions',
+ 'message': 'Generating inspector protocol sources from protocol json definition',
},
]
},
- { 'target_name': 'inspector_protocol',
- 'type': 'static_library',
- 'dependencies': [
- 'inspector_protocol_sources',
- ],
- 'include_dirs+': [
- '<(protocol_path)/../..',
- '<(SHARED_INTERMEDIATE_DIR)',
- ],
- 'defines': [
- 'V8_INSPECTOR_USE_STL',
- ],
- 'msvs_disabled_warnings': [
- 4267, # Truncation from size_t to int.
- 4305, # Truncation from 'type1' to 'type2'.
- 4324, # Struct padded due to declspec(align).
- 4714, # Function marked forceinline not inlined.
- 4800, # Value forced to bool.
- 4996, # Deprecated function call.
- ],
- 'sources': [
- '<@(protocol_sources)',
- '<(protocol_path)/Allocator.h',
- '<(protocol_path)/Array.h',
- '<(protocol_path)/BackendCallback.h',
- '<(protocol_path)/CodeGenerator.py',
- '<(protocol_path)/Collections.h',
- '<(protocol_path)/DispatcherBase.cpp',
- '<(protocol_path)/DispatcherBase.h',
- '<(protocol_path)/ErrorSupport.cpp',
- '<(protocol_path)/ErrorSupport.h',
- '<(protocol_path)/FrontendChannel.h',
- '<(protocol_path)/Maybe.h',
- '<(protocol_path)/Object.cpp',
- '<(protocol_path)/Object.h',
- '<(protocol_path)/Parser.cpp',
- '<(protocol_path)/Parser.h',
- '<(protocol_path)/Platform.h',
- '<(protocol_path)/PlatformSTL.h',
- '<(protocol_path)/String16.cpp',
- '<(protocol_path)/String16.h',
- '<(protocol_path)/String16STL.cpp',
- '<(protocol_path)/String16STL.h',
- '<(protocol_path)/ValueConversions.h',
- '<(protocol_path)/Values.cpp',
- '<(protocol_path)/Values.h',
+ { 'target_name': 'protocol_generated_sources',
+ 'type': 'none',
+ 'dependencies': [ 'protocol_compatibility' ],
+ 'actions': [
+ {
+ 'action_name': 'protocol_generated_sources',
+ 'inputs': [
+ 'js_protocol.json',
+ 'inspector_protocol_config.json',
+ '<@(inspector_protocol_files)',
+ ],
+ 'outputs': [
+ '<@(inspector_generated_sources)',
+ ],
+ 'action': [
+ 'python',
+ '<(protocol_path)/CodeGenerator.py',
+ '--jinja_dir', '<(PRODUCT_DIR)/../../third_party',
+ '--output_base', '<(SHARED_INTERMEDIATE_DIR)/src/inspector',
+ '--config', 'inspector_protocol_config.json',
+ ],
+ 'message': 'Generating inspector protocol sources from protocol json',
+ },
]
},
],
diff --git a/deps/v8/src/inspector/inspector.gypi b/deps/v8/src/inspector/inspector.gypi
new file mode 100644
index 0000000000..863c038d6a
--- /dev/null
+++ b/deps/v8/src/inspector/inspector.gypi
@@ -0,0 +1,95 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'inspector_generated_sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Forward.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Protocol.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Protocol.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Console.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Console.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Debugger.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Debugger.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/HeapProfiler.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/HeapProfiler.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Profiler.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Profiler.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Runtime.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Runtime.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Schema.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Schema.h',
+ '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Debugger.h',
+ '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Runtime.h',
+ '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Schema.h',
+ ],
+
+ 'inspector_injected_script_source': 'injected-script-source.js',
+ 'inspector_generated_injected_script': '<(SHARED_INTERMEDIATE_DIR)/src/inspector/injected-script-source.h',
+ 'inspector_debugger_script_source': 'debugger-script.js',
+ 'inspector_generated_debugger_script': '<(SHARED_INTERMEDIATE_DIR)/src/inspector/debugger-script.h',
+
+ 'inspector_all_sources': [
+ '<@(inspector_generated_sources)',
+ '<(inspector_generated_injected_script)',
+ '<(inspector_generated_debugger_script)',
+ '../../include/v8-inspector.h',
+ '../../include/v8-inspector-protocol.h',
+ 'inspector/injected-script.cc',
+ 'inspector/injected-script.h',
+ 'inspector/injected-script-native.cc',
+ 'inspector/injected-script-native.h',
+ 'inspector/inspected-context.cc',
+ 'inspector/inspected-context.h',
+ 'inspector/java-script-call-frame.cc',
+ 'inspector/java-script-call-frame.h',
+ 'inspector/protocol-platform.h',
+ 'inspector/remote-object-id.cc',
+ 'inspector/remote-object-id.h',
+ 'inspector/script-breakpoint.h',
+ 'inspector/search-util.cc',
+ 'inspector/search-util.h',
+ 'inspector/string-16.cc',
+ 'inspector/string-16.h',
+ 'inspector/string-util.cc',
+ 'inspector/string-util.h',
+ 'inspector/v8-console.cc',
+ 'inspector/v8-console.h',
+ 'inspector/v8-console-agent-impl.cc',
+ 'inspector/v8-console-agent-impl.h',
+ 'inspector/v8-console-message.cc',
+ 'inspector/v8-console-message.h',
+ 'inspector/v8-debugger.cc',
+ 'inspector/v8-debugger.h',
+ 'inspector/v8-debugger-agent-impl.cc',
+ 'inspector/v8-debugger-agent-impl.h',
+ 'inspector/v8-debugger-script.cc',
+ 'inspector/v8-debugger-script.h',
+ 'inspector/v8-function-call.cc',
+ 'inspector/v8-function-call.h',
+ 'inspector/v8-heap-profiler-agent-impl.cc',
+ 'inspector/v8-heap-profiler-agent-impl.h',
+ 'inspector/v8-injected-script-host.cc',
+ 'inspector/v8-injected-script-host.h',
+ 'inspector/v8-inspector-impl.cc',
+ 'inspector/v8-inspector-impl.h',
+ 'inspector/v8-inspector-session-impl.cc',
+ 'inspector/v8-inspector-session-impl.h',
+ 'inspector/v8-internal-value-type.cc',
+ 'inspector/v8-internal-value-type.h',
+ 'inspector/v8-profiler-agent-impl.cc',
+ 'inspector/v8-profiler-agent-impl.h',
+ 'inspector/v8-regex.cc',
+ 'inspector/v8-regex.h',
+ 'inspector/v8-runtime-agent-impl.cc',
+ 'inspector/v8-runtime-agent-impl.h',
+ 'inspector/v8-schema-agent-impl.cc',
+ 'inspector/v8-schema-agent-impl.h',
+ 'inspector/v8-stack-trace-impl.cc',
+ 'inspector/v8-stack-trace-impl.h',
+ 'inspector/v8-value-copier.cc',
+ 'inspector/v8-value-copier.h',
+ ]
+ }
+}
diff --git a/deps/v8/src/inspector/inspector_protocol_config.json b/deps/v8/src/inspector/inspector_protocol_config.json
new file mode 100644
index 0000000000..cb9e6698d1
--- /dev/null
+++ b/deps/v8/src/inspector/inspector_protocol_config.json
@@ -0,0 +1,25 @@
+{
+ "protocol": {
+ "path": "js_protocol.json",
+ "package": "src/inspector/protocol",
+ "output": "protocol",
+ "namespace": ["v8_inspector", "protocol"]
+ },
+
+ "exported": {
+ "package": "include/inspector",
+ "output": "../../include/inspector",
+ "string_header": "v8-inspector.h",
+ "string_in": "StringView",
+ "string_out": "std::unique_ptr<StringBuffer>",
+ "to_string_out": "StringBufferImpl::adopt(%s)",
+ "export_macro": "V8_EXPORT"
+ },
+
+ "lib": {
+ "package": "src/inspector/protocol",
+ "output": "protocol",
+ "string_header": "src/inspector/string-util.h",
+ "platform_header": "src/inspector/protocol-platform.h"
+ }
+}
diff --git a/deps/v8/src/inspector/java-script-call-frame.cc b/deps/v8/src/inspector/java-script-call-frame.cc
new file mode 100644
index 0000000000..b70af21f86
--- /dev/null
+++ b/deps/v8/src/inspector/java-script-call-frame.cc
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/inspector/java-script-call-frame.h"
+
+#include "src/inspector/string-util.h"
+
+#include "include/v8-debug.h"
+
+namespace v8_inspector {
+
+JavaScriptCallFrame::JavaScriptCallFrame(v8::Local<v8::Context> debuggerContext,
+ v8::Local<v8::Object> callFrame)
+ : m_isolate(debuggerContext->GetIsolate()),
+ m_debuggerContext(m_isolate, debuggerContext),
+ m_callFrame(m_isolate, callFrame) {}
+
+JavaScriptCallFrame::~JavaScriptCallFrame() {}
+
+int JavaScriptCallFrame::callV8FunctionReturnInt(const char* name) const {
+ v8::HandleScope handleScope(m_isolate);
+ v8::MicrotasksScope microtasks(m_isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(m_isolate, m_debuggerContext);
+ v8::Local<v8::Object> callFrame =
+ v8::Local<v8::Object>::New(m_isolate, m_callFrame);
+ v8::Local<v8::Function> func = v8::Local<v8::Function>::Cast(
+ callFrame->Get(context, toV8StringInternalized(m_isolate, name))
+ .ToLocalChecked());
+ v8::Local<v8::Value> result;
+ if (!func->Call(context, callFrame, 0, nullptr).ToLocal(&result) ||
+ !result->IsInt32())
+ return 0;
+ return result.As<v8::Int32>()->Value();
+}
+
+int JavaScriptCallFrame::sourceID() const {
+ return callV8FunctionReturnInt("sourceID");
+}
+
+int JavaScriptCallFrame::line() const {
+ return callV8FunctionReturnInt("line");
+}
+
+int JavaScriptCallFrame::column() const {
+ return callV8FunctionReturnInt("column");
+}
+
+int JavaScriptCallFrame::contextId() const {
+ return callV8FunctionReturnInt("contextId");
+}
+
+bool JavaScriptCallFrame::isAtReturn() const {
+ v8::HandleScope handleScope(m_isolate);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(m_isolate, m_debuggerContext);
+ v8::Local<v8::Object> callFrame =
+ v8::Local<v8::Object>::New(m_isolate, m_callFrame);
+ v8::Local<v8::Value> result;
+ if (!callFrame->Get(context, toV8StringInternalized(m_isolate, "isAtReturn"))
+ .ToLocal(&result) ||
+ !result->IsBoolean())
+ return false;
+ return result.As<v8::Boolean>()->BooleanValue(context).FromMaybe(false);
+}
+
+v8::Local<v8::Object> JavaScriptCallFrame::details() const {
+ v8::MicrotasksScope microtasks(m_isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(m_isolate, m_debuggerContext);
+ v8::Local<v8::Object> callFrame =
+ v8::Local<v8::Object>::New(m_isolate, m_callFrame);
+ v8::Local<v8::Function> func = v8::Local<v8::Function>::Cast(
+ callFrame->Get(context, toV8StringInternalized(m_isolate, "details"))
+ .ToLocalChecked());
+ return v8::Local<v8::Object>::Cast(
+ func->Call(context, callFrame, 0, nullptr).ToLocalChecked());
+}
+
+v8::MaybeLocal<v8::Value> JavaScriptCallFrame::evaluate(
+ v8::Local<v8::Value> expression) {
+ v8::MicrotasksScope microtasks(m_isolate,
+ v8::MicrotasksScope::kRunMicrotasks);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(m_isolate, m_debuggerContext);
+ v8::Local<v8::Object> callFrame =
+ v8::Local<v8::Object>::New(m_isolate, m_callFrame);
+ v8::Local<v8::Function> evalFunction = v8::Local<v8::Function>::Cast(
+ callFrame->Get(context, toV8StringInternalized(m_isolate, "evaluate"))
+ .ToLocalChecked());
+ return evalFunction->Call(context, callFrame, 1, &expression);
+}
+
+v8::MaybeLocal<v8::Value> JavaScriptCallFrame::restart() {
+ v8::MicrotasksScope microtasks(m_isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(m_isolate, m_debuggerContext);
+ v8::Local<v8::Object> callFrame =
+ v8::Local<v8::Object>::New(m_isolate, m_callFrame);
+ v8::Local<v8::Function> restartFunction = v8::Local<v8::Function>::Cast(
+ callFrame->Get(context, toV8StringInternalized(m_isolate, "restart"))
+ .ToLocalChecked());
+ v8::Debug::SetLiveEditEnabled(m_isolate, true);
+ v8::MaybeLocal<v8::Value> result = restartFunction->Call(
+ m_debuggerContext.Get(m_isolate), callFrame, 0, nullptr);
+ v8::Debug::SetLiveEditEnabled(m_isolate, false);
+ return result;
+}
+
+v8::MaybeLocal<v8::Value> JavaScriptCallFrame::setVariableValue(
+ int scopeNumber, v8::Local<v8::Value> variableName,
+ v8::Local<v8::Value> newValue) {
+ v8::MicrotasksScope microtasks(m_isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(m_isolate, m_debuggerContext);
+ v8::Local<v8::Object> callFrame =
+ v8::Local<v8::Object>::New(m_isolate, m_callFrame);
+ v8::Local<v8::Function> setVariableValueFunction =
+ v8::Local<v8::Function>::Cast(
+ callFrame
+ ->Get(context,
+ toV8StringInternalized(m_isolate, "setVariableValue"))
+ .ToLocalChecked());
+ v8::Local<v8::Value> argv[] = {
+ v8::Local<v8::Value>(v8::Integer::New(m_isolate, scopeNumber)),
+ variableName, newValue};
+ return setVariableValueFunction->Call(context, callFrame, arraysize(argv),
+ argv);
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/java-script-call-frame.h b/deps/v8/src/inspector/java-script-call-frame.h
new file mode 100644
index 0000000000..5a4ce19cc2
--- /dev/null
+++ b/deps/v8/src/inspector/java-script-call-frame.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_JAVASCRIPTCALLFRAME_H_
+#define V8_INSPECTOR_JAVASCRIPTCALLFRAME_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol-platform.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class JavaScriptCallFrame {
+ public:
+ static std::unique_ptr<JavaScriptCallFrame> create(
+ v8::Local<v8::Context> debuggerContext, v8::Local<v8::Object> callFrame) {
+ return wrapUnique(new JavaScriptCallFrame(debuggerContext, callFrame));
+ }
+ ~JavaScriptCallFrame();
+
+ int sourceID() const;
+ int line() const;
+ int column() const;
+ int contextId() const;
+
+ bool isAtReturn() const;
+ v8::Local<v8::Object> details() const;
+
+ v8::MaybeLocal<v8::Value> evaluate(v8::Local<v8::Value> expression);
+ v8::MaybeLocal<v8::Value> restart();
+ v8::MaybeLocal<v8::Value> setVariableValue(int scopeNumber,
+ v8::Local<v8::Value> variableName,
+ v8::Local<v8::Value> newValue);
+
+ private:
+ JavaScriptCallFrame(v8::Local<v8::Context> debuggerContext,
+ v8::Local<v8::Object> callFrame);
+
+ int callV8FunctionReturnInt(const char* name) const;
+
+ v8::Isolate* m_isolate;
+ v8::Global<v8::Context> m_debuggerContext;
+ v8::Global<v8::Object> m_callFrame;
+
+ DISALLOW_COPY_AND_ASSIGN(JavaScriptCallFrame);
+};
+
+using JavaScriptCallFrames = std::vector<std::unique_ptr<JavaScriptCallFrame>>;
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_JAVASCRIPTCALLFRAME_H_
diff --git a/deps/v8/src/inspector/js_protocol-1.2.json b/deps/v8/src/inspector/js_protocol-1.2.json
new file mode 100644
index 0000000000..aff6806222
--- /dev/null
+++ b/deps/v8/src/inspector/js_protocol-1.2.json
@@ -0,0 +1,997 @@
+{
+ "version": { "major": "1", "minor": "2" },
+ "domains": [
+ {
+ "domain": "Schema",
+ "description": "Provides information about the protocol schema.",
+ "types": [
+ {
+ "id": "Domain",
+ "type": "object",
+ "description": "Description of the protocol domain.",
+ "exported": true,
+ "properties": [
+ { "name": "name", "type": "string", "description": "Domain name." },
+ { "name": "version", "type": "string", "description": "Domain version." }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "getDomains",
+ "description": "Returns supported domains.",
+ "handlers": ["browser", "renderer"],
+ "returns": [
+ { "name": "domains", "type": "array", "items": { "$ref": "Domain" }, "description": "List of supported domains." }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "Runtime",
+ "description": "Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects. Evaluation results are returned as mirror object that expose object type, string representation and unique identifier that can be used for further object reference. Original objects are maintained in memory unless they are either explicitly released or are released along with the other objects in their object group.",
+ "types": [
+ {
+ "id": "ScriptId",
+ "type": "string",
+ "description": "Unique script identifier."
+ },
+ {
+ "id": "RemoteObjectId",
+ "type": "string",
+ "description": "Unique object identifier."
+ },
+ {
+ "id": "UnserializableValue",
+ "type": "string",
+ "enum": ["Infinity", "NaN", "-Infinity", "-0"],
+ "description": "Primitive value which cannot be JSON-stringified."
+ },
+ {
+ "id": "RemoteObject",
+ "type": "object",
+ "description": "Mirror object referencing original JavaScript object.",
+ "exported": true,
+ "properties": [
+ { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
+ { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error", "proxy", "promise", "typedarray"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
+ { "name": "className", "type": "string", "optional": true, "description": "Object class (constructor) name. Specified for <code>object</code> type values only." },
+ { "name": "value", "type": "any", "optional": true, "description": "Remote object value in case of primitive values or JSON values (if it was requested)." },
+ { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified does not have <code>value</code>, but gets this property." },
+ { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
+ { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Unique object identifier (for non-primitive values)." },
+ { "name": "preview", "$ref": "ObjectPreview", "optional": true, "description": "Preview containing abbreviated property values. Specified for <code>object</code> type values only.", "experimental": true },
+ { "name": "customPreview", "$ref": "CustomPreview", "optional": true, "experimental": true}
+ ]
+ },
+ {
+ "id": "CustomPreview",
+ "type": "object",
+ "experimental": true,
+ "properties": [
+ { "name": "header", "type": "string"},
+ { "name": "hasBody", "type": "boolean"},
+ { "name": "formatterObjectId", "$ref": "RemoteObjectId"},
+ { "name": "bindRemoteObjectFunctionId", "$ref": "RemoteObjectId" },
+ { "name": "configObjectId", "$ref": "RemoteObjectId", "optional": true }
+ ]
+ },
+ {
+ "id": "ObjectPreview",
+ "type": "object",
+ "experimental": true,
+ "description": "Object containing abbreviated remote object value.",
+ "properties": [
+ { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
+ { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
+ { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
+ { "name": "overflow", "type": "boolean", "description": "True iff some of the properties or entries of the original object did not fit." },
+ { "name": "properties", "type": "array", "items": { "$ref": "PropertyPreview" }, "description": "List of the properties." },
+ { "name": "entries", "type": "array", "items": { "$ref": "EntryPreview" }, "optional": true, "description": "List of the entries. Specified for <code>map</code> and <code>set</code> subtype values only." }
+ ]
+ },
+ {
+ "id": "PropertyPreview",
+ "type": "object",
+ "experimental": true,
+ "properties": [
+ { "name": "name", "type": "string", "description": "Property name." },
+ { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol", "accessor"], "description": "Object type. Accessor means that the property itself is an accessor property." },
+ { "name": "value", "type": "string", "optional": true, "description": "User-friendly property value string." },
+ { "name": "valuePreview", "$ref": "ObjectPreview", "optional": true, "description": "Nested value preview." },
+ { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." }
+ ]
+ },
+ {
+ "id": "EntryPreview",
+ "type": "object",
+ "experimental": true,
+ "properties": [
+ { "name": "key", "$ref": "ObjectPreview", "optional": true, "description": "Preview of the key. Specified for map-like collection entries." },
+ { "name": "value", "$ref": "ObjectPreview", "description": "Preview of the value." }
+ ]
+ },
+ {
+ "id": "PropertyDescriptor",
+ "type": "object",
+ "description": "Object property descriptor.",
+ "properties": [
+ { "name": "name", "type": "string", "description": "Property name or symbol description." },
+ { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." },
+ { "name": "writable", "type": "boolean", "optional": true, "description": "True if the value associated with the property may be changed (data descriptors only)." },
+ { "name": "get", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a getter for the property, or <code>undefined</code> if there is no getter (accessor descriptors only)." },
+ { "name": "set", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a setter for the property, or <code>undefined</code> if there is no setter (accessor descriptors only)." },
+ { "name": "configurable", "type": "boolean", "description": "True if the type of this property descriptor may be changed and if the property may be deleted from the corresponding object." },
+ { "name": "enumerable", "type": "boolean", "description": "True if this property shows up during enumeration of the properties on the corresponding object." },
+ { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
+ { "name": "isOwn", "optional": true, "type": "boolean", "description": "True if the property is owned for the object." },
+ { "name": "symbol", "$ref": "RemoteObject", "optional": true, "description": "Property symbol object, if the property is of the <code>symbol</code> type." }
+ ]
+ },
+ {
+ "id": "InternalPropertyDescriptor",
+ "type": "object",
+ "description": "Object internal property descriptor. This property isn't normally visible in JavaScript code.",
+ "properties": [
+ { "name": "name", "type": "string", "description": "Conventional property name." },
+ { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." }
+ ]
+ },
+ {
+ "id": "CallArgument",
+ "type": "object",
+ "description": "Represents function call argument. Either remote object id <code>objectId</code>, primitive <code>value</code>, unserializable primitive value or neither of (for undefined) them should be specified.",
+ "properties": [
+ { "name": "value", "type": "any", "optional": true, "description": "Primitive value." },
+ { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified." },
+ { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Remote object handle." }
+ ]
+ },
+ {
+ "id": "ExecutionContextId",
+ "type": "integer",
+ "description": "Id of an execution context."
+ },
+ {
+ "id": "ExecutionContextDescription",
+ "type": "object",
+ "description": "Description of an isolated world.",
+ "properties": [
+ { "name": "id", "$ref": "ExecutionContextId", "description": "Unique id of the execution context. It can be used to specify in which execution context script evaluation should be performed." },
+ { "name": "origin", "type": "string", "description": "Execution context origin." },
+ { "name": "name", "type": "string", "description": "Human readable name describing given context." },
+ { "name": "auxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." }
+ ]
+ },
+ {
+ "id": "ExceptionDetails",
+ "type": "object",
+ "description": "Detailed information about exception (or error) that was thrown during script compilation or execution.",
+ "properties": [
+ { "name": "exceptionId", "type": "integer", "description": "Exception id." },
+ { "name": "text", "type": "string", "description": "Exception text, which should be used together with exception object when available." },
+ { "name": "lineNumber", "type": "integer", "description": "Line number of the exception location (0-based)." },
+ { "name": "columnNumber", "type": "integer", "description": "Column number of the exception location (0-based)." },
+ { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Script ID of the exception location." },
+ { "name": "url", "type": "string", "optional": true, "description": "URL of the exception location, to be used when the script was not reported." },
+ { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "JavaScript stack trace if available." },
+ { "name": "exception", "$ref": "RemoteObject", "optional": true, "description": "Exception object if available." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Identifier of the context where exception happened." }
+ ]
+ },
+ {
+ "id": "Timestamp",
+ "type": "number",
+ "description": "Number of milliseconds since epoch."
+ },
+ {
+ "id": "CallFrame",
+ "type": "object",
+ "description": "Stack entry for runtime errors and assertions.",
+ "properties": [
+ { "name": "functionName", "type": "string", "description": "JavaScript function name." },
+ { "name": "scriptId", "$ref": "ScriptId", "description": "JavaScript script id." },
+ { "name": "url", "type": "string", "description": "JavaScript script name or url." },
+ { "name": "lineNumber", "type": "integer", "description": "JavaScript script line number (0-based)." },
+ { "name": "columnNumber", "type": "integer", "description": "JavaScript script column number (0-based)." }
+ ]
+ },
+ {
+ "id": "StackTrace",
+ "type": "object",
+ "description": "Call frames for assertions or error messages.",
+ "exported": true,
+ "properties": [
+ { "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
+ { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "JavaScript function name." },
+ { "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "evaluate",
+ "async": true,
+ "parameters": [
+ { "name": "expression", "type": "string", "description": "Expression to evaluate." },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
+ { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
+ { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+ { "name": "contextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform evaluation. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
+ { "name": "userGesture", "type": "boolean", "optional": true, "experimental": true, "description": "Whether execution should be treated as initiated by user in the UI." },
+ { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "RemoteObject", "description": "Evaluation result." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Evaluates expression on global object."
+ },
+ {
+ "name": "awaitPromise",
+ "async": true,
+ "parameters": [
+ { "name": "promiseObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the promise." },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "RemoteObject", "description": "Promise result. Will contain rejected value if promise was rejected." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details if stack strace is available."}
+ ],
+ "description": "Add handler to promise with given promise object id."
+ },
+ {
+ "name": "callFunctionOn",
+ "async": true,
+ "parameters": [
+ { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to call function on." },
+ { "name": "functionDeclaration", "type": "string", "description": "Declaration of the function to call." },
+ { "name": "arguments", "type": "array", "items": { "$ref": "CallArgument", "description": "Call argument." }, "optional": true, "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target object." },
+ { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
+ { "name": "userGesture", "type": "boolean", "optional": true, "experimental": true, "description": "Whether execution should be treated as initiated by user in the UI." },
+ { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "RemoteObject", "description": "Call result." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Calls function with given declaration on the given object. Object group of the result is inherited from the target object."
+ },
+ {
+ "name": "getProperties",
+ "parameters": [
+ { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to return properties for." },
+ { "name": "ownProperties", "optional": true, "type": "boolean", "description": "If true, returns properties belonging only to the element itself, not to its prototype chain." },
+ { "name": "accessorPropertiesOnly", "optional": true, "type": "boolean", "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.", "experimental": true },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the results." }
+ ],
+ "returns": [
+ { "name": "result", "type": "array", "items": { "$ref": "PropertyDescriptor" }, "description": "Object properties." },
+ { "name": "internalProperties", "optional": true, "type": "array", "items": { "$ref": "InternalPropertyDescriptor" }, "description": "Internal object properties (only of the element itself)." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Returns properties of a given object. Object group of the result is inherited from the target object."
+ },
+ {
+ "name": "releaseObject",
+ "parameters": [
+ { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to release." }
+ ],
+ "description": "Releases remote object with given id."
+ },
+ {
+ "name": "releaseObjectGroup",
+ "parameters": [
+ { "name": "objectGroup", "type": "string", "description": "Symbolic object group name." }
+ ],
+ "description": "Releases all remote objects that belong to a given group."
+ },
+ {
+ "name": "runIfWaitingForDebugger",
+ "description": "Tells inspected instance to run if it was waiting for debugger to attach."
+ },
+ {
+ "name": "enable",
+ "description": "Enables reporting of execution contexts creation by means of <code>executionContextCreated</code> event. When the reporting gets enabled the event will be sent immediately for each existing execution context."
+ },
+ {
+ "name": "disable",
+ "description": "Disables reporting of execution contexts creation."
+ },
+ {
+ "name": "discardConsoleEntries",
+ "description": "Discards collected exceptions and console API calls."
+ },
+ {
+ "name": "setCustomObjectFormatterEnabled",
+ "parameters": [
+ {
+ "name": "enabled",
+ "type": "boolean"
+ }
+ ],
+ "experimental": true
+ },
+ {
+ "name": "compileScript",
+ "parameters": [
+ { "name": "expression", "type": "string", "description": "Expression to compile." },
+ { "name": "sourceURL", "type": "string", "description": "Source url to be set for the script." },
+ { "name": "persistScript", "type": "boolean", "description": "Specifies whether the compiled script should be persisted." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." }
+ ],
+ "returns": [
+ { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Id of the script." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Compiles expression."
+ },
+ {
+ "name": "runScript",
+ "async": true,
+ "parameters": [
+ { "name": "scriptId", "$ref": "ScriptId", "description": "Id of the script to run." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
+ { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+ { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." },
+ { "name": "awaitPromise", "type": "boolean", "optional": true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "RemoteObject", "description": "Run result." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Runs script with given id in a given context."
+ }
+ ],
+ "events": [
+ {
+ "name": "executionContextCreated",
+ "parameters": [
+ { "name": "context", "$ref": "ExecutionContextDescription", "description": "A newly created execution contex." }
+ ],
+ "description": "Issued when new execution context is created."
+ },
+ {
+ "name": "executionContextDestroyed",
+ "parameters": [
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Id of the destroyed context" }
+ ],
+ "description": "Issued when execution context is destroyed."
+ },
+ {
+ "name": "executionContextsCleared",
+ "description": "Issued when all executionContexts were cleared in browser"
+ },
+ {
+ "name": "exceptionThrown",
+ "description": "Issued when exception was thrown and unhandled.",
+ "parameters": [
+ { "name": "timestamp", "$ref": "Timestamp", "description": "Timestamp of the exception." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails" }
+ ]
+ },
+ {
+ "name": "exceptionRevoked",
+ "description": "Issued when unhandled exception was revoked.",
+ "parameters": [
+ { "name": "reason", "type": "string", "description": "Reason describing why exception was revoked." },
+ { "name": "exceptionId", "type": "integer", "description": "The id of revoked exception, as reported in <code>exceptionUnhandled</code>." }
+ ]
+ },
+ {
+ "name": "consoleAPICalled",
+ "description": "Issued when console API was called.",
+ "parameters": [
+ { "name": "type", "type": "string", "enum": ["log", "debug", "info", "error", "warning", "dir", "dirxml", "table", "trace", "clear", "startGroup", "startGroupCollapsed", "endGroup", "assert", "profile", "profileEnd"], "description": "Type of the call." },
+ { "name": "args", "type": "array", "items": { "$ref": "RemoteObject" }, "description": "Call arguments." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Identifier of the context where the call was made." },
+ { "name": "timestamp", "$ref": "Timestamp", "description": "Call timestamp." },
+ { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "Stack trace captured when the call was made." }
+ ]
+ },
+ {
+ "name": "inspectRequested",
+ "description": "Issued when object should be inspected (for example, as a result of inspect() command line API call).",
+ "parameters": [
+ { "name": "object", "$ref": "RemoteObject" },
+ { "name": "hints", "type": "object" }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "Debugger",
+ "description": "Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing breakpoints, stepping through execution, exploring stack traces, etc.",
+ "dependencies": ["Runtime"],
+ "types": [
+ {
+ "id": "BreakpointId",
+ "type": "string",
+ "description": "Breakpoint identifier."
+ },
+ {
+ "id": "CallFrameId",
+ "type": "string",
+ "description": "Call frame identifier."
+ },
+ {
+ "id": "Location",
+ "type": "object",
+ "properties": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Script identifier as reported in the <code>Debugger.scriptParsed</code>." },
+ { "name": "lineNumber", "type": "integer", "description": "Line number in the script (0-based)." },
+ { "name": "columnNumber", "type": "integer", "optional": true, "description": "Column number in the script (0-based)." }
+ ],
+ "description": "Location in the source code."
+ },
+ {
+ "id": "ScriptPosition",
+ "experimental": true,
+ "type": "object",
+ "properties": [
+ { "name": "lineNumber", "type": "integer" },
+ { "name": "columnNumber", "type": "integer" }
+ ],
+ "description": "Location in the source code."
+ },
+ {
+ "id": "CallFrame",
+ "type": "object",
+ "properties": [
+ { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused." },
+ { "name": "functionName", "type": "string", "description": "Name of the JavaScript function called on this call frame." },
+ { "name": "functionLocation", "$ref": "Location", "optional": true, "experimental": true, "description": "Location in the source code." },
+ { "name": "location", "$ref": "Location", "description": "Location in the source code." },
+ { "name": "scopeChain", "type": "array", "items": { "$ref": "Scope" }, "description": "Scope chain for this call frame." },
+ { "name": "this", "$ref": "Runtime.RemoteObject", "description": "<code>this</code> object for this call frame." },
+ { "name": "returnValue", "$ref": "Runtime.RemoteObject", "optional": true, "description": "The value being returned, if the function is at return point." }
+ ],
+ "description": "JavaScript call frame. Array of call frames form the call stack."
+ },
+ {
+ "id": "Scope",
+ "type": "object",
+ "properties": [
+ { "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script"], "description": "Scope type." },
+ { "name": "object", "$ref": "Runtime.RemoteObject", "description": "Object representing the scope. For <code>global</code> and <code>with</code> scopes it represents the actual object; for the rest of the scopes, it is artificial transient object enumerating scope variables as its properties." },
+ { "name": "name", "type": "string", "optional": true },
+ { "name": "startLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope starts" },
+ { "name": "endLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope ends" }
+ ],
+ "description": "Scope description."
+ },
+ {
+ "id": "SearchMatch",
+ "type": "object",
+ "description": "Search match for resource.",
+ "exported": true,
+ "properties": [
+ { "name": "lineNumber", "type": "number", "description": "Line number in resource content." },
+ { "name": "lineContent", "type": "string", "description": "Line with match content." }
+ ],
+ "experimental": true
+ }
+ ],
+ "commands": [
+ {
+ "name": "enable",
+ "description": "Enables debugger for the given page. Clients should not assume that the debugging has been enabled until the result for this command is received."
+ },
+ {
+ "name": "disable",
+ "description": "Disables debugger for given page."
+ },
+ {
+ "name": "setBreakpointsActive",
+ "parameters": [
+ { "name": "active", "type": "boolean", "description": "New value for breakpoints active state." }
+ ],
+ "description": "Activates / deactivates all breakpoints on the page."
+ },
+ {
+ "name": "setSkipAllPauses",
+ "parameters": [
+ { "name": "skip", "type": "boolean", "description": "New value for skip pauses state." }
+ ],
+ "description": "Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc)."
+ },
+ {
+ "name": "setBreakpointByUrl",
+ "parameters": [
+ { "name": "lineNumber", "type": "integer", "description": "Line number to set breakpoint at." },
+ { "name": "url", "type": "string", "optional": true, "description": "URL of the resources to set breakpoint on." },
+ { "name": "urlRegex", "type": "string", "optional": true, "description": "Regex pattern for the URLs of the resources to set breakpoints on. Either <code>url</code> or <code>urlRegex</code> must be specified." },
+ { "name": "columnNumber", "type": "integer", "optional": true, "description": "Offset in the line to set breakpoint at." },
+ { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
+ ],
+ "returns": [
+ { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
+ { "name": "locations", "type": "array", "items": { "$ref": "Location" }, "description": "List of the locations this breakpoint resolved into upon addition." }
+ ],
+ "description": "Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this command is issued, all existing parsed scripts will have breakpoints resolved and returned in <code>locations</code> property. Further matching script parsing will result in subsequent <code>breakpointResolved</code> events issued. This logical breakpoint will survive page reloads."
+ },
+ {
+ "name": "setBreakpoint",
+ "parameters": [
+ { "name": "location", "$ref": "Location", "description": "Location to set breakpoint in." },
+ { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
+ ],
+ "returns": [
+ { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
+ { "name": "actualLocation", "$ref": "Location", "description": "Location this breakpoint resolved into." }
+ ],
+ "description": "Sets JavaScript breakpoint at a given location."
+ },
+ {
+ "name": "removeBreakpoint",
+ "parameters": [
+ { "name": "breakpointId", "$ref": "BreakpointId" }
+ ],
+ "description": "Removes JavaScript breakpoint."
+ },
+ {
+ "name": "continueToLocation",
+ "parameters": [
+ { "name": "location", "$ref": "Location", "description": "Location to continue to." }
+ ],
+ "description": "Continues execution until specific location is reached."
+ },
+ {
+ "name": "stepOver",
+ "description": "Steps over the statement."
+ },
+ {
+ "name": "stepInto",
+ "description": "Steps into the function call."
+ },
+ {
+ "name": "stepOut",
+ "description": "Steps out of the function call."
+ },
+ {
+ "name": "pause",
+ "description": "Stops on the next JavaScript statement."
+ },
+ {
+ "name": "resume",
+ "description": "Resumes JavaScript execution."
+ },
+ {
+ "name": "searchInContent",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to search in." },
+ { "name": "query", "type": "string", "description": "String to search for." },
+ { "name": "caseSensitive", "type": "boolean", "optional": true, "description": "If true, search is case sensitive." },
+ { "name": "isRegex", "type": "boolean", "optional": true, "description": "If true, treats string parameter as regex." }
+ ],
+ "returns": [
+ { "name": "result", "type": "array", "items": { "$ref": "SearchMatch" }, "description": "List of search matches." }
+ ],
+ "experimental": true,
+ "description": "Searches for given string in script content."
+ },
+ {
+ "name": "setScriptSource",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to edit." },
+ { "name": "scriptSource", "type": "string", "description": "New content of the script." },
+ { "name": "dryRun", "type": "boolean", "optional": true, "description": " If true the change will not actually be applied. Dry run may be used to get result description without actually modifying the code." }
+ ],
+ "returns": [
+ { "name": "callFrames", "type": "array", "optional": true, "items": { "$ref": "CallFrame" }, "description": "New stack trace in case editing has happened while VM was stopped." },
+ { "name": "stackChanged", "type": "boolean", "optional": true, "description": "Whether current call stack was modified after applying the changes." },
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
+ { "name": "exceptionDetails", "optional": true, "$ref": "Runtime.ExceptionDetails", "description": "Exception details if any." }
+ ],
+ "description": "Edits JavaScript source live."
+ },
+ {
+ "name": "restartFrame",
+ "parameters": [
+ { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." }
+ ],
+ "returns": [
+ { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "New stack trace." },
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
+ ],
+ "description": "Restarts particular call frame from the beginning."
+ },
+ {
+ "name": "getScriptSource",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to get source for." }
+ ],
+ "returns": [
+ { "name": "scriptSource", "type": "string", "description": "Script source." }
+ ],
+ "description": "Returns source for the script with given id."
+ },
+ {
+ "name": "setPauseOnExceptions",
+ "parameters": [
+ { "name": "state", "type": "string", "enum": ["none", "uncaught", "all"], "description": "Pause on exceptions mode." }
+ ],
+ "description": "Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or no exceptions. Initial pause on exceptions state is <code>none</code>."
+ },
+ {
+ "name": "evaluateOnCallFrame",
+ "parameters": [
+ { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." },
+ { "name": "expression", "type": "string", "description": "Expression to evaluate." },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "String object group name to put result into (allows rapid releasing resulting object handles using <code>releaseObjectGroup</code>)." },
+ { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false." },
+ { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Object wrapper for the evaluation result." },
+ { "name": "exceptionDetails", "$ref": "Runtime.ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Evaluates expression on a given call frame."
+ },
+ {
+ "name": "setVariableValue",
+ "parameters": [
+ { "name": "scopeNumber", "type": "integer", "description": "0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch' scope types are allowed. Other scopes could be manipulated manually." },
+ { "name": "variableName", "type": "string", "description": "Variable name." },
+ { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New variable value." },
+ { "name": "callFrameId", "$ref": "CallFrameId", "description": "Id of callframe that holds variable." }
+ ],
+ "description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually."
+ },
+ {
+ "name": "setAsyncCallStackDepth",
+ "parameters": [
+ { "name": "maxDepth", "type": "integer", "description": "Maximum depth of async call stacks. Setting to <code>0</code> will effectively disable collecting async call stacks (default)." }
+ ],
+ "description": "Enables or disables async call stacks tracking."
+ },
+ {
+ "name": "setBlackboxPatterns",
+ "parameters": [
+ { "name": "patterns", "type": "array", "items": { "type": "string" }, "description": "Array of regexps that will be used to check script url for blackbox state." }
+ ],
+ "experimental": true,
+ "description": "Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in scripts with url matching one of the patterns. VM will try to leave blackboxed script by performing 'step in' several times, finally resorting to 'step out' if unsuccessful."
+ },
+ {
+ "name": "setBlackboxedRanges",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script." },
+ { "name": "positions", "type": "array", "items": { "$ref": "ScriptPosition" } }
+ ],
+ "experimental": true,
+ "description": "Makes backend skip steps in the script in blackboxed ranges. VM will try leave blacklisted scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful. Positions array contains positions where blackbox state is changed. First interval isn't blackboxed. Array should be sorted."
+ }
+ ],
+ "events": [
+ {
+ "name": "scriptParsed",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
+ { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
+ { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
+ { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
+ { "name": "endLine", "type": "integer", "description": "Last line of the script." },
+ { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
+ { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
+ { "name": "hash", "type": "string", "description": "Content hash of the script."},
+ { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
+ { "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "experimental": true },
+ { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
+ { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
+ ],
+ "description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected scripts upon enabling debugger."
+ },
+ {
+ "name": "scriptFailedToParse",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
+ { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
+ { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
+ { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
+ { "name": "endLine", "type": "integer", "description": "Last line of the script." },
+ { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
+ { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
+ { "name": "hash", "type": "string", "description": "Content hash of the script."},
+ { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
+ { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
+ { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
+ ],
+ "description": "Fired when virtual machine fails to parse the script."
+ },
+ {
+ "name": "breakpointResolved",
+ "parameters": [
+ { "name": "breakpointId", "$ref": "BreakpointId", "description": "Breakpoint unique identifier." },
+ { "name": "location", "$ref": "Location", "description": "Actual breakpoint location." }
+ ],
+ "description": "Fired when breakpoint is resolved to an actual script and location."
+ },
+ {
+ "name": "paused",
+ "parameters": [
+ { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
+ { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "other" ], "description": "Pause reason.", "exported": true },
+ { "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
+ { "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs" },
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
+ ],
+ "description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria."
+ },
+ {
+ "name": "resumed",
+ "description": "Fired when the virtual machine resumed execution."
+ }
+ ]
+ },
+ {
+ "domain": "Console",
+ "description": "This domain is deprecated - use Runtime or Log instead.",
+ "dependencies": ["Runtime"],
+ "deprecated": true,
+ "types": [
+ {
+ "id": "ConsoleMessage",
+ "type": "object",
+ "description": "Console message.",
+ "properties": [
+ { "name": "source", "type": "string", "enum": ["xml", "javascript", "network", "console-api", "storage", "appcache", "rendering", "security", "other", "deprecation", "worker"], "description": "Message source." },
+ { "name": "level", "type": "string", "enum": ["log", "warning", "error", "debug", "info"], "description": "Message severity." },
+ { "name": "text", "type": "string", "description": "Message text." },
+ { "name": "url", "type": "string", "optional": true, "description": "URL of the message origin." },
+ { "name": "line", "type": "integer", "optional": true, "description": "Line number in the resource that generated this message (1-based)." },
+ { "name": "column", "type": "integer", "optional": true, "description": "Column number in the resource that generated this message (1-based)." }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "enable",
+ "description": "Enables console domain, sends the messages collected so far to the client by means of the <code>messageAdded</code> notification."
+ },
+ {
+ "name": "disable",
+ "description": "Disables console domain, prevents further console messages from being reported to the client."
+ },
+ {
+ "name": "clearMessages",
+ "description": "Does nothing."
+ }
+ ],
+ "events": [
+ {
+ "name": "messageAdded",
+ "parameters": [
+ { "name": "message", "$ref": "ConsoleMessage", "description": "Console message that has been added." }
+ ],
+ "description": "Issued when new console message is added."
+ }
+ ]
+ },
+ {
+ "domain": "Profiler",
+ "dependencies": ["Runtime", "Debugger"],
+ "types": [
+ {
+ "id": "ProfileNode",
+ "type": "object",
+ "description": "Profile node. Holds callsite information, execution statistics and child nodes.",
+ "properties": [
+ { "name": "id", "type": "integer", "description": "Unique id of the node." },
+ { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
+ { "name": "hitCount", "type": "integer", "optional": true, "experimental": true, "description": "Number of samples where this node was on top of the call stack." },
+ { "name": "children", "type": "array", "items": { "type": "integer" }, "optional": true, "description": "Child node ids." },
+ { "name": "deoptReason", "type": "string", "optional": true, "description": "The reason of being not optimized. The function may be deoptimized or marked as don't optimize."},
+ { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "optional": true, "experimental": true, "description": "An array of source position ticks." }
+ ]
+ },
+ {
+ "id": "Profile",
+ "type": "object",
+ "description": "Profile.",
+ "properties": [
+ { "name": "nodes", "type": "array", "items": { "$ref": "ProfileNode" }, "description": "The list of profile nodes. First item is the root node." },
+ { "name": "startTime", "type": "number", "description": "Profiling start timestamp in microseconds." },
+ { "name": "endTime", "type": "number", "description": "Profiling end timestamp in microseconds." },
+ { "name": "samples", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Ids of samples top nodes." },
+ { "name": "timeDeltas", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Time intervals between adjacent samples in microseconds. The first delta is relative to the profile startTime." }
+ ]
+ },
+ {
+ "id": "PositionTickInfo",
+ "type": "object",
+ "experimental": true,
+ "description": "Specifies a number of samples attributed to a certain source position.",
+ "properties": [
+ { "name": "line", "type": "integer", "description": "Source line number (1-based)." },
+ { "name": "ticks", "type": "integer", "description": "Number of samples attributed to the source line." }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "enable"
+ },
+ {
+ "name": "disable"
+ },
+ {
+ "name": "setSamplingInterval",
+ "parameters": [
+ { "name": "interval", "type": "integer", "description": "New sampling interval in microseconds." }
+ ],
+ "description": "Changes CPU profiler sampling interval. Must be called before CPU profiles recording started."
+ },
+ {
+ "name": "start"
+ },
+ {
+ "name": "stop",
+ "returns": [
+ { "name": "profile", "$ref": "Profile", "description": "Recorded profile." }
+ ]
+ }
+ ],
+ "events": [
+ {
+ "name": "consoleProfileStarted",
+ "parameters": [
+ { "name": "id", "type": "string" },
+ { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profile()." },
+ { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
+ ],
+ "description": "Sent when new profile recodring is started using console.profile() call."
+ },
+ {
+ "name": "consoleProfileFinished",
+ "parameters": [
+ { "name": "id", "type": "string" },
+ { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profileEnd()." },
+ { "name": "profile", "$ref": "Profile" },
+ { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "HeapProfiler",
+ "dependencies": ["Runtime"],
+ "experimental": true,
+ "types": [
+ {
+ "id": "HeapSnapshotObjectId",
+ "type": "string",
+ "description": "Heap snapshot object id."
+ },
+ {
+ "id": "SamplingHeapProfileNode",
+ "type": "object",
+ "description": "Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.",
+ "properties": [
+ { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
+ { "name": "selfSize", "type": "number", "description": "Allocations size in bytes for the node excluding children." },
+ { "name": "children", "type": "array", "items": { "$ref": "SamplingHeapProfileNode" }, "description": "Child nodes." }
+ ]
+ },
+ {
+ "id": "SamplingHeapProfile",
+ "type": "object",
+ "description": "Profile.",
+ "properties": [
+ { "name": "head", "$ref": "SamplingHeapProfileNode" }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "enable"
+ },
+ {
+ "name": "disable"
+ },
+ {
+ "name": "startTrackingHeapObjects",
+ "parameters": [
+ { "name": "trackAllocations", "type": "boolean", "optional": true }
+ ]
+ },
+ {
+ "name": "stopTrackingHeapObjects",
+ "parameters": [
+ { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken when the tracking is stopped." }
+ ]
+ },
+ {
+ "name": "takeHeapSnapshot",
+ "parameters": [
+ { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken." }
+ ]
+ },
+ {
+ "name": "collectGarbage"
+ },
+ {
+ "name": "getObjectByHeapObjectId",
+ "parameters": [
+ { "name": "objectId", "$ref": "HeapSnapshotObjectId" },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Evaluation result." }
+ ]
+ },
+ {
+ "name": "addInspectedHeapObject",
+ "parameters": [
+ { "name": "heapObjectId", "$ref": "HeapSnapshotObjectId", "description": "Heap snapshot object id to be accessible by means of $x command line API." }
+ ],
+ "description": "Enables console to refer to the node with given id via $x (see Command Line API for more details $x functions)."
+ },
+ {
+ "name": "getHeapObjectId",
+ "parameters": [
+ { "name": "objectId", "$ref": "Runtime.RemoteObjectId", "description": "Identifier of the object to get heap object id for." }
+ ],
+ "returns": [
+ { "name": "heapSnapshotObjectId", "$ref": "HeapSnapshotObjectId", "description": "Id of the heap snapshot object corresponding to the passed remote object id." }
+ ]
+ },
+ {
+ "name": "startSampling",
+ "parameters": [
+ { "name": "samplingInterval", "type": "number", "optional": true, "description": "Average sample interval in bytes. Poisson distribution is used for the intervals. The default value is 32768 bytes." }
+ ]
+ },
+ {
+ "name": "stopSampling",
+ "returns": [
+ { "name": "profile", "$ref": "SamplingHeapProfile", "description": "Recorded sampling heap profile." }
+ ]
+ }
+ ],
+ "events": [
+ {
+ "name": "addHeapSnapshotChunk",
+ "parameters": [
+ { "name": "chunk", "type": "string" }
+ ]
+ },
+ {
+ "name": "resetProfiles"
+ },
+ {
+ "name": "reportHeapSnapshotProgress",
+ "parameters": [
+ { "name": "done", "type": "integer" },
+ { "name": "total", "type": "integer" },
+ { "name": "finished", "type": "boolean", "optional": true }
+ ]
+ },
+ {
+ "name": "lastSeenObjectId",
+ "description": "If heap objects tracking has been started then backend regulary sends a current value for last seen object id and corresponding timestamp. If the were changes in the heap since last event then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.",
+ "parameters": [
+ { "name": "lastSeenObjectId", "type": "integer" },
+ { "name": "timestamp", "type": "number" }
+ ]
+ },
+ {
+ "name": "heapStatsUpdate",
+ "description": "If heap objects tracking has been started then backend may send update for one or more fragments",
+ "parameters": [
+ { "name": "statsUpdate", "type": "array", "items": { "type": "integer" }, "description": "An array of triplets. Each triplet describes a fragment. The first integer is the fragment index, the second integer is a total count of objects for the fragment, the third integer is a total size of the objects for the fragment."}
+ ]
+ }
+ ]
+ }]
+}
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
index 314cb5f13c..aff6806222 100644
--- a/deps/v8/src/inspector/js_protocol.json
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -1,6 +1,33 @@
{
- "version": { "major": "1", "minor": "1" },
- "domains": [{
+ "version": { "major": "1", "minor": "2" },
+ "domains": [
+ {
+ "domain": "Schema",
+ "description": "Provides information about the protocol schema.",
+ "types": [
+ {
+ "id": "Domain",
+ "type": "object",
+ "description": "Description of the protocol domain.",
+ "exported": true,
+ "properties": [
+ { "name": "name", "type": "string", "description": "Domain name." },
+ { "name": "version", "type": "string", "description": "Domain version." }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "getDomains",
+ "description": "Returns supported domains.",
+ "handlers": ["browser", "renderer"],
+ "returns": [
+ { "name": "domains", "type": "array", "items": { "$ref": "Domain" }, "description": "List of supported domains." }
+ ]
+ }
+ ]
+ },
+ {
"domain": "Runtime",
"description": "Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects. Evaluation results are returned as mirror object that expose object type, string representation and unique identifier that can be used for further object reference. Original objects are maintained in memory unless they are either explicitly released or are released along with the other objects in their object group.",
"types": [
@@ -15,25 +42,32 @@
"description": "Unique object identifier."
},
{
+ "id": "UnserializableValue",
+ "type": "string",
+ "enum": ["Infinity", "NaN", "-Infinity", "-0"],
+ "description": "Primitive value which cannot be JSON-stringified."
+ },
+ {
"id": "RemoteObject",
"type": "object",
"description": "Mirror object referencing original JavaScript object.",
"exported": true,
"properties": [
{ "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
- { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
+ { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error", "proxy", "promise", "typedarray"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
{ "name": "className", "type": "string", "optional": true, "description": "Object class (constructor) name. Specified for <code>object</code> type values only." },
- { "name": "value", "type": "any", "optional": true, "description": "Remote object value in case of primitive values or JSON values (if it was requested), or description string if the value can not be JSON-stringified (like NaN, Infinity, -Infinity, -0)." },
+ { "name": "value", "type": "any", "optional": true, "description": "Remote object value in case of primitive values or JSON values (if it was requested)." },
+ { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified does not have <code>value</code>, but gets this property." },
{ "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
{ "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Unique object identifier (for non-primitive values)." },
- { "name": "preview", "$ref": "ObjectPreview", "optional": true, "description": "Preview containing abbreviated property values. Specified for <code>object</code> type values only.", "hidden": true },
- { "name": "customPreview", "$ref": "CustomPreview", "optional": true, "hidden": true}
+ { "name": "preview", "$ref": "ObjectPreview", "optional": true, "description": "Preview containing abbreviated property values. Specified for <code>object</code> type values only.", "experimental": true },
+ { "name": "customPreview", "$ref": "CustomPreview", "optional": true, "experimental": true}
]
},
{
"id": "CustomPreview",
"type": "object",
- "hidden": true,
+ "experimental": true,
"properties": [
{ "name": "header", "type": "string"},
{ "name": "hasBody", "type": "boolean"},
@@ -45,7 +79,7 @@
{
"id": "ObjectPreview",
"type": "object",
- "hidden": true,
+ "experimental": true,
"description": "Object containing abbreviated remote object value.",
"properties": [
{ "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
@@ -59,7 +93,7 @@
{
"id": "PropertyPreview",
"type": "object",
- "hidden": true,
+ "experimental": true,
"properties": [
{ "name": "name", "type": "string", "description": "Property name." },
{ "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol", "accessor"], "description": "Object type. Accessor means that the property itself is an accessor property." },
@@ -71,7 +105,7 @@
{
"id": "EntryPreview",
"type": "object",
- "hidden": true,
+ "experimental": true,
"properties": [
{ "name": "key", "$ref": "ObjectPreview", "optional": true, "description": "Preview of the key. Specified for map-like collection entries." },
{ "name": "value", "$ref": "ObjectPreview", "description": "Preview of the value." }
@@ -90,8 +124,8 @@
{ "name": "configurable", "type": "boolean", "description": "True if the type of this property descriptor may be changed and if the property may be deleted from the corresponding object." },
{ "name": "enumerable", "type": "boolean", "description": "True if this property shows up during enumeration of the properties on the corresponding object." },
{ "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
- { "name": "isOwn", "optional": true, "type": "boolean", "description": "True if the property is owned for the object.", "hidden": true },
- { "name": "symbol", "$ref": "RemoteObject", "optional": true, "description": "Property symbol object, if the property is of the <code>symbol</code> type.", "hidden": true }
+ { "name": "isOwn", "optional": true, "type": "boolean", "description": "True if the property is owned for the object." },
+ { "name": "symbol", "$ref": "RemoteObject", "optional": true, "description": "Property symbol object, if the property is of the <code>symbol</code> type." }
]
},
{
@@ -101,17 +135,16 @@
"properties": [
{ "name": "name", "type": "string", "description": "Conventional property name." },
{ "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." }
- ],
- "hidden": true
+ ]
},
{
"id": "CallArgument",
"type": "object",
- "description": "Represents function call argument. Either remote object id <code>objectId</code> or primitive <code>value</code> or neither of (for undefined) them should be specified.",
+ "description": "Represents function call argument. Either remote object id <code>objectId</code>, primitive <code>value</code>, unserializable primitive value or neither of (for undefined) them should be specified.",
"properties": [
- { "name": "value", "type": "any", "optional": true, "description": "Primitive value, or description string if the value can not be JSON-stringified (like NaN, Infinity, -Infinity, -0)." },
- { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Remote object handle." },
- { "name": "type", "optional": true, "hidden": true, "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." }
+ { "name": "value", "type": "any", "optional": true, "description": "Primitive value." },
+ { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified." },
+ { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Remote object handle." }
]
},
{
@@ -125,31 +158,31 @@
"description": "Description of an isolated world.",
"properties": [
{ "name": "id", "$ref": "ExecutionContextId", "description": "Unique id of the execution context. It can be used to specify in which execution context script evaluation should be performed." },
- { "name": "isDefault", "type": "boolean", "description": "Whether context is the default page context (as opposite to e.g. context of content script).", "hidden": true },
- { "name": "origin", "type": "string", "description": "Execution context origin.", "hidden": true},
- { "name": "name", "type": "string", "description": "Human readable name describing given context.", "hidden": true},
- { "name": "frameId", "type": "string", "description": "Id of the owning frame. May be an empty string if the context is not associated with a frame." }
+ { "name": "origin", "type": "string", "description": "Execution context origin." },
+ { "name": "name", "type": "string", "description": "Human readable name describing given context." },
+ { "name": "auxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." }
]
},
{
"id": "ExceptionDetails",
"type": "object",
- "hidden": true,
"description": "Detailed information about exception (or error) that was thrown during script compilation or execution.",
"properties": [
- { "name": "text", "type": "string", "description": "Exception text." },
- { "name": "scriptId", "$ref": "ScriptId", "description": "Script ID of the exception location." },
+ { "name": "exceptionId", "type": "integer", "description": "Exception id." },
+ { "name": "text", "type": "string", "description": "Exception text, which should be used together with exception object when available." },
{ "name": "lineNumber", "type": "integer", "description": "Line number of the exception location (0-based)." },
{ "name": "columnNumber", "type": "integer", "description": "Column number of the exception location (0-based)." },
+ { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Script ID of the exception location." },
{ "name": "url", "type": "string", "optional": true, "description": "URL of the exception location, to be used when the script was not reported." },
- { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "JavaScript stack trace if available." }
+ { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "JavaScript stack trace if available." },
+ { "name": "exception", "$ref": "RemoteObject", "optional": true, "description": "Exception object if available." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Identifier of the context where exception happened." }
]
},
{
"id": "Timestamp",
"type": "number",
- "description": "Number of milliseconds since epoch.",
- "hidden": true
+ "description": "Number of milliseconds since epoch."
},
{
"id": "CallFrame",
@@ -171,7 +204,7 @@
"properties": [
{ "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
{ "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "JavaScript function name." },
- { "name": "parent", "$ref": "StackTrace", "optional": true, "hidden": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
+ { "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
]
}
],
@@ -182,24 +215,22 @@
"parameters": [
{ "name": "expression", "type": "string", "description": "Expression to evaluate." },
{ "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation.", "hidden": true },
- { "name": "doNotPauseOnExceptionsAndMuteConsole", "type": "boolean", "optional": true, "description": "Specifies whether evaluation should stop on exceptions and mute console. Overrides setPauseOnException state.", "hidden": true },
- { "name": "contextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which isolated context to perform evaluation. Each content script lives in an isolated context and this parameter may be used to specify one of those contexts. If the parameter is omitted or 0 the evaluation will be performed in the context of the inspected page." },
+ { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
+ { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+ { "name": "contextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform evaluation. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
{ "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "hidden": true, "description": "Whether preview should be generated for the result." },
- { "name": "userGesture", "type": "boolean", "optional": true, "hidden": true, "description": "Whether execution should be treated as initiated by user in the UI." },
- { "name": "awaitPromise", "type": "boolean", "optional":true, "hidden": true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
+ { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
+ { "name": "userGesture", "type": "boolean", "optional": true, "experimental": true, "description": "Whether execution should be treated as initiated by user in the UI." },
+ { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
],
"returns": [
{ "name": "result", "$ref": "RemoteObject", "description": "Evaluation result." },
- { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "hidden": true, "description": "Exception details."}
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
],
"description": "Evaluates expression on global object."
},
{
"name": "awaitPromise",
- "hidden": true,
"async": true,
"parameters": [
{ "name": "promiseObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the promise." },
@@ -208,25 +239,26 @@
],
"returns": [
{ "name": "result", "$ref": "RemoteObject", "description": "Promise result. Will contain rejected value if promise was rejected." },
- { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the promise was rejected." },
{ "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details if stack strace is available."}
],
"description": "Add handler to promise with given promise object id."
},
{
"name": "callFunctionOn",
+ "async": true,
"parameters": [
{ "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to call function on." },
{ "name": "functionDeclaration", "type": "string", "description": "Declaration of the function to call." },
{ "name": "arguments", "type": "array", "items": { "$ref": "CallArgument", "description": "Call argument." }, "optional": true, "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target object." },
- { "name": "doNotPauseOnExceptionsAndMuteConsole", "type": "boolean", "optional": true, "description": "Specifies whether function call should stop on exceptions and mute console. Overrides setPauseOnException state.", "hidden": true },
+ { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
{ "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "hidden": true, "description": "Whether preview should be generated for the result." },
- { "name": "userGesture", "type": "boolean", "optional": true, "hidden": true, "description": "Whether execution should be treated as initiated by user in the UI." }
+ { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
+ { "name": "userGesture", "type": "boolean", "optional": true, "experimental": true, "description": "Whether execution should be treated as initiated by user in the UI." },
+ { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
],
"returns": [
{ "name": "result", "$ref": "RemoteObject", "description": "Call result." },
- { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." }
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
],
"description": "Calls function with given declaration on the given object. Object group of the result is inherited from the target object."
},
@@ -235,13 +267,13 @@
"parameters": [
{ "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to return properties for." },
{ "name": "ownProperties", "optional": true, "type": "boolean", "description": "If true, returns properties belonging only to the element itself, not to its prototype chain." },
- { "name": "accessorPropertiesOnly", "optional": true, "type": "boolean", "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.", "hidden": true },
- { "name": "generatePreview", "type": "boolean", "optional": true, "hidden": true, "description": "Whether preview should be generated for the results." }
+ { "name": "accessorPropertiesOnly", "optional": true, "type": "boolean", "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.", "experimental": true },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the results." }
],
"returns": [
{ "name": "result", "type": "array", "items": { "$ref": "PropertyDescriptor" }, "description": "Object properties." },
- { "name": "internalProperties", "optional": true, "type": "array", "items": { "$ref": "InternalPropertyDescriptor" }, "description": "Internal object properties (only of the element itself).", "hidden": true },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "hidden": true, "description": "Exception details."}
+ { "name": "internalProperties", "optional": true, "type": "array", "items": { "$ref": "InternalPropertyDescriptor" }, "description": "Internal object properties (only of the element itself)." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
],
"description": "Returns properties of a given object. Object group of the result is inherited from the target object."
},
@@ -260,9 +292,8 @@
"description": "Releases all remote objects that belong to a given group."
},
{
- "name": "run",
- "hidden": true,
- "description": "Tells inspected instance(worker or page) that it can run in case it was started paused."
+ "name": "runIfWaitingForDebugger",
+ "description": "Tells inspected instance to run if it was waiting for debugger to attach."
},
{
"name": "enable",
@@ -270,12 +301,10 @@
},
{
"name": "disable",
- "hidden": true,
"description": "Disables reporting of execution contexts creation."
},
{
"name": "discardConsoleEntries",
- "hidden": true,
"description": "Discards collected exceptions and console API calls."
},
{
@@ -286,16 +315,15 @@
"type": "boolean"
}
],
- "hidden": true
+ "experimental": true
},
{
"name": "compileScript",
- "hidden": true,
"parameters": [
{ "name": "expression", "type": "string", "description": "Expression to compile." },
{ "name": "sourceURL", "type": "string", "description": "Source url to be set for the script." },
{ "name": "persistScript", "type": "boolean", "description": "Specifies whether the compiled script should be persisted." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Specifies in which isolated context to perform script run. Each content script lives in an isolated context and this parameter is used to specify one of those contexts." }
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." }
],
"returns": [
{ "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Id of the script." },
@@ -305,13 +333,16 @@
},
{
"name": "runScript",
- "hidden": true,
+ "async": true,
"parameters": [
{ "name": "scriptId", "$ref": "ScriptId", "description": "Id of the script to run." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Specifies in which isolated context to perform script run. Each content script lives in an isolated context and this parameter is used to specify one of those contexts." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
{ "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
- { "name": "doNotPauseOnExceptionsAndMuteConsole", "type": "boolean", "optional": true, "description": "Specifies whether script run should stop on exceptions and mute console. Overrides setPauseOnException state." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." }
+ { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
+ { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." },
+ { "name": "awaitPromise", "type": "boolean", "optional": true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
],
"returns": [
{ "name": "result", "$ref": "RemoteObject", "description": "Run result." },
@@ -343,22 +374,17 @@
"name": "exceptionThrown",
"description": "Issued when exception was thrown and unhandled.",
"parameters": [
- { "name": "exceptionId", "type": "integer", "description": "Exception id." },
{ "name": "timestamp", "$ref": "Timestamp", "description": "Timestamp of the exception." },
- { "name": "details", "$ref": "ExceptionDetails" },
- { "name": "exception", "$ref": "RemoteObject", "optional": true, "description": "Exception object." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Identifier of the context where exception happened." }
- ],
- "hidden": true
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails" }
+ ]
},
{
"name": "exceptionRevoked",
"description": "Issued when unhandled exception was revoked.",
"parameters": [
- { "name": "message", "type": "string", "description": "Message describing why exception was revoked." },
+ { "name": "reason", "type": "string", "description": "Reason describing why exception was revoked." },
{ "name": "exceptionId", "type": "integer", "description": "The id of revoked exception, as reported in <code>exceptionUnhandled</code>." }
- ],
- "hidden": true
+ ]
},
{
"name": "consoleAPICalled",
@@ -369,16 +395,15 @@
{ "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Identifier of the context where the call was made." },
{ "name": "timestamp", "$ref": "Timestamp", "description": "Call timestamp." },
{ "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "Stack trace captured when the call was made." }
- ],
- "hidden": true
+ ]
},
{
"name": "inspectRequested",
+ "description": "Issued when object should be inspected (for example, as a result of inspect() command line API call).",
"parameters": [
{ "name": "object", "$ref": "RemoteObject" },
{ "name": "hints", "type": "object" }
- ],
- "hidden": true
+ ]
}
]
},
@@ -409,7 +434,7 @@
},
{
"id": "ScriptPosition",
- "hidden": true,
+ "experimental": true,
"type": "object",
"properties": [
{ "name": "lineNumber", "type": "integer" },
@@ -423,11 +448,11 @@
"properties": [
{ "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused." },
{ "name": "functionName", "type": "string", "description": "Name of the JavaScript function called on this call frame." },
- { "name": "functionLocation", "$ref": "Location", "optional": true, "hidden": true, "description": "Location in the source code." },
+ { "name": "functionLocation", "$ref": "Location", "optional": true, "experimental": true, "description": "Location in the source code." },
{ "name": "location", "$ref": "Location", "description": "Location in the source code." },
{ "name": "scopeChain", "type": "array", "items": { "$ref": "Scope" }, "description": "Scope chain for this call frame." },
{ "name": "this", "$ref": "Runtime.RemoteObject", "description": "<code>this</code> object for this call frame." },
- { "name": "returnValue", "$ref": "Runtime.RemoteObject", "optional": true, "hidden": true, "description": "The value being returned, if the function is at return point." }
+ { "name": "returnValue", "$ref": "Runtime.RemoteObject", "optional": true, "description": "The value being returned, if the function is at return point." }
],
"description": "JavaScript call frame. Array of call frames form the call stack."
},
@@ -437,9 +462,9 @@
"properties": [
{ "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script"], "description": "Scope type." },
{ "name": "object", "$ref": "Runtime.RemoteObject", "description": "Object representing the scope. For <code>global</code> and <code>with</code> scopes it represents the actual object; for the rest of the scopes, it is artificial transient object enumerating scope variables as its properties." },
- { "name": "name", "type": "string", "optional": true, "hidden": true },
- { "name": "startLocation", "$ref": "Location", "optional": true, "hidden": true, "description": "Location in the source code where scope starts" },
- { "name": "endLocation", "$ref": "Location", "optional": true, "hidden": true, "description": "Location in the source code where scope ends" }
+ { "name": "name", "type": "string", "optional": true },
+ { "name": "startLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope starts" },
+ { "name": "endLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope ends" }
],
"description": "Scope description."
},
@@ -452,7 +477,7 @@
{ "name": "lineNumber", "type": "number", "description": "Line number in resource content." },
{ "name": "lineContent", "type": "string", "description": "Line with match content." }
],
- "hidden": true
+ "experimental": true
}
],
"commands": [
@@ -473,9 +498,8 @@
},
{
"name": "setSkipAllPauses",
- "hidden": true,
"parameters": [
- { "name": "skipped", "type": "boolean", "description": "New value for skip pauses state." }
+ { "name": "skip", "type": "boolean", "description": "New value for skip pauses state." }
],
"description": "Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc)."
},
@@ -516,8 +540,7 @@
{
"name": "continueToLocation",
"parameters": [
- { "name": "location", "$ref": "Location", "description": "Location to continue to." },
- { "name": "interstatementLocation", "type": "boolean", "optional": true, "hidden": true, "description": "Allows breakpoints at the intemediate positions inside statements." }
+ { "name": "location", "$ref": "Location", "description": "Location to continue to." }
],
"description": "Continues execution until specific location is reached."
},
@@ -552,27 +575,21 @@
"returns": [
{ "name": "result", "type": "array", "items": { "$ref": "SearchMatch" }, "description": "List of search matches." }
],
+ "experimental": true,
"description": "Searches for given string in script content."
},
{
- "name": "canSetScriptSource",
- "returns": [
- { "name": "result", "type": "boolean", "description": "True if <code>setScriptSource</code> is supported." }
- ],
- "description": "Always returns true."
- },
- {
"name": "setScriptSource",
"parameters": [
{ "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to edit." },
{ "name": "scriptSource", "type": "string", "description": "New content of the script." },
- { "name": "preview", "type": "boolean", "optional": true, "description": " If true the change will not actually be applied. Preview mode may be used to get result description without actually modifying the code.", "hidden": true }
+ { "name": "dryRun", "type": "boolean", "optional": true, "description": " If true the change will not actually be applied. Dry run may be used to get result description without actually modifying the code." }
],
"returns": [
{ "name": "callFrames", "type": "array", "optional": true, "items": { "$ref": "CallFrame" }, "description": "New stack trace in case editing has happened while VM was stopped." },
- { "name": "stackChanged", "type": "boolean", "optional": true, "description": "Whether current call stack was modified after applying the changes.", "hidden": true },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any.", "hidden": true },
- { "name": "compileError", "optional": true, "$ref": "Runtime.ExceptionDetails", "description": "Error data if any." }
+ { "name": "stackChanged", "type": "boolean", "optional": true, "description": "Whether current call stack was modified after applying the changes." },
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
+ { "name": "exceptionDetails", "optional": true, "$ref": "Runtime.ExceptionDetails", "description": "Exception details if any." }
],
"description": "Edits JavaScript source live."
},
@@ -585,7 +602,6 @@
{ "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "New stack trace." },
{ "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
],
- "hidden": true,
"description": "Restarts particular call frame from the beginning."
},
{
@@ -611,15 +627,14 @@
{ "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." },
{ "name": "expression", "type": "string", "description": "Expression to evaluate." },
{ "name": "objectGroup", "type": "string", "optional": true, "description": "String object group name to put result into (allows rapid releasing resulting object handles using <code>releaseObjectGroup</code>)." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false.", "hidden": true },
- { "name": "doNotPauseOnExceptionsAndMuteConsole", "type": "boolean", "optional": true, "description": "Specifies whether evaluation should stop on exceptions and mute console. Overrides setPauseOnException state.", "hidden": true },
+ { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false." },
+ { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
{ "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "hidden": true, "description": "Whether preview should be generated for the result." }
+ { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." }
],
"returns": [
{ "name": "result", "$ref": "Runtime.RemoteObject", "description": "Object wrapper for the evaluation result." },
- { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
- { "name": "exceptionDetails", "$ref": "Runtime.ExceptionDetails", "optional": true, "hidden": true, "description": "Exception details."}
+ { "name": "exceptionDetails", "$ref": "Runtime.ExceptionDetails", "optional": true, "description": "Exception details."}
],
"description": "Evaluates expression on a given call frame."
},
@@ -631,24 +646,13 @@
{ "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New variable value." },
{ "name": "callFrameId", "$ref": "CallFrameId", "description": "Id of callframe that holds variable." }
],
- "hidden": true,
"description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually."
},
{
- "name": "getBacktrace",
- "returns": [
- { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
- ],
- "hidden": true,
- "description": "Returns call stack including variables changed since VM was paused. VM must be paused."
- },
- {
"name": "setAsyncCallStackDepth",
"parameters": [
{ "name": "maxDepth", "type": "integer", "description": "Maximum depth of async call stacks. Setting to <code>0</code> will effectively disable collecting async call stacks (default)." }
],
- "hidden": true,
"description": "Enables or disables async call stacks tracking."
},
{
@@ -656,7 +660,7 @@
"parameters": [
{ "name": "patterns", "type": "array", "items": { "type": "string" }, "description": "Array of regexps that will be used to check script url for blackbox state." }
],
- "hidden": true,
+ "experimental": true,
"description": "Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in scripts with url matching one of the patterns. VM will try to leave blackboxed script by performing 'step in' several times, finally resorting to 'step out' if unsuccessful."
},
{
@@ -665,7 +669,7 @@
{ "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script." },
{ "name": "positions", "type": "array", "items": { "$ref": "ScriptPosition" } }
],
- "hidden": true,
+ "experimental": true,
"description": "Makes backend skip steps in the script in blackboxed ranges. VM will try leave blacklisted scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful. Positions array contains positions where blackbox state is changed. First interval isn't blackboxed. Array should be sorted."
}
],
@@ -679,14 +683,12 @@
{ "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
{ "name": "endLine", "type": "integer", "description": "Last line of the script." },
{ "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
- { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context.", "hidden": true },
- { "name": "hash", "type": "string", "hidden": true, "description": "Content hash of the script."},
- { "name": "isContentScript", "type": "boolean", "optional": true, "description": "Determines whether this script is a user extension script." },
- { "name": "isInternalScript", "type": "boolean", "optional": true, "description": "Determines whether this script is an internal script.", "hidden": true },
- { "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "hidden": true },
+ { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
+ { "name": "hash", "type": "string", "description": "Content hash of the script."},
+ { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
+ { "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "experimental": true },
{ "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "hidden": true },
- { "name": "deprecatedCommentWasUsed", "type": "boolean", "optional": true, "hidden": true, "description": "True, if '//@ sourceURL' or '//@ sourceMappingURL' was used."}
+ { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
],
"description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected scripts upon enabling debugger."
},
@@ -699,13 +701,11 @@
{ "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
{ "name": "endLine", "type": "integer", "description": "Last line of the script." },
{ "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
- { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context.", "hidden": true },
- { "name": "hash", "type": "string", "hidden": true, "description": "Content hash of the script."},
- { "name": "isContentScript", "type": "boolean", "optional": true, "description": "Determines whether this script is a user extension script." },
- { "name": "isInternalScript", "type": "boolean", "optional": true, "description": "Determines whether this script is an internal script.", "hidden": true },
+ { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
+ { "name": "hash", "type": "string", "description": "Content hash of the script."},
+ { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
{ "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "hidden": true },
- { "name": "deprecatedCommentWasUsed", "type": "boolean", "optional": true, "hidden": true, "description": "True, if '//@ sourceURL' or '//@ sourceMappingURL' was used."}
+ { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
],
"description": "Fired when virtual machine fails to parse the script."
},
@@ -723,8 +723,8 @@
{ "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
{ "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "other" ], "description": "Pause reason.", "exported": true },
{ "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
- { "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs", "hidden": true },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any.", "hidden": true }
+ { "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs" },
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
],
"description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria."
},
@@ -775,56 +775,42 @@
{ "name": "message", "$ref": "ConsoleMessage", "description": "Console message that has been added." }
],
"description": "Issued when new console message is added."
- },
- {
- "name": "messageRepeatCountUpdated",
- "parameters": [
- { "name": "count", "type": "integer", "description": "New repeat count value." },
- { "name": "timestamp", "$ref": "Runtime.Timestamp", "description": "Timestamp of most recent message in batch.", "hidden": true }
- ],
- "description": "Not issued.",
- "deprecated": true
- },
- {
- "name": "messagesCleared",
- "description": "Not issued.",
- "deprecated": true
}
]
},
{
"domain": "Profiler",
"dependencies": ["Runtime", "Debugger"],
- "hidden": true,
"types": [
{
- "id": "CPUProfileNode",
+ "id": "ProfileNode",
"type": "object",
- "description": "CPU Profile node. Holds callsite information, execution statistics and child nodes.",
+ "description": "Profile node. Holds callsite information, execution statistics and child nodes.",
"properties": [
- { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
- { "name": "hitCount", "type": "integer", "description": "Number of samples where this node was on top of the call stack." },
- { "name": "children", "type": "array", "items": { "$ref": "CPUProfileNode" }, "description": "Child nodes." },
- { "name": "deoptReason", "type": "string", "description": "The reason of being not optimized. The function may be deoptimized or marked as don't optimize."},
{ "name": "id", "type": "integer", "description": "Unique id of the node." },
- { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "description": "An array of source position ticks." }
+ { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
+ { "name": "hitCount", "type": "integer", "optional": true, "experimental": true, "description": "Number of samples where this node was on top of the call stack." },
+ { "name": "children", "type": "array", "items": { "type": "integer" }, "optional": true, "description": "Child node ids." },
+ { "name": "deoptReason", "type": "string", "optional": true, "description": "The reason of being not optimized. The function may be deoptimized or marked as don't optimize."},
+ { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "optional": true, "experimental": true, "description": "An array of source position ticks." }
]
},
{
- "id": "CPUProfile",
+ "id": "Profile",
"type": "object",
"description": "Profile.",
"properties": [
- { "name": "head", "$ref": "CPUProfileNode" },
- { "name": "startTime", "type": "number", "description": "Profiling start time in seconds." },
- { "name": "endTime", "type": "number", "description": "Profiling end time in seconds." },
+ { "name": "nodes", "type": "array", "items": { "$ref": "ProfileNode" }, "description": "The list of profile nodes. First item is the root node." },
+ { "name": "startTime", "type": "number", "description": "Profiling start timestamp in microseconds." },
+ { "name": "endTime", "type": "number", "description": "Profiling end timestamp in microseconds." },
{ "name": "samples", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Ids of samples top nodes." },
- { "name": "timestamps", "optional": true, "type": "array", "items": { "type": "number" }, "description": "Timestamps of the samples in microseconds." }
+ { "name": "timeDeltas", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Time intervals between adjacent samples in microseconds. The first delta is relative to the profile startTime." }
]
},
{
"id": "PositionTickInfo",
"type": "object",
+ "experimental": true,
"description": "Specifies a number of samples attributed to a certain source position.",
"properties": [
{ "name": "line", "type": "integer", "description": "Source line number (1-based)." },
@@ -852,7 +838,7 @@
{
"name": "stop",
"returns": [
- { "name": "profile", "$ref": "CPUProfile", "description": "Recorded profile." }
+ { "name": "profile", "$ref": "Profile", "description": "Recorded profile." }
]
}
],
@@ -862,7 +848,7 @@
"parameters": [
{ "name": "id", "type": "string" },
{ "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profile()." },
- { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as argument to console.profile()." }
+ { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
],
"description": "Sent when new profile recodring is started using console.profile() call."
},
@@ -871,8 +857,8 @@
"parameters": [
{ "name": "id", "type": "string" },
{ "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profileEnd()." },
- { "name": "profile", "$ref": "CPUProfile" },
- { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as argunet to console.profile()." }
+ { "name": "profile", "$ref": "Profile" },
+ { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
]
}
]
@@ -880,7 +866,7 @@
{
"domain": "HeapProfiler",
"dependencies": ["Runtime"],
- "hidden": true,
+ "experimental": true,
"types": [
{
"id": "HeapSnapshotObjectId",
diff --git a/deps/v8/src/inspector/protocol-platform.h b/deps/v8/src/inspector/protocol-platform.h
new file mode 100644
index 0000000000..c7723932b4
--- /dev/null
+++ b/deps/v8/src/inspector/protocol-platform.h
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_PROTOCOLPLATFORM_H_
+#define V8_INSPECTOR_PROTOCOLPLATFORM_H_
+
+#include <memory>
+
+#include "src/base/logging.h"
+
+namespace v8_inspector {
+
+template <typename T>
+std::unique_ptr<T> wrapUnique(T* ptr) {
+ return std::unique_ptr<T>(ptr);
+}
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_PROTOCOLPLATFORM_H_
diff --git a/deps/v8/src/inspector/remote-object-id.cc b/deps/v8/src/inspector/remote-object-id.cc
new file mode 100644
index 0000000000..d83020c6f2
--- /dev/null
+++ b/deps/v8/src/inspector/remote-object-id.cc
@@ -0,0 +1,76 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/remote-object-id.h"
+
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/string-util.h"
+
+namespace v8_inspector {
+
+RemoteObjectIdBase::RemoteObjectIdBase() : m_injectedScriptId(0) {}
+
+std::unique_ptr<protocol::DictionaryValue>
+RemoteObjectIdBase::parseInjectedScriptId(const String16& objectId) {
+ std::unique_ptr<protocol::Value> parsedValue = protocol::parseJSON(objectId);
+ if (!parsedValue || parsedValue->type() != protocol::Value::TypeObject)
+ return nullptr;
+
+ std::unique_ptr<protocol::DictionaryValue> parsedObjectId(
+ protocol::DictionaryValue::cast(parsedValue.release()));
+ bool success =
+ parsedObjectId->getInteger("injectedScriptId", &m_injectedScriptId);
+ if (success) return parsedObjectId;
+ return nullptr;
+}
+
+RemoteObjectId::RemoteObjectId() : RemoteObjectIdBase(), m_id(0) {}
+
+std::unique_ptr<RemoteObjectId> RemoteObjectId::parse(
+ ErrorString* errorString, const String16& objectId) {
+ std::unique_ptr<RemoteObjectId> result(new RemoteObjectId());
+ std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
+ result->parseInjectedScriptId(objectId);
+ if (!parsedObjectId) {
+ *errorString = "Invalid remote object id";
+ return nullptr;
+ }
+
+ bool success = parsedObjectId->getInteger("id", &result->m_id);
+ if (!success) {
+ *errorString = "Invalid remote object id";
+ return nullptr;
+ }
+ return result;
+}
+
+RemoteCallFrameId::RemoteCallFrameId()
+ : RemoteObjectIdBase(), m_frameOrdinal(0) {}
+
+std::unique_ptr<RemoteCallFrameId> RemoteCallFrameId::parse(
+ ErrorString* errorString, const String16& objectId) {
+ std::unique_ptr<RemoteCallFrameId> result(new RemoteCallFrameId());
+ std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
+ result->parseInjectedScriptId(objectId);
+ if (!parsedObjectId) {
+ *errorString = "Invalid call frame id";
+ return nullptr;
+ }
+
+ bool success = parsedObjectId->getInteger("ordinal", &result->m_frameOrdinal);
+ if (!success) {
+ *errorString = "Invalid call frame id";
+ return nullptr;
+ }
+
+ return result;
+}
+
+String16 RemoteCallFrameId::serialize(int injectedScriptId, int frameOrdinal) {
+ return "{\"ordinal\":" + String16::fromInteger(frameOrdinal) +
+ ",\"injectedScriptId\":" + String16::fromInteger(injectedScriptId) +
+ "}";
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/remote-object-id.h b/deps/v8/src/inspector/remote-object-id.h
new file mode 100644
index 0000000000..a32f568fb8
--- /dev/null
+++ b/deps/v8/src/inspector/remote-object-id.h
@@ -0,0 +1,58 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_REMOTEOBJECTID_H_
+#define V8_INSPECTOR_REMOTEOBJECTID_H_
+
+#include "src/inspector/protocol/Forward.h"
+
+namespace v8_inspector {
+
+using protocol::ErrorString;
+
+class RemoteObjectIdBase {
+ public:
+ int contextId() const { return m_injectedScriptId; }
+
+ protected:
+ RemoteObjectIdBase();
+ ~RemoteObjectIdBase() {}
+
+ std::unique_ptr<protocol::DictionaryValue> parseInjectedScriptId(
+ const String16&);
+
+ int m_injectedScriptId;
+};
+
+class RemoteObjectId final : public RemoteObjectIdBase {
+ public:
+ static std::unique_ptr<RemoteObjectId> parse(ErrorString*, const String16&);
+ ~RemoteObjectId() {}
+ int id() const { return m_id; }
+
+ private:
+ RemoteObjectId();
+
+ int m_id;
+};
+
+class RemoteCallFrameId final : public RemoteObjectIdBase {
+ public:
+ static std::unique_ptr<RemoteCallFrameId> parse(ErrorString*,
+ const String16&);
+ ~RemoteCallFrameId() {}
+
+ int frameOrdinal() const { return m_frameOrdinal; }
+
+ static String16 serialize(int injectedScriptId, int frameOrdinal);
+
+ private:
+ RemoteCallFrameId();
+
+ int m_frameOrdinal;
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_REMOTEOBJECTID_H_
diff --git a/deps/v8/src/inspector/script-breakpoint.h b/deps/v8/src/inspector/script-breakpoint.h
new file mode 100644
index 0000000000..025233dd19
--- /dev/null
+++ b/deps/v8/src/inspector/script-breakpoint.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_SCRIPTBREAKPOINT_H_
+#define V8_INSPECTOR_SCRIPTBREAKPOINT_H_
+
+#include "src/inspector/string-16.h"
+
+namespace v8_inspector {
+
+struct ScriptBreakpoint {
+ ScriptBreakpoint() : ScriptBreakpoint(0, 0, String16()) {}
+
+ ScriptBreakpoint(int lineNumber, int columnNumber, const String16& condition)
+ : lineNumber(lineNumber),
+ columnNumber(columnNumber),
+ condition(condition) {}
+
+ int lineNumber;
+ int columnNumber;
+ String16 condition;
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_SCRIPTBREAKPOINT_H_
diff --git a/deps/v8/src/inspector/search-util.cc b/deps/v8/src/inspector/search-util.cc
new file mode 100644
index 0000000000..a6fba06c11
--- /dev/null
+++ b/deps/v8/src/inspector/search-util.cc
@@ -0,0 +1,164 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/search-util.h"
+
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-regex.h"
+
+namespace v8_inspector {
+
+namespace {
+
+String16 findMagicComment(const String16& content, const String16& name,
+ bool multiline) {
+ DCHECK(name.find("=") == String16::kNotFound);
+ size_t length = content.length();
+ size_t nameLength = name.length();
+
+ size_t pos = length;
+ size_t equalSignPos = 0;
+ size_t closingCommentPos = 0;
+ while (true) {
+ pos = content.reverseFind(name, pos);
+ if (pos == String16::kNotFound) return String16();
+
+ // Check for a /\/[\/*][@#][ \t]/ regexp (length of 4) before found name.
+ if (pos < 4) return String16();
+ pos -= 4;
+ if (content[pos] != '/') continue;
+ if ((content[pos + 1] != '/' || multiline) &&
+ (content[pos + 1] != '*' || !multiline))
+ continue;
+ if (content[pos + 2] != '#' && content[pos + 2] != '@') continue;
+ if (content[pos + 3] != ' ' && content[pos + 3] != '\t') continue;
+ equalSignPos = pos + 4 + nameLength;
+ if (equalSignPos < length && content[equalSignPos] != '=') continue;
+ if (multiline) {
+ closingCommentPos = content.find("*/", equalSignPos + 1);
+ if (closingCommentPos == String16::kNotFound) return String16();
+ }
+
+ break;
+ }
+
+ DCHECK(equalSignPos);
+ DCHECK(!multiline || closingCommentPos);
+ size_t urlPos = equalSignPos + 1;
+ String16 match = multiline
+ ? content.substring(urlPos, closingCommentPos - urlPos)
+ : content.substring(urlPos);
+
+ size_t newLine = match.find("\n");
+ if (newLine != String16::kNotFound) match = match.substring(0, newLine);
+ match = match.stripWhiteSpace();
+
+ for (size_t i = 0; i < match.length(); ++i) {
+ UChar c = match[i];
+ if (c == '"' || c == '\'' || c == ' ' || c == '\t') return "";
+ }
+
+ return match;
+}
+
+String16 createSearchRegexSource(const String16& text) {
+ String16Builder result;
+
+ for (size_t i = 0; i < text.length(); i++) {
+ UChar c = text[i];
+ if (c == '[' || c == ']' || c == '(' || c == ')' || c == '{' || c == '}' ||
+ c == '+' || c == '-' || c == '*' || c == '.' || c == ',' || c == '?' ||
+ c == '\\' || c == '^' || c == '$' || c == '|') {
+ result.append('\\');
+ }
+ result.append(c);
+ }
+
+ return result.toString();
+}
+
+std::unique_ptr<std::vector<size_t>> lineEndings(const String16& text) {
+ std::unique_ptr<std::vector<size_t>> result(new std::vector<size_t>());
+
+ const String16 lineEndString = "\n";
+ size_t start = 0;
+ while (start < text.length()) {
+ size_t lineEnd = text.find(lineEndString, start);
+ if (lineEnd == String16::kNotFound) break;
+
+ result->push_back(lineEnd);
+ start = lineEnd + 1;
+ }
+ result->push_back(text.length());
+
+ return result;
+}
+
+std::vector<std::pair<int, String16>> scriptRegexpMatchesByLines(
+ const V8Regex& regex, const String16& text) {
+ std::vector<std::pair<int, String16>> result;
+ if (text.isEmpty()) return result;
+
+ std::unique_ptr<std::vector<size_t>> endings(lineEndings(text));
+ size_t size = endings->size();
+ size_t start = 0;
+ for (size_t lineNumber = 0; lineNumber < size; ++lineNumber) {
+ size_t lineEnd = endings->at(lineNumber);
+ String16 line = text.substring(start, lineEnd - start);
+ if (line.length() && line[line.length() - 1] == '\r')
+ line = line.substring(0, line.length() - 1);
+
+ int matchLength;
+ if (regex.match(line, 0, &matchLength) != -1)
+ result.push_back(std::pair<int, String16>(lineNumber, line));
+
+ start = lineEnd + 1;
+ }
+ return result;
+}
+
+std::unique_ptr<protocol::Debugger::SearchMatch> buildObjectForSearchMatch(
+ int lineNumber, const String16& lineContent) {
+ return protocol::Debugger::SearchMatch::create()
+ .setLineNumber(lineNumber)
+ .setLineContent(lineContent)
+ .build();
+}
+
+std::unique_ptr<V8Regex> createSearchRegex(V8InspectorImpl* inspector,
+ const String16& query,
+ bool caseSensitive, bool isRegex) {
+ String16 regexSource = isRegex ? query : createSearchRegexSource(query);
+ return wrapUnique(new V8Regex(inspector, regexSource, caseSensitive));
+}
+
+} // namespace
+
+std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>>
+searchInTextByLinesImpl(V8InspectorSession* session, const String16& text,
+ const String16& query, const bool caseSensitive,
+ const bool isRegex) {
+ std::unique_ptr<V8Regex> regex = createSearchRegex(
+ static_cast<V8InspectorSessionImpl*>(session)->inspector(), query,
+ caseSensitive, isRegex);
+ std::vector<std::pair<int, String16>> matches =
+ scriptRegexpMatchesByLines(*regex.get(), text);
+
+ std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> result;
+ for (const auto& match : matches)
+ result.push_back(buildObjectForSearchMatch(match.first, match.second));
+ return result;
+}
+
+String16 findSourceURL(const String16& content, bool multiline) {
+ return findMagicComment(content, "sourceURL", multiline);
+}
+
+String16 findSourceMapURL(const String16& content, bool multiline) {
+ return findMagicComment(content, "sourceMappingURL", multiline);
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/search-util.h b/deps/v8/src/inspector/search-util.h
new file mode 100644
index 0000000000..8f5753b620
--- /dev/null
+++ b/deps/v8/src/inspector/search-util.h
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_SEARCHUTIL_H_
+#define V8_INSPECTOR_SEARCHUTIL_H_
+
+#include "src/inspector/protocol/Debugger.h"
+#include "src/inspector/string-util.h"
+
+namespace v8_inspector {
+
+class V8InspectorSession;
+
+String16 findSourceURL(const String16& content, bool multiline);
+String16 findSourceMapURL(const String16& content, bool multiline);
+std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>>
+searchInTextByLinesImpl(V8InspectorSession*, const String16& text,
+ const String16& query, bool caseSensitive,
+ bool isRegex);
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_SEARCHUTIL_H_
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
new file mode 100644
index 0000000000..f6084602f4
--- /dev/null
+++ b/deps/v8/src/inspector/string-16.cc
@@ -0,0 +1,518 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/string-16.h"
+
+#include <algorithm>
+#include <cctype>
+#include <cstdlib>
+#include <cstring>
+#include <iomanip>
+#include <limits>
+#include <locale>
+#include <sstream>
+#include <string>
+
+#include "src/base/platform/platform.h"
+#include "src/inspector/protocol-platform.h"
+
+namespace v8_inspector {
+
+namespace {
+
+bool isASCII(UChar c) { return !(c & ~0x7F); }
+
+bool isSpaceOrNewLine(UChar c) {
+ return isASCII(c) && c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9));
+}
+
+int charactersToInteger(const UChar* characters, size_t length,
+ bool* ok = nullptr) {
+ std::vector<char> buffer;
+ buffer.reserve(length + 1);
+ for (size_t i = 0; i < length; ++i) {
+ if (!isASCII(characters[i])) {
+ if (ok) *ok = false;
+ return 0;
+ }
+ buffer.push_back(static_cast<char>(characters[i]));
+ }
+ buffer.push_back('\0');
+
+ char* endptr;
+ int64_t result =
+ static_cast<int64_t>(std::strtol(buffer.data(), &endptr, 10));
+ if (ok) {
+ *ok = !(*endptr) && result <= std::numeric_limits<int>::max() &&
+ result >= std::numeric_limits<int>::min();
+ }
+ return static_cast<int>(result);
+}
+
+const UChar replacementCharacter = 0xFFFD;
+using UChar32 = uint32_t;
+
+inline int inlineUTF8SequenceLengthNonASCII(char b0) {
+ if ((b0 & 0xC0) != 0xC0) return 0;
+ if ((b0 & 0xE0) == 0xC0) return 2;
+ if ((b0 & 0xF0) == 0xE0) return 3;
+ if ((b0 & 0xF8) == 0xF0) return 4;
+ return 0;
+}
+
+inline int inlineUTF8SequenceLength(char b0) {
+ return isASCII(b0) ? 1 : inlineUTF8SequenceLengthNonASCII(b0);
+}
+
+// Once the bits are split out into bytes of UTF-8, this is a mask OR-ed
+// into the first byte, depending on how many bytes follow. There are
+// as many entries in this table as there are UTF-8 sequence types.
+// (I.e., one byte sequence, two byte... etc.). Remember that sequences
+// for *legal* UTF-8 will be 4 or fewer bytes total.
+static const unsigned char firstByteMark[7] = {0x00, 0x00, 0xC0, 0xE0,
+ 0xF0, 0xF8, 0xFC};
+
+typedef enum {
+ conversionOK, // conversion successful
+ sourceExhausted, // partial character in source, but hit end
+ targetExhausted, // insuff. room in target for conversion
+ sourceIllegal // source sequence is illegal/malformed
+} ConversionResult;
+
+ConversionResult convertUTF16ToUTF8(const UChar** sourceStart,
+ const UChar* sourceEnd, char** targetStart,
+ char* targetEnd, bool strict) {
+ ConversionResult result = conversionOK;
+ const UChar* source = *sourceStart;
+ char* target = *targetStart;
+ while (source < sourceEnd) {
+ UChar32 ch;
+ uint32_t bytesToWrite = 0;
+ const UChar32 byteMask = 0xBF;
+ const UChar32 byteMark = 0x80;
+ const UChar* oldSource =
+ source; // In case we have to back up because of target overflow.
+ ch = static_cast<uint16_t>(*source++);
+ // If we have a surrogate pair, convert to UChar32 first.
+ if (ch >= 0xD800 && ch <= 0xDBFF) {
+ // If the 16 bits following the high surrogate are in the source buffer...
+ if (source < sourceEnd) {
+ UChar32 ch2 = static_cast<uint16_t>(*source);
+ // If it's a low surrogate, convert to UChar32.
+ if (ch2 >= 0xDC00 && ch2 <= 0xDFFF) {
+ ch = ((ch - 0xD800) << 10) + (ch2 - 0xDC00) + 0x0010000;
+ ++source;
+ } else if (strict) { // it's an unpaired high surrogate
+ --source; // return to the illegal value itself
+ result = sourceIllegal;
+ break;
+ }
+ } else { // We don't have the 16 bits following the high surrogate.
+ --source; // return to the high surrogate
+ result = sourceExhausted;
+ break;
+ }
+ } else if (strict) {
+ // UTF-16 surrogate values are illegal in UTF-32
+ if (ch >= 0xDC00 && ch <= 0xDFFF) {
+ --source; // return to the illegal value itself
+ result = sourceIllegal;
+ break;
+ }
+ }
+ // Figure out how many bytes the result will require
+ if (ch < (UChar32)0x80) {
+ bytesToWrite = 1;
+ } else if (ch < (UChar32)0x800) {
+ bytesToWrite = 2;
+ } else if (ch < (UChar32)0x10000) {
+ bytesToWrite = 3;
+ } else if (ch < (UChar32)0x110000) {
+ bytesToWrite = 4;
+ } else {
+ bytesToWrite = 3;
+ ch = replacementCharacter;
+ }
+
+ target += bytesToWrite;
+ if (target > targetEnd) {
+ source = oldSource; // Back up source pointer!
+ target -= bytesToWrite;
+ result = targetExhausted;
+ break;
+ }
+ switch (bytesToWrite) { // note: everything falls through.
+ case 4:
+ *--target = static_cast<char>((ch | byteMark) & byteMask);
+ ch >>= 6;
+ case 3:
+ *--target = static_cast<char>((ch | byteMark) & byteMask);
+ ch >>= 6;
+ case 2:
+ *--target = static_cast<char>((ch | byteMark) & byteMask);
+ ch >>= 6;
+ case 1:
+ *--target = static_cast<char>(ch | firstByteMark[bytesToWrite]);
+ }
+ target += bytesToWrite;
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
+/**
+ * Is this code point a BMP code point (U+0000..U+ffff)?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.8
+ */
+#define U_IS_BMP(c) ((uint32_t)(c) <= 0xffff)
+
+/**
+ * Is this code point a supplementary code point (U+10000..U+10ffff)?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.8
+ */
+#define U_IS_SUPPLEMENTARY(c) ((uint32_t)((c)-0x10000) <= 0xfffff)
+
+/**
+ * Is this code point a surrogate (U+d800..U+dfff)?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define U_IS_SURROGATE(c) (((c)&0xfffff800) == 0xd800)
+
+/**
+ * Get the lead surrogate (0xd800..0xdbff) for a
+ * supplementary code point (0x10000..0x10ffff).
+ * @param supplementary 32-bit code point (U+10000..U+10ffff)
+ * @return lead surrogate (U+d800..U+dbff) for supplementary
+ * @stable ICU 2.4
+ */
+#define U16_LEAD(supplementary) (UChar)(((supplementary) >> 10) + 0xd7c0)
+
+/**
+ * Get the trail surrogate (0xdc00..0xdfff) for a
+ * supplementary code point (0x10000..0x10ffff).
+ * @param supplementary 32-bit code point (U+10000..U+10ffff)
+ * @return trail surrogate (U+dc00..U+dfff) for supplementary
+ * @stable ICU 2.4
+ */
+#define U16_TRAIL(supplementary) (UChar)(((supplementary)&0x3ff) | 0xdc00)
+
+// This must be called with the length pre-determined by the first byte.
+// If presented with a length > 4, this returns false. The Unicode
+// definition of UTF-8 goes up to 4-byte sequences.
+static bool isLegalUTF8(const unsigned char* source, int length) {
+ unsigned char a;
+ const unsigned char* srcptr = source + length;
+ switch (length) {
+ default:
+ return false;
+ // Everything else falls through when "true"...
+ case 4:
+ if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ case 3:
+ if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ case 2:
+ if ((a = (*--srcptr)) > 0xBF) return false;
+
+ // no fall-through in this inner switch
+ switch (*source) {
+ case 0xE0:
+ if (a < 0xA0) return false;
+ break;
+ case 0xED:
+ if (a > 0x9F) return false;
+ break;
+ case 0xF0:
+ if (a < 0x90) return false;
+ break;
+ case 0xF4:
+ if (a > 0x8F) return false;
+ break;
+ default:
+ if (a < 0x80) return false;
+ }
+
+ case 1:
+ if (*source >= 0x80 && *source < 0xC2) return false;
+ }
+ if (*source > 0xF4) return false;
+ return true;
+}
+
+// Magic values subtracted from a buffer value during UTF8 conversion.
+// This table contains as many values as there might be trailing bytes
+// in a UTF-8 sequence.
+static const UChar32 offsetsFromUTF8[6] = {0x00000000UL,
+ 0x00003080UL,
+ 0x000E2080UL,
+ 0x03C82080UL,
+ static_cast<UChar32>(0xFA082080UL),
+ static_cast<UChar32>(0x82082080UL)};
+
+static inline UChar32 readUTF8Sequence(const char*& sequence, size_t length) {
+ UChar32 character = 0;
+
+ // The cases all fall through.
+ switch (length) {
+ case 6:
+ character += static_cast<unsigned char>(*sequence++);
+ character <<= 6;
+ case 5:
+ character += static_cast<unsigned char>(*sequence++);
+ character <<= 6;
+ case 4:
+ character += static_cast<unsigned char>(*sequence++);
+ character <<= 6;
+ case 3:
+ character += static_cast<unsigned char>(*sequence++);
+ character <<= 6;
+ case 2:
+ character += static_cast<unsigned char>(*sequence++);
+ character <<= 6;
+ case 1:
+ character += static_cast<unsigned char>(*sequence++);
+ }
+
+ return character - offsetsFromUTF8[length - 1];
+}
+
+ConversionResult convertUTF8ToUTF16(const char** sourceStart,
+ const char* sourceEnd, UChar** targetStart,
+ UChar* targetEnd, bool* sourceAllASCII,
+ bool strict) {
+ ConversionResult result = conversionOK;
+ const char* source = *sourceStart;
+ UChar* target = *targetStart;
+ UChar orAllData = 0;
+ while (source < sourceEnd) {
+ int utf8SequenceLength = inlineUTF8SequenceLength(*source);
+ if (sourceEnd - source < utf8SequenceLength) {
+ result = sourceExhausted;
+ break;
+ }
+ // Do this check whether lenient or strict
+ if (!isLegalUTF8(reinterpret_cast<const unsigned char*>(source),
+ utf8SequenceLength)) {
+ result = sourceIllegal;
+ break;
+ }
+
+ UChar32 character = readUTF8Sequence(source, utf8SequenceLength);
+
+ if (target >= targetEnd) {
+ source -= utf8SequenceLength; // Back up source pointer!
+ result = targetExhausted;
+ break;
+ }
+
+ if (U_IS_BMP(character)) {
+ // UTF-16 surrogate values are illegal in UTF-32
+ if (U_IS_SURROGATE(character)) {
+ if (strict) {
+ source -= utf8SequenceLength; // return to the illegal value itself
+ result = sourceIllegal;
+ break;
+ }
+ *target++ = replacementCharacter;
+ orAllData |= replacementCharacter;
+ } else {
+ *target++ = static_cast<UChar>(character); // normal case
+ orAllData |= character;
+ }
+ } else if (U_IS_SUPPLEMENTARY(character)) {
+ // target is a character in range 0xFFFF - 0x10FFFF
+ if (target + 1 >= targetEnd) {
+ source -= utf8SequenceLength; // Back up source pointer!
+ result = targetExhausted;
+ break;
+ }
+ *target++ = U16_LEAD(character);
+ *target++ = U16_TRAIL(character);
+ orAllData = 0xffff;
+ } else {
+ if (strict) {
+ source -= utf8SequenceLength; // return to the start
+ result = sourceIllegal;
+ break; // Bail out; shouldn't continue
+ } else {
+ *target++ = replacementCharacter;
+ orAllData |= replacementCharacter;
+ }
+ }
+ }
+ *sourceStart = source;
+ *targetStart = target;
+
+ if (sourceAllASCII) *sourceAllASCII = !(orAllData & ~0x7f);
+
+ return result;
+}
+
+// Helper to write a three-byte UTF-8 code point to the buffer, caller must
+// check room is available.
+static inline void putUTF8Triple(char*& buffer, UChar ch) {
+ *buffer++ = static_cast<char>(((ch >> 12) & 0x0F) | 0xE0);
+ *buffer++ = static_cast<char>(((ch >> 6) & 0x3F) | 0x80);
+ *buffer++ = static_cast<char>((ch & 0x3F) | 0x80);
+}
+
+} // namespace
+
+// static
+String16 String16::fromInteger(int number) {
+ const size_t kBufferSize = 50;
+ char buffer[kBufferSize];
+ v8::base::OS::SNPrintF(buffer, kBufferSize, "%d", number);
+ return String16(buffer);
+}
+
+// static
+String16 String16::fromInteger(size_t number) {
+ const size_t kBufferSize = 50;
+ char buffer[kBufferSize];
+ v8::base::OS::SNPrintF(buffer, kBufferSize, "%zu", number);
+ return String16(buffer);
+}
+
+// static
+String16 String16::fromDouble(double number) {
+ std::ostringstream s;
+ s.imbue(std::locale("C"));
+ s << std::fixed << std::setprecision(std::numeric_limits<double>::digits10)
+ << number;
+ return String16(s.str().c_str());
+}
+
+// static
+String16 String16::fromDouble(double number, int precision) {
+ std::ostringstream s;
+ s.imbue(std::locale("C"));
+ s << std::fixed << std::setprecision(precision) << number;
+ return String16(s.str().c_str());
+}
+
+int String16::toInteger(bool* ok) const {
+ return charactersToInteger(characters16(), length(), ok);
+}
+
+String16 String16::stripWhiteSpace() const {
+ if (!length()) return String16();
+
+ size_t start = 0;
+ size_t end = length() - 1;
+
+ // skip white space from start
+ while (start <= end && isSpaceOrNewLine(characters16()[start])) ++start;
+
+ // only white space
+ if (start > end) return String16();
+
+ // skip white space from end
+ while (end && isSpaceOrNewLine(characters16()[end])) --end;
+
+ if (!start && end == length() - 1) return *this;
+ return String16(characters16() + start, end + 1 - start);
+}
+
+String16Builder::String16Builder() {}
+
+void String16Builder::append(const String16& s) {
+ m_buffer.insert(m_buffer.end(), s.characters16(),
+ s.characters16() + s.length());
+}
+
+void String16Builder::append(UChar c) { m_buffer.push_back(c); }
+
+void String16Builder::append(char c) {
+ UChar u = c;
+ m_buffer.push_back(u);
+}
+
+void String16Builder::append(const UChar* characters, size_t length) {
+ m_buffer.insert(m_buffer.end(), characters, characters + length);
+}
+
+void String16Builder::append(const char* characters, size_t length) {
+ m_buffer.insert(m_buffer.end(), characters, characters + length);
+}
+
+String16 String16Builder::toString() {
+ return String16(m_buffer.data(), m_buffer.size());
+}
+
+void String16Builder::reserveCapacity(size_t capacity) {
+ m_buffer.reserve(capacity);
+}
+
+String16 String16::fromUTF8(const char* stringStart, size_t length) {
+ if (!stringStart || !length) return String16();
+
+ std::vector<UChar> buffer(length);
+ UChar* bufferStart = buffer.data();
+
+ UChar* bufferCurrent = bufferStart;
+ const char* stringCurrent = stringStart;
+ if (convertUTF8ToUTF16(&stringCurrent, stringStart + length, &bufferCurrent,
+ bufferCurrent + buffer.size(), 0,
+ true) != conversionOK)
+ return String16();
+
+ size_t utf16Length = bufferCurrent - bufferStart;
+ return String16(bufferStart, utf16Length);
+}
+
+std::string String16::utf8() const {
+ size_t length = this->length();
+
+ if (!length) return std::string("");
+
+ // Allocate a buffer big enough to hold all the characters
+ // (an individual UTF-16 UChar can only expand to 3 UTF-8 bytes).
+ // Optimization ideas, if we find this function is hot:
+ // * We could speculatively create a CStringBuffer to contain 'length'
+ // characters, and resize if necessary (i.e. if the buffer contains
+ // non-ascii characters). (Alternatively, scan the buffer first for
+ // ascii characters, so we know this will be sufficient).
+ // * We could allocate a CStringBuffer with an appropriate size to
+ // have a good chance of being able to write the string into the
+ // buffer without reallocing (say, 1.5 x length).
+ if (length > std::numeric_limits<unsigned>::max() / 3) return std::string();
+ std::vector<char> bufferVector(length * 3);
+ char* buffer = bufferVector.data();
+ const UChar* characters = m_impl.data();
+
+ ConversionResult result =
+ convertUTF16ToUTF8(&characters, characters + length, &buffer,
+ buffer + bufferVector.size(), false);
+ DCHECK(
+ result !=
+ targetExhausted); // (length * 3) should be sufficient for any conversion
+
+ // Only produced from strict conversion.
+ DCHECK(result != sourceIllegal);
+
+ // Check for an unconverted high surrogate.
+ if (result == sourceExhausted) {
+ // This should be one unpaired high surrogate. Treat it the same
+ // was as an unpaired high surrogate would have been handled in
+ // the middle of a string with non-strict conversion - which is
+ // to say, simply encode it to UTF-8.
+ DCHECK((characters + 1) == (m_impl.data() + length));
+ DCHECK((*characters >= 0xD800) && (*characters <= 0xDBFF));
+ // There should be room left, since one UChar hasn't been
+ // converted.
+ DCHECK((buffer + 3) <= (buffer + bufferVector.size()));
+ putUTF8Triple(buffer, *characters);
+ }
+
+ return std::string(bufferVector.data(), buffer - bufferVector.data());
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
new file mode 100644
index 0000000000..6dc7759de0
--- /dev/null
+++ b/deps/v8/src/inspector/string-16.h
@@ -0,0 +1,133 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_STRING16_H_
+#define V8_INSPECTOR_STRING16_H_
+
+#include <stdint.h>
+#include <cctype>
+#include <climits>
+#include <cstring>
+#include <string>
+#include <vector>
+
+namespace v8_inspector {
+
+using UChar = uint16_t;
+
+class String16 {
+ public:
+ static const size_t kNotFound = static_cast<size_t>(-1);
+
+ String16() {}
+ String16(const String16& other) : m_impl(other.m_impl) {}
+ String16(const UChar* characters, size_t size) : m_impl(characters, size) {}
+ String16(const UChar* characters) // NOLINT(runtime/explicit)
+ : m_impl(characters) {}
+ String16(const char* characters) // NOLINT(runtime/explicit)
+ : String16(characters, std::strlen(characters)) {}
+ String16(const char* characters, size_t size) {
+ m_impl.resize(size);
+ for (size_t i = 0; i < size; ++i) m_impl[i] = characters[i];
+ }
+
+ static String16 fromInteger(int);
+ static String16 fromInteger(size_t);
+ static String16 fromDouble(double);
+ static String16 fromDouble(double, int precision);
+
+ int toInteger(bool* ok = nullptr) const;
+ String16 stripWhiteSpace() const;
+ const UChar* characters16() const { return m_impl.c_str(); }
+ size_t length() const { return m_impl.length(); }
+ bool isEmpty() const { return !m_impl.length(); }
+ UChar operator[](size_t index) const { return m_impl[index]; }
+ String16 substring(size_t pos, size_t len = UINT_MAX) const {
+ return String16(m_impl.substr(pos, len));
+ }
+ size_t find(const String16& str, size_t start = 0) const {
+ return m_impl.find(str.m_impl, start);
+ }
+ size_t reverseFind(const String16& str, size_t start = UINT_MAX) const {
+ return m_impl.rfind(str.m_impl, start);
+ }
+ void swap(String16& other) { m_impl.swap(other.m_impl); }
+
+ // Convenience methods.
+ std::string utf8() const;
+ static String16 fromUTF8(const char* stringStart, size_t length);
+
+ const std::basic_string<UChar>& impl() const { return m_impl; }
+ explicit String16(const std::basic_string<UChar>& impl) : m_impl(impl) {}
+
+ std::size_t hash() const {
+ if (!has_hash) {
+ size_t hash = 0;
+ for (size_t i = 0; i < length(); ++i) hash = 31 * hash + m_impl[i];
+ hash_code = hash;
+ has_hash = true;
+ }
+ return hash_code;
+ }
+
+ private:
+ std::basic_string<UChar> m_impl;
+ mutable bool has_hash = false;
+ mutable std::size_t hash_code = 0;
+};
+
+inline bool operator==(const String16& a, const String16& b) {
+ return a.impl() == b.impl();
+}
+inline bool operator<(const String16& a, const String16& b) {
+ return a.impl() < b.impl();
+}
+inline bool operator!=(const String16& a, const String16& b) {
+ return a.impl() != b.impl();
+}
+inline bool operator==(const String16& a, const char* b) {
+ return a.impl() == String16(b).impl();
+}
+inline String16 operator+(const String16& a, const char* b) {
+ return String16(a.impl() + String16(b).impl());
+}
+inline String16 operator+(const char* a, const String16& b) {
+ return String16(String16(a).impl() + b.impl());
+}
+inline String16 operator+(const String16& a, const String16& b) {
+ return String16(a.impl() + b.impl());
+}
+
+class String16Builder {
+ public:
+ String16Builder();
+ void append(const String16&);
+ void append(UChar);
+ void append(char);
+ void append(const UChar*, size_t);
+ void append(const char*, size_t);
+ String16 toString();
+ void reserveCapacity(size_t);
+
+ private:
+ std::vector<UChar> m_buffer;
+};
+
+} // namespace v8_inspector
+
+#if !defined(__APPLE__) || defined(_LIBCPP_VERSION)
+
+namespace std {
+template <>
+struct hash<v8_inspector::String16> {
+ std::size_t operator()(const v8_inspector::String16& string) const {
+ return string.hash();
+ }
+};
+
+} // namespace std
+
+#endif // !defined(__APPLE__) || defined(_LIBCPP_VERSION)
+
+#endif // V8_INSPECTOR_STRING16_H_
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
new file mode 100644
index 0000000000..e6b83a5d7d
--- /dev/null
+++ b/deps/v8/src/inspector/string-util.cc
@@ -0,0 +1,218 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/string-util.h"
+
+#include "src/inspector/protocol/Protocol.h"
+
+namespace v8_inspector {
+
+v8::Local<v8::String> toV8String(v8::Isolate* isolate, const String16& string) {
+ if (string.isEmpty()) return v8::String::Empty(isolate);
+ DCHECK(string.length() < v8::String::kMaxLength);
+ return v8::String::NewFromTwoByte(
+ isolate, reinterpret_cast<const uint16_t*>(string.characters16()),
+ v8::NewStringType::kNormal, static_cast<int>(string.length()))
+ .ToLocalChecked();
+}
+
+v8::Local<v8::String> toV8StringInternalized(v8::Isolate* isolate,
+ const String16& string) {
+ if (string.isEmpty()) return v8::String::Empty(isolate);
+ DCHECK(string.length() < v8::String::kMaxLength);
+ return v8::String::NewFromTwoByte(
+ isolate, reinterpret_cast<const uint16_t*>(string.characters16()),
+ v8::NewStringType::kInternalized,
+ static_cast<int>(string.length()))
+ .ToLocalChecked();
+}
+
+v8::Local<v8::String> toV8StringInternalized(v8::Isolate* isolate,
+ const char* str) {
+ return v8::String::NewFromUtf8(isolate, str, v8::NewStringType::kInternalized)
+ .ToLocalChecked();
+}
+
+v8::Local<v8::String> toV8String(v8::Isolate* isolate,
+ const StringView& string) {
+ if (!string.length()) return v8::String::Empty(isolate);
+ DCHECK(string.length() < v8::String::kMaxLength);
+ if (string.is8Bit())
+ return v8::String::NewFromOneByte(
+ isolate, reinterpret_cast<const uint8_t*>(string.characters8()),
+ v8::NewStringType::kNormal, static_cast<int>(string.length()))
+ .ToLocalChecked();
+ return v8::String::NewFromTwoByte(
+ isolate, reinterpret_cast<const uint16_t*>(string.characters16()),
+ v8::NewStringType::kNormal, static_cast<int>(string.length()))
+ .ToLocalChecked();
+}
+
+String16 toProtocolString(v8::Local<v8::String> value) {
+ if (value.IsEmpty() || value->IsNull() || value->IsUndefined())
+ return String16();
+ std::unique_ptr<UChar[]> buffer(new UChar[value->Length()]);
+ value->Write(reinterpret_cast<uint16_t*>(buffer.get()), 0, value->Length());
+ return String16(buffer.get(), value->Length());
+}
+
+String16 toProtocolStringWithTypeCheck(v8::Local<v8::Value> value) {
+ if (value.IsEmpty() || !value->IsString()) return String16();
+ return toProtocolString(value.As<v8::String>());
+}
+
+String16 toString16(const StringView& string) {
+ if (!string.length()) return String16();
+ if (string.is8Bit())
+ return String16(reinterpret_cast<const char*>(string.characters8()),
+ string.length());
+ return String16(reinterpret_cast<const UChar*>(string.characters16()),
+ string.length());
+}
+
+StringView toStringView(const String16& string) {
+ if (string.isEmpty()) return StringView();
+ return StringView(reinterpret_cast<const uint16_t*>(string.characters16()),
+ string.length());
+}
+
+bool stringViewStartsWith(const StringView& string, const char* prefix) {
+ if (!string.length()) return !(*prefix);
+ if (string.is8Bit()) {
+ for (size_t i = 0, j = 0; prefix[j] && i < string.length(); ++i, ++j) {
+ if (string.characters8()[i] != prefix[j]) return false;
+ }
+ } else {
+ for (size_t i = 0, j = 0; prefix[j] && i < string.length(); ++i, ++j) {
+ if (string.characters16()[i] != prefix[j]) return false;
+ }
+ }
+ return true;
+}
+
+namespace protocol {
+
+std::unique_ptr<protocol::Value> parseJSON(const StringView& string) {
+ if (!string.length()) return nullptr;
+ if (string.is8Bit()) {
+ return protocol::parseJSON(string.characters8(),
+ static_cast<int>(string.length()));
+ }
+ return protocol::parseJSON(string.characters16(),
+ static_cast<int>(string.length()));
+}
+
+std::unique_ptr<protocol::Value> parseJSON(const String16& string) {
+ if (!string.length()) return nullptr;
+ return protocol::parseJSON(string.characters16(),
+ static_cast<int>(string.length()));
+}
+
+} // namespace protocol
+
+std::unique_ptr<protocol::Value> toProtocolValue(protocol::String* errorString,
+ v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value,
+ int maxDepth) {
+ if (value.IsEmpty()) {
+ UNREACHABLE();
+ return nullptr;
+ }
+
+ if (!maxDepth) {
+ *errorString = "Object reference chain is too long";
+ return nullptr;
+ }
+ maxDepth--;
+
+ if (value->IsNull() || value->IsUndefined()) return protocol::Value::null();
+ if (value->IsBoolean())
+ return protocol::FundamentalValue::create(value.As<v8::Boolean>()->Value());
+ if (value->IsNumber()) {
+ double doubleValue = value.As<v8::Number>()->Value();
+ int intValue = static_cast<int>(doubleValue);
+ if (intValue == doubleValue)
+ return protocol::FundamentalValue::create(intValue);
+ return protocol::FundamentalValue::create(doubleValue);
+ }
+ if (value->IsString())
+ return protocol::StringValue::create(
+ toProtocolString(value.As<v8::String>()));
+ if (value->IsArray()) {
+ v8::Local<v8::Array> array = value.As<v8::Array>();
+ std::unique_ptr<protocol::ListValue> inspectorArray =
+ protocol::ListValue::create();
+ uint32_t length = array->Length();
+ for (uint32_t i = 0; i < length; i++) {
+ v8::Local<v8::Value> value;
+ if (!array->Get(context, i).ToLocal(&value)) {
+ *errorString = "Internal error";
+ return nullptr;
+ }
+ std::unique_ptr<protocol::Value> element =
+ toProtocolValue(errorString, context, value, maxDepth);
+ if (!element) return nullptr;
+ inspectorArray->pushValue(std::move(element));
+ }
+ return std::move(inspectorArray);
+ }
+ if (value->IsObject()) {
+ std::unique_ptr<protocol::DictionaryValue> jsonObject =
+ protocol::DictionaryValue::create();
+ v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(value);
+ v8::Local<v8::Array> propertyNames;
+ if (!object->GetPropertyNames(context).ToLocal(&propertyNames)) {
+ *errorString = "Internal error";
+ return nullptr;
+ }
+ uint32_t length = propertyNames->Length();
+ for (uint32_t i = 0; i < length; i++) {
+ v8::Local<v8::Value> name;
+ if (!propertyNames->Get(context, i).ToLocal(&name)) {
+ *errorString = "Internal error";
+ return nullptr;
+ }
+ // FIXME(yurys): v8::Object should support GetOwnPropertyNames
+ if (name->IsString()) {
+ v8::Maybe<bool> hasRealNamedProperty = object->HasRealNamedProperty(
+ context, v8::Local<v8::String>::Cast(name));
+ if (!hasRealNamedProperty.IsJust() || !hasRealNamedProperty.FromJust())
+ continue;
+ }
+ v8::Local<v8::String> propertyName;
+ if (!name->ToString(context).ToLocal(&propertyName)) continue;
+ v8::Local<v8::Value> property;
+ if (!object->Get(context, name).ToLocal(&property)) {
+ *errorString = "Internal error";
+ return nullptr;
+ }
+ std::unique_ptr<protocol::Value> propertyValue =
+ toProtocolValue(errorString, context, property, maxDepth);
+ if (!propertyValue) return nullptr;
+ jsonObject->setValue(toProtocolString(propertyName),
+ std::move(propertyValue));
+ }
+ return std::move(jsonObject);
+ }
+ *errorString = "Object couldn't be returned by value";
+ return nullptr;
+}
+
+// static
+std::unique_ptr<StringBuffer> StringBuffer::create(const StringView& string) {
+ String16 owner = toString16(string);
+ return StringBufferImpl::adopt(owner);
+}
+
+// static
+std::unique_ptr<StringBufferImpl> StringBufferImpl::adopt(String16& string) {
+ return wrapUnique(new StringBufferImpl(string));
+}
+
+StringBufferImpl::StringBufferImpl(String16& string) {
+ m_owner.swap(string);
+ m_string = toStringView(m_owner);
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
new file mode 100644
index 0000000000..30137b8b78
--- /dev/null
+++ b/deps/v8/src/inspector/string-util.h
@@ -0,0 +1,75 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_STRINGUTIL_H_
+#define V8_INSPECTOR_STRINGUTIL_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/string-16.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+namespace protocol {
+
+class Value;
+
+using String = v8_inspector::String16;
+using StringBuilder = v8_inspector::String16Builder;
+
+class StringUtil {
+ public:
+ static String substring(const String& s, size_t pos, size_t len) {
+ return s.substring(pos, len);
+ }
+ static String fromInteger(int number) { return String::fromInteger(number); }
+ static String fromInteger(size_t number) {
+ return String::fromInteger(number);
+ }
+ static String fromDouble(double number) { return String::fromDouble(number); }
+ static const size_t kNotFound = String::kNotFound;
+ static void builderReserve(StringBuilder& builder, size_t capacity) {
+ builder.reserveCapacity(capacity);
+ }
+};
+
+std::unique_ptr<protocol::Value> parseJSON(const StringView& json);
+std::unique_ptr<protocol::Value> parseJSON(const String16& json);
+
+} // namespace protocol
+
+std::unique_ptr<protocol::Value> toProtocolValue(protocol::String* errorString,
+ v8::Local<v8::Context>,
+ v8::Local<v8::Value>,
+ int maxDepth = 1000);
+
+v8::Local<v8::String> toV8String(v8::Isolate*, const String16&);
+v8::Local<v8::String> toV8StringInternalized(v8::Isolate*, const String16&);
+v8::Local<v8::String> toV8StringInternalized(v8::Isolate*, const char*);
+v8::Local<v8::String> toV8String(v8::Isolate*, const StringView&);
+// TODO(dgozman): rename to toString16.
+String16 toProtocolString(v8::Local<v8::String>);
+String16 toProtocolStringWithTypeCheck(v8::Local<v8::Value>);
+String16 toString16(const StringView&);
+StringView toStringView(const String16&);
+bool stringViewStartsWith(const StringView&, const char*);
+
+class StringBufferImpl : public StringBuffer {
+ public:
+ // Destroys string's content.
+ static std::unique_ptr<StringBufferImpl> adopt(String16&);
+ const StringView& string() override { return m_string; }
+
+ private:
+ explicit StringBufferImpl(String16&);
+ String16 m_owner;
+ StringView m_string;
+
+ DISALLOW_COPY_AND_ASSIGN(StringBufferImpl);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_STRINGUTIL_H_
diff --git a/deps/v8/src/inspector/v8-console-agent-impl.cc b/deps/v8/src/inspector/v8-console-agent-impl.cc
new file mode 100644
index 0000000000..8eb883cb75
--- /dev/null
+++ b/deps/v8/src/inspector/v8-console-agent-impl.cc
@@ -0,0 +1,79 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-console-agent-impl.h"
+
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/v8-console-message.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+
+namespace v8_inspector {
+
+namespace ConsoleAgentState {
+static const char consoleEnabled[] = "consoleEnabled";
+}
+
+V8ConsoleAgentImpl::V8ConsoleAgentImpl(
+ V8InspectorSessionImpl* session, protocol::FrontendChannel* frontendChannel,
+ protocol::DictionaryValue* state)
+ : m_session(session),
+ m_state(state),
+ m_frontend(frontendChannel),
+ m_enabled(false) {}
+
+V8ConsoleAgentImpl::~V8ConsoleAgentImpl() {}
+
+void V8ConsoleAgentImpl::enable(ErrorString* errorString) {
+ if (m_enabled) return;
+ m_state->setBoolean(ConsoleAgentState::consoleEnabled, true);
+ m_enabled = true;
+ m_session->inspector()->enableStackCapturingIfNeeded();
+ reportAllMessages();
+}
+
+void V8ConsoleAgentImpl::disable(ErrorString* errorString) {
+ if (!m_enabled) return;
+ m_session->inspector()->disableStackCapturingIfNeeded();
+ m_state->setBoolean(ConsoleAgentState::consoleEnabled, false);
+ m_enabled = false;
+}
+
+void V8ConsoleAgentImpl::clearMessages(ErrorString* errorString) {}
+
+void V8ConsoleAgentImpl::restore() {
+ if (!m_state->booleanProperty(ConsoleAgentState::consoleEnabled, false))
+ return;
+ ErrorString ignored;
+ enable(&ignored);
+}
+
+void V8ConsoleAgentImpl::messageAdded(V8ConsoleMessage* message) {
+ if (m_enabled) reportMessage(message, true);
+}
+
+bool V8ConsoleAgentImpl::enabled() { return m_enabled; }
+
+void V8ConsoleAgentImpl::reportAllMessages() {
+ V8ConsoleMessageStorage* storage =
+ m_session->inspector()->ensureConsoleMessageStorage(
+ m_session->contextGroupId());
+ for (const auto& message : storage->messages()) {
+ if (message->origin() == V8MessageOrigin::kConsole) {
+ if (!reportMessage(message.get(), false)) return;
+ }
+ }
+}
+
+bool V8ConsoleAgentImpl::reportMessage(V8ConsoleMessage* message,
+ bool generatePreview) {
+ DCHECK(message->origin() == V8MessageOrigin::kConsole);
+ message->reportToFrontend(&m_frontend);
+ m_frontend.flush();
+ return m_session->inspector()->hasConsoleMessageStorage(
+ m_session->contextGroupId());
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-console-agent-impl.h b/deps/v8/src/inspector/v8-console-agent-impl.h
new file mode 100644
index 0000000000..f3d598bb34
--- /dev/null
+++ b/deps/v8/src/inspector/v8-console-agent-impl.h
@@ -0,0 +1,48 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8CONSOLEAGENTIMPL_H_
+#define V8_INSPECTOR_V8CONSOLEAGENTIMPL_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Console.h"
+#include "src/inspector/protocol/Forward.h"
+
+namespace v8_inspector {
+
+class V8ConsoleMessage;
+class V8InspectorSessionImpl;
+
+using protocol::ErrorString;
+
+class V8ConsoleAgentImpl : public protocol::Console::Backend {
+ public:
+ V8ConsoleAgentImpl(V8InspectorSessionImpl*, protocol::FrontendChannel*,
+ protocol::DictionaryValue* state);
+ ~V8ConsoleAgentImpl() override;
+
+ void enable(ErrorString*) override;
+ void disable(ErrorString*) override;
+ void clearMessages(ErrorString*) override;
+
+ void restore();
+ void messageAdded(V8ConsoleMessage*);
+ void reset();
+ bool enabled();
+
+ private:
+ void reportAllMessages();
+ bool reportMessage(V8ConsoleMessage*, bool generatePreview);
+
+ V8InspectorSessionImpl* m_session;
+ protocol::DictionaryValue* m_state;
+ protocol::Console::Frontend m_frontend;
+ bool m_enabled;
+
+ DISALLOW_COPY_AND_ASSIGN(V8ConsoleAgentImpl);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8CONSOLEAGENTIMPL_H_
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
new file mode 100644
index 0000000000..63f1d49faf
--- /dev/null
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -0,0 +1,485 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-console-message.h"
+
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console-agent-impl.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-runtime-agent-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+namespace {
+
+String16 consoleAPITypeValue(ConsoleAPIType type) {
+ switch (type) {
+ case ConsoleAPIType::kLog:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Log;
+ case ConsoleAPIType::kDebug:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Debug;
+ case ConsoleAPIType::kInfo:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Info;
+ case ConsoleAPIType::kError:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Error;
+ case ConsoleAPIType::kWarning:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Warning;
+ case ConsoleAPIType::kClear:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Clear;
+ case ConsoleAPIType::kDir:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Dir;
+ case ConsoleAPIType::kDirXML:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Dirxml;
+ case ConsoleAPIType::kTable:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Table;
+ case ConsoleAPIType::kTrace:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Trace;
+ case ConsoleAPIType::kStartGroup:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::StartGroup;
+ case ConsoleAPIType::kStartGroupCollapsed:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::StartGroupCollapsed;
+ case ConsoleAPIType::kEndGroup:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::EndGroup;
+ case ConsoleAPIType::kAssert:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Assert;
+ case ConsoleAPIType::kTimeEnd:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Debug;
+ case ConsoleAPIType::kCount:
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Debug;
+ }
+ return protocol::Runtime::ConsoleAPICalled::TypeEnum::Log;
+}
+
+const unsigned maxConsoleMessageCount = 1000;
+const unsigned maxArrayItemsLimit = 10000;
+const unsigned maxStackDepthLimit = 32;
+
+class V8ValueStringBuilder {
+ public:
+ static String16 toString(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ V8ValueStringBuilder builder(context);
+ if (!builder.append(value)) return String16();
+ return builder.toString();
+ }
+
+ private:
+ enum {
+ IgnoreNull = 1 << 0,
+ IgnoreUndefined = 1 << 1,
+ };
+
+ explicit V8ValueStringBuilder(v8::Local<v8::Context> context)
+ : m_arrayLimit(maxArrayItemsLimit),
+ m_isolate(context->GetIsolate()),
+ m_tryCatch(context->GetIsolate()),
+ m_context(context) {}
+
+ bool append(v8::Local<v8::Value> value, unsigned ignoreOptions = 0) {
+ if (value.IsEmpty()) return true;
+ if ((ignoreOptions & IgnoreNull) && value->IsNull()) return true;
+ if ((ignoreOptions & IgnoreUndefined) && value->IsUndefined()) return true;
+ if (value->IsString()) return append(v8::Local<v8::String>::Cast(value));
+ if (value->IsStringObject())
+ return append(v8::Local<v8::StringObject>::Cast(value)->ValueOf());
+ if (value->IsSymbol()) return append(v8::Local<v8::Symbol>::Cast(value));
+ if (value->IsSymbolObject())
+ return append(v8::Local<v8::SymbolObject>::Cast(value)->ValueOf());
+ if (value->IsNumberObject()) {
+ m_builder.append(String16::fromDouble(
+ v8::Local<v8::NumberObject>::Cast(value)->ValueOf(), 6));
+ return true;
+ }
+ if (value->IsBooleanObject()) {
+ m_builder.append(v8::Local<v8::BooleanObject>::Cast(value)->ValueOf()
+ ? "true"
+ : "false");
+ return true;
+ }
+ if (value->IsArray()) return append(v8::Local<v8::Array>::Cast(value));
+ if (value->IsProxy()) {
+ m_builder.append("[object Proxy]");
+ return true;
+ }
+ if (value->IsObject() && !value->IsDate() && !value->IsFunction() &&
+ !value->IsNativeError() && !value->IsRegExp()) {
+ v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(value);
+ v8::Local<v8::String> stringValue;
+ if (object->ObjectProtoToString(m_isolate->GetCurrentContext())
+ .ToLocal(&stringValue))
+ return append(stringValue);
+ }
+ v8::Local<v8::String> stringValue;
+ if (!value->ToString(m_isolate->GetCurrentContext()).ToLocal(&stringValue))
+ return false;
+ return append(stringValue);
+ }
+
+ bool append(v8::Local<v8::Array> array) {
+ for (const auto& it : m_visitedArrays) {
+ if (it == array) return true;
+ }
+ uint32_t length = array->Length();
+ if (length > m_arrayLimit) return false;
+ if (m_visitedArrays.size() > maxStackDepthLimit) return false;
+
+ bool result = true;
+ m_arrayLimit -= length;
+ m_visitedArrays.push_back(array);
+ for (uint32_t i = 0; i < length; ++i) {
+ if (i) m_builder.append(',');
+ v8::Local<v8::Value> value;
+ if (!array->Get(m_context, i).ToLocal(&value)) continue;
+ if (!append(value, IgnoreNull | IgnoreUndefined)) {
+ result = false;
+ break;
+ }
+ }
+ m_visitedArrays.pop_back();
+ return result;
+ }
+
+ bool append(v8::Local<v8::Symbol> symbol) {
+ m_builder.append("Symbol(");
+ bool result = append(symbol->Name(), IgnoreUndefined);
+ m_builder.append(')');
+ return result;
+ }
+
+ bool append(v8::Local<v8::String> string) {
+ if (m_tryCatch.HasCaught()) return false;
+ if (!string.IsEmpty()) m_builder.append(toProtocolString(string));
+ return true;
+ }
+
+ String16 toString() {
+ if (m_tryCatch.HasCaught()) return String16();
+ return m_builder.toString();
+ }
+
+ uint32_t m_arrayLimit;
+ v8::Isolate* m_isolate;
+ String16Builder m_builder;
+ std::vector<v8::Local<v8::Array>> m_visitedArrays;
+ v8::TryCatch m_tryCatch;
+ v8::Local<v8::Context> m_context;
+};
+
+} // namespace
+
+V8ConsoleMessage::V8ConsoleMessage(V8MessageOrigin origin, double timestamp,
+ const String16& message)
+ : m_origin(origin),
+ m_timestamp(timestamp),
+ m_message(message),
+ m_lineNumber(0),
+ m_columnNumber(0),
+ m_scriptId(0),
+ m_contextId(0),
+ m_type(ConsoleAPIType::kLog),
+ m_exceptionId(0),
+ m_revokedExceptionId(0) {}
+
+V8ConsoleMessage::~V8ConsoleMessage() {}
+
+void V8ConsoleMessage::setLocation(const String16& url, unsigned lineNumber,
+ unsigned columnNumber,
+ std::unique_ptr<V8StackTraceImpl> stackTrace,
+ int scriptId) {
+ m_url = url;
+ m_lineNumber = lineNumber;
+ m_columnNumber = columnNumber;
+ m_stackTrace = std::move(stackTrace);
+ m_scriptId = scriptId;
+}
+
+void V8ConsoleMessage::reportToFrontend(
+ protocol::Console::Frontend* frontend) const {
+ DCHECK(m_origin == V8MessageOrigin::kConsole);
+ String16 level = protocol::Console::ConsoleMessage::LevelEnum::Log;
+ if (m_type == ConsoleAPIType::kDebug || m_type == ConsoleAPIType::kCount ||
+ m_type == ConsoleAPIType::kTimeEnd)
+ level = protocol::Console::ConsoleMessage::LevelEnum::Debug;
+ else if (m_type == ConsoleAPIType::kError ||
+ m_type == ConsoleAPIType::kAssert)
+ level = protocol::Console::ConsoleMessage::LevelEnum::Error;
+ else if (m_type == ConsoleAPIType::kWarning)
+ level = protocol::Console::ConsoleMessage::LevelEnum::Warning;
+ else if (m_type == ConsoleAPIType::kInfo)
+ level = protocol::Console::ConsoleMessage::LevelEnum::Info;
+ std::unique_ptr<protocol::Console::ConsoleMessage> result =
+ protocol::Console::ConsoleMessage::create()
+ .setSource(protocol::Console::ConsoleMessage::SourceEnum::ConsoleApi)
+ .setLevel(level)
+ .setText(m_message)
+ .build();
+ result->setLine(static_cast<int>(m_lineNumber));
+ result->setColumn(static_cast<int>(m_columnNumber));
+ result->setUrl(m_url);
+ frontend->messageAdded(std::move(result));
+}
+
+std::unique_ptr<protocol::Array<protocol::Runtime::RemoteObject>>
+V8ConsoleMessage::wrapArguments(V8InspectorSessionImpl* session,
+ bool generatePreview) const {
+ V8InspectorImpl* inspector = session->inspector();
+ int contextGroupId = session->contextGroupId();
+ int contextId = m_contextId;
+ if (!m_arguments.size() || !contextId) return nullptr;
+ InspectedContext* inspectedContext =
+ inspector->getContext(contextGroupId, contextId);
+ if (!inspectedContext) return nullptr;
+
+ v8::Isolate* isolate = inspectedContext->isolate();
+ v8::HandleScope handles(isolate);
+ v8::Local<v8::Context> context = inspectedContext->context();
+
+ std::unique_ptr<protocol::Array<protocol::Runtime::RemoteObject>> args =
+ protocol::Array<protocol::Runtime::RemoteObject>::create();
+ if (m_type == ConsoleAPIType::kTable && generatePreview) {
+ v8::Local<v8::Value> table = m_arguments[0]->Get(isolate);
+ v8::Local<v8::Value> columns = m_arguments.size() > 1
+ ? m_arguments[1]->Get(isolate)
+ : v8::Local<v8::Value>();
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrapped =
+ session->wrapTable(context, table, columns);
+ inspectedContext = inspector->getContext(contextGroupId, contextId);
+ if (!inspectedContext) return nullptr;
+ if (wrapped)
+ args->addItem(std::move(wrapped));
+ else
+ args = nullptr;
+ } else {
+ for (size_t i = 0; i < m_arguments.size(); ++i) {
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrapped =
+ session->wrapObject(context, m_arguments[i]->Get(isolate), "console",
+ generatePreview);
+ inspectedContext = inspector->getContext(contextGroupId, contextId);
+ if (!inspectedContext) return nullptr;
+ if (!wrapped) {
+ args = nullptr;
+ break;
+ }
+ args->addItem(std::move(wrapped));
+ }
+ }
+ return args;
+}
+
+void V8ConsoleMessage::reportToFrontend(protocol::Runtime::Frontend* frontend,
+ V8InspectorSessionImpl* session,
+ bool generatePreview) const {
+ int contextGroupId = session->contextGroupId();
+ V8InspectorImpl* inspector = session->inspector();
+
+ if (m_origin == V8MessageOrigin::kException) {
+ std::unique_ptr<protocol::Runtime::RemoteObject> exception =
+ wrapException(session, generatePreview);
+ if (!inspector->hasConsoleMessageStorage(contextGroupId)) return;
+ std::unique_ptr<protocol::Runtime::ExceptionDetails> exceptionDetails =
+ protocol::Runtime::ExceptionDetails::create()
+ .setExceptionId(m_exceptionId)
+ .setText(exception ? m_message : m_detailedMessage)
+ .setLineNumber(m_lineNumber ? m_lineNumber - 1 : 0)
+ .setColumnNumber(m_columnNumber ? m_columnNumber - 1 : 0)
+ .build();
+ if (m_scriptId)
+ exceptionDetails->setScriptId(String16::fromInteger(m_scriptId));
+ if (!m_url.isEmpty()) exceptionDetails->setUrl(m_url);
+ if (m_stackTrace)
+ exceptionDetails->setStackTrace(m_stackTrace->buildInspectorObjectImpl());
+ if (m_contextId) exceptionDetails->setExecutionContextId(m_contextId);
+ if (exception) exceptionDetails->setException(std::move(exception));
+ frontend->exceptionThrown(m_timestamp, std::move(exceptionDetails));
+ return;
+ }
+ if (m_origin == V8MessageOrigin::kRevokedException) {
+ frontend->exceptionRevoked(m_message, m_revokedExceptionId);
+ return;
+ }
+ if (m_origin == V8MessageOrigin::kConsole) {
+ std::unique_ptr<protocol::Array<protocol::Runtime::RemoteObject>>
+ arguments = wrapArguments(session, generatePreview);
+ if (!inspector->hasConsoleMessageStorage(contextGroupId)) return;
+ if (!arguments) {
+ arguments = protocol::Array<protocol::Runtime::RemoteObject>::create();
+ if (!m_message.isEmpty()) {
+ std::unique_ptr<protocol::Runtime::RemoteObject> messageArg =
+ protocol::Runtime::RemoteObject::create()
+ .setType(protocol::Runtime::RemoteObject::TypeEnum::String)
+ .build();
+ messageArg->setValue(protocol::StringValue::create(m_message));
+ arguments->addItem(std::move(messageArg));
+ }
+ }
+ frontend->consoleAPICalled(
+ consoleAPITypeValue(m_type), std::move(arguments), m_contextId,
+ m_timestamp,
+ m_stackTrace ? m_stackTrace->buildInspectorObjectImpl() : nullptr);
+ return;
+ }
+ UNREACHABLE();
+}
+
+std::unique_ptr<protocol::Runtime::RemoteObject>
+V8ConsoleMessage::wrapException(V8InspectorSessionImpl* session,
+ bool generatePreview) const {
+ if (!m_arguments.size() || !m_contextId) return nullptr;
+ DCHECK_EQ(1u, m_arguments.size());
+ InspectedContext* inspectedContext =
+ session->inspector()->getContext(session->contextGroupId(), m_contextId);
+ if (!inspectedContext) return nullptr;
+
+ v8::Isolate* isolate = inspectedContext->isolate();
+ v8::HandleScope handles(isolate);
+ // TODO(dgozman): should we use different object group?
+ return session->wrapObject(inspectedContext->context(),
+ m_arguments[0]->Get(isolate), "console",
+ generatePreview);
+}
+
+V8MessageOrigin V8ConsoleMessage::origin() const { return m_origin; }
+
+ConsoleAPIType V8ConsoleMessage::type() const { return m_type; }
+
+// static
+std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForConsoleAPI(
+ double timestamp, ConsoleAPIType type,
+ const std::vector<v8::Local<v8::Value>>& arguments,
+ std::unique_ptr<V8StackTraceImpl> stackTrace,
+ InspectedContext* inspectedContext) {
+ v8::Isolate* isolate = inspectedContext->isolate();
+ int contextId = inspectedContext->contextId();
+ int contextGroupId = inspectedContext->contextGroupId();
+ V8InspectorImpl* inspector = inspectedContext->inspector();
+ v8::Local<v8::Context> context = inspectedContext->context();
+
+ std::unique_ptr<V8ConsoleMessage> message = wrapUnique(
+ new V8ConsoleMessage(V8MessageOrigin::kConsole, timestamp, String16()));
+ if (stackTrace && !stackTrace->isEmpty()) {
+ message->m_url = toString16(stackTrace->topSourceURL());
+ message->m_lineNumber = stackTrace->topLineNumber();
+ message->m_columnNumber = stackTrace->topColumnNumber();
+ }
+ message->m_stackTrace = std::move(stackTrace);
+ message->m_type = type;
+ message->m_contextId = contextId;
+ for (size_t i = 0; i < arguments.size(); ++i)
+ message->m_arguments.push_back(
+ wrapUnique(new v8::Global<v8::Value>(isolate, arguments.at(i))));
+ if (arguments.size())
+ message->m_message = V8ValueStringBuilder::toString(arguments[0], context);
+
+ V8ConsoleAPIType clientType = V8ConsoleAPIType::kLog;
+ if (type == ConsoleAPIType::kDebug || type == ConsoleAPIType::kCount ||
+ type == ConsoleAPIType::kTimeEnd)
+ clientType = V8ConsoleAPIType::kDebug;
+ else if (type == ConsoleAPIType::kError || type == ConsoleAPIType::kAssert)
+ clientType = V8ConsoleAPIType::kError;
+ else if (type == ConsoleAPIType::kWarning)
+ clientType = V8ConsoleAPIType::kWarning;
+ else if (type == ConsoleAPIType::kInfo)
+ clientType = V8ConsoleAPIType::kInfo;
+ else if (type == ConsoleAPIType::kClear)
+ clientType = V8ConsoleAPIType::kClear;
+ inspector->client()->consoleAPIMessage(
+ contextGroupId, clientType, toStringView(message->m_message),
+ toStringView(message->m_url), message->m_lineNumber,
+ message->m_columnNumber, message->m_stackTrace.get());
+
+ return message;
+}
+
+// static
+std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForException(
+ double timestamp, const String16& detailedMessage, const String16& url,
+ unsigned lineNumber, unsigned columnNumber,
+ std::unique_ptr<V8StackTraceImpl> stackTrace, int scriptId,
+ v8::Isolate* isolate, const String16& message, int contextId,
+ v8::Local<v8::Value> exception, unsigned exceptionId) {
+ std::unique_ptr<V8ConsoleMessage> consoleMessage = wrapUnique(
+ new V8ConsoleMessage(V8MessageOrigin::kException, timestamp, message));
+ consoleMessage->setLocation(url, lineNumber, columnNumber,
+ std::move(stackTrace), scriptId);
+ consoleMessage->m_exceptionId = exceptionId;
+ consoleMessage->m_detailedMessage = detailedMessage;
+ if (contextId && !exception.IsEmpty()) {
+ consoleMessage->m_contextId = contextId;
+ consoleMessage->m_arguments.push_back(
+ wrapUnique(new v8::Global<v8::Value>(isolate, exception)));
+ }
+ return consoleMessage;
+}
+
+// static
+std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForRevokedException(
+ double timestamp, const String16& messageText,
+ unsigned revokedExceptionId) {
+ std::unique_ptr<V8ConsoleMessage> message = wrapUnique(new V8ConsoleMessage(
+ V8MessageOrigin::kRevokedException, timestamp, messageText));
+ message->m_revokedExceptionId = revokedExceptionId;
+ return message;
+}
+
+void V8ConsoleMessage::contextDestroyed(int contextId) {
+ if (contextId != m_contextId) return;
+ m_contextId = 0;
+ if (m_message.isEmpty()) m_message = "<message collected>";
+ Arguments empty;
+ m_arguments.swap(empty);
+}
+
+// ------------------------ V8ConsoleMessageStorage ----------------------------
+
+V8ConsoleMessageStorage::V8ConsoleMessageStorage(V8InspectorImpl* inspector,
+ int contextGroupId)
+ : m_inspector(inspector),
+ m_contextGroupId(contextGroupId),
+ m_expiredCount(0) {}
+
+V8ConsoleMessageStorage::~V8ConsoleMessageStorage() { clear(); }
+
+void V8ConsoleMessageStorage::addMessage(
+ std::unique_ptr<V8ConsoleMessage> message) {
+ int contextGroupId = m_contextGroupId;
+ V8InspectorImpl* inspector = m_inspector;
+ if (message->type() == ConsoleAPIType::kClear) clear();
+
+ V8InspectorSessionImpl* session =
+ inspector->sessionForContextGroup(contextGroupId);
+ if (session) {
+ if (message->origin() == V8MessageOrigin::kConsole)
+ session->consoleAgent()->messageAdded(message.get());
+ session->runtimeAgent()->messageAdded(message.get());
+ }
+ if (!inspector->hasConsoleMessageStorage(contextGroupId)) return;
+
+ DCHECK(m_messages.size() <= maxConsoleMessageCount);
+ if (m_messages.size() == maxConsoleMessageCount) {
+ ++m_expiredCount;
+ m_messages.pop_front();
+ }
+ m_messages.push_back(std::move(message));
+}
+
+void V8ConsoleMessageStorage::clear() {
+ m_messages.clear();
+ m_expiredCount = 0;
+ if (V8InspectorSessionImpl* session =
+ m_inspector->sessionForContextGroup(m_contextGroupId))
+ session->releaseObjectGroup("console");
+}
+
+void V8ConsoleMessageStorage::contextDestroyed(int contextId) {
+ for (size_t i = 0; i < m_messages.size(); ++i)
+ m_messages[i]->contextDestroyed(contextId);
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-console-message.h b/deps/v8/src/inspector/v8-console-message.h
new file mode 100644
index 0000000000..a6e9eafe2d
--- /dev/null
+++ b/deps/v8/src/inspector/v8-console-message.h
@@ -0,0 +1,120 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8CONSOLEMESSAGE_H_
+#define V8_INSPECTOR_V8CONSOLEMESSAGE_H_
+
+#include <deque>
+#include "include/v8.h"
+#include "src/inspector/protocol/Console.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Runtime.h"
+
+namespace v8_inspector {
+
+class InspectedContext;
+class V8InspectorImpl;
+class V8InspectorSessionImpl;
+class V8StackTraceImpl;
+
+enum class V8MessageOrigin { kConsole, kException, kRevokedException };
+
+enum class ConsoleAPIType {
+ kLog,
+ kDebug,
+ kInfo,
+ kError,
+ kWarning,
+ kDir,
+ kDirXML,
+ kTable,
+ kTrace,
+ kStartGroup,
+ kStartGroupCollapsed,
+ kEndGroup,
+ kClear,
+ kAssert,
+ kTimeEnd,
+ kCount
+};
+
+class V8ConsoleMessage {
+ public:
+ ~V8ConsoleMessage();
+
+ static std::unique_ptr<V8ConsoleMessage> createForConsoleAPI(
+ double timestamp, ConsoleAPIType,
+ const std::vector<v8::Local<v8::Value>>& arguments,
+ std::unique_ptr<V8StackTraceImpl>, InspectedContext*);
+
+ static std::unique_ptr<V8ConsoleMessage> createForException(
+ double timestamp, const String16& detailedMessage, const String16& url,
+ unsigned lineNumber, unsigned columnNumber,
+ std::unique_ptr<V8StackTraceImpl>, int scriptId, v8::Isolate*,
+ const String16& message, int contextId, v8::Local<v8::Value> exception,
+ unsigned exceptionId);
+
+ static std::unique_ptr<V8ConsoleMessage> createForRevokedException(
+ double timestamp, const String16& message, unsigned revokedExceptionId);
+
+ V8MessageOrigin origin() const;
+ void reportToFrontend(protocol::Console::Frontend*) const;
+ void reportToFrontend(protocol::Runtime::Frontend*, V8InspectorSessionImpl*,
+ bool generatePreview) const;
+ ConsoleAPIType type() const;
+ void contextDestroyed(int contextId);
+
+ private:
+ V8ConsoleMessage(V8MessageOrigin, double timestamp, const String16& message);
+
+ using Arguments = std::vector<std::unique_ptr<v8::Global<v8::Value>>>;
+ std::unique_ptr<protocol::Array<protocol::Runtime::RemoteObject>>
+ wrapArguments(V8InspectorSessionImpl*, bool generatePreview) const;
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrapException(
+ V8InspectorSessionImpl*, bool generatePreview) const;
+ void setLocation(const String16& url, unsigned lineNumber,
+ unsigned columnNumber, std::unique_ptr<V8StackTraceImpl>,
+ int scriptId);
+
+ V8MessageOrigin m_origin;
+ double m_timestamp;
+ String16 m_message;
+ String16 m_url;
+ unsigned m_lineNumber;
+ unsigned m_columnNumber;
+ std::unique_ptr<V8StackTraceImpl> m_stackTrace;
+ int m_scriptId;
+ int m_contextId;
+ ConsoleAPIType m_type;
+ unsigned m_exceptionId;
+ unsigned m_revokedExceptionId;
+ Arguments m_arguments;
+ String16 m_detailedMessage;
+};
+
+class V8ConsoleMessageStorage {
+ public:
+ V8ConsoleMessageStorage(V8InspectorImpl*, int contextGroupId);
+ ~V8ConsoleMessageStorage();
+
+ int contextGroupId() { return m_contextGroupId; }
+ int expiredCount() { return m_expiredCount; }
+ const std::deque<std::unique_ptr<V8ConsoleMessage>>& messages() const {
+ return m_messages;
+ }
+
+ void addMessage(std::unique_ptr<V8ConsoleMessage>);
+ void contextDestroyed(int contextId);
+ void clear();
+
+ private:
+ V8InspectorImpl* m_inspector;
+ int m_contextGroupId;
+ int m_expiredCount;
+ std::deque<std::unique_ptr<V8ConsoleMessage>> m_messages;
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8CONSOLEMESSAGE_H_
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
new file mode 100644
index 0000000000..ddd4bf629e
--- /dev/null
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -0,0 +1,922 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-console.h"
+
+#include "src/base/macros.h"
+#include "src/inspector/injected-script.h"
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console-message.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-profiler-agent-impl.h"
+#include "src/inspector/v8-runtime-agent-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+#include "src/inspector/v8-value-copier.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+namespace {
+
+v8::Local<v8::Private> inspectedContextPrivateKey(v8::Isolate* isolate) {
+ return v8::Private::ForApi(
+ isolate, toV8StringInternalized(isolate, "V8Console#InspectedContext"));
+}
+
+class ConsoleHelper {
+ public:
+ explicit ConsoleHelper(const v8::FunctionCallbackInfo<v8::Value>& info)
+ : m_info(info),
+ m_isolate(info.GetIsolate()),
+ m_context(info.GetIsolate()->GetCurrentContext()),
+ m_inspectedContext(nullptr),
+ m_inspectorClient(nullptr) {}
+
+ v8::Local<v8::Object> ensureConsole() {
+ if (m_console.IsEmpty()) {
+ DCHECK(!m_info.Data().IsEmpty());
+ DCHECK(!m_info.Data()->IsUndefined());
+ m_console = m_info.Data().As<v8::Object>();
+ }
+ return m_console;
+ }
+
+ InspectedContext* ensureInspectedContext() {
+ if (m_inspectedContext) return m_inspectedContext;
+ v8::Local<v8::Object> console = ensureConsole();
+
+ v8::Local<v8::Private> key = inspectedContextPrivateKey(m_isolate);
+ v8::Local<v8::Value> inspectedContextValue;
+ if (!console->GetPrivate(m_context, key).ToLocal(&inspectedContextValue))
+ return nullptr;
+ DCHECK(inspectedContextValue->IsExternal());
+ m_inspectedContext = static_cast<InspectedContext*>(
+ inspectedContextValue.As<v8::External>()->Value());
+ return m_inspectedContext;
+ }
+
+ V8InspectorClient* ensureDebuggerClient() {
+ if (m_inspectorClient) return m_inspectorClient;
+ InspectedContext* inspectedContext = ensureInspectedContext();
+ if (!inspectedContext) return nullptr;
+ m_inspectorClient = inspectedContext->inspector()->client();
+ return m_inspectorClient;
+ }
+
+ void reportCall(ConsoleAPIType type) {
+ if (!m_info.Length()) return;
+ std::vector<v8::Local<v8::Value>> arguments;
+ for (int i = 0; i < m_info.Length(); ++i) arguments.push_back(m_info[i]);
+ reportCall(type, arguments);
+ }
+
+ void reportCallWithDefaultArgument(ConsoleAPIType type,
+ const String16& message) {
+ std::vector<v8::Local<v8::Value>> arguments;
+ for (int i = 0; i < m_info.Length(); ++i) arguments.push_back(m_info[i]);
+ if (!m_info.Length()) arguments.push_back(toV8String(m_isolate, message));
+ reportCall(type, arguments);
+ }
+
+ void reportCallWithArgument(ConsoleAPIType type, const String16& message) {
+ std::vector<v8::Local<v8::Value>> arguments(1,
+ toV8String(m_isolate, message));
+ reportCall(type, arguments);
+ }
+
+ void reportCall(ConsoleAPIType type,
+ const std::vector<v8::Local<v8::Value>>& arguments) {
+ InspectedContext* inspectedContext = ensureInspectedContext();
+ if (!inspectedContext) return;
+ int contextGroupId = inspectedContext->contextGroupId();
+ V8InspectorImpl* inspector = inspectedContext->inspector();
+ std::unique_ptr<V8ConsoleMessage> message =
+ V8ConsoleMessage::createForConsoleAPI(
+ inspector->client()->currentTimeMS(), type, arguments,
+ inspector->debugger()->captureStackTrace(false), inspectedContext);
+ inspector->ensureConsoleMessageStorage(contextGroupId)
+ ->addMessage(std::move(message));
+ }
+
+ void reportDeprecatedCall(const char* id, const String16& message) {
+ if (checkAndSetPrivateFlagOnConsole(id, false)) return;
+ std::vector<v8::Local<v8::Value>> arguments(1,
+ toV8String(m_isolate, message));
+ reportCall(ConsoleAPIType::kWarning, arguments);
+ }
+
+ bool firstArgToBoolean(bool defaultValue) {
+ if (m_info.Length() < 1) return defaultValue;
+ if (m_info[0]->IsBoolean()) return m_info[0].As<v8::Boolean>()->Value();
+ return m_info[0]->BooleanValue(m_context).FromMaybe(defaultValue);
+ }
+
+ String16 firstArgToString(const String16& defaultValue) {
+ if (m_info.Length() < 1) return defaultValue;
+ v8::Local<v8::String> titleValue;
+ if (m_info[0]->IsObject()) {
+ if (!m_info[0].As<v8::Object>()->ObjectProtoToString(m_context).ToLocal(
+ &titleValue))
+ return defaultValue;
+ } else {
+ if (!m_info[0]->ToString(m_context).ToLocal(&titleValue))
+ return defaultValue;
+ }
+ return toProtocolString(titleValue);
+ }
+
+ v8::MaybeLocal<v8::Object> firstArgAsObject() {
+ if (m_info.Length() < 1 || !m_info[0]->IsObject())
+ return v8::MaybeLocal<v8::Object>();
+ return m_info[0].As<v8::Object>();
+ }
+
+ v8::MaybeLocal<v8::Function> firstArgAsFunction() {
+ if (m_info.Length() < 1 || !m_info[0]->IsFunction())
+ return v8::MaybeLocal<v8::Function>();
+ v8::Local<v8::Function> func = m_info[0].As<v8::Function>();
+ while (func->GetBoundFunction()->IsFunction())
+ func = func->GetBoundFunction().As<v8::Function>();
+ return func;
+ }
+
+ v8::MaybeLocal<v8::Map> privateMap(const char* name) {
+ v8::Local<v8::Object> console = ensureConsole();
+ v8::Local<v8::Private> privateKey =
+ v8::Private::ForApi(m_isolate, toV8StringInternalized(m_isolate, name));
+ v8::Local<v8::Value> mapValue;
+ if (!console->GetPrivate(m_context, privateKey).ToLocal(&mapValue))
+ return v8::MaybeLocal<v8::Map>();
+ if (mapValue->IsUndefined()) {
+ v8::Local<v8::Map> map = v8::Map::New(m_isolate);
+ if (!console->SetPrivate(m_context, privateKey, map).FromMaybe(false))
+ return v8::MaybeLocal<v8::Map>();
+ return map;
+ }
+ return mapValue->IsMap() ? mapValue.As<v8::Map>()
+ : v8::MaybeLocal<v8::Map>();
+ }
+
+ int32_t getIntFromMap(v8::Local<v8::Map> map, const String16& key,
+ int32_t defaultValue) {
+ v8::Local<v8::String> v8Key = toV8String(m_isolate, key);
+ if (!map->Has(m_context, v8Key).FromMaybe(false)) return defaultValue;
+ v8::Local<v8::Value> intValue;
+ if (!map->Get(m_context, v8Key).ToLocal(&intValue)) return defaultValue;
+ return static_cast<int32_t>(intValue.As<v8::Integer>()->Value());
+ }
+
+ void setIntOnMap(v8::Local<v8::Map> map, const String16& key, int32_t value) {
+ v8::Local<v8::String> v8Key = toV8String(m_isolate, key);
+ if (!map->Set(m_context, v8Key, v8::Integer::New(m_isolate, value))
+ .ToLocal(&map))
+ return;
+ }
+
+ double getDoubleFromMap(v8::Local<v8::Map> map, const String16& key,
+ double defaultValue) {
+ v8::Local<v8::String> v8Key = toV8String(m_isolate, key);
+ if (!map->Has(m_context, v8Key).FromMaybe(false)) return defaultValue;
+ v8::Local<v8::Value> intValue;
+ if (!map->Get(m_context, v8Key).ToLocal(&intValue)) return defaultValue;
+ return intValue.As<v8::Number>()->Value();
+ }
+
+ void setDoubleOnMap(v8::Local<v8::Map> map, const String16& key,
+ double value) {
+ v8::Local<v8::String> v8Key = toV8String(m_isolate, key);
+ if (!map->Set(m_context, v8Key, v8::Number::New(m_isolate, value))
+ .ToLocal(&map))
+ return;
+ }
+
+ V8ProfilerAgentImpl* profilerAgent() {
+ if (V8InspectorSessionImpl* session = currentSession()) {
+ if (session && session->profilerAgent()->enabled())
+ return session->profilerAgent();
+ }
+ return nullptr;
+ }
+
+ V8DebuggerAgentImpl* debuggerAgent() {
+ if (V8InspectorSessionImpl* session = currentSession()) {
+ if (session && session->debuggerAgent()->enabled())
+ return session->debuggerAgent();
+ }
+ return nullptr;
+ }
+
+ V8InspectorSessionImpl* currentSession() {
+ InspectedContext* inspectedContext = ensureInspectedContext();
+ if (!inspectedContext) return nullptr;
+ return inspectedContext->inspector()->sessionForContextGroup(
+ inspectedContext->contextGroupId());
+ }
+
+ private:
+ const v8::FunctionCallbackInfo<v8::Value>& m_info;
+ v8::Isolate* m_isolate;
+ v8::Local<v8::Context> m_context;
+ v8::Local<v8::Object> m_console;
+ InspectedContext* m_inspectedContext;
+ V8InspectorClient* m_inspectorClient;
+
+ bool checkAndSetPrivateFlagOnConsole(const char* name, bool defaultValue) {
+ v8::Local<v8::Object> console = ensureConsole();
+ v8::Local<v8::Private> key =
+ v8::Private::ForApi(m_isolate, toV8StringInternalized(m_isolate, name));
+ v8::Local<v8::Value> flagValue;
+ if (!console->GetPrivate(m_context, key).ToLocal(&flagValue))
+ return defaultValue;
+ DCHECK(flagValue->IsUndefined() || flagValue->IsBoolean());
+ if (flagValue->IsBoolean()) {
+ DCHECK(flagValue.As<v8::Boolean>()->Value());
+ return true;
+ }
+ if (!console->SetPrivate(m_context, key, v8::True(m_isolate))
+ .FromMaybe(false))
+ return defaultValue;
+ return false;
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(ConsoleHelper);
+};
+
+void returnDataCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(info.Data());
+}
+
+void createBoundFunctionProperty(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> console,
+ const char* name,
+ v8::FunctionCallback callback,
+ const char* description = nullptr) {
+ v8::Local<v8::String> funcName =
+ toV8StringInternalized(context->GetIsolate(), name);
+ v8::Local<v8::Function> func;
+ if (!v8::Function::New(context, callback, console, 0,
+ v8::ConstructorBehavior::kThrow)
+ .ToLocal(&func))
+ return;
+ func->SetName(funcName);
+ if (description) {
+ v8::Local<v8::String> returnValue =
+ toV8String(context->GetIsolate(), description);
+ v8::Local<v8::Function> toStringFunction;
+ if (v8::Function::New(context, returnDataCallback, returnValue, 0,
+ v8::ConstructorBehavior::kThrow)
+ .ToLocal(&toStringFunction))
+ createDataProperty(context, func, toV8StringInternalized(
+ context->GetIsolate(), "toString"),
+ toStringFunction);
+ }
+ createDataProperty(context, console, funcName, func);
+}
+
+} // namespace
+
+void V8Console::debugCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportCall(ConsoleAPIType::kDebug);
+}
+
+void V8Console::errorCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportCall(ConsoleAPIType::kError);
+}
+
+void V8Console::infoCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportCall(ConsoleAPIType::kInfo);
+}
+
+void V8Console::logCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportCall(ConsoleAPIType::kLog);
+}
+
+void V8Console::warnCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportCall(ConsoleAPIType::kWarning);
+}
+
+void V8Console::dirCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportCall(ConsoleAPIType::kDir);
+}
+
+void V8Console::dirxmlCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportCall(ConsoleAPIType::kDirXML);
+}
+
+void V8Console::tableCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportCall(ConsoleAPIType::kTable);
+}
+
+void V8Console::traceCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportCallWithDefaultArgument(ConsoleAPIType::kTrace,
+ String16("console.trace"));
+}
+
+void V8Console::groupCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportCallWithDefaultArgument(ConsoleAPIType::kStartGroup,
+ String16("console.group"));
+}
+
+void V8Console::groupCollapsedCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportCallWithDefaultArgument(
+ ConsoleAPIType::kStartGroupCollapsed, String16("console.groupCollapsed"));
+}
+
+void V8Console::groupEndCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportCallWithDefaultArgument(
+ ConsoleAPIType::kEndGroup, String16("console.groupEnd"));
+}
+
+void V8Console::clearCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportCallWithDefaultArgument(ConsoleAPIType::kClear,
+ String16("console.clear"));
+}
+
+void V8Console::countCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper helper(info);
+
+ String16 title = helper.firstArgToString(String16());
+ String16 identifier;
+ if (title.isEmpty()) {
+ std::unique_ptr<V8StackTraceImpl> stackTrace =
+ V8StackTraceImpl::capture(nullptr, 0, 1);
+ if (stackTrace && !stackTrace->isEmpty()) {
+ identifier = toString16(stackTrace->topSourceURL()) + ":" +
+ String16::fromInteger(stackTrace->topLineNumber());
+ }
+ } else {
+ identifier = title + "@";
+ }
+
+ v8::Local<v8::Map> countMap;
+ if (!helper.privateMap("V8Console#countMap").ToLocal(&countMap)) return;
+ int32_t count = helper.getIntFromMap(countMap, identifier, 0) + 1;
+ helper.setIntOnMap(countMap, identifier, count);
+ helper.reportCallWithArgument(ConsoleAPIType::kCount,
+ title + ": " + String16::fromInteger(count));
+}
+
+void V8Console::assertCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper helper(info);
+ if (helper.firstArgToBoolean(false)) return;
+
+ std::vector<v8::Local<v8::Value>> arguments;
+ for (int i = 1; i < info.Length(); ++i) arguments.push_back(info[i]);
+ if (info.Length() < 2)
+ arguments.push_back(
+ toV8String(info.GetIsolate(), String16("console.assert")));
+ helper.reportCall(ConsoleAPIType::kAssert, arguments);
+
+ if (V8DebuggerAgentImpl* debuggerAgent = helper.debuggerAgent())
+ debuggerAgent->breakProgramOnException(
+ protocol::Debugger::Paused::ReasonEnum::Assert, nullptr);
+}
+
+void V8Console::markTimelineCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportDeprecatedCall("V8Console#markTimelineDeprecated",
+ "'console.markTimeline' is "
+ "deprecated. Please use "
+ "'console.timeStamp' instead.");
+ timeStampCallback(info);
+}
+
+void V8Console::profileCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper helper(info);
+ if (V8ProfilerAgentImpl* profilerAgent = helper.profilerAgent())
+ profilerAgent->consoleProfile(helper.firstArgToString(String16()));
+}
+
+void V8Console::profileEndCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper helper(info);
+ if (V8ProfilerAgentImpl* profilerAgent = helper.profilerAgent())
+ profilerAgent->consoleProfileEnd(helper.firstArgToString(String16()));
+}
+
+static void timeFunction(const v8::FunctionCallbackInfo<v8::Value>& info,
+ bool timelinePrefix) {
+ ConsoleHelper helper(info);
+ if (V8InspectorClient* client = helper.ensureDebuggerClient()) {
+ String16 protocolTitle = helper.firstArgToString("default");
+ if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
+ client->consoleTime(toStringView(protocolTitle));
+
+ v8::Local<v8::Map> timeMap;
+ if (!helper.privateMap("V8Console#timeMap").ToLocal(&timeMap)) return;
+ helper.setDoubleOnMap(timeMap, protocolTitle, client->currentTimeMS());
+ }
+}
+
+static void timeEndFunction(const v8::FunctionCallbackInfo<v8::Value>& info,
+ bool timelinePrefix) {
+ ConsoleHelper helper(info);
+ if (V8InspectorClient* client = helper.ensureDebuggerClient()) {
+ String16 protocolTitle = helper.firstArgToString("default");
+ if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
+ client->consoleTimeEnd(toStringView(protocolTitle));
+
+ v8::Local<v8::Map> timeMap;
+ if (!helper.privateMap("V8Console#timeMap").ToLocal(&timeMap)) return;
+ double elapsed = client->currentTimeMS() -
+ helper.getDoubleFromMap(timeMap, protocolTitle, 0.0);
+ String16 message =
+ protocolTitle + ": " + String16::fromDouble(elapsed, 3) + "ms";
+ helper.reportCallWithArgument(ConsoleAPIType::kTimeEnd, message);
+ }
+}
+
+void V8Console::timelineCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportDeprecatedCall(
+ "V8Console#timeline",
+ "'console.timeline' is deprecated. Please use 'console.time' instead.");
+ timeFunction(info, true);
+}
+
+void V8Console::timelineEndCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper(info).reportDeprecatedCall("V8Console#timelineEnd",
+ "'console.timelineEnd' is "
+ "deprecated. Please use "
+ "'console.timeEnd' instead.");
+ timeEndFunction(info, true);
+}
+
+void V8Console::timeCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ timeFunction(info, false);
+}
+
+void V8Console::timeEndCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ timeEndFunction(info, false);
+}
+
+void V8Console::timeStampCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper helper(info);
+ if (V8InspectorClient* client = helper.ensureDebuggerClient()) {
+ String16 title = helper.firstArgToString(String16());
+ client->consoleTimeStamp(toStringView(title));
+ }
+}
+
+void V8Console::memoryGetterCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ if (V8InspectorClient* client = ConsoleHelper(info).ensureDebuggerClient()) {
+ v8::Local<v8::Value> memoryValue;
+ if (!client
+ ->memoryInfo(info.GetIsolate(),
+ info.GetIsolate()->GetCurrentContext())
+ .ToLocal(&memoryValue))
+ return;
+ info.GetReturnValue().Set(memoryValue);
+ }
+}
+
+void V8Console::memorySetterCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ // We can't make the attribute readonly as it breaks existing code that relies
+ // on being able to assign to console.memory in strict mode. Instead, the
+ // setter just ignores the passed value. http://crbug.com/468611
+}
+
+void V8Console::keysCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ info.GetReturnValue().Set(v8::Array::New(isolate));
+
+ ConsoleHelper helper(info);
+ v8::Local<v8::Object> obj;
+ if (!helper.firstArgAsObject().ToLocal(&obj)) return;
+ v8::Local<v8::Array> names;
+ if (!obj->GetOwnPropertyNames(isolate->GetCurrentContext()).ToLocal(&names))
+ return;
+ info.GetReturnValue().Set(names);
+}
+
+void V8Console::valuesCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ info.GetReturnValue().Set(v8::Array::New(isolate));
+
+ ConsoleHelper helper(info);
+ v8::Local<v8::Object> obj;
+ if (!helper.firstArgAsObject().ToLocal(&obj)) return;
+ v8::Local<v8::Array> names;
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ if (!obj->GetOwnPropertyNames(context).ToLocal(&names)) return;
+ v8::Local<v8::Array> values = v8::Array::New(isolate, names->Length());
+ for (uint32_t i = 0; i < names->Length(); ++i) {
+ v8::Local<v8::Value> key;
+ if (!names->Get(context, i).ToLocal(&key)) continue;
+ v8::Local<v8::Value> value;
+ if (!obj->Get(context, key).ToLocal(&value)) continue;
+ createDataProperty(context, values, i, value);
+ }
+ info.GetReturnValue().Set(values);
+}
+
+static void setFunctionBreakpoint(ConsoleHelper& helper,
+ v8::Local<v8::Function> function,
+ V8DebuggerAgentImpl::BreakpointSource source,
+ const String16& condition, bool enable) {
+ V8DebuggerAgentImpl* debuggerAgent = helper.debuggerAgent();
+ if (!debuggerAgent) return;
+ String16 scriptId = String16::fromInteger(function->ScriptId());
+ int lineNumber = function->GetScriptLineNumber();
+ int columnNumber = function->GetScriptColumnNumber();
+ if (lineNumber == v8::Function::kLineOffsetNotFound ||
+ columnNumber == v8::Function::kLineOffsetNotFound)
+ return;
+ if (enable)
+ debuggerAgent->setBreakpointAt(scriptId, lineNumber, columnNumber, source,
+ condition);
+ else
+ debuggerAgent->removeBreakpointAt(scriptId, lineNumber, columnNumber,
+ source);
+}
+
+void V8Console::debugFunctionCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper helper(info);
+ v8::Local<v8::Function> function;
+ if (!helper.firstArgAsFunction().ToLocal(&function)) return;
+ setFunctionBreakpoint(helper, function,
+ V8DebuggerAgentImpl::DebugCommandBreakpointSource,
+ String16(), true);
+}
+
+void V8Console::undebugFunctionCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper helper(info);
+ v8::Local<v8::Function> function;
+ if (!helper.firstArgAsFunction().ToLocal(&function)) return;
+ setFunctionBreakpoint(helper, function,
+ V8DebuggerAgentImpl::DebugCommandBreakpointSource,
+ String16(), false);
+}
+
+void V8Console::monitorFunctionCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper helper(info);
+ v8::Local<v8::Function> function;
+ if (!helper.firstArgAsFunction().ToLocal(&function)) return;
+ v8::Local<v8::Value> name = function->GetName();
+ if (!name->IsString() || !v8::Local<v8::String>::Cast(name)->Length())
+ name = function->GetInferredName();
+ String16 functionName = toProtocolStringWithTypeCheck(name);
+ String16Builder builder;
+ builder.append("console.log(\"function ");
+ if (functionName.isEmpty())
+ builder.append("(anonymous function)");
+ else
+ builder.append(functionName);
+ builder.append(
+ " called\" + (arguments.length > 0 ? \" with arguments: \" + "
+ "Array.prototype.join.call(arguments, \", \") : \"\")) && false");
+ setFunctionBreakpoint(helper, function,
+ V8DebuggerAgentImpl::MonitorCommandBreakpointSource,
+ builder.toString(), true);
+}
+
+void V8Console::unmonitorFunctionCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper helper(info);
+ v8::Local<v8::Function> function;
+ if (!helper.firstArgAsFunction().ToLocal(&function)) return;
+ setFunctionBreakpoint(helper, function,
+ V8DebuggerAgentImpl::MonitorCommandBreakpointSource,
+ String16(), false);
+}
+
+void V8Console::lastEvaluationResultCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ConsoleHelper helper(info);
+ InspectedContext* context = helper.ensureInspectedContext();
+ if (!context) return;
+ if (InjectedScript* injectedScript = context->getInjectedScript())
+ info.GetReturnValue().Set(injectedScript->lastEvaluationResult());
+}
+
+static void inspectImpl(const v8::FunctionCallbackInfo<v8::Value>& info,
+ bool copyToClipboard) {
+ if (info.Length() < 1) return;
+ if (!copyToClipboard) info.GetReturnValue().Set(info[0]);
+
+ ConsoleHelper helper(info);
+ InspectedContext* context = helper.ensureInspectedContext();
+ if (!context) return;
+ InjectedScript* injectedScript = context->getInjectedScript();
+ if (!injectedScript) return;
+ ErrorString errorString;
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrappedObject =
+ injectedScript->wrapObject(&errorString, info[0], "",
+ false /** forceValueType */,
+ false /** generatePreview */);
+ if (!wrappedObject || !errorString.isEmpty()) return;
+
+ std::unique_ptr<protocol::DictionaryValue> hints =
+ protocol::DictionaryValue::create();
+ if (copyToClipboard) hints->setBoolean("copyToClipboard", true);
+ if (V8InspectorSessionImpl* session = helper.currentSession())
+ session->runtimeAgent()->inspect(std::move(wrappedObject),
+ std::move(hints));
+}
+
+void V8Console::inspectCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ inspectImpl(info, false);
+}
+
+void V8Console::copyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ inspectImpl(info, true);
+}
+
+void V8Console::inspectedObject(const v8::FunctionCallbackInfo<v8::Value>& info,
+ unsigned num) {
+ DCHECK(num < V8InspectorSessionImpl::kInspectedObjectBufferSize);
+ ConsoleHelper helper(info);
+ if (V8InspectorSessionImpl* session = helper.currentSession()) {
+ V8InspectorSession::Inspectable* object = session->inspectedObject(num);
+ v8::Isolate* isolate = info.GetIsolate();
+ if (object)
+ info.GetReturnValue().Set(object->get(isolate->GetCurrentContext()));
+ else
+ info.GetReturnValue().Set(v8::Undefined(isolate));
+ }
+}
+
+v8::Local<v8::Object> V8Console::createConsole(
+ InspectedContext* inspectedContext, bool hasMemoryAttribute) {
+ v8::Local<v8::Context> context = inspectedContext->context();
+ v8::Context::Scope contextScope(context);
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::MicrotasksScope microtasksScope(isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+
+ v8::Local<v8::Object> console = v8::Object::New(isolate);
+ bool success =
+ console->SetPrototype(context, v8::Object::New(isolate)).FromMaybe(false);
+ DCHECK(success);
+ USE(success);
+
+ createBoundFunctionProperty(context, console, "debug",
+ V8Console::debugCallback);
+ createBoundFunctionProperty(context, console, "error",
+ V8Console::errorCallback);
+ createBoundFunctionProperty(context, console, "info",
+ V8Console::infoCallback);
+ createBoundFunctionProperty(context, console, "log", V8Console::logCallback);
+ createBoundFunctionProperty(context, console, "warn",
+ V8Console::warnCallback);
+ createBoundFunctionProperty(context, console, "dir", V8Console::dirCallback);
+ createBoundFunctionProperty(context, console, "dirxml",
+ V8Console::dirxmlCallback);
+ createBoundFunctionProperty(context, console, "table",
+ V8Console::tableCallback);
+ createBoundFunctionProperty(context, console, "trace",
+ V8Console::traceCallback);
+ createBoundFunctionProperty(context, console, "group",
+ V8Console::groupCallback);
+ createBoundFunctionProperty(context, console, "groupCollapsed",
+ V8Console::groupCollapsedCallback);
+ createBoundFunctionProperty(context, console, "groupEnd",
+ V8Console::groupEndCallback);
+ createBoundFunctionProperty(context, console, "clear",
+ V8Console::clearCallback);
+ createBoundFunctionProperty(context, console, "count",
+ V8Console::countCallback);
+ createBoundFunctionProperty(context, console, "assert",
+ V8Console::assertCallback);
+ createBoundFunctionProperty(context, console, "markTimeline",
+ V8Console::markTimelineCallback);
+ createBoundFunctionProperty(context, console, "profile",
+ V8Console::profileCallback);
+ createBoundFunctionProperty(context, console, "profileEnd",
+ V8Console::profileEndCallback);
+ createBoundFunctionProperty(context, console, "timeline",
+ V8Console::timelineCallback);
+ createBoundFunctionProperty(context, console, "timelineEnd",
+ V8Console::timelineEndCallback);
+ createBoundFunctionProperty(context, console, "time",
+ V8Console::timeCallback);
+ createBoundFunctionProperty(context, console, "timeEnd",
+ V8Console::timeEndCallback);
+ createBoundFunctionProperty(context, console, "timeStamp",
+ V8Console::timeStampCallback);
+
+ if (hasMemoryAttribute)
+ console->SetAccessorProperty(
+ toV8StringInternalized(isolate, "memory"),
+ v8::Function::New(context, V8Console::memoryGetterCallback, console, 0,
+ v8::ConstructorBehavior::kThrow)
+ .ToLocalChecked(),
+ v8::Function::New(context, V8Console::memorySetterCallback,
+ v8::Local<v8::Value>(), 0,
+ v8::ConstructorBehavior::kThrow)
+ .ToLocalChecked(),
+ static_cast<v8::PropertyAttribute>(v8::None), v8::DEFAULT);
+
+ console->SetPrivate(context, inspectedContextPrivateKey(isolate),
+ v8::External::New(isolate, inspectedContext));
+ return console;
+}
+
+void V8Console::clearInspectedContextIfNeeded(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> console) {
+ v8::Isolate* isolate = context->GetIsolate();
+ console->SetPrivate(context, inspectedContextPrivateKey(isolate),
+ v8::External::New(isolate, nullptr));
+}
+
+v8::Local<v8::Object> V8Console::createCommandLineAPI(
+ InspectedContext* inspectedContext) {
+ v8::Local<v8::Context> context = inspectedContext->context();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::MicrotasksScope microtasksScope(isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+
+ v8::Local<v8::Object> commandLineAPI = v8::Object::New(isolate);
+ bool success =
+ commandLineAPI->SetPrototype(context, v8::Null(isolate)).FromMaybe(false);
+ DCHECK(success);
+ USE(success);
+
+ createBoundFunctionProperty(context, commandLineAPI, "dir",
+ V8Console::dirCallback,
+ "function dir(value) { [Command Line API] }");
+ createBoundFunctionProperty(context, commandLineAPI, "dirxml",
+ V8Console::dirxmlCallback,
+ "function dirxml(value) { [Command Line API] }");
+ createBoundFunctionProperty(context, commandLineAPI, "profile",
+ V8Console::profileCallback,
+ "function profile(title) { [Command Line API] }");
+ createBoundFunctionProperty(
+ context, commandLineAPI, "profileEnd", V8Console::profileEndCallback,
+ "function profileEnd(title) { [Command Line API] }");
+ createBoundFunctionProperty(context, commandLineAPI, "clear",
+ V8Console::clearCallback,
+ "function clear() { [Command Line API] }");
+ createBoundFunctionProperty(
+ context, commandLineAPI, "table", V8Console::tableCallback,
+ "function table(data, [columns]) { [Command Line API] }");
+
+ createBoundFunctionProperty(context, commandLineAPI, "keys",
+ V8Console::keysCallback,
+ "function keys(object) { [Command Line API] }");
+ createBoundFunctionProperty(context, commandLineAPI, "values",
+ V8Console::valuesCallback,
+ "function values(object) { [Command Line API] }");
+ createBoundFunctionProperty(
+ context, commandLineAPI, "debug", V8Console::debugFunctionCallback,
+ "function debug(function) { [Command Line API] }");
+ createBoundFunctionProperty(
+ context, commandLineAPI, "undebug", V8Console::undebugFunctionCallback,
+ "function undebug(function) { [Command Line API] }");
+ createBoundFunctionProperty(
+ context, commandLineAPI, "monitor", V8Console::monitorFunctionCallback,
+ "function monitor(function) { [Command Line API] }");
+ createBoundFunctionProperty(
+ context, commandLineAPI, "unmonitor",
+ V8Console::unmonitorFunctionCallback,
+ "function unmonitor(function) { [Command Line API] }");
+ createBoundFunctionProperty(
+ context, commandLineAPI, "inspect", V8Console::inspectCallback,
+ "function inspect(object) { [Command Line API] }");
+ createBoundFunctionProperty(context, commandLineAPI, "copy",
+ V8Console::copyCallback,
+ "function copy(value) { [Command Line API] }");
+ createBoundFunctionProperty(context, commandLineAPI, "$_",
+ V8Console::lastEvaluationResultCallback);
+ createBoundFunctionProperty(context, commandLineAPI, "$0",
+ V8Console::inspectedObject0);
+ createBoundFunctionProperty(context, commandLineAPI, "$1",
+ V8Console::inspectedObject1);
+ createBoundFunctionProperty(context, commandLineAPI, "$2",
+ V8Console::inspectedObject2);
+ createBoundFunctionProperty(context, commandLineAPI, "$3",
+ V8Console::inspectedObject3);
+ createBoundFunctionProperty(context, commandLineAPI, "$4",
+ V8Console::inspectedObject4);
+
+ inspectedContext->inspector()->client()->installAdditionalCommandLineAPI(
+ context, commandLineAPI);
+
+ commandLineAPI->SetPrivate(context, inspectedContextPrivateKey(isolate),
+ v8::External::New(isolate, inspectedContext));
+ return commandLineAPI;
+}
+
+static bool isCommandLineAPIGetter(const String16& name) {
+ if (name.length() != 2) return false;
+ // $0 ... $4, $_
+ return name[0] == '$' &&
+ ((name[1] >= '0' && name[1] <= '4') || name[1] == '_');
+}
+
+void V8Console::CommandLineAPIScope::accessorGetterCallback(
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CommandLineAPIScope* scope = static_cast<CommandLineAPIScope*>(
+ info.Data().As<v8::External>()->Value());
+ DCHECK(scope);
+
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ if (scope->m_cleanup) {
+ bool removed = info.Holder()->Delete(context, name).FromMaybe(false);
+ DCHECK(removed);
+ USE(removed);
+ return;
+ }
+ v8::Local<v8::Object> commandLineAPI = scope->m_commandLineAPI;
+
+ v8::Local<v8::Value> value;
+ if (!commandLineAPI->Get(context, name).ToLocal(&value)) return;
+ if (isCommandLineAPIGetter(toProtocolStringWithTypeCheck(name))) {
+ DCHECK(value->IsFunction());
+ v8::MicrotasksScope microtasks(info.GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ if (value.As<v8::Function>()
+ ->Call(context, commandLineAPI, 0, nullptr)
+ .ToLocal(&value))
+ info.GetReturnValue().Set(value);
+ } else {
+ info.GetReturnValue().Set(value);
+ }
+}
+
+void V8Console::CommandLineAPIScope::accessorSetterCallback(
+ v8::Local<v8::Name> name, v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ CommandLineAPIScope* scope = static_cast<CommandLineAPIScope*>(
+ info.Data().As<v8::External>()->Value());
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ if (!info.Holder()->Delete(context, name).FromMaybe(false)) return;
+ if (!info.Holder()->CreateDataProperty(context, name, value).FromMaybe(false))
+ return;
+ bool removed =
+ scope->m_installedMethods->Delete(context, name).FromMaybe(false);
+ DCHECK(removed);
+ USE(removed);
+}
+
+V8Console::CommandLineAPIScope::CommandLineAPIScope(
+ v8::Local<v8::Context> context, v8::Local<v8::Object> commandLineAPI,
+ v8::Local<v8::Object> global)
+ : m_context(context),
+ m_commandLineAPI(commandLineAPI),
+ m_global(global),
+ m_installedMethods(v8::Set::New(context->GetIsolate())),
+ m_cleanup(false) {
+ v8::Local<v8::Array> names;
+ if (!m_commandLineAPI->GetOwnPropertyNames(context).ToLocal(&names)) return;
+ v8::Local<v8::External> externalThis =
+ v8::External::New(context->GetIsolate(), this);
+ for (uint32_t i = 0; i < names->Length(); ++i) {
+ v8::Local<v8::Value> name;
+ if (!names->Get(context, i).ToLocal(&name) || !name->IsName()) continue;
+ if (m_global->Has(context, name).FromMaybe(true)) continue;
+ if (!m_installedMethods->Add(context, name).ToLocal(&m_installedMethods))
+ continue;
+ if (!m_global
+ ->SetAccessor(context, v8::Local<v8::Name>::Cast(name),
+ CommandLineAPIScope::accessorGetterCallback,
+ CommandLineAPIScope::accessorSetterCallback,
+ externalThis, v8::DEFAULT, v8::DontEnum)
+ .FromMaybe(false)) {
+ bool removed = m_installedMethods->Delete(context, name).FromMaybe(false);
+ DCHECK(removed);
+ USE(removed);
+ continue;
+ }
+ }
+}
+
+V8Console::CommandLineAPIScope::~CommandLineAPIScope() {
+ m_cleanup = true;
+ v8::Local<v8::Array> names = m_installedMethods->AsArray();
+ for (uint32_t i = 0; i < names->Length(); ++i) {
+ v8::Local<v8::Value> name;
+ if (!names->Get(m_context, i).ToLocal(&name) || !name->IsName()) continue;
+ if (name->IsString()) {
+ v8::Local<v8::Value> descriptor;
+ bool success = m_global
+ ->GetOwnPropertyDescriptor(
+ m_context, v8::Local<v8::String>::Cast(name))
+ .ToLocal(&descriptor);
+ DCHECK(success);
+ USE(success);
+ }
+ }
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-console.h b/deps/v8/src/inspector/v8-console.h
new file mode 100644
index 0000000000..c643d49a41
--- /dev/null
+++ b/deps/v8/src/inspector/v8-console.h
@@ -0,0 +1,119 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8CONSOLE_H_
+#define V8_INSPECTOR_V8CONSOLE_H_
+
+#include "src/base/macros.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class InspectedContext;
+
+// Console API
+// https://console.spec.whatwg.org/#console-interface
+class V8Console {
+ public:
+ static v8::Local<v8::Object> createConsole(InspectedContext*,
+ bool hasMemoryAttribute);
+ static void clearInspectedContextIfNeeded(v8::Local<v8::Context>,
+ v8::Local<v8::Object> console);
+ static v8::Local<v8::Object> createCommandLineAPI(InspectedContext*);
+
+ class CommandLineAPIScope {
+ public:
+ CommandLineAPIScope(v8::Local<v8::Context>,
+ v8::Local<v8::Object> commandLineAPI,
+ v8::Local<v8::Object> global);
+ ~CommandLineAPIScope();
+
+ private:
+ static void accessorGetterCallback(
+ v8::Local<v8::Name>, const v8::PropertyCallbackInfo<v8::Value>&);
+ static void accessorSetterCallback(v8::Local<v8::Name>,
+ v8::Local<v8::Value>,
+ const v8::PropertyCallbackInfo<void>&);
+
+ v8::Local<v8::Context> m_context;
+ v8::Local<v8::Object> m_commandLineAPI;
+ v8::Local<v8::Object> m_global;
+ v8::Local<v8::Set> m_installedMethods;
+ bool m_cleanup;
+
+ DISALLOW_COPY_AND_ASSIGN(CommandLineAPIScope);
+ };
+
+ private:
+ static void debugCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void errorCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void infoCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void logCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void warnCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void dirCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void dirxmlCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void tableCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void traceCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void groupCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void groupCollapsedCallback(
+ const v8::FunctionCallbackInfo<v8::Value>&);
+ static void groupEndCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void clearCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void countCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void assertCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void markTimelineCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void profileCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void profileEndCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void timelineCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void timelineEndCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void timeCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void timeEndCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void timeStampCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ // TODO(foolip): There is no spec for the Memory Info API, see blink-dev:
+ // https://groups.google.com/a/chromium.org/d/msg/blink-dev/g5YRCGpC9vs/b4OJz71NmPwJ
+ static void memoryGetterCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void memorySetterCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+
+ // CommandLineAPI
+ static void keysCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void valuesCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void debugFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void undebugFunctionCallback(
+ const v8::FunctionCallbackInfo<v8::Value>&);
+ static void monitorFunctionCallback(
+ const v8::FunctionCallbackInfo<v8::Value>&);
+ static void unmonitorFunctionCallback(
+ const v8::FunctionCallbackInfo<v8::Value>&);
+ static void lastEvaluationResultCallback(
+ const v8::FunctionCallbackInfo<v8::Value>&);
+ static void inspectCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void copyCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void inspectedObject(const v8::FunctionCallbackInfo<v8::Value>&,
+ unsigned num);
+ static void inspectedObject0(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ inspectedObject(info, 0);
+ }
+ static void inspectedObject1(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ inspectedObject(info, 1);
+ }
+ static void inspectedObject2(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ inspectedObject(info, 2);
+ }
+ static void inspectedObject3(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ inspectedObject(info, 3);
+ }
+ static void inspectedObject4(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ inspectedObject(info, 4);
+ }
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8CONSOLE_H_
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
new file mode 100644
index 0000000000..80e261119e
--- /dev/null
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -0,0 +1,1255 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-debugger-agent-impl.h"
+
+#include <algorithm>
+
+#include "src/inspector/injected-script.h"
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/java-script-call-frame.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/remote-object-id.h"
+#include "src/inspector/script-breakpoint.h"
+#include "src/inspector/search-util.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger-script.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-regex.h"
+#include "src/inspector/v8-runtime-agent-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+using protocol::Array;
+using protocol::Maybe;
+using protocol::Debugger::BreakpointId;
+using protocol::Debugger::CallFrame;
+using protocol::Runtime::ExceptionDetails;
+using protocol::Runtime::ScriptId;
+using protocol::Runtime::StackTrace;
+using protocol::Runtime::RemoteObject;
+
+namespace DebuggerAgentState {
+static const char javaScriptBreakpoints[] = "javaScriptBreakopints";
+static const char pauseOnExceptionsState[] = "pauseOnExceptionsState";
+static const char asyncCallStackDepth[] = "asyncCallStackDepth";
+static const char blackboxPattern[] = "blackboxPattern";
+static const char debuggerEnabled[] = "debuggerEnabled";
+
+// Breakpoint properties.
+static const char url[] = "url";
+static const char isRegex[] = "isRegex";
+static const char lineNumber[] = "lineNumber";
+static const char columnNumber[] = "columnNumber";
+static const char condition[] = "condition";
+static const char skipAllPauses[] = "skipAllPauses";
+
+} // namespace DebuggerAgentState
+
+static const int maxSkipStepFrameCount = 128;
+static const char backtraceObjectGroup[] = "backtrace";
+
+static String16 breakpointIdSuffix(
+ V8DebuggerAgentImpl::BreakpointSource source) {
+ switch (source) {
+ case V8DebuggerAgentImpl::UserBreakpointSource:
+ break;
+ case V8DebuggerAgentImpl::DebugCommandBreakpointSource:
+ return ":debug";
+ case V8DebuggerAgentImpl::MonitorCommandBreakpointSource:
+ return ":monitor";
+ }
+ return String16();
+}
+
+static String16 generateBreakpointId(
+ const String16& scriptId, int lineNumber, int columnNumber,
+ V8DebuggerAgentImpl::BreakpointSource source) {
+ return scriptId + ":" + String16::fromInteger(lineNumber) + ":" +
+ String16::fromInteger(columnNumber) + breakpointIdSuffix(source);
+}
+
+static bool positionComparator(const std::pair<int, int>& a,
+ const std::pair<int, int>& b) {
+ if (a.first != b.first) return a.first < b.first;
+ return a.second < b.second;
+}
+
+static bool hasInternalError(ErrorString* errorString, bool hasError) {
+ if (hasError) *errorString = "Internal error";
+ return hasError;
+}
+
+static std::unique_ptr<protocol::Debugger::Location> buildProtocolLocation(
+ const String16& scriptId, int lineNumber, int columnNumber) {
+ return protocol::Debugger::Location::create()
+ .setScriptId(scriptId)
+ .setLineNumber(lineNumber)
+ .setColumnNumber(columnNumber)
+ .build();
+}
+
+V8DebuggerAgentImpl::V8DebuggerAgentImpl(
+ V8InspectorSessionImpl* session, protocol::FrontendChannel* frontendChannel,
+ protocol::DictionaryValue* state)
+ : m_inspector(session->inspector()),
+ m_debugger(m_inspector->debugger()),
+ m_session(session),
+ m_enabled(false),
+ m_state(state),
+ m_frontend(frontendChannel),
+ m_isolate(m_inspector->isolate()),
+ m_breakReason(protocol::Debugger::Paused::ReasonEnum::Other),
+ m_scheduledDebuggerStep(NoStep),
+ m_skipNextDebuggerStepOut(false),
+ m_javaScriptPauseScheduled(false),
+ m_steppingFromFramework(false),
+ m_pausingOnNativeEvent(false),
+ m_skippedStepFrameCount(0),
+ m_recursionLevelForStepOut(0),
+ m_recursionLevelForStepFrame(0),
+ m_skipAllPauses(false) {
+ clearBreakDetails();
+}
+
+V8DebuggerAgentImpl::~V8DebuggerAgentImpl() {}
+
+bool V8DebuggerAgentImpl::checkEnabled(ErrorString* errorString) {
+ if (enabled()) return true;
+ *errorString = "Debugger agent is not enabled";
+ return false;
+}
+
+void V8DebuggerAgentImpl::enable() {
+ // m_inspector->addListener may result in reporting all parsed scripts to
+ // the agent so it should already be in enabled state by then.
+ m_enabled = true;
+ m_state->setBoolean(DebuggerAgentState::debuggerEnabled, true);
+ m_debugger->enable();
+
+ std::vector<std::unique_ptr<V8DebuggerScript>> compiledScripts;
+ m_debugger->getCompiledScripts(m_session->contextGroupId(), compiledScripts);
+ for (size_t i = 0; i < compiledScripts.size(); i++)
+ didParseSource(std::move(compiledScripts[i]), true);
+
+ // FIXME(WK44513): breakpoints activated flag should be synchronized between
+ // all front-ends
+ m_debugger->setBreakpointsActivated(true);
+}
+
+bool V8DebuggerAgentImpl::enabled() { return m_enabled; }
+
+void V8DebuggerAgentImpl::enable(ErrorString* errorString) {
+ if (enabled()) return;
+
+ if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId())) {
+ *errorString = "Script execution is prohibited";
+ return;
+ }
+
+ enable();
+}
+
+void V8DebuggerAgentImpl::disable(ErrorString*) {
+ if (!enabled()) return;
+
+ m_state->setObject(DebuggerAgentState::javaScriptBreakpoints,
+ protocol::DictionaryValue::create());
+ m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState,
+ V8Debugger::DontPauseOnExceptions);
+ m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, 0);
+
+ if (!m_pausedContext.IsEmpty()) m_debugger->continueProgram();
+ m_debugger->disable();
+ m_pausedContext.Reset();
+ JavaScriptCallFrames emptyCallFrames;
+ m_pausedCallFrames.swap(emptyCallFrames);
+ m_scripts.clear();
+ m_blackboxedPositions.clear();
+ m_breakpointIdToDebuggerBreakpointIds.clear();
+ m_debugger->setAsyncCallStackDepth(this, 0);
+ m_continueToLocationBreakpointId = String16();
+ clearBreakDetails();
+ m_scheduledDebuggerStep = NoStep;
+ m_skipNextDebuggerStepOut = false;
+ m_javaScriptPauseScheduled = false;
+ m_steppingFromFramework = false;
+ m_pausingOnNativeEvent = false;
+ m_skippedStepFrameCount = 0;
+ m_recursionLevelForStepFrame = 0;
+ m_skipAllPauses = false;
+ m_blackboxPattern = nullptr;
+ m_state->remove(DebuggerAgentState::blackboxPattern);
+ m_enabled = false;
+ m_state->setBoolean(DebuggerAgentState::debuggerEnabled, false);
+}
+
+void V8DebuggerAgentImpl::restore() {
+ DCHECK(!m_enabled);
+ if (!m_state->booleanProperty(DebuggerAgentState::debuggerEnabled, false))
+ return;
+ if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId()))
+ return;
+
+ enable();
+ ErrorString error;
+
+ int pauseState = V8Debugger::DontPauseOnExceptions;
+ m_state->getInteger(DebuggerAgentState::pauseOnExceptionsState, &pauseState);
+ setPauseOnExceptionsImpl(&error, pauseState);
+ DCHECK(error.isEmpty());
+
+ m_skipAllPauses =
+ m_state->booleanProperty(DebuggerAgentState::skipAllPauses, false);
+
+ int asyncCallStackDepth = 0;
+ m_state->getInteger(DebuggerAgentState::asyncCallStackDepth,
+ &asyncCallStackDepth);
+ m_debugger->setAsyncCallStackDepth(this, asyncCallStackDepth);
+
+ String16 blackboxPattern;
+ if (m_state->getString(DebuggerAgentState::blackboxPattern,
+ &blackboxPattern)) {
+ if (!setBlackboxPattern(&error, blackboxPattern)) UNREACHABLE();
+ }
+}
+
+void V8DebuggerAgentImpl::setBreakpointsActive(ErrorString* errorString,
+ bool active) {
+ if (!checkEnabled(errorString)) return;
+ m_debugger->setBreakpointsActivated(active);
+}
+
+void V8DebuggerAgentImpl::setSkipAllPauses(ErrorString*, bool skip) {
+ m_skipAllPauses = skip;
+ m_state->setBoolean(DebuggerAgentState::skipAllPauses, m_skipAllPauses);
+}
+
+static std::unique_ptr<protocol::DictionaryValue>
+buildObjectForBreakpointCookie(const String16& url, int lineNumber,
+ int columnNumber, const String16& condition,
+ bool isRegex) {
+ std::unique_ptr<protocol::DictionaryValue> breakpointObject =
+ protocol::DictionaryValue::create();
+ breakpointObject->setString(DebuggerAgentState::url, url);
+ breakpointObject->setInteger(DebuggerAgentState::lineNumber, lineNumber);
+ breakpointObject->setInteger(DebuggerAgentState::columnNumber, columnNumber);
+ breakpointObject->setString(DebuggerAgentState::condition, condition);
+ breakpointObject->setBoolean(DebuggerAgentState::isRegex, isRegex);
+ return breakpointObject;
+}
+
+static bool matches(V8InspectorImpl* inspector, const String16& url,
+ const String16& pattern, bool isRegex) {
+ if (isRegex) {
+ V8Regex regex(inspector, pattern, true);
+ return regex.match(url) != -1;
+ }
+ return url == pattern;
+}
+
+void V8DebuggerAgentImpl::setBreakpointByUrl(
+ ErrorString* errorString, int lineNumber,
+ const Maybe<String16>& optionalURL, const Maybe<String16>& optionalURLRegex,
+ const Maybe<int>& optionalColumnNumber,
+ const Maybe<String16>& optionalCondition, String16* outBreakpointId,
+ std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations) {
+ *locations = Array<protocol::Debugger::Location>::create();
+ if (optionalURL.isJust() == optionalURLRegex.isJust()) {
+ *errorString = "Either url or urlRegex must be specified.";
+ return;
+ }
+
+ String16 url = optionalURL.isJust() ? optionalURL.fromJust()
+ : optionalURLRegex.fromJust();
+ int columnNumber = 0;
+ if (optionalColumnNumber.isJust()) {
+ columnNumber = optionalColumnNumber.fromJust();
+ if (columnNumber < 0) {
+ *errorString = "Incorrect column number";
+ return;
+ }
+ }
+ String16 condition = optionalCondition.fromMaybe("");
+ bool isRegex = optionalURLRegex.isJust();
+
+ String16 breakpointId = (isRegex ? "/" + url + "/" : url) + ":" +
+ String16::fromInteger(lineNumber) + ":" +
+ String16::fromInteger(columnNumber);
+ protocol::DictionaryValue* breakpointsCookie =
+ m_state->getObject(DebuggerAgentState::javaScriptBreakpoints);
+ if (!breakpointsCookie) {
+ std::unique_ptr<protocol::DictionaryValue> newValue =
+ protocol::DictionaryValue::create();
+ breakpointsCookie = newValue.get();
+ m_state->setObject(DebuggerAgentState::javaScriptBreakpoints,
+ std::move(newValue));
+ }
+ if (breakpointsCookie->get(breakpointId)) {
+ *errorString = "Breakpoint at specified location already exists.";
+ return;
+ }
+
+ breakpointsCookie->setObject(
+ breakpointId, buildObjectForBreakpointCookie(
+ url, lineNumber, columnNumber, condition, isRegex));
+
+ ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
+ for (const auto& script : m_scripts) {
+ if (!matches(m_inspector, script.second->sourceURL(), url, isRegex))
+ continue;
+ std::unique_ptr<protocol::Debugger::Location> location = resolveBreakpoint(
+ breakpointId, script.first, breakpoint, UserBreakpointSource);
+ if (location) (*locations)->addItem(std::move(location));
+ }
+
+ *outBreakpointId = breakpointId;
+}
+
+static bool parseLocation(
+ ErrorString* errorString,
+ std::unique_ptr<protocol::Debugger::Location> location, String16* scriptId,
+ int* lineNumber, int* columnNumber) {
+ *scriptId = location->getScriptId();
+ *lineNumber = location->getLineNumber();
+ *columnNumber = location->getColumnNumber(0);
+ return true;
+}
+
+void V8DebuggerAgentImpl::setBreakpoint(
+ ErrorString* errorString,
+ std::unique_ptr<protocol::Debugger::Location> location,
+ const Maybe<String16>& optionalCondition, String16* outBreakpointId,
+ std::unique_ptr<protocol::Debugger::Location>* actualLocation) {
+ String16 scriptId;
+ int lineNumber;
+ int columnNumber;
+
+ if (!parseLocation(errorString, std::move(location), &scriptId, &lineNumber,
+ &columnNumber))
+ return;
+
+ String16 condition = optionalCondition.fromMaybe("");
+
+ String16 breakpointId = generateBreakpointId(
+ scriptId, lineNumber, columnNumber, UserBreakpointSource);
+ if (m_breakpointIdToDebuggerBreakpointIds.find(breakpointId) !=
+ m_breakpointIdToDebuggerBreakpointIds.end()) {
+ *errorString = "Breakpoint at specified location already exists.";
+ return;
+ }
+ ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
+ *actualLocation = resolveBreakpoint(breakpointId, scriptId, breakpoint,
+ UserBreakpointSource);
+ if (*actualLocation)
+ *outBreakpointId = breakpointId;
+ else
+ *errorString = "Could not resolve breakpoint";
+}
+
+void V8DebuggerAgentImpl::removeBreakpoint(ErrorString* errorString,
+ const String16& breakpointId) {
+ if (!checkEnabled(errorString)) return;
+ protocol::DictionaryValue* breakpointsCookie =
+ m_state->getObject(DebuggerAgentState::javaScriptBreakpoints);
+ if (breakpointsCookie) breakpointsCookie->remove(breakpointId);
+ removeBreakpoint(breakpointId);
+}
+
+void V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
+ DCHECK(enabled());
+ BreakpointIdToDebuggerBreakpointIdsMap::iterator
+ debuggerBreakpointIdsIterator =
+ m_breakpointIdToDebuggerBreakpointIds.find(breakpointId);
+ if (debuggerBreakpointIdsIterator ==
+ m_breakpointIdToDebuggerBreakpointIds.end())
+ return;
+ const std::vector<String16>& ids = debuggerBreakpointIdsIterator->second;
+ for (size_t i = 0; i < ids.size(); ++i) {
+ const String16& debuggerBreakpointId = ids[i];
+
+ m_debugger->removeBreakpoint(debuggerBreakpointId);
+ m_serverBreakpoints.erase(debuggerBreakpointId);
+ }
+ m_breakpointIdToDebuggerBreakpointIds.erase(breakpointId);
+}
+
+void V8DebuggerAgentImpl::continueToLocation(
+ ErrorString* errorString,
+ std::unique_ptr<protocol::Debugger::Location> location) {
+ if (!checkEnabled(errorString)) return;
+ if (!m_continueToLocationBreakpointId.isEmpty()) {
+ m_debugger->removeBreakpoint(m_continueToLocationBreakpointId);
+ m_continueToLocationBreakpointId = "";
+ }
+
+ String16 scriptId;
+ int lineNumber;
+ int columnNumber;
+
+ if (!parseLocation(errorString, std::move(location), &scriptId, &lineNumber,
+ &columnNumber))
+ return;
+
+ ScriptBreakpoint breakpoint(lineNumber, columnNumber, "");
+ m_continueToLocationBreakpointId = m_debugger->setBreakpoint(
+ scriptId, breakpoint, &lineNumber, &columnNumber);
+ resume(errorString);
+}
+
+bool V8DebuggerAgentImpl::isCurrentCallStackEmptyOrBlackboxed() {
+ DCHECK(enabled());
+ JavaScriptCallFrames callFrames = m_debugger->currentCallFrames();
+ for (size_t index = 0; index < callFrames.size(); ++index) {
+ if (!isCallFrameWithUnknownScriptOrBlackboxed(callFrames[index].get()))
+ return false;
+ }
+ return true;
+}
+
+bool V8DebuggerAgentImpl::isTopPausedCallFrameBlackboxed() {
+ DCHECK(enabled());
+ JavaScriptCallFrame* frame =
+ m_pausedCallFrames.size() ? m_pausedCallFrames[0].get() : nullptr;
+ return isCallFrameWithUnknownScriptOrBlackboxed(frame);
+}
+
+bool V8DebuggerAgentImpl::isCallFrameWithUnknownScriptOrBlackboxed(
+ JavaScriptCallFrame* frame) {
+ if (!frame) return true;
+ ScriptsMap::iterator it =
+ m_scripts.find(String16::fromInteger(frame->sourceID()));
+ if (it == m_scripts.end()) {
+ // Unknown scripts are blackboxed.
+ return true;
+ }
+ if (m_blackboxPattern) {
+ const String16& scriptSourceURL = it->second->sourceURL();
+ if (!scriptSourceURL.isEmpty() &&
+ m_blackboxPattern->match(scriptSourceURL) != -1)
+ return true;
+ }
+ auto itBlackboxedPositions =
+ m_blackboxedPositions.find(String16::fromInteger(frame->sourceID()));
+ if (itBlackboxedPositions == m_blackboxedPositions.end()) return false;
+
+ const std::vector<std::pair<int, int>>& ranges =
+ itBlackboxedPositions->second;
+ auto itRange = std::lower_bound(
+ ranges.begin(), ranges.end(),
+ std::make_pair(frame->line(), frame->column()), positionComparator);
+ // Ranges array contains positions in script where blackbox state is changed.
+ // [(0,0) ... ranges[0]) isn't blackboxed, [ranges[0] ... ranges[1]) is
+ // blackboxed...
+ return std::distance(ranges.begin(), itRange) % 2;
+}
+
+V8DebuggerAgentImpl::SkipPauseRequest
+V8DebuggerAgentImpl::shouldSkipExceptionPause(
+ JavaScriptCallFrame* topCallFrame) {
+ if (m_steppingFromFramework) return RequestNoSkip;
+ if (isCallFrameWithUnknownScriptOrBlackboxed(topCallFrame))
+ return RequestContinue;
+ return RequestNoSkip;
+}
+
+V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::shouldSkipStepPause(
+ JavaScriptCallFrame* topCallFrame) {
+ if (m_steppingFromFramework) return RequestNoSkip;
+
+ if (m_skipNextDebuggerStepOut) {
+ m_skipNextDebuggerStepOut = false;
+ if (m_scheduledDebuggerStep == StepOut) return RequestStepOut;
+ }
+
+ if (!isCallFrameWithUnknownScriptOrBlackboxed(topCallFrame))
+ return RequestNoSkip;
+
+ if (m_skippedStepFrameCount >= maxSkipStepFrameCount) return RequestStepOut;
+
+ if (!m_skippedStepFrameCount) m_recursionLevelForStepFrame = 1;
+
+ ++m_skippedStepFrameCount;
+ return RequestStepFrame;
+}
+
+std::unique_ptr<protocol::Debugger::Location>
+V8DebuggerAgentImpl::resolveBreakpoint(const String16& breakpointId,
+ const String16& scriptId,
+ const ScriptBreakpoint& breakpoint,
+ BreakpointSource source) {
+ DCHECK(enabled());
+ // FIXME: remove these checks once crbug.com/520702 is resolved.
+ CHECK(!breakpointId.isEmpty());
+ CHECK(!scriptId.isEmpty());
+ ScriptsMap::iterator scriptIterator = m_scripts.find(scriptId);
+ if (scriptIterator == m_scripts.end()) return nullptr;
+ if (breakpoint.lineNumber < scriptIterator->second->startLine() ||
+ scriptIterator->second->endLine() < breakpoint.lineNumber)
+ return nullptr;
+
+ int actualLineNumber;
+ int actualColumnNumber;
+ String16 debuggerBreakpointId = m_debugger->setBreakpoint(
+ scriptId, breakpoint, &actualLineNumber, &actualColumnNumber);
+ if (debuggerBreakpointId.isEmpty()) return nullptr;
+
+ m_serverBreakpoints[debuggerBreakpointId] =
+ std::make_pair(breakpointId, source);
+ CHECK(!breakpointId.isEmpty());
+
+ m_breakpointIdToDebuggerBreakpointIds[breakpointId].push_back(
+ debuggerBreakpointId);
+ return buildProtocolLocation(scriptId, actualLineNumber, actualColumnNumber);
+}
+
+void V8DebuggerAgentImpl::searchInContent(
+ ErrorString* error, const String16& scriptId, const String16& query,
+ const Maybe<bool>& optionalCaseSensitive,
+ const Maybe<bool>& optionalIsRegex,
+ std::unique_ptr<Array<protocol::Debugger::SearchMatch>>* results) {
+ v8::HandleScope handles(m_isolate);
+ ScriptsMap::iterator it = m_scripts.find(scriptId);
+ if (it == m_scripts.end()) {
+ *error = String16("No script for id: " + scriptId);
+ return;
+ }
+
+ std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> matches =
+ searchInTextByLinesImpl(m_session,
+ toProtocolString(it->second->source(m_isolate)),
+ query, optionalCaseSensitive.fromMaybe(false),
+ optionalIsRegex.fromMaybe(false));
+ *results = protocol::Array<protocol::Debugger::SearchMatch>::create();
+ for (size_t i = 0; i < matches.size(); ++i)
+ (*results)->addItem(std::move(matches[i]));
+}
+
+void V8DebuggerAgentImpl::setScriptSource(
+ ErrorString* errorString, const String16& scriptId,
+ const String16& newContent, const Maybe<bool>& dryRun,
+ Maybe<protocol::Array<protocol::Debugger::CallFrame>>* newCallFrames,
+ Maybe<bool>* stackChanged, Maybe<StackTrace>* asyncStackTrace,
+ Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) {
+ if (!checkEnabled(errorString)) return;
+
+ v8::HandleScope handles(m_isolate);
+ v8::Local<v8::String> newSource = toV8String(m_isolate, newContent);
+ if (!m_debugger->setScriptSource(scriptId, newSource, dryRun.fromMaybe(false),
+ errorString, optOutCompileError,
+ &m_pausedCallFrames, stackChanged))
+ return;
+
+ ScriptsMap::iterator it = m_scripts.find(scriptId);
+ if (it != m_scripts.end()) it->second->setSource(m_isolate, newSource);
+
+ std::unique_ptr<Array<CallFrame>> callFrames = currentCallFrames(errorString);
+ if (!callFrames) return;
+ *newCallFrames = std::move(callFrames);
+ *asyncStackTrace = currentAsyncStackTrace();
+}
+
+void V8DebuggerAgentImpl::restartFrame(
+ ErrorString* errorString, const String16& callFrameId,
+ std::unique_ptr<Array<CallFrame>>* newCallFrames,
+ Maybe<StackTrace>* asyncStackTrace) {
+ if (!assertPaused(errorString)) return;
+ InjectedScript::CallFrameScope scope(
+ errorString, m_inspector, m_session->contextGroupId(), callFrameId);
+ if (!scope.initialize()) return;
+ if (scope.frameOrdinal() >= m_pausedCallFrames.size()) {
+ *errorString = "Could not find call frame with given id";
+ return;
+ }
+
+ v8::Local<v8::Value> resultValue;
+ v8::Local<v8::Boolean> result;
+ if (!m_pausedCallFrames[scope.frameOrdinal()]->restart().ToLocal(
+ &resultValue) ||
+ scope.tryCatch().HasCaught() ||
+ !resultValue->ToBoolean(scope.context()).ToLocal(&result) ||
+ !result->Value()) {
+ *errorString = "Internal error";
+ return;
+ }
+ JavaScriptCallFrames frames = m_debugger->currentCallFrames();
+ m_pausedCallFrames.swap(frames);
+
+ *newCallFrames = currentCallFrames(errorString);
+ if (!*newCallFrames) return;
+ *asyncStackTrace = currentAsyncStackTrace();
+}
+
+void V8DebuggerAgentImpl::getScriptSource(ErrorString* error,
+ const String16& scriptId,
+ String16* scriptSource) {
+ if (!checkEnabled(error)) return;
+ ScriptsMap::iterator it = m_scripts.find(scriptId);
+ if (it == m_scripts.end()) {
+ *error = "No script for id: " + scriptId;
+ return;
+ }
+ v8::HandleScope handles(m_isolate);
+ *scriptSource = toProtocolString(it->second->source(m_isolate));
+}
+
+void V8DebuggerAgentImpl::schedulePauseOnNextStatement(
+ const String16& breakReason,
+ std::unique_ptr<protocol::DictionaryValue> data) {
+ if (!enabled() || m_scheduledDebuggerStep == StepInto ||
+ m_javaScriptPauseScheduled || m_debugger->isPaused() ||
+ !m_debugger->breakpointsActivated())
+ return;
+ m_breakReason = breakReason;
+ m_breakAuxData = std::move(data);
+ m_pausingOnNativeEvent = true;
+ m_skipNextDebuggerStepOut = false;
+ m_debugger->setPauseOnNextStatement(true);
+}
+
+void V8DebuggerAgentImpl::schedulePauseOnNextStatementIfSteppingInto() {
+ DCHECK(enabled());
+ if (m_scheduledDebuggerStep != StepInto || m_javaScriptPauseScheduled ||
+ m_debugger->isPaused())
+ return;
+ clearBreakDetails();
+ m_pausingOnNativeEvent = false;
+ m_skippedStepFrameCount = 0;
+ m_recursionLevelForStepFrame = 0;
+ m_debugger->setPauseOnNextStatement(true);
+}
+
+void V8DebuggerAgentImpl::cancelPauseOnNextStatement() {
+ if (m_javaScriptPauseScheduled || m_debugger->isPaused()) return;
+ clearBreakDetails();
+ m_pausingOnNativeEvent = false;
+ m_debugger->setPauseOnNextStatement(false);
+}
+
+void V8DebuggerAgentImpl::pause(ErrorString* errorString) {
+ if (!checkEnabled(errorString)) return;
+ if (m_javaScriptPauseScheduled || m_debugger->isPaused()) return;
+ clearBreakDetails();
+ m_javaScriptPauseScheduled = true;
+ m_scheduledDebuggerStep = NoStep;
+ m_skippedStepFrameCount = 0;
+ m_steppingFromFramework = false;
+ m_debugger->setPauseOnNextStatement(true);
+}
+
+void V8DebuggerAgentImpl::resume(ErrorString* errorString) {
+ if (!assertPaused(errorString)) return;
+ m_scheduledDebuggerStep = NoStep;
+ m_steppingFromFramework = false;
+ m_session->releaseObjectGroup(backtraceObjectGroup);
+ m_debugger->continueProgram();
+}
+
+void V8DebuggerAgentImpl::stepOver(ErrorString* errorString) {
+ if (!assertPaused(errorString)) return;
+ // StepOver at function return point should fallback to StepInto.
+ JavaScriptCallFrame* frame =
+ !m_pausedCallFrames.empty() ? m_pausedCallFrames[0].get() : nullptr;
+ if (frame && frame->isAtReturn()) {
+ stepInto(errorString);
+ return;
+ }
+ m_scheduledDebuggerStep = StepOver;
+ m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
+ m_session->releaseObjectGroup(backtraceObjectGroup);
+ m_debugger->stepOverStatement();
+}
+
+void V8DebuggerAgentImpl::stepInto(ErrorString* errorString) {
+ if (!assertPaused(errorString)) return;
+ m_scheduledDebuggerStep = StepInto;
+ m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
+ m_session->releaseObjectGroup(backtraceObjectGroup);
+ m_debugger->stepIntoStatement();
+}
+
+void V8DebuggerAgentImpl::stepOut(ErrorString* errorString) {
+ if (!assertPaused(errorString)) return;
+ m_scheduledDebuggerStep = StepOut;
+ m_skipNextDebuggerStepOut = false;
+ m_recursionLevelForStepOut = 1;
+ m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
+ m_session->releaseObjectGroup(backtraceObjectGroup);
+ m_debugger->stepOutOfFunction();
+}
+
+void V8DebuggerAgentImpl::setPauseOnExceptions(
+ ErrorString* errorString, const String16& stringPauseState) {
+ if (!checkEnabled(errorString)) return;
+ V8Debugger::PauseOnExceptionsState pauseState;
+ if (stringPauseState == "none") {
+ pauseState = V8Debugger::DontPauseOnExceptions;
+ } else if (stringPauseState == "all") {
+ pauseState = V8Debugger::PauseOnAllExceptions;
+ } else if (stringPauseState == "uncaught") {
+ pauseState = V8Debugger::PauseOnUncaughtExceptions;
+ } else {
+ *errorString = "Unknown pause on exceptions mode: " + stringPauseState;
+ return;
+ }
+ setPauseOnExceptionsImpl(errorString, pauseState);
+}
+
+void V8DebuggerAgentImpl::setPauseOnExceptionsImpl(ErrorString* errorString,
+ int pauseState) {
+ m_debugger->setPauseOnExceptionsState(
+ static_cast<V8Debugger::PauseOnExceptionsState>(pauseState));
+ if (m_debugger->getPauseOnExceptionsState() != pauseState)
+ *errorString = "Internal error. Could not change pause on exceptions state";
+ else
+ m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState, pauseState);
+}
+
+void V8DebuggerAgentImpl::evaluateOnCallFrame(
+ ErrorString* errorString, const String16& callFrameId,
+ const String16& expression, const Maybe<String16>& objectGroup,
+ const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& silent,
+ const Maybe<bool>& returnByValue, const Maybe<bool>& generatePreview,
+ std::unique_ptr<RemoteObject>* result,
+ Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
+ if (!assertPaused(errorString)) return;
+ InjectedScript::CallFrameScope scope(
+ errorString, m_inspector, m_session->contextGroupId(), callFrameId);
+ if (!scope.initialize()) return;
+ if (scope.frameOrdinal() >= m_pausedCallFrames.size()) {
+ *errorString = "Could not find call frame with given id";
+ return;
+ }
+
+ if (includeCommandLineAPI.fromMaybe(false) && !scope.installCommandLineAPI())
+ return;
+ if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
+
+ v8::MaybeLocal<v8::Value> maybeResultValue =
+ m_pausedCallFrames[scope.frameOrdinal()]->evaluate(
+ toV8String(m_isolate, expression));
+
+ // Re-initialize after running client's code, as it could have destroyed
+ // context or session.
+ if (!scope.initialize()) return;
+ scope.injectedScript()->wrapEvaluateResult(
+ errorString, maybeResultValue, scope.tryCatch(),
+ objectGroup.fromMaybe(""), returnByValue.fromMaybe(false),
+ generatePreview.fromMaybe(false), result, exceptionDetails);
+}
+
+void V8DebuggerAgentImpl::setVariableValue(
+ ErrorString* errorString, int scopeNumber, const String16& variableName,
+ std::unique_ptr<protocol::Runtime::CallArgument> newValueArgument,
+ const String16& callFrameId) {
+ if (!checkEnabled(errorString)) return;
+ if (!assertPaused(errorString)) return;
+ InjectedScript::CallFrameScope scope(
+ errorString, m_inspector, m_session->contextGroupId(), callFrameId);
+ if (!scope.initialize()) return;
+
+ v8::Local<v8::Value> newValue;
+ if (!scope.injectedScript()
+ ->resolveCallArgument(errorString, newValueArgument.get())
+ .ToLocal(&newValue))
+ return;
+
+ if (scope.frameOrdinal() >= m_pausedCallFrames.size()) {
+ *errorString = "Could not find call frame with given id";
+ return;
+ }
+ v8::MaybeLocal<v8::Value> result =
+ m_pausedCallFrames[scope.frameOrdinal()]->setVariableValue(
+ scopeNumber, toV8String(m_isolate, variableName), newValue);
+ if (scope.tryCatch().HasCaught() || result.IsEmpty()) {
+ *errorString = "Internal error";
+ return;
+ }
+}
+
+void V8DebuggerAgentImpl::setAsyncCallStackDepth(ErrorString* errorString,
+ int depth) {
+ if (!checkEnabled(errorString)) return;
+ m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, depth);
+ m_debugger->setAsyncCallStackDepth(this, depth);
+}
+
+void V8DebuggerAgentImpl::setBlackboxPatterns(
+ ErrorString* errorString,
+ std::unique_ptr<protocol::Array<String16>> patterns) {
+ if (!patterns->length()) {
+ m_blackboxPattern = nullptr;
+ m_state->remove(DebuggerAgentState::blackboxPattern);
+ return;
+ }
+
+ String16Builder patternBuilder;
+ patternBuilder.append('(');
+ for (size_t i = 0; i < patterns->length() - 1; ++i) {
+ patternBuilder.append(patterns->get(i));
+ patternBuilder.append("|");
+ }
+ patternBuilder.append(patterns->get(patterns->length() - 1));
+ patternBuilder.append(')');
+ String16 pattern = patternBuilder.toString();
+ if (!setBlackboxPattern(errorString, pattern)) return;
+ m_state->setString(DebuggerAgentState::blackboxPattern, pattern);
+}
+
+bool V8DebuggerAgentImpl::setBlackboxPattern(ErrorString* errorString,
+ const String16& pattern) {
+ std::unique_ptr<V8Regex> regex(new V8Regex(
+ m_inspector, pattern, true /** caseSensitive */, false /** multiline */));
+ if (!regex->isValid()) {
+ *errorString = "Pattern parser error: " + regex->errorMessage();
+ return false;
+ }
+ m_blackboxPattern = std::move(regex);
+ return true;
+}
+
+void V8DebuggerAgentImpl::setBlackboxedRanges(
+ ErrorString* error, const String16& scriptId,
+ std::unique_ptr<protocol::Array<protocol::Debugger::ScriptPosition>>
+ inPositions) {
+ if (m_scripts.find(scriptId) == m_scripts.end()) {
+ *error = "No script with passed id.";
+ return;
+ }
+
+ if (!inPositions->length()) {
+ m_blackboxedPositions.erase(scriptId);
+ return;
+ }
+
+ std::vector<std::pair<int, int>> positions;
+ positions.reserve(inPositions->length());
+ for (size_t i = 0; i < inPositions->length(); ++i) {
+ protocol::Debugger::ScriptPosition* position = inPositions->get(i);
+ if (position->getLineNumber() < 0) {
+ *error = "Position missing 'line' or 'line' < 0.";
+ return;
+ }
+ if (position->getColumnNumber() < 0) {
+ *error = "Position missing 'column' or 'column' < 0.";
+ return;
+ }
+ positions.push_back(
+ std::make_pair(position->getLineNumber(), position->getColumnNumber()));
+ }
+
+ for (size_t i = 1; i < positions.size(); ++i) {
+ if (positions[i - 1].first < positions[i].first) continue;
+ if (positions[i - 1].first == positions[i].first &&
+ positions[i - 1].second < positions[i].second)
+ continue;
+ *error =
+ "Input positions array is not sorted or contains duplicate values.";
+ return;
+ }
+
+ m_blackboxedPositions[scriptId] = positions;
+}
+
+void V8DebuggerAgentImpl::willExecuteScript(int scriptId) {
+ changeJavaScriptRecursionLevel(+1);
+ // Fast return.
+ if (m_scheduledDebuggerStep != StepInto) return;
+ schedulePauseOnNextStatementIfSteppingInto();
+}
+
+void V8DebuggerAgentImpl::didExecuteScript() {
+ changeJavaScriptRecursionLevel(-1);
+}
+
+void V8DebuggerAgentImpl::changeJavaScriptRecursionLevel(int step) {
+ if (m_javaScriptPauseScheduled && !m_skipAllPauses &&
+ !m_debugger->isPaused()) {
+ // Do not ever loose user's pause request until we have actually paused.
+ m_debugger->setPauseOnNextStatement(true);
+ }
+ if (m_scheduledDebuggerStep == StepOut) {
+ m_recursionLevelForStepOut += step;
+ if (!m_recursionLevelForStepOut) {
+ // When StepOut crosses a task boundary (i.e. js -> c++) from where it was
+ // requested,
+ // switch stepping to step into a next JS task, as if we exited to a
+ // blackboxed framework.
+ m_scheduledDebuggerStep = StepInto;
+ m_skipNextDebuggerStepOut = false;
+ }
+ }
+ if (m_recursionLevelForStepFrame) {
+ m_recursionLevelForStepFrame += step;
+ if (!m_recursionLevelForStepFrame) {
+ // We have walked through a blackboxed framework and got back to where we
+ // started.
+ // If there was no stepping scheduled, we should cancel the stepping
+ // explicitly,
+ // since there may be a scheduled StepFrame left.
+ // Otherwise, if we were stepping in/over, the StepFrame will stop at the
+ // right location,
+ // whereas if we were stepping out, we should continue doing so after
+ // debugger pauses
+ // from the old StepFrame.
+ m_skippedStepFrameCount = 0;
+ if (m_scheduledDebuggerStep == NoStep)
+ m_debugger->clearStepping();
+ else if (m_scheduledDebuggerStep == StepOut)
+ m_skipNextDebuggerStepOut = true;
+ }
+ }
+}
+
+std::unique_ptr<Array<CallFrame>> V8DebuggerAgentImpl::currentCallFrames(
+ ErrorString* errorString) {
+ if (m_pausedContext.IsEmpty() || !m_pausedCallFrames.size())
+ return Array<CallFrame>::create();
+ ErrorString ignored;
+ v8::HandleScope handles(m_isolate);
+ v8::Local<v8::Context> debuggerContext =
+ v8::Debug::GetDebugContext(m_isolate);
+ v8::Context::Scope contextScope(debuggerContext);
+
+ v8::Local<v8::Array> objects = v8::Array::New(m_isolate);
+
+ for (size_t frameOrdinal = 0; frameOrdinal < m_pausedCallFrames.size();
+ ++frameOrdinal) {
+ const std::unique_ptr<JavaScriptCallFrame>& currentCallFrame =
+ m_pausedCallFrames[frameOrdinal];
+
+ v8::Local<v8::Object> details = currentCallFrame->details();
+ if (hasInternalError(errorString, details.IsEmpty()))
+ return Array<CallFrame>::create();
+
+ int contextId = currentCallFrame->contextId();
+ InjectedScript* injectedScript =
+ contextId ? m_session->findInjectedScript(&ignored, contextId)
+ : nullptr;
+
+ String16 callFrameId =
+ RemoteCallFrameId::serialize(contextId, static_cast<int>(frameOrdinal));
+ if (hasInternalError(
+ errorString,
+ !details
+ ->Set(debuggerContext,
+ toV8StringInternalized(m_isolate, "callFrameId"),
+ toV8String(m_isolate, callFrameId))
+ .FromMaybe(false)))
+ return Array<CallFrame>::create();
+
+ if (injectedScript) {
+ v8::Local<v8::Value> scopeChain;
+ if (hasInternalError(
+ errorString,
+ !details->Get(debuggerContext,
+ toV8StringInternalized(m_isolate, "scopeChain"))
+ .ToLocal(&scopeChain) ||
+ !scopeChain->IsArray()))
+ return Array<CallFrame>::create();
+ v8::Local<v8::Array> scopeChainArray = scopeChain.As<v8::Array>();
+ if (!injectedScript->wrapPropertyInArray(
+ errorString, scopeChainArray,
+ toV8StringInternalized(m_isolate, "object"),
+ backtraceObjectGroup))
+ return Array<CallFrame>::create();
+ if (!injectedScript->wrapObjectProperty(
+ errorString, details, toV8StringInternalized(m_isolate, "this"),
+ backtraceObjectGroup))
+ return Array<CallFrame>::create();
+ if (details
+ ->Has(debuggerContext,
+ toV8StringInternalized(m_isolate, "returnValue"))
+ .FromMaybe(false)) {
+ if (!injectedScript->wrapObjectProperty(
+ errorString, details,
+ toV8StringInternalized(m_isolate, "returnValue"),
+ backtraceObjectGroup))
+ return Array<CallFrame>::create();
+ }
+ } else {
+ if (hasInternalError(errorString, !details
+ ->Set(debuggerContext,
+ toV8StringInternalized(
+ m_isolate, "scopeChain"),
+ v8::Array::New(m_isolate, 0))
+ .FromMaybe(false)))
+ return Array<CallFrame>::create();
+ v8::Local<v8::Object> remoteObject = v8::Object::New(m_isolate);
+ if (hasInternalError(
+ errorString,
+ !remoteObject
+ ->Set(debuggerContext,
+ toV8StringInternalized(m_isolate, "type"),
+ toV8StringInternalized(m_isolate, "undefined"))
+ .FromMaybe(false)))
+ return Array<CallFrame>::create();
+ if (hasInternalError(errorString,
+ !details
+ ->Set(debuggerContext,
+ toV8StringInternalized(m_isolate, "this"),
+ remoteObject)
+ .FromMaybe(false)))
+ return Array<CallFrame>::create();
+ if (hasInternalError(
+ errorString,
+ !details
+ ->Delete(debuggerContext,
+ toV8StringInternalized(m_isolate, "returnValue"))
+ .FromMaybe(false)))
+ return Array<CallFrame>::create();
+ }
+
+ if (hasInternalError(
+ errorString,
+ !objects
+ ->Set(debuggerContext, static_cast<int>(frameOrdinal), details)
+ .FromMaybe(false)))
+ return Array<CallFrame>::create();
+ }
+
+ std::unique_ptr<protocol::Value> protocolValue =
+ toProtocolValue(errorString, debuggerContext, objects);
+ if (!protocolValue) return Array<CallFrame>::create();
+ protocol::ErrorSupport errorSupport;
+ std::unique_ptr<Array<CallFrame>> callFrames =
+ Array<CallFrame>::parse(protocolValue.get(), &errorSupport);
+ if (hasInternalError(errorString, !callFrames))
+ return Array<CallFrame>::create();
+ return callFrames;
+}
+
+std::unique_ptr<StackTrace> V8DebuggerAgentImpl::currentAsyncStackTrace() {
+ if (m_pausedContext.IsEmpty()) return nullptr;
+ V8StackTraceImpl* stackTrace = m_debugger->currentAsyncCallChain();
+ return stackTrace ? stackTrace->buildInspectorObjectForTail(m_debugger)
+ : nullptr;
+}
+
+void V8DebuggerAgentImpl::didParseSource(
+ std::unique_ptr<V8DebuggerScript> script, bool success) {
+ v8::HandleScope handles(m_isolate);
+ String16 scriptSource = toProtocolString(script->source(m_isolate));
+ if (!success) script->setSourceURL(findSourceURL(scriptSource, false));
+ if (!success)
+ script->setSourceMappingURL(findSourceMapURL(scriptSource, false));
+
+ std::unique_ptr<protocol::DictionaryValue> executionContextAuxData;
+ if (!script->executionContextAuxData().isEmpty())
+ executionContextAuxData = protocol::DictionaryValue::cast(
+ protocol::parseJSON(script->executionContextAuxData()));
+ bool isLiveEdit = script->isLiveEdit();
+ bool hasSourceURL = script->hasSourceURL();
+ String16 scriptId = script->scriptId();
+ String16 scriptURL = script->sourceURL();
+
+ const Maybe<String16>& sourceMapURLParam = script->sourceMappingURL();
+ const Maybe<protocol::DictionaryValue>& executionContextAuxDataParam(
+ std::move(executionContextAuxData));
+ const bool* isLiveEditParam = isLiveEdit ? &isLiveEdit : nullptr;
+ const bool* hasSourceURLParam = hasSourceURL ? &hasSourceURL : nullptr;
+ if (success)
+ m_frontend.scriptParsed(
+ scriptId, scriptURL, script->startLine(), script->startColumn(),
+ script->endLine(), script->endColumn(), script->executionContextId(),
+ script->hash(), executionContextAuxDataParam, isLiveEditParam,
+ sourceMapURLParam, hasSourceURLParam);
+ else
+ m_frontend.scriptFailedToParse(
+ scriptId, scriptURL, script->startLine(), script->startColumn(),
+ script->endLine(), script->endColumn(), script->executionContextId(),
+ script->hash(), executionContextAuxDataParam, sourceMapURLParam,
+ hasSourceURLParam);
+
+ m_scripts[scriptId] = std::move(script);
+
+ if (scriptURL.isEmpty() || !success) return;
+
+ protocol::DictionaryValue* breakpointsCookie =
+ m_state->getObject(DebuggerAgentState::javaScriptBreakpoints);
+ if (!breakpointsCookie) return;
+
+ for (size_t i = 0; i < breakpointsCookie->size(); ++i) {
+ auto cookie = breakpointsCookie->at(i);
+ protocol::DictionaryValue* breakpointObject =
+ protocol::DictionaryValue::cast(cookie.second);
+ bool isRegex;
+ breakpointObject->getBoolean(DebuggerAgentState::isRegex, &isRegex);
+ String16 url;
+ breakpointObject->getString(DebuggerAgentState::url, &url);
+ if (!matches(m_inspector, scriptURL, url, isRegex)) continue;
+ ScriptBreakpoint breakpoint;
+ breakpointObject->getInteger(DebuggerAgentState::lineNumber,
+ &breakpoint.lineNumber);
+ breakpointObject->getInteger(DebuggerAgentState::columnNumber,
+ &breakpoint.columnNumber);
+ breakpointObject->getString(DebuggerAgentState::condition,
+ &breakpoint.condition);
+ std::unique_ptr<protocol::Debugger::Location> location = resolveBreakpoint(
+ cookie.first, scriptId, breakpoint, UserBreakpointSource);
+ if (location)
+ m_frontend.breakpointResolved(cookie.first, std::move(location));
+ }
+}
+
+V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::didPause(
+ v8::Local<v8::Context> context, v8::Local<v8::Value> exception,
+ const std::vector<String16>& hitBreakpoints, bool isPromiseRejection) {
+ JavaScriptCallFrames callFrames = m_debugger->currentCallFrames(1);
+ JavaScriptCallFrame* topCallFrame =
+ !callFrames.empty() ? callFrames.begin()->get() : nullptr;
+
+ V8DebuggerAgentImpl::SkipPauseRequest result;
+ if (m_skipAllPauses)
+ result = RequestContinue;
+ else if (!hitBreakpoints.empty())
+ result = RequestNoSkip; // Don't skip explicit breakpoints even if set in
+ // frameworks.
+ else if (!exception.IsEmpty())
+ result = shouldSkipExceptionPause(topCallFrame);
+ else if (m_scheduledDebuggerStep != NoStep || m_javaScriptPauseScheduled ||
+ m_pausingOnNativeEvent)
+ result = shouldSkipStepPause(topCallFrame);
+ else
+ result = RequestNoSkip;
+
+ m_skipNextDebuggerStepOut = false;
+ if (result != RequestNoSkip) return result;
+ // Skip pauses inside V8 internal scripts and on syntax errors.
+ if (!topCallFrame) return RequestContinue;
+
+ DCHECK(m_pausedContext.IsEmpty());
+ JavaScriptCallFrames frames = m_debugger->currentCallFrames();
+ m_pausedCallFrames.swap(frames);
+ m_pausedContext.Reset(m_isolate, context);
+ v8::HandleScope handles(m_isolate);
+
+ if (!exception.IsEmpty()) {
+ ErrorString ignored;
+ InjectedScript* injectedScript =
+ m_session->findInjectedScript(&ignored, V8Debugger::contextId(context));
+ if (injectedScript) {
+ m_breakReason =
+ isPromiseRejection
+ ? protocol::Debugger::Paused::ReasonEnum::PromiseRejection
+ : protocol::Debugger::Paused::ReasonEnum::Exception;
+ ErrorString errorString;
+ auto obj = injectedScript->wrapObject(&errorString, exception,
+ backtraceObjectGroup);
+ m_breakAuxData = obj ? obj->serialize() : nullptr;
+ // m_breakAuxData might be null after this.
+ }
+ }
+
+ std::unique_ptr<Array<String16>> hitBreakpointIds = Array<String16>::create();
+
+ for (const auto& point : hitBreakpoints) {
+ DebugServerBreakpointToBreakpointIdAndSourceMap::iterator
+ breakpointIterator = m_serverBreakpoints.find(point);
+ if (breakpointIterator != m_serverBreakpoints.end()) {
+ const String16& localId = breakpointIterator->second.first;
+ hitBreakpointIds->addItem(localId);
+
+ BreakpointSource source = breakpointIterator->second.second;
+ if (m_breakReason == protocol::Debugger::Paused::ReasonEnum::Other &&
+ source == DebugCommandBreakpointSource)
+ m_breakReason = protocol::Debugger::Paused::ReasonEnum::DebugCommand;
+ }
+ }
+
+ ErrorString errorString;
+ m_frontend.paused(currentCallFrames(&errorString), m_breakReason,
+ std::move(m_breakAuxData), std::move(hitBreakpointIds),
+ currentAsyncStackTrace());
+ m_scheduledDebuggerStep = NoStep;
+ m_javaScriptPauseScheduled = false;
+ m_steppingFromFramework = false;
+ m_pausingOnNativeEvent = false;
+ m_skippedStepFrameCount = 0;
+ m_recursionLevelForStepFrame = 0;
+
+ if (!m_continueToLocationBreakpointId.isEmpty()) {
+ m_debugger->removeBreakpoint(m_continueToLocationBreakpointId);
+ m_continueToLocationBreakpointId = "";
+ }
+ return result;
+}
+
+void V8DebuggerAgentImpl::didContinue() {
+ m_pausedContext.Reset();
+ JavaScriptCallFrames emptyCallFrames;
+ m_pausedCallFrames.swap(emptyCallFrames);
+ clearBreakDetails();
+ m_frontend.resumed();
+}
+
+void V8DebuggerAgentImpl::breakProgram(
+ const String16& breakReason,
+ std::unique_ptr<protocol::DictionaryValue> data) {
+ if (!enabled() || m_skipAllPauses || !m_pausedContext.IsEmpty() ||
+ isCurrentCallStackEmptyOrBlackboxed() ||
+ !m_debugger->breakpointsActivated())
+ return;
+ m_breakReason = breakReason;
+ m_breakAuxData = std::move(data);
+ m_scheduledDebuggerStep = NoStep;
+ m_steppingFromFramework = false;
+ m_pausingOnNativeEvent = false;
+ m_debugger->breakProgram();
+}
+
+void V8DebuggerAgentImpl::breakProgramOnException(
+ const String16& breakReason,
+ std::unique_ptr<protocol::DictionaryValue> data) {
+ if (!enabled() ||
+ m_debugger->getPauseOnExceptionsState() ==
+ V8Debugger::DontPauseOnExceptions)
+ return;
+ breakProgram(breakReason, std::move(data));
+}
+
+bool V8DebuggerAgentImpl::assertPaused(ErrorString* errorString) {
+ if (m_pausedContext.IsEmpty()) {
+ *errorString = "Can only perform operation while paused.";
+ return false;
+ }
+ return true;
+}
+
+void V8DebuggerAgentImpl::clearBreakDetails() {
+ m_breakReason = protocol::Debugger::Paused::ReasonEnum::Other;
+ m_breakAuxData = nullptr;
+}
+
+void V8DebuggerAgentImpl::setBreakpointAt(const String16& scriptId,
+ int lineNumber, int columnNumber,
+ BreakpointSource source,
+ const String16& condition) {
+ String16 breakpointId =
+ generateBreakpointId(scriptId, lineNumber, columnNumber, source);
+ ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
+ resolveBreakpoint(breakpointId, scriptId, breakpoint, source);
+}
+
+void V8DebuggerAgentImpl::removeBreakpointAt(const String16& scriptId,
+ int lineNumber, int columnNumber,
+ BreakpointSource source) {
+ removeBreakpoint(
+ generateBreakpointId(scriptId, lineNumber, columnNumber, source));
+}
+
+void V8DebuggerAgentImpl::reset() {
+ if (!enabled()) return;
+ m_scheduledDebuggerStep = NoStep;
+ m_scripts.clear();
+ m_blackboxedPositions.clear();
+ m_breakpointIdToDebuggerBreakpointIds.clear();
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
new file mode 100644
index 0000000000..62aa67b64b
--- /dev/null
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -0,0 +1,224 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8DEBUGGERAGENTIMPL_H_
+#define V8_INSPECTOR_V8DEBUGGERAGENTIMPL_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/java-script-call-frame.h"
+#include "src/inspector/protocol/Debugger.h"
+#include "src/inspector/protocol/Forward.h"
+
+namespace v8_inspector {
+
+struct ScriptBreakpoint;
+class JavaScriptCallFrame;
+class PromiseTracker;
+class V8Debugger;
+class V8DebuggerScript;
+class V8InspectorImpl;
+class V8InspectorSessionImpl;
+class V8Regex;
+class V8StackTraceImpl;
+
+using protocol::ErrorString;
+using protocol::Maybe;
+
+class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
+ public:
+ enum SkipPauseRequest {
+ RequestNoSkip,
+ RequestContinue,
+ RequestStepInto,
+ RequestStepOut,
+ RequestStepFrame
+ };
+
+ enum BreakpointSource {
+ UserBreakpointSource,
+ DebugCommandBreakpointSource,
+ MonitorCommandBreakpointSource
+ };
+
+ V8DebuggerAgentImpl(V8InspectorSessionImpl*, protocol::FrontendChannel*,
+ protocol::DictionaryValue* state);
+ ~V8DebuggerAgentImpl() override;
+ void restore();
+
+ // Part of the protocol.
+ void enable(ErrorString*) override;
+ void disable(ErrorString*) override;
+ void setBreakpointsActive(ErrorString*, bool active) override;
+ void setSkipAllPauses(ErrorString*, bool skip) override;
+ void setBreakpointByUrl(
+ ErrorString*, int lineNumber, const Maybe<String16>& optionalURL,
+ const Maybe<String16>& optionalURLRegex,
+ const Maybe<int>& optionalColumnNumber,
+ const Maybe<String16>& optionalCondition, String16*,
+ std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations)
+ override;
+ void setBreakpoint(
+ ErrorString*, std::unique_ptr<protocol::Debugger::Location>,
+ const Maybe<String16>& optionalCondition, String16*,
+ std::unique_ptr<protocol::Debugger::Location>* actualLocation) override;
+ void removeBreakpoint(ErrorString*, const String16& breakpointId) override;
+ void continueToLocation(
+ ErrorString*, std::unique_ptr<protocol::Debugger::Location>) override;
+ void searchInContent(
+ ErrorString*, const String16& scriptId, const String16& query,
+ const Maybe<bool>& optionalCaseSensitive,
+ const Maybe<bool>& optionalIsRegex,
+ std::unique_ptr<protocol::Array<protocol::Debugger::SearchMatch>>*)
+ override;
+ void setScriptSource(
+ ErrorString*, const String16& inScriptId, const String16& inScriptSource,
+ const Maybe<bool>& dryRun,
+ Maybe<protocol::Array<protocol::Debugger::CallFrame>>* optOutCallFrames,
+ Maybe<bool>* optOutStackChanged,
+ Maybe<protocol::Runtime::StackTrace>* optOutAsyncStackTrace,
+ Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) override;
+ void restartFrame(
+ ErrorString*, const String16& callFrameId,
+ std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>*
+ newCallFrames,
+ Maybe<protocol::Runtime::StackTrace>* asyncStackTrace) override;
+ void getScriptSource(ErrorString*, const String16& scriptId,
+ String16* scriptSource) override;
+ void pause(ErrorString*) override;
+ void resume(ErrorString*) override;
+ void stepOver(ErrorString*) override;
+ void stepInto(ErrorString*) override;
+ void stepOut(ErrorString*) override;
+ void setPauseOnExceptions(ErrorString*, const String16& pauseState) override;
+ void evaluateOnCallFrame(
+ ErrorString*, const String16& callFrameId, const String16& expression,
+ const Maybe<String16>& objectGroup,
+ const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& silent,
+ const Maybe<bool>& returnByValue, const Maybe<bool>& generatePreview,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result,
+ Maybe<protocol::Runtime::ExceptionDetails>*) override;
+ void setVariableValue(
+ ErrorString*, int scopeNumber, const String16& variableName,
+ std::unique_ptr<protocol::Runtime::CallArgument> newValue,
+ const String16& callFrame) override;
+ void setAsyncCallStackDepth(ErrorString*, int depth) override;
+ void setBlackboxPatterns(
+ ErrorString*,
+ std::unique_ptr<protocol::Array<String16>> patterns) override;
+ void setBlackboxedRanges(
+ ErrorString*, const String16& scriptId,
+ std::unique_ptr<protocol::Array<protocol::Debugger::ScriptPosition>>
+ positions) override;
+
+ bool enabled();
+
+ void setBreakpointAt(const String16& scriptId, int lineNumber,
+ int columnNumber, BreakpointSource,
+ const String16& condition = String16());
+ void removeBreakpointAt(const String16& scriptId, int lineNumber,
+ int columnNumber, BreakpointSource);
+ void schedulePauseOnNextStatement(
+ const String16& breakReason,
+ std::unique_ptr<protocol::DictionaryValue> data);
+ void cancelPauseOnNextStatement();
+ void breakProgram(const String16& breakReason,
+ std::unique_ptr<protocol::DictionaryValue> data);
+ void breakProgramOnException(const String16& breakReason,
+ std::unique_ptr<protocol::DictionaryValue> data);
+
+ void reset();
+
+ // Interface for V8InspectorImpl
+ SkipPauseRequest didPause(v8::Local<v8::Context>,
+ v8::Local<v8::Value> exception,
+ const std::vector<String16>& hitBreakpoints,
+ bool isPromiseRejection);
+ void didContinue();
+ void didParseSource(std::unique_ptr<V8DebuggerScript>, bool success);
+ void willExecuteScript(int scriptId);
+ void didExecuteScript();
+
+ v8::Isolate* isolate() { return m_isolate; }
+
+ private:
+ bool checkEnabled(ErrorString*);
+ void enable();
+
+ SkipPauseRequest shouldSkipExceptionPause(JavaScriptCallFrame* topCallFrame);
+ SkipPauseRequest shouldSkipStepPause(JavaScriptCallFrame* topCallFrame);
+
+ void schedulePauseOnNextStatementIfSteppingInto();
+
+ std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>
+ currentCallFrames(ErrorString*);
+ std::unique_ptr<protocol::Runtime::StackTrace> currentAsyncStackTrace();
+
+ void changeJavaScriptRecursionLevel(int step);
+
+ void setPauseOnExceptionsImpl(ErrorString*, int);
+
+ std::unique_ptr<protocol::Debugger::Location> resolveBreakpoint(
+ const String16& breakpointId, const String16& scriptId,
+ const ScriptBreakpoint&, BreakpointSource);
+ void removeBreakpoint(const String16& breakpointId);
+ bool assertPaused(ErrorString*);
+ void clearBreakDetails();
+
+ bool isCurrentCallStackEmptyOrBlackboxed();
+ bool isTopPausedCallFrameBlackboxed();
+ bool isCallFrameWithUnknownScriptOrBlackboxed(JavaScriptCallFrame*);
+
+ void internalSetAsyncCallStackDepth(int);
+ void increaseCachedSkipStackGeneration();
+
+ bool setBlackboxPattern(ErrorString*, const String16& pattern);
+
+ using ScriptsMap =
+ protocol::HashMap<String16, std::unique_ptr<V8DebuggerScript>>;
+ using BreakpointIdToDebuggerBreakpointIdsMap =
+ protocol::HashMap<String16, std::vector<String16>>;
+ using DebugServerBreakpointToBreakpointIdAndSourceMap =
+ protocol::HashMap<String16, std::pair<String16, BreakpointSource>>;
+ using MuteBreakpoins = protocol::HashMap<String16, std::pair<String16, int>>;
+
+ enum DebuggerStep { NoStep = 0, StepInto, StepOver, StepOut };
+
+ V8InspectorImpl* m_inspector;
+ V8Debugger* m_debugger;
+ V8InspectorSessionImpl* m_session;
+ bool m_enabled;
+ protocol::DictionaryValue* m_state;
+ protocol::Debugger::Frontend m_frontend;
+ v8::Isolate* m_isolate;
+ v8::Global<v8::Context> m_pausedContext;
+ JavaScriptCallFrames m_pausedCallFrames;
+ ScriptsMap m_scripts;
+ BreakpointIdToDebuggerBreakpointIdsMap m_breakpointIdToDebuggerBreakpointIds;
+ DebugServerBreakpointToBreakpointIdAndSourceMap m_serverBreakpoints;
+ String16 m_continueToLocationBreakpointId;
+ String16 m_breakReason;
+ std::unique_ptr<protocol::DictionaryValue> m_breakAuxData;
+ DebuggerStep m_scheduledDebuggerStep;
+ bool m_skipNextDebuggerStepOut;
+ bool m_javaScriptPauseScheduled;
+ bool m_steppingFromFramework;
+ bool m_pausingOnNativeEvent;
+
+ int m_skippedStepFrameCount;
+ int m_recursionLevelForStepOut;
+ int m_recursionLevelForStepFrame;
+ bool m_skipAllPauses;
+
+ std::unique_ptr<V8Regex> m_blackboxPattern;
+ protocol::HashMap<String16, std::vector<std::pair<int, int>>>
+ m_blackboxedPositions;
+
+ DISALLOW_COPY_AND_ASSIGN(V8DebuggerAgentImpl);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8DEBUGGERAGENTIMPL_H_
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
new file mode 100644
index 0000000000..485188a48f
--- /dev/null
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -0,0 +1,140 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-debugger-script.h"
+
+#include "src/inspector/protocol-platform.h"
+#include "src/inspector/string-util.h"
+
+namespace v8_inspector {
+
+static const char hexDigits[17] = "0123456789ABCDEF";
+
+static void appendUnsignedAsHex(uint64_t number, String16Builder* destination) {
+ for (size_t i = 0; i < 8; ++i) {
+ UChar c = hexDigits[number & 0xF];
+ destination->append(c);
+ number >>= 4;
+ }
+}
+
+// Hash algorithm for substrings is described in "Über die Komplexität der
+// Multiplikation in
+// eingeschränkten Branchingprogrammmodellen" by Woelfe.
+// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
+static String16 calculateHash(const String16& str) {
+ static uint64_t prime[] = {0x3FB75161, 0xAB1F4E4F, 0x82675BC5, 0xCD924D35,
+ 0x81ABE279};
+ static uint64_t random[] = {0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476,
+ 0xC3D2E1F0};
+ static uint32_t randomOdd[] = {0xB4663807, 0xCC322BF5, 0xD4F91BBD, 0xA7BEA11D,
+ 0x8F462907};
+
+ uint64_t hashes[] = {0, 0, 0, 0, 0};
+ uint64_t zi[] = {1, 1, 1, 1, 1};
+
+ const size_t hashesSize = arraysize(hashes);
+
+ size_t current = 0;
+ const uint32_t* data = nullptr;
+ size_t sizeInBytes = sizeof(UChar) * str.length();
+ data = reinterpret_cast<const uint32_t*>(str.characters16());
+ for (size_t i = 0; i < sizeInBytes / 4; i += 4) {
+ uint32_t v = data[i];
+ uint64_t xi = v * randomOdd[current] & 0x7FFFFFFF;
+ hashes[current] = (hashes[current] + zi[current] * xi) % prime[current];
+ zi[current] = (zi[current] * random[current]) % prime[current];
+ current = current == hashesSize - 1 ? 0 : current + 1;
+ }
+ if (sizeInBytes % 4) {
+ uint32_t v = 0;
+ for (size_t i = sizeInBytes - sizeInBytes % 4; i < sizeInBytes; ++i) {
+ v <<= 8;
+ v |= reinterpret_cast<const uint8_t*>(data)[i];
+ }
+ uint64_t xi = v * randomOdd[current] & 0x7FFFFFFF;
+ hashes[current] = (hashes[current] + zi[current] * xi) % prime[current];
+ zi[current] = (zi[current] * random[current]) % prime[current];
+ current = current == hashesSize - 1 ? 0 : current + 1;
+ }
+
+ for (size_t i = 0; i < hashesSize; ++i)
+ hashes[i] = (hashes[i] + zi[i] * (prime[i] - 1)) % prime[i];
+
+ String16Builder hash;
+ for (size_t i = 0; i < hashesSize; ++i) appendUnsignedAsHex(hashes[i], &hash);
+ return hash.toString();
+}
+
+static v8::Local<v8::Value> GetChecked(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object,
+ const char* name) {
+ return object
+ ->Get(context, toV8StringInternalized(context->GetIsolate(), name))
+ .ToLocalChecked();
+}
+
+static int GetCheckedInt(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object, const char* name) {
+ return static_cast<int>(GetChecked(context, object, name)
+ ->ToInteger(context)
+ .ToLocalChecked()
+ ->Value());
+}
+
+V8DebuggerScript::V8DebuggerScript(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object,
+ bool isLiveEdit) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Local<v8::Value> idValue = GetChecked(context, object, "id");
+ DCHECK(!idValue.IsEmpty() && idValue->IsInt32());
+ m_id = String16::fromInteger(idValue->Int32Value(context).FromJust());
+
+ m_url = toProtocolStringWithTypeCheck(GetChecked(context, object, "name"));
+ m_sourceURL =
+ toProtocolStringWithTypeCheck(GetChecked(context, object, "sourceURL"));
+ m_sourceMappingURL = toProtocolStringWithTypeCheck(
+ GetChecked(context, object, "sourceMappingURL"));
+ m_startLine = GetCheckedInt(context, object, "startLine");
+ m_startColumn = GetCheckedInt(context, object, "startColumn");
+ m_endLine = GetCheckedInt(context, object, "endLine");
+ m_endColumn = GetCheckedInt(context, object, "endColumn");
+ m_executionContextAuxData = toProtocolStringWithTypeCheck(
+ GetChecked(context, object, "executionContextAuxData"));
+ m_executionContextId = GetCheckedInt(context, object, "executionContextId");
+ m_isLiveEdit = isLiveEdit;
+
+ v8::Local<v8::Value> sourceValue;
+ if (!object->Get(context, toV8StringInternalized(isolate, "source"))
+ .ToLocal(&sourceValue) ||
+ !sourceValue->IsString())
+ return;
+ setSource(isolate, sourceValue.As<v8::String>());
+}
+
+V8DebuggerScript::~V8DebuggerScript() {}
+
+const String16& V8DebuggerScript::sourceURL() const {
+ return m_sourceURL.isEmpty() ? m_url : m_sourceURL;
+}
+
+v8::Local<v8::String> V8DebuggerScript::source(v8::Isolate* isolate) const {
+ return m_source.Get(isolate);
+}
+
+void V8DebuggerScript::setSourceURL(const String16& sourceURL) {
+ m_sourceURL = sourceURL;
+}
+
+void V8DebuggerScript::setSourceMappingURL(const String16& sourceMappingURL) {
+ m_sourceMappingURL = sourceMappingURL;
+}
+
+void V8DebuggerScript::setSource(v8::Isolate* isolate,
+ v8::Local<v8::String> source) {
+ m_source.Reset(isolate, source);
+ m_hash = calculateHash(toProtocolString(source));
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
new file mode 100644
index 0000000000..78c44b5eb9
--- /dev/null
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_V8DEBUGGERSCRIPT_H_
+#define V8_INSPECTOR_V8DEBUGGERSCRIPT_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/string-16.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class V8DebuggerScript {
+ public:
+ V8DebuggerScript(v8::Local<v8::Context>, v8::Local<v8::Object>,
+ bool isLiveEdit);
+ ~V8DebuggerScript();
+
+ const String16& scriptId() const { return m_id; }
+ const String16& url() const { return m_url; }
+ bool hasSourceURL() const { return !m_sourceURL.isEmpty(); }
+ const String16& sourceURL() const;
+ const String16& sourceMappingURL() const { return m_sourceMappingURL; }
+ v8::Local<v8::String> source(v8::Isolate*) const;
+ const String16& hash() const { return m_hash; }
+ int startLine() const { return m_startLine; }
+ int startColumn() const { return m_startColumn; }
+ int endLine() const { return m_endLine; }
+ int endColumn() const { return m_endColumn; }
+ int executionContextId() const { return m_executionContextId; }
+ const String16& executionContextAuxData() const {
+ return m_executionContextAuxData;
+ }
+ bool isLiveEdit() const { return m_isLiveEdit; }
+
+ void setSourceURL(const String16&);
+ void setSourceMappingURL(const String16&);
+ void setSource(v8::Isolate*, v8::Local<v8::String>);
+
+ private:
+ String16 m_id;
+ String16 m_url;
+ String16 m_sourceURL;
+ String16 m_sourceMappingURL;
+ v8::Global<v8::String> m_source;
+ String16 m_hash;
+ int m_startLine;
+ int m_startColumn;
+ int m_endLine;
+ int m_endColumn;
+ int m_executionContextId;
+ String16 m_executionContextAuxData;
+ bool m_isLiveEdit;
+
+ DISALLOW_COPY_AND_ASSIGN(V8DebuggerScript);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8DEBUGGERSCRIPT_H_
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
new file mode 100644
index 0000000000..d393f81ad4
--- /dev/null
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -0,0 +1,1002 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-debugger.h"
+
+#include "src/inspector/debugger-script.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/script-breakpoint.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-internal-value-type.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+#include "src/inspector/v8-value-copier.h"
+
+namespace v8_inspector {
+
+namespace {
+const char stepIntoV8MethodName[] = "stepIntoStatement";
+const char stepOutV8MethodName[] = "stepOutOfFunction";
+static const char v8AsyncTaskEventEnqueue[] = "enqueue";
+static const char v8AsyncTaskEventEnqueueRecurring[] = "enqueueRecurring";
+static const char v8AsyncTaskEventWillHandle[] = "willHandle";
+static const char v8AsyncTaskEventDidHandle[] = "didHandle";
+static const char v8AsyncTaskEventCancel[] = "cancel";
+
+inline v8::Local<v8::Boolean> v8Boolean(bool value, v8::Isolate* isolate) {
+ return value ? v8::True(isolate) : v8::False(isolate);
+}
+
+} // namespace
+
+static bool inLiveEditScope = false;
+
+v8::MaybeLocal<v8::Value> V8Debugger::callDebuggerMethod(
+ const char* functionName, int argc, v8::Local<v8::Value> argv[]) {
+ v8::MicrotasksScope microtasks(m_isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ DCHECK(m_isolate->InContext());
+ v8::Local<v8::Context> context = m_isolate->GetCurrentContext();
+ v8::Local<v8::Object> debuggerScript = m_debuggerScript.Get(m_isolate);
+ v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+ debuggerScript
+ ->Get(context, toV8StringInternalized(m_isolate, functionName))
+ .ToLocalChecked());
+ return function->Call(context, debuggerScript, argc, argv);
+}
+
+V8Debugger::V8Debugger(v8::Isolate* isolate, V8InspectorImpl* inspector)
+ : m_isolate(isolate),
+ m_inspector(inspector),
+ m_lastContextId(0),
+ m_enableCount(0),
+ m_breakpointsActivated(true),
+ m_runningNestedMessageLoop(false),
+ m_ignoreScriptParsedEventsCounter(0),
+ m_maxAsyncCallStackDepth(0) {}
+
+V8Debugger::~V8Debugger() {}
+
+void V8Debugger::enable() {
+ if (m_enableCount++) return;
+ DCHECK(!enabled());
+ v8::HandleScope scope(m_isolate);
+ v8::Debug::SetDebugEventListener(m_isolate, &V8Debugger::v8DebugEventCallback,
+ v8::External::New(m_isolate, this));
+ m_debuggerContext.Reset(m_isolate, v8::Debug::GetDebugContext(m_isolate));
+ compileDebuggerScript();
+}
+
+void V8Debugger::disable() {
+ if (--m_enableCount) return;
+ DCHECK(enabled());
+ clearBreakpoints();
+ m_debuggerScript.Reset();
+ m_debuggerContext.Reset();
+ allAsyncTasksCanceled();
+ v8::Debug::SetDebugEventListener(m_isolate, nullptr);
+}
+
+bool V8Debugger::enabled() const { return !m_debuggerScript.IsEmpty(); }
+
+// static
+int V8Debugger::contextId(v8::Local<v8::Context> context) {
+ v8::Local<v8::Value> data =
+ context->GetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex));
+ if (data.IsEmpty() || !data->IsString()) return 0;
+ String16 dataString = toProtocolString(data.As<v8::String>());
+ if (dataString.isEmpty()) return 0;
+ size_t commaPos = dataString.find(",");
+ if (commaPos == String16::kNotFound) return 0;
+ size_t commaPos2 = dataString.find(",", commaPos + 1);
+ if (commaPos2 == String16::kNotFound) return 0;
+ return dataString.substring(commaPos + 1, commaPos2 - commaPos - 1)
+ .toInteger();
+}
+
+// static
+int V8Debugger::getGroupId(v8::Local<v8::Context> context) {
+ v8::Local<v8::Value> data =
+ context->GetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex));
+ if (data.IsEmpty() || !data->IsString()) return 0;
+ String16 dataString = toProtocolString(data.As<v8::String>());
+ if (dataString.isEmpty()) return 0;
+ size_t commaPos = dataString.find(",");
+ if (commaPos == String16::kNotFound) return 0;
+ return dataString.substring(0, commaPos).toInteger();
+}
+
+void V8Debugger::getCompiledScripts(
+ int contextGroupId,
+ std::vector<std::unique_ptr<V8DebuggerScript>>& result) {
+ v8::HandleScope scope(m_isolate);
+ v8::MicrotasksScope microtasks(m_isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::Local<v8::Context> context = debuggerContext();
+ v8::Local<v8::Object> debuggerScript = m_debuggerScript.Get(m_isolate);
+ DCHECK(!debuggerScript->IsUndefined());
+ v8::Local<v8::Function> getScriptsFunction = v8::Local<v8::Function>::Cast(
+ debuggerScript
+ ->Get(context, toV8StringInternalized(m_isolate, "getScripts"))
+ .ToLocalChecked());
+ v8::Local<v8::Value> argv[] = {v8::Integer::New(m_isolate, contextGroupId)};
+ v8::Local<v8::Value> value;
+ if (!getScriptsFunction->Call(context, debuggerScript, arraysize(argv), argv)
+ .ToLocal(&value))
+ return;
+ DCHECK(value->IsArray());
+ v8::Local<v8::Array> scriptsArray = v8::Local<v8::Array>::Cast(value);
+ result.reserve(scriptsArray->Length());
+ for (unsigned i = 0; i < scriptsArray->Length(); ++i) {
+ v8::Local<v8::Object> scriptObject = v8::Local<v8::Object>::Cast(
+ scriptsArray->Get(context, v8::Integer::New(m_isolate, i))
+ .ToLocalChecked());
+ result.push_back(wrapUnique(
+ new V8DebuggerScript(context, scriptObject, inLiveEditScope)));
+ }
+}
+
+String16 V8Debugger::setBreakpoint(const String16& sourceID,
+ const ScriptBreakpoint& scriptBreakpoint,
+ int* actualLineNumber,
+ int* actualColumnNumber) {
+ v8::HandleScope scope(m_isolate);
+ v8::Local<v8::Context> context = debuggerContext();
+ v8::Context::Scope contextScope(context);
+
+ v8::Local<v8::Object> info = v8::Object::New(m_isolate);
+ bool success = false;
+ success = info->Set(context, toV8StringInternalized(m_isolate, "sourceID"),
+ toV8String(m_isolate, sourceID))
+ .FromMaybe(false);
+ DCHECK(success);
+ success = info->Set(context, toV8StringInternalized(m_isolate, "lineNumber"),
+ v8::Integer::New(m_isolate, scriptBreakpoint.lineNumber))
+ .FromMaybe(false);
+ DCHECK(success);
+ success =
+ info->Set(context, toV8StringInternalized(m_isolate, "columnNumber"),
+ v8::Integer::New(m_isolate, scriptBreakpoint.columnNumber))
+ .FromMaybe(false);
+ DCHECK(success);
+ success = info->Set(context, toV8StringInternalized(m_isolate, "condition"),
+ toV8String(m_isolate, scriptBreakpoint.condition))
+ .FromMaybe(false);
+ DCHECK(success);
+
+ v8::Local<v8::Function> setBreakpointFunction = v8::Local<v8::Function>::Cast(
+ m_debuggerScript.Get(m_isolate)
+ ->Get(context, toV8StringInternalized(m_isolate, "setBreakpoint"))
+ .ToLocalChecked());
+ v8::Local<v8::Value> breakpointId =
+ v8::Debug::Call(debuggerContext(), setBreakpointFunction, info)
+ .ToLocalChecked();
+ if (!breakpointId->IsString()) return "";
+ *actualLineNumber =
+ info->Get(context, toV8StringInternalized(m_isolate, "lineNumber"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust();
+ *actualColumnNumber =
+ info->Get(context, toV8StringInternalized(m_isolate, "columnNumber"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust();
+ return toProtocolString(breakpointId.As<v8::String>());
+}
+
+void V8Debugger::removeBreakpoint(const String16& breakpointId) {
+ v8::HandleScope scope(m_isolate);
+ v8::Local<v8::Context> context = debuggerContext();
+ v8::Context::Scope contextScope(context);
+
+ v8::Local<v8::Object> info = v8::Object::New(m_isolate);
+ bool success = false;
+ success =
+ info->Set(context, toV8StringInternalized(m_isolate, "breakpointId"),
+ toV8String(m_isolate, breakpointId))
+ .FromMaybe(false);
+ DCHECK(success);
+
+ v8::Local<v8::Function> removeBreakpointFunction =
+ v8::Local<v8::Function>::Cast(
+ m_debuggerScript.Get(m_isolate)
+ ->Get(context,
+ toV8StringInternalized(m_isolate, "removeBreakpoint"))
+ .ToLocalChecked());
+ v8::Debug::Call(debuggerContext(), removeBreakpointFunction, info)
+ .ToLocalChecked();
+}
+
+void V8Debugger::clearBreakpoints() {
+ v8::HandleScope scope(m_isolate);
+ v8::Local<v8::Context> context = debuggerContext();
+ v8::Context::Scope contextScope(context);
+
+ v8::Local<v8::Function> clearBreakpoints = v8::Local<v8::Function>::Cast(
+ m_debuggerScript.Get(m_isolate)
+ ->Get(context, toV8StringInternalized(m_isolate, "clearBreakpoints"))
+ .ToLocalChecked());
+ v8::Debug::Call(debuggerContext(), clearBreakpoints).ToLocalChecked();
+}
+
+void V8Debugger::setBreakpointsActivated(bool activated) {
+ if (!enabled()) {
+ UNREACHABLE();
+ return;
+ }
+ v8::HandleScope scope(m_isolate);
+ v8::Local<v8::Context> context = debuggerContext();
+ v8::Context::Scope contextScope(context);
+
+ v8::Local<v8::Object> info = v8::Object::New(m_isolate);
+ bool success = false;
+ success = info->Set(context, toV8StringInternalized(m_isolate, "enabled"),
+ v8::Boolean::New(m_isolate, activated))
+ .FromMaybe(false);
+ DCHECK(success);
+ v8::Local<v8::Function> setBreakpointsActivated =
+ v8::Local<v8::Function>::Cast(
+ m_debuggerScript.Get(m_isolate)
+ ->Get(context, toV8StringInternalized(m_isolate,
+ "setBreakpointsActivated"))
+ .ToLocalChecked());
+ v8::Debug::Call(debuggerContext(), setBreakpointsActivated, info)
+ .ToLocalChecked();
+
+ m_breakpointsActivated = activated;
+}
+
+V8Debugger::PauseOnExceptionsState V8Debugger::getPauseOnExceptionsState() {
+ DCHECK(enabled());
+ v8::HandleScope scope(m_isolate);
+ v8::Local<v8::Context> context = debuggerContext();
+ v8::Context::Scope contextScope(context);
+
+ v8::Local<v8::Value> argv[] = {v8::Undefined(m_isolate)};
+ v8::Local<v8::Value> result =
+ callDebuggerMethod("pauseOnExceptionsState", 0, argv).ToLocalChecked();
+ return static_cast<V8Debugger::PauseOnExceptionsState>(
+ result->Int32Value(context).FromJust());
+}
+
+void V8Debugger::setPauseOnExceptionsState(
+ PauseOnExceptionsState pauseOnExceptionsState) {
+ DCHECK(enabled());
+ v8::HandleScope scope(m_isolate);
+ v8::Context::Scope contextScope(debuggerContext());
+
+ v8::Local<v8::Value> argv[] = {
+ v8::Int32::New(m_isolate, pauseOnExceptionsState)};
+ callDebuggerMethod("setPauseOnExceptionsState", 1, argv);
+}
+
+void V8Debugger::setPauseOnNextStatement(bool pause) {
+ if (m_runningNestedMessageLoop) return;
+ if (pause)
+ v8::Debug::DebugBreak(m_isolate);
+ else
+ v8::Debug::CancelDebugBreak(m_isolate);
+}
+
+bool V8Debugger::canBreakProgram() {
+ if (!m_breakpointsActivated) return false;
+ return m_isolate->InContext();
+}
+
+void V8Debugger::breakProgram() {
+ if (isPaused()) {
+ DCHECK(!m_runningNestedMessageLoop);
+ v8::Local<v8::Value> exception;
+ v8::Local<v8::Array> hitBreakpoints;
+ handleProgramBreak(m_pausedContext, m_executionState, exception,
+ hitBreakpoints);
+ return;
+ }
+
+ if (!canBreakProgram()) return;
+
+ v8::HandleScope scope(m_isolate);
+ v8::Local<v8::Function> breakFunction;
+ if (!v8::Function::New(m_isolate->GetCurrentContext(),
+ &V8Debugger::breakProgramCallback,
+ v8::External::New(m_isolate, this), 0,
+ v8::ConstructorBehavior::kThrow)
+ .ToLocal(&breakFunction))
+ return;
+ v8::Debug::Call(debuggerContext(), breakFunction).ToLocalChecked();
+}
+
+void V8Debugger::continueProgram() {
+ if (isPaused()) m_inspector->client()->quitMessageLoopOnPause();
+ m_pausedContext.Clear();
+ m_executionState.Clear();
+}
+
+void V8Debugger::stepIntoStatement() {
+ DCHECK(isPaused());
+ DCHECK(!m_executionState.IsEmpty());
+ v8::HandleScope handleScope(m_isolate);
+ v8::Local<v8::Value> argv[] = {m_executionState};
+ callDebuggerMethod(stepIntoV8MethodName, 1, argv);
+ continueProgram();
+}
+
+void V8Debugger::stepOverStatement() {
+ DCHECK(isPaused());
+ DCHECK(!m_executionState.IsEmpty());
+ v8::HandleScope handleScope(m_isolate);
+ v8::Local<v8::Value> argv[] = {m_executionState};
+ callDebuggerMethod("stepOverStatement", 1, argv);
+ continueProgram();
+}
+
+void V8Debugger::stepOutOfFunction() {
+ DCHECK(isPaused());
+ DCHECK(!m_executionState.IsEmpty());
+ v8::HandleScope handleScope(m_isolate);
+ v8::Local<v8::Value> argv[] = {m_executionState};
+ callDebuggerMethod(stepOutV8MethodName, 1, argv);
+ continueProgram();
+}
+
+void V8Debugger::clearStepping() {
+ DCHECK(enabled());
+ v8::HandleScope scope(m_isolate);
+ v8::Context::Scope contextScope(debuggerContext());
+
+ v8::Local<v8::Value> argv[] = {v8::Undefined(m_isolate)};
+ callDebuggerMethod("clearStepping", 0, argv);
+}
+
+bool V8Debugger::setScriptSource(
+ const String16& sourceID, v8::Local<v8::String> newSource, bool dryRun,
+ ErrorString* error,
+ Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails,
+ JavaScriptCallFrames* newCallFrames, Maybe<bool>* stackChanged) {
+ class EnableLiveEditScope {
+ public:
+ explicit EnableLiveEditScope(v8::Isolate* isolate) : m_isolate(isolate) {
+ v8::Debug::SetLiveEditEnabled(m_isolate, true);
+ inLiveEditScope = true;
+ }
+ ~EnableLiveEditScope() {
+ v8::Debug::SetLiveEditEnabled(m_isolate, false);
+ inLiveEditScope = false;
+ }
+
+ private:
+ v8::Isolate* m_isolate;
+ };
+
+ DCHECK(enabled());
+ v8::HandleScope scope(m_isolate);
+
+ std::unique_ptr<v8::Context::Scope> contextScope;
+ if (!isPaused())
+ contextScope = wrapUnique(new v8::Context::Scope(debuggerContext()));
+
+ v8::Local<v8::Value> argv[] = {toV8String(m_isolate, sourceID), newSource,
+ v8Boolean(dryRun, m_isolate)};
+
+ v8::Local<v8::Value> v8result;
+ {
+ EnableLiveEditScope enableLiveEditScope(m_isolate);
+ v8::TryCatch tryCatch(m_isolate);
+ tryCatch.SetVerbose(false);
+ v8::MaybeLocal<v8::Value> maybeResult =
+ callDebuggerMethod("liveEditScriptSource", 3, argv);
+ if (tryCatch.HasCaught()) {
+ v8::Local<v8::Message> message = tryCatch.Message();
+ if (!message.IsEmpty())
+ *error = toProtocolStringWithTypeCheck(message->Get());
+ else
+ *error = "Unknown error.";
+ return false;
+ }
+ v8result = maybeResult.ToLocalChecked();
+ }
+ DCHECK(!v8result.IsEmpty());
+ v8::Local<v8::Context> context = m_isolate->GetCurrentContext();
+ v8::Local<v8::Object> resultTuple =
+ v8result->ToObject(context).ToLocalChecked();
+ int code = static_cast<int>(resultTuple->Get(context, 0)
+ .ToLocalChecked()
+ ->ToInteger(context)
+ .ToLocalChecked()
+ ->Value());
+ switch (code) {
+ case 0: {
+ *stackChanged = resultTuple->Get(context, 1)
+ .ToLocalChecked()
+ ->BooleanValue(context)
+ .FromJust();
+ // Call stack may have changed after if the edited function was on the
+ // stack.
+ if (!dryRun && isPaused()) {
+ JavaScriptCallFrames frames = currentCallFrames();
+ newCallFrames->swap(frames);
+ }
+ return true;
+ }
+ // Compile error.
+ case 1: {
+ *exceptionDetails =
+ protocol::Runtime::ExceptionDetails::create()
+ .setExceptionId(m_inspector->nextExceptionId())
+ .setText(toProtocolStringWithTypeCheck(
+ resultTuple->Get(context, 2).ToLocalChecked()))
+ .setLineNumber(static_cast<int>(resultTuple->Get(context, 3)
+ .ToLocalChecked()
+ ->ToInteger(context)
+ .ToLocalChecked()
+ ->Value()) -
+ 1)
+ .setColumnNumber(static_cast<int>(resultTuple->Get(context, 4)
+ .ToLocalChecked()
+ ->ToInteger(context)
+ .ToLocalChecked()
+ ->Value()) -
+ 1)
+ .build();
+ return false;
+ }
+ }
+ *error = "Unknown error.";
+ return false;
+}
+
+JavaScriptCallFrames V8Debugger::currentCallFrames(int limit) {
+ if (!m_isolate->InContext()) return JavaScriptCallFrames();
+ v8::Local<v8::Value> currentCallFramesV8;
+ if (m_executionState.IsEmpty()) {
+ v8::Local<v8::Function> currentCallFramesFunction =
+ v8::Local<v8::Function>::Cast(
+ m_debuggerScript.Get(m_isolate)
+ ->Get(debuggerContext(),
+ toV8StringInternalized(m_isolate, "currentCallFrames"))
+ .ToLocalChecked());
+ currentCallFramesV8 =
+ v8::Debug::Call(debuggerContext(), currentCallFramesFunction,
+ v8::Integer::New(m_isolate, limit))
+ .ToLocalChecked();
+ } else {
+ v8::Local<v8::Value> argv[] = {m_executionState,
+ v8::Integer::New(m_isolate, limit)};
+ currentCallFramesV8 =
+ callDebuggerMethod("currentCallFrames", arraysize(argv), argv)
+ .ToLocalChecked();
+ }
+ DCHECK(!currentCallFramesV8.IsEmpty());
+ if (!currentCallFramesV8->IsArray()) return JavaScriptCallFrames();
+ v8::Local<v8::Array> callFramesArray = currentCallFramesV8.As<v8::Array>();
+ JavaScriptCallFrames callFrames;
+ for (uint32_t i = 0; i < callFramesArray->Length(); ++i) {
+ v8::Local<v8::Value> callFrameValue;
+ if (!callFramesArray->Get(debuggerContext(), i).ToLocal(&callFrameValue))
+ return JavaScriptCallFrames();
+ if (!callFrameValue->IsObject()) return JavaScriptCallFrames();
+ v8::Local<v8::Object> callFrameObject = callFrameValue.As<v8::Object>();
+ callFrames.push_back(JavaScriptCallFrame::create(
+ debuggerContext(), v8::Local<v8::Object>::Cast(callFrameObject)));
+ }
+ return callFrames;
+}
+
+static V8Debugger* toV8Debugger(v8::Local<v8::Value> data) {
+ void* p = v8::Local<v8::External>::Cast(data)->Value();
+ return static_cast<V8Debugger*>(p);
+}
+
+void V8Debugger::breakProgramCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ DCHECK_EQ(info.Length(), 2);
+ V8Debugger* thisPtr = toV8Debugger(info.Data());
+ if (!thisPtr->enabled()) return;
+ v8::Local<v8::Context> pausedContext =
+ thisPtr->m_isolate->GetCurrentContext();
+ v8::Local<v8::Value> exception;
+ v8::Local<v8::Array> hitBreakpoints;
+ thisPtr->handleProgramBreak(pausedContext,
+ v8::Local<v8::Object>::Cast(info[0]), exception,
+ hitBreakpoints);
+}
+
+void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
+ v8::Local<v8::Object> executionState,
+ v8::Local<v8::Value> exception,
+ v8::Local<v8::Array> hitBreakpointNumbers,
+ bool isPromiseRejection) {
+ // Don't allow nested breaks.
+ if (m_runningNestedMessageLoop) return;
+
+ V8DebuggerAgentImpl* agent =
+ m_inspector->enabledDebuggerAgentForGroup(getGroupId(pausedContext));
+ if (!agent) return;
+
+ std::vector<String16> breakpointIds;
+ if (!hitBreakpointNumbers.IsEmpty()) {
+ breakpointIds.reserve(hitBreakpointNumbers->Length());
+ for (uint32_t i = 0; i < hitBreakpointNumbers->Length(); i++) {
+ v8::Local<v8::Value> hitBreakpointNumber =
+ hitBreakpointNumbers->Get(debuggerContext(), i).ToLocalChecked();
+ DCHECK(hitBreakpointNumber->IsInt32());
+ breakpointIds.push_back(String16::fromInteger(
+ hitBreakpointNumber->Int32Value(debuggerContext()).FromJust()));
+ }
+ }
+
+ m_pausedContext = pausedContext;
+ m_executionState = executionState;
+ V8DebuggerAgentImpl::SkipPauseRequest result = agent->didPause(
+ pausedContext, exception, breakpointIds, isPromiseRejection);
+ if (result == V8DebuggerAgentImpl::RequestNoSkip) {
+ m_runningNestedMessageLoop = true;
+ int groupId = getGroupId(pausedContext);
+ DCHECK(groupId);
+ m_inspector->client()->runMessageLoopOnPause(groupId);
+ // The agent may have been removed in the nested loop.
+ agent =
+ m_inspector->enabledDebuggerAgentForGroup(getGroupId(pausedContext));
+ if (agent) agent->didContinue();
+ m_runningNestedMessageLoop = false;
+ }
+ m_pausedContext.Clear();
+ m_executionState.Clear();
+
+ if (result == V8DebuggerAgentImpl::RequestStepFrame) {
+ v8::Local<v8::Value> argv[] = {executionState};
+ callDebuggerMethod("stepFrameStatement", 1, argv);
+ } else if (result == V8DebuggerAgentImpl::RequestStepInto) {
+ v8::Local<v8::Value> argv[] = {executionState};
+ callDebuggerMethod(stepIntoV8MethodName, 1, argv);
+ } else if (result == V8DebuggerAgentImpl::RequestStepOut) {
+ v8::Local<v8::Value> argv[] = {executionState};
+ callDebuggerMethod(stepOutV8MethodName, 1, argv);
+ }
+}
+
+void V8Debugger::v8DebugEventCallback(
+ const v8::Debug::EventDetails& eventDetails) {
+ V8Debugger* thisPtr = toV8Debugger(eventDetails.GetCallbackData());
+ thisPtr->handleV8DebugEvent(eventDetails);
+}
+
+v8::Local<v8::Value> V8Debugger::callInternalGetterFunction(
+ v8::Local<v8::Object> object, const char* functionName) {
+ v8::MicrotasksScope microtasks(m_isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::Local<v8::Value> getterValue =
+ object
+ ->Get(m_isolate->GetCurrentContext(),
+ toV8StringInternalized(m_isolate, functionName))
+ .ToLocalChecked();
+ DCHECK(!getterValue.IsEmpty() && getterValue->IsFunction());
+ return v8::Local<v8::Function>::Cast(getterValue)
+ ->Call(m_isolate->GetCurrentContext(), object, 0, 0)
+ .ToLocalChecked();
+}
+
+void V8Debugger::handleV8DebugEvent(
+ const v8::Debug::EventDetails& eventDetails) {
+ if (!enabled()) return;
+ v8::DebugEvent event = eventDetails.GetEvent();
+ if (event != v8::AsyncTaskEvent && event != v8::Break &&
+ event != v8::Exception && event != v8::AfterCompile &&
+ event != v8::BeforeCompile && event != v8::CompileError)
+ return;
+
+ v8::Local<v8::Context> eventContext = eventDetails.GetEventContext();
+ DCHECK(!eventContext.IsEmpty());
+
+ if (event == v8::AsyncTaskEvent) {
+ v8::HandleScope scope(m_isolate);
+ handleV8AsyncTaskEvent(eventContext, eventDetails.GetExecutionState(),
+ eventDetails.GetEventData());
+ return;
+ }
+
+ V8DebuggerAgentImpl* agent =
+ m_inspector->enabledDebuggerAgentForGroup(getGroupId(eventContext));
+ if (agent) {
+ v8::HandleScope scope(m_isolate);
+ if (m_ignoreScriptParsedEventsCounter == 0 &&
+ (event == v8::AfterCompile || event == v8::CompileError)) {
+ v8::Context::Scope contextScope(debuggerContext());
+ v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
+ v8::Local<v8::Value> value =
+ callDebuggerMethod("getAfterCompileScript", 1, argv).ToLocalChecked();
+ if (value->IsNull()) return;
+ DCHECK(value->IsObject());
+ v8::Local<v8::Object> scriptObject = v8::Local<v8::Object>::Cast(value);
+ agent->didParseSource(
+ wrapUnique(new V8DebuggerScript(debuggerContext(), scriptObject,
+ inLiveEditScope)),
+ event == v8::AfterCompile);
+ } else if (event == v8::Exception) {
+ v8::Local<v8::Object> eventData = eventDetails.GetEventData();
+ v8::Local<v8::Value> exception =
+ callInternalGetterFunction(eventData, "exception");
+ v8::Local<v8::Value> promise =
+ callInternalGetterFunction(eventData, "promise");
+ bool isPromiseRejection = !promise.IsEmpty() && promise->IsObject();
+ handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
+ exception, v8::Local<v8::Array>(), isPromiseRejection);
+ } else if (event == v8::Break) {
+ v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
+ v8::Local<v8::Value> hitBreakpoints =
+ callDebuggerMethod("getBreakpointNumbers", 1, argv).ToLocalChecked();
+ DCHECK(hitBreakpoints->IsArray());
+ handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
+ v8::Local<v8::Value>(),
+ hitBreakpoints.As<v8::Array>());
+ }
+ }
+}
+
+void V8Debugger::handleV8AsyncTaskEvent(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> executionState,
+ v8::Local<v8::Object> eventData) {
+ if (!m_maxAsyncCallStackDepth) return;
+
+ String16 type = toProtocolStringWithTypeCheck(
+ callInternalGetterFunction(eventData, "type"));
+ String16 name = toProtocolStringWithTypeCheck(
+ callInternalGetterFunction(eventData, "name"));
+ int id = static_cast<int>(callInternalGetterFunction(eventData, "id")
+ ->ToInteger(context)
+ .ToLocalChecked()
+ ->Value());
+ // Async task events from Promises are given misaligned pointers to prevent
+ // from overlapping with other Blink task identifiers. There is a single
+ // namespace of such ids, managed by src/js/promise.js.
+ void* ptr = reinterpret_cast<void*>(id * 2 + 1);
+ if (type == v8AsyncTaskEventEnqueue)
+ asyncTaskScheduled(name, ptr, false);
+ else if (type == v8AsyncTaskEventEnqueueRecurring)
+ asyncTaskScheduled(name, ptr, true);
+ else if (type == v8AsyncTaskEventWillHandle)
+ asyncTaskStarted(ptr);
+ else if (type == v8AsyncTaskEventDidHandle)
+ asyncTaskFinished(ptr);
+ else if (type == v8AsyncTaskEventCancel)
+ asyncTaskCanceled(ptr);
+ else
+ UNREACHABLE();
+}
+
+V8StackTraceImpl* V8Debugger::currentAsyncCallChain() {
+ if (!m_currentStacks.size()) return nullptr;
+ return m_currentStacks.back().get();
+}
+
+void V8Debugger::compileDebuggerScript() {
+ if (!m_debuggerScript.IsEmpty()) {
+ UNREACHABLE();
+ return;
+ }
+
+ v8::HandleScope scope(m_isolate);
+ v8::Context::Scope contextScope(debuggerContext());
+
+ v8::Local<v8::String> scriptValue =
+ v8::String::NewFromUtf8(m_isolate, DebuggerScript_js,
+ v8::NewStringType::kInternalized,
+ sizeof(DebuggerScript_js))
+ .ToLocalChecked();
+ v8::Local<v8::Value> value;
+ if (!m_inspector->compileAndRunInternalScript(debuggerContext(), scriptValue)
+ .ToLocal(&value)) {
+ UNREACHABLE();
+ return;
+ }
+ DCHECK(value->IsObject());
+ m_debuggerScript.Reset(m_isolate, value.As<v8::Object>());
+}
+
+v8::Local<v8::Context> V8Debugger::debuggerContext() const {
+ DCHECK(!m_debuggerContext.IsEmpty());
+ return m_debuggerContext.Get(m_isolate);
+}
+
+v8::MaybeLocal<v8::Value> V8Debugger::functionScopes(
+ v8::Local<v8::Context> context, v8::Local<v8::Function> function) {
+ if (!enabled()) {
+ UNREACHABLE();
+ return v8::Local<v8::Value>::New(m_isolate, v8::Undefined(m_isolate));
+ }
+ v8::Local<v8::Value> argv[] = {function};
+ v8::Local<v8::Value> scopesValue;
+ if (!callDebuggerMethod("getFunctionScopes", 1, argv).ToLocal(&scopesValue))
+ return v8::MaybeLocal<v8::Value>();
+ v8::Local<v8::Value> copied;
+ if (!copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
+ scopesValue)
+ .ToLocal(&copied) ||
+ !copied->IsArray())
+ return v8::MaybeLocal<v8::Value>();
+ if (!markAsInternal(context, v8::Local<v8::Array>::Cast(copied),
+ V8InternalValueType::kScopeList))
+ return v8::MaybeLocal<v8::Value>();
+ if (!markArrayEntriesAsInternal(context, v8::Local<v8::Array>::Cast(copied),
+ V8InternalValueType::kScope))
+ return v8::MaybeLocal<v8::Value>();
+ return copied;
+}
+
+v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
+ v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
+ v8::Local<v8::Array> properties;
+ if (!v8::Debug::GetInternalProperties(m_isolate, value).ToLocal(&properties))
+ return v8::MaybeLocal<v8::Array>();
+ if (value->IsFunction()) {
+ v8::Local<v8::Function> function = value.As<v8::Function>();
+ v8::Local<v8::Value> location = functionLocation(context, function);
+ if (location->IsObject()) {
+ createDataProperty(
+ context, properties, properties->Length(),
+ toV8StringInternalized(m_isolate, "[[FunctionLocation]]"));
+ createDataProperty(context, properties, properties->Length(), location);
+ }
+ if (function->IsGeneratorFunction()) {
+ createDataProperty(context, properties, properties->Length(),
+ toV8StringInternalized(m_isolate, "[[IsGenerator]]"));
+ createDataProperty(context, properties, properties->Length(),
+ v8::True(m_isolate));
+ }
+ }
+ if (!enabled()) return properties;
+ if (value->IsMap() || value->IsWeakMap() || value->IsSet() ||
+ value->IsWeakSet() || value->IsSetIterator() || value->IsMapIterator()) {
+ v8::Local<v8::Value> entries =
+ collectionEntries(context, v8::Local<v8::Object>::Cast(value));
+ if (entries->IsArray()) {
+ createDataProperty(context, properties, properties->Length(),
+ toV8StringInternalized(m_isolate, "[[Entries]]"));
+ createDataProperty(context, properties, properties->Length(), entries);
+ }
+ }
+ if (value->IsGeneratorObject()) {
+ v8::Local<v8::Value> location =
+ generatorObjectLocation(context, v8::Local<v8::Object>::Cast(value));
+ if (location->IsObject()) {
+ createDataProperty(
+ context, properties, properties->Length(),
+ toV8StringInternalized(m_isolate, "[[GeneratorLocation]]"));
+ createDataProperty(context, properties, properties->Length(), location);
+ }
+ }
+ if (value->IsFunction()) {
+ v8::Local<v8::Function> function = value.As<v8::Function>();
+ v8::Local<v8::Value> boundFunction = function->GetBoundFunction();
+ v8::Local<v8::Value> scopes;
+ if (boundFunction->IsUndefined() &&
+ functionScopes(context, function).ToLocal(&scopes)) {
+ createDataProperty(context, properties, properties->Length(),
+ toV8StringInternalized(m_isolate, "[[Scopes]]"));
+ createDataProperty(context, properties, properties->Length(), scopes);
+ }
+ }
+ return properties;
+}
+
+v8::Local<v8::Value> V8Debugger::collectionEntries(
+ v8::Local<v8::Context> context, v8::Local<v8::Object> object) {
+ if (!enabled()) {
+ UNREACHABLE();
+ return v8::Undefined(m_isolate);
+ }
+ v8::Local<v8::Value> argv[] = {object};
+ v8::Local<v8::Value> entriesValue =
+ callDebuggerMethod("getCollectionEntries", 1, argv).ToLocalChecked();
+ if (!entriesValue->IsArray()) return v8::Undefined(m_isolate);
+
+ v8::Local<v8::Array> entries = entriesValue.As<v8::Array>();
+ v8::Local<v8::Array> copiedArray =
+ v8::Array::New(m_isolate, entries->Length());
+ if (!copiedArray->SetPrototype(context, v8::Null(m_isolate)).FromMaybe(false))
+ return v8::Undefined(m_isolate);
+ for (uint32_t i = 0; i < entries->Length(); ++i) {
+ v8::Local<v8::Value> item;
+ if (!entries->Get(debuggerContext(), i).ToLocal(&item))
+ return v8::Undefined(m_isolate);
+ v8::Local<v8::Value> copied;
+ if (!copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
+ item)
+ .ToLocal(&copied))
+ return v8::Undefined(m_isolate);
+ if (!createDataProperty(context, copiedArray, i, copied).FromMaybe(false))
+ return v8::Undefined(m_isolate);
+ }
+ if (!markArrayEntriesAsInternal(context,
+ v8::Local<v8::Array>::Cast(copiedArray),
+ V8InternalValueType::kEntry))
+ return v8::Undefined(m_isolate);
+ return copiedArray;
+}
+
+v8::Local<v8::Value> V8Debugger::generatorObjectLocation(
+ v8::Local<v8::Context> context, v8::Local<v8::Object> object) {
+ if (!enabled()) {
+ UNREACHABLE();
+ return v8::Null(m_isolate);
+ }
+ v8::Local<v8::Value> argv[] = {object};
+ v8::Local<v8::Value> location =
+ callDebuggerMethod("getGeneratorObjectLocation", 1, argv)
+ .ToLocalChecked();
+ v8::Local<v8::Value> copied;
+ if (!copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
+ location)
+ .ToLocal(&copied) ||
+ !copied->IsObject())
+ return v8::Null(m_isolate);
+ if (!markAsInternal(context, v8::Local<v8::Object>::Cast(copied),
+ V8InternalValueType::kLocation))
+ return v8::Null(m_isolate);
+ return copied;
+}
+
+v8::Local<v8::Value> V8Debugger::functionLocation(
+ v8::Local<v8::Context> context, v8::Local<v8::Function> function) {
+ int scriptId = function->ScriptId();
+ if (scriptId == v8::UnboundScript::kNoScriptId) return v8::Null(m_isolate);
+ int lineNumber = function->GetScriptLineNumber();
+ int columnNumber = function->GetScriptColumnNumber();
+ if (lineNumber == v8::Function::kLineOffsetNotFound ||
+ columnNumber == v8::Function::kLineOffsetNotFound)
+ return v8::Null(m_isolate);
+ v8::Local<v8::Object> location = v8::Object::New(m_isolate);
+ if (!location->SetPrototype(context, v8::Null(m_isolate)).FromMaybe(false))
+ return v8::Null(m_isolate);
+ if (!createDataProperty(
+ context, location, toV8StringInternalized(m_isolate, "scriptId"),
+ toV8String(m_isolate, String16::fromInteger(scriptId)))
+ .FromMaybe(false))
+ return v8::Null(m_isolate);
+ if (!createDataProperty(context, location,
+ toV8StringInternalized(m_isolate, "lineNumber"),
+ v8::Integer::New(m_isolate, lineNumber))
+ .FromMaybe(false))
+ return v8::Null(m_isolate);
+ if (!createDataProperty(context, location,
+ toV8StringInternalized(m_isolate, "columnNumber"),
+ v8::Integer::New(m_isolate, columnNumber))
+ .FromMaybe(false))
+ return v8::Null(m_isolate);
+ if (!markAsInternal(context, location, V8InternalValueType::kLocation))
+ return v8::Null(m_isolate);
+ return location;
+}
+
+bool V8Debugger::isPaused() { return !m_pausedContext.IsEmpty(); }
+
+std::unique_ptr<V8StackTraceImpl> V8Debugger::createStackTrace(
+ v8::Local<v8::StackTrace> stackTrace) {
+ int contextGroupId =
+ m_isolate->InContext() ? getGroupId(m_isolate->GetCurrentContext()) : 0;
+ return V8StackTraceImpl::create(this, contextGroupId, stackTrace,
+ V8StackTraceImpl::maxCallStackSizeToCapture);
+}
+
+int V8Debugger::markContext(const V8ContextInfo& info) {
+ DCHECK(info.context->GetIsolate() == m_isolate);
+ int contextId = ++m_lastContextId;
+ String16 debugData = String16::fromInteger(info.contextGroupId) + "," +
+ String16::fromInteger(contextId) + "," +
+ toString16(info.auxData);
+ v8::Context::Scope contextScope(info.context);
+ info.context->SetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex),
+ toV8String(m_isolate, debugData));
+ return contextId;
+}
+
+void V8Debugger::setAsyncCallStackDepth(V8DebuggerAgentImpl* agent, int depth) {
+ if (depth <= 0)
+ m_maxAsyncCallStackDepthMap.erase(agent);
+ else
+ m_maxAsyncCallStackDepthMap[agent] = depth;
+
+ int maxAsyncCallStackDepth = 0;
+ for (const auto& pair : m_maxAsyncCallStackDepthMap) {
+ if (pair.second > maxAsyncCallStackDepth)
+ maxAsyncCallStackDepth = pair.second;
+ }
+
+ if (m_maxAsyncCallStackDepth == maxAsyncCallStackDepth) return;
+ m_maxAsyncCallStackDepth = maxAsyncCallStackDepth;
+ if (!maxAsyncCallStackDepth) allAsyncTasksCanceled();
+}
+
+void V8Debugger::asyncTaskScheduled(const StringView& taskName, void* task,
+ bool recurring) {
+ if (!m_maxAsyncCallStackDepth) return;
+ asyncTaskScheduled(toString16(taskName), task, recurring);
+}
+
+void V8Debugger::asyncTaskScheduled(const String16& taskName, void* task,
+ bool recurring) {
+ if (!m_maxAsyncCallStackDepth) return;
+ v8::HandleScope scope(m_isolate);
+ int contextGroupId =
+ m_isolate->InContext() ? getGroupId(m_isolate->GetCurrentContext()) : 0;
+ std::unique_ptr<V8StackTraceImpl> chain = V8StackTraceImpl::capture(
+ this, contextGroupId, V8StackTraceImpl::maxCallStackSizeToCapture,
+ taskName);
+ if (chain) {
+ m_asyncTaskStacks[task] = std::move(chain);
+ if (recurring) m_recurringTasks.insert(task);
+ }
+}
+
+void V8Debugger::asyncTaskCanceled(void* task) {
+ if (!m_maxAsyncCallStackDepth) return;
+ m_asyncTaskStacks.erase(task);
+ m_recurringTasks.erase(task);
+}
+
+void V8Debugger::asyncTaskStarted(void* task) {
+ if (!m_maxAsyncCallStackDepth) return;
+ m_currentTasks.push_back(task);
+ AsyncTaskToStackTrace::iterator stackIt = m_asyncTaskStacks.find(task);
+ // Needs to support following order of events:
+ // - asyncTaskScheduled
+ // <-- attached here -->
+ // - asyncTaskStarted
+ // - asyncTaskCanceled <-- canceled before finished
+ // <-- async stack requested here -->
+ // - asyncTaskFinished
+ std::unique_ptr<V8StackTraceImpl> stack;
+ if (stackIt != m_asyncTaskStacks.end() && stackIt->second)
+ stack = stackIt->second->cloneImpl();
+ m_currentStacks.push_back(std::move(stack));
+}
+
+void V8Debugger::asyncTaskFinished(void* task) {
+ if (!m_maxAsyncCallStackDepth) return;
+ // We could start instrumenting half way and the stack is empty.
+ if (!m_currentStacks.size()) return;
+
+ DCHECK(m_currentTasks.back() == task);
+ m_currentTasks.pop_back();
+
+ m_currentStacks.pop_back();
+ if (m_recurringTasks.find(task) == m_recurringTasks.end())
+ m_asyncTaskStacks.erase(task);
+}
+
+void V8Debugger::allAsyncTasksCanceled() {
+ m_asyncTaskStacks.clear();
+ m_recurringTasks.clear();
+ m_currentStacks.clear();
+ m_currentTasks.clear();
+}
+
+void V8Debugger::muteScriptParsedEvents() {
+ ++m_ignoreScriptParsedEventsCounter;
+}
+
+void V8Debugger::unmuteScriptParsedEvents() {
+ --m_ignoreScriptParsedEventsCounter;
+ DCHECK_GE(m_ignoreScriptParsedEventsCounter, 0);
+}
+
+std::unique_ptr<V8StackTraceImpl> V8Debugger::captureStackTrace(
+ bool fullStack) {
+ if (!m_isolate->InContext()) return nullptr;
+
+ v8::HandleScope handles(m_isolate);
+ int contextGroupId = getGroupId(m_isolate->GetCurrentContext());
+ if (!contextGroupId) return nullptr;
+
+ size_t stackSize =
+ fullStack ? V8StackTraceImpl::maxCallStackSizeToCapture : 1;
+ if (m_inspector->enabledRuntimeAgentForGroup(contextGroupId))
+ stackSize = V8StackTraceImpl::maxCallStackSizeToCapture;
+
+ return V8StackTraceImpl::capture(this, contextGroupId, stackSize);
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
new file mode 100644
index 0000000000..83c1b21b02
--- /dev/null
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -0,0 +1,160 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8DEBUGGER_H_
+#define V8_INSPECTOR_V8DEBUGGER_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/java-script-call-frame.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Runtime.h"
+#include "src/inspector/v8-debugger-script.h"
+
+#include "include/v8-debug.h"
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+struct ScriptBreakpoint;
+class V8DebuggerAgentImpl;
+class V8InspectorImpl;
+class V8StackTraceImpl;
+
+using protocol::ErrorString;
+
+class V8Debugger {
+ public:
+ V8Debugger(v8::Isolate*, V8InspectorImpl*);
+ ~V8Debugger();
+
+ static int contextId(v8::Local<v8::Context>);
+ static int getGroupId(v8::Local<v8::Context>);
+ int markContext(const V8ContextInfo&);
+
+ bool enabled() const;
+
+ String16 setBreakpoint(const String16& sourceID, const ScriptBreakpoint&,
+ int* actualLineNumber, int* actualColumnNumber);
+ void removeBreakpoint(const String16& breakpointId);
+ void setBreakpointsActivated(bool);
+ bool breakpointsActivated() const { return m_breakpointsActivated; }
+
+ enum PauseOnExceptionsState {
+ DontPauseOnExceptions,
+ PauseOnAllExceptions,
+ PauseOnUncaughtExceptions
+ };
+ PauseOnExceptionsState getPauseOnExceptionsState();
+ void setPauseOnExceptionsState(PauseOnExceptionsState);
+ void setPauseOnNextStatement(bool);
+ bool canBreakProgram();
+ void breakProgram();
+ void continueProgram();
+ void stepIntoStatement();
+ void stepOverStatement();
+ void stepOutOfFunction();
+ void clearStepping();
+
+ bool setScriptSource(const String16& sourceID,
+ v8::Local<v8::String> newSource, bool dryRun,
+ ErrorString*,
+ protocol::Maybe<protocol::Runtime::ExceptionDetails>*,
+ JavaScriptCallFrames* newCallFrames,
+ protocol::Maybe<bool>* stackChanged);
+ JavaScriptCallFrames currentCallFrames(int limit = 0);
+
+ // Each script inherits debug data from v8::Context where it has been
+ // compiled.
+ // Only scripts whose debug data matches |contextGroupId| will be reported.
+ // Passing 0 will result in reporting all scripts.
+ void getCompiledScripts(int contextGroupId,
+ std::vector<std::unique_ptr<V8DebuggerScript>>&);
+ void enable();
+ void disable();
+
+ bool isPaused();
+ v8::Local<v8::Context> pausedContext() { return m_pausedContext; }
+
+ int maxAsyncCallChainDepth() { return m_maxAsyncCallStackDepth; }
+ V8StackTraceImpl* currentAsyncCallChain();
+ void setAsyncCallStackDepth(V8DebuggerAgentImpl*, int);
+ std::unique_ptr<V8StackTraceImpl> createStackTrace(v8::Local<v8::StackTrace>);
+ std::unique_ptr<V8StackTraceImpl> captureStackTrace(bool fullStack);
+
+ v8::MaybeLocal<v8::Array> internalProperties(v8::Local<v8::Context>,
+ v8::Local<v8::Value>);
+
+ void asyncTaskScheduled(const StringView& taskName, void* task,
+ bool recurring);
+ void asyncTaskScheduled(const String16& taskName, void* task, bool recurring);
+ void asyncTaskCanceled(void* task);
+ void asyncTaskStarted(void* task);
+ void asyncTaskFinished(void* task);
+ void allAsyncTasksCanceled();
+
+ void muteScriptParsedEvents();
+ void unmuteScriptParsedEvents();
+
+ V8InspectorImpl* inspector() { return m_inspector; }
+
+ private:
+ void compileDebuggerScript();
+ v8::MaybeLocal<v8::Value> callDebuggerMethod(const char* functionName,
+ int argc,
+ v8::Local<v8::Value> argv[]);
+ v8::Local<v8::Context> debuggerContext() const;
+ void clearBreakpoints();
+
+ static void breakProgramCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ void handleProgramBreak(v8::Local<v8::Context> pausedContext,
+ v8::Local<v8::Object> executionState,
+ v8::Local<v8::Value> exception,
+ v8::Local<v8::Array> hitBreakpoints,
+ bool isPromiseRejection = false);
+ static void v8DebugEventCallback(const v8::Debug::EventDetails&);
+ v8::Local<v8::Value> callInternalGetterFunction(v8::Local<v8::Object>,
+ const char* functionName);
+ void handleV8DebugEvent(const v8::Debug::EventDetails&);
+ void handleV8AsyncTaskEvent(v8::Local<v8::Context>,
+ v8::Local<v8::Object> executionState,
+ v8::Local<v8::Object> eventData);
+
+ v8::Local<v8::Value> collectionEntries(v8::Local<v8::Context>,
+ v8::Local<v8::Object>);
+ v8::Local<v8::Value> generatorObjectLocation(v8::Local<v8::Context>,
+ v8::Local<v8::Object>);
+ v8::Local<v8::Value> functionLocation(v8::Local<v8::Context>,
+ v8::Local<v8::Function>);
+ v8::MaybeLocal<v8::Value> functionScopes(v8::Local<v8::Context>,
+ v8::Local<v8::Function>);
+
+ v8::Isolate* m_isolate;
+ V8InspectorImpl* m_inspector;
+ int m_lastContextId;
+ int m_enableCount;
+ bool m_breakpointsActivated;
+ v8::Global<v8::Object> m_debuggerScript;
+ v8::Global<v8::Context> m_debuggerContext;
+ v8::Local<v8::Object> m_executionState;
+ v8::Local<v8::Context> m_pausedContext;
+ bool m_runningNestedMessageLoop;
+ int m_ignoreScriptParsedEventsCounter;
+
+ using AsyncTaskToStackTrace =
+ protocol::HashMap<void*, std::unique_ptr<V8StackTraceImpl>>;
+ AsyncTaskToStackTrace m_asyncTaskStacks;
+ protocol::HashSet<void*> m_recurringTasks;
+ int m_maxAsyncCallStackDepth;
+ std::vector<void*> m_currentTasks;
+ std::vector<std::unique_ptr<V8StackTraceImpl>> m_currentStacks;
+ protocol::HashMap<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap;
+
+ DISALLOW_COPY_AND_ASSIGN(V8Debugger);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8DEBUGGER_H_
diff --git a/deps/v8/src/inspector/v8-function-call.cc b/deps/v8/src/inspector/v8-function-call.cc
new file mode 100644
index 0000000000..3880e3100e
--- /dev/null
+++ b/deps/v8/src/inspector/v8-function-call.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/inspector/v8-function-call.h"
+
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+V8FunctionCall::V8FunctionCall(V8InspectorImpl* inspector,
+ v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value, const String16& name)
+ : m_inspector(inspector),
+ m_context(context),
+ m_name(toV8String(context->GetIsolate(), name)),
+ m_value(value) {}
+
+void V8FunctionCall::appendArgument(v8::Local<v8::Value> value) {
+ m_arguments.push_back(value);
+}
+
+void V8FunctionCall::appendArgument(const String16& argument) {
+ m_arguments.push_back(toV8String(m_context->GetIsolate(), argument));
+}
+
+void V8FunctionCall::appendArgument(int argument) {
+ m_arguments.push_back(v8::Number::New(m_context->GetIsolate(), argument));
+}
+
+void V8FunctionCall::appendArgument(bool argument) {
+ m_arguments.push_back(argument ? v8::True(m_context->GetIsolate())
+ : v8::False(m_context->GetIsolate()));
+}
+
+v8::Local<v8::Value> V8FunctionCall::call(bool& hadException,
+ bool reportExceptions) {
+ v8::TryCatch tryCatch(m_context->GetIsolate());
+ tryCatch.SetVerbose(reportExceptions);
+
+ v8::Local<v8::Value> result = callWithoutExceptionHandling();
+ hadException = tryCatch.HasCaught();
+ return result;
+}
+
+v8::Local<v8::Value> V8FunctionCall::callWithoutExceptionHandling() {
+ v8::Local<v8::Object> thisObject = v8::Local<v8::Object>::Cast(m_value);
+ v8::Local<v8::Value> value;
+ if (!thisObject->Get(m_context, m_name).ToLocal(&value))
+ return v8::Local<v8::Value>();
+
+ DCHECK(value->IsFunction());
+
+ v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(value);
+ std::unique_ptr<v8::Local<v8::Value>[]> info(
+ new v8::Local<v8::Value>[m_arguments.size()]);
+ for (size_t i = 0; i < m_arguments.size(); ++i) {
+ info[i] = m_arguments[i];
+ DCHECK(!info[i].IsEmpty());
+ }
+
+ int contextGroupId = V8Debugger::getGroupId(m_context);
+ if (contextGroupId) {
+ m_inspector->client()->muteMetrics(contextGroupId);
+ m_inspector->muteExceptions(contextGroupId);
+ }
+ v8::MicrotasksScope microtasksScope(m_context->GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::MaybeLocal<v8::Value> maybeResult = function->Call(
+ m_context, thisObject, static_cast<int>(m_arguments.size()), info.get());
+ if (contextGroupId) {
+ m_inspector->client()->unmuteMetrics(contextGroupId);
+ m_inspector->unmuteExceptions(contextGroupId);
+ }
+
+ v8::Local<v8::Value> result;
+ if (!maybeResult.ToLocal(&result)) return v8::Local<v8::Value>();
+ return result;
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-function-call.h b/deps/v8/src/inspector/v8-function-call.h
new file mode 100644
index 0000000000..0337caa339
--- /dev/null
+++ b/deps/v8/src/inspector/v8-function-call.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_V8FUNCTIONCALL_H_
+#define V8_INSPECTOR_V8FUNCTIONCALL_H_
+
+#include "src/inspector/string-16.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class V8InspectorImpl;
+
+class V8FunctionCall {
+ public:
+ V8FunctionCall(V8InspectorImpl*, v8::Local<v8::Context>, v8::Local<v8::Value>,
+ const String16& name);
+
+ void appendArgument(v8::Local<v8::Value>);
+ void appendArgument(const String16&);
+ void appendArgument(int);
+ void appendArgument(bool);
+
+ v8::Local<v8::Value> call(bool& hadException, bool reportExceptions = true);
+ v8::Local<v8::Value> callWithoutExceptionHandling();
+
+ protected:
+ V8InspectorImpl* m_inspector;
+ v8::Local<v8::Context> m_context;
+ std::vector<v8::Local<v8::Value>> m_arguments;
+ v8::Local<v8::String> m_name;
+ v8::Local<v8::Value> m_value;
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8FUNCTIONCALL_H_
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
new file mode 100644
index 0000000000..84c890bf3f
--- /dev/null
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -0,0 +1,407 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-heap-profiler-agent-impl.h"
+
+#include "src/inspector/injected-script.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+
+#include "include/v8-inspector.h"
+#include "include/v8-profiler.h"
+#include "include/v8-version.h"
+
+namespace v8_inspector {
+
+namespace {
+
+namespace HeapProfilerAgentState {
+static const char heapProfilerEnabled[] = "heapProfilerEnabled";
+static const char heapObjectsTrackingEnabled[] = "heapObjectsTrackingEnabled";
+static const char allocationTrackingEnabled[] = "allocationTrackingEnabled";
+static const char samplingHeapProfilerEnabled[] = "samplingHeapProfilerEnabled";
+static const char samplingHeapProfilerInterval[] =
+ "samplingHeapProfilerInterval";
+}
+
+class HeapSnapshotProgress final : public v8::ActivityControl {
+ public:
+ explicit HeapSnapshotProgress(protocol::HeapProfiler::Frontend* frontend)
+ : m_frontend(frontend) {}
+ ControlOption ReportProgressValue(int done, int total) override {
+ m_frontend->reportHeapSnapshotProgress(done, total,
+ protocol::Maybe<bool>());
+ if (done >= total) {
+ m_frontend->reportHeapSnapshotProgress(total, total, true);
+ }
+ m_frontend->flush();
+ return kContinue;
+ }
+
+ private:
+ protocol::HeapProfiler::Frontend* m_frontend;
+};
+
+class GlobalObjectNameResolver final
+ : public v8::HeapProfiler::ObjectNameResolver {
+ public:
+ explicit GlobalObjectNameResolver(V8InspectorSessionImpl* session)
+ : m_offset(0), m_strings(10000), m_session(session) {}
+
+ const char* GetName(v8::Local<v8::Object> object) override {
+ InspectedContext* context = m_session->inspector()->getContext(
+ m_session->contextGroupId(),
+ V8Debugger::contextId(object->CreationContext()));
+ if (!context) return "";
+ String16 name = context->origin();
+ size_t length = name.length();
+ if (m_offset + length + 1 >= m_strings.size()) return "";
+ for (size_t i = 0; i < length; ++i) {
+ UChar ch = name[i];
+ m_strings[m_offset + i] = ch > 0xff ? '?' : static_cast<char>(ch);
+ }
+ m_strings[m_offset + length] = '\0';
+ char* result = &*m_strings.begin() + m_offset;
+ m_offset += length + 1;
+ return result;
+ }
+
+ private:
+ size_t m_offset;
+ std::vector<char> m_strings;
+ V8InspectorSessionImpl* m_session;
+};
+
+class HeapSnapshotOutputStream final : public v8::OutputStream {
+ public:
+ explicit HeapSnapshotOutputStream(protocol::HeapProfiler::Frontend* frontend)
+ : m_frontend(frontend) {}
+ void EndOfStream() override {}
+ int GetChunkSize() override { return 102400; }
+ WriteResult WriteAsciiChunk(char* data, int size) override {
+ m_frontend->addHeapSnapshotChunk(String16(data, size));
+ m_frontend->flush();
+ return kContinue;
+ }
+
+ private:
+ protocol::HeapProfiler::Frontend* m_frontend;
+};
+
+v8::Local<v8::Object> objectByHeapObjectId(v8::Isolate* isolate, int id) {
+ v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
+ v8::Local<v8::Value> value = profiler->FindObjectById(id);
+ if (value.IsEmpty() || !value->IsObject()) return v8::Local<v8::Object>();
+ return value.As<v8::Object>();
+}
+
+class InspectableHeapObject final : public V8InspectorSession::Inspectable {
+ public:
+ explicit InspectableHeapObject(int heapObjectId)
+ : m_heapObjectId(heapObjectId) {}
+ v8::Local<v8::Value> get(v8::Local<v8::Context> context) override {
+ return objectByHeapObjectId(context->GetIsolate(), m_heapObjectId);
+ }
+
+ private:
+ int m_heapObjectId;
+};
+
+class HeapStatsStream final : public v8::OutputStream {
+ public:
+ explicit HeapStatsStream(protocol::HeapProfiler::Frontend* frontend)
+ : m_frontend(frontend) {}
+
+ void EndOfStream() override {}
+
+ WriteResult WriteAsciiChunk(char* data, int size) override {
+ DCHECK(false);
+ return kAbort;
+ }
+
+ WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* updateData,
+ int count) override {
+ DCHECK_GT(count, 0);
+ std::unique_ptr<protocol::Array<int>> statsDiff =
+ protocol::Array<int>::create();
+ for (int i = 0; i < count; ++i) {
+ statsDiff->addItem(updateData[i].index);
+ statsDiff->addItem(updateData[i].count);
+ statsDiff->addItem(updateData[i].size);
+ }
+ m_frontend->heapStatsUpdate(std::move(statsDiff));
+ return kContinue;
+ }
+
+ private:
+ protocol::HeapProfiler::Frontend* m_frontend;
+};
+
+} // namespace
+
+V8HeapProfilerAgentImpl::V8HeapProfilerAgentImpl(
+ V8InspectorSessionImpl* session, protocol::FrontendChannel* frontendChannel,
+ protocol::DictionaryValue* state)
+ : m_session(session),
+ m_isolate(session->inspector()->isolate()),
+ m_frontend(frontendChannel),
+ m_state(state),
+ m_hasTimer(false) {}
+
+V8HeapProfilerAgentImpl::~V8HeapProfilerAgentImpl() {}
+
+void V8HeapProfilerAgentImpl::restore() {
+ if (m_state->booleanProperty(HeapProfilerAgentState::heapProfilerEnabled,
+ false))
+ m_frontend.resetProfiles();
+ if (m_state->booleanProperty(
+ HeapProfilerAgentState::heapObjectsTrackingEnabled, false))
+ startTrackingHeapObjectsInternal(m_state->booleanProperty(
+ HeapProfilerAgentState::allocationTrackingEnabled, false));
+ if (m_state->booleanProperty(
+ HeapProfilerAgentState::samplingHeapProfilerEnabled, false)) {
+ ErrorString error;
+ double samplingInterval = m_state->doubleProperty(
+ HeapProfilerAgentState::samplingHeapProfilerInterval, -1);
+ DCHECK_GE(samplingInterval, 0);
+ startSampling(&error, Maybe<double>(samplingInterval));
+ }
+}
+
+void V8HeapProfilerAgentImpl::collectGarbage(ErrorString*) {
+ m_isolate->LowMemoryNotification();
+}
+
+void V8HeapProfilerAgentImpl::startTrackingHeapObjects(
+ ErrorString*, const protocol::Maybe<bool>& trackAllocations) {
+ m_state->setBoolean(HeapProfilerAgentState::heapObjectsTrackingEnabled, true);
+ bool allocationTrackingEnabled = trackAllocations.fromMaybe(false);
+ m_state->setBoolean(HeapProfilerAgentState::allocationTrackingEnabled,
+ allocationTrackingEnabled);
+ startTrackingHeapObjectsInternal(allocationTrackingEnabled);
+}
+
+void V8HeapProfilerAgentImpl::stopTrackingHeapObjects(
+ ErrorString* error, const protocol::Maybe<bool>& reportProgress) {
+ requestHeapStatsUpdate();
+ takeHeapSnapshot(error, reportProgress);
+ stopTrackingHeapObjectsInternal();
+}
+
+void V8HeapProfilerAgentImpl::enable(ErrorString*) {
+ m_state->setBoolean(HeapProfilerAgentState::heapProfilerEnabled, true);
+}
+
+void V8HeapProfilerAgentImpl::disable(ErrorString* error) {
+ stopTrackingHeapObjectsInternal();
+ if (m_state->booleanProperty(
+ HeapProfilerAgentState::samplingHeapProfilerEnabled, false)) {
+ v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
+ if (profiler) profiler->StopSamplingHeapProfiler();
+ }
+ m_isolate->GetHeapProfiler()->ClearObjectIds();
+ m_state->setBoolean(HeapProfilerAgentState::heapProfilerEnabled, false);
+}
+
+void V8HeapProfilerAgentImpl::takeHeapSnapshot(
+ ErrorString* errorString, const protocol::Maybe<bool>& reportProgress) {
+ v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
+ if (!profiler) {
+ *errorString = "Cannot access v8 heap profiler";
+ return;
+ }
+ std::unique_ptr<HeapSnapshotProgress> progress;
+ if (reportProgress.fromMaybe(false))
+ progress = wrapUnique(new HeapSnapshotProgress(&m_frontend));
+
+ GlobalObjectNameResolver resolver(m_session);
+ const v8::HeapSnapshot* snapshot =
+ profiler->TakeHeapSnapshot(progress.get(), &resolver);
+ if (!snapshot) {
+ *errorString = "Failed to take heap snapshot";
+ return;
+ }
+ HeapSnapshotOutputStream stream(&m_frontend);
+ snapshot->Serialize(&stream);
+ const_cast<v8::HeapSnapshot*>(snapshot)->Delete();
+}
+
+void V8HeapProfilerAgentImpl::getObjectByHeapObjectId(
+ ErrorString* error, const String16& heapSnapshotObjectId,
+ const protocol::Maybe<String16>& objectGroup,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result) {
+ bool ok;
+ int id = heapSnapshotObjectId.toInteger(&ok);
+ if (!ok) {
+ *error = "Invalid heap snapshot object id";
+ return;
+ }
+
+ v8::HandleScope handles(m_isolate);
+ v8::Local<v8::Object> heapObject = objectByHeapObjectId(m_isolate, id);
+ if (heapObject.IsEmpty()) {
+ *error = "Object is not available";
+ return;
+ }
+
+ if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject)) {
+ *error = "Object is not available";
+ return;
+ }
+
+ *result = m_session->wrapObject(heapObject->CreationContext(), heapObject,
+ objectGroup.fromMaybe(""), false);
+ if (!result) *error = "Object is not available";
+}
+
+void V8HeapProfilerAgentImpl::addInspectedHeapObject(
+ ErrorString* errorString, const String16& inspectedHeapObjectId) {
+ bool ok;
+ int id = inspectedHeapObjectId.toInteger(&ok);
+ if (!ok) {
+ *errorString = "Invalid heap snapshot object id";
+ return;
+ }
+
+ v8::HandleScope handles(m_isolate);
+ v8::Local<v8::Object> heapObject = objectByHeapObjectId(m_isolate, id);
+ if (heapObject.IsEmpty()) {
+ *errorString = "Object is not available";
+ return;
+ }
+
+ if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject)) {
+ *errorString = "Object is not available";
+ return;
+ }
+
+ m_session->addInspectedObject(wrapUnique(new InspectableHeapObject(id)));
+}
+
+void V8HeapProfilerAgentImpl::getHeapObjectId(ErrorString* errorString,
+ const String16& objectId,
+ String16* heapSnapshotObjectId) {
+ v8::HandleScope handles(m_isolate);
+ v8::Local<v8::Value> value;
+ v8::Local<v8::Context> context;
+ if (!m_session->unwrapObject(errorString, objectId, &value, &context,
+ nullptr) ||
+ value->IsUndefined())
+ return;
+
+ v8::SnapshotObjectId id = m_isolate->GetHeapProfiler()->GetObjectId(value);
+ *heapSnapshotObjectId = String16::fromInteger(static_cast<size_t>(id));
+}
+
+void V8HeapProfilerAgentImpl::requestHeapStatsUpdate() {
+ HeapStatsStream stream(&m_frontend);
+ v8::SnapshotObjectId lastSeenObjectId =
+ m_isolate->GetHeapProfiler()->GetHeapStats(&stream);
+ m_frontend.lastSeenObjectId(
+ lastSeenObjectId, m_session->inspector()->client()->currentTimeMS());
+}
+
+// static
+void V8HeapProfilerAgentImpl::onTimer(void* data) {
+ reinterpret_cast<V8HeapProfilerAgentImpl*>(data)->requestHeapStatsUpdate();
+}
+
+void V8HeapProfilerAgentImpl::startTrackingHeapObjectsInternal(
+ bool trackAllocations) {
+ m_isolate->GetHeapProfiler()->StartTrackingHeapObjects(trackAllocations);
+ if (!m_hasTimer) {
+ m_hasTimer = true;
+ m_session->inspector()->client()->startRepeatingTimer(
+ 0.05, &V8HeapProfilerAgentImpl::onTimer, reinterpret_cast<void*>(this));
+ }
+}
+
+void V8HeapProfilerAgentImpl::stopTrackingHeapObjectsInternal() {
+ if (m_hasTimer) {
+ m_session->inspector()->client()->cancelTimer(
+ reinterpret_cast<void*>(this));
+ m_hasTimer = false;
+ }
+ m_isolate->GetHeapProfiler()->StopTrackingHeapObjects();
+ m_state->setBoolean(HeapProfilerAgentState::heapObjectsTrackingEnabled,
+ false);
+ m_state->setBoolean(HeapProfilerAgentState::allocationTrackingEnabled, false);
+}
+
+void V8HeapProfilerAgentImpl::startSampling(
+ ErrorString* errorString, const Maybe<double>& samplingInterval) {
+ v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
+ if (!profiler) {
+ *errorString = "Cannot access v8 heap profiler";
+ return;
+ }
+ const unsigned defaultSamplingInterval = 1 << 15;
+ double samplingIntervalValue =
+ samplingInterval.fromMaybe(defaultSamplingInterval);
+ m_state->setDouble(HeapProfilerAgentState::samplingHeapProfilerInterval,
+ samplingIntervalValue);
+ m_state->setBoolean(HeapProfilerAgentState::samplingHeapProfilerEnabled,
+ true);
+ profiler->StartSamplingHeapProfiler(
+ static_cast<uint64_t>(samplingIntervalValue), 128,
+ v8::HeapProfiler::kSamplingForceGC);
+}
+
+namespace {
+std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfileNode>
+buildSampingHeapProfileNode(const v8::AllocationProfile::Node* node) {
+ auto children = protocol::Array<
+ protocol::HeapProfiler::SamplingHeapProfileNode>::create();
+ for (const auto* child : node->children)
+ children->addItem(buildSampingHeapProfileNode(child));
+ size_t selfSize = 0;
+ for (const auto& allocation : node->allocations)
+ selfSize += allocation.size * allocation.count;
+ std::unique_ptr<protocol::Runtime::CallFrame> callFrame =
+ protocol::Runtime::CallFrame::create()
+ .setFunctionName(toProtocolString(node->name))
+ .setScriptId(String16::fromInteger(node->script_id))
+ .setUrl(toProtocolString(node->script_name))
+ .setLineNumber(node->line_number - 1)
+ .setColumnNumber(node->column_number - 1)
+ .build();
+ std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfileNode> result =
+ protocol::HeapProfiler::SamplingHeapProfileNode::create()
+ .setCallFrame(std::move(callFrame))
+ .setSelfSize(selfSize)
+ .setChildren(std::move(children))
+ .build();
+ return result;
+}
+} // namespace
+
+void V8HeapProfilerAgentImpl::stopSampling(
+ ErrorString* errorString,
+ std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>* profile) {
+ v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
+ if (!profiler) {
+ *errorString = "Cannot access v8 heap profiler";
+ return;
+ }
+ v8::HandleScope scope(
+ m_isolate); // Allocation profile contains Local handles.
+ std::unique_ptr<v8::AllocationProfile> v8Profile(
+ profiler->GetAllocationProfile());
+ profiler->StopSamplingHeapProfiler();
+ m_state->setBoolean(HeapProfilerAgentState::samplingHeapProfilerEnabled,
+ false);
+ if (!v8Profile) {
+ *errorString = "Cannot access v8 sampled heap profile.";
+ return;
+ }
+ v8::AllocationProfile::Node* root = v8Profile->GetRootNode();
+ *profile = protocol::HeapProfiler::SamplingHeapProfile::create()
+ .setHead(buildSampingHeapProfileNode(root))
+ .build();
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
new file mode 100644
index 0000000000..caa969870b
--- /dev/null
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
@@ -0,0 +1,73 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8HEAPPROFILERAGENTIMPL_H_
+#define V8_INSPECTOR_V8HEAPPROFILERAGENTIMPL_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/HeapProfiler.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class V8InspectorSessionImpl;
+
+using protocol::ErrorString;
+using protocol::Maybe;
+
+class V8HeapProfilerAgentImpl : public protocol::HeapProfiler::Backend {
+ public:
+ V8HeapProfilerAgentImpl(V8InspectorSessionImpl*, protocol::FrontendChannel*,
+ protocol::DictionaryValue* state);
+ ~V8HeapProfilerAgentImpl() override;
+ void restore();
+
+ void collectGarbage(ErrorString*) override;
+
+ void enable(ErrorString*) override;
+ void startTrackingHeapObjects(ErrorString*,
+ const Maybe<bool>& trackAllocations) override;
+ void stopTrackingHeapObjects(ErrorString*,
+ const Maybe<bool>& reportProgress) override;
+
+ void disable(ErrorString*) override;
+
+ void takeHeapSnapshot(ErrorString*,
+ const Maybe<bool>& reportProgress) override;
+
+ void getObjectByHeapObjectId(
+ ErrorString*, const String16& heapSnapshotObjectId,
+ const Maybe<String16>& objectGroup,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result) override;
+ void addInspectedHeapObject(ErrorString*,
+ const String16& inspectedHeapObjectId) override;
+ void getHeapObjectId(ErrorString*, const String16& objectId,
+ String16* heapSnapshotObjectId) override;
+
+ void startSampling(ErrorString*,
+ const Maybe<double>& samplingInterval) override;
+ void stopSampling(
+ ErrorString*,
+ std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>*) override;
+
+ private:
+ void startTrackingHeapObjectsInternal(bool trackAllocations);
+ void stopTrackingHeapObjectsInternal();
+ void requestHeapStatsUpdate();
+ static void onTimer(void*);
+
+ V8InspectorSessionImpl* m_session;
+ v8::Isolate* m_isolate;
+ protocol::HeapProfiler::Frontend m_frontend;
+ protocol::DictionaryValue* m_state;
+ bool m_hasTimer;
+
+ DISALLOW_COPY_AND_ASSIGN(V8HeapProfilerAgentImpl);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8HEAPPROFILERAGENTIMPL_H_
diff --git a/deps/v8/src/inspector/v8-injected-script-host.cc b/deps/v8/src/inspector/v8-injected-script-host.cc
new file mode 100644
index 0000000000..dc41ef8631
--- /dev/null
+++ b/deps/v8/src/inspector/v8-injected-script-host.cc
@@ -0,0 +1,216 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-injected-script-host.h"
+
+#include "src/base/macros.h"
+#include "src/inspector/injected-script-native.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-internal-value-type.h"
+#include "src/inspector/v8-value-copier.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+namespace {
+
+void setFunctionProperty(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> obj, const char* name,
+ v8::FunctionCallback callback,
+ v8::Local<v8::External> external) {
+ v8::Local<v8::String> funcName =
+ toV8StringInternalized(context->GetIsolate(), name);
+ v8::Local<v8::Function> func;
+ if (!v8::Function::New(context, callback, external, 0,
+ v8::ConstructorBehavior::kThrow)
+ .ToLocal(&func))
+ return;
+ func->SetName(funcName);
+ createDataProperty(context, obj, funcName, func);
+}
+
+V8InspectorImpl* unwrapInspector(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ DCHECK(!info.Data().IsEmpty());
+ DCHECK(info.Data()->IsExternal());
+ V8InspectorImpl* inspector =
+ static_cast<V8InspectorImpl*>(info.Data().As<v8::External>()->Value());
+ DCHECK(inspector);
+ return inspector;
+}
+
+} // namespace
+
+v8::Local<v8::Object> V8InjectedScriptHost::create(
+ v8::Local<v8::Context> context, V8InspectorImpl* inspector) {
+ v8::Isolate* isolate = inspector->isolate();
+ v8::Local<v8::Object> injectedScriptHost = v8::Object::New(isolate);
+ bool success = injectedScriptHost->SetPrototype(context, v8::Null(isolate))
+ .FromMaybe(false);
+ DCHECK(success);
+ USE(success);
+ v8::Local<v8::External> debuggerExternal =
+ v8::External::New(isolate, inspector);
+ setFunctionProperty(context, injectedScriptHost, "internalConstructorName",
+ V8InjectedScriptHost::internalConstructorNameCallback,
+ debuggerExternal);
+ setFunctionProperty(
+ context, injectedScriptHost, "formatAccessorsAsProperties",
+ V8InjectedScriptHost::formatAccessorsAsProperties, debuggerExternal);
+ setFunctionProperty(context, injectedScriptHost, "subtype",
+ V8InjectedScriptHost::subtypeCallback, debuggerExternal);
+ setFunctionProperty(context, injectedScriptHost, "getInternalProperties",
+ V8InjectedScriptHost::getInternalPropertiesCallback,
+ debuggerExternal);
+ setFunctionProperty(context, injectedScriptHost, "objectHasOwnProperty",
+ V8InjectedScriptHost::objectHasOwnPropertyCallback,
+ debuggerExternal);
+ setFunctionProperty(context, injectedScriptHost, "bind",
+ V8InjectedScriptHost::bindCallback, debuggerExternal);
+ setFunctionProperty(context, injectedScriptHost, "proxyTargetValue",
+ V8InjectedScriptHost::proxyTargetValueCallback,
+ debuggerExternal);
+ return injectedScriptHost;
+}
+
+void V8InjectedScriptHost::internalConstructorNameCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ if (info.Length() < 1 || !info[0]->IsObject()) return;
+
+ v8::Local<v8::Object> object = info[0].As<v8::Object>();
+ info.GetReturnValue().Set(object->GetConstructorName());
+}
+
+void V8InjectedScriptHost::formatAccessorsAsProperties(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ DCHECK_EQ(info.Length(), 2);
+ info.GetReturnValue().Set(false);
+ if (!info[1]->IsFunction()) return;
+ // Check that function is user-defined.
+ if (info[1].As<v8::Function>()->ScriptId() != v8::UnboundScript::kNoScriptId)
+ return;
+ info.GetReturnValue().Set(
+ unwrapInspector(info)->client()->formatAccessorsAsProperties(info[0]));
+}
+
+void V8InjectedScriptHost::subtypeCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ if (info.Length() < 1) return;
+
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::Value> value = info[0];
+ if (value->IsObject()) {
+ v8::Local<v8::Value> internalType = v8InternalValueTypeFrom(
+ isolate->GetCurrentContext(), v8::Local<v8::Object>::Cast(value));
+ if (internalType->IsString()) {
+ info.GetReturnValue().Set(internalType);
+ return;
+ }
+ }
+ if (value->IsArray() || value->IsArgumentsObject()) {
+ info.GetReturnValue().Set(toV8StringInternalized(isolate, "array"));
+ return;
+ }
+ if (value->IsTypedArray()) {
+ info.GetReturnValue().Set(toV8StringInternalized(isolate, "typedarray"));
+ return;
+ }
+ if (value->IsDate()) {
+ info.GetReturnValue().Set(toV8StringInternalized(isolate, "date"));
+ return;
+ }
+ if (value->IsRegExp()) {
+ info.GetReturnValue().Set(toV8StringInternalized(isolate, "regexp"));
+ return;
+ }
+ if (value->IsMap() || value->IsWeakMap()) {
+ info.GetReturnValue().Set(toV8StringInternalized(isolate, "map"));
+ return;
+ }
+ if (value->IsSet() || value->IsWeakSet()) {
+ info.GetReturnValue().Set(toV8StringInternalized(isolate, "set"));
+ return;
+ }
+ if (value->IsMapIterator() || value->IsSetIterator()) {
+ info.GetReturnValue().Set(toV8StringInternalized(isolate, "iterator"));
+ return;
+ }
+ if (value->IsGeneratorObject()) {
+ info.GetReturnValue().Set(toV8StringInternalized(isolate, "generator"));
+ return;
+ }
+ if (value->IsNativeError()) {
+ info.GetReturnValue().Set(toV8StringInternalized(isolate, "error"));
+ return;
+ }
+ if (value->IsProxy()) {
+ info.GetReturnValue().Set(toV8StringInternalized(isolate, "proxy"));
+ return;
+ }
+ if (value->IsPromise()) {
+ info.GetReturnValue().Set(toV8StringInternalized(isolate, "promise"));
+ return;
+ }
+ std::unique_ptr<StringBuffer> subtype =
+ unwrapInspector(info)->client()->valueSubtype(value);
+ if (subtype) {
+ info.GetReturnValue().Set(toV8String(isolate, subtype->string()));
+ return;
+ }
+}
+
+void V8InjectedScriptHost::getInternalPropertiesCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ if (info.Length() < 1) return;
+ v8::Local<v8::Array> properties;
+ if (unwrapInspector(info)
+ ->debugger()
+ ->internalProperties(info.GetIsolate()->GetCurrentContext(), info[0])
+ .ToLocal(&properties))
+ info.GetReturnValue().Set(properties);
+}
+
+void V8InjectedScriptHost::objectHasOwnPropertyCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ if (info.Length() < 2 || !info[0]->IsObject() || !info[1]->IsString()) return;
+ bool result = info[0]
+ .As<v8::Object>()
+ ->HasOwnProperty(info.GetIsolate()->GetCurrentContext(),
+ v8::Local<v8::String>::Cast(info[1]))
+ .FromMaybe(false);
+ info.GetReturnValue().Set(v8::Boolean::New(info.GetIsolate(), result));
+}
+
+void V8InjectedScriptHost::bindCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ if (info.Length() < 2 || !info[1]->IsString()) return;
+ InjectedScriptNative* injectedScriptNative =
+ InjectedScriptNative::fromInjectedScriptHost(info.GetIsolate(),
+ info.Holder());
+ if (!injectedScriptNative) return;
+
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ v8::Local<v8::String> v8groupName =
+ info[1]->ToString(context).ToLocalChecked();
+ String16 groupName = toProtocolStringWithTypeCheck(v8groupName);
+ int id = injectedScriptNative->bind(info[0], groupName);
+ info.GetReturnValue().Set(id);
+}
+
+void V8InjectedScriptHost::proxyTargetValueCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ if (info.Length() != 1 || !info[0]->IsProxy()) {
+ UNREACHABLE();
+ return;
+ }
+ v8::Local<v8::Object> target = info[0].As<v8::Proxy>();
+ while (target->IsProxy())
+ target = v8::Local<v8::Proxy>::Cast(target)->GetTarget();
+ info.GetReturnValue().Set(target);
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-injected-script-host.h b/deps/v8/src/inspector/v8-injected-script-host.h
new file mode 100644
index 0000000000..7d293af5a7
--- /dev/null
+++ b/deps/v8/src/inspector/v8-injected-script-host.h
@@ -0,0 +1,46 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8INJECTEDSCRIPTHOST_H_
+#define V8_INSPECTOR_V8INJECTEDSCRIPTHOST_H_
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class V8InspectorImpl;
+
+// SECURITY NOTE: Although the InjectedScriptHost is intended for use solely by
+// the inspector,
+// a reference to the InjectedScriptHost may be leaked to the page being
+// inspected. Thus, the
+// InjectedScriptHost must never implemment methods that have more power over
+// the page than the
+// page already has itself (e.g. origin restriction bypasses).
+
+class V8InjectedScriptHost {
+ public:
+ // We expect that debugger outlives any JS context and thus
+ // V8InjectedScriptHost (owned by JS)
+ // is destroyed before inspector.
+ static v8::Local<v8::Object> create(v8::Local<v8::Context>, V8InspectorImpl*);
+
+ private:
+ static void internalConstructorNameCallback(
+ const v8::FunctionCallbackInfo<v8::Value>&);
+ static void formatAccessorsAsProperties(
+ const v8::FunctionCallbackInfo<v8::Value>&);
+ static void subtypeCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void getInternalPropertiesCallback(
+ const v8::FunctionCallbackInfo<v8::Value>&);
+ static void objectHasOwnPropertyCallback(
+ const v8::FunctionCallbackInfo<v8::Value>&);
+ static void bindCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static void proxyTargetValueCallback(
+ const v8::FunctionCallbackInfo<v8::Value>&);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8INJECTEDSCRIPTHOST_H_
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
new file mode 100644
index 0000000000..bd68548fbf
--- /dev/null
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2010-2011 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/inspector/v8-inspector-impl.h"
+
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console-agent-impl.h"
+#include "src/inspector/v8-console-message.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-profiler-agent-impl.h"
+#include "src/inspector/v8-runtime-agent-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+
+namespace v8_inspector {
+
+std::unique_ptr<V8Inspector> V8Inspector::create(v8::Isolate* isolate,
+ V8InspectorClient* client) {
+ return wrapUnique(new V8InspectorImpl(isolate, client));
+}
+
+V8InspectorImpl::V8InspectorImpl(v8::Isolate* isolate,
+ V8InspectorClient* client)
+ : m_isolate(isolate),
+ m_client(client),
+ m_debugger(new V8Debugger(isolate, this)),
+ m_capturingStackTracesCount(0),
+ m_lastExceptionId(0) {}
+
+V8InspectorImpl::~V8InspectorImpl() {}
+
+V8DebuggerAgentImpl* V8InspectorImpl::enabledDebuggerAgentForGroup(
+ int contextGroupId) {
+ V8InspectorSessionImpl* session = sessionForContextGroup(contextGroupId);
+ V8DebuggerAgentImpl* agent = session ? session->debuggerAgent() : nullptr;
+ return agent && agent->enabled() ? agent : nullptr;
+}
+
+V8RuntimeAgentImpl* V8InspectorImpl::enabledRuntimeAgentForGroup(
+ int contextGroupId) {
+ V8InspectorSessionImpl* session = sessionForContextGroup(contextGroupId);
+ V8RuntimeAgentImpl* agent = session ? session->runtimeAgent() : nullptr;
+ return agent && agent->enabled() ? agent : nullptr;
+}
+
+V8ProfilerAgentImpl* V8InspectorImpl::enabledProfilerAgentForGroup(
+ int contextGroupId) {
+ V8InspectorSessionImpl* session = sessionForContextGroup(contextGroupId);
+ V8ProfilerAgentImpl* agent = session ? session->profilerAgent() : nullptr;
+ return agent && agent->enabled() ? agent : nullptr;
+}
+
+v8::MaybeLocal<v8::Value> V8InspectorImpl::runCompiledScript(
+ v8::Local<v8::Context> context, v8::Local<v8::Script> script) {
+ v8::MicrotasksScope microtasksScope(m_isolate,
+ v8::MicrotasksScope::kRunMicrotasks);
+ int groupId = V8Debugger::getGroupId(context);
+ if (V8DebuggerAgentImpl* agent = enabledDebuggerAgentForGroup(groupId))
+ agent->willExecuteScript(script->GetUnboundScript()->GetId());
+ v8::MaybeLocal<v8::Value> result = script->Run(context);
+ // Get agent from the map again, since it could have detached during script
+ // execution.
+ if (V8DebuggerAgentImpl* agent = enabledDebuggerAgentForGroup(groupId))
+ agent->didExecuteScript();
+ return result;
+}
+
+v8::MaybeLocal<v8::Value> V8InspectorImpl::callFunction(
+ v8::Local<v8::Function> function, v8::Local<v8::Context> context,
+ v8::Local<v8::Value> receiver, int argc, v8::Local<v8::Value> info[]) {
+ v8::MicrotasksScope microtasksScope(m_isolate,
+ v8::MicrotasksScope::kRunMicrotasks);
+ int groupId = V8Debugger::getGroupId(context);
+ if (V8DebuggerAgentImpl* agent = enabledDebuggerAgentForGroup(groupId))
+ agent->willExecuteScript(function->ScriptId());
+ v8::MaybeLocal<v8::Value> result =
+ function->Call(context, receiver, argc, info);
+ // Get agent from the map again, since it could have detached during script
+ // execution.
+ if (V8DebuggerAgentImpl* agent = enabledDebuggerAgentForGroup(groupId))
+ agent->didExecuteScript();
+ return result;
+}
+
+v8::MaybeLocal<v8::Value> V8InspectorImpl::compileAndRunInternalScript(
+ v8::Local<v8::Context> context, v8::Local<v8::String> source) {
+ v8::Local<v8::Script> script =
+ compileScript(context, source, String16(), true);
+ if (script.IsEmpty()) return v8::MaybeLocal<v8::Value>();
+ v8::MicrotasksScope microtasksScope(m_isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ return script->Run(context);
+}
+
+v8::Local<v8::Script> V8InspectorImpl::compileScript(
+ v8::Local<v8::Context> context, v8::Local<v8::String> code,
+ const String16& fileName, bool markAsInternal) {
+ v8::ScriptOrigin origin(
+ toV8String(m_isolate, fileName), v8::Integer::New(m_isolate, 0),
+ v8::Integer::New(m_isolate, 0),
+ v8::False(m_isolate), // sharable
+ v8::Local<v8::Integer>(),
+ v8::Boolean::New(m_isolate, markAsInternal), // internal
+ toV8String(m_isolate, String16()), // sourceMap
+ v8::True(m_isolate)); // opaqueresource
+ v8::ScriptCompiler::Source source(code, origin);
+ v8::Local<v8::Script> script;
+ if (!v8::ScriptCompiler::Compile(context, &source,
+ v8::ScriptCompiler::kNoCompileOptions)
+ .ToLocal(&script))
+ return v8::Local<v8::Script>();
+ return script;
+}
+
+void V8InspectorImpl::enableStackCapturingIfNeeded() {
+ if (!m_capturingStackTracesCount)
+ V8StackTraceImpl::setCaptureStackTraceForUncaughtExceptions(m_isolate,
+ true);
+ ++m_capturingStackTracesCount;
+}
+
+void V8InspectorImpl::disableStackCapturingIfNeeded() {
+ if (!(--m_capturingStackTracesCount))
+ V8StackTraceImpl::setCaptureStackTraceForUncaughtExceptions(m_isolate,
+ false);
+}
+
+void V8InspectorImpl::muteExceptions(int contextGroupId) {
+ m_muteExceptionsMap[contextGroupId]++;
+}
+
+void V8InspectorImpl::unmuteExceptions(int contextGroupId) {
+ m_muteExceptionsMap[contextGroupId]--;
+}
+
+V8ConsoleMessageStorage* V8InspectorImpl::ensureConsoleMessageStorage(
+ int contextGroupId) {
+ ConsoleStorageMap::iterator storageIt =
+ m_consoleStorageMap.find(contextGroupId);
+ if (storageIt == m_consoleStorageMap.end())
+ storageIt =
+ m_consoleStorageMap
+ .insert(std::make_pair(
+ contextGroupId,
+ wrapUnique(new V8ConsoleMessageStorage(this, contextGroupId))))
+ .first;
+ return storageIt->second.get();
+}
+
+bool V8InspectorImpl::hasConsoleMessageStorage(int contextGroupId) {
+ ConsoleStorageMap::iterator storageIt =
+ m_consoleStorageMap.find(contextGroupId);
+ return storageIt != m_consoleStorageMap.end();
+}
+
+std::unique_ptr<V8StackTrace> V8InspectorImpl::createStackTrace(
+ v8::Local<v8::StackTrace> stackTrace) {
+ return m_debugger->createStackTrace(stackTrace);
+}
+
+std::unique_ptr<V8InspectorSession> V8InspectorImpl::connect(
+ int contextGroupId, V8Inspector::Channel* channel,
+ const StringView& state) {
+ DCHECK(m_sessions.find(contextGroupId) == m_sessions.cend());
+ std::unique_ptr<V8InspectorSessionImpl> session =
+ V8InspectorSessionImpl::create(this, contextGroupId, channel, state);
+ m_sessions[contextGroupId] = session.get();
+ return std::move(session);
+}
+
+void V8InspectorImpl::disconnect(V8InspectorSessionImpl* session) {
+ DCHECK(m_sessions.find(session->contextGroupId()) != m_sessions.end());
+ m_sessions.erase(session->contextGroupId());
+}
+
+InspectedContext* V8InspectorImpl::getContext(int groupId,
+ int contextId) const {
+ if (!groupId || !contextId) return nullptr;
+
+ ContextsByGroupMap::const_iterator contextGroupIt = m_contexts.find(groupId);
+ if (contextGroupIt == m_contexts.end()) return nullptr;
+
+ ContextByIdMap::iterator contextIt = contextGroupIt->second->find(contextId);
+ if (contextIt == contextGroupIt->second->end()) return nullptr;
+
+ return contextIt->second.get();
+}
+
+void V8InspectorImpl::contextCreated(const V8ContextInfo& info) {
+ int contextId = m_debugger->markContext(info);
+
+ ContextsByGroupMap::iterator contextIt = m_contexts.find(info.contextGroupId);
+ if (contextIt == m_contexts.end())
+ contextIt = m_contexts
+ .insert(std::make_pair(info.contextGroupId,
+ wrapUnique(new ContextByIdMap())))
+ .first;
+
+ const auto& contextById = contextIt->second;
+
+ DCHECK(contextById->find(contextId) == contextById->cend());
+ InspectedContext* context = new InspectedContext(this, info, contextId);
+ (*contextById)[contextId] = wrapUnique(context);
+ SessionMap::iterator sessionIt = m_sessions.find(info.contextGroupId);
+ if (sessionIt != m_sessions.end())
+ sessionIt->second->runtimeAgent()->reportExecutionContextCreated(context);
+}
+
+void V8InspectorImpl::contextDestroyed(v8::Local<v8::Context> context) {
+ int contextId = V8Debugger::contextId(context);
+ int contextGroupId = V8Debugger::getGroupId(context);
+
+ ConsoleStorageMap::iterator storageIt =
+ m_consoleStorageMap.find(contextGroupId);
+ if (storageIt != m_consoleStorageMap.end())
+ storageIt->second->contextDestroyed(contextId);
+
+ InspectedContext* inspectedContext = getContext(contextGroupId, contextId);
+ if (!inspectedContext) return;
+
+ SessionMap::iterator iter = m_sessions.find(contextGroupId);
+ if (iter != m_sessions.end())
+ iter->second->runtimeAgent()->reportExecutionContextDestroyed(
+ inspectedContext);
+ discardInspectedContext(contextGroupId, contextId);
+}
+
+void V8InspectorImpl::resetContextGroup(int contextGroupId) {
+ m_consoleStorageMap.erase(contextGroupId);
+ m_muteExceptionsMap.erase(contextGroupId);
+ SessionMap::iterator session = m_sessions.find(contextGroupId);
+ if (session != m_sessions.end()) session->second->reset();
+ m_contexts.erase(contextGroupId);
+}
+
+void V8InspectorImpl::willExecuteScript(v8::Local<v8::Context> context,
+ int scriptId) {
+ if (V8DebuggerAgentImpl* agent =
+ enabledDebuggerAgentForGroup(V8Debugger::getGroupId(context)))
+ agent->willExecuteScript(scriptId);
+}
+
+void V8InspectorImpl::didExecuteScript(v8::Local<v8::Context> context) {
+ if (V8DebuggerAgentImpl* agent =
+ enabledDebuggerAgentForGroup(V8Debugger::getGroupId(context)))
+ agent->didExecuteScript();
+}
+
+void V8InspectorImpl::idleStarted() {
+ for (auto it = m_sessions.begin(); it != m_sessions.end(); ++it) {
+ if (it->second->profilerAgent()->idleStarted()) return;
+ }
+}
+
+void V8InspectorImpl::idleFinished() {
+ for (auto it = m_sessions.begin(); it != m_sessions.end(); ++it) {
+ if (it->second->profilerAgent()->idleFinished()) return;
+ }
+}
+
+unsigned V8InspectorImpl::exceptionThrown(
+ v8::Local<v8::Context> context, const StringView& message,
+ v8::Local<v8::Value> exception, const StringView& detailedMessage,
+ const StringView& url, unsigned lineNumber, unsigned columnNumber,
+ std::unique_ptr<V8StackTrace> stackTrace, int scriptId) {
+ int contextGroupId = V8Debugger::getGroupId(context);
+ if (!contextGroupId || m_muteExceptionsMap[contextGroupId]) return 0;
+ std::unique_ptr<V8StackTraceImpl> stackTraceImpl =
+ wrapUnique(static_cast<V8StackTraceImpl*>(stackTrace.release()));
+ unsigned exceptionId = nextExceptionId();
+ std::unique_ptr<V8ConsoleMessage> consoleMessage =
+ V8ConsoleMessage::createForException(
+ m_client->currentTimeMS(), toString16(detailedMessage),
+ toString16(url), lineNumber, columnNumber, std::move(stackTraceImpl),
+ scriptId, m_isolate, toString16(message),
+ V8Debugger::contextId(context), exception, exceptionId);
+ ensureConsoleMessageStorage(contextGroupId)
+ ->addMessage(std::move(consoleMessage));
+ return exceptionId;
+}
+
+void V8InspectorImpl::exceptionRevoked(v8::Local<v8::Context> context,
+ unsigned exceptionId,
+ const StringView& message) {
+ int contextGroupId = V8Debugger::getGroupId(context);
+ if (!contextGroupId) return;
+
+ std::unique_ptr<V8ConsoleMessage> consoleMessage =
+ V8ConsoleMessage::createForRevokedException(
+ m_client->currentTimeMS(), toString16(message), exceptionId);
+ ensureConsoleMessageStorage(contextGroupId)
+ ->addMessage(std::move(consoleMessage));
+}
+
+std::unique_ptr<V8StackTrace> V8InspectorImpl::captureStackTrace(
+ bool fullStack) {
+ return m_debugger->captureStackTrace(fullStack);
+}
+
+void V8InspectorImpl::asyncTaskScheduled(const StringView& taskName, void* task,
+ bool recurring) {
+ m_debugger->asyncTaskScheduled(taskName, task, recurring);
+}
+
+void V8InspectorImpl::asyncTaskCanceled(void* task) {
+ m_debugger->asyncTaskCanceled(task);
+}
+
+void V8InspectorImpl::asyncTaskStarted(void* task) {
+ m_debugger->asyncTaskStarted(task);
+}
+
+void V8InspectorImpl::asyncTaskFinished(void* task) {
+ m_debugger->asyncTaskFinished(task);
+}
+
+void V8InspectorImpl::allAsyncTasksCanceled() {
+ m_debugger->allAsyncTasksCanceled();
+}
+
+v8::Local<v8::Context> V8InspectorImpl::regexContext() {
+ if (m_regexContext.IsEmpty())
+ m_regexContext.Reset(m_isolate, v8::Context::New(m_isolate));
+ return m_regexContext.Get(m_isolate);
+}
+
+void V8InspectorImpl::discardInspectedContext(int contextGroupId,
+ int contextId) {
+ if (!getContext(contextGroupId, contextId)) return;
+ m_contexts[contextGroupId]->erase(contextId);
+ if (m_contexts[contextGroupId]->empty()) m_contexts.erase(contextGroupId);
+}
+
+const V8InspectorImpl::ContextByIdMap* V8InspectorImpl::contextGroup(
+ int contextGroupId) {
+ ContextsByGroupMap::iterator iter = m_contexts.find(contextGroupId);
+ return iter == m_contexts.end() ? nullptr : iter->second.get();
+}
+
+V8InspectorSessionImpl* V8InspectorImpl::sessionForContextGroup(
+ int contextGroupId) {
+ if (!contextGroupId) return nullptr;
+ SessionMap::iterator iter = m_sessions.find(contextGroupId);
+ return iter == m_sessions.end() ? nullptr : iter->second;
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
new file mode 100644
index 0000000000..0ca1a6a729
--- /dev/null
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_V8INSPECTORIMPL_H_
+#define V8_INSPECTOR_V8INSPECTORIMPL_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Protocol.h"
+
+#include "include/v8-debug.h"
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+class InspectedContext;
+class V8ConsoleMessageStorage;
+class V8Debugger;
+class V8DebuggerAgentImpl;
+class V8InspectorSessionImpl;
+class V8ProfilerAgentImpl;
+class V8RuntimeAgentImpl;
+class V8StackTraceImpl;
+
+class V8InspectorImpl : public V8Inspector {
+ public:
+ V8InspectorImpl(v8::Isolate*, V8InspectorClient*);
+ ~V8InspectorImpl() override;
+
+ v8::Isolate* isolate() const { return m_isolate; }
+ V8InspectorClient* client() { return m_client; }
+ V8Debugger* debugger() { return m_debugger.get(); }
+
+ v8::MaybeLocal<v8::Value> runCompiledScript(v8::Local<v8::Context>,
+ v8::Local<v8::Script>);
+ v8::MaybeLocal<v8::Value> callFunction(v8::Local<v8::Function>,
+ v8::Local<v8::Context>,
+ v8::Local<v8::Value> receiver,
+ int argc, v8::Local<v8::Value> info[]);
+ v8::MaybeLocal<v8::Value> compileAndRunInternalScript(v8::Local<v8::Context>,
+ v8::Local<v8::String>);
+ v8::Local<v8::Script> compileScript(v8::Local<v8::Context>,
+ v8::Local<v8::String>,
+ const String16& fileName,
+ bool markAsInternal);
+ v8::Local<v8::Context> regexContext();
+
+ // V8Inspector implementation.
+ std::unique_ptr<V8InspectorSession> connect(int contextGroupId,
+ V8Inspector::Channel*,
+ const StringView& state) override;
+ void contextCreated(const V8ContextInfo&) override;
+ void contextDestroyed(v8::Local<v8::Context>) override;
+ void resetContextGroup(int contextGroupId) override;
+ void willExecuteScript(v8::Local<v8::Context>, int scriptId) override;
+ void didExecuteScript(v8::Local<v8::Context>) override;
+ void idleStarted() override;
+ void idleFinished() override;
+ unsigned exceptionThrown(v8::Local<v8::Context>, const StringView& message,
+ v8::Local<v8::Value> exception,
+ const StringView& detailedMessage,
+ const StringView& url, unsigned lineNumber,
+ unsigned columnNumber, std::unique_ptr<V8StackTrace>,
+ int scriptId) override;
+ void exceptionRevoked(v8::Local<v8::Context>, unsigned exceptionId,
+ const StringView& message) override;
+ std::unique_ptr<V8StackTrace> createStackTrace(
+ v8::Local<v8::StackTrace>) override;
+ std::unique_ptr<V8StackTrace> captureStackTrace(bool fullStack) override;
+ void asyncTaskScheduled(const StringView& taskName, void* task,
+ bool recurring) override;
+ void asyncTaskCanceled(void* task) override;
+ void asyncTaskStarted(void* task) override;
+ void asyncTaskFinished(void* task) override;
+ void allAsyncTasksCanceled() override;
+
+ unsigned nextExceptionId() { return ++m_lastExceptionId; }
+ void enableStackCapturingIfNeeded();
+ void disableStackCapturingIfNeeded();
+ void muteExceptions(int contextGroupId);
+ void unmuteExceptions(int contextGroupId);
+ V8ConsoleMessageStorage* ensureConsoleMessageStorage(int contextGroupId);
+ bool hasConsoleMessageStorage(int contextGroupId);
+ using ContextByIdMap =
+ protocol::HashMap<int, std::unique_ptr<InspectedContext>>;
+ void discardInspectedContext(int contextGroupId, int contextId);
+ const ContextByIdMap* contextGroup(int contextGroupId);
+ void disconnect(V8InspectorSessionImpl*);
+ V8InspectorSessionImpl* sessionForContextGroup(int contextGroupId);
+ InspectedContext* getContext(int groupId, int contextId) const;
+ V8DebuggerAgentImpl* enabledDebuggerAgentForGroup(int contextGroupId);
+ V8RuntimeAgentImpl* enabledRuntimeAgentForGroup(int contextGroupId);
+ V8ProfilerAgentImpl* enabledProfilerAgentForGroup(int contextGroupId);
+
+ private:
+ v8::Isolate* m_isolate;
+ V8InspectorClient* m_client;
+ std::unique_ptr<V8Debugger> m_debugger;
+ v8::Global<v8::Context> m_regexContext;
+ int m_capturingStackTracesCount;
+ unsigned m_lastExceptionId;
+
+ using MuteExceptionsMap = protocol::HashMap<int, int>;
+ MuteExceptionsMap m_muteExceptionsMap;
+
+ using ContextsByGroupMap =
+ protocol::HashMap<int, std::unique_ptr<ContextByIdMap>>;
+ ContextsByGroupMap m_contexts;
+
+ using SessionMap = protocol::HashMap<int, V8InspectorSessionImpl*>;
+ SessionMap m_sessions;
+
+ using ConsoleStorageMap =
+ protocol::HashMap<int, std::unique_ptr<V8ConsoleMessageStorage>>;
+ ConsoleStorageMap m_consoleStorageMap;
+
+ DISALLOW_COPY_AND_ASSIGN(V8InspectorImpl);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8INSPECTORIMPL_H_
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
new file mode 100644
index 0000000000..c3d3f48f00
--- /dev/null
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -0,0 +1,417 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-inspector-session-impl.h"
+
+#include "src/inspector/injected-script.h"
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/remote-object-id.h"
+#include "src/inspector/search-util.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console-agent-impl.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-heap-profiler-agent-impl.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-profiler-agent-impl.h"
+#include "src/inspector/v8-runtime-agent-impl.h"
+#include "src/inspector/v8-schema-agent-impl.h"
+
+namespace v8_inspector {
+
+// static
+bool V8InspectorSession::canDispatchMethod(const StringView& method) {
+ return stringViewStartsWith(method,
+ protocol::Runtime::Metainfo::commandPrefix) ||
+ stringViewStartsWith(method,
+ protocol::Debugger::Metainfo::commandPrefix) ||
+ stringViewStartsWith(method,
+ protocol::Profiler::Metainfo::commandPrefix) ||
+ stringViewStartsWith(
+ method, protocol::HeapProfiler::Metainfo::commandPrefix) ||
+ stringViewStartsWith(method,
+ protocol::Console::Metainfo::commandPrefix) ||
+ stringViewStartsWith(method,
+ protocol::Schema::Metainfo::commandPrefix);
+}
+
+std::unique_ptr<V8InspectorSessionImpl> V8InspectorSessionImpl::create(
+ V8InspectorImpl* inspector, int contextGroupId,
+ V8Inspector::Channel* channel, const StringView& state) {
+ return wrapUnique(
+ new V8InspectorSessionImpl(inspector, contextGroupId, channel, state));
+}
+
+V8InspectorSessionImpl::V8InspectorSessionImpl(V8InspectorImpl* inspector,
+ int contextGroupId,
+ V8Inspector::Channel* channel,
+ const StringView& savedState)
+ : m_contextGroupId(contextGroupId),
+ m_inspector(inspector),
+ m_channel(channel),
+ m_customObjectFormatterEnabled(false),
+ m_dispatcher(this),
+ m_state(nullptr),
+ m_runtimeAgent(nullptr),
+ m_debuggerAgent(nullptr),
+ m_heapProfilerAgent(nullptr),
+ m_profilerAgent(nullptr),
+ m_consoleAgent(nullptr),
+ m_schemaAgent(nullptr) {
+ if (savedState.length()) {
+ std::unique_ptr<protocol::Value> state =
+ protocol::parseJSON(toString16(savedState));
+ if (state) m_state = protocol::DictionaryValue::cast(std::move(state));
+ if (!m_state) m_state = protocol::DictionaryValue::create();
+ } else {
+ m_state = protocol::DictionaryValue::create();
+ }
+
+ m_runtimeAgent = wrapUnique(new V8RuntimeAgentImpl(
+ this, this, agentState(protocol::Runtime::Metainfo::domainName)));
+ protocol::Runtime::Dispatcher::wire(&m_dispatcher, m_runtimeAgent.get());
+
+ m_debuggerAgent = wrapUnique(new V8DebuggerAgentImpl(
+ this, this, agentState(protocol::Debugger::Metainfo::domainName)));
+ protocol::Debugger::Dispatcher::wire(&m_dispatcher, m_debuggerAgent.get());
+
+ m_profilerAgent = wrapUnique(new V8ProfilerAgentImpl(
+ this, this, agentState(protocol::Profiler::Metainfo::domainName)));
+ protocol::Profiler::Dispatcher::wire(&m_dispatcher, m_profilerAgent.get());
+
+ m_heapProfilerAgent = wrapUnique(new V8HeapProfilerAgentImpl(
+ this, this, agentState(protocol::HeapProfiler::Metainfo::domainName)));
+ protocol::HeapProfiler::Dispatcher::wire(&m_dispatcher,
+ m_heapProfilerAgent.get());
+
+ m_consoleAgent = wrapUnique(new V8ConsoleAgentImpl(
+ this, this, agentState(protocol::Console::Metainfo::domainName)));
+ protocol::Console::Dispatcher::wire(&m_dispatcher, m_consoleAgent.get());
+
+ m_schemaAgent = wrapUnique(new V8SchemaAgentImpl(
+ this, this, agentState(protocol::Schema::Metainfo::domainName)));
+ protocol::Schema::Dispatcher::wire(&m_dispatcher, m_schemaAgent.get());
+
+ if (savedState.length()) {
+ m_runtimeAgent->restore();
+ m_debuggerAgent->restore();
+ m_heapProfilerAgent->restore();
+ m_profilerAgent->restore();
+ m_consoleAgent->restore();
+ }
+}
+
+V8InspectorSessionImpl::~V8InspectorSessionImpl() {
+ ErrorString errorString;
+ m_consoleAgent->disable(&errorString);
+ m_profilerAgent->disable(&errorString);
+ m_heapProfilerAgent->disable(&errorString);
+ m_debuggerAgent->disable(&errorString);
+ m_runtimeAgent->disable(&errorString);
+
+ discardInjectedScripts();
+ m_inspector->disconnect(this);
+}
+
+protocol::DictionaryValue* V8InspectorSessionImpl::agentState(
+ const String16& name) {
+ protocol::DictionaryValue* state = m_state->getObject(name);
+ if (!state) {
+ std::unique_ptr<protocol::DictionaryValue> newState =
+ protocol::DictionaryValue::create();
+ state = newState.get();
+ m_state->setObject(name, std::move(newState));
+ }
+ return state;
+}
+
+void V8InspectorSessionImpl::sendProtocolResponse(int callId,
+ const String16& message) {
+ m_channel->sendProtocolResponse(callId, toStringView(message));
+}
+
+void V8InspectorSessionImpl::sendProtocolNotification(const String16& message) {
+ m_channel->sendProtocolNotification(toStringView(message));
+}
+
+void V8InspectorSessionImpl::flushProtocolNotifications() {
+ m_channel->flushProtocolNotifications();
+}
+
+void V8InspectorSessionImpl::reset() {
+ m_debuggerAgent->reset();
+ m_runtimeAgent->reset();
+ discardInjectedScripts();
+}
+
+void V8InspectorSessionImpl::discardInjectedScripts() {
+ m_inspectedObjects.clear();
+ const V8InspectorImpl::ContextByIdMap* contexts =
+ m_inspector->contextGroup(m_contextGroupId);
+ if (!contexts) return;
+
+ std::vector<int> keys;
+ keys.reserve(contexts->size());
+ for (auto& idContext : *contexts) keys.push_back(idContext.first);
+ for (auto& key : keys) {
+ contexts = m_inspector->contextGroup(m_contextGroupId);
+ if (!contexts) continue;
+ auto contextIt = contexts->find(key);
+ if (contextIt != contexts->end())
+ contextIt->second
+ ->discardInjectedScript(); // This may destroy some contexts.
+ }
+}
+
+InjectedScript* V8InspectorSessionImpl::findInjectedScript(
+ ErrorString* errorString, int contextId) {
+ if (!contextId) {
+ *errorString = "Cannot find context with specified id";
+ return nullptr;
+ }
+
+ const V8InspectorImpl::ContextByIdMap* contexts =
+ m_inspector->contextGroup(m_contextGroupId);
+ if (!contexts) {
+ *errorString = "Cannot find context with specified id";
+ return nullptr;
+ }
+
+ auto contextsIt = contexts->find(contextId);
+ if (contextsIt == contexts->end()) {
+ *errorString = "Cannot find context with specified id";
+ return nullptr;
+ }
+
+ const std::unique_ptr<InspectedContext>& context = contextsIt->second;
+ if (!context->getInjectedScript()) {
+ if (!context->createInjectedScript()) {
+ *errorString = "Cannot access specified execution context";
+ return nullptr;
+ }
+ if (m_customObjectFormatterEnabled)
+ context->getInjectedScript()->setCustomObjectFormatterEnabled(true);
+ }
+ return context->getInjectedScript();
+}
+
+InjectedScript* V8InspectorSessionImpl::findInjectedScript(
+ ErrorString* errorString, RemoteObjectIdBase* objectId) {
+ return objectId ? findInjectedScript(errorString, objectId->contextId())
+ : nullptr;
+}
+
+void V8InspectorSessionImpl::releaseObjectGroup(const StringView& objectGroup) {
+ releaseObjectGroup(toString16(objectGroup));
+}
+
+void V8InspectorSessionImpl::releaseObjectGroup(const String16& objectGroup) {
+ const V8InspectorImpl::ContextByIdMap* contexts =
+ m_inspector->contextGroup(m_contextGroupId);
+ if (!contexts) return;
+
+ std::vector<int> keys;
+ for (auto& idContext : *contexts) keys.push_back(idContext.first);
+ for (auto& key : keys) {
+ contexts = m_inspector->contextGroup(m_contextGroupId);
+ if (!contexts) continue;
+ auto contextsIt = contexts->find(key);
+ if (contextsIt == contexts->end()) continue;
+ InjectedScript* injectedScript = contextsIt->second->getInjectedScript();
+ if (injectedScript)
+ injectedScript->releaseObjectGroup(
+ objectGroup); // This may destroy some contexts.
+ }
+}
+
+bool V8InspectorSessionImpl::unwrapObject(
+ std::unique_ptr<StringBuffer>* error, const StringView& objectId,
+ v8::Local<v8::Value>* object, v8::Local<v8::Context>* context,
+ std::unique_ptr<StringBuffer>* objectGroup) {
+ ErrorString errorString;
+ String16 objectGroupString;
+ bool result =
+ unwrapObject(&errorString, toString16(objectId), object, context,
+ objectGroup ? &objectGroupString : nullptr);
+ if (error) *error = StringBufferImpl::adopt(errorString);
+ if (objectGroup) *objectGroup = StringBufferImpl::adopt(objectGroupString);
+ return result;
+}
+
+bool V8InspectorSessionImpl::unwrapObject(ErrorString* errorString,
+ const String16& objectId,
+ v8::Local<v8::Value>* object,
+ v8::Local<v8::Context>* context,
+ String16* objectGroup) {
+ std::unique_ptr<RemoteObjectId> remoteId =
+ RemoteObjectId::parse(errorString, objectId);
+ if (!remoteId) return false;
+ InjectedScript* injectedScript =
+ findInjectedScript(errorString, remoteId.get());
+ if (!injectedScript) return false;
+ if (!injectedScript->findObject(errorString, *remoteId, object)) return false;
+ *context = injectedScript->context()->context();
+ if (objectGroup) *objectGroup = injectedScript->objectGroupName(*remoteId);
+ return true;
+}
+
+std::unique_ptr<protocol::Runtime::API::RemoteObject>
+V8InspectorSessionImpl::wrapObject(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value,
+ const StringView& groupName) {
+ return wrapObject(context, value, toString16(groupName), false);
+}
+
+std::unique_ptr<protocol::Runtime::RemoteObject>
+V8InspectorSessionImpl::wrapObject(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value,
+ const String16& groupName,
+ bool generatePreview) {
+ ErrorString errorString;
+ InjectedScript* injectedScript =
+ findInjectedScript(&errorString, V8Debugger::contextId(context));
+ if (!injectedScript) return nullptr;
+ return injectedScript->wrapObject(&errorString, value, groupName, false,
+ generatePreview);
+}
+
+std::unique_ptr<protocol::Runtime::RemoteObject>
+V8InspectorSessionImpl::wrapTable(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> table,
+ v8::Local<v8::Value> columns) {
+ ErrorString errorString;
+ InjectedScript* injectedScript =
+ findInjectedScript(&errorString, V8Debugger::contextId(context));
+ if (!injectedScript) return nullptr;
+ return injectedScript->wrapTable(table, columns);
+}
+
+void V8InspectorSessionImpl::setCustomObjectFormatterEnabled(bool enabled) {
+ m_customObjectFormatterEnabled = enabled;
+ const V8InspectorImpl::ContextByIdMap* contexts =
+ m_inspector->contextGroup(m_contextGroupId);
+ if (!contexts) return;
+ for (auto& idContext : *contexts) {
+ InjectedScript* injectedScript = idContext.second->getInjectedScript();
+ if (injectedScript)
+ injectedScript->setCustomObjectFormatterEnabled(enabled);
+ }
+}
+
+void V8InspectorSessionImpl::reportAllContexts(V8RuntimeAgentImpl* agent) {
+ const V8InspectorImpl::ContextByIdMap* contexts =
+ m_inspector->contextGroup(m_contextGroupId);
+ if (!contexts) return;
+ for (auto& idContext : *contexts)
+ agent->reportExecutionContextCreated(idContext.second.get());
+}
+
+void V8InspectorSessionImpl::dispatchProtocolMessage(
+ const StringView& message) {
+ m_dispatcher.dispatch(protocol::parseJSON(message));
+}
+
+std::unique_ptr<StringBuffer> V8InspectorSessionImpl::stateJSON() {
+ String16 json = m_state->toJSONString();
+ return StringBufferImpl::adopt(json);
+}
+
+std::vector<std::unique_ptr<protocol::Schema::API::Domain>>
+V8InspectorSessionImpl::supportedDomains() {
+ std::vector<std::unique_ptr<protocol::Schema::Domain>> domains =
+ supportedDomainsImpl();
+ std::vector<std::unique_ptr<protocol::Schema::API::Domain>> result;
+ for (size_t i = 0; i < domains.size(); ++i)
+ result.push_back(std::move(domains[i]));
+ return result;
+}
+
+std::vector<std::unique_ptr<protocol::Schema::Domain>>
+V8InspectorSessionImpl::supportedDomainsImpl() {
+ std::vector<std::unique_ptr<protocol::Schema::Domain>> result;
+ result.push_back(protocol::Schema::Domain::create()
+ .setName(protocol::Runtime::Metainfo::domainName)
+ .setVersion(protocol::Runtime::Metainfo::version)
+ .build());
+ result.push_back(protocol::Schema::Domain::create()
+ .setName(protocol::Debugger::Metainfo::domainName)
+ .setVersion(protocol::Debugger::Metainfo::version)
+ .build());
+ result.push_back(protocol::Schema::Domain::create()
+ .setName(protocol::Profiler::Metainfo::domainName)
+ .setVersion(protocol::Profiler::Metainfo::version)
+ .build());
+ result.push_back(protocol::Schema::Domain::create()
+ .setName(protocol::HeapProfiler::Metainfo::domainName)
+ .setVersion(protocol::HeapProfiler::Metainfo::version)
+ .build());
+ result.push_back(protocol::Schema::Domain::create()
+ .setName(protocol::Schema::Metainfo::domainName)
+ .setVersion(protocol::Schema::Metainfo::version)
+ .build());
+ return result;
+}
+
+void V8InspectorSessionImpl::addInspectedObject(
+ std::unique_ptr<V8InspectorSession::Inspectable> inspectable) {
+ m_inspectedObjects.insert(m_inspectedObjects.begin(), std::move(inspectable));
+ if (m_inspectedObjects.size() > kInspectedObjectBufferSize)
+ m_inspectedObjects.resize(kInspectedObjectBufferSize);
+}
+
+V8InspectorSession::Inspectable* V8InspectorSessionImpl::inspectedObject(
+ unsigned num) {
+ if (num >= m_inspectedObjects.size()) return nullptr;
+ return m_inspectedObjects[num].get();
+}
+
+void V8InspectorSessionImpl::schedulePauseOnNextStatement(
+ const StringView& breakReason, const StringView& breakDetails) {
+ m_debuggerAgent->schedulePauseOnNextStatement(
+ toString16(breakReason),
+ protocol::DictionaryValue::cast(protocol::parseJSON(breakDetails)));
+}
+
+void V8InspectorSessionImpl::cancelPauseOnNextStatement() {
+ m_debuggerAgent->cancelPauseOnNextStatement();
+}
+
+void V8InspectorSessionImpl::breakProgram(const StringView& breakReason,
+ const StringView& breakDetails) {
+ m_debuggerAgent->breakProgram(
+ toString16(breakReason),
+ protocol::DictionaryValue::cast(protocol::parseJSON(breakDetails)));
+}
+
+void V8InspectorSessionImpl::setSkipAllPauses(bool skip) {
+ ErrorString errorString;
+ m_debuggerAgent->setSkipAllPauses(&errorString, skip);
+}
+
+void V8InspectorSessionImpl::resume() {
+ ErrorString errorString;
+ m_debuggerAgent->resume(&errorString);
+}
+
+void V8InspectorSessionImpl::stepOver() {
+ ErrorString errorString;
+ m_debuggerAgent->stepOver(&errorString);
+}
+
+std::vector<std::unique_ptr<protocol::Debugger::API::SearchMatch>>
+V8InspectorSessionImpl::searchInTextByLines(const StringView& text,
+ const StringView& query,
+ bool caseSensitive, bool isRegex) {
+ // TODO(dgozman): search may operate on StringView and avoid copying |text|.
+ std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> matches =
+ searchInTextByLinesImpl(this, toString16(text), toString16(query),
+ caseSensitive, isRegex);
+ std::vector<std::unique_ptr<protocol::Debugger::API::SearchMatch>> result;
+ for (size_t i = 0; i < matches.size(); ++i)
+ result.push_back(std::move(matches[i]));
+ return result;
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
new file mode 100644
index 0000000000..e84e8c99a7
--- /dev/null
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -0,0 +1,126 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8INSPECTORSESSIONIMPL_H_
+#define V8_INSPECTOR_V8INSPECTORSESSIONIMPL_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Runtime.h"
+#include "src/inspector/protocol/Schema.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+class InjectedScript;
+class RemoteObjectIdBase;
+class V8ConsoleAgentImpl;
+class V8DebuggerAgentImpl;
+class V8InspectorImpl;
+class V8HeapProfilerAgentImpl;
+class V8ProfilerAgentImpl;
+class V8RuntimeAgentImpl;
+class V8SchemaAgentImpl;
+
+using protocol::ErrorString;
+
+class V8InspectorSessionImpl : public V8InspectorSession,
+ public protocol::FrontendChannel {
+ public:
+ static std::unique_ptr<V8InspectorSessionImpl> create(
+ V8InspectorImpl*, int contextGroupId, V8Inspector::Channel*,
+ const StringView& state);
+ ~V8InspectorSessionImpl();
+
+ V8InspectorImpl* inspector() const { return m_inspector; }
+ V8ConsoleAgentImpl* consoleAgent() { return m_consoleAgent.get(); }
+ V8DebuggerAgentImpl* debuggerAgent() { return m_debuggerAgent.get(); }
+ V8SchemaAgentImpl* schemaAgent() { return m_schemaAgent.get(); }
+ V8ProfilerAgentImpl* profilerAgent() { return m_profilerAgent.get(); }
+ V8RuntimeAgentImpl* runtimeAgent() { return m_runtimeAgent.get(); }
+ int contextGroupId() const { return m_contextGroupId; }
+
+ InjectedScript* findInjectedScript(ErrorString*, int contextId);
+ InjectedScript* findInjectedScript(ErrorString*, RemoteObjectIdBase*);
+ void reset();
+ void discardInjectedScripts();
+ void reportAllContexts(V8RuntimeAgentImpl*);
+ void setCustomObjectFormatterEnabled(bool);
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrapObject(
+ v8::Local<v8::Context>, v8::Local<v8::Value>, const String16& groupName,
+ bool generatePreview);
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrapTable(
+ v8::Local<v8::Context>, v8::Local<v8::Value> table,
+ v8::Local<v8::Value> columns);
+ std::vector<std::unique_ptr<protocol::Schema::Domain>> supportedDomainsImpl();
+ bool unwrapObject(ErrorString*, const String16& objectId,
+ v8::Local<v8::Value>*, v8::Local<v8::Context>*,
+ String16* objectGroup);
+ void releaseObjectGroup(const String16& objectGroup);
+
+ // V8InspectorSession implementation.
+ void dispatchProtocolMessage(const StringView& message) override;
+ std::unique_ptr<StringBuffer> stateJSON() override;
+ std::vector<std::unique_ptr<protocol::Schema::API::Domain>> supportedDomains()
+ override;
+ void addInspectedObject(
+ std::unique_ptr<V8InspectorSession::Inspectable>) override;
+ void schedulePauseOnNextStatement(const StringView& breakReason,
+ const StringView& breakDetails) override;
+ void cancelPauseOnNextStatement() override;
+ void breakProgram(const StringView& breakReason,
+ const StringView& breakDetails) override;
+ void setSkipAllPauses(bool) override;
+ void resume() override;
+ void stepOver() override;
+ std::vector<std::unique_ptr<protocol::Debugger::API::SearchMatch>>
+ searchInTextByLines(const StringView& text, const StringView& query,
+ bool caseSensitive, bool isRegex) override;
+ void releaseObjectGroup(const StringView& objectGroup) override;
+ bool unwrapObject(std::unique_ptr<StringBuffer>*, const StringView& objectId,
+ v8::Local<v8::Value>*, v8::Local<v8::Context>*,
+ std::unique_ptr<StringBuffer>* objectGroup) override;
+ std::unique_ptr<protocol::Runtime::API::RemoteObject> wrapObject(
+ v8::Local<v8::Context>, v8::Local<v8::Value>,
+ const StringView& groupName) override;
+
+ V8InspectorSession::Inspectable* inspectedObject(unsigned num);
+ static const unsigned kInspectedObjectBufferSize = 5;
+
+ private:
+ V8InspectorSessionImpl(V8InspectorImpl*, int contextGroupId,
+ V8Inspector::Channel*, const StringView& state);
+ protocol::DictionaryValue* agentState(const String16& name);
+
+ // protocol::FrontendChannel implementation.
+ void sendProtocolResponse(int callId, const String16& message) override;
+ void sendProtocolNotification(const String16& message) override;
+ void flushProtocolNotifications() override;
+
+ int m_contextGroupId;
+ V8InspectorImpl* m_inspector;
+ V8Inspector::Channel* m_channel;
+ bool m_customObjectFormatterEnabled;
+
+ protocol::UberDispatcher m_dispatcher;
+ std::unique_ptr<protocol::DictionaryValue> m_state;
+
+ std::unique_ptr<V8RuntimeAgentImpl> m_runtimeAgent;
+ std::unique_ptr<V8DebuggerAgentImpl> m_debuggerAgent;
+ std::unique_ptr<V8HeapProfilerAgentImpl> m_heapProfilerAgent;
+ std::unique_ptr<V8ProfilerAgentImpl> m_profilerAgent;
+ std::unique_ptr<V8ConsoleAgentImpl> m_consoleAgent;
+ std::unique_ptr<V8SchemaAgentImpl> m_schemaAgent;
+ std::vector<std::unique_ptr<V8InspectorSession::Inspectable>>
+ m_inspectedObjects;
+
+ DISALLOW_COPY_AND_ASSIGN(V8InspectorSessionImpl);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8INSPECTORSESSIONIMPL_H_
diff --git a/deps/v8/src/inspector/v8-internal-value-type.cc b/deps/v8/src/inspector/v8-internal-value-type.cc
new file mode 100644
index 0000000000..cde8bc9f7f
--- /dev/null
+++ b/deps/v8/src/inspector/v8-internal-value-type.cc
@@ -0,0 +1,77 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-internal-value-type.h"
+
+#include "src/inspector/protocol-platform.h"
+#include "src/inspector/string-util.h"
+
+namespace v8_inspector {
+
+namespace {
+
+v8::Local<v8::Private> internalSubtypePrivate(v8::Isolate* isolate) {
+ return v8::Private::ForApi(
+ isolate,
+ toV8StringInternalized(isolate, "V8InternalType#internalSubtype"));
+}
+
+v8::Local<v8::String> subtypeForInternalType(v8::Isolate* isolate,
+ V8InternalValueType type) {
+ switch (type) {
+ case V8InternalValueType::kEntry:
+ return toV8StringInternalized(isolate, "internal#entry");
+ case V8InternalValueType::kLocation:
+ return toV8StringInternalized(isolate, "internal#location");
+ case V8InternalValueType::kScope:
+ return toV8StringInternalized(isolate, "internal#scope");
+ case V8InternalValueType::kScopeList:
+ return toV8StringInternalized(isolate, "internal#scopeList");
+ }
+ UNREACHABLE();
+ return v8::Local<v8::String>();
+}
+
+} // namespace
+
+bool markAsInternal(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object, V8InternalValueType type) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Local<v8::Private> privateValue = internalSubtypePrivate(isolate);
+ v8::Local<v8::String> subtype = subtypeForInternalType(isolate, type);
+ return object->SetPrivate(context, privateValue, subtype).FromMaybe(false);
+}
+
+bool markArrayEntriesAsInternal(v8::Local<v8::Context> context,
+ v8::Local<v8::Array> array,
+ V8InternalValueType type) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Local<v8::Private> privateValue = internalSubtypePrivate(isolate);
+ v8::Local<v8::String> subtype = subtypeForInternalType(isolate, type);
+ for (uint32_t i = 0; i < array->Length(); ++i) {
+ v8::Local<v8::Value> entry;
+ if (!array->Get(context, i).ToLocal(&entry) || !entry->IsObject())
+ return false;
+ if (!entry.As<v8::Object>()
+ ->SetPrivate(context, privateValue, subtype)
+ .FromMaybe(false))
+ return false;
+ }
+ return true;
+}
+
+v8::Local<v8::Value> v8InternalValueTypeFrom(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Local<v8::Private> privateValue = internalSubtypePrivate(isolate);
+ if (!object->HasPrivate(context, privateValue).FromMaybe(false))
+ return v8::Null(isolate);
+ v8::Local<v8::Value> subtypeValue;
+ if (!object->GetPrivate(context, privateValue).ToLocal(&subtypeValue) ||
+ !subtypeValue->IsString())
+ return v8::Null(isolate);
+ return subtypeValue;
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-internal-value-type.h b/deps/v8/src/inspector/v8-internal-value-type.h
new file mode 100644
index 0000000000..e648a0d4a3
--- /dev/null
+++ b/deps/v8/src/inspector/v8-internal-value-type.h
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8INTERNALVALUETYPE_H_
+#define V8_INSPECTOR_V8INTERNALVALUETYPE_H_
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+enum class V8InternalValueType { kEntry, kLocation, kScope, kScopeList };
+
+bool markAsInternal(v8::Local<v8::Context>, v8::Local<v8::Object>,
+ V8InternalValueType);
+bool markArrayEntriesAsInternal(v8::Local<v8::Context>, v8::Local<v8::Array>,
+ V8InternalValueType);
+v8::Local<v8::Value> v8InternalValueTypeFrom(v8::Local<v8::Context>,
+ v8::Local<v8::Object>);
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8INTERNALVALUETYPE_H_
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
new file mode 100644
index 0000000000..0511ca39b5
--- /dev/null
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -0,0 +1,321 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-profiler-agent-impl.h"
+
+#include <vector>
+
+#include "src/base/atomicops.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+
+#include "include/v8-profiler.h"
+
+namespace v8_inspector {
+
+namespace ProfilerAgentState {
+static const char samplingInterval[] = "samplingInterval";
+static const char userInitiatedProfiling[] = "userInitiatedProfiling";
+static const char profilerEnabled[] = "profilerEnabled";
+}
+
+namespace {
+
+std::unique_ptr<protocol::Array<protocol::Profiler::PositionTickInfo>>
+buildInspectorObjectForPositionTicks(const v8::CpuProfileNode* node) {
+ unsigned lineCount = node->GetHitLineCount();
+ if (!lineCount) return nullptr;
+ auto array = protocol::Array<protocol::Profiler::PositionTickInfo>::create();
+ std::vector<v8::CpuProfileNode::LineTick> entries(lineCount);
+ if (node->GetLineTicks(&entries[0], lineCount)) {
+ for (unsigned i = 0; i < lineCount; i++) {
+ std::unique_ptr<protocol::Profiler::PositionTickInfo> line =
+ protocol::Profiler::PositionTickInfo::create()
+ .setLine(entries[i].line)
+ .setTicks(entries[i].hit_count)
+ .build();
+ array->addItem(std::move(line));
+ }
+ }
+ return array;
+}
+
+std::unique_ptr<protocol::Profiler::ProfileNode> buildInspectorObjectFor(
+ v8::Isolate* isolate, const v8::CpuProfileNode* node) {
+ v8::HandleScope handleScope(isolate);
+ auto callFrame =
+ protocol::Runtime::CallFrame::create()
+ .setFunctionName(toProtocolString(node->GetFunctionName()))
+ .setScriptId(String16::fromInteger(node->GetScriptId()))
+ .setUrl(toProtocolString(node->GetScriptResourceName()))
+ .setLineNumber(node->GetLineNumber() - 1)
+ .setColumnNumber(node->GetColumnNumber() - 1)
+ .build();
+ auto result = protocol::Profiler::ProfileNode::create()
+ .setCallFrame(std::move(callFrame))
+ .setHitCount(node->GetHitCount())
+ .setId(node->GetNodeId())
+ .build();
+
+ const int childrenCount = node->GetChildrenCount();
+ if (childrenCount) {
+ auto children = protocol::Array<int>::create();
+ for (int i = 0; i < childrenCount; i++)
+ children->addItem(node->GetChild(i)->GetNodeId());
+ result->setChildren(std::move(children));
+ }
+
+ const char* deoptReason = node->GetBailoutReason();
+ if (deoptReason && deoptReason[0] && strcmp(deoptReason, "no reason"))
+ result->setDeoptReason(deoptReason);
+
+ auto positionTicks = buildInspectorObjectForPositionTicks(node);
+ if (positionTicks) result->setPositionTicks(std::move(positionTicks));
+
+ return result;
+}
+
+std::unique_ptr<protocol::Array<int>> buildInspectorObjectForSamples(
+ v8::CpuProfile* v8profile) {
+ auto array = protocol::Array<int>::create();
+ int count = v8profile->GetSamplesCount();
+ for (int i = 0; i < count; i++)
+ array->addItem(v8profile->GetSample(i)->GetNodeId());
+ return array;
+}
+
+std::unique_ptr<protocol::Array<int>> buildInspectorObjectForTimestamps(
+ v8::CpuProfile* v8profile) {
+ auto array = protocol::Array<int>::create();
+ int count = v8profile->GetSamplesCount();
+ uint64_t lastTime = v8profile->GetStartTime();
+ for (int i = 0; i < count; i++) {
+ uint64_t ts = v8profile->GetSampleTimestamp(i);
+ array->addItem(static_cast<int>(ts - lastTime));
+ lastTime = ts;
+ }
+ return array;
+}
+
+void flattenNodesTree(v8::Isolate* isolate, const v8::CpuProfileNode* node,
+ protocol::Array<protocol::Profiler::ProfileNode>* list) {
+ list->addItem(buildInspectorObjectFor(isolate, node));
+ const int childrenCount = node->GetChildrenCount();
+ for (int i = 0; i < childrenCount; i++)
+ flattenNodesTree(isolate, node->GetChild(i), list);
+}
+
+std::unique_ptr<protocol::Profiler::Profile> createCPUProfile(
+ v8::Isolate* isolate, v8::CpuProfile* v8profile) {
+ auto nodes = protocol::Array<protocol::Profiler::ProfileNode>::create();
+ flattenNodesTree(isolate, v8profile->GetTopDownRoot(), nodes.get());
+ return protocol::Profiler::Profile::create()
+ .setNodes(std::move(nodes))
+ .setStartTime(static_cast<double>(v8profile->GetStartTime()))
+ .setEndTime(static_cast<double>(v8profile->GetEndTime()))
+ .setSamples(buildInspectorObjectForSamples(v8profile))
+ .setTimeDeltas(buildInspectorObjectForTimestamps(v8profile))
+ .build();
+}
+
+std::unique_ptr<protocol::Debugger::Location> currentDebugLocation(
+ V8InspectorImpl* inspector) {
+ std::unique_ptr<V8StackTraceImpl> callStack =
+ inspector->debugger()->captureStackTrace(false /* fullStack */);
+ auto location = protocol::Debugger::Location::create()
+ .setScriptId(toString16(callStack->topScriptId()))
+ .setLineNumber(callStack->topLineNumber())
+ .build();
+ location->setColumnNumber(callStack->topColumnNumber());
+ return location;
+}
+
+volatile int s_lastProfileId = 0;
+
+} // namespace
+
+class V8ProfilerAgentImpl::ProfileDescriptor {
+ public:
+ ProfileDescriptor(const String16& id, const String16& title)
+ : m_id(id), m_title(title) {}
+ String16 m_id;
+ String16 m_title;
+};
+
+V8ProfilerAgentImpl::V8ProfilerAgentImpl(
+ V8InspectorSessionImpl* session, protocol::FrontendChannel* frontendChannel,
+ protocol::DictionaryValue* state)
+ : m_session(session),
+ m_isolate(m_session->inspector()->isolate()),
+ m_profiler(nullptr),
+ m_state(state),
+ m_frontend(frontendChannel),
+ m_enabled(false),
+ m_recordingCPUProfile(false) {}
+
+V8ProfilerAgentImpl::~V8ProfilerAgentImpl() {
+ if (m_profiler) m_profiler->Dispose();
+}
+
+void V8ProfilerAgentImpl::consoleProfile(const String16& title) {
+ if (!m_enabled) return;
+ String16 id = nextProfileId();
+ m_startedProfiles.push_back(ProfileDescriptor(id, title));
+ startProfiling(id);
+ m_frontend.consoleProfileStarted(
+ id, currentDebugLocation(m_session->inspector()), title);
+}
+
+void V8ProfilerAgentImpl::consoleProfileEnd(const String16& title) {
+ if (!m_enabled) return;
+ String16 id;
+ String16 resolvedTitle;
+ // Take last started profile if no title was passed.
+ if (title.isEmpty()) {
+ if (m_startedProfiles.empty()) return;
+ id = m_startedProfiles.back().m_id;
+ resolvedTitle = m_startedProfiles.back().m_title;
+ m_startedProfiles.pop_back();
+ } else {
+ for (size_t i = 0; i < m_startedProfiles.size(); i++) {
+ if (m_startedProfiles[i].m_title == title) {
+ resolvedTitle = title;
+ id = m_startedProfiles[i].m_id;
+ m_startedProfiles.erase(m_startedProfiles.begin() + i);
+ break;
+ }
+ }
+ if (id.isEmpty()) return;
+ }
+ std::unique_ptr<protocol::Profiler::Profile> profile =
+ stopProfiling(id, true);
+ if (!profile) return;
+ std::unique_ptr<protocol::Debugger::Location> location =
+ currentDebugLocation(m_session->inspector());
+ m_frontend.consoleProfileFinished(id, std::move(location), std::move(profile),
+ resolvedTitle);
+}
+
+void V8ProfilerAgentImpl::enable(ErrorString*) {
+ if (m_enabled) return;
+ m_enabled = true;
+ DCHECK(!m_profiler);
+ m_profiler = v8::CpuProfiler::New(m_isolate);
+ m_state->setBoolean(ProfilerAgentState::profilerEnabled, true);
+}
+
+void V8ProfilerAgentImpl::disable(ErrorString* errorString) {
+ if (!m_enabled) return;
+ for (size_t i = m_startedProfiles.size(); i > 0; --i)
+ stopProfiling(m_startedProfiles[i - 1].m_id, false);
+ m_startedProfiles.clear();
+ stop(nullptr, nullptr);
+ m_profiler->Dispose();
+ m_profiler = nullptr;
+ m_enabled = false;
+ m_state->setBoolean(ProfilerAgentState::profilerEnabled, false);
+}
+
+void V8ProfilerAgentImpl::setSamplingInterval(ErrorString* error,
+ int interval) {
+ if (m_recordingCPUProfile) {
+ *error = "Cannot change sampling interval when profiling.";
+ return;
+ }
+ m_state->setInteger(ProfilerAgentState::samplingInterval, interval);
+ m_profiler->SetSamplingInterval(interval);
+}
+
+void V8ProfilerAgentImpl::restore() {
+ DCHECK(!m_enabled);
+ if (!m_state->booleanProperty(ProfilerAgentState::profilerEnabled, false))
+ return;
+ m_enabled = true;
+ DCHECK(!m_profiler);
+ m_profiler = v8::CpuProfiler::New(m_isolate);
+ int interval = 0;
+ m_state->getInteger(ProfilerAgentState::samplingInterval, &interval);
+ if (interval) m_profiler->SetSamplingInterval(interval);
+ if (m_state->booleanProperty(ProfilerAgentState::userInitiatedProfiling,
+ false)) {
+ ErrorString error;
+ start(&error);
+ }
+}
+
+void V8ProfilerAgentImpl::start(ErrorString* error) {
+ if (m_recordingCPUProfile) return;
+ if (!m_enabled) {
+ *error = "Profiler is not enabled";
+ return;
+ }
+ m_recordingCPUProfile = true;
+ m_frontendInitiatedProfileId = nextProfileId();
+ startProfiling(m_frontendInitiatedProfileId);
+ m_state->setBoolean(ProfilerAgentState::userInitiatedProfiling, true);
+}
+
+void V8ProfilerAgentImpl::stop(
+ ErrorString* errorString,
+ std::unique_ptr<protocol::Profiler::Profile>* profile) {
+ if (!m_recordingCPUProfile) {
+ if (errorString) *errorString = "No recording profiles found";
+ return;
+ }
+ m_recordingCPUProfile = false;
+ std::unique_ptr<protocol::Profiler::Profile> cpuProfile =
+ stopProfiling(m_frontendInitiatedProfileId, !!profile);
+ if (profile) {
+ *profile = std::move(cpuProfile);
+ if (!profile->get() && errorString) *errorString = "Profile is not found";
+ }
+ m_frontendInitiatedProfileId = String16();
+ m_state->setBoolean(ProfilerAgentState::userInitiatedProfiling, false);
+}
+
+String16 V8ProfilerAgentImpl::nextProfileId() {
+ return String16::fromInteger(
+ v8::base::NoBarrier_AtomicIncrement(&s_lastProfileId, 1));
+}
+
+void V8ProfilerAgentImpl::startProfiling(const String16& title) {
+ v8::HandleScope handleScope(m_isolate);
+ m_profiler->StartProfiling(toV8String(m_isolate, title), true);
+}
+
+std::unique_ptr<protocol::Profiler::Profile> V8ProfilerAgentImpl::stopProfiling(
+ const String16& title, bool serialize) {
+ v8::HandleScope handleScope(m_isolate);
+ v8::CpuProfile* profile =
+ m_profiler->StopProfiling(toV8String(m_isolate, title));
+ if (!profile) return nullptr;
+ std::unique_ptr<protocol::Profiler::Profile> result;
+ if (serialize) result = createCPUProfile(m_isolate, profile);
+ profile->Delete();
+ return result;
+}
+
+bool V8ProfilerAgentImpl::isRecording() const {
+ return m_recordingCPUProfile || !m_startedProfiles.empty();
+}
+
+bool V8ProfilerAgentImpl::idleStarted() {
+ if (m_profiler) m_profiler->SetIdle(true);
+ return m_profiler;
+}
+
+bool V8ProfilerAgentImpl::idleFinished() {
+ if (m_profiler) m_profiler->SetIdle(false);
+ return m_profiler;
+}
+
+void V8ProfilerAgentImpl::collectSample() {
+ if (m_profiler) m_profiler->CollectSample();
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.h b/deps/v8/src/inspector/v8-profiler-agent-impl.h
new file mode 100644
index 0000000000..ee8997653a
--- /dev/null
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.h
@@ -0,0 +1,74 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8PROFILERAGENTIMPL_H_
+#define V8_INSPECTOR_V8PROFILERAGENTIMPL_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Profiler.h"
+
+namespace v8 {
+class CpuProfiler;
+class Isolate;
+}
+
+namespace v8_inspector {
+
+class V8InspectorSessionImpl;
+
+using protocol::ErrorString;
+
+class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
+ public:
+ V8ProfilerAgentImpl(V8InspectorSessionImpl*, protocol::FrontendChannel*,
+ protocol::DictionaryValue* state);
+ ~V8ProfilerAgentImpl() override;
+
+ bool enabled() const { return m_enabled; }
+ void restore();
+
+ void enable(ErrorString*) override;
+ void disable(ErrorString*) override;
+ void setSamplingInterval(ErrorString*, int) override;
+ void start(ErrorString*) override;
+ void stop(ErrorString*,
+ std::unique_ptr<protocol::Profiler::Profile>*) override;
+
+ void consoleProfile(const String16& title);
+ void consoleProfileEnd(const String16& title);
+
+ bool idleStarted();
+ bool idleFinished();
+
+ void collectSample();
+
+ private:
+ String16 nextProfileId();
+
+ void startProfiling(const String16& title);
+ std::unique_ptr<protocol::Profiler::Profile> stopProfiling(
+ const String16& title, bool serialize);
+
+ bool isRecording() const;
+
+ V8InspectorSessionImpl* m_session;
+ v8::Isolate* m_isolate;
+ v8::CpuProfiler* m_profiler;
+ protocol::DictionaryValue* m_state;
+ protocol::Profiler::Frontend m_frontend;
+ bool m_enabled;
+ bool m_recordingCPUProfile;
+ class ProfileDescriptor;
+ std::vector<ProfileDescriptor> m_startedProfiles;
+ String16 m_frontendInitiatedProfileId;
+
+ DISALLOW_COPY_AND_ASSIGN(V8ProfilerAgentImpl);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8PROFILERAGENTIMPL_H_
diff --git a/deps/v8/src/inspector/v8-regex.cc b/deps/v8/src/inspector/v8-regex.cc
new file mode 100644
index 0000000000..47af70d360
--- /dev/null
+++ b/deps/v8/src/inspector/v8-regex.cc
@@ -0,0 +1,93 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-regex.h"
+
+#include <limits.h>
+
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-inspector-impl.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+V8Regex::V8Regex(V8InspectorImpl* inspector, const String16& pattern,
+ bool caseSensitive, bool multiline)
+ : m_inspector(inspector) {
+ v8::Isolate* isolate = m_inspector->isolate();
+ v8::HandleScope handleScope(isolate);
+ v8::Local<v8::Context> context = m_inspector->regexContext();
+ v8::Context::Scope contextScope(context);
+ v8::TryCatch tryCatch(isolate);
+
+ unsigned flags = v8::RegExp::kNone;
+ if (!caseSensitive) flags |= v8::RegExp::kIgnoreCase;
+ if (multiline) flags |= v8::RegExp::kMultiline;
+
+ v8::Local<v8::RegExp> regex;
+ if (v8::RegExp::New(context, toV8String(isolate, pattern),
+ static_cast<v8::RegExp::Flags>(flags))
+ .ToLocal(&regex))
+ m_regex.Reset(isolate, regex);
+ else if (tryCatch.HasCaught())
+ m_errorMessage = toProtocolString(tryCatch.Message()->Get());
+ else
+ m_errorMessage = "Internal error";
+}
+
+int V8Regex::match(const String16& string, int startFrom,
+ int* matchLength) const {
+ if (matchLength) *matchLength = 0;
+
+ if (m_regex.IsEmpty() || string.isEmpty()) return -1;
+
+ // v8 strings are limited to int.
+ if (string.length() > INT_MAX) return -1;
+
+ v8::Isolate* isolate = m_inspector->isolate();
+ v8::HandleScope handleScope(isolate);
+ v8::Local<v8::Context> context = m_inspector->regexContext();
+ v8::MicrotasksScope microtasks(isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::TryCatch tryCatch(isolate);
+
+ v8::Local<v8::RegExp> regex = m_regex.Get(isolate);
+ v8::Local<v8::Value> exec;
+ if (!regex->Get(context, toV8StringInternalized(isolate, "exec"))
+ .ToLocal(&exec))
+ return -1;
+ v8::Local<v8::Value> argv[] = {
+ toV8String(isolate, string.substring(startFrom))};
+ v8::Local<v8::Value> returnValue;
+ if (!exec.As<v8::Function>()
+ ->Call(context, regex, arraysize(argv), argv)
+ .ToLocal(&returnValue))
+ return -1;
+
+ // RegExp#exec returns null if there's no match, otherwise it returns an
+ // Array of strings with the first being the whole match string and others
+ // being subgroups. The Array also has some random properties tacked on like
+ // "index" which is the offset of the match.
+ //
+ // https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Global_Objects/RegExp/exec
+
+ DCHECK(!returnValue.IsEmpty());
+ if (!returnValue->IsArray()) return -1;
+
+ v8::Local<v8::Array> result = returnValue.As<v8::Array>();
+ v8::Local<v8::Value> matchOffset;
+ if (!result->Get(context, toV8StringInternalized(isolate, "index"))
+ .ToLocal(&matchOffset))
+ return -1;
+ if (matchLength) {
+ v8::Local<v8::Value> match;
+ if (!result->Get(context, 0).ToLocal(&match)) return -1;
+ *matchLength = match.As<v8::String>()->Length();
+ }
+
+ return matchOffset.As<v8::Int32>()->Value() + startFrom;
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-regex.h b/deps/v8/src/inspector/v8-regex.h
new file mode 100644
index 0000000000..b4b1f8ce13
--- /dev/null
+++ b/deps/v8/src/inspector/v8-regex.h
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8REGEX_H_
+#define V8_INSPECTOR_V8REGEX_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/string-16.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class V8InspectorImpl;
+
+enum MultilineMode { MultilineDisabled, MultilineEnabled };
+
+class V8Regex {
+ public:
+ V8Regex(V8InspectorImpl*, const String16&, bool caseSensitive,
+ bool multiline = false);
+ int match(const String16&, int startFrom = 0, int* matchLength = 0) const;
+ bool isValid() const { return !m_regex.IsEmpty(); }
+ const String16& errorMessage() const { return m_errorMessage; }
+
+ private:
+ V8InspectorImpl* m_inspector;
+ v8::Global<v8::RegExp> m_regex;
+ String16 m_errorMessage;
+
+ DISALLOW_COPY_AND_ASSIGN(V8Regex);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8REGEX_H_
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
new file mode 100644
index 0000000000..640ec317d2
--- /dev/null
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -0,0 +1,738 @@
+/*
+ * Copyright (C) 2011 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/inspector/v8-runtime-agent-impl.h"
+
+#include "src/inspector/injected-script.h"
+#include "src/inspector/inspected-context.h"
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/remote-object-id.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-console-message.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/inspector/v8-stack-trace-impl.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+namespace V8RuntimeAgentImplState {
+static const char customObjectFormatterEnabled[] =
+ "customObjectFormatterEnabled";
+static const char runtimeEnabled[] = "runtimeEnabled";
+};
+
+using protocol::Runtime::RemoteObject;
+
+static bool hasInternalError(ErrorString* errorString, bool hasError) {
+ if (hasError) *errorString = "Internal error";
+ return hasError;
+}
+
+namespace {
+
+template <typename Callback>
+class ProtocolPromiseHandler {
+ public:
+ static void add(V8InspectorImpl* inspector, v8::Local<v8::Context> context,
+ v8::MaybeLocal<v8::Value> value,
+ const String16& notPromiseError, int contextGroupId,
+ int executionContextId, const String16& objectGroup,
+ bool returnByValue, bool generatePreview,
+ std::unique_ptr<Callback> callback) {
+ if (value.IsEmpty()) {
+ callback->sendFailure("Internal error");
+ return;
+ }
+ if (!value.ToLocalChecked()->IsPromise()) {
+ callback->sendFailure(notPromiseError);
+ return;
+ }
+ v8::MicrotasksScope microtasks_scope(inspector->isolate(),
+ v8::MicrotasksScope::kRunMicrotasks);
+ v8::Local<v8::Promise> promise =
+ v8::Local<v8::Promise>::Cast(value.ToLocalChecked());
+ Callback* rawCallback = callback.get();
+ ProtocolPromiseHandler<Callback>* handler = new ProtocolPromiseHandler(
+ inspector, contextGroupId, executionContextId, objectGroup,
+ returnByValue, generatePreview, std::move(callback));
+ v8::Local<v8::Value> wrapper = handler->m_wrapper.Get(inspector->isolate());
+
+ v8::Local<v8::Function> thenCallbackFunction =
+ v8::Function::New(context, thenCallback, wrapper, 0,
+ v8::ConstructorBehavior::kThrow)
+ .ToLocalChecked();
+ if (promise->Then(context, thenCallbackFunction).IsEmpty()) {
+ rawCallback->sendFailure("Internal error");
+ return;
+ }
+ v8::Local<v8::Function> catchCallbackFunction =
+ v8::Function::New(context, catchCallback, wrapper, 0,
+ v8::ConstructorBehavior::kThrow)
+ .ToLocalChecked();
+ if (promise->Catch(context, catchCallbackFunction).IsEmpty()) {
+ rawCallback->sendFailure("Internal error");
+ return;
+ }
+ }
+
+ private:
+ static void thenCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ProtocolPromiseHandler<Callback>* handler =
+ static_cast<ProtocolPromiseHandler<Callback>*>(
+ info.Data().As<v8::External>()->Value());
+ DCHECK(handler);
+ v8::Local<v8::Value> value =
+ info.Length() > 0
+ ? info[0]
+ : v8::Local<v8::Value>::Cast(v8::Undefined(info.GetIsolate()));
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue(
+ handler->wrapObject(value));
+ if (!wrappedValue) return;
+ handler->m_callback->sendSuccess(
+ std::move(wrappedValue), Maybe<protocol::Runtime::ExceptionDetails>());
+ }
+
+ static void catchCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ProtocolPromiseHandler<Callback>* handler =
+ static_cast<ProtocolPromiseHandler<Callback>*>(
+ info.Data().As<v8::External>()->Value());
+ DCHECK(handler);
+ v8::Local<v8::Value> value =
+ info.Length() > 0
+ ? info[0]
+ : v8::Local<v8::Value>::Cast(v8::Undefined(info.GetIsolate()));
+
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue(
+ handler->wrapObject(value));
+ if (!wrappedValue) return;
+
+ std::unique_ptr<V8StackTraceImpl> stack =
+ handler->m_inspector->debugger()->captureStackTrace(true);
+ std::unique_ptr<protocol::Runtime::ExceptionDetails> exceptionDetails =
+ protocol::Runtime::ExceptionDetails::create()
+ .setExceptionId(handler->m_inspector->nextExceptionId())
+ .setText("Uncaught (in promise)")
+ .setLineNumber(stack && !stack->isEmpty() ? stack->topLineNumber()
+ : 0)
+ .setColumnNumber(
+ stack && !stack->isEmpty() ? stack->topColumnNumber() : 0)
+ .setException(wrappedValue->clone())
+ .build();
+ if (stack)
+ exceptionDetails->setStackTrace(stack->buildInspectorObjectImpl());
+ if (stack && !stack->isEmpty())
+ exceptionDetails->setScriptId(toString16(stack->topScriptId()));
+ handler->m_callback->sendSuccess(std::move(wrappedValue),
+ std::move(exceptionDetails));
+ }
+
+ ProtocolPromiseHandler(V8InspectorImpl* inspector, int contextGroupId,
+ int executionContextId, const String16& objectGroup,
+ bool returnByValue, bool generatePreview,
+ std::unique_ptr<Callback> callback)
+ : m_inspector(inspector),
+ m_contextGroupId(contextGroupId),
+ m_executionContextId(executionContextId),
+ m_objectGroup(objectGroup),
+ m_returnByValue(returnByValue),
+ m_generatePreview(generatePreview),
+ m_callback(std::move(callback)),
+ m_wrapper(inspector->isolate(),
+ v8::External::New(inspector->isolate(), this)) {
+ m_wrapper.SetWeak(this, cleanup, v8::WeakCallbackType::kParameter);
+ }
+
+ static void cleanup(
+ const v8::WeakCallbackInfo<ProtocolPromiseHandler<Callback>>& data) {
+ if (!data.GetParameter()->m_wrapper.IsEmpty()) {
+ data.GetParameter()->m_wrapper.Reset();
+ data.SetSecondPassCallback(cleanup);
+ } else {
+ data.GetParameter()->m_callback->sendFailure("Promise was collected");
+ delete data.GetParameter();
+ }
+ }
+
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrapObject(
+ v8::Local<v8::Value> value) {
+ ErrorString errorString;
+ InjectedScript::ContextScope scope(&errorString, m_inspector,
+ m_contextGroupId, m_executionContextId);
+ if (!scope.initialize()) {
+ m_callback->sendFailure(errorString);
+ return nullptr;
+ }
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue =
+ scope.injectedScript()->wrapObject(&errorString, value, m_objectGroup,
+ m_returnByValue, m_generatePreview);
+ if (!wrappedValue) {
+ m_callback->sendFailure(errorString);
+ return nullptr;
+ }
+ return wrappedValue;
+ }
+
+ V8InspectorImpl* m_inspector;
+ int m_contextGroupId;
+ int m_executionContextId;
+ String16 m_objectGroup;
+ bool m_returnByValue;
+ bool m_generatePreview;
+ std::unique_ptr<Callback> m_callback;
+ v8::Global<v8::External> m_wrapper;
+};
+
+template <typename Callback>
+bool wrapEvaluateResultAsync(InjectedScript* injectedScript,
+ v8::MaybeLocal<v8::Value> maybeResultValue,
+ const v8::TryCatch& tryCatch,
+ const String16& objectGroup, bool returnByValue,
+ bool generatePreview, Callback* callback) {
+ std::unique_ptr<RemoteObject> result;
+ Maybe<protocol::Runtime::ExceptionDetails> exceptionDetails;
+
+ ErrorString errorString;
+ injectedScript->wrapEvaluateResult(
+ &errorString, maybeResultValue, tryCatch, objectGroup, returnByValue,
+ generatePreview, &result, &exceptionDetails);
+ if (errorString.isEmpty()) {
+ callback->sendSuccess(std::move(result), exceptionDetails);
+ return true;
+ }
+ callback->sendFailure(errorString);
+ return false;
+}
+
+int ensureContext(ErrorString* errorString, V8InspectorImpl* inspector,
+ int contextGroupId, const Maybe<int>& executionContextId) {
+ int contextId;
+ if (executionContextId.isJust()) {
+ contextId = executionContextId.fromJust();
+ } else {
+ v8::HandleScope handles(inspector->isolate());
+ v8::Local<v8::Context> defaultContext =
+ inspector->client()->ensureDefaultContextInGroup(contextGroupId);
+ if (defaultContext.IsEmpty()) {
+ *errorString = "Cannot find default execution context";
+ return 0;
+ }
+ contextId = V8Debugger::contextId(defaultContext);
+ }
+ return contextId;
+}
+
+} // namespace
+
+V8RuntimeAgentImpl::V8RuntimeAgentImpl(
+ V8InspectorSessionImpl* session, protocol::FrontendChannel* FrontendChannel,
+ protocol::DictionaryValue* state)
+ : m_session(session),
+ m_state(state),
+ m_frontend(FrontendChannel),
+ m_inspector(session->inspector()),
+ m_enabled(false) {}
+
+V8RuntimeAgentImpl::~V8RuntimeAgentImpl() {}
+
+void V8RuntimeAgentImpl::evaluate(
+ const String16& expression, const Maybe<String16>& objectGroup,
+ const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& silent,
+ const Maybe<int>& executionContextId, const Maybe<bool>& returnByValue,
+ const Maybe<bool>& generatePreview, const Maybe<bool>& userGesture,
+ const Maybe<bool>& awaitPromise,
+ std::unique_ptr<EvaluateCallback> callback) {
+ ErrorString errorString;
+ int contextId =
+ ensureContext(&errorString, m_inspector, m_session->contextGroupId(),
+ executionContextId);
+ if (!errorString.isEmpty()) {
+ callback->sendFailure(errorString);
+ return;
+ }
+
+ InjectedScript::ContextScope scope(&errorString, m_inspector,
+ m_session->contextGroupId(), contextId);
+ if (!scope.initialize()) {
+ callback->sendFailure(errorString);
+ return;
+ }
+
+ if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
+ if (userGesture.fromMaybe(false)) scope.pretendUserGesture();
+
+ if (includeCommandLineAPI.fromMaybe(false) &&
+ !scope.installCommandLineAPI()) {
+ callback->sendFailure(errorString);
+ return;
+ }
+
+ bool evalIsDisabled = !scope.context()->IsCodeGenerationFromStringsAllowed();
+ // Temporarily enable allow evals for inspector.
+ if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(true);
+
+ v8::MaybeLocal<v8::Value> maybeResultValue;
+ v8::Local<v8::Script> script = m_inspector->compileScript(
+ scope.context(), toV8String(m_inspector->isolate(), expression),
+ String16(), false);
+ if (!script.IsEmpty())
+ maybeResultValue = m_inspector->runCompiledScript(scope.context(), script);
+
+ if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(false);
+
+ // Re-initialize after running client's code, as it could have destroyed
+ // context or session.
+ if (!scope.initialize()) {
+ callback->sendFailure(errorString);
+ return;
+ }
+
+ if (!awaitPromise.fromMaybe(false) || scope.tryCatch().HasCaught()) {
+ wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
+ scope.tryCatch(), objectGroup.fromMaybe(""),
+ returnByValue.fromMaybe(false),
+ generatePreview.fromMaybe(false), callback.get());
+ return;
+ }
+ ProtocolPromiseHandler<EvaluateCallback>::add(
+ m_inspector, scope.context(), maybeResultValue,
+ "Result of the evaluation is not a promise", m_session->contextGroupId(),
+ scope.injectedScript()->context()->contextId(), objectGroup.fromMaybe(""),
+ returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
+ std::move(callback));
+}
+
+void V8RuntimeAgentImpl::awaitPromise(
+ const String16& promiseObjectId, const Maybe<bool>& returnByValue,
+ const Maybe<bool>& generatePreview,
+ std::unique_ptr<AwaitPromiseCallback> callback) {
+ ErrorString errorString;
+ InjectedScript::ObjectScope scope(
+ &errorString, m_inspector, m_session->contextGroupId(), promiseObjectId);
+ if (!scope.initialize()) {
+ callback->sendFailure(errorString);
+ return;
+ }
+ ProtocolPromiseHandler<AwaitPromiseCallback>::add(
+ m_inspector, scope.context(), scope.object(),
+ "Could not find promise with given id", m_session->contextGroupId(),
+ scope.injectedScript()->context()->contextId(), scope.objectGroupName(),
+ returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
+ std::move(callback));
+}
+
+void V8RuntimeAgentImpl::callFunctionOn(
+ const String16& objectId, const String16& expression,
+ const Maybe<protocol::Array<protocol::Runtime::CallArgument>>&
+ optionalArguments,
+ const Maybe<bool>& silent, const Maybe<bool>& returnByValue,
+ const Maybe<bool>& generatePreview, const Maybe<bool>& userGesture,
+ const Maybe<bool>& awaitPromise,
+ std::unique_ptr<CallFunctionOnCallback> callback) {
+ ErrorString errorString;
+ InjectedScript::ObjectScope scope(&errorString, m_inspector,
+ m_session->contextGroupId(), objectId);
+ if (!scope.initialize()) {
+ callback->sendFailure(errorString);
+ return;
+ }
+
+ std::unique_ptr<v8::Local<v8::Value>[]> argv = nullptr;
+ int argc = 0;
+ if (optionalArguments.isJust()) {
+ protocol::Array<protocol::Runtime::CallArgument>* arguments =
+ optionalArguments.fromJust();
+ argc = static_cast<int>(arguments->length());
+ argv.reset(new v8::Local<v8::Value>[argc]);
+ for (int i = 0; i < argc; ++i) {
+ v8::Local<v8::Value> argumentValue;
+ if (!scope.injectedScript()
+ ->resolveCallArgument(&errorString, arguments->get(i))
+ .ToLocal(&argumentValue)) {
+ callback->sendFailure(errorString);
+ return;
+ }
+ argv[i] = argumentValue;
+ }
+ }
+
+ if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
+ if (userGesture.fromMaybe(false)) scope.pretendUserGesture();
+
+ v8::MaybeLocal<v8::Value> maybeFunctionValue =
+ m_inspector->compileAndRunInternalScript(
+ scope.context(),
+ toV8String(m_inspector->isolate(), "(" + expression + ")"));
+ // Re-initialize after running client's code, as it could have destroyed
+ // context or session.
+ if (!scope.initialize()) {
+ callback->sendFailure(errorString);
+ return;
+ }
+
+ if (scope.tryCatch().HasCaught()) {
+ wrapEvaluateResultAsync(scope.injectedScript(), maybeFunctionValue,
+ scope.tryCatch(), scope.objectGroupName(), false,
+ false, callback.get());
+ return;
+ }
+
+ v8::Local<v8::Value> functionValue;
+ if (!maybeFunctionValue.ToLocal(&functionValue) ||
+ !functionValue->IsFunction()) {
+ callback->sendFailure("Given expression does not evaluate to a function");
+ return;
+ }
+
+ v8::MaybeLocal<v8::Value> maybeResultValue = m_inspector->callFunction(
+ functionValue.As<v8::Function>(), scope.context(), scope.object(), argc,
+ argv.get());
+ // Re-initialize after running client's code, as it could have destroyed
+ // context or session.
+ if (!scope.initialize()) {
+ callback->sendFailure(errorString);
+ return;
+ }
+
+ if (!awaitPromise.fromMaybe(false) || scope.tryCatch().HasCaught()) {
+ wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
+ scope.tryCatch(), scope.objectGroupName(),
+ returnByValue.fromMaybe(false),
+ generatePreview.fromMaybe(false), callback.get());
+ return;
+ }
+
+ ProtocolPromiseHandler<CallFunctionOnCallback>::add(
+ m_inspector, scope.context(), maybeResultValue,
+ "Result of the function call is not a promise",
+ m_session->contextGroupId(),
+ scope.injectedScript()->context()->contextId(), scope.objectGroupName(),
+ returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
+ std::move(callback));
+}
+
+void V8RuntimeAgentImpl::getProperties(
+ ErrorString* errorString, const String16& objectId,
+ const Maybe<bool>& ownProperties, const Maybe<bool>& accessorPropertiesOnly,
+ const Maybe<bool>& generatePreview,
+ std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
+ result,
+ Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
+ internalProperties,
+ Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
+ using protocol::Runtime::InternalPropertyDescriptor;
+
+ InjectedScript::ObjectScope scope(errorString, m_inspector,
+ m_session->contextGroupId(), objectId);
+ if (!scope.initialize()) return;
+
+ scope.ignoreExceptionsAndMuteConsole();
+ if (!scope.object()->IsObject()) {
+ *errorString = "Value with given id is not an object";
+ return;
+ }
+
+ v8::Local<v8::Object> object = scope.object().As<v8::Object>();
+ scope.injectedScript()->getProperties(
+ errorString, object, scope.objectGroupName(),
+ ownProperties.fromMaybe(false), accessorPropertiesOnly.fromMaybe(false),
+ generatePreview.fromMaybe(false), result, exceptionDetails);
+ if (!errorString->isEmpty() || exceptionDetails->isJust() ||
+ accessorPropertiesOnly.fromMaybe(false))
+ return;
+ v8::Local<v8::Array> propertiesArray;
+ if (hasInternalError(errorString, !m_inspector->debugger()
+ ->internalProperties(scope.context(),
+ scope.object())
+ .ToLocal(&propertiesArray)))
+ return;
+ std::unique_ptr<protocol::Array<InternalPropertyDescriptor>>
+ propertiesProtocolArray =
+ protocol::Array<InternalPropertyDescriptor>::create();
+ for (uint32_t i = 0; i < propertiesArray->Length(); i += 2) {
+ v8::Local<v8::Value> name;
+ if (hasInternalError(
+ errorString,
+ !propertiesArray->Get(scope.context(), i).ToLocal(&name)) ||
+ !name->IsString())
+ return;
+ v8::Local<v8::Value> value;
+ if (hasInternalError(
+ errorString,
+ !propertiesArray->Get(scope.context(), i + 1).ToLocal(&value)))
+ return;
+ std::unique_ptr<RemoteObject> wrappedValue =
+ scope.injectedScript()->wrapObject(errorString, value,
+ scope.objectGroupName());
+ if (!wrappedValue) return;
+ propertiesProtocolArray->addItem(
+ InternalPropertyDescriptor::create()
+ .setName(toProtocolString(name.As<v8::String>()))
+ .setValue(std::move(wrappedValue))
+ .build());
+ }
+ if (!propertiesProtocolArray->length()) return;
+ *internalProperties = std::move(propertiesProtocolArray);
+}
+
+void V8RuntimeAgentImpl::releaseObject(ErrorString* errorString,
+ const String16& objectId) {
+ InjectedScript::ObjectScope scope(errorString, m_inspector,
+ m_session->contextGroupId(), objectId);
+ if (!scope.initialize()) return;
+ scope.injectedScript()->releaseObject(objectId);
+}
+
+void V8RuntimeAgentImpl::releaseObjectGroup(ErrorString*,
+ const String16& objectGroup) {
+ m_session->releaseObjectGroup(objectGroup);
+}
+
+void V8RuntimeAgentImpl::runIfWaitingForDebugger(ErrorString* errorString) {
+ m_inspector->client()->runIfWaitingForDebugger(m_session->contextGroupId());
+}
+
+void V8RuntimeAgentImpl::setCustomObjectFormatterEnabled(ErrorString*,
+ bool enabled) {
+ m_state->setBoolean(V8RuntimeAgentImplState::customObjectFormatterEnabled,
+ enabled);
+ m_session->setCustomObjectFormatterEnabled(enabled);
+}
+
+void V8RuntimeAgentImpl::discardConsoleEntries(ErrorString*) {
+ V8ConsoleMessageStorage* storage =
+ m_inspector->ensureConsoleMessageStorage(m_session->contextGroupId());
+ storage->clear();
+}
+
+void V8RuntimeAgentImpl::compileScript(
+ ErrorString* errorString, const String16& expression,
+ const String16& sourceURL, bool persistScript,
+ const Maybe<int>& executionContextId, Maybe<String16>* scriptId,
+ Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
+ if (!m_enabled) {
+ *errorString = "Runtime agent is not enabled";
+ return;
+ }
+ int contextId =
+ ensureContext(errorString, m_inspector, m_session->contextGroupId(),
+ executionContextId);
+ if (!errorString->isEmpty()) return;
+ InjectedScript::ContextScope scope(errorString, m_inspector,
+ m_session->contextGroupId(), contextId);
+ if (!scope.initialize()) return;
+
+ if (!persistScript) m_inspector->debugger()->muteScriptParsedEvents();
+ v8::Local<v8::Script> script = m_inspector->compileScript(
+ scope.context(), toV8String(m_inspector->isolate(), expression),
+ sourceURL, false);
+ if (!persistScript) m_inspector->debugger()->unmuteScriptParsedEvents();
+ if (script.IsEmpty()) {
+ if (scope.tryCatch().HasCaught())
+ *exceptionDetails = scope.injectedScript()->createExceptionDetails(
+ errorString, scope.tryCatch(), String16(), false);
+ else
+ *errorString = "Script compilation failed";
+ return;
+ }
+
+ if (!persistScript) return;
+
+ String16 scriptValueId =
+ String16::fromInteger(script->GetUnboundScript()->GetId());
+ std::unique_ptr<v8::Global<v8::Script>> global(
+ new v8::Global<v8::Script>(m_inspector->isolate(), script));
+ m_compiledScripts[scriptValueId] = std::move(global);
+ *scriptId = scriptValueId;
+}
+
+void V8RuntimeAgentImpl::runScript(
+ const String16& scriptId, const Maybe<int>& executionContextId,
+ const Maybe<String16>& objectGroup, const Maybe<bool>& silent,
+ const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& returnByValue,
+ const Maybe<bool>& generatePreview, const Maybe<bool>& awaitPromise,
+ std::unique_ptr<RunScriptCallback> callback) {
+ if (!m_enabled) {
+ callback->sendFailure("Runtime agent is not enabled");
+ return;
+ }
+
+ auto it = m_compiledScripts.find(scriptId);
+ if (it == m_compiledScripts.end()) {
+ callback->sendFailure("No script with given id");
+ return;
+ }
+
+ ErrorString errorString;
+ int contextId =
+ ensureContext(&errorString, m_inspector, m_session->contextGroupId(),
+ executionContextId);
+ if (!errorString.isEmpty()) {
+ callback->sendFailure(errorString);
+ return;
+ }
+
+ InjectedScript::ContextScope scope(&errorString, m_inspector,
+ m_session->contextGroupId(), contextId);
+ if (!scope.initialize()) {
+ callback->sendFailure(errorString);
+ return;
+ }
+
+ if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
+
+ std::unique_ptr<v8::Global<v8::Script>> scriptWrapper = std::move(it->second);
+ m_compiledScripts.erase(it);
+ v8::Local<v8::Script> script = scriptWrapper->Get(m_inspector->isolate());
+ if (script.IsEmpty()) {
+ callback->sendFailure("Script execution failed");
+ return;
+ }
+
+ if (includeCommandLineAPI.fromMaybe(false) && !scope.installCommandLineAPI())
+ return;
+
+ v8::MaybeLocal<v8::Value> maybeResultValue =
+ m_inspector->runCompiledScript(scope.context(), script);
+
+ // Re-initialize after running client's code, as it could have destroyed
+ // context or session.
+ if (!scope.initialize()) return;
+
+ if (!awaitPromise.fromMaybe(false) || scope.tryCatch().HasCaught()) {
+ wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
+ scope.tryCatch(), objectGroup.fromMaybe(""),
+ returnByValue.fromMaybe(false),
+ generatePreview.fromMaybe(false), callback.get());
+ return;
+ }
+ ProtocolPromiseHandler<RunScriptCallback>::add(
+ m_inspector, scope.context(), maybeResultValue.ToLocalChecked(),
+ "Result of the script execution is not a promise",
+ m_session->contextGroupId(),
+ scope.injectedScript()->context()->contextId(), objectGroup.fromMaybe(""),
+ returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
+ std::move(callback));
+}
+
+void V8RuntimeAgentImpl::restore() {
+ if (!m_state->booleanProperty(V8RuntimeAgentImplState::runtimeEnabled, false))
+ return;
+ m_frontend.executionContextsCleared();
+ ErrorString error;
+ enable(&error);
+ if (m_state->booleanProperty(
+ V8RuntimeAgentImplState::customObjectFormatterEnabled, false))
+ m_session->setCustomObjectFormatterEnabled(true);
+}
+
+void V8RuntimeAgentImpl::enable(ErrorString* errorString) {
+ if (m_enabled) return;
+ m_inspector->client()->beginEnsureAllContextsInGroup(
+ m_session->contextGroupId());
+ m_enabled = true;
+ m_state->setBoolean(V8RuntimeAgentImplState::runtimeEnabled, true);
+ m_inspector->enableStackCapturingIfNeeded();
+ m_session->reportAllContexts(this);
+ V8ConsoleMessageStorage* storage =
+ m_inspector->ensureConsoleMessageStorage(m_session->contextGroupId());
+ for (const auto& message : storage->messages()) {
+ if (!reportMessage(message.get(), false)) return;
+ }
+}
+
+void V8RuntimeAgentImpl::disable(ErrorString* errorString) {
+ if (!m_enabled) return;
+ m_enabled = false;
+ m_state->setBoolean(V8RuntimeAgentImplState::runtimeEnabled, false);
+ m_inspector->disableStackCapturingIfNeeded();
+ m_session->discardInjectedScripts();
+ reset();
+ m_inspector->client()->endEnsureAllContextsInGroup(
+ m_session->contextGroupId());
+}
+
+void V8RuntimeAgentImpl::reset() {
+ m_compiledScripts.clear();
+ if (m_enabled) {
+ if (const V8InspectorImpl::ContextByIdMap* contexts =
+ m_inspector->contextGroup(m_session->contextGroupId())) {
+ for (auto& idContext : *contexts) idContext.second->setReported(false);
+ }
+ m_frontend.executionContextsCleared();
+ }
+}
+
+void V8RuntimeAgentImpl::reportExecutionContextCreated(
+ InspectedContext* context) {
+ if (!m_enabled) return;
+ context->setReported(true);
+ std::unique_ptr<protocol::Runtime::ExecutionContextDescription> description =
+ protocol::Runtime::ExecutionContextDescription::create()
+ .setId(context->contextId())
+ .setName(context->humanReadableName())
+ .setOrigin(context->origin())
+ .build();
+ if (!context->auxData().isEmpty())
+ description->setAuxData(protocol::DictionaryValue::cast(
+ protocol::parseJSON(context->auxData())));
+ m_frontend.executionContextCreated(std::move(description));
+}
+
+void V8RuntimeAgentImpl::reportExecutionContextDestroyed(
+ InspectedContext* context) {
+ if (m_enabled && context->isReported()) {
+ context->setReported(false);
+ m_frontend.executionContextDestroyed(context->contextId());
+ }
+}
+
+void V8RuntimeAgentImpl::inspect(
+ std::unique_ptr<protocol::Runtime::RemoteObject> objectToInspect,
+ std::unique_ptr<protocol::DictionaryValue> hints) {
+ if (m_enabled)
+ m_frontend.inspectRequested(std::move(objectToInspect), std::move(hints));
+}
+
+void V8RuntimeAgentImpl::messageAdded(V8ConsoleMessage* message) {
+ if (m_enabled) reportMessage(message, true);
+}
+
+bool V8RuntimeAgentImpl::reportMessage(V8ConsoleMessage* message,
+ bool generatePreview) {
+ message->reportToFrontend(&m_frontend, m_session, generatePreview);
+ m_frontend.flush();
+ return m_inspector->hasConsoleMessageStorage(m_session->contextGroupId());
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
new file mode 100644
index 0000000000..edeeed47ed
--- /dev/null
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2011 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef V8_INSPECTOR_V8RUNTIMEAGENTIMPL_H_
+#define V8_INSPECTOR_V8RUNTIMEAGENTIMPL_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Runtime.h"
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class InjectedScript;
+class InspectedContext;
+class RemoteObjectIdBase;
+class V8ConsoleMessage;
+class V8InspectorImpl;
+class V8InspectorSessionImpl;
+
+using protocol::ErrorString;
+using protocol::Maybe;
+
+class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
+ public:
+ V8RuntimeAgentImpl(V8InspectorSessionImpl*, protocol::FrontendChannel*,
+ protocol::DictionaryValue* state);
+ ~V8RuntimeAgentImpl() override;
+ void restore();
+
+ // Part of the protocol.
+ void enable(ErrorString*) override;
+ void disable(ErrorString*) override;
+ void evaluate(const String16& expression, const Maybe<String16>& objectGroup,
+ const Maybe<bool>& includeCommandLineAPI,
+ const Maybe<bool>& silent, const Maybe<int>& executionContextId,
+ const Maybe<bool>& returnByValue,
+ const Maybe<bool>& generatePreview,
+ const Maybe<bool>& userGesture, const Maybe<bool>& awaitPromise,
+ std::unique_ptr<EvaluateCallback>) override;
+ void awaitPromise(const String16& promiseObjectId,
+ const Maybe<bool>& returnByValue,
+ const Maybe<bool>& generatePreview,
+ std::unique_ptr<AwaitPromiseCallback>) override;
+ void callFunctionOn(
+ const String16& objectId, const String16& expression,
+ const Maybe<protocol::Array<protocol::Runtime::CallArgument>>&
+ optionalArguments,
+ const Maybe<bool>& silent, const Maybe<bool>& returnByValue,
+ const Maybe<bool>& generatePreview, const Maybe<bool>& userGesture,
+ const Maybe<bool>& awaitPromise,
+ std::unique_ptr<CallFunctionOnCallback>) override;
+ void releaseObject(ErrorString*, const String16& objectId) override;
+ void getProperties(
+ ErrorString*, const String16& objectId, const Maybe<bool>& ownProperties,
+ const Maybe<bool>& accessorPropertiesOnly,
+ const Maybe<bool>& generatePreview,
+ std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
+ result,
+ Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
+ internalProperties,
+ Maybe<protocol::Runtime::ExceptionDetails>*) override;
+ void releaseObjectGroup(ErrorString*, const String16& objectGroup) override;
+ void runIfWaitingForDebugger(ErrorString*) override;
+ void setCustomObjectFormatterEnabled(ErrorString*, bool) override;
+ void discardConsoleEntries(ErrorString*) override;
+ void compileScript(ErrorString*, const String16& expression,
+ const String16& sourceURL, bool persistScript,
+ const Maybe<int>& executionContextId, Maybe<String16>*,
+ Maybe<protocol::Runtime::ExceptionDetails>*) override;
+ void runScript(const String16&, const Maybe<int>& executionContextId,
+ const Maybe<String16>& objectGroup, const Maybe<bool>& silent,
+ const Maybe<bool>& includeCommandLineAPI,
+ const Maybe<bool>& returnByValue,
+ const Maybe<bool>& generatePreview,
+ const Maybe<bool>& awaitPromise,
+ std::unique_ptr<RunScriptCallback>) override;
+
+ void reset();
+ void reportExecutionContextCreated(InspectedContext*);
+ void reportExecutionContextDestroyed(InspectedContext*);
+ void inspect(std::unique_ptr<protocol::Runtime::RemoteObject> objectToInspect,
+ std::unique_ptr<protocol::DictionaryValue> hints);
+ void messageAdded(V8ConsoleMessage*);
+ bool enabled() const { return m_enabled; }
+
+ private:
+ bool reportMessage(V8ConsoleMessage*, bool generatePreview);
+
+ V8InspectorSessionImpl* m_session;
+ protocol::DictionaryValue* m_state;
+ protocol::Runtime::Frontend m_frontend;
+ V8InspectorImpl* m_inspector;
+ bool m_enabled;
+ protocol::HashMap<String16, std::unique_ptr<v8::Global<v8::Script>>>
+ m_compiledScripts;
+
+ DISALLOW_COPY_AND_ASSIGN(V8RuntimeAgentImpl);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8RUNTIMEAGENTIMPL_H_
diff --git a/deps/v8/src/inspector/v8-schema-agent-impl.cc b/deps/v8/src/inspector/v8-schema-agent-impl.cc
new file mode 100644
index 0000000000..9eed5bdf81
--- /dev/null
+++ b/deps/v8/src/inspector/v8-schema-agent-impl.cc
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-schema-agent-impl.h"
+
+#include "src/inspector/protocol/Protocol.h"
+#include "src/inspector/v8-inspector-session-impl.h"
+
+namespace v8_inspector {
+
+V8SchemaAgentImpl::V8SchemaAgentImpl(V8InspectorSessionImpl* session,
+ protocol::FrontendChannel* frontendChannel,
+ protocol::DictionaryValue* state)
+ : m_session(session), m_frontend(frontendChannel) {}
+
+V8SchemaAgentImpl::~V8SchemaAgentImpl() {}
+
+void V8SchemaAgentImpl::getDomains(
+ ErrorString*,
+ std::unique_ptr<protocol::Array<protocol::Schema::Domain>>* result) {
+ std::vector<std::unique_ptr<protocol::Schema::Domain>> domains =
+ m_session->supportedDomainsImpl();
+ *result = protocol::Array<protocol::Schema::Domain>::create();
+ for (size_t i = 0; i < domains.size(); ++i)
+ (*result)->addItem(std::move(domains[i]));
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-schema-agent-impl.h b/deps/v8/src/inspector/v8-schema-agent-impl.h
new file mode 100644
index 0000000000..6150201f8b
--- /dev/null
+++ b/deps/v8/src/inspector/v8-schema-agent-impl.h
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8SCHEMAAGENTIMPL_H_
+#define V8_INSPECTOR_V8SCHEMAAGENTIMPL_H_
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Schema.h"
+
+namespace v8_inspector {
+
+class V8InspectorSessionImpl;
+
+using protocol::ErrorString;
+
+class V8SchemaAgentImpl : public protocol::Schema::Backend {
+ public:
+ V8SchemaAgentImpl(V8InspectorSessionImpl*, protocol::FrontendChannel*,
+ protocol::DictionaryValue* state);
+ ~V8SchemaAgentImpl() override;
+
+ void getDomains(
+ ErrorString*,
+ std::unique_ptr<protocol::Array<protocol::Schema::Domain>>*) override;
+
+ private:
+ V8InspectorSessionImpl* m_session;
+ protocol::Schema::Frontend m_frontend;
+
+ DISALLOW_COPY_AND_ASSIGN(V8SchemaAgentImpl);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8SCHEMAAGENTIMPL_H_
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
new file mode 100644
index 0000000000..1a38c6dd82
--- /dev/null
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -0,0 +1,281 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-stack-trace-impl.h"
+
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-profiler-agent-impl.h"
+
+#include "include/v8-debug.h"
+#include "include/v8-profiler.h"
+#include "include/v8-version.h"
+
+namespace v8_inspector {
+
+namespace {
+
+static const v8::StackTrace::StackTraceOptions stackTraceOptions =
+ static_cast<v8::StackTrace::StackTraceOptions>(
+ v8::StackTrace::kLineNumber | v8::StackTrace::kColumnOffset |
+ v8::StackTrace::kScriptId | v8::StackTrace::kScriptNameOrSourceURL |
+ v8::StackTrace::kFunctionName);
+
+V8StackTraceImpl::Frame toFrame(v8::Local<v8::StackFrame> frame) {
+ String16 scriptId = String16::fromInteger(frame->GetScriptId());
+ String16 sourceName;
+ v8::Local<v8::String> sourceNameValue(frame->GetScriptNameOrSourceURL());
+ if (!sourceNameValue.IsEmpty())
+ sourceName = toProtocolString(sourceNameValue);
+
+ String16 functionName;
+ v8::Local<v8::String> functionNameValue(frame->GetFunctionName());
+ if (!functionNameValue.IsEmpty())
+ functionName = toProtocolString(functionNameValue);
+
+ int sourceLineNumber = frame->GetLineNumber();
+ int sourceColumn = frame->GetColumn();
+ return V8StackTraceImpl::Frame(functionName, scriptId, sourceName,
+ sourceLineNumber, sourceColumn);
+}
+
+void toFramesVector(v8::Local<v8::StackTrace> stackTrace,
+ std::vector<V8StackTraceImpl::Frame>& frames,
+ size_t maxStackSize, v8::Isolate* isolate) {
+ DCHECK(isolate->InContext());
+ int frameCount = stackTrace->GetFrameCount();
+ if (frameCount > static_cast<int>(maxStackSize))
+ frameCount = static_cast<int>(maxStackSize);
+ for (int i = 0; i < frameCount; i++) {
+ v8::Local<v8::StackFrame> stackFrame = stackTrace->GetFrame(i);
+ frames.push_back(toFrame(stackFrame));
+ }
+}
+
+} // namespace
+
+V8StackTraceImpl::Frame::Frame()
+ : m_functionName("undefined"),
+ m_scriptId(""),
+ m_scriptName("undefined"),
+ m_lineNumber(0),
+ m_columnNumber(0) {}
+
+V8StackTraceImpl::Frame::Frame(const String16& functionName,
+ const String16& scriptId,
+ const String16& scriptName, int lineNumber,
+ int column)
+ : m_functionName(functionName),
+ m_scriptId(scriptId),
+ m_scriptName(scriptName),
+ m_lineNumber(lineNumber),
+ m_columnNumber(column) {
+ DCHECK(m_lineNumber != v8::Message::kNoLineNumberInfo);
+ DCHECK(m_columnNumber != v8::Message::kNoColumnInfo);
+}
+
+V8StackTraceImpl::Frame::~Frame() {}
+
+// buildInspectorObject() and SourceLocation's toTracedValue() should set the
+// same fields.
+// If either of them is modified, the other should be also modified.
+std::unique_ptr<protocol::Runtime::CallFrame>
+V8StackTraceImpl::Frame::buildInspectorObject() const {
+ return protocol::Runtime::CallFrame::create()
+ .setFunctionName(m_functionName)
+ .setScriptId(m_scriptId)
+ .setUrl(m_scriptName)
+ .setLineNumber(m_lineNumber - 1)
+ .setColumnNumber(m_columnNumber - 1)
+ .build();
+}
+
+V8StackTraceImpl::Frame V8StackTraceImpl::Frame::clone() const {
+ return Frame(m_functionName, m_scriptId, m_scriptName, m_lineNumber,
+ m_columnNumber);
+}
+
+// static
+void V8StackTraceImpl::setCaptureStackTraceForUncaughtExceptions(
+ v8::Isolate* isolate, bool capture) {
+ isolate->SetCaptureStackTraceForUncaughtExceptions(
+ capture, V8StackTraceImpl::maxCallStackSizeToCapture, stackTraceOptions);
+}
+
+// static
+std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::create(
+ V8Debugger* debugger, int contextGroupId,
+ v8::Local<v8::StackTrace> stackTrace, size_t maxStackSize,
+ const String16& description) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
+ std::vector<V8StackTraceImpl::Frame> frames;
+ if (!stackTrace.IsEmpty())
+ toFramesVector(stackTrace, frames, maxStackSize, isolate);
+
+ int maxAsyncCallChainDepth = 1;
+ V8StackTraceImpl* asyncCallChain = nullptr;
+ if (debugger && maxStackSize > 1) {
+ asyncCallChain = debugger->currentAsyncCallChain();
+ maxAsyncCallChainDepth = debugger->maxAsyncCallChainDepth();
+ }
+ // Do not accidentally append async call chain from another group. This should
+ // not
+ // happen if we have proper instrumentation, but let's double-check to be
+ // safe.
+ if (contextGroupId && asyncCallChain && asyncCallChain->m_contextGroupId &&
+ asyncCallChain->m_contextGroupId != contextGroupId) {
+ asyncCallChain = nullptr;
+ maxAsyncCallChainDepth = 1;
+ }
+
+ // Only the top stack in the chain may be empty, so ensure that second stack
+ // is non-empty (it's the top of appended chain).
+ if (asyncCallChain && asyncCallChain->isEmpty())
+ asyncCallChain = asyncCallChain->m_parent.get();
+
+ if (stackTrace.IsEmpty() && !asyncCallChain) return nullptr;
+
+ std::unique_ptr<V8StackTraceImpl> result(new V8StackTraceImpl(
+ contextGroupId, description, frames,
+ asyncCallChain ? asyncCallChain->cloneImpl() : nullptr));
+
+ // Crop to not exceed maxAsyncCallChainDepth.
+ V8StackTraceImpl* deepest = result.get();
+ while (deepest && maxAsyncCallChainDepth) {
+ deepest = deepest->m_parent.get();
+ maxAsyncCallChainDepth--;
+ }
+ if (deepest) deepest->m_parent.reset();
+
+ return result;
+}
+
+// static
+std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::capture(
+ V8Debugger* debugger, int contextGroupId, size_t maxStackSize,
+ const String16& description) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handleScope(isolate);
+ v8::Local<v8::StackTrace> stackTrace;
+ if (isolate->InContext()) {
+ if (debugger) {
+ V8InspectorImpl* inspector = debugger->inspector();
+ V8ProfilerAgentImpl* profilerAgent =
+ inspector->enabledProfilerAgentForGroup(contextGroupId);
+ if (profilerAgent) profilerAgent->collectSample();
+ }
+ stackTrace = v8::StackTrace::CurrentStackTrace(
+ isolate, static_cast<int>(maxStackSize), stackTraceOptions);
+ }
+ return V8StackTraceImpl::create(debugger, contextGroupId, stackTrace,
+ maxStackSize, description);
+}
+
+std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::cloneImpl() {
+ std::vector<Frame> framesCopy(m_frames);
+ return wrapUnique(
+ new V8StackTraceImpl(m_contextGroupId, m_description, framesCopy,
+ m_parent ? m_parent->cloneImpl() : nullptr));
+}
+
+std::unique_ptr<V8StackTrace> V8StackTraceImpl::clone() {
+ std::vector<Frame> frames;
+ for (size_t i = 0; i < m_frames.size(); i++)
+ frames.push_back(m_frames.at(i).clone());
+ return wrapUnique(
+ new V8StackTraceImpl(m_contextGroupId, m_description, frames, nullptr));
+}
+
+V8StackTraceImpl::V8StackTraceImpl(int contextGroupId,
+ const String16& description,
+ std::vector<Frame>& frames,
+ std::unique_ptr<V8StackTraceImpl> parent)
+ : m_contextGroupId(contextGroupId),
+ m_description(description),
+ m_parent(std::move(parent)) {
+ m_frames.swap(frames);
+}
+
+V8StackTraceImpl::~V8StackTraceImpl() {}
+
+StringView V8StackTraceImpl::topSourceURL() const {
+ DCHECK(m_frames.size());
+ return toStringView(m_frames[0].m_scriptName);
+}
+
+int V8StackTraceImpl::topLineNumber() const {
+ DCHECK(m_frames.size());
+ return m_frames[0].m_lineNumber;
+}
+
+int V8StackTraceImpl::topColumnNumber() const {
+ DCHECK(m_frames.size());
+ return m_frames[0].m_columnNumber;
+}
+
+StringView V8StackTraceImpl::topFunctionName() const {
+ DCHECK(m_frames.size());
+ return toStringView(m_frames[0].m_functionName);
+}
+
+StringView V8StackTraceImpl::topScriptId() const {
+ DCHECK(m_frames.size());
+ return toStringView(m_frames[0].m_scriptId);
+}
+
+std::unique_ptr<protocol::Runtime::StackTrace>
+V8StackTraceImpl::buildInspectorObjectImpl() const {
+ std::unique_ptr<protocol::Array<protocol::Runtime::CallFrame>> frames =
+ protocol::Array<protocol::Runtime::CallFrame>::create();
+ for (size_t i = 0; i < m_frames.size(); i++)
+ frames->addItem(m_frames.at(i).buildInspectorObject());
+
+ std::unique_ptr<protocol::Runtime::StackTrace> stackTrace =
+ protocol::Runtime::StackTrace::create()
+ .setCallFrames(std::move(frames))
+ .build();
+ if (!m_description.isEmpty()) stackTrace->setDescription(m_description);
+ if (m_parent) stackTrace->setParent(m_parent->buildInspectorObjectImpl());
+ return stackTrace;
+}
+
+std::unique_ptr<protocol::Runtime::StackTrace>
+V8StackTraceImpl::buildInspectorObjectForTail(V8Debugger* debugger) const {
+ v8::HandleScope handleScope(v8::Isolate::GetCurrent());
+ // Next call collapses possible empty stack and ensures
+ // maxAsyncCallChainDepth.
+ std::unique_ptr<V8StackTraceImpl> fullChain = V8StackTraceImpl::create(
+ debugger, m_contextGroupId, v8::Local<v8::StackTrace>(),
+ V8StackTraceImpl::maxCallStackSizeToCapture);
+ if (!fullChain || !fullChain->m_parent) return nullptr;
+ return fullChain->m_parent->buildInspectorObjectImpl();
+}
+
+std::unique_ptr<protocol::Runtime::API::StackTrace>
+V8StackTraceImpl::buildInspectorObject() const {
+ return buildInspectorObjectImpl();
+}
+
+std::unique_ptr<StringBuffer> V8StackTraceImpl::toString() const {
+ String16Builder stackTrace;
+ for (size_t i = 0; i < m_frames.size(); ++i) {
+ const Frame& frame = m_frames[i];
+ stackTrace.append("\n at " + (frame.functionName().length()
+ ? frame.functionName()
+ : "(anonymous function)"));
+ stackTrace.append(" (");
+ stackTrace.append(frame.sourceURL());
+ stackTrace.append(':');
+ stackTrace.append(String16::fromInteger(frame.lineNumber()));
+ stackTrace.append(':');
+ stackTrace.append(String16::fromInteger(frame.columnNumber()));
+ stackTrace.append(')');
+ }
+ String16 string = stackTrace.toString();
+ return StringBufferImpl::adopt(string);
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
new file mode 100644
index 0000000000..f0a452e939
--- /dev/null
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -0,0 +1,99 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8STACKTRACEIMPL_H_
+#define V8_INSPECTOR_V8STACKTRACEIMPL_H_
+
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/inspector/protocol/Forward.h"
+#include "src/inspector/protocol/Runtime.h"
+
+#include "include/v8-inspector.h"
+
+namespace v8_inspector {
+
+class TracedValue;
+class V8Debugger;
+
+// Note: async stack trace may have empty top stack with non-empty tail to
+// indicate
+// that current native-only state had some async story.
+// On the other hand, any non-top async stack is guaranteed to be non-empty.
+class V8StackTraceImpl final : public V8StackTrace {
+ public:
+ static const size_t maxCallStackSizeToCapture = 200;
+
+ class Frame {
+ public:
+ Frame();
+ Frame(const String16& functionName, const String16& scriptId,
+ const String16& scriptName, int lineNumber, int column = 0);
+ ~Frame();
+
+ const String16& functionName() const { return m_functionName; }
+ const String16& scriptId() const { return m_scriptId; }
+ const String16& sourceURL() const { return m_scriptName; }
+ int lineNumber() const { return m_lineNumber; }
+ int columnNumber() const { return m_columnNumber; }
+ Frame clone() const;
+
+ private:
+ friend class V8StackTraceImpl;
+ std::unique_ptr<protocol::Runtime::CallFrame> buildInspectorObject() const;
+ void toTracedValue(TracedValue*) const;
+
+ String16 m_functionName;
+ String16 m_scriptId;
+ String16 m_scriptName;
+ int m_lineNumber;
+ int m_columnNumber;
+ };
+
+ static void setCaptureStackTraceForUncaughtExceptions(v8::Isolate*,
+ bool capture);
+ static std::unique_ptr<V8StackTraceImpl> create(
+ V8Debugger*, int contextGroupId, v8::Local<v8::StackTrace>,
+ size_t maxStackSize, const String16& description = String16());
+ static std::unique_ptr<V8StackTraceImpl> capture(
+ V8Debugger*, int contextGroupId, size_t maxStackSize,
+ const String16& description = String16());
+
+ // This method drops the async chain. Use cloneImpl() instead.
+ std::unique_ptr<V8StackTrace> clone() override;
+ std::unique_ptr<V8StackTraceImpl> cloneImpl();
+ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectForTail(
+ V8Debugger*) const;
+ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectImpl()
+ const;
+ ~V8StackTraceImpl() override;
+
+ // V8StackTrace implementation.
+ bool isEmpty() const override { return !m_frames.size(); };
+ StringView topSourceURL() const override;
+ int topLineNumber() const override;
+ int topColumnNumber() const override;
+ StringView topScriptId() const override;
+ StringView topFunctionName() const override;
+ std::unique_ptr<protocol::Runtime::API::StackTrace> buildInspectorObject()
+ const override;
+ std::unique_ptr<StringBuffer> toString() const override;
+
+ private:
+ V8StackTraceImpl(int contextGroupId, const String16& description,
+ std::vector<Frame>& frames,
+ std::unique_ptr<V8StackTraceImpl> parent);
+
+ int m_contextGroupId;
+ String16 m_description;
+ std::vector<Frame> m_frames;
+ std::unique_ptr<V8StackTraceImpl> m_parent;
+
+ DISALLOW_COPY_AND_ASSIGN(V8StackTraceImpl);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8STACKTRACEIMPL_H_
diff --git a/deps/v8/src/inspector/v8-value-copier.cc b/deps/v8/src/inspector/v8-value-copier.cc
new file mode 100644
index 0000000000..09d86b7b98
--- /dev/null
+++ b/deps/v8/src/inspector/v8-value-copier.cc
@@ -0,0 +1,110 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-value-copier.h"
+
+namespace v8_inspector {
+
+namespace {
+
+static int kMaxDepth = 20;
+static int kMaxCalls = 1000;
+
+class V8ValueCopier {
+ public:
+ v8::MaybeLocal<v8::Value> copy(v8::Local<v8::Value> value, int depth) {
+ if (++m_calls > kMaxCalls || depth > kMaxDepth)
+ return v8::MaybeLocal<v8::Value>();
+
+ if (value.IsEmpty()) return v8::MaybeLocal<v8::Value>();
+ if (value->IsNull() || value->IsUndefined() || value->IsBoolean() ||
+ value->IsString() || value->IsNumber())
+ return value;
+ if (!value->IsObject()) return v8::MaybeLocal<v8::Value>();
+ v8::Local<v8::Object> object = value.As<v8::Object>();
+ if (object->CreationContext() != m_from) return value;
+
+ if (object->IsArray()) {
+ v8::Local<v8::Array> array = object.As<v8::Array>();
+ v8::Local<v8::Array> result = v8::Array::New(m_isolate, array->Length());
+ if (!result->SetPrototype(m_to, v8::Null(m_isolate)).FromMaybe(false))
+ return v8::MaybeLocal<v8::Value>();
+ for (uint32_t i = 0; i < array->Length(); ++i) {
+ v8::Local<v8::Value> item;
+ if (!array->Get(m_from, i).ToLocal(&item))
+ return v8::MaybeLocal<v8::Value>();
+ v8::Local<v8::Value> copied;
+ if (!copy(item, depth + 1).ToLocal(&copied))
+ return v8::MaybeLocal<v8::Value>();
+ if (!createDataProperty(m_to, result, i, copied).FromMaybe(false))
+ return v8::MaybeLocal<v8::Value>();
+ }
+ return result;
+ }
+
+ v8::Local<v8::Object> result = v8::Object::New(m_isolate);
+ if (!result->SetPrototype(m_to, v8::Null(m_isolate)).FromMaybe(false))
+ return v8::MaybeLocal<v8::Value>();
+ v8::Local<v8::Array> properties;
+ if (!object->GetOwnPropertyNames(m_from).ToLocal(&properties))
+ return v8::MaybeLocal<v8::Value>();
+ for (uint32_t i = 0; i < properties->Length(); ++i) {
+ v8::Local<v8::Value> name;
+ if (!properties->Get(m_from, i).ToLocal(&name) || !name->IsString())
+ return v8::MaybeLocal<v8::Value>();
+ v8::Local<v8::Value> property;
+ if (!object->Get(m_from, name).ToLocal(&property))
+ return v8::MaybeLocal<v8::Value>();
+ v8::Local<v8::Value> copied;
+ if (!copy(property, depth + 1).ToLocal(&copied))
+ return v8::MaybeLocal<v8::Value>();
+ if (!createDataProperty(m_to, result, v8::Local<v8::String>::Cast(name),
+ copied)
+ .FromMaybe(false))
+ return v8::MaybeLocal<v8::Value>();
+ }
+ return result;
+ }
+
+ v8::Isolate* m_isolate;
+ v8::Local<v8::Context> m_from;
+ v8::Local<v8::Context> m_to;
+ int m_calls;
+};
+
+} // namespace
+
+v8::MaybeLocal<v8::Value> copyValueFromDebuggerContext(
+ v8::Isolate* isolate, v8::Local<v8::Context> debuggerContext,
+ v8::Local<v8::Context> toContext, v8::Local<v8::Value> value) {
+ V8ValueCopier copier;
+ copier.m_isolate = isolate;
+ copier.m_from = debuggerContext;
+ copier.m_to = toContext;
+ copier.m_calls = 0;
+ return copier.copy(value, 0);
+}
+
+v8::Maybe<bool> createDataProperty(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object,
+ v8::Local<v8::Name> key,
+ v8::Local<v8::Value> value) {
+ v8::TryCatch tryCatch(context->GetIsolate());
+ v8::Isolate::DisallowJavascriptExecutionScope throwJs(
+ context->GetIsolate(),
+ v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
+ return object->CreateDataProperty(context, key, value);
+}
+
+v8::Maybe<bool> createDataProperty(v8::Local<v8::Context> context,
+ v8::Local<v8::Array> array, int index,
+ v8::Local<v8::Value> value) {
+ v8::TryCatch tryCatch(context->GetIsolate());
+ v8::Isolate::DisallowJavascriptExecutionScope throwJs(
+ context->GetIsolate(),
+ v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
+ return array->CreateDataProperty(context, index, value);
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-value-copier.h b/deps/v8/src/inspector/v8-value-copier.h
new file mode 100644
index 0000000000..c24a5648a2
--- /dev/null
+++ b/deps/v8/src/inspector/v8-value-copier.h
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8VALUECOPIER_H_
+#define V8_INSPECTOR_V8VALUECOPIER_H_
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+v8::MaybeLocal<v8::Value> copyValueFromDebuggerContext(
+ v8::Isolate*, v8::Local<v8::Context> debuggerContext,
+ v8::Local<v8::Context> toContext, v8::Local<v8::Value>);
+v8::Maybe<bool> createDataProperty(v8::Local<v8::Context>,
+ v8::Local<v8::Object>,
+ v8::Local<v8::Name> key,
+ v8::Local<v8::Value>);
+v8::Maybe<bool> createDataProperty(v8::Local<v8::Context>, v8::Local<v8::Array>,
+ int index, v8::Local<v8::Value>);
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8VALUECOPIER_H_
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index a16cae7d61..2628b9fb6f 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -7,41 +7,6 @@
namespace v8 {
namespace internal {
-namespace {
-// Constructors for common combined semantic and representation types.
-Type* SmiType(Zone* zone) {
- return Type::Intersect(Type::SignedSmall(), Type::TaggedSigned(), zone);
-}
-
-
-Type* UntaggedIntegral32(Zone* zone) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone);
-}
-
-
-Type* AnyTagged(Zone* zone) {
- return Type::Intersect(
- Type::Any(),
- Type::Union(Type::TaggedPointer(), Type::TaggedSigned(), zone), zone);
-}
-
-
-Type* ExternalPointer(Zone* zone) {
- return Type::Intersect(Type::Internal(), Type::UntaggedPointer(), zone);
-}
-} // namespace
-
-FunctionType* CallInterfaceDescriptor::BuildDefaultFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), parameter_count, zone)
- ->AsFunction();
- while (parameter_count-- != 0) {
- function->InitParameter(parameter_count, AnyTagged(zone));
- }
- return function;
-}
void CallInterfaceDescriptorData::InitializePlatformSpecific(
int register_parameter_count, const Register* registers,
@@ -56,6 +21,22 @@ void CallInterfaceDescriptorData::InitializePlatformSpecific(
}
}
+void CallInterfaceDescriptorData::InitializePlatformIndependent(
+ int parameter_count, int extra_parameter_count,
+ const MachineType* machine_types) {
+ // InterfaceDescriptor owns a copy of the MachineType array.
+ // We only care about parameters, not receiver and result.
+ param_count_ = parameter_count + extra_parameter_count;
+ machine_types_.reset(NewArray<MachineType>(param_count_));
+ for (int i = 0; i < param_count_; i++) {
+ if (machine_types == NULL || i >= parameter_count) {
+ machine_types_[i] = MachineType::AnyTagged();
+ } else {
+ machine_types_[i] = machine_types[i];
+ }
+ }
+}
+
const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) const {
CallInterfaceDescriptorData* start = isolate->call_descriptor_data(0);
size_t index = data_ - start;
@@ -79,15 +60,12 @@ void VoidDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr);
}
-FunctionType*
-FastNewFunctionContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone));
- function->InitParameter(1, UntaggedIntegral32(zone));
- return function;
+void FastNewFunctionContextDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::Int32()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
void FastNewFunctionContextDescriptor::InitializePlatformSpecific(
@@ -96,33 +74,28 @@ void FastNewFunctionContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kReceiver, AnyTagged(zone));
- function->InitParameter(kName, AnyTagged(zone));
- function->InitParameter(kSlot, SmiType(zone));
- return function;
+void LoadDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kReceiver, kName, kSlot
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged(),
+ MachineType::TaggedSigned()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
-
void LoadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-FunctionType* LoadGlobalDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kSlot, SmiType(zone));
- return function;
+void LoadGlobalDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kSlot
+ MachineType machine_types[] = {MachineType::TaggedSigned()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
void LoadGlobalDescriptor::InitializePlatformSpecific(
@@ -131,16 +104,13 @@ void LoadGlobalDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-FunctionType*
-LoadGlobalWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kSlot, SmiType(zone));
- function->InitParameter(kVector, AnyTagged(zone));
- return function;
+void LoadGlobalWithVectorDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kSlot, kVector
+ MachineType machine_types[] = {MachineType::TaggedSigned(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
void LoadGlobalWithVectorDescriptor::InitializePlatformSpecific(
@@ -150,76 +120,77 @@ void LoadGlobalWithVectorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-FunctionType* StoreDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kReceiver, AnyTagged(zone));
- function->InitParameter(kName, AnyTagged(zone));
- function->InitParameter(kValue, AnyTagged(zone));
- function->InitParameter(kSlot, SmiType(zone));
- return function;
+void StoreDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kReceiver, kName, kValue, kSlot
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::TaggedSigned()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
void StoreDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
SlotRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
+ int len = arraysize(registers) - kStackArgumentsCount;
+ data->InitializePlatformSpecific(len, registers);
+}
void StoreTransitionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- MapRegister()};
-
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ Register registers[] = {
+ ReceiverRegister(), NameRegister(), MapRegister(),
+ ValueRegister(), SlotRegister(), VectorRegister(),
+ };
+ int len = arraysize(registers) - kStackArgumentsCount;
+ data->InitializePlatformSpecific(len, registers);
}
-
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
+void StoreTransitionDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- if (SlotRegister().is(no_reg)) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- MapRegister(), VectorRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
- } else {
- Register registers[] = {ReceiverRegister(), NameRegister(),
- ValueRegister(), MapRegister(),
- SlotRegister(), VectorRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
- }
+ // kReceiver, kName, kMap, kValue, kSlot, kVector
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::TaggedSigned(), MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
-FunctionType*
-StoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kReceiver, AnyTagged(zone));
- function->InitParameter(kName, AnyTagged(zone));
- function->InitParameter(kValue, AnyTagged(zone));
- function->InitParameter(kMap, AnyTagged(zone));
- return function;
+void StoreNamedTransitionDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kReceiver, kFieldOffset, kMap, kValue, kSlot, kVector, kName
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::TaggedSigned(),
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::TaggedSigned(), MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
-FunctionType*
-StoreGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kSlot, UntaggedIntegral32(zone));
- function->InitParameter(kValue, AnyTagged(zone));
- return function;
+void StoreNamedTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ ReceiverRegister(), FieldOffsetRegister(), MapRegister(),
+ ValueRegister(), SlotRegister(), VectorRegister(),
+ NameRegister(),
+ };
+ int len = arraysize(registers) - kStackArgumentsCount;
+ data->InitializePlatformSpecific(len, registers);
}
+void StoreGlobalViaContextDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kSlot, kValue
+ MachineType machine_types[] = {MachineType::Int32(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
void StoreGlobalViaContextDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -252,18 +223,14 @@ void MathPowIntegerDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-FunctionType*
-LoadWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kReceiver, AnyTagged(zone));
- function->InitParameter(kName, AnyTagged(zone));
- function->InitParameter(kSlot, SmiType(zone));
- function->InitParameter(kVector, AnyTagged(zone));
- return function;
+void LoadWithVectorDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kReceiver, kName, kSlot, kVector
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::TaggedSigned(), MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
@@ -274,63 +241,33 @@ void LoadWithVectorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-FunctionType*
-VectorStoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- bool has_slot = !VectorStoreTransitionDescriptor::SlotRegister().is(no_reg);
- int arg_count = has_slot ? 6 : 5;
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), arg_count, zone)
- ->AsFunction();
- int index = 0;
- // TODO(ishell): use ParameterIndices here
- function->InitParameter(index++, AnyTagged(zone)); // receiver
- function->InitParameter(index++, AnyTagged(zone)); // name
- function->InitParameter(index++, AnyTagged(zone)); // value
- function->InitParameter(index++, AnyTagged(zone)); // map
- if (has_slot) {
- function->InitParameter(index++, SmiType(zone)); // slot
- }
- function->InitParameter(index++, AnyTagged(zone)); // vector
- return function;
-}
-
-FunctionType*
-StoreWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kReceiver, AnyTagged(zone));
- function->InitParameter(kName, AnyTagged(zone));
- function->InitParameter(kValue, AnyTagged(zone));
- function->InitParameter(kSlot, SmiType(zone));
- function->InitParameter(kVector, AnyTagged(zone));
- return function;
+void StoreWithVectorDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kReceiver, kName, kValue, kSlot, kVector
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::TaggedSigned(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
void StoreWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
SlotRegister(), VectorRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ int len = arraysize(registers) - kStackArgumentsCount;
+ data->InitializePlatformSpecific(len, registers);
}
-FunctionType*
-BinaryOpWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- DCHECK_EQ(parameter_count, kParameterCount);
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kLeft, AnyTagged(zone));
- function->InitParameter(kRight, AnyTagged(zone));
- function->InitParameter(kSlot, UntaggedIntegral32(zone));
- function->InitParameter(kVector, AnyTagged(zone));
- return function;
+void BinaryOpWithVectorDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kLeft, kRight, kSlot, kVector
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::Int32(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
const Register ApiGetterDescriptor::ReceiverRegister() {
@@ -349,291 +286,204 @@ void ContextOnlyDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr);
}
-CallInterfaceDescriptor OnStackArgsDescriptorBase::ForArgs(
- Isolate* isolate, int parameter_count) {
- switch (parameter_count) {
- case 1:
- return OnStackWith1ArgsDescriptor(isolate);
- case 2:
- return OnStackWith2ArgsDescriptor(isolate);
- case 3:
- return OnStackWith3ArgsDescriptor(isolate);
- case 4:
- return OnStackWith4ArgsDescriptor(isolate);
- case 5:
- return OnStackWith5ArgsDescriptor(isolate);
- case 6:
- return OnStackWith6ArgsDescriptor(isolate);
- case 7:
- return OnStackWith7ArgsDescriptor(isolate);
- default:
- UNREACHABLE();
- return VoidDescriptor(isolate);
- }
+void GrowArrayElementsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ObjectRegister(), KeyRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-FunctionType*
-OnStackArgsDescriptorBase::BuildCallInterfaceDescriptorFunctionTypeWithArg(
- Isolate* isolate, int register_parameter_count, int parameter_count) {
- DCHECK_EQ(0, register_parameter_count);
- DCHECK_GT(parameter_count, 0);
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), AnyTagged(zone), parameter_count, zone)
- ->AsFunction();
- for (int i = 0; i < parameter_count; i++) {
- function->InitParameter(i, AnyTagged(zone));
- }
- return function;
+void VarArgFunctionDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kActualArgumentsCount
+ MachineType machine_types[] = {MachineType::Int32()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
-void OnStackArgsDescriptorBase::InitializePlatformSpecific(
+void FastCloneRegExpDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
+ // kClosure, kLiteralIndex, kPattern, kFlags
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::TaggedSigned(),
+ MachineType::AnyTagged(), MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
-void GrowArrayElementsDescriptor::InitializePlatformSpecific(
+void FastCloneShallowArrayDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ObjectRegister(), KeyRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // kClosure, kLiteralIndex, kConstantElements
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::TaggedSigned(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
-FunctionType*
-VarArgFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), AnyTagged(zone), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
- return function;
-}
-
-FunctionType*
-FastCloneRegExpDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kClosure, AnyTagged(zone));
- function->InitParameter(kLiteralIndex, SmiType(zone));
- function->InitParameter(kPattern, AnyTagged(zone));
- function->InitParameter(kFlags, AnyTagged(zone));
- return function;
-}
-
-FunctionType*
-FastCloneShallowArrayDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kClosure, AnyTagged(zone));
- function->InitParameter(kLiteralIndex, SmiType(zone));
- function->InitParameter(kConstantElements, AnyTagged(zone));
- return function;
-}
-
-FunctionType*
-CreateAllocationSiteDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kVector, AnyTagged(zone));
- function->InitParameter(kSlot, SmiType(zone));
- return function;
-}
-
-FunctionType*
-CreateWeakCellDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kVector, AnyTagged(zone));
- function->InitParameter(kSlot, SmiType(zone));
- function->InitParameter(kValue, AnyTagged(zone));
- return function;
-}
-
-FunctionType*
-CallTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kFunction, AnyTagged(zone));
- function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
- return function;
-}
-
-FunctionType* ConstructStubDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kFunction, AnyTagged(zone));
- function->InitParameter(kNewTarget, AnyTagged(zone));
- function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
- function->InitParameter(kAllocationSite, AnyTagged(zone));
- return function;
-}
-
-FunctionType*
-ConstructTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kFunction, AnyTagged(zone));
- function->InitParameter(kNewTarget, AnyTagged(zone));
- function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
- return function;
-}
-
-FunctionType*
-CallFunctionWithFeedbackDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kFunction, Type::Receiver());
- function->InitParameter(kSlot, SmiType(zone));
- return function;
-}
-
-FunctionType* CallFunctionWithFeedbackAndVectorDescriptor::
- BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
- int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kFunction, Type::Receiver());
- function->InitParameter(kSlot, SmiType(zone));
- function->InitParameter(kVector, AnyTagged(zone));
- return function;
-}
-
-FunctionType*
-ArrayNoArgumentConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kFunction, Type::Receiver());
- function->InitParameter(kAllocationSite, AnyTagged(zone));
- function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
- function->InitParameter(kFunctionParameter, AnyTagged(zone));
- return function;
-}
-
-FunctionType* ArraySingleArgumentConstructorDescriptor::
- BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
- int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kFunction, Type::Receiver());
- function->InitParameter(kAllocationSite, AnyTagged(zone));
- function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
- function->InitParameter(kFunctionParameter, AnyTagged(zone));
- function->InitParameter(kArraySizeSmiParameter, AnyTagged(zone));
- return function;
-}
-
-FunctionType*
-ArrayNArgumentsConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kFunction, Type::Receiver());
- function->InitParameter(kAllocationSite, AnyTagged(zone));
- function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
- return function;
-}
-
-FunctionType*
-ArgumentAdaptorDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kFunction, Type::Receiver());
- function->InitParameter(kNewTarget, AnyTagged(zone));
- function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
- function->InitParameter(kExpectedArgumentsCount, UntaggedIntegral32(zone));
- return function;
-}
-
-CallInterfaceDescriptor ApiCallbackDescriptorBase::ForArgs(Isolate* isolate,
- int argc) {
- switch (argc) {
- case 0:
- return ApiCallbackWith0ArgsDescriptor(isolate);
- case 1:
- return ApiCallbackWith1ArgsDescriptor(isolate);
- case 2:
- return ApiCallbackWith2ArgsDescriptor(isolate);
- case 3:
- return ApiCallbackWith3ArgsDescriptor(isolate);
- case 4:
- return ApiCallbackWith4ArgsDescriptor(isolate);
- case 5:
- return ApiCallbackWith5ArgsDescriptor(isolate);
- case 6:
- return ApiCallbackWith6ArgsDescriptor(isolate);
- case 7:
- return ApiCallbackWith7ArgsDescriptor(isolate);
- default:
- UNREACHABLE();
- return VoidDescriptor(isolate);
- }
+void CreateAllocationSiteDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kVector, kSlot
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::TaggedSigned()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
-FunctionType*
-ApiCallbackDescriptorBase::BuildCallInterfaceDescriptorFunctionTypeWithArg(
- Isolate* isolate, int parameter_count, int argc) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function = Type::Function(AnyTagged(zone), Type::Undefined(),
- kParameterCount + argc, zone)
- ->AsFunction();
- function->InitParameter(kFunction, AnyTagged(zone));
- function->InitParameter(kCallData, AnyTagged(zone));
- function->InitParameter(kHolder, AnyTagged(zone));
- function->InitParameter(kApiFunctionAddress, ExternalPointer(zone));
- for (int i = 0; i < argc; i++) {
- function->InitParameter(i, AnyTagged(zone));
- }
- return function;
-}
-
-FunctionType*
-InterpreterDispatchDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int parameter_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
- ->AsFunction();
- function->InitParameter(kAccumulator, AnyTagged(zone));
- function->InitParameter(kBytecodeOffset, UntaggedIntegral32(zone));
- function->InitParameter(kBytecodeArray, AnyTagged(zone));
- function->InitParameter(kDispatchTable, AnyTagged(zone));
- return function;
+void CreateWeakCellDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kVector, kSlot, kValue
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::TaggedSigned(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void CallTrampolineDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kFunction, kActualArgumentsCount
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::Int32()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void ConstructStubDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kFunction, kNewTarget, kActualArgumentsCount, kAllocationSite
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::Int32(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void ConstructTrampolineDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kFunction, kNewTarget, kActualArgumentsCount
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::Int32()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void CallFunctionWithFeedbackDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kFunction, kSlot
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::TaggedSigned()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kFunction, kSlot, kVector
+ MachineType machine_types[] = {MachineType::TaggedPointer(),
+ MachineType::TaggedSigned(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kFunction, kAllocationSite, kActualArgumentsCount, kFunctionParameter
+ MachineType machine_types[] = {MachineType::TaggedPointer(),
+ MachineType::AnyTagged(), MachineType::Int32(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kFunction, kAllocationSite, kActualArgumentsCount, kFunctionParameter,
+ // kArraySizeSmiParameter
+ MachineType machine_types[] = {
+ MachineType::TaggedPointer(), MachineType::AnyTagged(),
+ MachineType::Int32(), MachineType::AnyTagged(), MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kFunction, kAllocationSite, kActualArgumentsCount
+ MachineType machine_types[] = {MachineType::TaggedPointer(),
+ MachineType::AnyTagged(),
+ MachineType::Int32()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void ArgumentAdaptorDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kFunction, kNewTarget, kActualArgumentsCount, kExpectedArgumentsCount
+ MachineType machine_types[] = {MachineType::TaggedPointer(),
+ MachineType::AnyTagged(), MachineType::Int32(),
+ MachineType::Int32()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void ApiCallbackDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kFunction, kCallData, kHolder, kApiFunctionAddress
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::Pointer()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void InterpreterDispatchDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kAccumulator, kBytecodeOffset, kBytecodeArray, kDispatchTable
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::IntPtr(), MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kNumberOfArguments, kFirstArgument, kFunction
+ MachineType machine_types[] = {MachineType::Int32(), MachineType::Pointer(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kNumberOfArguments, kNewTarget, kConstructor, kFeedbackElement,
+ // kFirstArgument
+ MachineType machine_types[] = {
+ MachineType::Int32(), MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::Pointer()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::
+ InitializePlatformIndependent(CallInterfaceDescriptorData* data) {
+ // kNumberOfArguments, kFunction, kFeedbackElement, kFirstArgument
+ MachineType machine_types[] = {MachineType::Int32(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(),
+ MachineType::Pointer()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void InterpreterCEntryDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kNumberOfArguments, kFirstArgument, kFunctionEntry
+ MachineType machine_types[] = {MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Pointer()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
}
} // namespace internal
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index af59bdb121..09dc377338 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -15,137 +15,130 @@ namespace internal {
class PlatformInterfaceDescriptor;
-#define INTERFACE_DESCRIPTOR_LIST(V) \
- V(Void) \
- V(ContextOnly) \
- V(OnStackWith1Args) \
- V(OnStackWith2Args) \
- V(OnStackWith3Args) \
- V(OnStackWith4Args) \
- V(OnStackWith5Args) \
- V(OnStackWith6Args) \
- V(OnStackWith7Args) \
- V(Load) \
- V(LoadWithVector) \
- V(LoadGlobal) \
- V(LoadGlobalWithVector) \
- V(Store) \
- V(StoreWithVector) \
- V(StoreTransition) \
- V(VectorStoreTransition) \
- V(VarArgFunction) \
- V(FastNewClosure) \
- V(FastNewFunctionContext) \
- V(FastNewObject) \
- V(FastNewRestParameter) \
- V(FastNewSloppyArguments) \
- V(FastNewStrictArguments) \
- V(TypeConversion) \
- V(Typeof) \
- V(FastCloneRegExp) \
- V(FastCloneShallowArray) \
- V(FastCloneShallowObject) \
- V(CreateAllocationSite) \
- V(CreateWeakCell) \
- V(CallFunction) \
- V(CallFunctionWithFeedback) \
- V(CallFunctionWithFeedbackAndVector) \
- V(CallConstruct) \
- V(CallTrampoline) \
- V(ConstructStub) \
- V(ConstructTrampoline) \
- V(RegExpConstructResult) \
- V(CopyFastSmiOrObjectElements) \
- V(TransitionElementsKind) \
- V(AllocateHeapNumber) \
- V(AllocateFloat32x4) \
- V(AllocateInt32x4) \
- V(AllocateUint32x4) \
- V(AllocateBool32x4) \
- V(AllocateInt16x8) \
- V(AllocateUint16x8) \
- V(AllocateBool16x8) \
- V(AllocateInt8x16) \
- V(AllocateUint8x16) \
- V(AllocateBool8x16) \
- V(ArrayNoArgumentConstructor) \
- V(ArraySingleArgumentConstructor) \
- V(ArrayNArgumentsConstructor) \
- V(Compare) \
- V(BinaryOp) \
- V(BinaryOpWithAllocationSite) \
- V(BinaryOpWithVector) \
- V(CountOp) \
- V(StringAdd) \
- V(StringCompare) \
- V(Keyed) \
- V(Named) \
- V(HasProperty) \
- V(ForInFilter) \
- V(GetProperty) \
- V(CallHandler) \
- V(ArgumentAdaptor) \
- V(ApiCallbackWith0Args) \
- V(ApiCallbackWith1Args) \
- V(ApiCallbackWith2Args) \
- V(ApiCallbackWith3Args) \
- V(ApiCallbackWith4Args) \
- V(ApiCallbackWith5Args) \
- V(ApiCallbackWith6Args) \
- V(ApiCallbackWith7Args) \
- V(ApiGetter) \
- V(StoreGlobalViaContext) \
- V(MathPowTagged) \
- V(MathPowInteger) \
- V(GrowArrayElements) \
- V(InterpreterDispatch) \
- V(InterpreterPushArgsAndCall) \
- V(InterpreterPushArgsAndConstruct) \
- V(InterpreterCEntry) \
+#define INTERFACE_DESCRIPTOR_LIST(V) \
+ V(Void) \
+ V(ContextOnly) \
+ V(Load) \
+ V(LoadWithVector) \
+ V(LoadGlobal) \
+ V(LoadGlobalWithVector) \
+ V(Store) \
+ V(StoreWithVector) \
+ V(StoreNamedTransition) \
+ V(StoreTransition) \
+ V(VarArgFunction) \
+ V(FastNewClosure) \
+ V(FastNewFunctionContext) \
+ V(FastNewObject) \
+ V(FastNewRestParameter) \
+ V(FastNewSloppyArguments) \
+ V(FastNewStrictArguments) \
+ V(TypeConversion) \
+ V(Typeof) \
+ V(FastCloneRegExp) \
+ V(FastCloneShallowArray) \
+ V(FastCloneShallowObject) \
+ V(CreateAllocationSite) \
+ V(CreateWeakCell) \
+ V(CallFunction) \
+ V(CallFunctionWithFeedback) \
+ V(CallFunctionWithFeedbackAndVector) \
+ V(CallConstruct) \
+ V(CallTrampoline) \
+ V(ConstructStub) \
+ V(ConstructTrampoline) \
+ V(RegExpExec) \
+ V(RegExpConstructResult) \
+ V(CopyFastSmiOrObjectElements) \
+ V(TransitionElementsKind) \
+ V(AllocateHeapNumber) \
+ V(AllocateFloat32x4) \
+ V(AllocateInt32x4) \
+ V(AllocateUint32x4) \
+ V(AllocateBool32x4) \
+ V(AllocateInt16x8) \
+ V(AllocateUint16x8) \
+ V(AllocateBool16x8) \
+ V(AllocateInt8x16) \
+ V(AllocateUint8x16) \
+ V(AllocateBool8x16) \
+ V(ArrayNoArgumentConstructor) \
+ V(ArraySingleArgumentConstructor) \
+ V(ArrayNArgumentsConstructor) \
+ V(Compare) \
+ V(BinaryOp) \
+ V(BinaryOpWithAllocationSite) \
+ V(BinaryOpWithVector) \
+ V(CountOp) \
+ V(StringAdd) \
+ V(StringCompare) \
+ V(SubString) \
+ V(Keyed) \
+ V(Named) \
+ V(HasProperty) \
+ V(ForInFilter) \
+ V(GetProperty) \
+ V(CallHandler) \
+ V(ArgumentAdaptor) \
+ V(ApiCallback) \
+ V(ApiGetter) \
+ V(StoreGlobalViaContext) \
+ V(MathPowTagged) \
+ V(MathPowInteger) \
+ V(GrowArrayElements) \
+ V(InterpreterDispatch) \
+ V(InterpreterPushArgsAndCall) \
+ V(InterpreterPushArgsAndConstruct) \
+ V(InterpreterPushArgsAndConstructArray) \
+ V(InterpreterCEntry) \
V(ResumeGenerator)
class CallInterfaceDescriptorData {
public:
- CallInterfaceDescriptorData()
- : register_param_count_(-1), function_type_(nullptr) {}
+ CallInterfaceDescriptorData() : register_param_count_(-1), param_count_(-1) {}
// A copy of the passed in registers and param_representations is made
// and owned by the CallInterfaceDescriptorData.
- void InitializePlatformIndependent(FunctionType* function_type) {
- function_type_ = function_type;
- }
-
- // TODO(mvstanton): Instead of taking parallel arrays register and
- // param_representations, how about a struct that puts the representation
- // and register side by side (eg, RegRep(r1, Representation::Tagged()).
- // The same should go for the CodeStubDescriptor class.
void InitializePlatformSpecific(
int register_parameter_count, const Register* registers,
PlatformInterfaceDescriptor* platform_descriptor = NULL);
- bool IsInitialized() const { return register_param_count_ >= 0; }
+ // if machine_types is null, then an array of size
+ // (register_parameter_count + extra_parameter_count) will be created
+ // with MachineType::AnyTagged() for each member.
+ //
+ // if machine_types is not null, then it should be of the size
+ // register_parameter_count. Those members of the parameter array
+ // will be initialized from {machine_types}, and the rest initialized
+ // to MachineType::AnyTagged().
+ void InitializePlatformIndependent(int parameter_count,
+ int extra_parameter_count,
+ const MachineType* machine_types);
+
+ bool IsInitialized() const {
+ return register_param_count_ >= 0 && param_count_ >= 0;
+ }
- int param_count() const { return function_type_->Arity(); }
+ int param_count() const { return param_count_; }
int register_param_count() const { return register_param_count_; }
Register register_param(int index) const { return register_params_[index]; }
Register* register_params() const { return register_params_.get(); }
- Type* param_type(int index) const { return function_type_->Parameter(index); }
+ MachineType param_type(int index) const { return machine_types_[index]; }
PlatformInterfaceDescriptor* platform_specific_descriptor() const {
return platform_specific_descriptor_;
}
private:
int register_param_count_;
+ int param_count_;
// The Register params are allocated dynamically by the
// InterfaceDescriptor, and freed on destruction. This is because static
// arrays of Registers cause creation of runtime static initializers
// which we don't want.
std::unique_ptr<Register[]> register_params_;
-
- // Specifies types for parameters and return
- FunctionType* function_type_;
+ std::unique_ptr<MachineType[]> machine_types_;
PlatformInterfaceDescriptor* platform_specific_descriptor_;
@@ -186,7 +179,7 @@ class CallInterfaceDescriptor {
return data()->register_param(index);
}
- Type* GetParameterType(int index) const {
+ MachineType GetParameterType(int index) const {
DCHECK(index < data()->param_count());
return data()->param_type(index);
}
@@ -200,21 +193,18 @@ class CallInterfaceDescriptor {
const char* DebugName(Isolate* isolate) const;
- static FunctionType* BuildDefaultFunctionType(Isolate* isolate,
- int parameter_count);
-
protected:
const CallInterfaceDescriptorData* data() const { return data_; }
- virtual FunctionType* BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int register_param_count) {
- return BuildDefaultFunctionType(isolate, register_param_count);
- }
-
virtual void InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
UNREACHABLE();
}
+ virtual void InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ data->InitializePlatformIndependent(data->register_param_count(), 0, NULL);
+ }
+
void Initialize(Isolate* isolate, CallDescriptors::Key key) {
if (!data()->IsInitialized()) {
// We should only initialize descriptors on the isolate's main thread.
@@ -222,9 +212,7 @@ class CallInterfaceDescriptor {
CallInterfaceDescriptorData* d = isolate->call_descriptor_data(key);
DCHECK(d == data()); // d should be a modifiable pointer to data().
InitializePlatformSpecific(d);
- FunctionType* function_type = BuildCallInterfaceDescriptorFunctionType(
- isolate, d->register_param_count());
- d->InitializePlatformIndependent(function_type);
+ InitializePlatformIndependent(d);
}
}
@@ -264,23 +252,26 @@ class CallInterfaceDescriptor {
\
public:
-#define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base) \
- DECLARE_DESCRIPTOR(name, base) \
- protected: \
- FunctionType* BuildCallInterfaceDescriptorFunctionType( \
- Isolate* isolate, int register_param_count) override; \
- \
+#define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base) \
+ DECLARE_DESCRIPTOR(name, base) \
+ protected: \
+ void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
+ override; \
+ \
public:
-#define DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(name, base, arg) \
- DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
- protected: \
- FunctionType* BuildCallInterfaceDescriptorFunctionType( \
- Isolate* isolate, int register_param_count) override { \
- return BuildCallInterfaceDescriptorFunctionTypeWithArg( \
- isolate, register_param_count, arg); \
- } \
- \
+#define DECLARE_DESCRIPTOR_WITH_STACK_ARGS(name, base) \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
+ protected: \
+ void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
+ override { \
+ data->InitializePlatformIndependent(0, kParameterCount, NULL); \
+ } \
+ void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
+ override { \
+ data->InitializePlatformSpecific(0, nullptr); \
+ } \
+ \
public:
#define DEFINE_PARAMETERS(...) \
@@ -301,73 +292,6 @@ class ContextOnlyDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
};
-// The OnStackWith*ArgsDescriptors have a lot of boilerplate. The superclass
-// OnStackArgsDescriptorBase is not meant to be instantiated directly and has no
-// public constructors to ensure this is so.contains all the logic, and the
-//
-// Use OnStackArgsDescriptorBase::ForArgs(isolate, parameter_count) to
-// instantiate a descriptor with the number of args.
-class OnStackArgsDescriptorBase : public CallInterfaceDescriptor {
- public:
- static CallInterfaceDescriptor ForArgs(Isolate* isolate, int parameter_count);
-
- protected:
- OnStackArgsDescriptorBase(Isolate* isolate, CallDescriptors::Key key)
- : CallInterfaceDescriptor(isolate, key) {}
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override;
- FunctionType* BuildCallInterfaceDescriptorFunctionTypeWithArg(
- Isolate* isolate, int register_parameter_count, int parameter_count);
-};
-
-class OnStackWith1ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith1ArgsDescriptor,
- OnStackArgsDescriptorBase,
- 1)
-};
-
-class OnStackWith2ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith2ArgsDescriptor,
- OnStackArgsDescriptorBase,
- 2)
-};
-
-class OnStackWith3ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith3ArgsDescriptor,
- OnStackArgsDescriptorBase,
- 3)
-};
-
-class OnStackWith4ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith4ArgsDescriptor,
- OnStackArgsDescriptorBase,
- 4)
-};
-
-class OnStackWith5ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith5ArgsDescriptor,
- OnStackArgsDescriptorBase,
- 5)
-};
-
-class OnStackWith6ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith6ArgsDescriptor,
- OnStackArgsDescriptorBase,
- 6)
-};
-
-class OnStackWith7ArgsDescriptor : public OnStackArgsDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith7ArgsDescriptor,
- OnStackArgsDescriptorBase,
- 7)
-};
-
// LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
class LoadDescriptor : public CallInterfaceDescriptor {
public:
@@ -401,42 +325,47 @@ class StoreDescriptor : public CallInterfaceDescriptor {
static const Register NameRegister();
static const Register ValueRegister();
static const Register SlotRegister();
-};
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+ static const bool kPassLastArgsOnStack = true;
+#else
+ static const bool kPassLastArgsOnStack = false;
+#endif
+
+ // Pass value and slot through the stack.
+ static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
+};
class StoreTransitionDescriptor : public StoreDescriptor {
public:
- DEFINE_PARAMETERS(kReceiver, kName, kValue, kMap)
+ DEFINE_PARAMETERS(kReceiver, kName, kMap, kValue, kSlot, kVector)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreTransitionDescriptor,
StoreDescriptor)
static const Register MapRegister();
-};
+ static const Register SlotRegister();
+ static const Register VectorRegister();
+ // Pass value, slot and vector through the stack.
+ static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
+};
-class VectorStoreTransitionDescriptor : public StoreDescriptor {
+class StoreNamedTransitionDescriptor : public StoreTransitionDescriptor {
public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(VectorStoreTransitionDescriptor,
- StoreDescriptor)
-
- // TODO(ishell): use DEFINE_PARAMETERS macro here
- // Extends StoreDescriptor with Map parameter.
- enum ParameterIndices {
- kReceiver = 0,
- kName = 1,
- kValue = 2,
+ DEFINE_PARAMETERS(kReceiver, kFieldOffset, kMap, kValue, kSlot, kVector,
+ kName)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreNamedTransitionDescriptor,
+ StoreTransitionDescriptor)
- kMap = 3,
-
- kSlot = 4, // not present on ia32.
- kVirtualSlotVector = 4,
-
- kVector = 5
- };
+ // Always pass name on the stack.
+ static const bool kPassLastArgsOnStack = true;
+ static const int kStackArgumentsCount =
+ StoreTransitionDescriptor::kStackArgumentsCount + 1;
- static const Register MapRegister();
- static const Register SlotRegister();
- static const Register VectorRegister();
+ static const Register NameRegister() { return no_reg; }
+ static const Register FieldOffsetRegister() {
+ return StoreTransitionDescriptor::NameRegister();
+ }
};
class StoreWithVectorDescriptor : public StoreDescriptor {
@@ -446,6 +375,9 @@ class StoreWithVectorDescriptor : public StoreDescriptor {
StoreDescriptor)
static const Register VectorRegister();
+
+ // Pass value, slot and vector through the stack.
+ static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
};
class LoadWithVectorDescriptor : public LoadDescriptor {
@@ -632,6 +564,12 @@ class CallConstructDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CallConstructDescriptor, CallInterfaceDescriptor)
};
+class RegExpExecDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kRegExpObject, kString, kPreviousIndex, kLastMatchInfo)
+ DECLARE_DESCRIPTOR_WITH_STACK_ARGS(RegExpExecDescriptor,
+ CallInterfaceDescriptor)
+};
class RegExpConstructResultDescriptor : public CallInterfaceDescriptor {
public:
@@ -751,6 +689,13 @@ class StringCompareDescriptor : public CallInterfaceDescriptor {
static const Register RightRegister();
};
+class SubStringDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kString, kFrom, kTo)
+ DECLARE_DESCRIPTOR_WITH_STACK_ARGS(SubStringDescriptor,
+ CallInterfaceDescriptor)
+};
+
// TODO(ishell): not used, remove.
class KeyedDescriptor : public CallInterfaceDescriptor {
public:
@@ -778,79 +723,13 @@ class ArgumentAdaptorDescriptor : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
-// The ApiCallback*Descriptors have a lot of boilerplate. The superclass
-// ApiCallbackDescriptorBase contains all the logic, and the
-// ApiCallbackWith*ArgsDescriptor merely instantiate these with a
-// parameter for the number of args.
-//
-// The base class is not meant to be instantiated directly and has no
-// public constructors to ensure this is so.
-//
-// The simplest usage for all the ApiCallback*Descriptors is probably
-// ApiCallbackDescriptorBase::ForArgs(isolate, argc)
-//
-class ApiCallbackDescriptorBase : public CallInterfaceDescriptor {
+class ApiCallbackDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kCallData, kHolder, kApiFunctionAddress)
- static CallInterfaceDescriptor ForArgs(Isolate* isolate, int argc);
-
- protected:
- ApiCallbackDescriptorBase(Isolate* isolate, CallDescriptors::Key key)
- : CallInterfaceDescriptor(isolate, key) {}
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override;
- FunctionType* BuildCallInterfaceDescriptorFunctionTypeWithArg(
- Isolate* isolate, int parameter_count, int argc);
-};
-
-class ApiCallbackWith0ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
- ApiCallbackWith0ArgsDescriptor, ApiCallbackDescriptorBase, 0)
-};
-
-class ApiCallbackWith1ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
- ApiCallbackWith1ArgsDescriptor, ApiCallbackDescriptorBase, 1)
-};
-
-class ApiCallbackWith2ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
- ApiCallbackWith2ArgsDescriptor, ApiCallbackDescriptorBase, 2)
-};
-
-class ApiCallbackWith3ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
- ApiCallbackWith3ArgsDescriptor, ApiCallbackDescriptorBase, 3)
-};
-
-class ApiCallbackWith4ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
- ApiCallbackWith4ArgsDescriptor, ApiCallbackDescriptorBase, 4)
-};
-
-class ApiCallbackWith5ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
- ApiCallbackWith5ArgsDescriptor, ApiCallbackDescriptorBase, 5)
-};
-
-class ApiCallbackWith6ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
- ApiCallbackWith6ArgsDescriptor, ApiCallbackDescriptorBase, 6)
-};
-
-class ApiCallbackWith7ArgsDescriptor : public ApiCallbackDescriptorBase {
- public:
- DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
- ApiCallbackWith7ArgsDescriptor, ApiCallbackDescriptorBase, 7)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ApiCallbackDescriptor,
+ CallInterfaceDescriptor)
};
-
class ApiGetterDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kHolder, kCallback)
@@ -904,22 +783,35 @@ class InterpreterDispatchDescriptor : public CallInterfaceDescriptor {
class InterpreterPushArgsAndCallDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(InterpreterPushArgsAndCallDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kFunction)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ InterpreterPushArgsAndCallDescriptor, CallInterfaceDescriptor)
};
class InterpreterPushArgsAndConstructDescriptor
: public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(InterpreterPushArgsAndConstructDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETERS(kNumberOfArguments, kNewTarget, kConstructor,
+ kFeedbackElement, kFirstArgument)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ InterpreterPushArgsAndConstructDescriptor, CallInterfaceDescriptor)
};
+class InterpreterPushArgsAndConstructArrayDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kNumberOfArguments, kFunction, kFeedbackElement,
+ kFirstArgument)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ InterpreterPushArgsAndConstructArrayDescriptor, CallInterfaceDescriptor)
+};
class InterpreterCEntryDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(InterpreterCEntryDescriptor, CallInterfaceDescriptor)
+ DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kFunctionEntry)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(InterpreterCEntryDescriptor,
+ CallInterfaceDescriptor)
};
class ResumeGeneratorDescriptor final : public CallInterfaceDescriptor {
diff --git a/deps/v8/src/interpreter/OWNERS b/deps/v8/src/interpreter/OWNERS
index d12fcf90d9..4e6a721fe0 100644
--- a/deps/v8/src/interpreter/OWNERS
+++ b/deps/v8/src/interpreter/OWNERS
@@ -3,5 +3,4 @@ set noparent
bmeurer@chromium.org
mstarzinger@chromium.org
mythria@chromium.org
-oth@chromium.org
rmcilroy@chromium.org
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 9bef5a5a4c..dfa395095a 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -4,7 +4,6 @@
#include "src/interpreter/bytecode-array-builder.h"
-#include "src/compiler.h"
#include "src/globals.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-dead-code-optimizer.h"
@@ -29,7 +28,7 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
parameter_count_(parameter_count),
local_register_count_(locals_count),
context_register_count_(context_count),
- temporary_allocator_(zone, fixed_register_count()),
+ register_allocator_(fixed_register_count()),
bytecode_array_writer_(zone, &constant_array_builder_,
source_position_mode),
pipeline_(&bytecode_array_writer_) {
@@ -47,7 +46,8 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
if (FLAG_ignition_reo) {
pipeline_ = new (zone) BytecodeRegisterOptimizer(
- zone, &temporary_allocator_, parameter_count, pipeline_);
+ zone, &register_allocator_, fixed_register_count(), parameter_count,
+ pipeline_);
}
return_position_ =
@@ -70,10 +70,6 @@ Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
return Register::FromParameterIndex(parameter_index, parameter_count());
}
-bool BytecodeArrayBuilder::RegisterIsParameterOrLocal(Register reg) const {
- return reg.is_parameter() || reg.index() < locals_count();
-}
-
Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
DCHECK(return_seen_in_block_);
DCHECK(!bytecode_generated_);
@@ -81,86 +77,121 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
Handle<FixedArray> handler_table =
handler_table_builder()->ToHandlerTable(isolate);
- return pipeline_->ToBytecodeArray(isolate, fixed_register_count(),
+ return pipeline_->ToBytecodeArray(isolate, total_register_count(),
parameter_count(), handler_table);
}
-namespace {
-
-static bool ExpressionPositionIsNeeded(Bytecode bytecode) {
- // An expression position is always needed if filtering is turned
- // off. Otherwise an expression is only needed if the bytecode has
- // external side effects.
- return !FLAG_ignition_filter_expression_positions ||
- !Bytecodes::IsWithoutExternalSideEffects(bytecode);
-}
-
-} // namespace
-
-void BytecodeArrayBuilder::AttachSourceInfo(BytecodeNode* node) {
- if (latest_source_info_.is_valid()) {
- // Statement positions need to be emitted immediately. Expression
- // positions can be pushed back until a bytecode is found that can
- // throw. Hence we only invalidate the existing source position
- // information if it is used.
- if (latest_source_info_.is_statement() ||
- ExpressionPositionIsNeeded(node->bytecode())) {
- node->source_info().Clone(latest_source_info_);
- latest_source_info_.set_invalid();
- }
- }
-}
-
void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2,
uint32_t operand3) {
DCHECK(OperandsAreValid(bytecode, 4, operand0, operand1, operand2, operand3));
- BytecodeNode node(bytecode, operand0, operand1, operand2, operand3);
- AttachSourceInfo(&node);
+ BytecodeNode node(bytecode, operand0, operand1, operand2, operand3,
+ &latest_source_info_);
pipeline()->Write(&node);
}
void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2) {
DCHECK(OperandsAreValid(bytecode, 3, operand0, operand1, operand2));
- BytecodeNode node(bytecode, operand0, operand1, operand2);
- AttachSourceInfo(&node);
+ BytecodeNode node(bytecode, operand0, operand1, operand2,
+ &latest_source_info_);
pipeline()->Write(&node);
}
void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
uint32_t operand1) {
DCHECK(OperandsAreValid(bytecode, 2, operand0, operand1));
- BytecodeNode node(bytecode, operand0, operand1);
- AttachSourceInfo(&node);
+ BytecodeNode node(bytecode, operand0, operand1, &latest_source_info_);
pipeline()->Write(&node);
}
void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
DCHECK(OperandsAreValid(bytecode, 1, operand0));
- BytecodeNode node(bytecode, operand0);
- AttachSourceInfo(&node);
+ BytecodeNode node(bytecode, operand0, &latest_source_info_);
pipeline()->Write(&node);
}
void BytecodeArrayBuilder::Output(Bytecode bytecode) {
DCHECK(OperandsAreValid(bytecode, 0));
- BytecodeNode node(bytecode);
- AttachSourceInfo(&node);
+ BytecodeNode node(bytecode, &latest_source_info_);
pipeline()->Write(&node);
}
+void BytecodeArrayBuilder::OutputJump(Bytecode bytecode, BytecodeLabel* label) {
+ BytecodeNode node(bytecode, 0, &latest_source_info_);
+ pipeline_->WriteJump(&node, label);
+ LeaveBasicBlock();
+}
+
+void BytecodeArrayBuilder::OutputJump(Bytecode bytecode, uint32_t operand0,
+ BytecodeLabel* label) {
+ BytecodeNode node(bytecode, 0, operand0, &latest_source_info_);
+ pipeline_->WriteJump(&node, label);
+ LeaveBasicBlock();
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
Register reg,
int feedback_slot) {
- Output(BytecodeForBinaryOperation(op), RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ switch (op) {
+ case Token::Value::ADD:
+ Output(Bytecode::kAdd, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::SUB:
+ Output(Bytecode::kSub, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::MUL:
+ Output(Bytecode::kMul, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::DIV:
+ Output(Bytecode::kDiv, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::MOD:
+ Output(Bytecode::kMod, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::BIT_OR:
+ Output(Bytecode::kBitwiseOr, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::BIT_XOR:
+ Output(Bytecode::kBitwiseXor, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::BIT_AND:
+ Output(Bytecode::kBitwiseAnd, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::SHL:
+ Output(Bytecode::kShiftLeft, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::SAR:
+ Output(Bytecode::kShiftRight, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::SHR:
+ Output(Bytecode::kShiftRightLogical, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ default:
+ UNREACHABLE();
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
int feedback_slot) {
- Output(BytecodeForCountOperation(op), UnsignedOperand(feedback_slot));
+ if (op == Token::Value::ADD) {
+ Output(Bytecode::kInc, UnsignedOperand(feedback_slot));
+ } else {
+ DCHECK_EQ(op, Token::Value::SUB);
+ Output(Bytecode::kDec, UnsignedOperand(feedback_slot));
+ }
return *this;
}
@@ -169,15 +200,51 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LogicalNot() {
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::TypeOf() {
Output(Bytecode::kTypeOf);
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
- Register reg) {
- Output(BytecodeForCompareOperation(op), RegisterOperand(reg));
+BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
+ Token::Value op, Register reg, int feedback_slot) {
+ switch (op) {
+ case Token::Value::EQ:
+ Output(Bytecode::kTestEqual, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::NE:
+ Output(Bytecode::kTestNotEqual, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::EQ_STRICT:
+ Output(Bytecode::kTestEqualStrict, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::LT:
+ Output(Bytecode::kTestLessThan, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::GT:
+ Output(Bytecode::kTestGreaterThan, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::LTE:
+ Output(Bytecode::kTestLessThanOrEqual, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::GTE:
+ Output(Bytecode::kTestGreaterThanOrEqual, RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
+ break;
+ case Token::Value::INSTANCEOF:
+ Output(Bytecode::kTestInstanceOf, RegisterOperand(reg));
+ break;
+ case Token::Value::IN:
+ Output(Bytecode::kTestIn, RegisterOperand(reg));
+ break;
+ default:
+ UNREACHABLE();
+ }
return *this;
}
@@ -250,50 +317,90 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int feedback_slot,
TypeofMode typeof_mode) {
- // TODO(rmcilroy): Potentially store typeof information in an
- // operand rather than having extra bytecodes.
- Bytecode bytecode = BytecodeForLoadGlobal(typeof_mode);
- Output(bytecode, UnsignedOperand(feedback_slot));
+ if (typeof_mode == INSIDE_TYPEOF) {
+ Output(Bytecode::kLdaGlobalInsideTypeof, feedback_slot);
+ } else {
+ DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
+ Output(Bytecode::kLdaGlobal, UnsignedOperand(feedback_slot));
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
- Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
size_t name_index = GetConstantPoolEntry(name);
- Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+ if (language_mode == SLOPPY) {
+ Output(Bytecode::kStaGlobalSloppy, UnsignedOperand(name_index),
+ UnsignedOperand(feedback_slot));
+ } else {
+ DCHECK_EQ(language_mode, STRICT);
+ Output(Bytecode::kStaGlobalStrict, UnsignedOperand(name_index),
+ UnsignedOperand(feedback_slot));
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
- int slot_index) {
+ int slot_index,
+ int depth) {
Output(Bytecode::kLdaContextSlot, RegisterOperand(context),
- UnsignedOperand(slot_index));
+ UnsignedOperand(slot_index), UnsignedOperand(depth));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
- int slot_index) {
+ int slot_index,
+ int depth) {
Output(Bytecode::kStaContextSlot, RegisterOperand(context),
- UnsignedOperand(slot_index));
+ UnsignedOperand(slot_index), UnsignedOperand(depth));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
const Handle<String> name, TypeofMode typeof_mode) {
+ size_t name_index = GetConstantPoolEntry(name);
+ if (typeof_mode == INSIDE_TYPEOF) {
+ Output(Bytecode::kLdaLookupSlotInsideTypeof, UnsignedOperand(name_index));
+ } else {
+ DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
+ Output(Bytecode::kLdaLookupSlot, UnsignedOperand(name_index));
+ }
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupContextSlot(
+ const Handle<String> name, TypeofMode typeof_mode, int slot_index,
+ int depth) {
+ Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
+ ? Bytecode::kLdaLookupContextSlotInsideTypeof
+ : Bytecode::kLdaLookupContextSlot;
+ size_t name_index = GetConstantPoolEntry(name);
+ Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(slot_index),
+ UnsignedOperand(depth));
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupGlobalSlot(
+ const Handle<String> name, TypeofMode typeof_mode, int feedback_slot,
+ int depth) {
Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
- ? Bytecode::kLdaLookupSlotInsideTypeof
- : Bytecode::kLdaLookupSlot;
+ ? Bytecode::kLdaLookupGlobalSlotInsideTypeof
+ : Bytecode::kLdaLookupGlobalSlot;
size_t name_index = GetConstantPoolEntry(name);
- Output(bytecode, UnsignedOperand(name_index));
+ Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(feedback_slot),
+ UnsignedOperand(depth));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
const Handle<String> name, LanguageMode language_mode) {
- Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
size_t name_index = GetConstantPoolEntry(name);
- Output(bytecode, UnsignedOperand(name_index));
+ if (language_mode == SLOPPY) {
+ Output(Bytecode::kStaLookupSlotSloppy, UnsignedOperand(name_index));
+ } else {
+ DCHECK_EQ(language_mode, STRICT);
+ Output(Bytecode::kStaLookupSlotStrict, UnsignedOperand(name_index));
+ }
return *this;
}
@@ -315,19 +422,29 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
Register object, const Handle<Name> name, int feedback_slot,
LanguageMode language_mode) {
- Bytecode bytecode = BytecodeForStoreNamedProperty(language_mode);
size_t name_index = GetConstantPoolEntry(name);
- Output(bytecode, RegisterOperand(object), UnsignedOperand(name_index),
- UnsignedOperand(feedback_slot));
+ if (language_mode == SLOPPY) {
+ Output(Bytecode::kStaNamedPropertySloppy, RegisterOperand(object),
+ UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+ } else {
+ DCHECK_EQ(language_mode, STRICT);
+ Output(Bytecode::kStaNamedPropertyStrict, RegisterOperand(object),
+ UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
Register object, Register key, int feedback_slot,
LanguageMode language_mode) {
- Bytecode bytecode = BytecodeForStoreKeyedProperty(language_mode);
- Output(bytecode, RegisterOperand(object), RegisterOperand(key),
- UnsignedOperand(feedback_slot));
+ if (language_mode == SLOPPY) {
+ Output(Bytecode::kStaKeyedPropertySloppy, RegisterOperand(object),
+ RegisterOperand(key), UnsignedOperand(feedback_slot));
+ } else {
+ DCHECK_EQ(language_mode, STRICT);
+ Output(Bytecode::kStaKeyedPropertyStrict, RegisterOperand(object),
+ RegisterOperand(key), UnsignedOperand(feedback_slot));
+ }
return *this;
}
@@ -346,10 +463,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateBlockContext(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateCatchContext(
- Register exception, Handle<String> name) {
+ Register exception, Handle<String> name, Handle<ScopeInfo> scope_info) {
size_t name_index = GetConstantPoolEntry(name);
+ size_t scope_info_index = GetConstantPoolEntry(scope_info);
Output(Bytecode::kCreateCatchContext, RegisterOperand(exception),
- UnsignedOperand(name_index));
+ UnsignedOperand(name_index), UnsignedOperand(scope_info_index));
return *this;
}
@@ -358,18 +476,29 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateFunctionContext(int slots) {
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CreateWithContext(Register object) {
- Output(Bytecode::kCreateWithContext, RegisterOperand(object));
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateWithContext(
+ Register object, Handle<ScopeInfo> scope_info) {
+ size_t scope_info_index = GetConstantPoolEntry(scope_info);
+ Output(Bytecode::kCreateWithContext, RegisterOperand(object),
+ UnsignedOperand(scope_info_index));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
CreateArgumentsType type) {
- // TODO(rmcilroy): Consider passing the type as a bytecode operand rather
- // than having two different bytecodes once we have better support for
- // branches in the InterpreterAssembler.
- Bytecode bytecode = BytecodeForCreateArguments(type);
- Output(bytecode);
+ switch (type) {
+ case CreateArgumentsType::kMappedArguments:
+ Output(Bytecode::kCreateMappedArguments);
+ break;
+ case CreateArgumentsType::kUnmappedArguments:
+ Output(Bytecode::kCreateUnmappedArguments);
+ break;
+ case CreateArgumentsType::kRestParameter:
+ Output(Bytecode::kCreateRestParameter);
+ break;
+ default:
+ UNREACHABLE();
+ }
return *this;
}
@@ -411,19 +540,19 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject(
+BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToObject(
Register out) {
Output(Bytecode::kToObject, RegisterOperand(out));
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName(
+BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToName(
Register out) {
Output(Bytecode::kToName, RegisterOperand(out));
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToNumber(
+BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToNumber(
Register out) {
Output(Bytecode::kToNumber, RegisterOperand(out));
return *this;
@@ -442,43 +571,44 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
- BytecodeLabel* label) {
- BytecodeNode node(jump_bytecode, 0);
- AttachSourceInfo(&node);
- pipeline_->WriteJump(&node, label);
- LeaveBasicBlock();
- return *this;
-}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::Jump(BytecodeLabel* label) {
- return OutputJump(Bytecode::kJump, label);
+ OutputJump(Bytecode::kJump, label);
+ return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfTrue(BytecodeLabel* label) {
// The peephole optimizer attempts to simplify JumpIfToBooleanTrue
// to JumpIfTrue.
- return OutputJump(Bytecode::kJumpIfToBooleanTrue, label);
+ OutputJump(Bytecode::kJumpIfToBooleanTrue, label);
+ return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
- // The peephole optimizer attempts to simplify JumpIfToBooleanFalse
- // to JumpIfFalse.
- return OutputJump(Bytecode::kJumpIfToBooleanFalse, label);
+ OutputJump(Bytecode::kJumpIfToBooleanFalse, label);
+ return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNull(BytecodeLabel* label) {
- return OutputJump(Bytecode::kJumpIfNull, label);
+ OutputJump(Bytecode::kJumpIfNull, label);
+ return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
BytecodeLabel* label) {
- return OutputJump(Bytecode::kJumpIfUndefined, label);
+ OutputJump(Bytecode::kJumpIfUndefined, label);
+ return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
BytecodeLabel* label) {
- return OutputJump(Bytecode::kJumpIfNotHole, label);
+ OutputJump(Bytecode::kJumpIfNotHole, label);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(BytecodeLabel* label,
+ int loop_depth) {
+ OutputJump(Bytecode::kJumpLoop, UnsignedOperand(loop_depth), label);
+ return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
@@ -499,11 +629,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::OsrPoll(int loop_depth) {
- Output(Bytecode::kOsrPoll, UnsignedOperand(loop_depth));
- return *this;
-}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
Output(Bytecode::kThrow);
return *this;
@@ -527,24 +652,27 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Debugger() {
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
- Register receiver, Register cache_info_triple) {
+ Register receiver, RegisterList cache_info_triple) {
+ DCHECK_EQ(3, cache_info_triple.register_count());
Output(Bytecode::kForInPrepare, RegisterOperand(receiver),
- RegisterOperand(cache_info_triple));
+ RegisterOperand(cache_info_triple.first_register()));
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
- Register cache_length) {
- Output(Bytecode::kForInDone, RegisterOperand(index),
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInContinue(
+ Register index, Register cache_length) {
+ Output(Bytecode::kForInContinue, RegisterOperand(index),
RegisterOperand(cache_length));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
- Register receiver, Register index, Register cache_type_array_pair,
+ Register receiver, Register index, RegisterList cache_type_array_pair,
int feedback_slot) {
+ DCHECK_EQ(2, cache_type_array_pair.register_count());
Output(Bytecode::kForInNext, RegisterOperand(receiver),
- RegisterOperand(index), RegisterOperand(cache_type_array_pair),
+ RegisterOperand(index),
+ RegisterOperand(cache_type_array_pair.first_register()),
UnsignedOperand(feedback_slot));
return *this;
}
@@ -591,45 +719,39 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) {
return *this;
}
-void BytecodeArrayBuilder::EnsureReturn() {
- if (!return_seen_in_block_) {
- LoadUndefined();
- Return();
- }
- DCHECK(return_seen_in_block_);
-}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
- Register receiver_args,
- size_t receiver_args_count,
+ RegisterList args,
int feedback_slot,
TailCallMode tail_call_mode) {
- Bytecode bytecode = BytecodeForCall(tail_call_mode);
- Output(bytecode, RegisterOperand(callable), RegisterOperand(receiver_args),
- UnsignedOperand(receiver_args_count), UnsignedOperand(feedback_slot));
+ if (tail_call_mode == TailCallMode::kDisallow) {
+ Output(Bytecode::kCall, RegisterOperand(callable),
+ RegisterOperand(args.first_register()),
+ UnsignedOperand(args.register_count()),
+ UnsignedOperand(feedback_slot));
+ } else {
+ DCHECK(tail_call_mode == TailCallMode::kAllow);
+ Output(Bytecode::kTailCall, RegisterOperand(callable),
+ RegisterOperand(args.first_register()),
+ UnsignedOperand(args.register_count()),
+ UnsignedOperand(feedback_slot));
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
- Register first_arg,
- size_t arg_count) {
- if (!first_arg.is_valid()) {
- DCHECK_EQ(0u, arg_count);
- first_arg = Register(0);
- }
+ RegisterList args,
+ int feedback_slot_id) {
Output(Bytecode::kNew, RegisterOperand(constructor),
- RegisterOperand(first_arg), UnsignedOperand(arg_count));
+ RegisterOperand(args.first_register()),
+ UnsignedOperand(args.register_count()),
+ UnsignedOperand(feedback_slot_id));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
- Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
+ Runtime::FunctionId function_id, RegisterList args) {
DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
- if (!first_arg.is_valid()) {
- DCHECK_EQ(0u, arg_count);
- first_arg = Register(0);
- }
Bytecode bytecode;
uint32_t id;
if (IntrinsicsHelper::IsSupported(function_id)) {
@@ -639,35 +761,56 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
bytecode = Bytecode::kCallRuntime;
id = static_cast<uint32_t>(function_id);
}
- Output(bytecode, id, RegisterOperand(first_arg), UnsignedOperand(arg_count));
+ Output(bytecode, id, RegisterOperand(args.first_register()),
+ UnsignedOperand(args.register_count()));
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
+ Runtime::FunctionId function_id, Register arg) {
+ return CallRuntime(function_id, RegisterList(arg.index(), 1));
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
+ Runtime::FunctionId function_id) {
+ return CallRuntime(function_id, RegisterList());
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
- Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
- Register first_return) {
+ Runtime::FunctionId function_id, RegisterList args,
+ RegisterList return_pair) {
DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
- if (!first_arg.is_valid()) {
- DCHECK_EQ(0u, arg_count);
- first_arg = Register(0);
- }
+ DCHECK_EQ(2, return_pair.register_count());
Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
- RegisterOperand(first_arg), UnsignedOperand(arg_count),
- RegisterOperand(first_return));
+ RegisterOperand(args.first_register()),
+ UnsignedOperand(args.register_count()),
+ RegisterOperand(return_pair.first_register()));
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(
- int context_index, Register receiver_args, size_t receiver_args_count) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
+ Runtime::FunctionId function_id, Register arg, RegisterList return_pair) {
+ return CallRuntimeForPair(function_id, RegisterList(arg.index(), 1),
+ return_pair);
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
+ RegisterList args) {
Output(Bytecode::kCallJSRuntime, UnsignedOperand(context_index),
- RegisterOperand(receiver_args), UnsignedOperand(receiver_args_count));
+ RegisterOperand(args.first_register()),
+ UnsignedOperand(args.register_count()));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
LanguageMode language_mode) {
- Output(BytecodeForDelete(language_mode), RegisterOperand(object));
+ if (language_mode == SLOPPY) {
+ Output(Bytecode::kDeletePropertySloppy, RegisterOperand(object));
+ } else {
+ DCHECK_EQ(language_mode, STRICT);
+ Output(Bytecode::kDeletePropertyStrict, RegisterOperand(object));
+ }
return *this;
}
@@ -689,29 +832,6 @@ void BytecodeArrayBuilder::SetReturnPosition() {
latest_source_info_.MakeStatementPosition(return_position_);
}
-void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
- if (stmt->position() == kNoSourcePosition) return;
- latest_source_info_.MakeStatementPosition(stmt->position());
-}
-
-void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
- if (expr->position() == kNoSourcePosition) return;
- if (!latest_source_info_.is_statement()) {
- // Ensure the current expression position is overwritten with the
- // latest value.
- latest_source_info_.MakeExpressionPosition(expr->position());
- }
-}
-
-void BytecodeArrayBuilder::SetExpressionAsStatementPosition(Expression* expr) {
- if (expr->position() == kNoSourcePosition) return;
- latest_source_info_.MakeStatementPosition(expr->position());
-}
-
-bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
- return temporary_register_allocator()->RegisterIsLive(reg);
-}
-
bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
if (!reg.is_valid()) {
return false;
@@ -726,7 +846,7 @@ bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
} else if (reg.index() < fixed_register_count()) {
return true;
} else {
- return TemporaryRegisterIsLive(reg);
+ return register_allocator()->RegisterIsLive(reg);
}
}
@@ -743,19 +863,6 @@ bool BytecodeArrayBuilder::OperandsAreValid(
switch (operand_types[i]) {
case OperandType::kNone:
return false;
- case OperandType::kRegCount: {
- CHECK_NE(i, 0);
- CHECK(operand_types[i - 1] == OperandType::kMaybeReg ||
- operand_types[i - 1] == OperandType::kReg);
- if (i > 0 && operands[i] > 0) {
- Register start = Register::FromOperand(operands[i - 1]);
- Register end(start.index() + static_cast<int>(operands[i]) - 1);
- if (!RegisterIsValid(start) || !RegisterIsValid(end) || start > end) {
- return false;
- }
- }
- break;
- }
case OperandType::kFlag8:
case OperandType::kIntrinsicId:
if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
@@ -770,17 +877,28 @@ bool BytecodeArrayBuilder::OperandsAreValid(
}
break;
case OperandType::kIdx:
- // TODO(oth): Consider splitting OperandType::kIdx into two
- // operand types. One which is a constant pool index that can
- // be checked, and the other is an unsigned value.
+ // TODO(leszeks): Possibly split this up into constant pool indices and
+ // other indices, for checking.
break;
+ case OperandType::kUImm:
case OperandType::kImm:
break;
- case OperandType::kMaybeReg:
- if (Register::FromOperand(operands[i]) == Register(0)) {
- break;
+ case OperandType::kRegList: {
+ CHECK_LT(i, operand_count - 1);
+ CHECK(operand_types[i + 1] == OperandType::kRegCount);
+ int reg_count = static_cast<int>(operands[i + 1]);
+ if (reg_count == 0) {
+ return Register::FromOperand(operands[i]) == Register(0);
+ } else {
+ Register start = Register::FromOperand(operands[i]);
+ Register end(start.index() + reg_count - 1);
+ if (!RegisterIsValid(start) || !RegisterIsValid(end) || start > end) {
+ return false;
+ }
}
- // Fall-through to kReg case.
+ i++; // Skip past kRegCount operand.
+ break;
+ }
case OperandType::kReg:
case OperandType::kRegOut: {
Register reg = Register::FromOperand(operands[i]);
@@ -808,186 +926,14 @@ bool BytecodeArrayBuilder::OperandsAreValid(
}
break;
}
+ case OperandType::kRegCount:
+ UNREACHABLE(); // Dealt with in kRegList above.
}
}
return true;
}
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
- switch (op) {
- case Token::Value::ADD:
- return Bytecode::kAdd;
- case Token::Value::SUB:
- return Bytecode::kSub;
- case Token::Value::MUL:
- return Bytecode::kMul;
- case Token::Value::DIV:
- return Bytecode::kDiv;
- case Token::Value::MOD:
- return Bytecode::kMod;
- case Token::Value::BIT_OR:
- return Bytecode::kBitwiseOr;
- case Token::Value::BIT_XOR:
- return Bytecode::kBitwiseXor;
- case Token::Value::BIT_AND:
- return Bytecode::kBitwiseAnd;
- case Token::Value::SHL:
- return Bytecode::kShiftLeft;
- case Token::Value::SAR:
- return Bytecode::kShiftRight;
- case Token::Value::SHR:
- return Bytecode::kShiftRightLogical;
- default:
- UNREACHABLE();
- return Bytecode::kIllegal;
- }
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForCountOperation(Token::Value op) {
- switch (op) {
- case Token::Value::ADD:
- return Bytecode::kInc;
- case Token::Value::SUB:
- return Bytecode::kDec;
- default:
- UNREACHABLE();
- return Bytecode::kIllegal;
- }
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForCompareOperation(Token::Value op) {
- switch (op) {
- case Token::Value::EQ:
- return Bytecode::kTestEqual;
- case Token::Value::NE:
- return Bytecode::kTestNotEqual;
- case Token::Value::EQ_STRICT:
- return Bytecode::kTestEqualStrict;
- case Token::Value::LT:
- return Bytecode::kTestLessThan;
- case Token::Value::GT:
- return Bytecode::kTestGreaterThan;
- case Token::Value::LTE:
- return Bytecode::kTestLessThanOrEqual;
- case Token::Value::GTE:
- return Bytecode::kTestGreaterThanOrEqual;
- case Token::Value::INSTANCEOF:
- return Bytecode::kTestInstanceOf;
- case Token::Value::IN:
- return Bytecode::kTestIn;
- default:
- UNREACHABLE();
- return Bytecode::kIllegal;
- }
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForStoreNamedProperty(
- LanguageMode language_mode) {
- switch (language_mode) {
- case SLOPPY:
- return Bytecode::kStaNamedPropertySloppy;
- case STRICT:
- return Bytecode::kStaNamedPropertyStrict;
- default:
- UNREACHABLE();
- }
- return Bytecode::kIllegal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForStoreKeyedProperty(
- LanguageMode language_mode) {
- switch (language_mode) {
- case SLOPPY:
- return Bytecode::kStaKeyedPropertySloppy;
- case STRICT:
- return Bytecode::kStaKeyedPropertyStrict;
- default:
- UNREACHABLE();
- }
- return Bytecode::kIllegal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(TypeofMode typeof_mode) {
- return typeof_mode == INSIDE_TYPEOF ? Bytecode::kLdaGlobalInsideTypeof
- : Bytecode::kLdaGlobal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForStoreGlobal(
- LanguageMode language_mode) {
- switch (language_mode) {
- case SLOPPY:
- return Bytecode::kStaGlobalSloppy;
- case STRICT:
- return Bytecode::kStaGlobalStrict;
- default:
- UNREACHABLE();
- }
- return Bytecode::kIllegal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForStoreLookupSlot(
- LanguageMode language_mode) {
- switch (language_mode) {
- case SLOPPY:
- return Bytecode::kStaLookupSlotSloppy;
- case STRICT:
- return Bytecode::kStaLookupSlotStrict;
- default:
- UNREACHABLE();
- }
- return Bytecode::kIllegal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForCreateArguments(
- CreateArgumentsType type) {
- switch (type) {
- case CreateArgumentsType::kMappedArguments:
- return Bytecode::kCreateMappedArguments;
- case CreateArgumentsType::kUnmappedArguments:
- return Bytecode::kCreateUnmappedArguments;
- case CreateArgumentsType::kRestParameter:
- return Bytecode::kCreateRestParameter;
- }
- UNREACHABLE();
- return Bytecode::kIllegal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForDelete(LanguageMode language_mode) {
- switch (language_mode) {
- case SLOPPY:
- return Bytecode::kDeletePropertySloppy;
- case STRICT:
- return Bytecode::kDeletePropertyStrict;
- default:
- UNREACHABLE();
- }
- return Bytecode::kIllegal;
-}
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForCall(TailCallMode tail_call_mode) {
- switch (tail_call_mode) {
- case TailCallMode::kDisallow:
- return Bytecode::kCall;
- case TailCallMode::kAllow:
- return Bytecode::kTailCall;
- default:
- UNREACHABLE();
- }
- return Bytecode::kIllegal;
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 51b61861c3..a9fa7a7bb5 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -12,7 +12,7 @@
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/interpreter/handler-table-builder.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -61,23 +61,14 @@ class BytecodeArrayBuilder final : public ZoneObject {
int fixed_register_count() const { return context_count() + locals_count(); }
// Returns the number of fixed and temporary registers.
- int fixed_and_temporary_register_count() const {
- return fixed_register_count() + temporary_register_count();
- }
-
- int temporary_register_count() const {
- return temporary_register_allocator()->allocation_count();
+ int total_register_count() const {
+ DCHECK_LE(fixed_register_count(),
+ register_allocator()->maximum_register_count());
+ return register_allocator()->maximum_register_count();
}
Register Parameter(int parameter_index) const;
- // Return true if the register |reg| represents a parameter or a
- // local.
- bool RegisterIsParameterOrLocal(Register reg) const;
-
- // Returns true if the register |reg| is a live temporary register.
- bool TemporaryRegisterIsLive(Register reg) const;
-
// Constant loads to accumulator.
BytecodeArrayBuilder& LoadConstantPoolEntry(size_t entry);
BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
@@ -94,11 +85,15 @@ class BytecodeArrayBuilder final : public ZoneObject {
int feedback_slot,
LanguageMode language_mode);
- // Load the object at |slot_index| in |context| into the accumulator.
- BytecodeArrayBuilder& LoadContextSlot(Register context, int slot_index);
+ // Load the object at |slot_index| at |depth| in the context chain starting
+ // with |context| into the accumulator.
+ BytecodeArrayBuilder& LoadContextSlot(Register context, int slot_index,
+ int depth);
- // Stores the object in the accumulator into |slot_index| of |context|.
- BytecodeArrayBuilder& StoreContextSlot(Register context, int slot_index);
+ // Stores the object in the accumulator into |slot_index| at |depth| in the
+ // context chain starting with |context|.
+ BytecodeArrayBuilder& StoreContextSlot(Register context, int slot_index,
+ int depth);
// Register-accumulator transfers.
BytecodeArrayBuilder& LoadAccumulatorWithRegister(Register reg);
@@ -127,6 +122,20 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeArrayBuilder& LoadLookupSlot(const Handle<String> name,
TypeofMode typeof_mode);
+ // Lookup the variable with |name|, which is known to be at |slot_index| at
+ // |depth| in the context chain if not shadowed by a context extension
+ // somewhere in that context chain.
+ BytecodeArrayBuilder& LoadLookupContextSlot(const Handle<String> name,
+ TypeofMode typeof_mode,
+ int slot_index, int depth);
+
+ // Lookup the variable with |name|, which has its feedback in |feedback_slot|
+ // and is known to be global if not shadowed by a context extension somewhere
+ // up to |depth| in that context chain.
+ BytecodeArrayBuilder& LoadLookupGlobalSlot(const Handle<String> name,
+ TypeofMode typeof_mode,
+ int feedback_slot, int depth);
+
// Store value in the accumulator into the variable with |name|.
BytecodeArrayBuilder& StoreLookupSlot(const Handle<String> name,
LanguageMode language_mode);
@@ -139,17 +148,19 @@ class BytecodeArrayBuilder final : public ZoneObject {
// in the accumulator.
BytecodeArrayBuilder& CreateBlockContext(Handle<ScopeInfo> scope_info);
- // Create a new context for a catch block with |exception| and |name| and the
- // closure in the accumulator.
+ // Create a new context for a catch block with |exception|, |name|,
+ // |scope_info|, and the closure in the accumulator.
BytecodeArrayBuilder& CreateCatchContext(Register exception,
- Handle<String> name);
+ Handle<String> name,
+ Handle<ScopeInfo> scope_info);
// Create a new context with size |slots|.
BytecodeArrayBuilder& CreateFunctionContext(int slots);
- // Creates a new context for a with-statement with the |object| in a register
- // and the closure in the accumulator.
- BytecodeArrayBuilder& CreateWithContext(Register object);
+ // Creates a new context with the given |scope_info| for a with-statement
+ // with the |object| in a register and the closure in the accumulator.
+ BytecodeArrayBuilder& CreateWithContext(Register object,
+ Handle<ScopeInfo> scope_info);
// Create a new arguments object in the accumulator.
BytecodeArrayBuilder& CreateArguments(CreateArgumentsType type);
@@ -171,46 +182,42 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeArrayBuilder& PopContext(Register context);
// Call a JS function. The JSFunction or Callable to be called should be in
- // |callable|, the receiver should be in |receiver_args| and all subsequent
- // arguments should be in registers <receiver_args + 1> to
- // <receiver_args + receiver_arg_count - 1>. Type feedback is recorded in
- // the |feedback_slot| in the type feedback vector.
+ // |callable|. The arguments should be in |args|, with the receiver in
+ // |args[0]|. Type feedback is recorded in the |feedback_slot| in the type
+ // feedback vector.
BytecodeArrayBuilder& Call(
- Register callable, Register receiver_args, size_t receiver_arg_count,
- int feedback_slot, TailCallMode tail_call_mode = TailCallMode::kDisallow);
-
- BytecodeArrayBuilder& TailCall(Register callable, Register receiver_args,
- size_t receiver_arg_count, int feedback_slot) {
- return Call(callable, receiver_args, receiver_arg_count, feedback_slot,
- TailCallMode::kAllow);
- }
+ Register callable, RegisterList args, int feedback_slot,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
// Call the new operator. The accumulator holds the |new_target|.
- // The |constructor| is in a register followed by |arg_count|
- // consecutive arguments starting at |first_arg| for the constuctor
- // invocation.
- BytecodeArrayBuilder& New(Register constructor, Register first_arg,
- size_t arg_count);
-
- // Call the runtime function with |function_id|. The first argument should be
- // in |first_arg| and all subsequent arguments should be in registers
- // <first_arg + 1> to <first_arg + arg_count - 1>.
+ // The |constructor| is in a register and arguments are in |args|.
+ BytecodeArrayBuilder& New(Register constructor, RegisterList args,
+ int feedback_slot);
+
+ // Call the runtime function with |function_id| and arguments |args|.
+ BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
+ RegisterList args);
+ // Call the runtime function with |function_id| with single argument |arg|.
BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
- Register first_arg, size_t arg_count);
+ Register arg);
+ // Call the runtime function with |function_id| with no arguments.
+ BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id);
- // Call the runtime function with |function_id| that returns a pair of values.
- // The first argument should be in |first_arg| and all subsequent arguments
- // should be in registers <first_arg + 1> to <first_arg + arg_count - 1>. The
- // return values will be returned in <first_return> and <first_return + 1>.
+ // Call the runtime function with |function_id| and arguments |args|, that
+ // returns a pair of values. The return values will be returned in
+ // |return_pair|.
BytecodeArrayBuilder& CallRuntimeForPair(Runtime::FunctionId function_id,
- Register first_arg, size_t arg_count,
- Register first_return);
+ RegisterList args,
+ RegisterList return_pair);
+ // Call the runtime function with |function_id| with single argument |arg|
+ // that returns a pair of values. The return values will be returned in
+ // |return_pair|.
+ BytecodeArrayBuilder& CallRuntimeForPair(Runtime::FunctionId function_id,
+ Register arg,
+ RegisterList return_pair);
- // Call the JS runtime function with |context_index|. The the receiver should
- // be in |receiver_args| and all subsequent arguments should be in registers
- // <receiver + 1> to <receiver + receiver_args_count - 1>.
- BytecodeArrayBuilder& CallJSRuntime(int context_index, Register receiver_args,
- size_t receiver_args_count);
+ // Call the JS runtime function with |context_index| and arguments |args|.
+ BytecodeArrayBuilder& CallJSRuntime(int context_index, RegisterList args);
// Operators (register holds the lhs value, accumulator holds the rhs value).
// Type feedback will be recorded in the |feedback_slot|
@@ -230,15 +237,13 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
// Tests.
- BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg);
-
- // Casts accumulator and stores result in accumulator.
- BytecodeArrayBuilder& CastAccumulatorToBoolean();
+ BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
+ int feedback_slot = kNoFeedbackSlot);
- // Casts accumulator and stores result in register |out|.
- BytecodeArrayBuilder& CastAccumulatorToJSObject(Register out);
- BytecodeArrayBuilder& CastAccumulatorToName(Register out);
- BytecodeArrayBuilder& CastAccumulatorToNumber(Register out);
+ // Converts accumulator and stores result in register |out|.
+ BytecodeArrayBuilder& ConvertAccumulatorToObject(Register out);
+ BytecodeArrayBuilder& ConvertAccumulatorToName(Register out);
+ BytecodeArrayBuilder& ConvertAccumulatorToNumber(Register out);
// Flow Control.
BytecodeArrayBuilder& Bind(BytecodeLabel* label);
@@ -250,11 +255,10 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeArrayBuilder& JumpIfNotHole(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
+ BytecodeArrayBuilder& JumpLoop(BytecodeLabel* label, int loop_depth);
BytecodeArrayBuilder& StackCheck(int position);
- BytecodeArrayBuilder& OsrPoll(int loop_depth);
-
BytecodeArrayBuilder& Throw();
BytecodeArrayBuilder& ReThrow();
BytecodeArrayBuilder& Return();
@@ -264,10 +268,10 @@ class BytecodeArrayBuilder final : public ZoneObject {
// Complex flow control.
BytecodeArrayBuilder& ForInPrepare(Register receiver,
- Register cache_info_triple);
- BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
+ RegisterList cache_info_triple);
+ BytecodeArrayBuilder& ForInContinue(Register index, Register cache_length);
BytecodeArrayBuilder& ForInNext(Register receiver, Register index,
- Register cache_type_array_pair,
+ RegisterList cache_type_array_pair,
int feedback_slot);
BytecodeArrayBuilder& ForInStep(Register index);
@@ -292,20 +296,55 @@ class BytecodeArrayBuilder final : public ZoneObject {
void InitializeReturnPosition(FunctionLiteral* literal);
- void SetStatementPosition(Statement* stmt);
- void SetExpressionPosition(Expression* expr);
- void SetExpressionAsStatementPosition(Expression* expr);
+ void SetStatementPosition(Statement* stmt) {
+ if (stmt->position() == kNoSourcePosition) return;
+ latest_source_info_.MakeStatementPosition(stmt->position());
+ }
+
+ void SetExpressionPosition(Expression* expr) {
+ if (expr->position() == kNoSourcePosition) return;
+ if (!latest_source_info_.is_statement()) {
+ // Ensure the current expression position is overwritten with the
+ // latest value.
+ latest_source_info_.MakeExpressionPosition(expr->position());
+ }
+ }
+
+ void SetExpressionAsStatementPosition(Expression* expr) {
+ if (expr->position() == kNoSourcePosition) return;
+ latest_source_info_.MakeStatementPosition(expr->position());
+ }
+
+ bool RequiresImplicitReturn() const { return !return_seen_in_block_; }
// Accessors
- TemporaryRegisterAllocator* temporary_register_allocator() {
- return &temporary_allocator_;
+ BytecodeRegisterAllocator* register_allocator() {
+ return &register_allocator_;
}
- const TemporaryRegisterAllocator* temporary_register_allocator() const {
- return &temporary_allocator_;
+ const BytecodeRegisterAllocator* register_allocator() const {
+ return &register_allocator_;
}
Zone* zone() const { return zone_; }
- void EnsureReturn();
+ private:
+ friend class BytecodeRegisterAllocator;
+
+ INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3));
+ INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2));
+ INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1));
+ INLINE(void Output(Bytecode bytecode, uint32_t operand0));
+ INLINE(void Output(Bytecode bytecode));
+
+ INLINE(void OutputJump(Bytecode bytecode, BytecodeLabel* label));
+ INLINE(void OutputJump(Bytecode bytecode, uint32_t operand0,
+ BytecodeLabel* label));
+
+ bool RegisterIsValid(Register reg) const;
+ bool OperandsAreValid(Bytecode bytecode, int operand_count,
+ uint32_t operand0 = 0, uint32_t operand1 = 0,
+ uint32_t operand2 = 0, uint32_t operand3 = 0) const;
static uint32_t RegisterOperand(Register reg) {
return static_cast<uint32_t>(reg.ToOperand());
@@ -325,40 +364,6 @@ class BytecodeArrayBuilder final : public ZoneObject {
return static_cast<uint32_t>(value);
}
- private:
- friend class BytecodeRegisterAllocator;
-
- static Bytecode BytecodeForBinaryOperation(Token::Value op);
- static Bytecode BytecodeForCountOperation(Token::Value op);
- static Bytecode BytecodeForCompareOperation(Token::Value op);
- static Bytecode BytecodeForStoreNamedProperty(LanguageMode language_mode);
- static Bytecode BytecodeForStoreKeyedProperty(LanguageMode language_mode);
- static Bytecode BytecodeForLoadGlobal(TypeofMode typeof_mode);
- static Bytecode BytecodeForStoreGlobal(LanguageMode language_mode);
- static Bytecode BytecodeForStoreLookupSlot(LanguageMode language_mode);
- static Bytecode BytecodeForCreateArguments(CreateArgumentsType type);
- static Bytecode BytecodeForDelete(LanguageMode language_mode);
- static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
-
- void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2, uint32_t operand3);
- void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2);
- void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
- void Output(Bytecode bytecode, uint32_t operand0);
- void Output(Bytecode bytecode);
-
- BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
- BytecodeLabel* label);
-
- bool RegisterIsValid(Register reg) const;
- bool OperandsAreValid(Bytecode bytecode, int operand_count,
- uint32_t operand0 = 0, uint32_t operand1 = 0,
- uint32_t operand2 = 0, uint32_t operand3 = 0) const;
-
- // Attach latest source position to |node|.
- void AttachSourceInfo(BytecodeNode* node);
-
// Set position for return.
void SetReturnPosition();
@@ -395,11 +400,13 @@ class BytecodeArrayBuilder final : public ZoneObject {
int local_register_count_;
int context_register_count_;
int return_position_;
- TemporaryRegisterAllocator temporary_allocator_;
+ BytecodeRegisterAllocator register_allocator_;
BytecodeArrayWriter bytecode_array_writer_;
BytecodePipelineStage* pipeline_;
BytecodeSourceInfo latest_source_info_;
+ static int const kNoFeedbackSlot = 0;
+
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
};
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index 84c0028342..e596b11a05 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -97,6 +97,13 @@ uint32_t BytecodeArrayIterator::GetFlagOperand(int operand_index) const {
return GetUnsignedOperand(operand_index, OperandType::kFlag8);
}
+uint32_t BytecodeArrayIterator::GetUnsignedImmediateOperand(
+ int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kUImm);
+ return GetUnsignedOperand(operand_index, OperandType::kUImm);
+}
+
int32_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
OperandType::kImm);
@@ -133,11 +140,11 @@ int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
const OperandType* operand_types =
Bytecodes::GetOperandTypes(current_bytecode());
- DCHECK(Bytecodes::IsRegisterOperandType(operand_types[operand_index]));
- if (operand_types[operand_index + 1] == OperandType::kRegCount) {
+ OperandType operand_type = operand_types[operand_index];
+ DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+ if (operand_type == OperandType::kRegList) {
return GetRegisterCountOperand(operand_index + 1);
} else {
- OperandType operand_type = operand_types[operand_index];
return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
}
}
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 0f7c6c7df9..09226252cc 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -31,6 +31,7 @@ class BytecodeArrayIterator {
}
uint32_t GetFlagOperand(int operand_index) const;
+ uint32_t GetUnsignedImmediateOperand(int operand_index) const;
int32_t GetImmediateOperand(int operand_index) const;
uint32_t GetIndexOperand(int operand_index) const;
uint32_t GetRegisterCountOperand(int operand_index) const;
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 6694a3697c..fb3876819e 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -21,27 +21,23 @@ BytecodeArrayWriter::BytecodeArrayWriter(
Zone* zone, ConstantArrayBuilder* constant_array_builder,
SourcePositionTableBuilder::RecordingMode source_position_mode)
: bytecodes_(zone),
- max_register_count_(0),
unbound_jumps_(0),
source_position_table_builder_(zone, source_position_mode),
- constant_array_builder_(constant_array_builder) {}
+ constant_array_builder_(constant_array_builder) {
+ bytecodes_.reserve(512); // Derived via experimentation.
+}
// override
BytecodeArrayWriter::~BytecodeArrayWriter() {}
// override
Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
- Isolate* isolate, int fixed_register_count, int parameter_count,
+ Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) {
DCHECK_EQ(0, unbound_jumps_);
int bytecode_size = static_cast<int>(bytecodes()->size());
-
- // All locals need a frame slot for the debugger, but may not be
- // present in generated code.
- int frame_size_for_locals = fixed_register_count * kPointerSize;
- int frame_size_used = max_register_count() * kPointerSize;
- int frame_size = std::max(frame_size_for_locals, frame_size_used);
+ int frame_size = register_count * kPointerSize;
Handle<FixedArray> constant_pool =
constant_array_builder()->ToFixedArray(isolate);
Handle<BytecodeArray> bytecode_array = isolate->factory()->NewBytecodeArray(
@@ -104,116 +100,48 @@ void BytecodeArrayWriter::UpdateSourcePositionTable(
}
}
-namespace {
-
-OperandScale ScaleForScalableByteOperand(OperandSize operand_size) {
- STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
- static_cast<int>(OperandScale::kSingle));
- STATIC_ASSERT(static_cast<int>(OperandSize::kShort) ==
- static_cast<int>(OperandScale::kDouble));
- STATIC_ASSERT(static_cast<int>(OperandSize::kQuad) ==
- static_cast<int>(OperandScale::kQuadruple));
- return static_cast<OperandScale>(operand_size);
-}
-
-OperandScale OperandScaleForScalableSignedByte(uint32_t operand_value) {
- int32_t signed_operand = static_cast<int32_t>(operand_value);
- OperandSize bytes_required = Bytecodes::SizeForSignedOperand(signed_operand);
- return ScaleForScalableByteOperand(bytes_required);
-}
-
-OperandScale OperandScaleForScalableUnsignedByte(uint32_t operand_value) {
- OperandSize bytes_required = Bytecodes::SizeForUnsignedOperand(operand_value);
- return ScaleForScalableByteOperand(bytes_required);
-}
-
-OperandScale GetOperandScale(const BytecodeNode* const node) {
- const OperandTypeInfo* operand_type_infos =
- Bytecodes::GetOperandTypeInfos(node->bytecode());
- OperandScale operand_scale = OperandScale::kSingle;
- int operand_count = node->operand_count();
- for (int i = 0; i < operand_count; ++i) {
- switch (operand_type_infos[i]) {
- case OperandTypeInfo::kScalableSignedByte: {
- uint32_t operand = node->operand(i);
- operand_scale =
- std::max(operand_scale, OperandScaleForScalableSignedByte(operand));
- break;
- }
- case OperandTypeInfo::kScalableUnsignedByte: {
- uint32_t operand = node->operand(i);
- operand_scale = std::max(operand_scale,
- OperandScaleForScalableUnsignedByte(operand));
- break;
- }
- case OperandTypeInfo::kFixedUnsignedByte:
- case OperandTypeInfo::kFixedUnsignedShort:
- break;
- case OperandTypeInfo::kNone:
- UNREACHABLE();
- break;
- }
- }
- return operand_scale;
-}
-
-} // namespace
-
void BytecodeArrayWriter::EmitBytecode(const BytecodeNode* const node) {
DCHECK_NE(node->bytecode(), Bytecode::kIllegal);
- uint8_t buffer[kMaxSizeOfPackedBytecode];
- uint8_t* buffer_limit = buffer;
+ Bytecode bytecode = node->bytecode();
+ OperandScale operand_scale = node->operand_scale();
- OperandScale operand_scale = GetOperandScale(node);
if (operand_scale != OperandScale::kSingle) {
Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
- *buffer_limit++ = Bytecodes::ToByte(prefix);
+ bytecodes()->push_back(Bytecodes::ToByte(prefix));
}
-
- Bytecode bytecode = node->bytecode();
- *buffer_limit++ = Bytecodes::ToByte(bytecode);
+ bytecodes()->push_back(Bytecodes::ToByte(bytecode));
const uint32_t* const operands = node->operands();
- const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
- const int operand_count = Bytecodes::NumberOfOperands(bytecode);
+ const int operand_count = node->operand_count();
+ const OperandSize* operand_sizes =
+ Bytecodes::GetOperandSizes(bytecode, operand_scale);
for (int i = 0; i < operand_count; ++i) {
- OperandSize operand_size =
- Bytecodes::SizeOfOperand(operand_types[i], operand_scale);
- switch (operand_size) {
+ switch (operand_sizes[i]) {
case OperandSize::kNone:
UNREACHABLE();
break;
case OperandSize::kByte:
- *buffer_limit++ = static_cast<uint8_t>(operands[i]);
+ bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
break;
case OperandSize::kShort: {
- WriteUnalignedUInt16(buffer_limit, operands[i]);
- buffer_limit += 2;
+ uint16_t operand = static_cast<uint16_t>(operands[i]);
+ const uint8_t* raw_operand = reinterpret_cast<const uint8_t*>(&operand);
+ bytecodes()->push_back(raw_operand[0]);
+ bytecodes()->push_back(raw_operand[1]);
break;
}
case OperandSize::kQuad: {
- WriteUnalignedUInt32(buffer_limit, operands[i]);
- buffer_limit += 4;
+ const uint8_t* raw_operand =
+ reinterpret_cast<const uint8_t*>(&operands[i]);
+ bytecodes()->push_back(raw_operand[0]);
+ bytecodes()->push_back(raw_operand[1]);
+ bytecodes()->push_back(raw_operand[2]);
+ bytecodes()->push_back(raw_operand[3]);
break;
}
}
-
- int count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]);
- if (count == 0) {
- continue;
- }
- // NB operand_types is terminated by OperandType::kNone so
- // operand_types[i + 1] is valid whilst i < operand_count.
- if (operand_types[i + 1] == OperandType::kRegCount) {
- count = static_cast<int>(operands[i]);
- }
- Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
- max_register_count_ = std::max(max_register_count_, reg.index() + count);
}
-
- DCHECK_LE(buffer_limit, buffer + sizeof(buffer));
- bytecodes()->insert(bytecodes()->end(), buffer, buffer_limit);
}
// static
@@ -247,18 +175,17 @@ void BytecodeArrayWriter::PatchJumpWith8BitOperand(size_t jump_location,
DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
size_t operand_location = jump_location + 1;
DCHECK_EQ(bytecodes()->at(operand_location), k8BitJumpPlaceholder);
- if (Bytecodes::SizeForSignedOperand(delta) == OperandSize::kByte) {
- // The jump fits within the range of an Imm operand, so cancel
+ if (Bytecodes::ScaleForSignedOperand(delta) == OperandScale::kSingle) {
+ // The jump fits within the range of an Imm8 operand, so cancel
// the reservation and jump directly.
constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
bytecodes()->at(operand_location) = static_cast<uint8_t>(delta);
} else {
- // The jump does not fit within the range of an Imm operand, so
+ // The jump does not fit within the range of an Imm8 operand, so
// commit reservation putting the offset into the constant pool,
// and update the jump instruction and operand.
size_t entry = constant_array_builder()->CommitReservedEntry(
OperandSize::kByte, Smi::FromInt(delta));
- DCHECK_LE(entry, kMaxUInt32);
DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
OperandSize::kByte);
jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
@@ -273,14 +200,21 @@ void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
size_t operand_location = jump_location + 1;
uint8_t operand_bytes[2];
- if (Bytecodes::SizeForSignedOperand(delta) <= OperandSize::kShort) {
+ if (Bytecodes::ScaleForSignedOperand(delta) <= OperandScale::kDouble) {
+ // The jump fits within the range of an Imm16 operand, so cancel
+ // the reservation and jump directly.
constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
} else {
- jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
- bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
+ // The jump does not fit within the range of an Imm16 operand, so
+ // commit reservation putting the offset into the constant pool,
+ // and update the jump instruction and operand.
size_t entry = constant_array_builder()->CommitReservedEntry(
OperandSize::kShort, Smi::FromInt(delta));
+ DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
+ OperandSize::kShort);
+ jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+ bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
}
DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
@@ -351,13 +285,14 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
// Label has been bound already so this is a backwards jump.
size_t abs_delta = current_offset - label->offset();
int delta = -static_cast<int>(abs_delta);
- OperandSize operand_size = Bytecodes::SizeForSignedOperand(delta);
- if (operand_size > OperandSize::kByte) {
+ OperandScale operand_scale = Bytecodes::ScaleForSignedOperand(delta);
+ if (operand_scale > OperandScale::kSingle) {
// Adjust for scaling byte prefix for wide jump offset.
DCHECK_LE(delta, 0);
delta -= 1;
}
- node->set_bytecode(node->bytecode(), delta);
+ DCHECK_EQ(Bytecode::kJumpLoop, node->bytecode());
+ node->set_bytecode(node->bytecode(), delta, node->operand(1));
} else {
// The label has not yet been bound so this is a forward reference
// that will be patched when the label is bound. We create a
@@ -369,6 +304,7 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
label->set_referrer(current_offset);
OperandSize reserved_operand_size =
constant_array_builder()->CreateReservedEntry();
+ DCHECK_NE(Bytecode::kJumpLoop, node->bytecode());
switch (reserved_operand_size) {
case OperandSize::kNone:
UNREACHABLE();
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index 17fe3d4732..712fcb9837 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -33,7 +33,7 @@ class BytecodeArrayWriter final : public BytecodePipelineStage {
void BindLabel(BytecodeLabel* label) override;
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
Handle<BytecodeArray> ToBytecodeArray(
- Isolate* isolate, int fixed_register_count, int parameter_count,
+ Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) override;
private:
@@ -69,10 +69,8 @@ class BytecodeArrayWriter final : public BytecodePipelineStage {
ConstantArrayBuilder* constant_array_builder() {
return constant_array_builder_;
}
- int max_register_count() { return max_register_count_; }
ZoneVector<uint8_t> bytecodes_;
- int max_register_count_;
int unbound_jumps_;
SourcePositionTableBuilder source_position_table_builder_;
ConstantArrayBuilder* constant_array_builder_;
diff --git a/deps/v8/src/interpreter/bytecode-dead-code-optimizer.cc b/deps/v8/src/interpreter/bytecode-dead-code-optimizer.cc
index 5d301c76ce..848036c01e 100644
--- a/deps/v8/src/interpreter/bytecode-dead-code-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-dead-code-optimizer.cc
@@ -14,10 +14,10 @@ BytecodeDeadCodeOptimizer::BytecodeDeadCodeOptimizer(
// override
Handle<BytecodeArray> BytecodeDeadCodeOptimizer::ToBytecodeArray(
- Isolate* isolate, int fixed_register_count, int parameter_count,
+ Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) {
- return next_stage_->ToBytecodeArray(isolate, fixed_register_count,
- parameter_count, handler_table);
+ return next_stage_->ToBytecodeArray(isolate, register_count, parameter_count,
+ handler_table);
}
// override
diff --git a/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h b/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h
index 8a9732cb3f..188d610890 100644
--- a/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h
@@ -24,7 +24,7 @@ class BytecodeDeadCodeOptimizer final : public BytecodePipelineStage,
void BindLabel(BytecodeLabel* label) override;
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
Handle<BytecodeArray> ToBytecodeArray(
- Isolate* isolate, int fixed_register_count, int parameter_count,
+ Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) override;
private:
diff --git a/deps/v8/src/interpreter/bytecode-decoder.cc b/deps/v8/src/interpreter/bytecode-decoder.cc
index 74c5806ef5..49751897ee 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.cc
+++ b/deps/v8/src/interpreter/bytecode-decoder.cc
@@ -23,6 +23,15 @@ Register BytecodeDecoder::DecodeRegisterOperand(const uint8_t* operand_start,
}
// static
+RegisterList BytecodeDecoder::DecodeRegisterListOperand(
+ const uint8_t* operand_start, uint32_t count, OperandType operand_type,
+ OperandScale operand_scale) {
+ Register first_reg =
+ DecodeRegisterOperand(operand_start, operand_type, operand_scale);
+ return RegisterList(first_reg.index(), static_cast<int>(count));
+}
+
+// static
int32_t BytecodeDecoder::DecodeSignedOperand(const uint8_t* operand_start,
OperandType operand_type,
OperandScale operand_scale) {
@@ -94,7 +103,6 @@ std::ostream& BytecodeDecoder::Decode(std::ostream& os,
if (Bytecodes::IsDebugBreak(bytecode)) return os;
int number_of_operands = Bytecodes::NumberOfOperands(bytecode);
- int range = 0;
for (int i = 0; i < number_of_operands; i++) {
OperandType op_type = Bytecodes::GetOperandType(bytecode, i);
int operand_offset =
@@ -102,11 +110,8 @@ std::ostream& BytecodeDecoder::Decode(std::ostream& os,
const uint8_t* operand_start =
&bytecode_start[prefix_offset + operand_offset];
switch (op_type) {
- case interpreter::OperandType::kRegCount:
- os << "#"
- << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
- break;
case interpreter::OperandType::kIdx:
+ case interpreter::OperandType::kUImm:
case interpreter::OperandType::kRuntimeId:
case interpreter::OperandType::kIntrinsicId:
os << "["
@@ -121,7 +126,6 @@ std::ostream& BytecodeDecoder::Decode(std::ostream& os,
os << "#"
<< DecodeUnsignedOperand(operand_start, op_type, operand_scale);
break;
- case interpreter::OperandType::kMaybeReg:
case interpreter::OperandType::kReg:
case interpreter::OperandType::kRegOut: {
Register reg =
@@ -129,19 +133,40 @@ std::ostream& BytecodeDecoder::Decode(std::ostream& os,
os << reg.ToString(parameter_count);
break;
}
- case interpreter::OperandType::kRegOutTriple:
- range += 1;
+ case interpreter::OperandType::kRegOutTriple: {
+ RegisterList reg_list =
+ DecodeRegisterListOperand(operand_start, 3, op_type, operand_scale);
+ os << reg_list.first_register().ToString(parameter_count) << "-"
+ << reg_list.last_register().ToString(parameter_count);
+ break;
+ }
case interpreter::OperandType::kRegOutPair:
case interpreter::OperandType::kRegPair: {
- range += 1;
- Register first_reg =
- DecodeRegisterOperand(operand_start, op_type, operand_scale);
- Register last_reg = Register(first_reg.index() + range);
- os << first_reg.ToString(parameter_count) << "-"
- << last_reg.ToString(parameter_count);
+ RegisterList reg_list =
+ DecodeRegisterListOperand(operand_start, 2, op_type, operand_scale);
+ os << reg_list.first_register().ToString(parameter_count) << "-"
+ << reg_list.last_register().ToString(parameter_count);
+ break;
+ }
+ case interpreter::OperandType::kRegList: {
+ DCHECK_LT(i, number_of_operands - 1);
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, i + 1),
+ OperandType::kRegCount);
+ int reg_count_offset =
+ Bytecodes::GetOperandOffset(bytecode, i + 1, operand_scale);
+ const uint8_t* reg_count_operand =
+ &bytecode_start[prefix_offset + reg_count_offset];
+ uint32_t count = DecodeUnsignedOperand(
+ reg_count_operand, OperandType::kRegCount, operand_scale);
+ RegisterList reg_list = DecodeRegisterListOperand(
+ operand_start, count, op_type, operand_scale);
+ os << reg_list.first_register().ToString(parameter_count) << "-"
+ << reg_list.last_register().ToString(parameter_count);
+ i++; // Skip kRegCount.
break;
}
case interpreter::OperandType::kNone:
+ case interpreter::OperandType::kRegCount: // Dealt with in kRegList.
UNREACHABLE();
break;
}
diff --git a/deps/v8/src/interpreter/bytecode-decoder.h b/deps/v8/src/interpreter/bytecode-decoder.h
index 6613179d0c..d1749efb7f 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.h
+++ b/deps/v8/src/interpreter/bytecode-decoder.h
@@ -21,6 +21,12 @@ class BytecodeDecoder final {
OperandType operand_type,
OperandScale operand_scale);
+ // Decodes a register list operand in a byte array.
+ static RegisterList DecodeRegisterListOperand(const uint8_t* operand_start,
+ uint32_t count,
+ OperandType operand_type,
+ OperandScale operand_scale);
+
// Decodes a signed operand in a byte array.
static int32_t DecodeSignedOperand(const uint8_t* operand_start,
OperandType operand_type,
diff --git a/deps/v8/src/interpreter/bytecode-flags.cc b/deps/v8/src/interpreter/bytecode-flags.cc
index 9b25dbd230..158af13ea7 100644
--- a/deps/v8/src/interpreter/bytecode-flags.cc
+++ b/deps/v8/src/interpreter/bytecode-flags.cc
@@ -11,6 +11,14 @@ namespace internal {
namespace interpreter {
// static
+uint8_t CreateArrayLiteralFlags::Encode(bool use_fast_shallow_clone,
+ int runtime_flags) {
+ uint8_t result = FlagsBits::encode(runtime_flags);
+ result |= FastShallowCloneBit::encode(use_fast_shallow_clone);
+ return result;
+}
+
+// static
uint8_t CreateObjectLiteralFlags::Encode(bool fast_clone_supported,
int properties_count,
int runtime_flags) {
diff --git a/deps/v8/src/interpreter/bytecode-flags.h b/deps/v8/src/interpreter/bytecode-flags.h
index 1068d8a9d9..6e87ce20b2 100644
--- a/deps/v8/src/interpreter/bytecode-flags.h
+++ b/deps/v8/src/interpreter/bytecode-flags.h
@@ -11,6 +11,17 @@ namespace v8 {
namespace internal {
namespace interpreter {
+class CreateArrayLiteralFlags {
+ public:
+ class FlagsBits : public BitField8<int, 0, 3> {};
+ class FastShallowCloneBit : public BitField8<bool, FlagsBits::kNext, 1> {};
+
+ static uint8_t Encode(bool use_fast_shallow_clone, int runtime_flags);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CreateArrayLiteralFlags);
+};
+
class CreateObjectLiteralFlags {
public:
class FlagsBits : public BitField8<int, 0, 3> {};
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 6ff43a4170..db5a596b85 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -4,15 +4,16 @@
#include "src/interpreter/bytecode-generator.h"
+#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/code-stubs.h"
+#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/control-flow-builders.h"
#include "src/objects.h"
-#include "src/parsing/parser.h"
#include "src/parsing/token.h"
namespace v8 {
@@ -216,10 +217,10 @@ class BytecodeGenerator::ControlScopeForTopLevel final
case CMD_CONTINUE:
UNREACHABLE();
case CMD_RETURN:
- generator()->builder()->Return();
+ generator()->BuildReturn();
return true;
case CMD_RETHROW:
- generator()->builder()->ReThrow();
+ generator()->BuildReThrow();
return true;
}
return false;
@@ -310,7 +311,7 @@ class BytecodeGenerator::ControlScopeForTryCatch final
case CMD_RETURN:
break;
case CMD_RETHROW:
- generator()->builder()->ReThrow();
+ generator()->BuildReThrow();
return true;
}
return false;
@@ -373,75 +374,35 @@ class BytecodeGenerator::RegisterAllocationScope {
public:
explicit RegisterAllocationScope(BytecodeGenerator* generator)
: generator_(generator),
- outer_(generator->register_allocator()),
- allocator_(builder()->zone(),
- builder()->temporary_register_allocator()) {
- generator_->set_register_allocator(this);
- }
+ outer_next_register_index_(
+ generator->register_allocator()->next_register_index()) {}
virtual ~RegisterAllocationScope() {
- generator_->set_register_allocator(outer_);
- }
-
- Register NewRegister() {
- RegisterAllocationScope* current_scope = generator()->register_allocator();
- if ((current_scope == this) ||
- (current_scope->outer() == this &&
- !current_scope->allocator_.HasConsecutiveAllocations())) {
- // Regular case - Allocating registers in current or outer context.
- // VisitForRegisterValue allocates register in outer context.
- return allocator_.NewRegister();
- } else {
- // If it is required to allocate a register other than current or outer
- // scopes, allocate a new temporary register. It might be expensive to
- // walk the full context chain and compute the list of consecutive
- // reservations in the innerscopes.
- UNIMPLEMENTED();
- return Register::invalid_value();
- }
- }
-
- void PrepareForConsecutiveAllocations(int count) {
- allocator_.PrepareForConsecutiveAllocations(count);
+ generator_->register_allocator()->ReleaseRegisters(
+ outer_next_register_index_);
}
- Register NextConsecutiveRegister() {
- return allocator_.NextConsecutiveRegister();
- }
-
- bool RegisterIsAllocatedInThisScope(Register reg) const {
- return allocator_.RegisterIsAllocatedInThisScope(reg);
- }
-
- RegisterAllocationScope* outer() const { return outer_; }
-
private:
- BytecodeGenerator* generator() const { return generator_; }
- BytecodeArrayBuilder* builder() const { return generator_->builder(); }
-
BytecodeGenerator* generator_;
- RegisterAllocationScope* outer_;
- BytecodeRegisterAllocator allocator_;
+ int outer_next_register_index_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationScope);
};
-// Scoped base class for determining where the result of an expression
-// is stored.
+// Scoped base class for determining how the result of an expression will be
+// used.
class BytecodeGenerator::ExpressionResultScope {
public:
ExpressionResultScope(BytecodeGenerator* generator, Expression::Context kind)
: generator_(generator),
kind_(kind),
outer_(generator->execution_result()),
- allocator_(generator),
- result_identified_(false) {
+ allocator_(generator) {
generator_->set_execution_result(this);
}
virtual ~ExpressionResultScope() {
generator_->set_execution_result(outer_);
- DCHECK(result_identified() || generator_->HasStackOverflow());
}
bool IsEffect() const { return kind_ == Expression::kEffect; }
@@ -453,28 +414,11 @@ class BytecodeGenerator::ExpressionResultScope {
return reinterpret_cast<TestResultScope*>(this);
}
- virtual void SetResultInAccumulator() = 0;
- virtual void SetResultInRegister(Register reg) = 0;
-
- protected:
- ExpressionResultScope* outer() const { return outer_; }
- BytecodeArrayBuilder* builder() const { return generator_->builder(); }
- BytecodeGenerator* generator() const { return generator_; }
- const RegisterAllocationScope* allocator() const { return &allocator_; }
-
- void set_result_identified() {
- DCHECK(!result_identified());
- result_identified_ = true;
- }
-
- bool result_identified() const { return result_identified_; }
-
private:
BytecodeGenerator* generator_;
Expression::Context kind_;
ExpressionResultScope* outer_;
RegisterAllocationScope allocator_;
- bool result_identified_;
DISALLOW_COPY_AND_ASSIGN(ExpressionResultScope);
};
@@ -485,61 +429,15 @@ class BytecodeGenerator::EffectResultScope final
: public ExpressionResultScope {
public:
explicit EffectResultScope(BytecodeGenerator* generator)
- : ExpressionResultScope(generator, Expression::kEffect) {
- set_result_identified();
- }
-
- virtual void SetResultInAccumulator() {}
- virtual void SetResultInRegister(Register reg) {}
-};
-
-// Scoped class used when the result of the current expression to be
-// evaluated should go into the interpreter's accumulator register.
-class BytecodeGenerator::AccumulatorResultScope final
- : public ExpressionResultScope {
- public:
- explicit AccumulatorResultScope(BytecodeGenerator* generator)
- : ExpressionResultScope(generator, Expression::kValue) {}
-
- virtual void SetResultInAccumulator() { set_result_identified(); }
-
- virtual void SetResultInRegister(Register reg) {
- builder()->LoadAccumulatorWithRegister(reg);
- set_result_identified();
- }
+ : ExpressionResultScope(generator, Expression::kEffect) {}
};
// Scoped class used when the result of the current expression to be
-// evaluated should go into an interpreter register.
-class BytecodeGenerator::RegisterResultScope final
- : public ExpressionResultScope {
+// evaluated should go into the interpreter's accumulator.
+class BytecodeGenerator::ValueResultScope final : public ExpressionResultScope {
public:
- explicit RegisterResultScope(BytecodeGenerator* generator)
+ explicit ValueResultScope(BytecodeGenerator* generator)
: ExpressionResultScope(generator, Expression::kValue) {}
-
- virtual void SetResultInAccumulator() {
- result_register_ = allocator()->outer()->NewRegister();
- builder()->StoreAccumulatorInRegister(result_register_);
- set_result_identified();
- }
-
- virtual void SetResultInRegister(Register reg) {
- DCHECK(builder()->RegisterIsParameterOrLocal(reg) ||
- (builder()->TemporaryRegisterIsLive(reg) &&
- !allocator()->RegisterIsAllocatedInThisScope(reg)));
- result_register_ = reg;
- set_result_identified();
- }
-
- Register ResultRegister() {
- if (generator()->HasStackOverflow() && !result_identified()) {
- SetResultInAccumulator();
- }
- return result_register_;
- }
-
- private:
- Register result_register_;
};
// Scoped class used when the result of the current expression to be
@@ -554,18 +452,10 @@ class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
fallthrough_(fallthrough),
result_consumed_by_test_(false) {}
- virtual void SetResultInAccumulator() { set_result_identified(); }
-
- virtual void SetResultInRegister(Register reg) {
- builder()->LoadAccumulatorWithRegister(reg);
- set_result_identified();
- }
-
// Used when code special cases for TestResultScope and consumes any
// possible value by testing and jumping to a then/else label.
void SetResultConsumedByTest() {
result_consumed_by_test_ = true;
- set_result_identified();
}
bool ResultConsumedByTest() { return result_consumed_by_test_; }
@@ -677,22 +567,17 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
execution_control_(nullptr),
execution_context_(nullptr),
execution_result_(nullptr),
- register_allocator_(nullptr),
generator_resume_points_(info->literal()->yield_count(), info->zone()),
generator_state_(),
loop_depth_(0),
home_object_symbol_(info->isolate()->factory()->home_object_symbol()),
prototype_string_(info->isolate()->factory()->prototype_string()) {
- InitializeAstVisitor(info->isolate()->stack_guard()->real_climit());
}
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
- // Create an inner HandleScope to avoid unnecessarily canonicalizing handles
- // created as part of bytecode finalization.
- HandleScope scope(isolate);
AllocateDeferredConstants();
if (HasStackOverflow()) return Handle<BytecodeArray>();
- return scope.CloseAndEscape(builder()->ToBytecodeArray(isolate));
+ return builder()->ToBytecodeArray(isolate);
}
void BytecodeGenerator::AllocateDeferredConstants() {
@@ -726,11 +611,13 @@ void BytecodeGenerator::AllocateDeferredConstants() {
}
}
-void BytecodeGenerator::GenerateBytecode() {
+void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
+ InitializeAstVisitor(stack_limit);
+
// Initialize the incoming context.
ContextScope incoming_context(this, scope(), false);
@@ -744,12 +631,11 @@ void BytecodeGenerator::GenerateBytecode() {
VisitGeneratorPrologue();
}
- // Build function context only if there are context allocated variables.
if (scope()->NeedsContext()) {
// Push a new inner context scope for the function.
- VisitNewLocalFunctionContext();
+ BuildNewLocalActivationContext();
ContextScope local_function_context(this, scope(), false);
- VisitBuildLocalActivationContext();
+ BuildLocalActivationContextInitialization();
GenerateBytecodeBody();
} else {
GenerateBytecodeBody();
@@ -763,7 +649,13 @@ void BytecodeGenerator::GenerateBytecode() {
if (!label.is_bound()) builder()->Bind(&label);
}
- builder()->EnsureReturn();
+ // Emit an implicit return instruction in case control flow can fall off the
+ // end of the function without an explicit return being present on all paths.
+ if (builder()->RequiresImplicitReturn()) {
+ builder()->LoadUndefined();
+ BuildReturn();
+ }
+ DCHECK(!builder()->RequiresImplicitReturn());
}
void BytecodeGenerator::GenerateBytecodeBody() {
@@ -771,8 +663,7 @@ void BytecodeGenerator::GenerateBytecodeBody() {
VisitArgumentsObject(scope()->arguments());
// Build rest arguments array if it is used.
- int rest_index;
- Variable* rest_parameter = scope()->rest_parameter(&rest_index);
+ Variable* rest_parameter = scope()->rest_parameter();
VisitRestArgumentsArray(rest_parameter);
// Build assignment to {.this_function} variable if it is used.
@@ -781,10 +672,8 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// Build assignment to {new.target} variable if it is used.
VisitNewTargetVariable(scope()->new_target_var());
- // TODO(rmcilroy): Emit tracing call if requested to do so.
- if (FLAG_trace) {
- UNIMPLEMENTED();
- }
+ // Emit tracing call if requested to do so.
+ if (FLAG_trace) builder()->CallRuntime(Runtime::kTraceEnter);
// Visit declarations within the function scope.
VisitDeclarations(scope()->declarations());
@@ -829,14 +718,6 @@ void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
loop_builder->LoopHeader(&resume_points_in_loop);
- // Insert an explicit {OsrPoll} right after the loop header, to trigger
- // on-stack replacement when armed for the given loop nesting depth.
- if (FLAG_ignition_osr) {
- // TODO(4764): Merge this with another bytecode (e.g. {Jump} back edge).
- int level = Min(loop_depth_, AbstractCode::kMaxLoopNestingMarker - 1);
- builder()->OsrPoll(level);
- }
-
if (stmt->yield_count() > 0) {
// If we are not resuming, fall through to loop body.
// If we are resuming, perform state dispatch.
@@ -882,7 +763,7 @@ void BytecodeGenerator::VisitGeneratorPrologue() {
void BytecodeGenerator::VisitBlock(Block* stmt) {
// Visit declarations and statements.
if (stmt->scope() != nullptr && stmt->scope()->NeedsContext()) {
- VisitNewLocalBlockContext(stmt->scope());
+ BuildNewLocalBlockContext(stmt->scope());
ContextScope scope(this, stmt->scope());
VisitBlockDeclarationsAndStatements(stmt);
} else {
@@ -903,7 +784,6 @@ void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) {
void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
Variable* variable = decl->proxy()->var();
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
@@ -926,8 +806,9 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
break;
case VariableLocation::CONTEXT:
if (variable->binding_needs_init()) {
+ DCHECK_EQ(0, execution_context()->ContextChainDepth(variable->scope()));
builder()->LoadTheHole().StoreContextSlot(execution_context()->reg(),
- variable->index());
+ variable->index(), 0);
}
break;
case VariableLocation::LOOKUP: {
@@ -939,18 +820,24 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
builder()
->LoadLiteral(variable->name())
.StoreAccumulatorInRegister(name)
- .CallRuntime(Runtime::kDeclareEvalVar, name, 1);
+ .CallRuntime(Runtime::kDeclareEvalVar, name);
break;
}
case VariableLocation::MODULE:
- UNREACHABLE();
+ if (variable->IsExport() && variable->binding_needs_init()) {
+ builder()->LoadTheHole();
+ VisitVariableAssignment(variable, Token::INIT,
+ FeedbackVectorSlot::Invalid());
+ }
+ // Nothing to do for imports.
+ break;
}
}
void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
Variable* variable = decl->proxy()->var();
+ DCHECK(variable->mode() == LET || variable->mode() == VAR);
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
globals_builder()->AddFunctionDeclaration(slot, decl->fun());
@@ -959,8 +846,6 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
VisitForAccumulatorValue(decl->fun());
- DCHECK(variable->mode() == LET || variable->mode() == VAR ||
- variable->mode() == CONST);
VisitVariableAssignment(variable, Token::INIT,
FeedbackVectorSlot::Invalid());
break;
@@ -968,23 +853,27 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
case VariableLocation::CONTEXT: {
DCHECK_EQ(0, execution_context()->ContextChainDepth(variable->scope()));
VisitForAccumulatorValue(decl->fun());
- builder()->StoreContextSlot(execution_context()->reg(),
- variable->index());
+ builder()->StoreContextSlot(execution_context()->reg(), variable->index(),
+ 0);
break;
}
case VariableLocation::LOOKUP: {
- register_allocator()->PrepareForConsecutiveAllocations(2);
- Register name = register_allocator()->NextConsecutiveRegister();
- Register literal = register_allocator()->NextConsecutiveRegister();
- builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
-
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadLiteral(variable->name())
+ .StoreAccumulatorInRegister(args[0]);
VisitForAccumulatorValue(decl->fun());
- builder()->StoreAccumulatorInRegister(literal).CallRuntime(
- Runtime::kDeclareEvalFunction, name, 2);
+ builder()->StoreAccumulatorInRegister(args[1]).CallRuntime(
+ Runtime::kDeclareEvalFunction, args);
break;
}
case VariableLocation::MODULE:
- UNREACHABLE();
+ DCHECK_EQ(variable->mode(), LET);
+ DCHECK(variable->IsExport());
+ VisitForAccumulatorValue(decl->fun());
+ VisitVariableAssignment(variable, Token::INIT,
+ FeedbackVectorSlot::Invalid());
+ break;
}
}
@@ -1002,20 +891,15 @@ void BytecodeGenerator::VisitDeclarations(
builder()->AllocateConstantPoolEntry());
int encoded_flags = info()->GetDeclareGlobalsFlags();
- register_allocator()->PrepareForConsecutiveAllocations(3);
-
- Register pairs = register_allocator()->NextConsecutiveRegister();
- Register flags = register_allocator()->NextConsecutiveRegister();
- Register function = register_allocator()->NextConsecutiveRegister();
-
// Emit code to declare globals.
+ RegisterList args = register_allocator()->NewRegisterList(3);
builder()
->LoadConstantPoolEntry(globals_builder()->constant_pool_entry())
- .StoreAccumulatorInRegister(pairs)
+ .StoreAccumulatorInRegister(args[0])
.LoadLiteral(Smi::FromInt(encoded_flags))
- .StoreAccumulatorInRegister(flags)
- .MoveRegister(Register::function_closure(), function)
- .CallRuntime(Runtime::kDeclareGlobalsForInterpreter, pairs, 3);
+ .StoreAccumulatorInRegister(args[1])
+ .MoveRegister(Register::function_closure(), args[2])
+ .CallRuntime(Runtime::kDeclareGlobalsForInterpreter, args);
// Push and reset globals builder.
global_declarations_.push_back(globals_builder());
@@ -1097,7 +981,7 @@ void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
builder()->SetStatementPosition(stmt);
VisitForAccumulatorValue(stmt->expression());
- VisitNewLocalWithContext();
+ BuildNewLocalWithContext(stmt->scope());
VisitInScope(stmt->statement(), stmt->scope());
}
@@ -1126,7 +1010,9 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Perform label comparison as if via '===' with tag.
VisitForAccumulatorValue(clause->label());
- builder()->CompareOperation(Token::Value::EQ_STRICT, tag);
+ builder()->CompareOperation(
+ Token::Value::EQ_STRICT, tag,
+ feedback_index(clause->CompareOperationFeedbackSlot()));
switch_builder.Case(i);
}
@@ -1168,13 +1054,16 @@ void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
} else if (stmt->cond()->ToBooleanIsTrue()) {
VisitIterationHeader(stmt, &loop_builder);
VisitIterationBody(stmt, &loop_builder);
- loop_builder.JumpToHeader();
+ loop_builder.JumpToHeader(loop_depth_);
} else {
VisitIterationHeader(stmt, &loop_builder);
VisitIterationBody(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->cond());
- VisitForTest(stmt->cond(), loop_builder.header_labels(),
- loop_builder.break_labels(), TestFallthrough::kElse);
+ BytecodeLabels loop_backbranch(zone());
+ VisitForTest(stmt->cond(), &loop_backbranch, loop_builder.break_labels(),
+ TestFallthrough::kThen);
+ loop_backbranch.Bind(builder());
+ loop_builder.JumpToHeader(loop_depth_);
}
loop_builder.EndLoop();
}
@@ -1195,7 +1084,7 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
loop_body.Bind(builder());
}
VisitIterationBody(stmt, &loop_builder);
- loop_builder.JumpToHeader();
+ loop_builder.JumpToHeader(loop_depth_);
loop_builder.EndLoop();
}
@@ -1223,7 +1112,7 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
builder()->SetStatementPosition(stmt->next());
Visit(stmt->next());
}
- loop_builder.JumpToHeader();
+ loop_builder.JumpToHeader(loop_depth_);
loop_builder.EndLoop();
}
@@ -1265,36 +1154,28 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
}
case NAMED_SUPER_PROPERTY: {
RegisterAllocationScope register_scope(this);
- register_allocator()->PrepareForConsecutiveAllocations(4);
- Register receiver = register_allocator()->NextConsecutiveRegister();
- Register home_object = register_allocator()->NextConsecutiveRegister();
- Register name = register_allocator()->NextConsecutiveRegister();
- Register value = register_allocator()->NextConsecutiveRegister();
- builder()->StoreAccumulatorInRegister(value);
+ RegisterList args = register_allocator()->NewRegisterList(4);
+ builder()->StoreAccumulatorInRegister(args[3]);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), receiver);
- VisitForRegisterValue(super_property->home_object(), home_object);
+ VisitForRegisterValue(super_property->this_var(), args[0]);
+ VisitForRegisterValue(super_property->home_object(), args[1]);
builder()
->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
- .StoreAccumulatorInRegister(name);
- BuildNamedSuperPropertyStore(receiver, home_object, name, value);
+ .StoreAccumulatorInRegister(args[2])
+ .CallRuntime(StoreToSuperRuntimeId(), args);
break;
}
case KEYED_SUPER_PROPERTY: {
RegisterAllocationScope register_scope(this);
- register_allocator()->PrepareForConsecutiveAllocations(4);
- Register receiver = register_allocator()->NextConsecutiveRegister();
- Register home_object = register_allocator()->NextConsecutiveRegister();
- Register key = register_allocator()->NextConsecutiveRegister();
- Register value = register_allocator()->NextConsecutiveRegister();
- builder()->StoreAccumulatorInRegister(value);
+ RegisterList args = register_allocator()->NewRegisterList(4);
+ builder()->StoreAccumulatorInRegister(args[3]);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), receiver);
- VisitForRegisterValue(super_property->home_object(), home_object);
- VisitForRegisterValue(property->key(), key);
- BuildKeyedSuperPropertyStore(receiver, home_object, key, value);
+ VisitForRegisterValue(super_property->this_var(), args[0]);
+ VisitForRegisterValue(super_property->home_object(), args[1]);
+ VisitForRegisterValue(property->key(), args[2]);
+ builder()->CallRuntime(StoreKeyedToSuperRuntimeId(), args);
break;
}
}
@@ -1316,15 +1197,12 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->JumpIfUndefined(&subject_undefined_label);
builder()->JumpIfNull(&subject_null_label);
Register receiver = register_allocator()->NewRegister();
- builder()->CastAccumulatorToJSObject(receiver);
+ builder()->ConvertAccumulatorToObject(receiver);
- register_allocator()->PrepareForConsecutiveAllocations(3);
- Register cache_type = register_allocator()->NextConsecutiveRegister();
- Register cache_array = register_allocator()->NextConsecutiveRegister();
- Register cache_length = register_allocator()->NextConsecutiveRegister();
// Used as kRegTriple and kRegPair in ForInPrepare and ForInNext.
- USE(cache_array);
- builder()->ForInPrepare(receiver, cache_type);
+ RegisterList triple = register_allocator()->NewRegisterList(3);
+ Register cache_length = triple[2];
+ builder()->ForInPrepare(receiver, triple);
// Set up loop counter
Register index = register_allocator()->NewRegister();
@@ -1334,17 +1212,17 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The loop
VisitIterationHeader(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->each());
- builder()->ForInDone(index, cache_length);
- loop_builder.BreakIfTrue();
- DCHECK(Register::AreContiguous(cache_type, cache_array));
+ builder()->ForInContinue(index, cache_length);
+ loop_builder.BreakIfFalse();
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
- builder()->ForInNext(receiver, index, cache_type, feedback_index(slot));
+ builder()->ForInNext(receiver, index, triple.Truncate(2),
+ feedback_index(slot));
loop_builder.ContinueIfUndefined();
VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
VisitIterationBody(stmt, &loop_builder);
builder()->ForInStep(index);
builder()->StoreAccumulatorInRegister(index);
- loop_builder.JumpToHeader();
+ loop_builder.JumpToHeader(loop_depth_);
loop_builder.EndLoop();
builder()->Bind(&subject_null_label);
builder()->Bind(&subject_undefined_label);
@@ -1364,13 +1242,12 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
VisitForEffect(stmt->assign_each());
VisitIterationBody(stmt, &loop_builder);
- loop_builder.JumpToHeader();
+ loop_builder.JumpToHeader(loop_depth_);
loop_builder.EndLoop();
}
void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
TryCatchBuilder try_control_builder(builder(), stmt->catch_prediction());
- Register no_reg;
// Preserve the context in a dedicated register, so that it can be restored
// when the handler is entered by the stack-unwinding machinery.
@@ -1388,12 +1265,12 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
try_control_builder.EndTry();
// Create a catch scope that binds the exception.
- VisitNewLocalCatchContext(stmt->variable());
+ BuildNewLocalCatchContext(stmt->variable(), stmt->scope());
builder()->StoreAccumulatorInRegister(context);
// If requested, clear message object as we enter the catch block.
if (stmt->clear_pending_message()) {
- builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0);
+ builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage);
}
// Load the catch context into the accumulator.
@@ -1406,7 +1283,6 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
TryFinallyBuilder try_control_builder(builder(), stmt->catch_prediction());
- Register no_reg;
// We keep a record of all paths that enter the finally-block to be able to
// dispatch to the correct continuation point after the statements in the
@@ -1454,7 +1330,7 @@ void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// Clear message object as we enter the finally block.
builder()
- ->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0)
+ ->CallRuntime(Runtime::kInterpreterClearPendingMessage)
.StoreAccumulatorInRegister(message);
// Evaluate the finally-block.
@@ -1462,7 +1338,7 @@ void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
try_control_builder.EndFinally();
// Pending message object is restored on exit.
- builder()->CallRuntime(Runtime::kInterpreterSetPendingMessage, message, 1);
+ builder()->CallRuntime(Runtime::kInterpreterSetPendingMessage, message);
// Dynamic dispatch after the finally-block.
commands.ApplyDeferredCommands();
@@ -1479,16 +1355,15 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
size_t entry = builder()->AllocateConstantPoolEntry();
builder()->CreateClosure(entry, flags);
function_literals_.push_back(std::make_pair(expr, entry));
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
VisitClassLiteralForRuntimeDefinition(expr);
// Load the "prototype" from the constructor.
- register_allocator()->PrepareForConsecutiveAllocations(2);
- Register literal = register_allocator()->NextConsecutiveRegister();
- Register prototype = register_allocator()->NextConsecutiveRegister();
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ Register literal = args[0];
+ Register prototype = args[1];
FeedbackVectorSlot slot = expr->PrototypeSlot();
builder()
->StoreAccumulatorInRegister(literal)
@@ -1496,7 +1371,7 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
.StoreAccumulatorInRegister(prototype);
VisitClassLiteralProperties(expr, literal, prototype);
- builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
+ builder()->CallRuntime(Runtime::kToFastProperties, literal);
// Assign to class variable.
if (expr->class_variable_proxy() != nullptr) {
Variable* var = expr->class_variable_proxy()->var();
@@ -1505,49 +1380,37 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
: FeedbackVectorSlot::Invalid();
VisitVariableAssignment(var, Token::INIT, slot);
}
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitClassLiteralForRuntimeDefinition(
ClassLiteral* expr) {
- AccumulatorResultScope result_scope(this);
- register_allocator()->PrepareForConsecutiveAllocations(4);
- Register extends = register_allocator()->NextConsecutiveRegister();
- Register constructor = register_allocator()->NextConsecutiveRegister();
- Register start_position = register_allocator()->NextConsecutiveRegister();
- Register end_position = register_allocator()->NextConsecutiveRegister();
-
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(4);
VisitForAccumulatorValueOrTheHole(expr->extends());
- builder()->StoreAccumulatorInRegister(extends);
-
- VisitForAccumulatorValue(expr->constructor());
+ builder()->StoreAccumulatorInRegister(args[0]);
+ VisitForRegisterValue(expr->constructor(), args[1]);
builder()
- ->StoreAccumulatorInRegister(constructor)
- .LoadLiteral(Smi::FromInt(expr->start_position()))
- .StoreAccumulatorInRegister(start_position)
+ ->LoadLiteral(Smi::FromInt(expr->start_position()))
+ .StoreAccumulatorInRegister(args[2])
.LoadLiteral(Smi::FromInt(expr->end_position()))
- .StoreAccumulatorInRegister(end_position)
- .CallRuntime(Runtime::kDefineClass, extends, 4);
- result_scope.SetResultInAccumulator();
+ .StoreAccumulatorInRegister(args[3])
+ .CallRuntime(Runtime::kDefineClass, args);
}
void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
Register literal,
Register prototype) {
RegisterAllocationScope register_scope(this);
- register_allocator()->PrepareForConsecutiveAllocations(5);
- Register receiver = register_allocator()->NextConsecutiveRegister();
- Register key = register_allocator()->NextConsecutiveRegister();
- Register value = register_allocator()->NextConsecutiveRegister();
- Register attr = register_allocator()->NextConsecutiveRegister();
- Register set_function_name = register_allocator()->NextConsecutiveRegister();
+ RegisterList args = register_allocator()->NewRegisterList(5);
+ Register receiver = args[0], key = args[1], value = args[2], attr = args[3],
+ set_function_name = args[4];
bool attr_assigned = false;
Register old_receiver = Register::invalid_value();
// Create nodes to store method values into the literal.
for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
+ ClassLiteral::Property* property = expr->properties()->at(i);
// Set-up receiver.
Register new_receiver = property->is_static() ? literal : prototype;
@@ -1557,17 +1420,23 @@ void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
}
VisitForAccumulatorValue(property->key());
- builder()->CastAccumulatorToName(key);
- // The static prototype property is read only. We handle the non computed
- // property name case in the parser. Since this is the only case where we
- // need to check for an own read only property we special case this so we do
- // not need to do this for every property.
+ builder()->ConvertAccumulatorToName(key);
+
if (property->is_static() && property->is_computed_name()) {
- VisitClassLiteralStaticPrototypeWithComputedName(key);
+ // The static prototype property is read only. We handle the non computed
+ // property name case in the parser. Since this is the only case where we
+ // need to check for an own read only property we special case this so we
+ // do not need to do this for every property.
+ BytecodeLabel done;
+ builder()
+ ->LoadLiteral(prototype_string())
+ .CompareOperation(Token::Value::EQ_STRICT, key)
+ .JumpIfFalse(&done)
+ .CallRuntime(Runtime::kThrowStaticPrototypeError)
+ .Bind(&done);
}
- VisitForAccumulatorValue(property->value());
- builder()->StoreAccumulatorInRegister(value);
+ VisitForRegisterValue(property->value(), value);
VisitSetHomeObject(value, receiver, property);
if (!attr_assigned) {
@@ -1578,51 +1447,36 @@ void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
}
switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::PROTOTYPE:
- // Invalid properties for ES6 classes.
- UNREACHABLE();
- break;
- case ObjectLiteral::Property::COMPUTED: {
+ case ClassLiteral::Property::METHOD: {
builder()
->LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
- .StoreAccumulatorInRegister(set_function_name);
- builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral, receiver,
- 5);
+ .StoreAccumulatorInRegister(set_function_name)
+ .CallRuntime(Runtime::kDefineDataPropertyInLiteral, args);
break;
}
- case ObjectLiteral::Property::GETTER: {
+ case ClassLiteral::Property::GETTER: {
builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked,
- receiver, 4);
+ args.Truncate(4));
break;
}
- case ObjectLiteral::Property::SETTER: {
+ case ClassLiteral::Property::SETTER: {
builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked,
- receiver, 4);
+ args.Truncate(4));
+ break;
+ }
+ case ClassLiteral::Property::FIELD: {
+ UNREACHABLE();
break;
}
}
}
}
-void BytecodeGenerator::VisitClassLiteralStaticPrototypeWithComputedName(
- Register key) {
- BytecodeLabel done;
- builder()
- ->LoadLiteral(prototype_string())
- .CompareOperation(Token::Value::EQ_STRICT, key)
- .JumpIfFalse(&done)
- .CallRuntime(Runtime::kThrowStaticPrototypeError, Register(0), 0)
- .Bind(&done);
-}
-
void BytecodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
size_t entry = builder()->AllocateConstantPoolEntry();
builder()->CreateClosure(entry, NOT_TENURED);
native_function_literals_.push_back(std::make_pair(expr, entry));
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
@@ -1652,8 +1506,6 @@ void BytecodeGenerator::VisitConditional(Conditional* expr) {
VisitForAccumulatorValue(expr->else_expression());
builder()->Bind(&end_label);
}
-
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitLiteral(Literal* expr) {
@@ -1674,7 +1526,6 @@ void BytecodeGenerator::VisitLiteral(Literal* expr) {
} else {
builder()->LoadLiteral(raw_value->value());
}
- execution_result()->SetResultInAccumulator();
}
}
@@ -1682,7 +1533,6 @@ void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// Materialize a regular expression literal.
builder()->CreateRegExpLiteral(expr->pattern(), expr->literal_index(),
expr->flags());
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
@@ -1693,7 +1543,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
expr->ComputeFlags());
// Allocate in the outer scope since this register is used to return the
// expression's results to the caller.
- Register literal = register_allocator()->outer()->NewRegister();
+ Register literal = register_allocator()->NewRegister();
builder()->CreateObjectLiteral(expr->constant_properties(),
expr->literal_index(), flags, literal);
@@ -1737,23 +1587,17 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForEffect(property->value());
}
} else {
- register_allocator()->PrepareForConsecutiveAllocations(4);
- Register literal_argument =
- register_allocator()->NextConsecutiveRegister();
- Register key = register_allocator()->NextConsecutiveRegister();
- Register value = register_allocator()->NextConsecutiveRegister();
- Register language = register_allocator()->NextConsecutiveRegister();
-
- builder()->MoveRegister(literal, literal_argument);
- VisitForAccumulatorValue(property->key());
- builder()->StoreAccumulatorInRegister(key);
- VisitForAccumulatorValue(property->value());
- builder()->StoreAccumulatorInRegister(value);
+ RegisterList args = register_allocator()->NewRegisterList(4);
+
+ builder()->MoveRegister(literal, args[0]);
+ VisitForRegisterValue(property->key(), args[1]);
+ VisitForRegisterValue(property->value(), args[2]);
if (property->emit_store()) {
builder()
->LoadLiteral(Smi::FromInt(SLOPPY))
- .StoreAccumulatorInRegister(language)
- .CallRuntime(Runtime::kSetProperty, literal_argument, 4);
+ .StoreAccumulatorInRegister(args[3])
+ .CallRuntime(Runtime::kSetProperty, args);
+ Register value = args[2];
VisitSetHomeObject(value, literal, property);
}
}
@@ -1761,15 +1605,10 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
case ObjectLiteral::Property::PROTOTYPE: {
DCHECK(property->emit_store());
- register_allocator()->PrepareForConsecutiveAllocations(2);
- Register literal_argument =
- register_allocator()->NextConsecutiveRegister();
- Register value = register_allocator()->NextConsecutiveRegister();
-
- builder()->MoveRegister(literal, literal_argument);
- VisitForAccumulatorValue(property->value());
- builder()->StoreAccumulatorInRegister(value).CallRuntime(
- Runtime::kInternalSetPrototype, literal_argument, 2);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()->MoveRegister(literal, args[0]);
+ VisitForRegisterValue(property->value(), args[1]);
+ builder()->CallRuntime(Runtime::kInternalSetPrototype, args);
break;
}
case ObjectLiteral::Property::GETTER:
@@ -1790,23 +1629,15 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end(); ++it) {
RegisterAllocationScope inner_register_scope(this);
- register_allocator()->PrepareForConsecutiveAllocations(5);
- Register literal_argument = register_allocator()->NextConsecutiveRegister();
- Register name = register_allocator()->NextConsecutiveRegister();
- Register getter = register_allocator()->NextConsecutiveRegister();
- Register setter = register_allocator()->NextConsecutiveRegister();
- Register attr = register_allocator()->NextConsecutiveRegister();
-
- builder()->MoveRegister(literal, literal_argument);
- VisitForAccumulatorValue(it->first);
- builder()->StoreAccumulatorInRegister(name);
- VisitObjectLiteralAccessor(literal, it->second->getter, getter);
- VisitObjectLiteralAccessor(literal, it->second->setter, setter);
+ RegisterList args = register_allocator()->NewRegisterList(5);
+ builder()->MoveRegister(literal, args[0]);
+ VisitForRegisterValue(it->first, args[1]);
+ VisitObjectLiteralAccessor(literal, it->second->getter, args[2]);
+ VisitObjectLiteralAccessor(literal, it->second->setter, args[3]);
builder()
->LoadLiteral(Smi::FromInt(NONE))
- .StoreAccumulatorInRegister(attr)
- .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked,
- literal_argument, 5);
+ .StoreAccumulatorInRegister(args[4])
+ .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, args);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1824,66 +1655,68 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
DCHECK(property->emit_store());
- register_allocator()->PrepareForConsecutiveAllocations(2);
- Register literal_argument =
- register_allocator()->NextConsecutiveRegister();
- Register value = register_allocator()->NextConsecutiveRegister();
-
- builder()->MoveRegister(literal, literal_argument);
- VisitForAccumulatorValue(property->value());
- builder()->StoreAccumulatorInRegister(value).CallRuntime(
- Runtime::kInternalSetPrototype, literal_argument, 2);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()->MoveRegister(literal, args[0]);
+ VisitForRegisterValue(property->value(), args[1]);
+ builder()->CallRuntime(Runtime::kInternalSetPrototype, args);
continue;
}
- register_allocator()->PrepareForConsecutiveAllocations(5);
- Register literal_argument = register_allocator()->NextConsecutiveRegister();
- Register key = register_allocator()->NextConsecutiveRegister();
- Register value = register_allocator()->NextConsecutiveRegister();
- Register attr = register_allocator()->NextConsecutiveRegister();
- DCHECK(Register::AreContiguous(literal_argument, key, value, attr));
- Register set_function_name =
- register_allocator()->NextConsecutiveRegister();
-
- builder()->MoveRegister(literal, literal_argument);
- VisitForAccumulatorValue(property->key());
- builder()->CastAccumulatorToName(key);
- VisitForAccumulatorValue(property->value());
- builder()->StoreAccumulatorInRegister(value);
- VisitSetHomeObject(value, literal, property);
- builder()->LoadLiteral(Smi::FromInt(NONE)).StoreAccumulatorInRegister(attr);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::COMPUTED:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
+ RegisterList args = register_allocator()->NewRegisterList(5);
+ builder()->MoveRegister(literal, args[0]);
+ VisitForAccumulatorValue(property->key());
+ builder()->ConvertAccumulatorToName(args[1]);
+ VisitForRegisterValue(property->value(), args[2]);
+ VisitSetHomeObject(args[2], literal, property);
builder()
- ->LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
- .StoreAccumulatorInRegister(set_function_name);
- builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral,
- literal_argument, 5);
- break;
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE(); // Handled specially above.
+ ->LoadLiteral(Smi::FromInt(NONE))
+ .StoreAccumulatorInRegister(args[3])
+ .LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
+ .StoreAccumulatorInRegister(args[4]);
+ builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral, args);
break;
+ }
case ObjectLiteral::Property::GETTER:
- builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked,
- literal_argument, 4);
+ case ObjectLiteral::Property::SETTER: {
+ RegisterList args = register_allocator()->NewRegisterList(4);
+ builder()->MoveRegister(literal, args[0]);
+ VisitForAccumulatorValue(property->key());
+ builder()->ConvertAccumulatorToName(args[1]);
+ VisitForRegisterValue(property->value(), args[2]);
+ VisitSetHomeObject(args[2], literal, property);
+ builder()
+ ->LoadLiteral(Smi::FromInt(NONE))
+ .StoreAccumulatorInRegister(args[3]);
+ Runtime::FunctionId function_id =
+ property->kind() == ObjectLiteral::Property::GETTER
+ ? Runtime::kDefineGetterPropertyUnchecked
+ : Runtime::kDefineSetterPropertyUnchecked;
+ builder()->CallRuntime(function_id, args);
break;
- case ObjectLiteral::Property::SETTER:
- builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked,
- literal_argument, 4);
+ }
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE(); // Handled specially above.
break;
}
}
- execution_result()->SetResultInRegister(literal);
+ builder()->LoadAccumulatorWithRegister(literal);
}
void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Deep-copy the literal boilerplate.
+ int runtime_flags = expr->ComputeFlags();
+ bool use_fast_shallow_clone =
+ (runtime_flags & ArrayLiteral::kShallowElements) != 0 &&
+ expr->values()->length() <= JSArray::kInitialMaxFastElementArray;
+ uint8_t flags =
+ CreateArrayLiteralFlags::Encode(use_fast_shallow_clone, runtime_flags);
builder()->CreateArrayLiteral(expr->constant_elements(),
- expr->literal_index(),
- expr->ComputeFlags(true));
+ expr->literal_index(), flags);
Register index, literal;
// Evaluate all the non-constant subexpressions and store them into the
@@ -1915,7 +1748,6 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Restore literal array into accumulator.
builder()->LoadAccumulatorWithRegister(literal);
}
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
@@ -1953,7 +1785,6 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
BuildHoleCheckForVariableLoad(variable);
break;
}
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
builder()->LoadGlobal(feedback_index(slot), typeof_mode);
break;
@@ -1964,85 +1795,82 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
Register context_reg;
if (context) {
context_reg = context->reg();
+ depth = 0;
} else {
- context_reg = register_allocator()->NewRegister();
- // Walk the context chain to find the context at the given depth.
- // TODO(rmcilroy): Perform this work in a bytecode handler once we have
- // a generic mechanism for performing jumps in interpreter.cc.
- // TODO(mythria): Also update bytecode graph builder with correct depth
- // when this changes.
- builder()
- ->LoadAccumulatorWithRegister(execution_context()->reg())
- .StoreAccumulatorInRegister(context_reg);
- for (int i = 0; i < depth; ++i) {
- builder()
- ->LoadContextSlot(context_reg, Context::PREVIOUS_INDEX)
- .StoreAccumulatorInRegister(context_reg);
- }
+ context_reg = execution_context()->reg();
}
- builder()->LoadContextSlot(context_reg, variable->index());
+ builder()->LoadContextSlot(context_reg, variable->index(), depth);
BuildHoleCheckForVariableLoad(variable);
break;
}
case VariableLocation::LOOKUP: {
- builder()->LoadLookupSlot(variable->name(), typeof_mode);
+ switch (variable->mode()) {
+ case DYNAMIC_LOCAL: {
+ Variable* local_variable = variable->local_if_not_shadowed();
+ int depth =
+ execution_context()->ContextChainDepth(local_variable->scope());
+ builder()->LoadLookupContextSlot(variable->name(), typeof_mode,
+ local_variable->index(), depth);
+ BuildHoleCheckForVariableLoad(variable);
+ break;
+ }
+ case DYNAMIC_GLOBAL: {
+ int depth = scope()->ContextChainLengthUntilOutermostSloppyEval();
+ builder()->LoadLookupGlobalSlot(variable->name(), typeof_mode,
+ feedback_index(slot), depth);
+ break;
+ }
+ default:
+ builder()->LoadLookupSlot(variable->name(), typeof_mode);
+ }
+ break;
+ }
+ case VariableLocation::MODULE: {
+ ModuleDescriptor* descriptor = scope()->GetModuleScope()->module();
+ if (variable->IsExport()) {
+ auto it = descriptor->regular_exports().find(variable->raw_name());
+ DCHECK(it != descriptor->regular_exports().end());
+ Register export_name = register_allocator()->NewRegister();
+ builder()
+ ->LoadLiteral(it->second->export_name->string())
+ .StoreAccumulatorInRegister(export_name)
+ .CallRuntime(Runtime::kLoadModuleExport, export_name);
+ } else {
+ auto it = descriptor->regular_imports().find(variable->raw_name());
+ DCHECK(it != descriptor->regular_imports().end());
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadLiteral(it->second->import_name->string())
+ .StoreAccumulatorInRegister(args[0])
+ .LoadLiteral(Smi::FromInt(it->second->module_request))
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kLoadModuleImport, args);
+ }
+ BuildHoleCheckForVariableLoad(variable);
break;
}
- case VariableLocation::MODULE:
- UNREACHABLE();
}
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitVariableLoadForAccumulatorValue(
Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
- AccumulatorResultScope accumulator_result(this);
+ ValueResultScope accumulator_result(this);
VisitVariableLoad(variable, slot, typeof_mode);
}
-Register BytecodeGenerator::VisitVariableLoadForRegisterValue(
- Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
- RegisterResultScope register_scope(this);
- VisitVariableLoad(variable, slot, typeof_mode);
- return register_scope.ResultRegister();
-}
-
-void BytecodeGenerator::BuildNamedSuperPropertyLoad(Register receiver,
- Register home_object,
- Register name) {
- DCHECK(Register::AreContiguous(receiver, home_object, name));
- builder()->CallRuntime(Runtime::kLoadFromSuper, receiver, 3);
-}
-
-void BytecodeGenerator::BuildKeyedSuperPropertyLoad(Register receiver,
- Register home_object,
- Register key) {
- DCHECK(Register::AreContiguous(receiver, home_object, key));
- builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, receiver, 3);
-}
-
-void BytecodeGenerator::BuildNamedSuperPropertyStore(Register receiver,
- Register home_object,
- Register name,
- Register value) {
- DCHECK(Register::AreContiguous(receiver, home_object, name, value));
- Runtime::FunctionId function_id = is_strict(language_mode())
- ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy;
- builder()->CallRuntime(function_id, receiver, 4);
+void BytecodeGenerator::BuildReturn() {
+ if (FLAG_trace) {
+ RegisterAllocationScope register_scope(this);
+ Register result = register_allocator()->NewRegister();
+ // Runtime returns {result} value, preserving accumulator.
+ builder()->StoreAccumulatorInRegister(result).CallRuntime(
+ Runtime::kTraceExit, result);
+ }
+ builder()->Return();
}
-void BytecodeGenerator::BuildKeyedSuperPropertyStore(Register receiver,
- Register home_object,
- Register key,
- Register value) {
- DCHECK(Register::AreContiguous(receiver, home_object, key, value));
- Runtime::FunctionId function_id = is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy;
- builder()->CallRuntime(function_id, receiver, 4);
-}
+void BytecodeGenerator::BuildReThrow() { builder()->ReThrow(); }
void BytecodeGenerator::BuildAbort(BailoutReason bailout_reason) {
RegisterAllocationScope register_scope(this);
@@ -2050,14 +1878,14 @@ void BytecodeGenerator::BuildAbort(BailoutReason bailout_reason) {
builder()
->LoadLiteral(Smi::FromInt(static_cast<int>(bailout_reason)))
.StoreAccumulatorInRegister(reason)
- .CallRuntime(Runtime::kAbort, reason, 1);
+ .CallRuntime(Runtime::kAbort, reason);
}
void BytecodeGenerator::BuildThrowReferenceError(Handle<String> name) {
RegisterAllocationScope register_scope(this);
Register name_reg = register_allocator()->NewRegister();
builder()->LoadLiteral(name).StoreAccumulatorInRegister(name_reg).CallRuntime(
- Runtime::kThrowReferenceError, name_reg, 1);
+ Runtime::kThrowReferenceError, name_reg);
}
void BytecodeGenerator::BuildThrowIfHole(Handle<String> name) {
@@ -2083,7 +1911,6 @@ void BytecodeGenerator::BuildThrowIfNotHole(Handle<String> name) {
void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
Token::Value op) {
- DCHECK(variable->mode() != CONST_LEGACY);
if (op != Token::INIT) {
// Perform an initialization check for let/const declared variables.
// E.g. let x = (x = 20); is not allowed.
@@ -2128,20 +1955,13 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
builder()->LoadAccumulatorWithRegister(value_temp);
}
- if ((mode == CONST || mode == CONST_LEGACY) && op != Token::INIT) {
- if (mode == CONST || is_strict(language_mode())) {
- builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
- 0);
- }
- // Non-initializing assignments to legacy constants are ignored
- // in sloppy mode. Break here to avoid storing into variable.
- break;
+ if (mode != CONST || op == Token::INIT) {
+ builder()->StoreAccumulatorInRegister(destination);
+ } else if (variable->throw_on_const_assignment(language_mode())) {
+ builder()->CallRuntime(Runtime::kThrowConstAssignError);
}
-
- builder()->StoreAccumulatorInRegister(destination);
break;
}
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
builder()->StoreGlobal(variable->name(), feedback_index(slot),
language_mode());
@@ -2154,24 +1974,9 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
if (context) {
context_reg = context->reg();
+ depth = 0;
} else {
- Register value_temp = register_allocator()->NewRegister();
- context_reg = register_allocator()->NewRegister();
- // Walk the context chain to find the context at the given depth.
- // TODO(rmcilroy): Perform this work in a bytecode handler once we have
- // a generic mechanism for performing jumps in interpreter.cc.
- // TODO(mythria): Also update bytecode graph builder with correct depth
- // when this changes.
- builder()
- ->StoreAccumulatorInRegister(value_temp)
- .LoadAccumulatorWithRegister(execution_context()->reg())
- .StoreAccumulatorInRegister(context_reg);
- for (int i = 0; i < depth; ++i) {
- builder()
- ->LoadContextSlot(context_reg, Context::PREVIOUS_INDEX)
- .StoreAccumulatorInRegister(context_reg);
- }
- builder()->LoadAccumulatorWithRegister(value_temp);
+ context_reg = execution_context()->reg();
}
if (hole_check_required) {
@@ -2179,38 +1984,57 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
Register value_temp = register_allocator()->NewRegister();
builder()
->StoreAccumulatorInRegister(value_temp)
- .LoadContextSlot(context_reg, variable->index());
+ .LoadContextSlot(context_reg, variable->index(), depth);
BuildHoleCheckForVariableAssignment(variable, op);
builder()->LoadAccumulatorWithRegister(value_temp);
}
- if ((mode == CONST || mode == CONST_LEGACY) && op != Token::INIT) {
- if (mode == CONST || is_strict(language_mode())) {
- builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
- 0);
- }
- // Non-initializing assignments to legacy constants are ignored
- // in sloppy mode. Break here to avoid storing into variable.
- break;
+ if (mode != CONST || op == Token::INIT) {
+ builder()->StoreContextSlot(context_reg, variable->index(), depth);
+ } else if (variable->throw_on_const_assignment(language_mode())) {
+ builder()->CallRuntime(Runtime::kThrowConstAssignError);
}
-
- builder()->StoreContextSlot(context_reg, variable->index());
break;
}
case VariableLocation::LOOKUP: {
- DCHECK_NE(CONST_LEGACY, variable->mode());
builder()->StoreLookupSlot(variable->name(), language_mode());
break;
}
- case VariableLocation::MODULE:
- UNREACHABLE();
+ case VariableLocation::MODULE: {
+ DCHECK(IsDeclaredVariableMode(mode));
+
+ if (mode == CONST && op != Token::INIT) {
+ builder()->CallRuntime(Runtime::kThrowConstAssignError);
+ break;
+ }
+
+ // If we don't throw above, we know that we're dealing with an
+ // export because imports are const and we do not generate initializing
+ // assignments for them.
+ DCHECK(variable->IsExport());
+
+ ModuleDescriptor* mod = scope()->GetModuleScope()->module();
+ // There may be several export names for this local name, but it doesn't
+ // matter which one we pick, as they all map to the same cell.
+ auto it = mod->regular_exports().find(variable->raw_name());
+ DCHECK(it != mod->regular_exports().end());
+
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->StoreAccumulatorInRegister(args[1])
+ .LoadLiteral(it->second->export_name->string())
+ .StoreAccumulatorInRegister(args[0])
+ .CallRuntime(Runtime::kStoreModuleExport, args);
+ break;
+ }
}
}
void BytecodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
- Register object, key, home_object, value;
+ Register object, key;
+ RegisterList super_property_args;
Handle<String> name;
// Left-hand side can only be a property, a global or a variable slot.
@@ -2229,44 +2053,29 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
case KEYED_PROPERTY: {
object = VisitForRegisterValue(property->obj());
- if (expr->is_compound()) {
- // Use VisitForAccumulator and store to register so that the key is
- // still in the accumulator for loading the old value below.
- key = register_allocator()->NewRegister();
- VisitForAccumulatorValue(property->key());
- builder()->StoreAccumulatorInRegister(key);
- } else {
- key = VisitForRegisterValue(property->key());
- }
+ key = VisitForRegisterValue(property->key());
break;
}
case NAMED_SUPER_PROPERTY: {
- register_allocator()->PrepareForConsecutiveAllocations(4);
- object = register_allocator()->NextConsecutiveRegister();
- home_object = register_allocator()->NextConsecutiveRegister();
- key = register_allocator()->NextConsecutiveRegister();
- value = register_allocator()->NextConsecutiveRegister();
+ super_property_args = register_allocator()->NewRegisterList(4);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), object);
- VisitForRegisterValue(super_property->home_object(), home_object);
+ VisitForRegisterValue(super_property->this_var(), super_property_args[0]);
+ VisitForRegisterValue(super_property->home_object(),
+ super_property_args[1]);
builder()
->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
- .StoreAccumulatorInRegister(key);
+ .StoreAccumulatorInRegister(super_property_args[2]);
break;
}
case KEYED_SUPER_PROPERTY: {
- register_allocator()->PrepareForConsecutiveAllocations(4);
- object = register_allocator()->NextConsecutiveRegister();
- home_object = register_allocator()->NextConsecutiveRegister();
- key = register_allocator()->NextConsecutiveRegister();
- value = register_allocator()->NextConsecutiveRegister();
- builder()->StoreAccumulatorInRegister(value);
+ super_property_args = register_allocator()->NewRegisterList(4);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), object);
- VisitForRegisterValue(super_property->home_object(), home_object);
- VisitForRegisterValue(property->key(), key);
+ VisitForRegisterValue(super_property->this_var(), super_property_args[0]);
+ VisitForRegisterValue(super_property->home_object(),
+ super_property_args[1]);
+ VisitForRegisterValue(property->key(), super_property_args[2]);
break;
}
}
@@ -2274,17 +2083,16 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
// Evaluate the value and potentially handle compound assignments by loading
// the left-hand side value and performing a binary operation.
if (expr->is_compound()) {
- Register old_value;
+ Register old_value = register_allocator()->NewRegister();
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->target()->AsVariableProxy();
- old_value = VisitVariableLoadForRegisterValue(
- proxy->var(), proxy->VariableFeedbackSlot());
+ VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
+ builder()->StoreAccumulatorInRegister(old_value);
break;
}
case NAMED_PROPERTY: {
FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
- old_value = register_allocator()->NewRegister();
builder()
->LoadNamedProperty(object, name, feedback_index(slot))
.StoreAccumulatorInRegister(old_value);
@@ -2294,22 +2102,23 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
// Key is already in accumulator at this point due to evaluating the
// LHS above.
FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
- old_value = register_allocator()->NewRegister();
builder()
->LoadKeyedProperty(object, feedback_index(slot))
.StoreAccumulatorInRegister(old_value);
break;
}
case NAMED_SUPER_PROPERTY: {
- old_value = register_allocator()->NewRegister();
- BuildNamedSuperPropertyLoad(object, home_object, key);
- builder()->StoreAccumulatorInRegister(old_value);
+ builder()
+ ->CallRuntime(Runtime::kLoadFromSuper,
+ super_property_args.Truncate(3))
+ .StoreAccumulatorInRegister(old_value);
break;
}
case KEYED_SUPER_PROPERTY: {
- old_value = register_allocator()->NewRegister();
- BuildKeyedSuperPropertyLoad(object, home_object, key);
- builder()->StoreAccumulatorInRegister(old_value);
+ builder()
+ ->CallRuntime(Runtime::kLoadKeyedFromSuper,
+ super_property_args.Truncate(3))
+ .StoreAccumulatorInRegister(old_value);
break;
}
}
@@ -2342,17 +2151,18 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
language_mode());
break;
case NAMED_SUPER_PROPERTY: {
- builder()->StoreAccumulatorInRegister(value);
- BuildNamedSuperPropertyStore(object, home_object, key, value);
+ builder()
+ ->StoreAccumulatorInRegister(super_property_args[3])
+ .CallRuntime(StoreToSuperRuntimeId(), super_property_args);
break;
}
case KEYED_SUPER_PROPERTY: {
- builder()->StoreAccumulatorInRegister(value);
- BuildKeyedSuperPropertyStore(object, home_object, key, value);
+ builder()
+ ->StoreAccumulatorInRegister(super_property_args[3])
+ .CallRuntime(StoreKeyedToSuperRuntimeId(), super_property_args);
break;
}
}
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitYield(Yield* expr) {
@@ -2382,12 +2192,12 @@ void BytecodeGenerator::VisitYield(Yield* expr) {
Register input = register_allocator()->NewRegister();
builder()
- ->CallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos, generator, 1)
+ ->CallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos, generator)
.StoreAccumulatorInRegister(input);
Register resume_mode = register_allocator()->NewRegister();
builder()
- ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode, generator, 1)
+ ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode, generator)
.StoreAccumulatorInRegister(resume_mode);
// Now dispatch on resume mode.
@@ -2407,14 +2217,12 @@ void BytecodeGenerator::VisitYield(Yield* expr) {
builder()->Bind(&resume_with_return);
{
- register_allocator()->PrepareForConsecutiveAllocations(2);
- Register value = register_allocator()->NextConsecutiveRegister();
- Register done = register_allocator()->NextConsecutiveRegister();
+ RegisterList args = register_allocator()->NewRegisterList(2);
builder()
- ->MoveRegister(input, value)
+ ->MoveRegister(input, args[0])
.LoadTrue()
- .StoreAccumulatorInRegister(done)
- .CallRuntime(Runtime::kInlineCreateIterResultObject, value, 2);
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kInlineCreateIterResultObject, args);
execution_control()->ReturnAccumulator();
}
@@ -2430,18 +2238,12 @@ void BytecodeGenerator::VisitYield(Yield* expr) {
builder()->Bind(&resume_with_next);
builder()->LoadAccumulatorWithRegister(input);
}
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitThrow(Throw* expr) {
VisitForAccumulatorValue(expr->exception());
builder()->SetExpressionPosition(expr);
builder()->Throw();
- // Throw statements are modeled as expressions instead of statements. These
- // are converted from assignment statements in Rewriter::ReWrite pass. An
- // assignment statement expects a value in the accumulator. This is a hack to
- // avoid DCHECK fails assert accumulator has been set.
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
@@ -2469,56 +2271,45 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
VisitKeyedSuperPropertyLoad(expr, Register::invalid_value());
break;
}
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitPropertyLoadForAccumulator(Register obj,
Property* expr) {
- AccumulatorResultScope result_scope(this);
+ ValueResultScope result_scope(this);
VisitPropertyLoad(obj, expr);
}
void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
Register opt_receiver_out) {
RegisterAllocationScope register_scope(this);
- register_allocator()->PrepareForConsecutiveAllocations(3);
-
- Register receiver, home_object, name;
- receiver = register_allocator()->NextConsecutiveRegister();
- home_object = register_allocator()->NextConsecutiveRegister();
- name = register_allocator()->NextConsecutiveRegister();
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), receiver);
- VisitForRegisterValue(super_property->home_object(), home_object);
+ RegisterList args = register_allocator()->NewRegisterList(3);
+ VisitForRegisterValue(super_property->this_var(), args[0]);
+ VisitForRegisterValue(super_property->home_object(), args[1]);
builder()
->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
- .StoreAccumulatorInRegister(name);
- BuildNamedSuperPropertyLoad(receiver, home_object, name);
+ .StoreAccumulatorInRegister(args[2])
+ .CallRuntime(Runtime::kLoadFromSuper, args);
if (opt_receiver_out.is_valid()) {
- builder()->MoveRegister(receiver, opt_receiver_out);
+ builder()->MoveRegister(args[0], opt_receiver_out);
}
}
void BytecodeGenerator::VisitKeyedSuperPropertyLoad(Property* property,
Register opt_receiver_out) {
RegisterAllocationScope register_scope(this);
- register_allocator()->PrepareForConsecutiveAllocations(3);
-
- Register receiver, home_object, key;
- receiver = register_allocator()->NextConsecutiveRegister();
- home_object = register_allocator()->NextConsecutiveRegister();
- key = register_allocator()->NextConsecutiveRegister();
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), receiver);
- VisitForRegisterValue(super_property->home_object(), home_object);
- VisitForRegisterValue(property->key(), key);
- BuildKeyedSuperPropertyLoad(receiver, home_object, key);
+ RegisterList args = register_allocator()->NewRegisterList(3);
+ VisitForRegisterValue(super_property->this_var(), args[0]);
+ VisitForRegisterValue(super_property->home_object(), args[1]);
+ VisitForRegisterValue(property->key(), args[2]);
+ builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, args);
if (opt_receiver_out.is_valid()) {
- builder()->MoveRegister(receiver, opt_receiver_out);
+ builder()->MoveRegister(args[0], opt_receiver_out);
}
}
@@ -2533,36 +2324,13 @@ void BytecodeGenerator::VisitProperty(Property* expr) {
}
}
-Register BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args) {
- if (args->length() == 0) {
- return Register();
+void BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args,
+ RegisterList arg_regs,
+ size_t first_argument_register) {
+ // Visit arguments.
+ for (int i = 0; i < static_cast<int>(args->length()); i++) {
+ VisitForRegisterValue(args->at(i), arg_regs[first_argument_register + i]);
}
-
- // Visit arguments and place in a contiguous block of temporary
- // registers. Return the first temporary register corresponding to
- // the first argument.
- //
- // NB the caller may have already called
- // PrepareForConsecutiveAllocations() with args->length() + N. The
- // second call here will be a no-op provided there have been N or
- // less calls to NextConsecutiveRegister(). Otherwise, the arguments
- // here will be consecutive, but they will not be consecutive with
- // earlier consecutive allocations made by the caller.
- register_allocator()->PrepareForConsecutiveAllocations(args->length());
-
- // Visit for first argument that goes into returned register
- Register first_arg = register_allocator()->NextConsecutiveRegister();
- VisitForAccumulatorValue(args->at(0));
- builder()->StoreAccumulatorInRegister(first_arg);
-
- // Visit remaining arguments
- for (int i = 1; i < static_cast<int>(args->length()); i++) {
- Register ith_arg = register_allocator()->NextConsecutiveRegister();
- VisitForAccumulatorValue(args->at(i));
- builder()->StoreAccumulatorInRegister(ith_arg);
- DCHECK(ith_arg.index() - i == first_arg.index());
- }
- return first_arg;
}
void BytecodeGenerator::VisitCall(Call* expr) {
@@ -2573,18 +2341,15 @@ void BytecodeGenerator::VisitCall(Call* expr) {
return VisitCallSuper(expr);
}
- // Prepare the callee and the receiver to the function call. This depends on
- // the semantics of the underlying call type.
+ Register callee = register_allocator()->NewRegister();
- // The receiver and arguments need to be allocated consecutively for
- // Call(). We allocate the callee and receiver consecutively for calls to
- // %LoadLookupSlotForCall. Future optimizations could avoid this there are
- // no arguments or the receiver and arguments are already consecutive.
- ZoneList<Expression*>* args = expr->arguments();
- register_allocator()->PrepareForConsecutiveAllocations(args->length() + 2);
- Register callee = register_allocator()->NextConsecutiveRegister();
- Register receiver = register_allocator()->NextConsecutiveRegister();
+ // Add an argument register for the receiver.
+ RegisterList args =
+ register_allocator()->NewRegisterList(expr->arguments()->length() + 1);
+ Register receiver = args[0];
+ // Prepare the callee and the receiver to the function call. This depends on
+ // the semantics of the underlying call type.
switch (call_type) {
case Call::NAMED_PROPERTY_CALL:
case Call::KEYED_PROPERTY_CALL: {
@@ -2613,12 +2378,13 @@ void BytecodeGenerator::VisitCall(Call* expr) {
// Call %LoadLookupSlotForCall to get the callee and receiver.
DCHECK(Register::AreContiguous(callee, receiver));
+ RegisterList result_pair(callee.index(), 2);
Variable* variable = callee_expr->AsVariableProxy()->var();
builder()
->LoadLiteral(variable->name())
.StoreAccumulatorInRegister(name)
- .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name, 1,
- callee);
+ .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name,
+ result_pair);
break;
}
// Fall through.
@@ -2626,8 +2392,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
}
case Call::OTHER_CALL: {
builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
- VisitForAccumulatorValue(callee_expr);
- builder()->StoreAccumulatorInRegister(callee);
+ VisitForRegisterValue(callee_expr, callee);
break;
}
case Call::NAMED_SUPER_PROPERTY_CALL: {
@@ -2647,42 +2412,34 @@ void BytecodeGenerator::VisitCall(Call* expr) {
break;
}
- // Evaluate all arguments to the function call and store in sequential
+ // Evaluate all arguments to the function call and store in sequential args
// registers.
- Register arg = VisitArguments(args);
- CHECK(args->length() == 0 || arg.index() == receiver.index() + 1);
+ VisitArguments(expr->arguments(), args, 1);
// Resolve callee for a potential direct eval call. This block will mutate the
// callee value.
- if (call_type == Call::POSSIBLY_EVAL_CALL && args->length() > 0) {
+ if (call_type == Call::POSSIBLY_EVAL_CALL &&
+ expr->arguments()->length() > 0) {
RegisterAllocationScope inner_register_scope(this);
- register_allocator()->PrepareForConsecutiveAllocations(6);
- Register callee_for_eval = register_allocator()->NextConsecutiveRegister();
- Register source = register_allocator()->NextConsecutiveRegister();
- Register function = register_allocator()->NextConsecutiveRegister();
- Register language = register_allocator()->NextConsecutiveRegister();
- Register eval_scope_position =
- register_allocator()->NextConsecutiveRegister();
- Register eval_position = register_allocator()->NextConsecutiveRegister();
-
// Set up arguments for ResolvePossiblyDirectEval by copying callee, source
// strings and function closure, and loading language and
// position.
+ RegisterList runtime_call_args = register_allocator()->NewRegisterList(6);
builder()
- ->MoveRegister(callee, callee_for_eval)
- .MoveRegister(arg, source)
- .MoveRegister(Register::function_closure(), function)
+ ->MoveRegister(callee, runtime_call_args[0])
+ .MoveRegister(args[1], runtime_call_args[1])
+ .MoveRegister(Register::function_closure(), runtime_call_args[2])
.LoadLiteral(Smi::FromInt(language_mode()))
- .StoreAccumulatorInRegister(language)
+ .StoreAccumulatorInRegister(runtime_call_args[3])
.LoadLiteral(
Smi::FromInt(execution_context()->scope()->start_position()))
- .StoreAccumulatorInRegister(eval_scope_position)
+ .StoreAccumulatorInRegister(runtime_call_args[4])
.LoadLiteral(Smi::FromInt(expr->position()))
- .StoreAccumulatorInRegister(eval_position);
+ .StoreAccumulatorInRegister(runtime_call_args[5]);
// Call ResolvePossiblyDirectEval and modify the callee.
builder()
- ->CallRuntime(Runtime::kResolvePossiblyDirectEval, callee_for_eval, 6)
+ ->CallRuntime(Runtime::kResolvePossiblyDirectEval, runtime_call_args)
.StoreAccumulatorInRegister(callee);
}
@@ -2692,16 +2449,14 @@ void BytecodeGenerator::VisitCall(Call* expr) {
if (expr->CallFeedbackICSlot().IsInvalid()) {
DCHECK(call_type == Call::POSSIBLY_EVAL_CALL);
// Valid type feedback slots can only be greater than kReservedIndexCount.
- // We use 0 to indicate an invalid slot it. Statically assert that 0 cannot
+ // We use 0 to indicate an invalid slot id. Statically assert that 0 cannot
// be a valid slot id.
STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
feedback_slot_index = 0;
} else {
feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
}
- builder()->Call(callee, receiver, 1 + args->length(), feedback_slot_index,
- expr->tail_call_mode());
- execution_result()->SetResultInAccumulator();
+ builder()->Call(callee, args, feedback_slot_index, expr->tail_call_mode());
}
void BytecodeGenerator::VisitCallSuper(Call* expr) {
@@ -2709,17 +2464,15 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
SuperCallReference* super = expr->expression()->AsSuperCallReference();
// Prepare the constructor to the super call.
- Register this_function = register_allocator()->NewRegister();
- VisitForAccumulatorValue(super->this_function_var());
- builder()
- ->StoreAccumulatorInRegister(this_function)
- .CallRuntime(Runtime::kInlineGetSuperConstructor, this_function, 1);
+ Register this_function = VisitForRegisterValue(super->this_function_var());
+ builder()->CallRuntime(Runtime::kInlineGetSuperConstructor, this_function);
Register constructor = this_function; // Re-use dead this_function register.
builder()->StoreAccumulatorInRegister(constructor);
- ZoneList<Expression*>* args = expr->arguments();
- Register first_arg = VisitArguments(args);
+ RegisterList args =
+ register_allocator()->NewRegisterList(expr->arguments()->length());
+ VisitArguments(expr->arguments(), args);
// The new target is loaded into the accumulator from the
// {new.target} variable.
@@ -2727,51 +2480,51 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
// Call construct.
builder()->SetExpressionPosition(expr);
- builder()->New(constructor, first_arg, args->length());
- execution_result()->SetResultInAccumulator();
+ // Valid type feedback slots can only be greater than kReservedIndexCount.
+ // Assert that 0 cannot be valid a valid slot id.
+ STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+ // Type feedback is not necessary for super constructor calls. The type
+ // information can be inferred in most cases. Slot id 0 indicates type
+ // feedback is not required.
+ builder()->New(constructor, args, 0);
}
void BytecodeGenerator::VisitCallNew(CallNew* expr) {
- Register constructor = register_allocator()->NewRegister();
- VisitForAccumulatorValue(expr->expression());
- builder()->StoreAccumulatorInRegister(constructor);
-
- ZoneList<Expression*>* args = expr->arguments();
- Register first_arg = VisitArguments(args);
+ Register constructor = VisitForRegisterValue(expr->expression());
+ RegisterList args =
+ register_allocator()->NewRegisterList(expr->arguments()->length());
+ VisitArguments(expr->arguments(), args);
builder()->SetExpressionPosition(expr);
// The accumulator holds new target which is the same as the
// constructor for CallNew.
builder()
->LoadAccumulatorWithRegister(constructor)
- .New(constructor, first_arg, args->length());
- execution_result()->SetResultInAccumulator();
+ .New(constructor, args, feedback_index(expr->CallNewFeedbackSlot()));
}
void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
if (expr->is_jsruntime()) {
// Allocate a register for the receiver and load it with undefined.
- register_allocator()->PrepareForConsecutiveAllocations(1 + args->length());
- Register receiver = register_allocator()->NextConsecutiveRegister();
+ RegisterList args =
+ register_allocator()->NewRegisterList(expr->arguments()->length() + 1);
+ Register receiver = args[0];
builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
- Register first_arg = VisitArguments(args);
- CHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);
- builder()->CallJSRuntime(expr->context_index(), receiver,
- 1 + args->length());
+ VisitArguments(expr->arguments(), args, 1);
+ builder()->CallJSRuntime(expr->context_index(), args);
} else {
// Evaluate all arguments to the runtime call.
- Register first_arg = VisitArguments(args);
+ RegisterList args =
+ register_allocator()->NewRegisterList(expr->arguments()->length());
+ VisitArguments(expr->arguments(), args);
Runtime::FunctionId function_id = expr->function()->function_id;
- builder()->CallRuntime(function_id, first_arg, args->length());
+ builder()->CallRuntime(function_id, args);
}
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitVoid(UnaryOperation* expr) {
VisitForEffect(expr->expression());
builder()->LoadUndefined();
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitTypeOf(UnaryOperation* expr) {
@@ -2785,7 +2538,6 @@ void BytecodeGenerator::VisitTypeOf(UnaryOperation* expr) {
VisitForAccumulatorValue(expr->expression());
}
builder()->TypeOf();
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitNot(UnaryOperation* expr) {
@@ -2802,7 +2554,6 @@ void BytecodeGenerator::VisitNot(UnaryOperation* expr) {
} else {
VisitForAccumulatorValue(expr->expression());
builder()->LogicalNot();
- execution_result()->SetResultInAccumulator();
}
}
@@ -2846,16 +2597,15 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
Variable* variable = proxy->var();
DCHECK(is_sloppy(language_mode()) || variable->is_this());
switch (variable->location()) {
- case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
// Global var, let, const or variables not explicitly declared.
Register native_context = register_allocator()->NewRegister();
Register global_object = register_allocator()->NewRegister();
builder()
->LoadContextSlot(execution_context()->reg(),
- Context::NATIVE_CONTEXT_INDEX)
+ Context::NATIVE_CONTEXT_INDEX, 0)
.StoreAccumulatorInRegister(native_context)
- .LoadContextSlot(native_context, Context::EXTENSION_INDEX)
+ .LoadContextSlot(native_context, Context::EXTENSION_INDEX, 0)
.StoreAccumulatorInRegister(global_object)
.LoadLiteral(variable->name())
.Delete(global_object, language_mode());
@@ -2878,7 +2628,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
builder()
->LoadLiteral(variable->name())
.StoreAccumulatorInRegister(name_reg)
- .CallRuntime(Runtime::kDeleteLookupSlot, name_reg, 1);
+ .CallRuntime(Runtime::kDeleteLookupSlot, name_reg);
break;
}
default:
@@ -2889,7 +2639,6 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
VisitForEffect(expr->expression());
builder()->LoadTrue();
}
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
@@ -2902,7 +2651,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
bool is_postfix = expr->is_postfix() && !execution_result()->IsEffect();
// Evaluate LHS expression and get old value.
- Register object, home_object, key, old_value, value;
+ Register object, key, old_value;
+ RegisterList super_property_args;
Handle<String> name;
switch (assign_type) {
case VARIABLE: {
@@ -2930,44 +2680,36 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case NAMED_SUPER_PROPERTY: {
- register_allocator()->PrepareForConsecutiveAllocations(4);
- object = register_allocator()->NextConsecutiveRegister();
- home_object = register_allocator()->NextConsecutiveRegister();
- key = register_allocator()->NextConsecutiveRegister();
- value = register_allocator()->NextConsecutiveRegister();
+ super_property_args = register_allocator()->NewRegisterList(4);
+ RegisterList load_super_args = super_property_args.Truncate(3);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), object);
- VisitForRegisterValue(super_property->home_object(), home_object);
+ VisitForRegisterValue(super_property->this_var(), load_super_args[0]);
+ VisitForRegisterValue(super_property->home_object(), load_super_args[1]);
builder()
->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
- .StoreAccumulatorInRegister(key);
- BuildNamedSuperPropertyLoad(object, home_object, key);
+ .StoreAccumulatorInRegister(load_super_args[2])
+ .CallRuntime(Runtime::kLoadFromSuper, load_super_args);
break;
}
case KEYED_SUPER_PROPERTY: {
- register_allocator()->PrepareForConsecutiveAllocations(4);
- object = register_allocator()->NextConsecutiveRegister();
- home_object = register_allocator()->NextConsecutiveRegister();
- key = register_allocator()->NextConsecutiveRegister();
- value = register_allocator()->NextConsecutiveRegister();
- builder()->StoreAccumulatorInRegister(value);
+ super_property_args = register_allocator()->NewRegisterList(4);
+ RegisterList load_super_args = super_property_args.Truncate(3);
SuperPropertyReference* super_property =
property->obj()->AsSuperPropertyReference();
- VisitForRegisterValue(super_property->this_var(), object);
- VisitForRegisterValue(super_property->home_object(), home_object);
- VisitForRegisterValue(property->key(), key);
- BuildKeyedSuperPropertyLoad(object, home_object, key);
+ VisitForRegisterValue(super_property->this_var(), load_super_args[0]);
+ VisitForRegisterValue(super_property->home_object(), load_super_args[1]);
+ VisitForRegisterValue(property->key(), load_super_args[2]);
+ builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, load_super_args);
break;
}
}
// Save result for postfix expressions.
if (is_postfix) {
- old_value = register_allocator()->outer()->NewRegister();
-
// Convert old value into a number before saving it.
- builder()->CastAccumulatorToNumber(old_value);
+ old_value = register_allocator()->NewRegister();
+ builder()->ConvertAccumulatorToNumber(old_value);
}
// Perform +1/-1 operation.
@@ -2994,22 +2736,22 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case NAMED_SUPER_PROPERTY: {
- builder()->StoreAccumulatorInRegister(value);
- BuildNamedSuperPropertyStore(object, home_object, key, value);
+ builder()
+ ->StoreAccumulatorInRegister(super_property_args[3])
+ .CallRuntime(StoreToSuperRuntimeId(), super_property_args);
break;
}
case KEYED_SUPER_PROPERTY: {
- builder()->StoreAccumulatorInRegister(value);
- BuildKeyedSuperPropertyStore(object, home_object, key, value);
+ builder()
+ ->StoreAccumulatorInRegister(super_property_args[3])
+ .CallRuntime(StoreKeyedToSuperRuntimeId(), super_property_args);
break;
}
}
// Restore old value for postfix expressions.
if (is_postfix) {
- execution_result()->SetResultInRegister(old_value);
- } else {
- execution_result()->SetResultInAccumulator();
+ builder()->LoadAccumulatorWithRegister(old_value);
}
}
@@ -3034,8 +2776,8 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
builder()->SetExpressionPosition(expr);
- builder()->CompareOperation(expr->op(), lhs);
- execution_result()->SetResultInAccumulator();
+ FeedbackVectorSlot slot = expr->CompareOperationFeedbackSlot();
+ builder()->CompareOperation(expr->op(), lhs, feedback_index(slot));
}
void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
@@ -3045,7 +2787,6 @@ void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
VisitForAccumulatorValue(expr->right());
FeedbackVectorSlot slot = expr->BinaryOperationFeedbackSlot();
builder()->BinaryOperation(expr->op(), lhs, feedback_index(slot));
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
@@ -3055,7 +2796,7 @@ void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
}
void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
- execution_result()->SetResultInRegister(Register::function_closure());
+ builder()->LoadAccumulatorWithRegister(Register::function_closure());
}
void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) {
@@ -3065,8 +2806,7 @@ void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) {
void BytecodeGenerator::VisitSuperPropertyReference(
SuperPropertyReference* expr) {
- builder()->CallRuntime(Runtime::kThrowUnsupportedSuperError, Register(0), 0);
- execution_result()->SetResultInAccumulator();
+ builder()->CallRuntime(Runtime::kThrowUnsupportedSuperError);
}
void BytecodeGenerator::VisitCommaExpression(BinaryOperation* binop) {
@@ -3106,7 +2846,6 @@ void BytecodeGenerator::VisitLogicalOrExpression(BinaryOperation* binop) {
VisitForAccumulatorValue(right);
builder()->Bind(&end_label);
}
- execution_result()->SetResultInAccumulator();
}
}
@@ -3142,7 +2881,6 @@ void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
VisitForAccumulatorValue(right);
builder()->Bind(&end_label);
}
- execution_result()->SetResultInAccumulator();
}
}
@@ -3150,35 +2888,45 @@ void BytecodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
Visit(expr->expression());
}
-void BytecodeGenerator::VisitNewLocalFunctionContext() {
- AccumulatorResultScope accumulator_execution_result(this);
+void BytecodeGenerator::BuildNewLocalActivationContext() {
+ ValueResultScope value_execution_result(this);
Scope* scope = this->scope();
- // Allocate a new local context.
+ // Create the appropriate context.
if (scope->is_script_scope()) {
- RegisterAllocationScope register_scope(this);
- Register closure = register_allocator()->NewRegister();
- Register scope_info = register_allocator()->NewRegister();
- DCHECK(Register::AreContiguous(closure, scope_info));
+ RegisterList args = register_allocator()->NewRegisterList(2);
builder()
->LoadAccumulatorWithRegister(Register::function_closure())
- .StoreAccumulatorInRegister(closure)
+ .StoreAccumulatorInRegister(args[0])
.LoadLiteral(scope->scope_info())
- .StoreAccumulatorInRegister(scope_info)
- .CallRuntime(Runtime::kNewScriptContext, closure, 2);
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kNewScriptContext, args);
+ } else if (scope->is_module_scope()) {
+ // We don't need to do anything for the outer script scope.
+ DCHECK(scope->outer_scope()->is_script_scope());
+
+ // A JSFunction representing a module is called with the module object as
+ // its sole argument, which we pass on to PushModuleContext.
+ RegisterList args = register_allocator()->NewRegisterList(3);
+ builder()
+ ->MoveRegister(builder()->Parameter(1), args[0])
+ .LoadAccumulatorWithRegister(Register::function_closure())
+ .StoreAccumulatorInRegister(args[1])
+ .LoadLiteral(scope->scope_info())
+ .StoreAccumulatorInRegister(args[2])
+ .CallRuntime(Runtime::kPushModuleContext, args);
} else {
int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (slot_count <= FastNewFunctionContextStub::kMaximumSlots) {
builder()->CreateFunctionContext(slot_count);
} else {
builder()->CallRuntime(Runtime::kNewFunctionContext,
- Register::function_closure(), 1);
+ Register::function_closure());
}
}
- execution_result()->SetResultInAccumulator();
}
-void BytecodeGenerator::VisitBuildLocalActivationContext() {
+void BytecodeGenerator::BuildLocalActivationContextInitialization() {
DeclarationScope* scope = this->scope();
if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
@@ -3187,7 +2935,7 @@ void BytecodeGenerator::VisitBuildLocalActivationContext() {
// Context variable (at bottom of the context chain).
DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
builder()->LoadAccumulatorWithRegister(receiver).StoreContextSlot(
- execution_context()->reg(), variable->index());
+ execution_context()->reg(), variable->index(), 0);
}
// Copy parameters into context if necessary.
@@ -3201,56 +2949,53 @@ void BytecodeGenerator::VisitBuildLocalActivationContext() {
Register parameter(builder()->Parameter(i + 1));
// Context variable (at bottom of the context chain).
DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
- builder()->LoadAccumulatorWithRegister(parameter)
- .StoreContextSlot(execution_context()->reg(), variable->index());
+ builder()->LoadAccumulatorWithRegister(parameter).StoreContextSlot(
+ execution_context()->reg(), variable->index(), 0);
}
}
-void BytecodeGenerator::VisitNewLocalBlockContext(Scope* scope) {
- AccumulatorResultScope accumulator_execution_result(this);
+void BytecodeGenerator::BuildNewLocalBlockContext(Scope* scope) {
+ ValueResultScope value_execution_result(this);
DCHECK(scope->is_block_scope());
VisitFunctionClosureForContext();
builder()->CreateBlockContext(scope->scope_info());
- execution_result()->SetResultInAccumulator();
}
-void BytecodeGenerator::VisitNewLocalWithContext() {
- AccumulatorResultScope accumulator_execution_result(this);
+void BytecodeGenerator::BuildNewLocalWithContext(Scope* scope) {
+ ValueResultScope value_execution_result(this);
Register extension_object = register_allocator()->NewRegister();
- builder()->CastAccumulatorToJSObject(extension_object);
+ builder()->ConvertAccumulatorToObject(extension_object);
VisitFunctionClosureForContext();
- builder()->CreateWithContext(extension_object);
- execution_result()->SetResultInAccumulator();
+ builder()->CreateWithContext(extension_object, scope->scope_info());
}
-void BytecodeGenerator::VisitNewLocalCatchContext(Variable* variable) {
- AccumulatorResultScope accumulator_execution_result(this);
+void BytecodeGenerator::BuildNewLocalCatchContext(Variable* variable,
+ Scope* scope) {
+ ValueResultScope value_execution_result(this);
DCHECK(variable->IsContextSlot());
Register exception = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(exception);
VisitFunctionClosureForContext();
- builder()->CreateCatchContext(exception, variable->name());
- execution_result()->SetResultInAccumulator();
+ builder()->CreateCatchContext(exception, variable->name(),
+ scope->scope_info());
}
void BytecodeGenerator::VisitObjectLiteralAccessor(
Register home_object, ObjectLiteralProperty* property, Register value_out) {
- // TODO(rmcilroy): Replace value_out with VisitForRegister();
if (property == nullptr) {
builder()->LoadNull().StoreAccumulatorInRegister(value_out);
} else {
- VisitForAccumulatorValue(property->value());
- builder()->StoreAccumulatorInRegister(value_out);
+ VisitForRegisterValue(property->value(), value_out);
VisitSetHomeObject(value_out, home_object, property);
}
}
void BytecodeGenerator::VisitSetHomeObject(Register value, Register home_object,
- ObjectLiteralProperty* property,
+ LiteralProperty* property,
int slot_number) {
Expression* expr = property->value();
if (FunctionLiteral::NeedsHomeObject(expr)) {
@@ -3302,38 +3047,44 @@ void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
// Store the new target we were called with in the given variable.
builder()->LoadAccumulatorWithRegister(Register::new_target());
VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
+
+ // TODO(mstarzinger): The <new.target> register is not set by the deoptimizer
+ // and we need to make sure {BytecodeRegisterOptimizer} flushes its state
+ // before a local variable containing the <new.target> is used. Using a label
+ // as below flushes the entire pipeline, we should be more specific here.
+ BytecodeLabel flush_state_label;
+ builder()->Bind(&flush_state_label);
}
void BytecodeGenerator::VisitFunctionClosureForContext() {
- AccumulatorResultScope accumulator_execution_result(this);
+ ValueResultScope value_execution_result(this);
DeclarationScope* closure_scope =
execution_context()->scope()->GetClosureScope();
- if (closure_scope->is_script_scope() ||
- closure_scope->is_module_scope()) {
+ if (closure_scope->is_script_scope()) {
// Contexts nested in the native context have a canonical empty function as
// their closure, not the anonymous closure containing the global code.
Register native_context = register_allocator()->NewRegister();
builder()
->LoadContextSlot(execution_context()->reg(),
- Context::NATIVE_CONTEXT_INDEX)
+ Context::NATIVE_CONTEXT_INDEX, 0)
.StoreAccumulatorInRegister(native_context)
- .LoadContextSlot(native_context, Context::CLOSURE_INDEX);
+ .LoadContextSlot(native_context, Context::CLOSURE_INDEX, 0);
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
builder()->LoadContextSlot(execution_context()->reg(),
- Context::CLOSURE_INDEX);
+ Context::CLOSURE_INDEX, 0);
} else {
- DCHECK(closure_scope->is_function_scope());
+ DCHECK(closure_scope->is_function_scope() ||
+ closure_scope->is_module_scope());
builder()->LoadAccumulatorWithRegister(Register::function_closure());
}
- execution_result()->SetResultInAccumulator();
}
// Visits the expression |expr| and places the result in the accumulator.
void BytecodeGenerator::VisitForAccumulatorValue(Expression* expr) {
- AccumulatorResultScope accumulator_scope(this);
+ ValueResultScope accumulator_scope(this);
Visit(expr);
}
@@ -3354,16 +3105,17 @@ void BytecodeGenerator::VisitForEffect(Expression* expr) {
// Visits the expression |expr| and returns the register containing
// the expression result.
Register BytecodeGenerator::VisitForRegisterValue(Expression* expr) {
- RegisterResultScope register_scope(this);
- Visit(expr);
- return register_scope.ResultRegister();
+ VisitForAccumulatorValue(expr);
+ Register result = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(result);
+ return result;
}
// Visits the expression |expr| and stores the expression result in
// |destination|.
void BytecodeGenerator::VisitForRegisterValue(Expression* expr,
Register destination) {
- AccumulatorResultScope register_scope(this);
+ ValueResultScope register_scope(this);
Visit(expr);
builder()->StoreAccumulatorInRegister(destination);
}
@@ -3412,6 +3164,16 @@ int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
return TypeFeedbackVector::GetIndex(slot);
}
+Runtime::FunctionId BytecodeGenerator::StoreToSuperRuntimeId() {
+ return is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy;
+}
+
+Runtime::FunctionId BytecodeGenerator::StoreKeyedToSuperRuntimeId() {
+ return is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy;
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index ee72135f43..03067de08d 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -24,7 +24,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
public:
explicit BytecodeGenerator(CompilationInfo* info);
- void GenerateBytecode();
+ void GenerateBytecode(uintptr_t stack_limit);
Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate);
#define DECLARE_VISIT(type) void Visit##type(type* node);
@@ -36,7 +36,6 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitStatements(ZoneList<Statement*>* statments);
private:
- class AccumulatorResultScope;
class ContextScope;
class ControlScope;
class ControlScopeForBreakable;
@@ -47,9 +46,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
class ExpressionResultScope;
class EffectResultScope;
class GlobalDeclarationsBuilder;
- class RegisterResultScope;
class RegisterAllocationScope;
class TestResultScope;
+ class ValueResultScope;
enum class TestFallthrough { kThen, kElse, kNone };
@@ -73,8 +72,10 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Used by flow control routines to evaluate loop condition.
void VisitCondition(Expression* expr);
- // Helper visitors which perform common operations.
- Register VisitArguments(ZoneList<Expression*>* arguments);
+ // Visit the arguments expressions in |args| and store them in |args_regs|
+ // starting at register |first_argument_register| in the list.
+ void VisitArguments(ZoneList<Expression*>* args, RegisterList arg_regs,
+ size_t first_argument_register = 0);
// Visit a keyed super property load. The optional
// |opt_receiver_out| register will have the receiver stored to it
@@ -104,15 +105,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitVariableAssignment(Variable* variable, Token::Value op,
FeedbackVectorSlot slot);
- void BuildNamedSuperPropertyStore(Register receiver, Register home_object,
- Register name, Register value);
- void BuildKeyedSuperPropertyStore(Register receiver, Register home_object,
- Register key, Register value);
- void BuildNamedSuperPropertyLoad(Register receiver, Register home_object,
- Register name);
- void BuildKeyedSuperPropertyLoad(Register receiver, Register home_object,
- Register key);
-
+ void BuildReturn();
+ void BuildReThrow();
void BuildAbort(BailoutReason bailout_reason);
void BuildThrowIfHole(Handle<String> name);
void BuildThrowIfNotHole(Handle<String> name);
@@ -125,6 +119,12 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildIndexedJump(Register value, size_t start_index, size_t size,
ZoneVector<BytecodeLabel>& targets);
+ void BuildNewLocalActivationContext();
+ void BuildLocalActivationContextInitialization();
+ void BuildNewLocalBlockContext(Scope* scope);
+ void BuildNewLocalCatchContext(Variable* variable, Scope* scope);
+ void BuildNewLocalWithContext(Scope* scope);
+
void VisitGeneratorPrologue();
void VisitArgumentsObject(Variable* variable);
@@ -133,18 +133,12 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitClassLiteralForRuntimeDefinition(ClassLiteral* expr);
void VisitClassLiteralProperties(ClassLiteral* expr, Register literal,
Register prototype);
- void VisitClassLiteralStaticPrototypeWithComputedName(Register name);
void VisitThisFunctionVariable(Variable* variable);
void VisitNewTargetVariable(Variable* variable);
- void VisitNewLocalFunctionContext();
- void VisitBuildLocalActivationContext();
void VisitBlockDeclarationsAndStatements(Block* stmt);
- void VisitNewLocalBlockContext(Scope* scope);
- void VisitNewLocalCatchContext(Variable* variable);
- void VisitNewLocalWithContext();
void VisitFunctionClosureForContext();
void VisitSetHomeObject(Register value, Register home_object,
- ObjectLiteralProperty* property, int slot_number = 0);
+ LiteralProperty* property, int slot_number = 0);
void VisitObjectLiteralAccessor(Register home_object,
ObjectLiteralProperty* property,
Register value_out);
@@ -168,13 +162,10 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitForTest(Expression* expr, BytecodeLabels* then_labels,
BytecodeLabels* else_labels, TestFallthrough fallthrough);
- // Methods for tracking and remapping register.
- void RecordStoreToRegister(Register reg);
- Register LoadFromAliasedRegister(Register reg);
-
- // Initialize an array of temporary registers with consecutive registers.
- template <size_t N>
- void InitializeWithConsecutiveRegisters(Register (&registers)[N]);
+ // Returns the runtime function id for a store to super for the function's
+ // language mode.
+ inline Runtime::FunctionId StoreToSuperRuntimeId();
+ inline Runtime::FunctionId StoreKeyedToSuperRuntimeId();
inline BytecodeArrayBuilder* builder() const { return builder_; }
inline Zone* zone() const { return zone_; }
@@ -193,12 +184,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
execution_result_ = execution_result;
}
ExpressionResultScope* execution_result() const { return execution_result_; }
- inline void set_register_allocator(
- RegisterAllocationScope* register_allocator) {
- register_allocator_ = register_allocator;
- }
- RegisterAllocationScope* register_allocator() const {
- return register_allocator_;
+ BytecodeRegisterAllocator* register_allocator() const {
+ return builder()->register_allocator();
}
GlobalDeclarationsBuilder* globals_builder() { return globals_builder_; }
@@ -222,7 +209,6 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
ControlScope* execution_control_;
ContextScope* execution_context_;
ExpressionResultScope* execution_result_;
- RegisterAllocationScope* register_allocator_;
ZoneVector<BytecodeLabel> generator_resume_points_;
Register generator_state_;
diff --git a/deps/v8/src/interpreter/bytecode-label.h b/deps/v8/src/interpreter/bytecode-label.h
index d96cf66d13..b5f602d216 100644
--- a/deps/v8/src/interpreter/bytecode-label.h
+++ b/deps/v8/src/interpreter/bytecode-label.h
@@ -5,7 +5,7 @@
#ifndef V8_INTERPRETER_BYTECODE_LABEL_H_
#define V8_INTERPRETER_BYTECODE_LABEL_H_
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-operands.cc b/deps/v8/src/interpreter/bytecode-operands.cc
new file mode 100644
index 0000000000..6be81fe62e
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-operands.cc
@@ -0,0 +1,89 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-operands.h"
+
+#include <iomanip>
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+namespace {
+
+const char* AccumulatorUseToString(AccumulatorUse accumulator_use) {
+ switch (accumulator_use) {
+ case AccumulatorUse::kNone:
+ return "None";
+ case AccumulatorUse::kRead:
+ return "Read";
+ case AccumulatorUse::kWrite:
+ return "Write";
+ case AccumulatorUse::kReadWrite:
+ return "ReadWrite";
+ }
+ UNREACHABLE();
+ return "";
+}
+
+const char* OperandTypeToString(OperandType operand_type) {
+ switch (operand_type) {
+#define CASE(Name, _) \
+ case OperandType::k##Name: \
+ return #Name;
+ OPERAND_TYPE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return "";
+}
+
+const char* OperandScaleToString(OperandScale operand_scale) {
+ switch (operand_scale) {
+#define CASE(Name, _) \
+ case OperandScale::k##Name: \
+ return #Name;
+ OPERAND_SCALE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return "";
+}
+
+const char* OperandSizeToString(OperandSize operand_size) {
+ switch (operand_size) {
+ case OperandSize::kNone:
+ return "None";
+ case OperandSize::kByte:
+ return "Byte";
+ case OperandSize::kShort:
+ return "Short";
+ case OperandSize::kQuad:
+ return "Quad";
+ }
+ UNREACHABLE();
+ return "";
+}
+
+} // namespace
+
+std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use) {
+ return os << AccumulatorUseToString(use);
+}
+
+std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size) {
+ return os << OperandSizeToString(operand_size);
+}
+
+std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale) {
+ return os << OperandScaleToString(operand_scale);
+}
+
+std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
+ return os << OperandTypeToString(operand_type);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
new file mode 100644
index 0000000000..b35c4866be
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -0,0 +1,126 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_OPERANDS_H_
+#define V8_INTERPRETER_BYTECODE_OPERANDS_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+#define INVALID_OPERAND_TYPE_LIST(V) V(None, OperandTypeInfo::kNone)
+
+#define REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
+ V(RegList, OperandTypeInfo::kScalableSignedByte) \
+ V(Reg, OperandTypeInfo::kScalableSignedByte) \
+ V(RegPair, OperandTypeInfo::kScalableSignedByte)
+
+#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) \
+ V(RegOut, OperandTypeInfo::kScalableSignedByte) \
+ V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
+ V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
+
+#define SCALAR_OPERAND_TYPE_LIST(V) \
+ V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
+ V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
+ V(Idx, OperandTypeInfo::kScalableUnsignedByte) \
+ V(UImm, OperandTypeInfo::kScalableUnsignedByte) \
+ V(Imm, OperandTypeInfo::kScalableSignedByte) \
+ V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
+ V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
+
+#define REGISTER_OPERAND_TYPE_LIST(V) \
+ REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
+ REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
+
+#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
+ INVALID_OPERAND_TYPE_LIST(V) \
+ SCALAR_OPERAND_TYPE_LIST(V)
+
+// The list of operand types used by bytecodes.
+#define OPERAND_TYPE_LIST(V) \
+ NON_REGISTER_OPERAND_TYPE_LIST(V) \
+ REGISTER_OPERAND_TYPE_LIST(V)
+
+// Enumeration of scaling factors applicable to scalable operands. Code
+// relies on being able to cast values to integer scaling values.
+#define OPERAND_SCALE_LIST(V) \
+ V(Single, 1) \
+ V(Double, 2) \
+ V(Quadruple, 4)
+
+enum class OperandScale : uint8_t {
+#define DECLARE_OPERAND_SCALE(Name, Scale) k##Name = Scale,
+ OPERAND_SCALE_LIST(DECLARE_OPERAND_SCALE)
+#undef DECLARE_OPERAND_SCALE
+ kLast = kQuadruple
+};
+
+// Enumeration of the size classes of operand types used by
+// bytecodes. Code relies on being able to cast values to integer
+// types to get the size in bytes.
+enum class OperandSize : uint8_t {
+ kNone = 0,
+ kByte = 1,
+ kShort = 2,
+ kQuad = 4,
+ kLast = kQuad
+};
+
+// Primitive operand info used that summarize properties of operands.
+// Columns are Name, IsScalable, IsUnsigned, UnscaledSize.
+#define OPERAND_TYPE_INFO_LIST(V) \
+ V(None, false, false, OperandSize::kNone) \
+ V(ScalableSignedByte, true, false, OperandSize::kByte) \
+ V(ScalableUnsignedByte, true, true, OperandSize::kByte) \
+ V(FixedUnsignedByte, false, true, OperandSize::kByte) \
+ V(FixedUnsignedShort, false, true, OperandSize::kShort)
+
+enum class OperandTypeInfo : uint8_t {
+#define DECLARE_OPERAND_TYPE_INFO(Name, ...) k##Name,
+ OPERAND_TYPE_INFO_LIST(DECLARE_OPERAND_TYPE_INFO)
+#undef DECLARE_OPERAND_TYPE_INFO
+};
+
+// Enumeration of operand types used by bytecodes.
+enum class OperandType : uint8_t {
+#define DECLARE_OPERAND_TYPE(Name, _) k##Name,
+ OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE)
+#undef DECLARE_OPERAND_TYPE
+#define COUNT_OPERAND_TYPES(x, _) +1
+ // The COUNT_OPERAND macro will turn this into kLast = -1 +1 +1... which will
+ // evaluate to the same value as the last operand.
+ kLast = -1 OPERAND_TYPE_LIST(COUNT_OPERAND_TYPES)
+#undef COUNT_OPERAND_TYPES
+};
+
+enum class AccumulatorUse : uint8_t {
+ kNone = 0,
+ kRead = 1 << 0,
+ kWrite = 1 << 1,
+ kReadWrite = kRead | kWrite
+};
+
+inline AccumulatorUse operator&(AccumulatorUse lhs, AccumulatorUse rhs) {
+ int result = static_cast<int>(lhs) & static_cast<int>(rhs);
+ return static_cast<AccumulatorUse>(result);
+}
+
+inline AccumulatorUse operator|(AccumulatorUse lhs, AccumulatorUse rhs) {
+ int result = static_cast<int>(lhs) | static_cast<int>(rhs);
+ return static_cast<AccumulatorUse>(result);
+}
+
+std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use);
+std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale);
+std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size);
+std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_OPERANDS_H_
diff --git a/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc b/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
index 11aebb6ddb..c87d31c39f 100644
--- a/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
@@ -13,17 +13,17 @@ namespace interpreter {
BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
BytecodePipelineStage* next_stage)
- : next_stage_(next_stage) {
+ : next_stage_(next_stage), last_(Bytecode::kIllegal) {
InvalidateLast();
}
// override
Handle<BytecodeArray> BytecodePeepholeOptimizer::ToBytecodeArray(
- Isolate* isolate, int fixed_register_count, int parameter_count,
+ Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) {
Flush();
- return next_stage_->ToBytecodeArray(isolate, fixed_register_count,
- parameter_count, handler_table);
+ return next_stage_->ToBytecodeArray(isolate, register_count, parameter_count,
+ handler_table);
}
// override
@@ -142,7 +142,7 @@ void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode,
current->set_bytecode(new_bytecode, last->operand(0), current->operand(0),
current->operand(1));
if (last->source_info().is_valid()) {
- current->source_info().Clone(last->source_info());
+ current->source_info_ptr()->Clone(last->source_info());
}
}
@@ -153,7 +153,7 @@ void TransformLdaZeroBinaryOpToBinaryOpWithZero(Bytecode new_bytecode,
current->set_bytecode(new_bytecode, 0, current->operand(0),
current->operand(1));
if (last->source_info().is_valid()) {
- current->source_info().Clone(last->source_info());
+ current->source_info_ptr()->Clone(last->source_info());
}
}
@@ -223,7 +223,7 @@ void BytecodePeepholeOptimizer::ElideLastAction(
// |node| can not have a valid source position if the source
// position of last() is valid (per rules in
// CanElideLastBasedOnSourcePosition()).
- node->source_info().Clone(last()->source_info());
+ node->source_info_ptr()->Clone(last()->source_info());
}
SetLast(node);
} else {
@@ -314,7 +314,7 @@ void BytecodePeepholeOptimizer::ElideLastBeforeJumpAction(
if (!CanElideLastBasedOnSourcePosition(node)) {
next_stage()->Write(last());
} else if (!node->source_info().is_valid()) {
- node->source_info().Clone(last()->source_info());
+ node->source_info_ptr()->Clone(last()->source_info());
}
InvalidateLast();
}
diff --git a/deps/v8/src/interpreter/bytecode-peephole-optimizer.h b/deps/v8/src/interpreter/bytecode-peephole-optimizer.h
index 2f4a35fd1b..cedd742f87 100644
--- a/deps/v8/src/interpreter/bytecode-peephole-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-peephole-optimizer.h
@@ -28,7 +28,7 @@ class BytecodePeepholeOptimizer final : public BytecodePipelineStage,
void BindLabel(BytecodeLabel* label) override;
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
Handle<BytecodeArray> ToBytecodeArray(
- Isolate* isolate, int fixed_register_count, int parameter_count,
+ Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) override;
private:
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.cc b/deps/v8/src/interpreter/bytecode-pipeline.cc
index 66b8bdf533..6e6a6b6fab 100644
--- a/deps/v8/src/interpreter/bytecode-pipeline.cc
+++ b/deps/v8/src/interpreter/bytecode-pipeline.cc
@@ -11,45 +11,6 @@ namespace v8 {
namespace internal {
namespace interpreter {
-BytecodeNode::BytecodeNode(Bytecode bytecode) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
- bytecode_ = bytecode;
-}
-
-BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
- bytecode_ = bytecode;
- operands_[0] = operand0;
-}
-
-BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
- uint32_t operand1) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
- bytecode_ = bytecode;
- operands_[0] = operand0;
- operands_[1] = operand1;
-}
-
-BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
- uint32_t operand1, uint32_t operand2) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
- bytecode_ = bytecode;
- operands_[0] = operand0;
- operands_[1] = operand1;
- operands_[2] = operand2;
-}
-
-BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
- uint32_t operand1, uint32_t operand2,
- uint32_t operand3) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 4);
- bytecode_ = bytecode;
- operands_[0] = operand0;
- operands_[1] = operand1;
- operands_[2] = operand2;
- operands_[3] = operand3;
-}
-
BytecodeNode::BytecodeNode(const BytecodeNode& other) {
memcpy(this, &other, sizeof(other));
}
@@ -83,23 +44,6 @@ void BytecodeNode::Print(std::ostream& os) const {
#endif // DEBUG
}
-void BytecodeNode::Transform(Bytecode new_bytecode, uint32_t extra_operand) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(new_bytecode),
- Bytecodes::NumberOfOperands(bytecode()) + 1);
- DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 1 ||
- Bytecodes::GetOperandType(new_bytecode, 0) ==
- Bytecodes::GetOperandType(bytecode(), 0));
- DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 2 ||
- Bytecodes::GetOperandType(new_bytecode, 1) ==
- Bytecodes::GetOperandType(bytecode(), 1));
- DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 3 ||
- Bytecodes::GetOperandType(new_bytecode, 2) ==
- Bytecodes::GetOperandType(bytecode(), 2));
- DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 4);
- operands_[operand_count()] = extra_operand;
- bytecode_ = new_bytecode;
-}
-
bool BytecodeNode::operator==(const BytecodeNode& other) const {
if (this == &other) {
return true;
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.h b/deps/v8/src/interpreter/bytecode-pipeline.h
index 1668bab9c1..0b1a1f1bf3 100644
--- a/deps/v8/src/interpreter/bytecode-pipeline.h
+++ b/deps/v8/src/interpreter/bytecode-pipeline.h
@@ -9,7 +9,7 @@
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -47,7 +47,7 @@ class BytecodePipelineStage {
// Flush the pipeline and generate a bytecode array.
virtual Handle<BytecodeArray> ToBytecodeArray(
- Isolate* isolate, int fixed_register_count, int parameter_count,
+ Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) = 0;
};
@@ -134,21 +134,69 @@ class BytecodeSourceInfo final {
PositionType position_type_;
int source_position_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeSourceInfo);
};
// A container for a generated bytecode, it's operands, and source information.
// These must be allocated by a BytecodeNodeAllocator instance.
class BytecodeNode final : ZoneObject {
public:
- explicit BytecodeNode(Bytecode bytecode = Bytecode::kIllegal);
- BytecodeNode(Bytecode bytecode, uint32_t operand0);
- BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
- BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2);
- BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2, uint32_t operand3);
+ INLINE(BytecodeNode(const Bytecode bytecode,
+ BytecodeSourceInfo* source_info = nullptr))
+ : bytecode_(bytecode),
+ operand_count_(0),
+ operand_scale_(OperandScale::kSingle) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
+ AttachSourceInfo(source_info);
+ }
+
+ INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
+ BytecodeSourceInfo* source_info = nullptr))
+ : bytecode_(bytecode),
+ operand_count_(1),
+ operand_scale_(OperandScale::kSingle) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
+ SetOperand(0, operand0);
+ AttachSourceInfo(source_info);
+ }
+
+ INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1,
+ BytecodeSourceInfo* source_info = nullptr))
+ : bytecode_(bytecode),
+ operand_count_(2),
+ operand_scale_(OperandScale::kSingle) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
+ SetOperand(0, operand0);
+ SetOperand(1, operand1);
+ AttachSourceInfo(source_info);
+ }
+
+ INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ BytecodeSourceInfo* source_info = nullptr))
+ : bytecode_(bytecode),
+ operand_count_(3),
+ operand_scale_(OperandScale::kSingle) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
+ SetOperand(0, operand0);
+ SetOperand(1, operand1);
+ SetOperand(2, operand2);
+ AttachSourceInfo(source_info);
+ }
+
+ INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2, uint32_t operand3,
+ BytecodeSourceInfo* source_info = nullptr))
+ : bytecode_(bytecode),
+ operand_count_(4),
+ operand_scale_(OperandScale::kSingle) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
+ SetOperand(0, operand0);
+ SetOperand(1, operand1);
+ SetOperand(2, operand2);
+ SetOperand(3, operand3);
+ AttachSourceInfo(source_info);
+ }
BytecodeNode(const BytecodeNode& other);
BytecodeNode& operator=(const BytecodeNode& other);
@@ -162,25 +210,33 @@ class BytecodeNode final : ZoneObject {
void set_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
bytecode_ = bytecode;
+ operand_count_ = 0;
+ operand_scale_ = OperandScale::kSingle;
}
void set_bytecode(Bytecode bytecode, uint32_t operand0) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
bytecode_ = bytecode;
- operands_[0] = operand0;
+ operand_count_ = 1;
+ operand_scale_ = OperandScale::kSingle;
+ SetOperand(0, operand0);
}
void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
bytecode_ = bytecode;
- operands_[0] = operand0;
- operands_[1] = operand1;
+ operand_count_ = 2;
+ operand_scale_ = OperandScale::kSingle;
+ SetOperand(0, operand0);
+ SetOperand(1, operand1);
}
void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
bytecode_ = bytecode;
- operands_[0] = operand0;
- operands_[1] = operand1;
- operands_[2] = operand2;
+ operand_count_ = 3;
+ operand_scale_ = OperandScale::kSingle;
+ SetOperand(0, operand0);
+ SetOperand(1, operand1);
+ SetOperand(2, operand2);
}
// Clone |other|.
@@ -191,7 +247,36 @@ class BytecodeNode final : ZoneObject {
// Transform to a node representing |new_bytecode| which has one
// operand more than the current bytecode.
- void Transform(Bytecode new_bytecode, uint32_t extra_operand);
+ void Transform(Bytecode new_bytecode, uint32_t extra_operand) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(new_bytecode),
+ Bytecodes::NumberOfOperands(bytecode()) + 1);
+ DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 1 ||
+ Bytecodes::GetOperandType(new_bytecode, 0) ==
+ Bytecodes::GetOperandType(bytecode(), 0));
+ DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 2 ||
+ Bytecodes::GetOperandType(new_bytecode, 1) ==
+ Bytecodes::GetOperandType(bytecode(), 1));
+ DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 3 ||
+ Bytecodes::GetOperandType(new_bytecode, 2) ==
+ Bytecodes::GetOperandType(bytecode(), 2));
+ DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 4);
+
+ bytecode_ = new_bytecode;
+ operand_count_++;
+ SetOperand(operand_count() - 1, extra_operand);
+ }
+
+ // Updates the operand at |operand_index| to |operand|.
+ void UpdateOperand(int operand_index, uint32_t operand) {
+ DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(bytecode()));
+ operands_[operand_index] = operand;
+ if ((Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index) &&
+ Bytecodes::ScaleForSignedOperand(operand) != operand_scale_) ||
+ (Bytecodes::OperandIsScalableUnsignedByte(bytecode(), operand_index) &&
+ Bytecodes::ScaleForUnsignedOperand(operand) != operand_scale_)) {
+ UpdateScale();
+ }
+ }
Bytecode bytecode() const { return bytecode_; }
@@ -199,22 +284,60 @@ class BytecodeNode final : ZoneObject {
DCHECK_LT(i, operand_count());
return operands_[i];
}
- uint32_t* operands() { return operands_; }
const uint32_t* operands() const { return operands_; }
- int operand_count() const { return Bytecodes::NumberOfOperands(bytecode_); }
+ int operand_count() const { return operand_count_; }
+ OperandScale operand_scale() const { return operand_scale_; }
const BytecodeSourceInfo& source_info() const { return source_info_; }
- BytecodeSourceInfo& source_info() { return source_info_; }
+ BytecodeSourceInfo* source_info_ptr() { return &source_info_; }
bool operator==(const BytecodeNode& other) const;
bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
private:
- static const int kInvalidPosition = kMinInt;
+ INLINE(void AttachSourceInfo(BytecodeSourceInfo* source_info)) {
+ if (source_info && source_info->is_valid()) {
+ // Statement positions need to be emitted immediately. Expression
+ // positions can be pushed back until a bytecode is found that can
+ // throw (if expression position filtering is turned on). We only
+ // invalidate the existing source position information if it is used.
+ if (source_info->is_statement() ||
+ !FLAG_ignition_filter_expression_positions ||
+ !Bytecodes::IsWithoutExternalSideEffects(bytecode())) {
+ source_info_.Clone(*source_info);
+ source_info->set_invalid();
+ }
+ }
+ }
+
+ INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) {
+ if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) {
+ operand_scale_ =
+ std::max(operand_scale_, Bytecodes::ScaleForSignedOperand(operand));
+ } else if (Bytecodes::OperandIsScalableUnsignedByte(bytecode(),
+ operand_index)) {
+ operand_scale_ =
+ std::max(operand_scale_, Bytecodes::ScaleForUnsignedOperand(operand));
+ }
+ }
+
+ INLINE(void SetOperand(int operand_index, uint32_t operand)) {
+ operands_[operand_index] = operand;
+ UpdateScaleForOperand(operand_index, operand);
+ }
+
+ void UpdateScale() {
+ operand_scale_ = OperandScale::kSingle;
+ for (int i = 0; i < operand_count(); i++) {
+ UpdateScaleForOperand(i, operands_[i]);
+ }
+ }
Bytecode bytecode_;
uint32_t operands_[Bytecodes::kMaxOperands];
+ int operand_count_;
+ OperandScale operand_scale_;
BytecodeSourceInfo source_info_;
};
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.cc b/deps/v8/src/interpreter/bytecode-register-allocator.cc
deleted file mode 100644
index 10afcdc76d..0000000000
--- a/deps/v8/src/interpreter/bytecode-register-allocator.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/interpreter/bytecode-register-allocator.h"
-
-#include "src/interpreter/bytecode-array-builder.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-TemporaryRegisterAllocator::TemporaryRegisterAllocator(Zone* zone,
- int allocation_base)
- : free_temporaries_(zone),
- allocation_base_(allocation_base),
- allocation_count_(0),
- observer_(nullptr) {}
-
-Register TemporaryRegisterAllocator::first_temporary_register() const {
- DCHECK(allocation_count() > 0);
- return Register(allocation_base());
-}
-
-Register TemporaryRegisterAllocator::last_temporary_register() const {
- DCHECK(allocation_count() > 0);
- return Register(allocation_base() + allocation_count() - 1);
-}
-
-void TemporaryRegisterAllocator::set_observer(
- TemporaryRegisterObserver* observer) {
- DCHECK(observer_ == nullptr);
- observer_ = observer;
-}
-
-int TemporaryRegisterAllocator::AllocateTemporaryRegister() {
- allocation_count_ += 1;
- return allocation_base() + allocation_count() - 1;
-}
-
-int TemporaryRegisterAllocator::BorrowTemporaryRegister() {
- if (free_temporaries_.empty()) {
- return AllocateTemporaryRegister();
- } else {
- auto pos = free_temporaries_.begin();
- int retval = *pos;
- free_temporaries_.erase(pos);
- return retval;
- }
-}
-
-int TemporaryRegisterAllocator::BorrowTemporaryRegisterNotInRange(
- int start_index, int end_index) {
- if (free_temporaries_.empty()) {
- int next_allocation = allocation_base() + allocation_count();
- while (next_allocation >= start_index && next_allocation <= end_index) {
- free_temporaries_.insert(AllocateTemporaryRegister());
- next_allocation += 1;
- }
- return AllocateTemporaryRegister();
- }
-
- ZoneSet<int>::iterator index = free_temporaries_.lower_bound(start_index);
- if (index == free_temporaries_.begin()) {
- // If start_index is the first free register, check for a register
- // greater than end_index.
- index = free_temporaries_.upper_bound(end_index);
- if (index == free_temporaries_.end()) {
- return AllocateTemporaryRegister();
- }
- } else {
- // If there is a free register < start_index
- index--;
- }
-
- int retval = *index;
- free_temporaries_.erase(index);
- return retval;
-}
-
-int TemporaryRegisterAllocator::PrepareForConsecutiveTemporaryRegisters(
- size_t count) {
- if (count == 0) {
- return -1;
- }
-
- // TODO(oth): replace use of set<> here for free_temporaries with a
- // more efficient structure. And/or partition into two searches -
- // one before the translation window and one after.
-
- // A run will require at least |count| free temporaries.
- while (free_temporaries_.size() < count) {
- free_temporaries_.insert(AllocateTemporaryRegister());
- }
-
- // Search within existing temporaries for a run.
- auto start = free_temporaries_.begin();
- size_t run_length = 0;
- for (auto run_end = start; run_end != free_temporaries_.end(); run_end++) {
- int expected = *start + static_cast<int>(run_length);
- if (*run_end != expected) {
- start = run_end;
- run_length = 0;
- }
- if (++run_length == count) {
- return *start;
- }
- }
-
- // Continue run if possible across existing last temporary.
- if (allocation_count_ > 0 && (start == free_temporaries_.end() ||
- *start + static_cast<int>(run_length) !=
- last_temporary_register().index() + 1)) {
- run_length = 0;
- }
-
- // Pad temporaries if extended run would cross translation boundary.
- Register reg_first(*start);
- Register reg_last(*start + static_cast<int>(count) - 1);
-
- // Ensure enough registers for run.
- while (run_length++ < count) {
- free_temporaries_.insert(AllocateTemporaryRegister());
- }
-
- int run_start =
- last_temporary_register().index() - static_cast<int>(count) + 1;
- return run_start;
-}
-
-bool TemporaryRegisterAllocator::RegisterIsLive(Register reg) const {
- if (allocation_count_ > 0) {
- DCHECK(reg >= first_temporary_register() &&
- reg <= last_temporary_register());
- return free_temporaries_.find(reg.index()) == free_temporaries_.end();
- } else {
- return false;
- }
-}
-
-void TemporaryRegisterAllocator::BorrowConsecutiveTemporaryRegister(
- int reg_index) {
- DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end());
- free_temporaries_.erase(reg_index);
-}
-
-void TemporaryRegisterAllocator::ReturnTemporaryRegister(int reg_index) {
- DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
- free_temporaries_.insert(reg_index);
- if (observer_) {
- observer_->TemporaryRegisterFreeEvent(Register(reg_index));
- }
-}
-
-BytecodeRegisterAllocator::BytecodeRegisterAllocator(
- Zone* zone, TemporaryRegisterAllocator* allocator)
- : base_allocator_(allocator),
- allocated_(zone),
- next_consecutive_register_(-1),
- next_consecutive_count_(-1) {}
-
-BytecodeRegisterAllocator::~BytecodeRegisterAllocator() {
- for (auto i = allocated_.rbegin(); i != allocated_.rend(); i++) {
- base_allocator()->ReturnTemporaryRegister(*i);
- }
- allocated_.clear();
-}
-
-Register BytecodeRegisterAllocator::NewRegister() {
- int allocated = -1;
- if (next_consecutive_count_ <= 0) {
- allocated = base_allocator()->BorrowTemporaryRegister();
- } else {
- allocated = base_allocator()->BorrowTemporaryRegisterNotInRange(
- next_consecutive_register_,
- next_consecutive_register_ + next_consecutive_count_ - 1);
- }
- allocated_.push_back(allocated);
- return Register(allocated);
-}
-
-bool BytecodeRegisterAllocator::RegisterIsAllocatedInThisScope(
- Register reg) const {
- for (auto i = allocated_.begin(); i != allocated_.end(); i++) {
- if (*i == reg.index()) return true;
- }
- return false;
-}
-
-void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
- if (static_cast<int>(count) > next_consecutive_count_) {
- next_consecutive_register_ =
- base_allocator()->PrepareForConsecutiveTemporaryRegisters(count);
- next_consecutive_count_ = static_cast<int>(count);
- }
-}
-
-Register BytecodeRegisterAllocator::NextConsecutiveRegister() {
- DCHECK_GE(next_consecutive_register_, 0);
- DCHECK_GT(next_consecutive_count_, 0);
- base_allocator()->BorrowConsecutiveTemporaryRegister(
- next_consecutive_register_);
- allocated_.push_back(next_consecutive_register_);
- next_consecutive_count_--;
- return Register(next_consecutive_register_++);
-}
-
-} // namespace interpreter
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.h b/deps/v8/src/interpreter/bytecode-register-allocator.h
index b8f737be79..e9de4661d3 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.h
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.h
@@ -5,106 +5,76 @@
#ifndef V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
#define V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeArrayBuilder;
-class Register;
-class TemporaryRegisterObserver;
-
-class TemporaryRegisterAllocator final {
- public:
- TemporaryRegisterAllocator(Zone* zone, int start_index);
-
- // Borrow a temporary register.
- int BorrowTemporaryRegister();
-
- // Borrow a temporary register from the register range outside of
- // |start_index| to |end_index|.
- int BorrowTemporaryRegisterNotInRange(int start_index, int end_index);
-
- // Return a temporary register when no longer used.
- void ReturnTemporaryRegister(int reg_index);
-
- // Ensure a run of consecutive registers is available. Each register in
- // the range should be borrowed with BorrowConsecutiveTemporaryRegister().
- // Returns the start index of the run.
- int PrepareForConsecutiveTemporaryRegisters(size_t count);
-
- // Borrow a register from a range prepared with
- // PrepareForConsecutiveTemporaryRegisters().
- void BorrowConsecutiveTemporaryRegister(int reg_index);
-
- // Returns true if |reg| is a temporary register and is currently
- // borrowed.
- bool RegisterIsLive(Register reg) const;
-
- // Returns the first register in the range of temporary registers.
- Register first_temporary_register() const;
-
- // Returns the last register in the range of temporary registers.
- Register last_temporary_register() const;
-
- // Returns the start index of temporary register allocations.
- int allocation_base() const { return allocation_base_; }
-
- // Returns the number of temporary register allocations made.
- int allocation_count() const { return allocation_count_; }
-
- // Sets an observer for temporary register events.
- void set_observer(TemporaryRegisterObserver* observer);
-
- private:
- // Allocate a temporary register.
- int AllocateTemporaryRegister();
-
- ZoneSet<int> free_temporaries_;
- int allocation_base_;
- int allocation_count_;
- TemporaryRegisterObserver* observer_;
-
- DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterAllocator);
-};
-
-class TemporaryRegisterObserver {
- public:
- virtual ~TemporaryRegisterObserver() {}
- virtual void TemporaryRegisterFreeEvent(Register reg) = 0;
-};
-
-// A class that allows the instantiator to allocate temporary registers that are
-// cleaned up when scope is closed.
+// A class that allows the allocation of contiguous temporary registers.
class BytecodeRegisterAllocator final {
public:
- explicit BytecodeRegisterAllocator(Zone* zone,
- TemporaryRegisterAllocator* allocator);
- ~BytecodeRegisterAllocator();
- Register NewRegister();
-
- // Ensure |count| consecutive allocations are available.
- void PrepareForConsecutiveAllocations(size_t count);
-
- // Get the next consecutive allocation after calling
- // PrepareForConsecutiveAllocations.
- Register NextConsecutiveRegister();
-
- // Returns true if |reg| is allocated in this allocator.
- bool RegisterIsAllocatedInThisScope(Register reg) const;
-
- // Returns true if unused consecutive allocations remain.
- bool HasConsecutiveAllocations() const { return next_consecutive_count_ > 0; }
+ // Enables observation of register allocation and free events.
+ class Observer {
+ public:
+ virtual ~Observer() {}
+ virtual void RegisterAllocateEvent(Register reg) = 0;
+ virtual void RegisterListAllocateEvent(RegisterList reg_list) = 0;
+ virtual void RegisterListFreeEvent(RegisterList reg_list) = 0;
+ };
+
+ explicit BytecodeRegisterAllocator(int start_index)
+ : next_register_index_(start_index),
+ max_register_count_(start_index),
+ observer_(nullptr) {}
+ ~BytecodeRegisterAllocator() {}
+
+ // Returns a new register.
+ Register NewRegister() {
+ Register reg(next_register_index_++);
+ max_register_count_ = std::max(next_register_index_, max_register_count_);
+ if (observer_) {
+ observer_->RegisterAllocateEvent(reg);
+ }
+ return reg;
+ }
+
+ // Returns a consecutive list of |count| new registers.
+ RegisterList NewRegisterList(int count) {
+ RegisterList reg_list(next_register_index_, count);
+ next_register_index_ += count;
+ max_register_count_ = std::max(next_register_index_, max_register_count_);
+ if (observer_) {
+ observer_->RegisterListAllocateEvent(reg_list);
+ }
+ return reg_list;
+ }
+
+ // Release all registers above |register_index|.
+ void ReleaseRegisters(int register_index) {
+ if (observer_) {
+ observer_->RegisterListFreeEvent(
+ RegisterList(register_index, next_register_index_ - register_index));
+ }
+ next_register_index_ = register_index;
+ }
+
+ // Returns true if the register |reg| is a live register.
+ bool RegisterIsLive(Register reg) const {
+ return reg.index() < next_register_index_;
+ }
+
+ void set_observer(Observer* observer) { observer_ = observer; }
+
+ int next_register_index() const { return next_register_index_; }
+ int maximum_register_count() const { return max_register_count_; }
private:
- TemporaryRegisterAllocator* base_allocator() const { return base_allocator_; }
-
- TemporaryRegisterAllocator* base_allocator_;
- ZoneVector<int> allocated_;
- int next_consecutive_register_;
- int next_consecutive_count_;
+ int next_register_index_;
+ int max_register_count_;
+ Observer* observer_;
DISALLOW_COPY_AND_ASSIGN(BytecodeRegisterAllocator);
};
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index d28f215de8..acbe0ba5a1 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -15,10 +15,12 @@ const uint32_t BytecodeRegisterOptimizer::kInvalidEquivalenceId;
// register is materialized in the bytecode stream.
class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
public:
- RegisterInfo(Register reg, uint32_t equivalence_id, bool materialized)
+ RegisterInfo(Register reg, uint32_t equivalence_id, bool materialized,
+ bool allocated)
: register_(reg),
equivalence_id_(equivalence_id),
materialized_(materialized),
+ allocated_(allocated),
next_(this),
prev_(this) {}
@@ -48,12 +50,17 @@ class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
// exists.
RegisterInfo* GetEquivalentToMaterialize();
+ // Marks all temporary registers of the equivalence set as unmaterialized.
+ void MarkTemporariesAsUnmaterialized(Register temporary_base);
+
// Get an equivalent register. Returns this if none exists.
RegisterInfo* GetEquivalent();
Register register_value() const { return register_; }
bool materialized() const { return materialized_; }
void set_materialized(bool materialized) { materialized_ = materialized; }
+ bool allocated() const { return allocated_; }
+ void set_allocated(bool allocated) { allocated_ = allocated; }
void set_equivalence_id(uint32_t equivalence_id) {
equivalence_id_ = equivalence_id;
}
@@ -63,6 +70,7 @@ class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
Register register_;
uint32_t equivalence_id_;
bool materialized_;
+ bool allocated_;
// Equivalence set pointers.
RegisterInfo* next_;
@@ -155,8 +163,9 @@ BytecodeRegisterOptimizer::RegisterInfo::GetEquivalentToMaterialize() {
if (visitor->materialized()) {
return nullptr;
}
- if (best_info == nullptr ||
- visitor->register_value() < best_info->register_value()) {
+ if (visitor->allocated() &&
+ (best_info == nullptr ||
+ visitor->register_value() < best_info->register_value())) {
best_info = visitor;
}
visitor = visitor->next_;
@@ -164,16 +173,31 @@ BytecodeRegisterOptimizer::RegisterInfo::GetEquivalentToMaterialize() {
return best_info;
}
+void BytecodeRegisterOptimizer::RegisterInfo::MarkTemporariesAsUnmaterialized(
+ Register temporary_base) {
+ DCHECK(this->register_value() < temporary_base);
+ DCHECK(this->materialized());
+ RegisterInfo* visitor = this->next_;
+ while (visitor != this) {
+ if (visitor->register_value() >= temporary_base) {
+ visitor->set_materialized(false);
+ }
+ visitor = visitor->next_;
+ }
+}
+
BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::RegisterInfo::GetEquivalent() {
return next_;
}
BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
- Zone* zone, TemporaryRegisterAllocator* register_allocator,
- int parameter_count, BytecodePipelineStage* next_stage)
+ Zone* zone, BytecodeRegisterAllocator* register_allocator,
+ int fixed_registers_count, int parameter_count,
+ BytecodePipelineStage* next_stage)
: accumulator_(Register::virtual_accumulator()),
- temporary_base_(register_allocator->allocation_base()),
+ temporary_base_(fixed_registers_count),
+ max_register_index_(fixed_registers_count - 1),
register_info_table_(zone),
equivalence_id_(0),
next_stage_(next_stage),
@@ -198,7 +222,7 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
static_cast<size_t>(temporary_base_.index()));
for (size_t i = 0; i < register_info_table_.size(); ++i) {
register_info_table_[i] = new (zone) RegisterInfo(
- RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true);
+ RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true, true);
DCHECK_EQ(register_info_table_[i]->register_value().index(),
RegisterFromRegisterInfoTableIndex(i).index());
}
@@ -208,15 +232,17 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
// override
Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray(
- Isolate* isolate, int fixed_register_count, int parameter_count,
+ Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) {
FlushState();
- return next_stage_->ToBytecodeArray(isolate, fixed_register_count,
+ return next_stage_->ToBytecodeArray(isolate, max_register_index_ + 1,
parameter_count, handler_table);
}
// override
void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
+ // Jumps are handled by WriteJump.
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
//
// Transfers with observable registers as the destination will be
// immediately materialized so the source position information will
@@ -245,18 +271,16 @@ void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
break;
}
- if (Bytecodes::IsJump(node->bytecode()) ||
- node->bytecode() == Bytecode::kDebugger ||
+ if (node->bytecode() == Bytecode::kDebugger ||
node->bytecode() == Bytecode::kSuspendGenerator) {
// All state must be flushed before emitting
- // - a jump (due to how bytecode offsets for jumps are evaluated),
// - a call to the debugger (as it can manipulate locals and parameters),
// - a generator suspend (as this involves saving all registers).
FlushState();
}
PrepareOperands(node);
- WriteToNextStage(node);
+ next_stage_->Write(node);
}
// override
@@ -295,7 +319,7 @@ void BytecodeRegisterOptimizer::FlushState() {
// own equivalence set.
RegisterInfo* equivalent;
while ((equivalent = reg_info->GetEquivalent()) != reg_info) {
- if (!equivalent->materialized()) {
+ if (equivalent->allocated() && !equivalent->materialized()) {
OutputRegisterTransfer(reg_info, equivalent);
}
equivalent->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
@@ -306,38 +330,29 @@ void BytecodeRegisterOptimizer::FlushState() {
flush_required_ = false;
}
-void BytecodeRegisterOptimizer::WriteToNextStage(BytecodeNode* node) const {
- next_stage_->Write(node);
-}
-
-void BytecodeRegisterOptimizer::WriteToNextStage(
- BytecodeNode* node, const BytecodeSourceInfo& source_info) const {
- if (source_info.is_valid()) {
- node->source_info().Clone(source_info);
- }
- next_stage_->Write(node);
-}
-
void BytecodeRegisterOptimizer::OutputRegisterTransfer(
RegisterInfo* input_info, RegisterInfo* output_info,
- const BytecodeSourceInfo& source_info) {
+ BytecodeSourceInfo* source_info) {
Register input = input_info->register_value();
Register output = output_info->register_value();
DCHECK_NE(input.index(), output.index());
if (input == accumulator_) {
uint32_t operand = static_cast<uint32_t>(output.ToOperand());
- BytecodeNode node(Bytecode::kStar, operand);
- WriteToNextStage(&node, source_info);
+ BytecodeNode node(Bytecode::kStar, operand, source_info);
+ next_stage_->Write(&node);
} else if (output == accumulator_) {
uint32_t operand = static_cast<uint32_t>(input.ToOperand());
- BytecodeNode node(Bytecode::kLdar, operand);
- WriteToNextStage(&node, source_info);
+ BytecodeNode node(Bytecode::kLdar, operand, source_info);
+ next_stage_->Write(&node);
} else {
uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
- BytecodeNode node(Bytecode::kMov, operand0, operand1);
- WriteToNextStage(&node, source_info);
+ BytecodeNode node(Bytecode::kMov, operand0, operand1, source_info);
+ next_stage_->Write(&node);
+ }
+ if (output != accumulator_) {
+ max_register_index_ = std::max(max_register_index_, output.index());
}
output_info->set_materialized(true);
}
@@ -389,7 +404,7 @@ void BytecodeRegisterOptimizer::AddToEquivalenceSet(
void BytecodeRegisterOptimizer::RegisterTransfer(
RegisterInfo* input_info, RegisterInfo* output_info,
- const BytecodeSourceInfo& source_info) {
+ BytecodeSourceInfo* source_info) {
// Materialize an alternate in the equivalence set that
// |output_info| is leaving.
if (output_info->materialized()) {
@@ -408,42 +423,48 @@ void BytecodeRegisterOptimizer::RegisterTransfer(
output_info->set_materialized(false);
RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
OutputRegisterTransfer(materialized_info, output_info, source_info);
- } else if (source_info.is_valid()) {
+ } else if (source_info->is_valid()) {
// Emit a placeholder nop to maintain source position info.
EmitNopForSourceInfo(source_info);
}
+
+ bool input_is_observable = RegisterIsObservable(input_info->register_value());
+ if (input_is_observable) {
+ // If input is observable by the debugger, mark all other temporaries
+ // registers as unmaterialized so that this register is used in preference.
+ input_info->MarkTemporariesAsUnmaterialized(temporary_base_);
+ }
}
void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
- const BytecodeSourceInfo& source_info) const {
- DCHECK(source_info.is_valid());
- BytecodeNode nop(Bytecode::kNop);
- nop.source_info().Clone(source_info);
- WriteToNextStage(&nop);
+ BytecodeSourceInfo* source_info) const {
+ DCHECK(source_info->is_valid());
+ BytecodeNode nop(Bytecode::kNop, source_info);
+ next_stage_->Write(&nop);
}
-void BytecodeRegisterOptimizer::DoLdar(const BytecodeNode* const node) {
+void BytecodeRegisterOptimizer::DoLdar(BytecodeNode* node) {
Register input = GetRegisterInputOperand(
0, node->bytecode(), node->operands(), node->operand_count());
RegisterInfo* input_info = GetRegisterInfo(input);
- RegisterTransfer(input_info, accumulator_info_, node->source_info());
+ RegisterTransfer(input_info, accumulator_info_, node->source_info_ptr());
}
-void BytecodeRegisterOptimizer::DoMov(const BytecodeNode* const node) {
+void BytecodeRegisterOptimizer::DoMov(BytecodeNode* node) {
Register input = GetRegisterInputOperand(
0, node->bytecode(), node->operands(), node->operand_count());
RegisterInfo* input_info = GetRegisterInfo(input);
Register output = GetRegisterOutputOperand(
1, node->bytecode(), node->operands(), node->operand_count());
- RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
- RegisterTransfer(input_info, output_info, node->source_info());
+ RegisterInfo* output_info = GetRegisterInfo(output);
+ RegisterTransfer(input_info, output_info, node->source_info_ptr());
}
-void BytecodeRegisterOptimizer::DoStar(const BytecodeNode* const node) {
+void BytecodeRegisterOptimizer::DoStar(BytecodeNode* node) {
Register output = GetRegisterOutputOperand(
0, node->bytecode(), node->operands(), node->operand_count());
- RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
- RegisterTransfer(accumulator_info_, output_info, node->source_info());
+ RegisterInfo* output_info = GetRegisterInfo(output);
+ RegisterTransfer(accumulator_info_, output_info, node->source_info_ptr());
}
void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand(
@@ -451,6 +472,8 @@ void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand(
if (reg_info->materialized()) {
CreateMaterializedEquivalent(reg_info);
}
+ max_register_index_ =
+ std::max(max_register_index_, reg_info->register_value().index());
reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
}
@@ -458,7 +481,7 @@ void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand(
Register start, int count) {
for (int i = 0; i < count; ++i) {
Register reg(start.index() + i);
- RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
+ RegisterInfo* reg_info = GetRegisterInfo(reg);
PrepareRegisterOutputOperand(reg_info);
}
}
@@ -468,7 +491,7 @@ Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand(
// For a temporary register, RegInfo state may need be created. For
// locals and parameters, the RegInfo state is created in the
// BytecodeRegisterOptimizer constructor.
- RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
+ RegisterInfo* reg_info = GetRegisterInfo(reg);
if (reg_info->materialized()) {
return reg;
} else {
@@ -481,8 +504,8 @@ Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand(
void BytecodeRegisterOptimizer::PrepareRegisterInputOperand(
BytecodeNode* const node, Register reg, int operand_index) {
Register equivalent = GetEquivalentRegisterForInputOperand(reg);
- node->operands()[operand_index] =
- static_cast<uint32_t>(equivalent.ToOperand());
+ node->UpdateOperand(operand_index,
+ static_cast<uint32_t>(equivalent.ToOperand()));
}
void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start,
@@ -510,9 +533,9 @@ void BytecodeRegisterOptimizer::PrepareRegisterOperands(
Bytecodes::GetOperandTypes(node->bytecode());
for (int i = 0; i < operand_count; ++i) {
int count;
- // operand_types is terminated by OperandType::kNone so this does not
- // go out of bounds.
- if (operand_types[i + 1] == OperandType::kRegCount) {
+ if (operand_types[i] == OperandType::kRegList) {
+ DCHECK_LT(i, operand_count - 1);
+ DCHECK(operand_types[i + 1] == OperandType::kRegCount);
count = static_cast<int>(operands[i + 1]);
} else {
count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]);
@@ -577,8 +600,8 @@ Register BytecodeRegisterOptimizer::GetRegisterOutputOperand(
BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) {
size_t index = GetRegisterInfoTableIndex(reg);
- return (index < register_info_table_.size()) ? register_info_table_[index]
- : nullptr;
+ DCHECK_LT(index, register_info_table_.size());
+ return register_info_table_[index];
}
BytecodeRegisterOptimizer::RegisterInfo*
@@ -599,26 +622,37 @@ BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) {
void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
DCHECK(RegisterIsTemporary(reg));
size_t index = GetRegisterInfoTableIndex(reg);
- DCHECK_GE(index, register_info_table_.size());
- size_t new_size = index + 1;
- size_t old_size = register_info_table_.size();
- register_info_table_.resize(new_size);
- for (size_t i = old_size; i < new_size; ++i) {
- register_info_table_[i] = new (zone()) RegisterInfo(
- RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), false);
+ if (index >= register_info_table_.size()) {
+ size_t new_size = index + 1;
+ size_t old_size = register_info_table_.size();
+ register_info_table_.resize(new_size);
+ for (size_t i = old_size; i < new_size; ++i) {
+ register_info_table_[i] =
+ new (zone()) RegisterInfo(RegisterFromRegisterInfoTableIndex(i),
+ NextEquivalenceId(), false, false);
+ }
}
}
-void BytecodeRegisterOptimizer::TemporaryRegisterFreeEvent(Register reg) {
- RegisterInfo* info = GetRegisterInfo(reg);
- if (info != nullptr) {
- // If register is materialized and part of equivalence set, make
- // sure another member of the set holds the value before the
- // temporary register is removed.
- if (info->materialized()) {
- CreateMaterializedEquivalent(info);
+void BytecodeRegisterOptimizer::RegisterAllocateEvent(Register reg) {
+ GetOrCreateRegisterInfo(reg)->set_allocated(true);
+}
+
+void BytecodeRegisterOptimizer::RegisterListAllocateEvent(
+ RegisterList reg_list) {
+ if (reg_list.register_count() != 0) {
+ int first_index = reg_list.first_register().index();
+ GrowRegisterMap(Register(first_index + reg_list.register_count() - 1));
+ for (int i = 0; i < reg_list.register_count(); i++) {
+ GetRegisterInfo(Register(first_index + i))->set_allocated(true);
}
- info->MoveToNewEquivalenceSet(kInvalidEquivalenceId, false);
+ }
+}
+
+void BytecodeRegisterOptimizer::RegisterListFreeEvent(RegisterList reg_list) {
+ int first_index = reg_list.first_register().index();
+ for (int i = 0; i < reg_list.register_count(); i++) {
+ GetRegisterInfo(Register(first_index + i))->set_allocated(false);
}
}
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index fb087b527a..eda22e5f4d 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -15,13 +15,14 @@ namespace interpreter {
// registers. The bytecode generator uses temporary registers
// liberally for correctness and convenience and this stage removes
// transfers that are not required and preserves correctness.
-class BytecodeRegisterOptimizer final : public BytecodePipelineStage,
- public TemporaryRegisterObserver,
- public ZoneObject {
+class BytecodeRegisterOptimizer final
+ : public BytecodePipelineStage,
+ public BytecodeRegisterAllocator::Observer,
+ public ZoneObject {
public:
BytecodeRegisterOptimizer(Zone* zone,
- TemporaryRegisterAllocator* register_allocator,
- int parameter_count,
+ BytecodeRegisterAllocator* register_allocator,
+ int fixed_registers_count, int parameter_count,
BytecodePipelineStage* next_stage);
virtual ~BytecodeRegisterOptimizer() {}
@@ -31,7 +32,7 @@ class BytecodeRegisterOptimizer final : public BytecodePipelineStage,
void BindLabel(BytecodeLabel* label) override;
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
Handle<BytecodeArray> ToBytecodeArray(
- Isolate* isolate, int fixed_register_count, int parameter_count,
+ Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) override;
private:
@@ -39,34 +40,32 @@ class BytecodeRegisterOptimizer final : public BytecodePipelineStage,
class RegisterInfo;
- // TemporaryRegisterObserver interface.
- void TemporaryRegisterFreeEvent(Register reg) override;
+ // BytecodeRegisterAllocator::Observer interface.
+ void RegisterAllocateEvent(Register reg) override;
+ void RegisterListAllocateEvent(RegisterList reg_list) override;
+ void RegisterListFreeEvent(RegisterList reg) override;
// Helpers for BytecodePipelineStage interface.
void FlushState();
- void WriteToNextStage(BytecodeNode* node) const;
- void WriteToNextStage(BytecodeNode* node,
- const BytecodeSourceInfo& output_info) const;
// Update internal state for register transfer from |input| to
// |output| using |source_info| as source position information if
// any bytecodes are emitted due to transfer.
void RegisterTransfer(RegisterInfo* input, RegisterInfo* output,
- const BytecodeSourceInfo& source_info);
+ BytecodeSourceInfo* source_info);
// Emit a register transfer bytecode from |input| to |output|.
- void OutputRegisterTransfer(
- RegisterInfo* input, RegisterInfo* output,
- const BytecodeSourceInfo& source_info = BytecodeSourceInfo());
+ void OutputRegisterTransfer(RegisterInfo* input, RegisterInfo* output,
+ BytecodeSourceInfo* source_info = nullptr);
// Emits a Nop to preserve source position information in the
// bytecode pipeline.
- void EmitNopForSourceInfo(const BytecodeSourceInfo& source_info) const;
+ void EmitNopForSourceInfo(BytecodeSourceInfo* source_info) const;
// Handlers for bytecode nodes for register to register transfers.
- void DoLdar(const BytecodeNode* const node);
- void DoMov(const BytecodeNode* const node);
- void DoStar(const BytecodeNode* const node);
+ void DoLdar(BytecodeNode* node);
+ void DoMov(BytecodeNode* node);
+ void DoStar(BytecodeNode* node);
// Operand processing methods for bytecodes other than those
// performing register to register transfers.
@@ -133,6 +132,7 @@ class BytecodeRegisterOptimizer final : public BytecodePipelineStage,
const Register accumulator_;
RegisterInfo* accumulator_info_;
const Register temporary_base_;
+ int max_register_index_;
// Direct mapping to register info.
ZoneVector<RegisterInfo*> register_info_table_;
diff --git a/deps/v8/src/interpreter/bytecode-register.cc b/deps/v8/src/interpreter/bytecode-register.cc
index 31e3b90852..1ce512b0e0 100644
--- a/deps/v8/src/interpreter/bytecode-register.cc
+++ b/deps/v8/src/interpreter/bytecode-register.cc
@@ -121,7 +121,7 @@ bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
return true;
}
-std::string Register::ToString(int parameter_count) {
+std::string Register::ToString(int parameter_count) const {
if (is_current_context()) {
return std::string("<context>");
} else if (is_function_closure()) {
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
index b698da6a74..d698d4049d 100644
--- a/deps/v8/src/interpreter/bytecode-register.h
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -66,7 +66,7 @@ class Register final {
Register reg4 = Register(),
Register reg5 = Register());
- std::string ToString(int parameter_count);
+ std::string ToString(int parameter_count) const;
bool operator==(const Register& other) const {
return index() == other.index();
@@ -98,6 +98,40 @@ class Register final {
int index_;
};
+class RegisterList {
+ public:
+ RegisterList() : first_reg_index_(Register().index()), register_count_(0) {}
+ RegisterList(int first_reg_index, int register_count)
+ : first_reg_index_(first_reg_index), register_count_(register_count) {}
+
+ // Returns a new RegisterList which is a truncated version of this list, with
+ // |count| registers.
+ const RegisterList Truncate(int new_count) {
+ DCHECK_GE(new_count, 0);
+ DCHECK_LT(new_count, register_count_);
+ return RegisterList(first_reg_index_, new_count);
+ }
+
+ const Register operator[](size_t i) const {
+ DCHECK_LT(static_cast<int>(i), register_count_);
+ return Register(first_reg_index_ + static_cast<int>(i));
+ }
+
+ const Register first_register() const {
+ return (register_count() == 0) ? Register(0) : (*this)[0];
+ }
+
+ const Register last_register() const {
+ return (register_count() == 0) ? Register(0) : (*this)[register_count_ - 1];
+ }
+
+ int register_count() const { return register_count_; }
+
+ private:
+ int first_reg_index_;
+ int register_count_;
+};
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-traits.h b/deps/v8/src/interpreter/bytecode-traits.h
index 672a687faf..f71598cbef 100644
--- a/deps/v8/src/interpreter/bytecode-traits.h
+++ b/deps/v8/src/interpreter/bytecode-traits.h
@@ -5,7 +5,7 @@
#ifndef V8_INTERPRETER_BYTECODE_TRAITS_H_
#define V8_INTERPRETER_BYTECODE_TRAITS_H_
-#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/bytecode-operands.h"
namespace v8 {
namespace internal {
@@ -65,208 +65,88 @@ struct OperandScaler {
static const OperandSize kOperandSize = static_cast<OperandSize>(kSize);
};
-template <OperandType>
-struct RegisterOperandTraits {
- static const int kIsRegisterOperand = 0;
+template <int... values>
+struct SumHelper;
+template <int value>
+struct SumHelper<value> {
+ static const int kValue = value;
};
-
-#define DECLARE_REGISTER_OPERAND(Name, _) \
- template <> \
- struct RegisterOperandTraits<OperandType::k##Name> { \
- static const int kIsRegisterOperand = 1; \
- };
-REGISTER_OPERAND_TYPE_LIST(DECLARE_REGISTER_OPERAND)
-#undef DECLARE_REGISTER_OPERAND
-
-template <AccumulatorUse, OperandType...>
-struct BytecodeTraits {};
-
-template <AccumulatorUse accumulator_use, OperandType operand_0,
- OperandType operand_1, OperandType operand_2, OperandType operand_3>
-struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2,
- operand_3> {
- static const OperandType* GetOperandTypes() {
- static const OperandType operand_types[] = {operand_0, operand_1, operand_2,
- operand_3, OperandType::kNone};
- return operand_types;
- }
-
- static const OperandTypeInfo* GetOperandTypeInfos() {
- static const OperandTypeInfo operand_type_infos[] = {
- OperandTraits<operand_0>::kOperandTypeInfo,
- OperandTraits<operand_1>::kOperandTypeInfo,
- OperandTraits<operand_2>::kOperandTypeInfo,
- OperandTraits<operand_3>::kOperandTypeInfo, OperandTypeInfo::kNone};
- return operand_type_infos;
- }
-
- template <OperandType ot>
- static inline bool HasAnyOperandsOfType() {
- return operand_0 == ot || operand_1 == ot || operand_2 == ot ||
- operand_3 == ot;
- }
-
- static inline bool IsScalable() {
- return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
- OperandTraits<operand_1>::TypeInfoTraits::kIsScalable |
- OperandTraits<operand_2>::TypeInfoTraits::kIsScalable |
- OperandTraits<operand_3>::TypeInfoTraits::kIsScalable);
- }
-
- static const AccumulatorUse kAccumulatorUse = accumulator_use;
- static const int kOperandCount = 4;
- static const int kRegisterOperandCount =
- RegisterOperandTraits<operand_0>::kIsRegisterOperand +
- RegisterOperandTraits<operand_1>::kIsRegisterOperand +
- RegisterOperandTraits<operand_2>::kIsRegisterOperand +
- RegisterOperandTraits<operand_3>::kIsRegisterOperand;
+template <int value, int... values>
+struct SumHelper<value, values...> {
+ static const int kValue = value + SumHelper<values...>::kValue;
};
-template <AccumulatorUse accumulator_use, OperandType operand_0,
- OperandType operand_1, OperandType operand_2>
-struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2> {
- static const OperandType* GetOperandTypes() {
- static const OperandType operand_types[] = {operand_0, operand_1, operand_2,
- OperandType::kNone};
- return operand_types;
- }
-
- static const OperandTypeInfo* GetOperandTypeInfos() {
- static const OperandTypeInfo operand_type_infos[] = {
- OperandTraits<operand_0>::kOperandTypeInfo,
- OperandTraits<operand_1>::kOperandTypeInfo,
- OperandTraits<operand_2>::kOperandTypeInfo, OperandTypeInfo::kNone};
- return operand_type_infos;
- }
-
- template <OperandType ot>
- static inline bool HasAnyOperandsOfType() {
- return operand_0 == ot || operand_1 == ot || operand_2 == ot;
- }
-
- static inline bool IsScalable() {
- return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
- OperandTraits<operand_1>::TypeInfoTraits::kIsScalable |
- OperandTraits<operand_2>::TypeInfoTraits::kIsScalable);
- }
-
- static const AccumulatorUse kAccumulatorUse = accumulator_use;
- static const int kOperandCount = 3;
- static const int kRegisterOperandCount =
- RegisterOperandTraits<operand_0>::kIsRegisterOperand +
- RegisterOperandTraits<operand_1>::kIsRegisterOperand +
- RegisterOperandTraits<operand_2>::kIsRegisterOperand;
-};
-
-template <AccumulatorUse accumulator_use, OperandType operand_0,
- OperandType operand_1>
-struct BytecodeTraits<accumulator_use, operand_0, operand_1> {
- static const OperandType* GetOperandTypes() {
- static const OperandType operand_types[] = {operand_0, operand_1,
- OperandType::kNone};
- return operand_types;
- }
-
- static const OperandTypeInfo* GetOperandTypeInfos() {
- static const OperandTypeInfo operand_type_infos[] = {
- OperandTraits<operand_0>::kOperandTypeInfo,
- OperandTraits<operand_1>::kOperandTypeInfo, OperandTypeInfo::kNone};
- return operand_type_infos;
- }
-
- template <OperandType ot>
- static inline bool HasAnyOperandsOfType() {
- return operand_0 == ot || operand_1 == ot;
- }
-
- static inline bool IsScalable() {
- return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
- OperandTraits<operand_1>::TypeInfoTraits::kIsScalable);
- }
-
+template <AccumulatorUse accumulator_use, OperandType... operands>
+struct BytecodeTraits {
+ static const OperandType kOperandTypes[];
+ static const OperandTypeInfo kOperandTypeInfos[];
+ static const OperandSize kSingleScaleOperandSizes[];
+ static const OperandSize kDoubleScaleOperandSizes[];
+ static const OperandSize kQuadrupleScaleOperandSizes[];
+ static const int kSingleScaleSize = SumHelper<
+ 1, OperandScaler<operands, OperandScale::kSingle>::kSize...>::kValue;
+ static const int kDoubleScaleSize = SumHelper<
+ 1, OperandScaler<operands, OperandScale::kDouble>::kSize...>::kValue;
+ static const int kQuadrupleScaleSize = SumHelper<
+ 1, OperandScaler<operands, OperandScale::kQuadruple>::kSize...>::kValue;
static const AccumulatorUse kAccumulatorUse = accumulator_use;
- static const int kOperandCount = 2;
- static const int kRegisterOperandCount =
- RegisterOperandTraits<operand_0>::kIsRegisterOperand +
- RegisterOperandTraits<operand_1>::kIsRegisterOperand;
+ static const int kOperandCount = sizeof...(operands);
};
-template <AccumulatorUse accumulator_use, OperandType operand_0>
-struct BytecodeTraits<accumulator_use, operand_0> {
- static const OperandType* GetOperandTypes() {
- static const OperandType operand_types[] = {operand_0, OperandType::kNone};
- return operand_types;
- }
-
- static const OperandTypeInfo* GetOperandTypeInfos() {
- static const OperandTypeInfo operand_type_infos[] = {
- OperandTraits<operand_0>::kOperandTypeInfo, OperandTypeInfo::kNone};
- return operand_type_infos;
- }
-
- template <OperandType ot>
- static inline bool HasAnyOperandsOfType() {
- return operand_0 == ot;
- }
-
- static inline bool IsScalable() {
- return OperandTraits<operand_0>::TypeInfoTraits::kIsScalable;
- }
-
- static const AccumulatorUse kAccumulatorUse = accumulator_use;
- static const int kOperandCount = 1;
- static const int kRegisterOperandCount =
- RegisterOperandTraits<operand_0>::kIsRegisterOperand;
-};
+template <AccumulatorUse accumulator_use, OperandType... operands>
+STATIC_CONST_MEMBER_DEFINITION const OperandType
+ BytecodeTraits<accumulator_use, operands...>::kOperandTypes[] = {
+ operands...};
+template <AccumulatorUse accumulator_use, OperandType... operands>
+STATIC_CONST_MEMBER_DEFINITION const OperandTypeInfo
+ BytecodeTraits<accumulator_use, operands...>::kOperandTypeInfos[] = {
+ OperandTraits<operands>::kOperandTypeInfo...};
+template <AccumulatorUse accumulator_use, OperandType... operands>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize
+ BytecodeTraits<accumulator_use, operands...>::kSingleScaleOperandSizes[] = {
+ OperandScaler<operands, OperandScale::kSingle>::kOperandSize...};
+template <AccumulatorUse accumulator_use, OperandType... operands>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize
+ BytecodeTraits<accumulator_use, operands...>::kDoubleScaleOperandSizes[] = {
+ OperandScaler<operands, OperandScale::kDouble>::kOperandSize...};
+template <AccumulatorUse accumulator_use, OperandType... operands>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize BytecodeTraits<
+ accumulator_use, operands...>::kQuadrupleScaleOperandSizes[] = {
+ OperandScaler<operands, OperandScale::kQuadruple>::kOperandSize...};
template <AccumulatorUse accumulator_use>
struct BytecodeTraits<accumulator_use> {
- static const OperandType* GetOperandTypes() {
- static const OperandType operand_types[] = {OperandType::kNone};
- return operand_types;
- }
-
- static const OperandTypeInfo* GetOperandTypeInfos() {
- static const OperandTypeInfo operand_type_infos[] = {
- OperandTypeInfo::kNone};
- return operand_type_infos;
- }
-
- template <OperandType ot>
- static inline bool HasAnyOperandsOfType() {
- return false;
- }
-
- static inline bool IsScalable() { return false; }
-
+ static const OperandType kOperandTypes[];
+ static const OperandTypeInfo kOperandTypeInfos[];
+ static const OperandSize kSingleScaleOperandSizes[];
+ static const OperandSize kDoubleScaleOperandSizes[];
+ static const OperandSize kQuadrupleScaleOperandSizes[];
+ static const int kSingleScaleSize = 1;
+ static const int kDoubleScaleSize = 1;
+ static const int kQuadrupleScaleSize = 1;
static const AccumulatorUse kAccumulatorUse = accumulator_use;
static const int kOperandCount = 0;
- static const int kRegisterOperandCount = 0;
};
-static OperandSize ScaledOperandSize(OperandType operand_type,
- OperandScale operand_scale) {
- STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
- OperandScale::kLast == OperandScale::kQuadruple);
- int index = static_cast<int>(operand_scale) >> 1;
- switch (operand_type) {
-#define CASE(Name, TypeInfo) \
- case OperandType::k##Name: { \
- static const OperandSize kOperandSizes[] = { \
- OperandScaler<OperandType::k##Name, \
- OperandScale::kSingle>::kOperandSize, \
- OperandScaler<OperandType::k##Name, \
- OperandScale::kDouble>::kOperandSize, \
- OperandScaler<OperandType::k##Name, \
- OperandScale::kQuadruple>::kOperandSize}; \
- return kOperandSizes[index]; \
- }
- OPERAND_TYPE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return OperandSize::kNone;
-}
+template <AccumulatorUse accumulator_use>
+STATIC_CONST_MEMBER_DEFINITION const OperandType
+ BytecodeTraits<accumulator_use>::kOperandTypes[] = {OperandType::kNone};
+template <AccumulatorUse accumulator_use>
+STATIC_CONST_MEMBER_DEFINITION const OperandTypeInfo
+ BytecodeTraits<accumulator_use>::kOperandTypeInfos[] = {
+ OperandTypeInfo::kNone};
+template <AccumulatorUse accumulator_use>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize
+ BytecodeTraits<accumulator_use>::kSingleScaleOperandSizes[] = {
+ OperandSize::kNone};
+template <AccumulatorUse accumulator_use>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize
+ BytecodeTraits<accumulator_use>::kDoubleScaleOperandSizes[] = {
+ OperandSize::kNone};
+template <AccumulatorUse accumulator_use>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize
+ BytecodeTraits<accumulator_use>::kQuadrupleScaleOperandSizes[] = {
+ OperandSize::kNone};
} // namespace interpreter
} // namespace internal
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index 09bcd22b92..c58f4685a2 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -7,14 +7,55 @@
#include <iomanip>
#include "src/base/bits.h"
-#include "src/globals.h"
#include "src/interpreter/bytecode-traits.h"
namespace v8 {
namespace internal {
namespace interpreter {
-STATIC_CONST_MEMBER_DEFINITION const int Bytecodes::kMaxOperands;
+// clang-format off
+const OperandType* const Bytecodes::kOperandTypes[] = {
+#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kOperandTypes,
+ BYTECODE_LIST(ENTRY)
+#undef ENTRY
+};
+
+const OperandTypeInfo* const Bytecodes::kOperandTypeInfos[] = {
+#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kOperandTypeInfos,
+ BYTECODE_LIST(ENTRY)
+#undef ENTRY
+};
+
+const int Bytecodes::kOperandCount[] = {
+#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kOperandCount,
+ BYTECODE_LIST(ENTRY)
+#undef ENTRY
+};
+
+const AccumulatorUse Bytecodes::kAccumulatorUse[] = {
+#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kAccumulatorUse,
+ BYTECODE_LIST(ENTRY)
+#undef ENTRY
+};
+
+const int Bytecodes::kBytecodeSizes[][3] = {
+#define ENTRY(Name, ...) \
+ { BytecodeTraits<__VA_ARGS__>::kSingleScaleSize, \
+ BytecodeTraits<__VA_ARGS__>::kDoubleScaleSize, \
+ BytecodeTraits<__VA_ARGS__>::kQuadrupleScaleSize },
+ BYTECODE_LIST(ENTRY)
+#undef ENTRY
+};
+
+const OperandSize* const Bytecodes::kOperandSizes[][3] = {
+#define ENTRY(Name, ...) \
+ { BytecodeTraits<__VA_ARGS__>::kSingleScaleOperandSizes, \
+ BytecodeTraits<__VA_ARGS__>::kDoubleScaleOperandSizes, \
+ BytecodeTraits<__VA_ARGS__>::kQuadrupleScaleOperandSizes },
+ BYTECODE_LIST(ENTRY)
+#undef ENTRY
+};
+// clang-format on
// static
const char* Bytecodes::ToString(Bytecode bytecode) {
@@ -44,77 +85,6 @@ std::string Bytecodes::ToString(Bytecode bytecode, OperandScale operand_scale) {
}
// static
-const char* Bytecodes::AccumulatorUseToString(AccumulatorUse accumulator_use) {
- switch (accumulator_use) {
- case AccumulatorUse::kNone:
- return "None";
- case AccumulatorUse::kRead:
- return "Read";
- case AccumulatorUse::kWrite:
- return "Write";
- case AccumulatorUse::kReadWrite:
- return "ReadWrite";
- }
- UNREACHABLE();
- return "";
-}
-
-// static
-const char* Bytecodes::OperandTypeToString(OperandType operand_type) {
- switch (operand_type) {
-#define CASE(Name, _) \
- case OperandType::k##Name: \
- return #Name;
- OPERAND_TYPE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return "";
-}
-
-// static
-const char* Bytecodes::OperandScaleToString(OperandScale operand_scale) {
- switch (operand_scale) {
-#define CASE(Name, _) \
- case OperandScale::k##Name: \
- return #Name;
- OPERAND_SCALE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return "";
-}
-
-// static
-const char* Bytecodes::OperandSizeToString(OperandSize operand_size) {
- switch (operand_size) {
- case OperandSize::kNone:
- return "None";
- case OperandSize::kByte:
- return "Byte";
- case OperandSize::kShort:
- return "Short";
- case OperandSize::kQuad:
- return "Quad";
- }
- UNREACHABLE();
- return "";
-}
-
-// static
-uint8_t Bytecodes::ToByte(Bytecode bytecode) {
- DCHECK_LE(bytecode, Bytecode::kLast);
- return static_cast<uint8_t>(bytecode);
-}
-
-// static
-Bytecode Bytecodes::FromByte(uint8_t value) {
- Bytecode bytecode = static_cast<Bytecode>(value);
- DCHECK(bytecode <= Bytecode::kLast);
- return bytecode;
-}
-
-// static
Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
DCHECK(!IsDebugBreak(bytecode));
if (bytecode == Bytecode::kWide) {
@@ -124,7 +94,7 @@ Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
return Bytecode::kDebugBreakExtraWide;
}
int bytecode_size = Size(bytecode, OperandScale::kSingle);
-#define RETURN_IF_DEBUG_BREAK_SIZE_MATCHES(Name, ...) \
+#define RETURN_IF_DEBUG_BREAK_SIZE_MATCHES(Name) \
if (bytecode_size == Size(Bytecode::k##Name, OperandScale::kSingle)) { \
return Bytecode::k##Name; \
}
@@ -135,224 +105,6 @@ Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
}
// static
-int Bytecodes::Size(Bytecode bytecode, OperandScale operand_scale) {
- int size = 1;
- for (int i = 0; i < NumberOfOperands(bytecode); i++) {
- OperandSize operand_size = GetOperandSize(bytecode, i, operand_scale);
- int delta = static_cast<int>(operand_size);
- DCHECK(base::bits::IsPowerOfTwo32(static_cast<uint32_t>(delta)));
- size += delta;
- }
- return size;
-}
-
-// static
-size_t Bytecodes::ReturnCount(Bytecode bytecode) {
- return bytecode == Bytecode::kReturn ? 1 : 0;
-}
-
-// static
-int Bytecodes::NumberOfOperands(Bytecode bytecode) {
- DCHECK(bytecode <= Bytecode::kLast);
- switch (bytecode) {
-#define CASE(Name, ...) \
- case Bytecode::k##Name: \
- return BytecodeTraits<__VA_ARGS__>::kOperandCount;
- BYTECODE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return 0;
-}
-
-// static
-int Bytecodes::NumberOfRegisterOperands(Bytecode bytecode) {
- DCHECK(bytecode <= Bytecode::kLast);
- switch (bytecode) {
-#define CASE(Name, ...) \
- case Bytecode::k##Name: \
- typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
- return Name##Trait::kRegisterOperandCount;
- BYTECODE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return false;
-}
-
-// static
-Bytecode Bytecodes::OperandScaleToPrefixBytecode(OperandScale operand_scale) {
- switch (operand_scale) {
- case OperandScale::kQuadruple:
- return Bytecode::kExtraWide;
- case OperandScale::kDouble:
- return Bytecode::kWide;
- default:
- UNREACHABLE();
- return Bytecode::kIllegal;
- }
-}
-
-// static
-bool Bytecodes::OperandScaleRequiresPrefixBytecode(OperandScale operand_scale) {
- return operand_scale != OperandScale::kSingle;
-}
-
-// static
-OperandScale Bytecodes::PrefixBytecodeToOperandScale(Bytecode bytecode) {
- switch (bytecode) {
- case Bytecode::kExtraWide:
- case Bytecode::kDebugBreakExtraWide:
- return OperandScale::kQuadruple;
- case Bytecode::kWide:
- case Bytecode::kDebugBreakWide:
- return OperandScale::kDouble;
- default:
- UNREACHABLE();
- return OperandScale::kSingle;
- }
-}
-
-// static
-AccumulatorUse Bytecodes::GetAccumulatorUse(Bytecode bytecode) {
- DCHECK(bytecode <= Bytecode::kLast);
- switch (bytecode) {
-#define CASE(Name, ...) \
- case Bytecode::k##Name: \
- return BytecodeTraits<__VA_ARGS__>::kAccumulatorUse;
- BYTECODE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return AccumulatorUse::kNone;
-}
-
-// static
-bool Bytecodes::ReadsAccumulator(Bytecode bytecode) {
- return (GetAccumulatorUse(bytecode) & AccumulatorUse::kRead) ==
- AccumulatorUse::kRead;
-}
-
-// static
-bool Bytecodes::WritesAccumulator(Bytecode bytecode) {
- return (GetAccumulatorUse(bytecode) & AccumulatorUse::kWrite) ==
- AccumulatorUse::kWrite;
-}
-
-// static
-bool Bytecodes::WritesBooleanToAccumulator(Bytecode bytecode) {
- switch (bytecode) {
- case Bytecode::kLdaTrue:
- case Bytecode::kLdaFalse:
- case Bytecode::kToBooleanLogicalNot:
- case Bytecode::kLogicalNot:
- case Bytecode::kTestEqual:
- case Bytecode::kTestNotEqual:
- case Bytecode::kTestEqualStrict:
- case Bytecode::kTestLessThan:
- case Bytecode::kTestLessThanOrEqual:
- case Bytecode::kTestGreaterThan:
- case Bytecode::kTestGreaterThanOrEqual:
- case Bytecode::kTestInstanceOf:
- case Bytecode::kTestIn:
- case Bytecode::kForInDone:
- return true;
- default:
- return false;
- }
-}
-
-// static
-bool Bytecodes::IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
- switch (bytecode) {
- case Bytecode::kLdaZero:
- case Bytecode::kLdaSmi:
- case Bytecode::kLdaUndefined:
- case Bytecode::kLdaNull:
- case Bytecode::kLdaTheHole:
- case Bytecode::kLdaTrue:
- case Bytecode::kLdaFalse:
- case Bytecode::kLdaConstant:
- case Bytecode::kLdar:
- return true;
- default:
- return false;
- }
-}
-
-// static
-bool Bytecodes::IsJumpWithoutEffects(Bytecode bytecode) {
- return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
-}
-
-// static
-bool Bytecodes::IsRegisterLoadWithoutEffects(Bytecode bytecode) {
- switch (bytecode) {
- case Bytecode::kMov:
- case Bytecode::kPopContext:
- case Bytecode::kPushContext:
- case Bytecode::kStar:
- case Bytecode::kLdrUndefined:
- return true;
- default:
- return false;
- }
-}
-
-// static
-bool Bytecodes::IsWithoutExternalSideEffects(Bytecode bytecode) {
- // These bytecodes only manipulate interpreter frame state and will
- // never throw.
- return (IsAccumulatorLoadWithoutEffects(bytecode) ||
- IsRegisterLoadWithoutEffects(bytecode) ||
- bytecode == Bytecode::kNop || IsJumpWithoutEffects(bytecode));
-}
-
-// static
-OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
- DCHECK_LE(bytecode, Bytecode::kLast);
- DCHECK_LT(i, NumberOfOperands(bytecode));
- DCHECK_GE(i, 0);
- return GetOperandTypes(bytecode)[i];
-}
-
-// static
-const OperandType* Bytecodes::GetOperandTypes(Bytecode bytecode) {
- DCHECK(bytecode <= Bytecode::kLast);
- switch (bytecode) {
-#define CASE(Name, ...) \
- case Bytecode::k##Name: \
- return BytecodeTraits<__VA_ARGS__>::GetOperandTypes();
- BYTECODE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return nullptr;
-}
-
-// static
-const OperandTypeInfo* Bytecodes::GetOperandTypeInfos(Bytecode bytecode) {
- DCHECK(bytecode <= Bytecode::kLast);
- switch (bytecode) {
-#define CASE(Name, ...) \
- case Bytecode::k##Name: \
- return BytecodeTraits<__VA_ARGS__>::GetOperandTypeInfos();
- BYTECODE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return nullptr;
-}
-
-// static
-OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
- OperandScale operand_scale) {
- DCHECK_LT(i, NumberOfOperands(bytecode));
- OperandType operand_type = GetOperandType(bytecode, i);
- return SizeOfOperand(operand_type, operand_scale);
-}
-
-// static
int Bytecodes::GetOperandOffset(Bytecode bytecode, int i,
OperandScale operand_scale) {
DCHECK_LT(i, Bytecodes::NumberOfOperands(bytecode));
@@ -367,67 +119,6 @@ int Bytecodes::GetOperandOffset(Bytecode bytecode, int i,
}
// static
-OperandSize Bytecodes::SizeOfOperand(OperandType operand_type,
- OperandScale operand_scale) {
- return static_cast<OperandSize>(
- ScaledOperandSize(operand_type, operand_scale));
-}
-
-// static
-bool Bytecodes::IsConditionalJumpImmediate(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpIfTrue ||
- bytecode == Bytecode::kJumpIfFalse ||
- bytecode == Bytecode::kJumpIfToBooleanTrue ||
- bytecode == Bytecode::kJumpIfToBooleanFalse ||
- bytecode == Bytecode::kJumpIfNotHole ||
- bytecode == Bytecode::kJumpIfNull ||
- bytecode == Bytecode::kJumpIfUndefined;
-}
-
-// static
-bool Bytecodes::IsConditionalJumpConstant(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpIfTrueConstant ||
- bytecode == Bytecode::kJumpIfFalseConstant ||
- bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
- bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
- bytecode == Bytecode::kJumpIfNotHoleConstant ||
- bytecode == Bytecode::kJumpIfNullConstant ||
- bytecode == Bytecode::kJumpIfUndefinedConstant;
-}
-
-// static
-bool Bytecodes::IsConditionalJump(Bytecode bytecode) {
- return IsConditionalJumpImmediate(bytecode) ||
- IsConditionalJumpConstant(bytecode);
-}
-
-
-// static
-bool Bytecodes::IsJumpImmediate(Bytecode bytecode) {
- return bytecode == Bytecode::kJump || IsConditionalJumpImmediate(bytecode);
-}
-
-
-// static
-bool Bytecodes::IsJumpConstant(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpConstant ||
- IsConditionalJumpConstant(bytecode);
-}
-
-// static
-bool Bytecodes::IsJump(Bytecode bytecode) {
- return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode);
-}
-
-// static
-bool Bytecodes::IsJumpIfToBoolean(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpIfToBooleanTrue ||
- bytecode == Bytecode::kJumpIfToBooleanFalse ||
- bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
- bytecode == Bytecode::kJumpIfToBooleanFalseConstant;
-}
-
-// static
Bytecode Bytecodes::GetJumpWithoutToBoolean(Bytecode bytecode) {
switch (bytecode) {
case Bytecode::kJumpIfToBooleanTrue:
@@ -446,19 +137,6 @@ Bytecode Bytecodes::GetJumpWithoutToBoolean(Bytecode bytecode) {
}
// static
-bool Bytecodes::IsCallOrNew(Bytecode bytecode) {
- return bytecode == Bytecode::kCall || bytecode == Bytecode::kTailCall ||
- bytecode == Bytecode::kNew;
-}
-
-// static
-bool Bytecodes::IsCallRuntime(Bytecode bytecode) {
- return bytecode == Bytecode::kCallRuntime ||
- bytecode == Bytecode::kCallRuntimeForPair ||
- bytecode == Bytecode::kInvokeIntrinsic;
-}
-
-// static
bool Bytecodes::IsDebugBreak(Bytecode bytecode) {
switch (bytecode) {
#define CASE(Name, ...) case Bytecode::k##Name:
@@ -472,53 +150,6 @@ bool Bytecodes::IsDebugBreak(Bytecode bytecode) {
}
// static
-bool Bytecodes::IsLdarOrStar(Bytecode bytecode) {
- return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
-}
-
-// static
-bool Bytecodes::IsBytecodeWithScalableOperands(Bytecode bytecode) {
- switch (bytecode) {
-#define CASE(Name, ...) \
- case Bytecode::k##Name: \
- typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
- return Name##Trait::IsScalable();
- BYTECODE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return false;
-}
-
-// static
-bool Bytecodes::IsPrefixScalingBytecode(Bytecode bytecode) {
- switch (bytecode) {
- case Bytecode::kExtraWide:
- case Bytecode::kDebugBreakExtraWide:
- case Bytecode::kWide:
- case Bytecode::kDebugBreakWide:
- return true;
- default:
- return false;
- }
-}
-
-// static
-bool Bytecodes::PutsNameInAccumulator(Bytecode bytecode) {
- return bytecode == Bytecode::kTypeOf;
-}
-
-// static
-bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
- return bytecode == Bytecode::kReturn || IsJump(bytecode);
-}
-
-// static
-bool Bytecodes::IsMaybeRegisterOperandType(OperandType operand_type) {
- return operand_type == OperandType::kMaybeReg;
-}
-
-// static
bool Bytecodes::IsRegisterOperandType(OperandType operand_type) {
switch (operand_type) {
#define CASE(Name, _) \
@@ -599,21 +230,11 @@ bool Bytecodes::IsStarLookahead(Bytecode bytecode, OperandScale operand_scale) {
}
// static
-int Bytecodes::GetNumberOfRegistersRepresentedBy(OperandType operand_type) {
- switch (operand_type) {
- case OperandType::kMaybeReg:
- case OperandType::kReg:
- case OperandType::kRegOut:
- return 1;
- case OperandType::kRegPair:
- case OperandType::kRegOutPair:
- return 2;
- case OperandType::kRegOutTriple:
- return 3;
- default:
- return 0;
+bool Bytecodes::IsBytecodeWithScalableOperands(Bytecode bytecode) {
+ for (int i = 0; i < NumberOfOperands(bytecode); i++) {
+ if (OperandIsScalable(bytecode, i)) return true;
}
- return 0;
+ return false;
}
// static
@@ -630,25 +251,28 @@ bool Bytecodes::IsUnsignedOperandType(OperandType operand_type) {
}
// static
-OperandSize Bytecodes::SizeForSignedOperand(int value) {
- if (value >= kMinInt8 && value <= kMaxInt8) {
- return OperandSize::kByte;
- } else if (value >= kMinInt16 && value <= kMaxInt16) {
- return OperandSize::kShort;
- } else {
- return OperandSize::kQuad;
- }
-}
-
-// static
-OperandSize Bytecodes::SizeForUnsignedOperand(uint32_t value) {
- if (value <= kMaxUInt8) {
- return OperandSize::kByte;
- } else if (value <= kMaxUInt16) {
- return OperandSize::kShort;
- } else {
- return OperandSize::kQuad;
- }
+OperandSize Bytecodes::SizeOfOperand(OperandType operand_type,
+ OperandScale operand_scale) {
+ DCHECK_LE(operand_type, OperandType::kLast);
+ DCHECK_GE(operand_scale, OperandScale::kSingle);
+ DCHECK_LE(operand_scale, OperandScale::kLast);
+ STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
+ OperandScale::kLast == OperandScale::kQuadruple);
+ int scale_index = static_cast<int>(operand_scale) >> 1;
+ // clang-format off
+ static const OperandSize kOperandSizes[][3] = {
+#define ENTRY(Name, ...) \
+ { OperandScaler<OperandType::k##Name, \
+ OperandScale::kSingle>::kOperandSize, \
+ OperandScaler<OperandType::k##Name, \
+ OperandScale::kDouble>::kOperandSize, \
+ OperandScaler<OperandType::k##Name, \
+ OperandScale::kQuadruple>::kOperandSize },
+ OPERAND_TYPE_LIST(ENTRY)
+#undef ENTRY
+ };
+ // clang-format on
+ return kOperandSizes[static_cast<size_t>(operand_type)][scale_index];
}
// static
@@ -662,22 +286,6 @@ std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode) {
return os << Bytecodes::ToString(bytecode);
}
-std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use) {
- return os << Bytecodes::AccumulatorUseToString(use);
-}
-
-std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size) {
- return os << Bytecodes::OperandSizeToString(operand_size);
-}
-
-std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale) {
- return os << Bytecodes::OperandScaleToString(operand_scale);
-}
-
-std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
- return os << Bytecodes::OperandTypeToString(operand_type);
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 036ae72872..6232966bbc 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -9,6 +9,9 @@
#include <iosfwd>
#include <string>
+#include "src/globals.h"
+#include "src/interpreter/bytecode-operands.h"
+
// This interface and it's implementation are independent of the
// libv8_base library as they are used by the interpreter and the
// standalone mkpeephole table generator program.
@@ -17,64 +20,8 @@ namespace v8 {
namespace internal {
namespace interpreter {
-#define INVALID_OPERAND_TYPE_LIST(V) V(None, OperandTypeInfo::kNone)
-
-#define REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
- V(MaybeReg, OperandTypeInfo::kScalableSignedByte) \
- V(Reg, OperandTypeInfo::kScalableSignedByte) \
- V(RegPair, OperandTypeInfo::kScalableSignedByte)
-
-#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) \
- V(RegOut, OperandTypeInfo::kScalableSignedByte) \
- V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
- V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
-
-#define SCALAR_OPERAND_TYPE_LIST(V) \
- V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
- V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
- V(Idx, OperandTypeInfo::kScalableUnsignedByte) \
- V(Imm, OperandTypeInfo::kScalableSignedByte) \
- V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
- V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
-
-#define REGISTER_OPERAND_TYPE_LIST(V) \
- REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
- REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
-
-#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
- INVALID_OPERAND_TYPE_LIST(V) \
- SCALAR_OPERAND_TYPE_LIST(V)
-
-// The list of operand types used by bytecodes.
-#define OPERAND_TYPE_LIST(V) \
- NON_REGISTER_OPERAND_TYPE_LIST(V) \
- REGISTER_OPERAND_TYPE_LIST(V)
-
-// Define one debug break bytecode for each possible size of unscaled
-// bytecodes. Format is V(<bytecode>, <accumulator_use>, <operands>).
-#define DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
- V(DebugBreak0, AccumulatorUse::kRead) \
- V(DebugBreak1, AccumulatorUse::kRead, OperandType::kReg) \
- V(DebugBreak2, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg) \
- V(DebugBreak3, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
- OperandType::kReg) \
- V(DebugBreak4, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
- OperandType::kReg, OperandType::kReg) \
- V(DebugBreak5, AccumulatorUse::kRead, OperandType::kRuntimeId, \
- OperandType::kReg, OperandType::kReg) \
- V(DebugBreak6, AccumulatorUse::kRead, OperandType::kRuntimeId, \
- OperandType::kReg, OperandType::kReg, OperandType::kReg)
-
-// Define one debug break for each widening prefix.
-#define DEBUG_BREAK_PREFIX_BYTECODE_LIST(V) \
- V(DebugBreakWide, AccumulatorUse::kRead) \
- V(DebugBreakExtraWide, AccumulatorUse::kRead)
-
-#define DEBUG_BREAK_BYTECODE_LIST(V) \
- DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
- DEBUG_BREAK_PREFIX_BYTECODE_LIST(V)
-
// The list of bytecodes which are interpreted by the interpreter.
+// Format is V(<bytecode>, <accumulator_use>, <operands>).
#define BYTECODE_LIST(V) \
/* Extended width operands */ \
V(Wide, AccumulatorUse::kNone) \
@@ -106,15 +53,23 @@ namespace interpreter {
V(PushContext, AccumulatorUse::kRead, OperandType::kRegOut) \
V(PopContext, AccumulatorUse::kNone, OperandType::kReg) \
V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg, \
- OperandType::kIdx) \
+ OperandType::kIdx, OperandType::kUImm) \
V(LdrContextSlot, AccumulatorUse::kNone, OperandType::kReg, \
- OperandType::kIdx, OperandType::kRegOut) \
+ OperandType::kIdx, OperandType::kUImm, OperandType::kRegOut) \
V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg, \
- OperandType::kIdx) \
+ OperandType::kIdx, OperandType::kUImm) \
\
/* Load-Store lookup slots */ \
V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(LdaLookupContextSlot, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx, OperandType::kUImm) \
+ V(LdaLookupGlobalSlot, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx, OperandType::kUImm) \
V(LdaLookupSlotInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(LdaLookupContextSlotInsideTypeof, AccumulatorUse::kWrite, \
+ OperandType::kIdx, OperandType::kIdx, OperandType::kUImm) \
+ V(LdaLookupGlobalSlotInsideTypeof, AccumulatorUse::kWrite, \
+ OperandType::kIdx, OperandType::kIdx, OperandType::kUImm) \
V(StaLookupSlotSloppy, AccumulatorUse::kReadWrite, OperandType::kIdx) \
V(StaLookupSlotStrict, AccumulatorUse::kReadWrite, OperandType::kIdx) \
\
@@ -188,33 +143,40 @@ namespace interpreter {
V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg) \
\
/* Call operations */ \
- V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
- OperandType::kRegCount, OperandType::kIdx) \
- V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
+ V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kRegList, \
OperandType::kRegCount, OperandType::kIdx) \
+ V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId, \
- OperandType::kMaybeReg, OperandType::kRegCount) \
+ OperandType::kRegList, OperandType::kRegCount) \
V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId, \
- OperandType::kMaybeReg, OperandType::kRegCount, OperandType::kRegOutPair) \
+ OperandType::kRegList, OperandType::kRegCount, OperandType::kRegOutPair) \
V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kReg, OperandType::kRegCount) \
+ OperandType::kRegList, OperandType::kRegCount) \
\
/* Intrinsics */ \
V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kIntrinsicId, \
- OperandType::kMaybeReg, OperandType::kRegCount) \
+ OperandType::kRegList, OperandType::kRegCount) \
\
/* New operator */ \
- V(New, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kMaybeReg, OperandType::kRegCount) \
+ V(New, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kRegList, \
+ OperandType::kRegCount, OperandType::kIdx) \
\
/* Test Operators */ \
- V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestNotEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(TestNotEqual, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg) \
V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg) \
\
@@ -238,10 +200,10 @@ namespace interpreter {
/* Context allocation */ \
V(CreateBlockContext, AccumulatorUse::kReadWrite, OperandType::kIdx) \
V(CreateCatchContext, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kIdx) \
+ V(CreateFunctionContext, AccumulatorUse::kWrite, OperandType::kUImm) \
+ V(CreateWithContext, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx) \
- /* TODO(klaasb) rename Idx or add unsigned Imm OperandType? */ \
- V(CreateFunctionContext, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(CreateWithContext, AccumulatorUse::kReadWrite, OperandType::kReg) \
\
/* Arguments allocation */ \
V(CreateMappedArguments, AccumulatorUse::kWrite) \
@@ -265,11 +227,13 @@ namespace interpreter {
V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx) \
V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm) \
V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpLoop, AccumulatorUse::kNone, OperandType::kImm, OperandType::kImm) \
\
/* Complex flow control For..in */ \
V(ForInPrepare, AccumulatorUse::kNone, OperandType::kReg, \
OperandType::kRegOutTriple) \
- V(ForInDone, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg) \
+ V(ForInContinue, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kReg) \
V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
OperandType::kRegPair, OperandType::kIdx) \
V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg) \
@@ -277,9 +241,6 @@ namespace interpreter {
/* Perform a stack guard check */ \
V(StackCheck, AccumulatorUse::kNone) \
\
- /* Perform a check to trigger on-stack replacement */ \
- V(OsrPoll, AccumulatorUse::kNone, OperandType::kImm) \
- \
/* Non-local flow control */ \
V(Throw, AccumulatorUse::kRead) \
V(ReThrow, AccumulatorUse::kRead) \
@@ -291,7 +252,22 @@ namespace interpreter {
\
/* Debugger */ \
V(Debugger, AccumulatorUse::kNone) \
- DEBUG_BREAK_BYTECODE_LIST(V) \
+ \
+ /* Debug Breakpoints - one for each possible size of unscaled bytecodes */ \
+ /* and one for each operand widening prefix bytecode */ \
+ V(DebugBreak0, AccumulatorUse::kRead) \
+ V(DebugBreak1, AccumulatorUse::kRead, OperandType::kReg) \
+ V(DebugBreak2, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg) \
+ V(DebugBreak3, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
+ OperandType::kReg) \
+ V(DebugBreak4, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
+ OperandType::kReg, OperandType::kReg) \
+ V(DebugBreak5, AccumulatorUse::kRead, OperandType::kRuntimeId, \
+ OperandType::kReg, OperandType::kReg) \
+ V(DebugBreak6, AccumulatorUse::kRead, OperandType::kRuntimeId, \
+ OperandType::kReg, OperandType::kReg, OperandType::kReg) \
+ V(DebugBreakWide, AccumulatorUse::kRead) \
+ V(DebugBreakExtraWide, AccumulatorUse::kRead) \
\
/* Illegal bytecode (terminates execution) */ \
V(Illegal, AccumulatorUse::kNone) \
@@ -300,74 +276,23 @@ namespace interpreter {
/* eliminated bytecodes). */ \
V(Nop, AccumulatorUse::kNone)
-enum class AccumulatorUse : uint8_t {
- kNone = 0,
- kRead = 1 << 0,
- kWrite = 1 << 1,
- kReadWrite = kRead | kWrite
-};
+// List of debug break bytecodes.
+#define DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
+ V(DebugBreak0) \
+ V(DebugBreak1) \
+ V(DebugBreak2) \
+ V(DebugBreak3) \
+ V(DebugBreak4) \
+ V(DebugBreak5) \
+ V(DebugBreak6)
-inline AccumulatorUse operator&(AccumulatorUse lhs, AccumulatorUse rhs) {
- int result = static_cast<int>(lhs) & static_cast<int>(rhs);
- return static_cast<AccumulatorUse>(result);
-}
-
-inline AccumulatorUse operator|(AccumulatorUse lhs, AccumulatorUse rhs) {
- int result = static_cast<int>(lhs) | static_cast<int>(rhs);
- return static_cast<AccumulatorUse>(result);
-}
-
-// Enumeration of scaling factors applicable to scalable operands. Code
-// relies on being able to cast values to integer scaling values.
-#define OPERAND_SCALE_LIST(V) \
- V(Single, 1) \
- V(Double, 2) \
- V(Quadruple, 4)
-
-enum class OperandScale : uint8_t {
-#define DECLARE_OPERAND_SCALE(Name, Scale) k##Name = Scale,
- OPERAND_SCALE_LIST(DECLARE_OPERAND_SCALE)
-#undef DECLARE_OPERAND_SCALE
- kLast = kQuadruple
-};
-
-// Enumeration of the size classes of operand types used by
-// bytecodes. Code relies on being able to cast values to integer
-// types to get the size in bytes.
-enum class OperandSize : uint8_t {
- kNone = 0,
- kByte = 1,
- kShort = 2,
- kQuad = 4,
- kLast = kQuad
-};
-
-// Primitive operand info used that summarize properties of operands.
-// Columns are Name, IsScalable, IsUnsigned, UnscaledSize.
-#define OPERAND_TYPE_INFO_LIST(V) \
- V(None, false, false, OperandSize::kNone) \
- V(ScalableSignedByte, true, false, OperandSize::kByte) \
- V(ScalableUnsignedByte, true, true, OperandSize::kByte) \
- V(FixedUnsignedByte, false, true, OperandSize::kByte) \
- V(FixedUnsignedShort, false, true, OperandSize::kShort)
-
-enum class OperandTypeInfo : uint8_t {
-#define DECLARE_OPERAND_TYPE_INFO(Name, ...) k##Name,
- OPERAND_TYPE_INFO_LIST(DECLARE_OPERAND_TYPE_INFO)
-#undef DECLARE_OPERAND_TYPE_INFO
-};
+#define DEBUG_BREAK_PREFIX_BYTECODE_LIST(V) \
+ V(DebugBreakWide) \
+ V(DebugBreakExtraWide)
-// Enumeration of operand types used by bytecodes.
-enum class OperandType : uint8_t {
-#define DECLARE_OPERAND_TYPE(Name, _) k##Name,
- OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE)
-#undef DECLARE_OPERAND_TYPE
-#define COUNT_OPERAND_TYPES(x, _) +1
- // The COUNT_OPERAND macro will turn this into kLast = -1 +1 +1... which will
- // evaluate to the same value as the last operand.
- kLast = -1 OPERAND_TYPE_LIST(COUNT_OPERAND_TYPES)
-#undef COUNT_OPERAND_TYPES
-};
+#define DEBUG_BREAK_BYTECODE_LIST(V) \
+ DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
+ DEBUG_BREAK_PREFIX_BYTECODE_LIST(V)
// Enumeration of interpreter bytecodes.
enum class Bytecode : uint8_t {
@@ -381,6 +306,14 @@ enum class Bytecode : uint8_t {
#undef COUNT_BYTECODE
};
+// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
+// See crbug.com/603131.
+#if V8_CC_MSVC
+#define CONSTEXPR const
+#else
+#define CONSTEXPR constexpr
+#endif
+
class Bytecodes final {
public:
// The maximum number of operands a bytecode may have.
@@ -392,156 +325,314 @@ class Bytecodes final {
// Returns string representation of |bytecode|.
static std::string ToString(Bytecode bytecode, OperandScale operand_scale);
- // Returns string representation of |accumulator_use|.
- static const char* AccumulatorUseToString(AccumulatorUse accumulator_use);
-
- // Returns string representation of |operand_type|.
- static const char* OperandTypeToString(OperandType operand_type);
-
- // Returns string representation of |operand_scale|.
- static const char* OperandScaleToString(OperandScale operand_scale);
-
- // Returns string representation of |operand_size|.
- static const char* OperandSizeToString(OperandSize operand_size);
-
// Returns byte value of bytecode.
- static uint8_t ToByte(Bytecode bytecode);
+ static uint8_t ToByte(Bytecode bytecode) {
+ DCHECK_LE(bytecode, Bytecode::kLast);
+ return static_cast<uint8_t>(bytecode);
+ }
// Returns bytecode for |value|.
- static Bytecode FromByte(uint8_t value);
-
- // Returns the number of operands expected by |bytecode|.
- static int NumberOfOperands(Bytecode bytecode);
-
- // Returns the number of register operands expected by |bytecode|.
- static int NumberOfRegisterOperands(Bytecode bytecode);
+ static Bytecode FromByte(uint8_t value) {
+ Bytecode bytecode = static_cast<Bytecode>(value);
+ DCHECK(bytecode <= Bytecode::kLast);
+ return bytecode;
+ }
// Returns the prefix bytecode representing an operand scale to be
// applied to a a bytecode.
- static Bytecode OperandScaleToPrefixBytecode(OperandScale operand_scale);
+ static Bytecode OperandScaleToPrefixBytecode(OperandScale operand_scale) {
+ switch (operand_scale) {
+ case OperandScale::kQuadruple:
+ return Bytecode::kExtraWide;
+ case OperandScale::kDouble:
+ return Bytecode::kWide;
+ default:
+ UNREACHABLE();
+ return Bytecode::kIllegal;
+ }
+ }
// Returns true if the operand scale requires a prefix bytecode.
- static bool OperandScaleRequiresPrefixBytecode(OperandScale operand_scale);
+ static bool OperandScaleRequiresPrefixBytecode(OperandScale operand_scale) {
+ return operand_scale != OperandScale::kSingle;
+ }
// Returns the scaling applied to scalable operands if bytecode is
// is a scaling prefix.
- static OperandScale PrefixBytecodeToOperandScale(Bytecode bytecode);
+ static OperandScale PrefixBytecodeToOperandScale(Bytecode bytecode) {
+ switch (bytecode) {
+ case Bytecode::kExtraWide:
+ case Bytecode::kDebugBreakExtraWide:
+ return OperandScale::kQuadruple;
+ case Bytecode::kWide:
+ case Bytecode::kDebugBreakWide:
+ return OperandScale::kDouble;
+ default:
+ UNREACHABLE();
+ return OperandScale::kSingle;
+ }
+ }
// Returns how accumulator is used by |bytecode|.
- static AccumulatorUse GetAccumulatorUse(Bytecode bytecode);
+ static AccumulatorUse GetAccumulatorUse(Bytecode bytecode) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ return kAccumulatorUse[static_cast<size_t>(bytecode)];
+ }
// Returns true if |bytecode| reads the accumulator.
- static bool ReadsAccumulator(Bytecode bytecode);
+ static bool ReadsAccumulator(Bytecode bytecode) {
+ return (GetAccumulatorUse(bytecode) & AccumulatorUse::kRead) ==
+ AccumulatorUse::kRead;
+ }
// Returns true if |bytecode| writes the accumulator.
- static bool WritesAccumulator(Bytecode bytecode);
+ static bool WritesAccumulator(Bytecode bytecode) {
+ return (GetAccumulatorUse(bytecode) & AccumulatorUse::kWrite) ==
+ AccumulatorUse::kWrite;
+ }
// Return true if |bytecode| writes the accumulator with a boolean value.
- static bool WritesBooleanToAccumulator(Bytecode bytecode);
+ static bool WritesBooleanToAccumulator(Bytecode bytecode) {
+ switch (bytecode) {
+ case Bytecode::kLdaTrue:
+ case Bytecode::kLdaFalse:
+ case Bytecode::kToBooleanLogicalNot:
+ case Bytecode::kLogicalNot:
+ case Bytecode::kTestEqual:
+ case Bytecode::kTestNotEqual:
+ case Bytecode::kTestEqualStrict:
+ case Bytecode::kTestLessThan:
+ case Bytecode::kTestLessThanOrEqual:
+ case Bytecode::kTestGreaterThan:
+ case Bytecode::kTestGreaterThanOrEqual:
+ case Bytecode::kTestInstanceOf:
+ case Bytecode::kTestIn:
+ case Bytecode::kForInContinue:
+ return true;
+ default:
+ return false;
+ }
+ }
// Return true if |bytecode| is an accumulator load without effects,
// e.g. LdaConstant, LdaTrue, Ldar.
- static bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode);
-
- // Return true if |bytecode| is a jump without effects,
- // e.g. any jump excluding those that include type coercion like
- // JumpIfTrueToBoolean.
- static bool IsJumpWithoutEffects(Bytecode bytecode);
+ static CONSTEXPR bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
+ return bytecode == Bytecode::kLdar || bytecode == Bytecode::kLdaZero ||
+ bytecode == Bytecode::kLdaSmi || bytecode == Bytecode::kLdaNull ||
+ bytecode == Bytecode::kLdaTrue || bytecode == Bytecode::kLdaFalse ||
+ bytecode == Bytecode::kLdaUndefined ||
+ bytecode == Bytecode::kLdaTheHole ||
+ bytecode == Bytecode::kLdaConstant;
+ }
// Return true if |bytecode| is a register load without effects,
// e.g. Mov, Star, LdrUndefined.
- static bool IsRegisterLoadWithoutEffects(Bytecode bytecode);
-
- // Returns true if |bytecode| has no effects.
- static bool IsWithoutExternalSideEffects(Bytecode bytecode);
-
- // Returns the i-th operand of |bytecode|.
- static OperandType GetOperandType(Bytecode bytecode, int i);
-
- // Returns a pointer to an array of operand types terminated in
- // OperandType::kNone.
- static const OperandType* GetOperandTypes(Bytecode bytecode);
-
- // Returns a pointer to an array of operand type info terminated in
- // OperandTypeInfo::kNone.
- static const OperandTypeInfo* GetOperandTypeInfos(Bytecode bytecode);
-
- // Returns the size of the i-th operand of |bytecode|.
- static OperandSize GetOperandSize(Bytecode bytecode, int i,
- OperandScale operand_scale);
-
- // Returns the offset of the i-th operand of |bytecode| relative to the start
- // of the bytecode.
- static int GetOperandOffset(Bytecode bytecode, int i,
- OperandScale operand_scale);
-
- // Returns a debug break bytecode to replace |bytecode|.
- static Bytecode GetDebugBreak(Bytecode bytecode);
-
- // Returns the size of the bytecode including its operands for the
- // given |operand_scale|.
- static int Size(Bytecode bytecode, OperandScale operand_scale);
-
- // Returns the size of |operand|.
- static OperandSize SizeOfOperand(OperandType operand, OperandScale scale);
-
- // Returns the number of values which |bytecode| returns.
- static size_t ReturnCount(Bytecode bytecode);
+ static CONSTEXPR bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
+ return bytecode == Bytecode::kMov || bytecode == Bytecode::kPopContext ||
+ bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar ||
+ bytecode == Bytecode::kLdrUndefined;
+ }
// Returns true if the bytecode is a conditional jump taking
// an immediate byte operand (OperandType::kImm).
- static bool IsConditionalJumpImmediate(Bytecode bytecode);
+ static CONSTEXPR bool IsConditionalJumpImmediate(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfTrue ||
+ bytecode == Bytecode::kJumpIfFalse ||
+ bytecode == Bytecode::kJumpIfToBooleanTrue ||
+ bytecode == Bytecode::kJumpIfToBooleanFalse ||
+ bytecode == Bytecode::kJumpIfNotHole ||
+ bytecode == Bytecode::kJumpIfNull ||
+ bytecode == Bytecode::kJumpIfUndefined;
+ }
// Returns true if the bytecode is a conditional jump taking
// a constant pool entry (OperandType::kIdx).
- static bool IsConditionalJumpConstant(Bytecode bytecode);
+ static CONSTEXPR bool IsConditionalJumpConstant(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfTrueConstant ||
+ bytecode == Bytecode::kJumpIfFalseConstant ||
+ bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
+ bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
+ bytecode == Bytecode::kJumpIfNotHoleConstant ||
+ bytecode == Bytecode::kJumpIfNullConstant ||
+ bytecode == Bytecode::kJumpIfUndefinedConstant;
+ }
// Returns true if the bytecode is a conditional jump taking
// any kind of operand.
- static bool IsConditionalJump(Bytecode bytecode);
+ static CONSTEXPR bool IsConditionalJump(Bytecode bytecode) {
+ return IsConditionalJumpImmediate(bytecode) ||
+ IsConditionalJumpConstant(bytecode);
+ }
// Returns true if the bytecode is a jump or a conditional jump taking
// an immediate byte operand (OperandType::kImm).
- static bool IsJumpImmediate(Bytecode bytecode);
+ static CONSTEXPR bool IsJumpImmediate(Bytecode bytecode) {
+ return bytecode == Bytecode::kJump || bytecode == Bytecode::kJumpLoop ||
+ IsConditionalJumpImmediate(bytecode);
+ }
// Returns true if the bytecode is a jump or conditional jump taking a
// constant pool entry (OperandType::kIdx).
- static bool IsJumpConstant(Bytecode bytecode);
-
- // Returns true if the bytecode is a jump or conditional jump taking
- // any kind of operand.
- static bool IsJump(Bytecode bytecode);
+ static CONSTEXPR bool IsJumpConstant(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpConstant ||
+ IsConditionalJumpConstant(bytecode);
+ }
// Returns true if the bytecode is a jump that internally coerces the
// accumulator to a boolean.
- static bool IsJumpIfToBoolean(Bytecode bytecode);
+ static CONSTEXPR bool IsJumpIfToBoolean(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfToBooleanTrue ||
+ bytecode == Bytecode::kJumpIfToBooleanFalse ||
+ bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
+ bytecode == Bytecode::kJumpIfToBooleanFalseConstant;
+ }
- // Returns the equivalent jump bytecode without the accumulator coercion.
- static Bytecode GetJumpWithoutToBoolean(Bytecode bytecode);
+ // Returns true if the bytecode is a jump or conditional jump taking
+ // any kind of operand.
+ static CONSTEXPR bool IsJump(Bytecode bytecode) {
+ return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode);
+ }
// Returns true if the bytecode is a conditional jump, a jump, or a return.
- static bool IsJumpOrReturn(Bytecode bytecode);
+ static CONSTEXPR bool IsJumpOrReturn(Bytecode bytecode) {
+ return bytecode == Bytecode::kReturn || IsJump(bytecode);
+ }
+
+ // Return true if |bytecode| is a jump without effects,
+ // e.g. any jump excluding those that include type coercion like
+ // JumpIfTrueToBoolean.
+ static CONSTEXPR bool IsJumpWithoutEffects(Bytecode bytecode) {
+ return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
+ }
+
+ // Returns true if |bytecode| has no effects. These bytecodes only manipulate
+ // interpreter frame state and will never throw.
+ static CONSTEXPR bool IsWithoutExternalSideEffects(Bytecode bytecode) {
+ return (IsAccumulatorLoadWithoutEffects(bytecode) ||
+ IsRegisterLoadWithoutEffects(bytecode) ||
+ bytecode == Bytecode::kNop || IsJumpWithoutEffects(bytecode));
+ }
+
+ // Returns true if the bytecode is Ldar or Star.
+ static CONSTEXPR bool IsLdarOrStar(Bytecode bytecode) {
+ return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
+ }
+
+ // Returns true if |bytecode| puts a name in the accumulator.
+ static CONSTEXPR bool PutsNameInAccumulator(Bytecode bytecode) {
+ return bytecode == Bytecode::kTypeOf;
+ }
// Returns true if the bytecode is a call or a constructor call.
- static bool IsCallOrNew(Bytecode bytecode);
+ static CONSTEXPR bool IsCallOrNew(Bytecode bytecode) {
+ return bytecode == Bytecode::kCall || bytecode == Bytecode::kTailCall ||
+ bytecode == Bytecode::kNew;
+ }
// Returns true if the bytecode is a call to the runtime.
- static bool IsCallRuntime(Bytecode bytecode);
+ static CONSTEXPR bool IsCallRuntime(Bytecode bytecode) {
+ return bytecode == Bytecode::kCallRuntime ||
+ bytecode == Bytecode::kCallRuntimeForPair ||
+ bytecode == Bytecode::kInvokeIntrinsic;
+ }
- // Returns true if the bytecode is a debug break.
- static bool IsDebugBreak(Bytecode bytecode);
+ // Returns true if the bytecode is a scaling prefix bytecode.
+ static CONSTEXPR bool IsPrefixScalingBytecode(Bytecode bytecode) {
+ return bytecode == Bytecode::kExtraWide || bytecode == Bytecode::kWide ||
+ bytecode == Bytecode::kDebugBreakExtraWide ||
+ bytecode == Bytecode::kDebugBreakWide;
+ }
- // Returns true if the bytecode is Ldar or Star.
- static bool IsLdarOrStar(Bytecode bytecode);
+ // Returns the number of values which |bytecode| returns.
+ static CONSTEXPR size_t ReturnCount(Bytecode bytecode) {
+ return bytecode == Bytecode::kReturn ? 1 : 0;
+ }
+
+ // Returns the number of operands expected by |bytecode|.
+ static int NumberOfOperands(Bytecode bytecode) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ return kOperandCount[static_cast<size_t>(bytecode)];
+ }
+
+ // Returns the i-th operand of |bytecode|.
+ static OperandType GetOperandType(Bytecode bytecode, int i) {
+ DCHECK_LE(bytecode, Bytecode::kLast);
+ DCHECK_LT(i, NumberOfOperands(bytecode));
+ DCHECK_GE(i, 0);
+ return GetOperandTypes(bytecode)[i];
+ }
+
+ // Returns a pointer to an array of operand types terminated in
+ // OperandType::kNone.
+ static const OperandType* GetOperandTypes(Bytecode bytecode) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ return kOperandTypes[static_cast<size_t>(bytecode)];
+ }
+
+ static bool OperandIsScalableSignedByte(Bytecode bytecode,
+ int operand_index) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ return kOperandTypeInfos[static_cast<size_t>(bytecode)][operand_index] ==
+ OperandTypeInfo::kScalableSignedByte;
+ }
+
+ static bool OperandIsScalableUnsignedByte(Bytecode bytecode,
+ int operand_index) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ return kOperandTypeInfos[static_cast<size_t>(bytecode)][operand_index] ==
+ OperandTypeInfo::kScalableUnsignedByte;
+ }
+
+ static bool OperandIsScalable(Bytecode bytecode, int operand_index) {
+ return OperandIsScalableSignedByte(bytecode, operand_index) ||
+ OperandIsScalableUnsignedByte(bytecode, operand_index);
+ }
// Returns true if the bytecode has wider operand forms.
static bool IsBytecodeWithScalableOperands(Bytecode bytecode);
- // Returns true if the bytecode is a scaling prefix bytecode.
- static bool IsPrefixScalingBytecode(Bytecode bytecode);
+ // Returns the size of the i-th operand of |bytecode|.
+ static OperandSize GetOperandSize(Bytecode bytecode, int i,
+ OperandScale operand_scale) {
+ CHECK_LT(i, NumberOfOperands(bytecode));
+ return GetOperandSizes(bytecode, operand_scale)[i];
+ }
+
+ // Returns the operand sizes of |bytecode| with scale |operand_scale|.
+ static const OperandSize* GetOperandSizes(Bytecode bytecode,
+ OperandScale operand_scale) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ DCHECK_GE(operand_scale, OperandScale::kSingle);
+ DCHECK_LE(operand_scale, OperandScale::kLast);
+ STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
+ OperandScale::kLast == OperandScale::kQuadruple);
+ int scale_index = static_cast<int>(operand_scale) >> 1;
+ return kOperandSizes[static_cast<size_t>(bytecode)][scale_index];
+ }
- // Returns true if |bytecode| puts a name in the accumulator.
- static bool PutsNameInAccumulator(Bytecode bytecode);
+ // Returns the offset of the i-th operand of |bytecode| relative to the start
+ // of the bytecode.
+ static int GetOperandOffset(Bytecode bytecode, int i,
+ OperandScale operand_scale);
+
+ // Returns the size of the bytecode including its operands for the
+ // given |operand_scale|.
+ static int Size(Bytecode bytecode, OperandScale operand_scale) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
+ OperandScale::kLast == OperandScale::kQuadruple);
+ int scale_index = static_cast<int>(operand_scale) >> 1;
+ return kBytecodeSizes[static_cast<size_t>(bytecode)][scale_index];
+ }
+
+ // Returns a debug break bytecode to replace |bytecode|.
+ static Bytecode GetDebugBreak(Bytecode bytecode);
+
+ // Returns the equivalent jump bytecode without the accumulator coercion.
+ static Bytecode GetJumpWithoutToBoolean(Bytecode bytecode);
+
+ // Returns true if the bytecode is a debug break.
+ static bool IsDebugBreak(Bytecode bytecode);
// Returns true if |operand_type| is any type of register operand.
static bool IsRegisterOperandType(OperandType operand_type);
@@ -557,12 +648,30 @@ class Bytecodes final {
static bool IsStarLookahead(Bytecode bytecode, OperandScale operand_scale);
// Returns the number of registers represented by a register operand. For
- // instance, a RegPair represents two registers.
- static int GetNumberOfRegistersRepresentedBy(OperandType operand_type);
-
- // Returns true if |operand_type| is a maybe register operand
- // (kMaybeReg).
- static bool IsMaybeRegisterOperandType(OperandType operand_type);
+ // instance, a RegPair represents two registers. Should not be called for
+ // kRegList which has a variable number of registers based on the following
+ // kRegCount operand.
+ static int GetNumberOfRegistersRepresentedBy(OperandType operand_type) {
+ switch (operand_type) {
+ case OperandType::kReg:
+ case OperandType::kRegOut:
+ return 1;
+ case OperandType::kRegPair:
+ case OperandType::kRegOutPair:
+ return 2;
+ case OperandType::kRegOutTriple:
+ return 3;
+ case OperandType::kRegList:
+ UNREACHABLE();
+ return 0;
+ default:
+ return 0;
+ }
+ return 0;
+ }
+
+ // Returns the size of |operand| for |operand_scale|.
+ static OperandSize SizeOfOperand(OperandType operand, OperandScale scale);
// Returns true if |operand_type| is a runtime-id operand (kRuntimeId).
static bool IsRuntimeIdOperandType(OperandType operand_type);
@@ -576,18 +685,55 @@ class Bytecodes final {
// OperandScale values.
static bool BytecodeHasHandler(Bytecode bytecode, OperandScale operand_scale);
- // Return the operand size required to hold a signed operand.
- static OperandSize SizeForSignedOperand(int value);
-
- // Return the operand size required to hold an unsigned operand.
- static OperandSize SizeForUnsignedOperand(uint32_t value);
+ // Return the operand scale required to hold a signed operand with |value|.
+ static OperandScale ScaleForSignedOperand(int32_t value) {
+ if (value >= kMinInt8 && value <= kMaxInt8) {
+ return OperandScale::kSingle;
+ } else if (value >= kMinInt16 && value <= kMaxInt16) {
+ return OperandScale::kDouble;
+ } else {
+ return OperandScale::kQuadruple;
+ }
+ }
+
+ // Return the operand scale required to hold an unsigned operand with |value|.
+ static OperandScale ScaleForUnsignedOperand(uint32_t value) {
+ if (value <= kMaxUInt8) {
+ return OperandScale::kSingle;
+ } else if (value <= kMaxUInt16) {
+ return OperandScale::kDouble;
+ } else {
+ return OperandScale::kQuadruple;
+ }
+ }
+
+ // Return the operand size required to hold an unsigned operand with |value|.
+ static OperandSize SizeForUnsignedOperand(uint32_t value) {
+ if (value <= kMaxUInt8) {
+ return OperandSize::kByte;
+ } else if (value <= kMaxUInt16) {
+ return OperandSize::kShort;
+ } else {
+ return OperandSize::kQuad;
+ }
+ }
+
+ private:
+ static const OperandType* const kOperandTypes[];
+ static const OperandTypeInfo* const kOperandTypeInfos[];
+ static const int kOperandCount[];
+ static const int kNumberOfRegisterOperands[];
+ static const AccumulatorUse kAccumulatorUse[];
+ static const bool kIsScalable[];
+ static const int kBytecodeSizes[][3];
+ static const OperandSize* const kOperandSizes[][3];
};
+// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
+// See crbug.com/603131.
+#undef CONSTEXPR
+
std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
-std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use);
-std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale);
-std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size);
-std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
} // namespace interpreter
} // namespace internal
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index ff3823fde2..d2b7995623 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/constant-array-builder.h"
+#include <functional>
#include <set>
#include "src/isolate.h"
@@ -72,9 +73,11 @@ STATIC_CONST_MEMBER_DEFINITION const size_t
ConstantArrayBuilder::ConstantArrayBuilder(Zone* zone,
Handle<Object> the_hole_value)
- : constants_map_(zone),
+ : constants_map_(16, base::KeyEqualityMatcher<Address>(),
+ ZoneAllocationPolicy(zone)),
smi_map_(zone),
smi_pairs_(zone),
+ zone_(zone),
the_hole_value_(the_hole_value) {
idx_slice_[0] =
new (zone) ConstantArraySlice(zone, 0, k8BitCapacity, OperandSize::kByte);
@@ -153,16 +156,11 @@ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
}
size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
- auto entry = constants_map_.find(object.address());
- return (entry == constants_map_.end()) ? AllocateEntry(object)
- : entry->second;
-}
-
-ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateEntry(
- Handle<Object> object) {
- index_t index = AllocateIndex(object);
- constants_map_[object.address()] = index;
- return index;
+ return constants_map_
+ .LookupOrInsert(object.address(), ObjectHash(object.address()),
+ [&]() { return AllocateIndex(object); },
+ ZoneAllocationPolicy(zone_))
+ ->value;
}
ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateIndex(
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 2018f25693..78d36f5044 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -7,7 +7,7 @@
#include "src/identity-map.h"
#include "src/interpreter/bytecodes.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -70,7 +70,6 @@ class ConstantArrayBuilder final BASE_EMBEDDED {
private:
typedef uint32_t index_t;
- index_t AllocateEntry(Handle<Object> object);
index_t AllocateIndex(Handle<Object> object);
index_t AllocateReservedEntry(Smi* value);
@@ -108,9 +107,12 @@ class ConstantArrayBuilder final BASE_EMBEDDED {
Handle<Object> the_hole_value() const { return the_hole_value_; }
ConstantArraySlice* idx_slice_[3];
- ZoneMap<Address, index_t> constants_map_;
+ base::TemplateHashMapImpl<Address, index_t, base::KeyEqualityMatcher<Address>,
+ ZoneAllocationPolicy>
+ constants_map_;
ZoneMap<Smi*, index_t> smi_map_;
ZoneVector<std::pair<Smi*, index_t>> smi_pairs_;
+ Zone* zone_;
Handle<Object> the_hole_value_;
};
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 56cd481f9c..0e71b96cce 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -60,18 +60,14 @@ void LoopBuilder::LoopHeader(ZoneVector<BytecodeLabel>* additional_labels) {
}
}
-void LoopBuilder::JumpToHeader() {
+void LoopBuilder::JumpToHeader(int loop_depth) {
+ // Pass the proper loop nesting level to the backwards branch, to trigger
+ // on-stack replacement when armed for the given loop nesting depth.
+ int level = Min(loop_depth, AbstractCode::kMaxLoopNestingMarker - 1);
// Loop must have closed form, i.e. all loop elements are within the loop,
// the loop header precedes the body and next elements in the loop.
DCHECK(loop_header_.is_bound());
- builder()->Jump(&loop_header_);
-}
-
-void LoopBuilder::JumpToHeaderIfTrue() {
- // Loop must have closed form, i.e. all loop elements are within the loop,
- // the loop header precedes the body and next elements in the loop.
- DCHECK(loop_header_.is_bound());
- builder()->JumpIfTrue(&loop_header_);
+ builder()->JumpLoop(&loop_header_, level);
}
void LoopBuilder::EndLoop() {
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index 5cd9b5bc99..3174db5da1 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -8,7 +8,7 @@
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-label.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -86,8 +86,7 @@ class LoopBuilder final : public BreakableControlFlowBuilder {
~LoopBuilder();
void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels);
- void JumpToHeader();
- void JumpToHeaderIfTrue();
+ void JumpToHeader(int loop_depth);
void BindContinueTarget();
void EndLoop();
@@ -99,9 +98,6 @@ class LoopBuilder final : public BreakableControlFlowBuilder {
void ContinueIfUndefined() { EmitJumpIfUndefined(&continue_labels_); }
void ContinueIfNull() { EmitJumpIfNull(&continue_labels_); }
- BytecodeLabels* header_labels() { return &header_labels_; }
- BytecodeLabels* continue_labels() { return &continue_labels_; }
-
private:
BytecodeLabel loop_header_;
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index 26c45f4056..25147ca26b 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -8,7 +8,7 @@
#include "src/handles.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 227fd395ce..5767ffa8a5 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -14,7 +14,7 @@
#include "src/interpreter/interpreter.h"
#include "src/machine-type.h"
#include "src/macro-assembler.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -84,6 +84,71 @@ void InterpreterAssembler::SetContext(Node* value) {
StoreRegister(value, Register::current_context());
}
+Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
+ Variable cur_context(this, MachineRepresentation::kTaggedPointer);
+ cur_context.Bind(context);
+
+ Variable cur_depth(this, MachineRepresentation::kWord32);
+ cur_depth.Bind(depth);
+
+ Label context_found(this);
+
+ Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
+ Label context_search(this, 2, context_search_loop_variables);
+
+ // Fast path if the depth is 0.
+ BranchIfWord32Equal(depth, Int32Constant(0), &context_found, &context_search);
+
+ // Loop until the depth is 0.
+ Bind(&context_search);
+ {
+ cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
+ cur_context.Bind(
+ LoadContextSlot(cur_context.value(), Context::PREVIOUS_INDEX));
+
+ BranchIfWord32Equal(cur_depth.value(), Int32Constant(0), &context_found,
+ &context_search);
+ }
+
+ Bind(&context_found);
+ return cur_context.value();
+}
+
+void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
+ Node* depth,
+ Label* target) {
+ Variable cur_context(this, MachineRepresentation::kTaggedPointer);
+ cur_context.Bind(context);
+
+ Variable cur_depth(this, MachineRepresentation::kWord32);
+ cur_depth.Bind(depth);
+
+ Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
+ Label context_search(this, 2, context_search_loop_variables);
+
+ // Loop until the depth is 0.
+ Goto(&context_search);
+ Bind(&context_search);
+ {
+ // TODO(leszeks): We only need to do this check if the context had a sloppy
+ // eval, we could pass in a context chain bitmask to figure out which
+ // contexts actually need to be checked.
+
+ Node* extension_slot =
+ LoadContextSlot(cur_context.value(), Context::EXTENSION_INDEX);
+
+ // Jump to the target if the extension slot is not a hole.
+ GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);
+
+ cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
+ cur_context.Bind(
+ LoadContextSlot(cur_context.value(), Context::PREVIOUS_INDEX));
+
+ GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
+ &context_search);
+ }
+}
+
Node* InterpreterAssembler::BytecodeOffset() {
return bytecode_offset_.value();
}
@@ -341,6 +406,14 @@ Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
+Node* InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
+ DCHECK_EQ(OperandType::kUImm,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ OperandSize operand_size =
+ Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+ return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
DCHECK_EQ(OperandType::kImm,
Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -460,6 +533,18 @@ void InterpreterAssembler::CallEpilogue() {
}
}
+Node* InterpreterAssembler::IncrementCallCount(Node* type_feedback_vector,
+ Node* slot_id) {
+ Comment("increment call count");
+ Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
+ Node* call_count =
+ LoadFixedArrayElement(type_feedback_vector, call_count_slot);
+ Node* new_count = SmiAdd(call_count, SmiTag(Int32Constant(1)));
+ // Count is Smi, so we don't need a write barrier.
+ return StoreFixedArrayElement(type_feedback_vector, call_count_slot,
+ new_count, SKIP_WRITE_BARRIER);
+}
+
Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Node* first_arg, Node* arg_count,
Node* slot_id,
@@ -481,15 +566,16 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
WeakCell::kValueOffset == Symbol::kHashFieldSlot);
Variable return_value(this, MachineRepresentation::kTagged);
- Label handle_monomorphic(this), extra_checks(this), end(this), call(this);
+ Label handle_monomorphic(this), extra_checks(this), end(this), call(this),
+ call_function(this), call_without_feedback(this);
// Slot id of 0 is used to indicate no typefeedback is available. Call using
// call builtin.
STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0));
- GotoIf(is_feedback_unavailable, &call);
+ GotoIf(is_feedback_unavailable, &call_without_feedback);
- // The checks. First, does rdi match the recorded monomorphic target?
+ // The checks. First, does function match the recorded monomorphic target?
Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
Node* feedback_value = LoadWeakCellValue(feedback_element);
Node* is_monomorphic = WordEqual(function, feedback_value);
@@ -503,13 +589,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
GotoIf(is_smi, &extra_checks);
// Increment the call count.
- Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
- Node* call_count =
- LoadFixedArrayElement(type_feedback_vector, call_count_slot);
- Node* new_count = SmiAdd(call_count, SmiTag(Int32Constant(1)));
- // Count is Smi, so we don't need a write barrier.
- StoreFixedArrayElement(type_feedback_vector, call_count_slot, new_count,
- SKIP_WRITE_BARRIER);
+ IncrementCallCount(type_feedback_vector, slot_id);
// Call using call function builtin.
Callable callable = CodeFactory::InterpreterPushArgsAndCall(
@@ -523,12 +603,42 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Bind(&extra_checks);
{
- Label check_initialized(this, Label::kDeferred), mark_megamorphic(this);
+ Label check_initialized(this, Label::kDeferred), mark_megamorphic(this),
+ check_allocation_site(this),
+ create_allocation_site(this, Label::kDeferred);
// Check if it is a megamorphic target
Node* is_megamorphic = WordEqual(
feedback_element,
HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- BranchIf(is_megamorphic, &call, &check_initialized);
+ BranchIf(is_megamorphic, &call, &check_allocation_site);
+
+ Bind(&check_allocation_site);
+ {
+ Node* is_allocation_site =
+ WordEqual(LoadMap(feedback_element),
+ LoadRoot(Heap::kAllocationSiteMapRootIndex));
+ GotoUnless(is_allocation_site, &check_initialized);
+
+ // If it is not the Array() function, mark megamorphic.
+ Node* context_slot =
+ LoadFixedArrayElement(LoadNativeContext(context),
+ Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* is_array_function = WordEqual(context_slot, function);
+ GotoUnless(is_array_function, &mark_megamorphic);
+
+ // It is a monomorphic Array function. Increment the call count.
+ IncrementCallCount(type_feedback_vector, slot_id);
+
+ // Call ArrayConstructorStub.
+ Callable callable_call =
+ CodeFactory::InterpreterPushArgsAndConstructArray(isolate());
+ Node* code_target_call = HeapConstant(callable_call.code());
+ Node* ret_value =
+ CallStub(callable_call.descriptor(), code_target_call, context,
+ arg_count, function, feedback_element, first_arg);
+ return_value.Bind(ret_value);
+ Goto(&end);
+ }
Bind(&check_initialized);
{
@@ -548,12 +658,12 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
GotoUnless(is_js_function, &mark_megamorphic);
- // Check that it is not the Array() function.
+ // Check if it is the Array() function.
Node* context_slot =
LoadFixedArrayElement(LoadNativeContext(context),
Int32Constant(Context::ARRAY_FUNCTION_INDEX));
Node* is_array_function = WordEqual(context_slot, function);
- GotoIf(is_array_function, &mark_megamorphic);
+ GotoIf(is_array_function, &create_allocation_site);
// Check if the function belongs to the same native context
Node* native_context = LoadNativeContext(
@@ -562,23 +672,22 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
WordEqual(native_context, LoadNativeContext(context));
GotoUnless(is_same_native_context, &mark_megamorphic);
- // Initialize it to a monomorphic target.
- Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
- // Count is Smi, so we don't need a write barrier.
- StoreFixedArrayElement(type_feedback_vector, call_count_slot,
- SmiTag(Int32Constant(1)), SKIP_WRITE_BARRIER);
-
CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
function);
// Call using call function builtin.
- Callable callable = CodeFactory::InterpreterPushArgsAndCall(
- isolate(), tail_call_mode, CallableType::kJSFunction);
- Node* code_target = HeapConstant(callable.code());
- Node* ret_value = CallStub(callable.descriptor(), code_target, context,
- arg_count, first_arg, function);
- return_value.Bind(ret_value);
- Goto(&end);
+ Goto(&call_function);
+ }
+
+ Bind(&create_allocation_site);
+ {
+ CreateAllocationSiteInFeedbackVector(type_feedback_vector,
+ SmiTag(slot_id));
+
+ // Call using CallFunction builtin. CallICs have a PREMONOMORPHIC state.
+ // They start collecting feedback only when a call is executed the second
+ // time. So, do not pass any feedback here.
+ Goto(&call_function);
}
Bind(&mark_megamorphic);
@@ -595,8 +704,37 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
}
}
+ Bind(&call_function);
+ {
+ // Increment the call count.
+ IncrementCallCount(type_feedback_vector, slot_id);
+
+ Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
+ isolate(), tail_call_mode, CallableType::kJSFunction);
+ Node* code_target_call = HeapConstant(callable_call.code());
+ Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
+ context, arg_count, first_arg, function);
+ return_value.Bind(ret_value);
+ Goto(&end);
+ }
+
Bind(&call);
{
+ // Increment the call count.
+ IncrementCallCount(type_feedback_vector, slot_id);
+
+ // Call using call builtin.
+ Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
+ isolate(), tail_call_mode, CallableType::kAny);
+ Node* code_target_call = HeapConstant(callable_call.code());
+ Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
+ context, arg_count, first_arg, function);
+ return_value.Bind(ret_value);
+ Goto(&end);
+ }
+
+ Bind(&call_without_feedback);
+ {
// Call using call builtin.
Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
isolate(), tail_call_mode, CallableType::kAny);
@@ -623,11 +761,169 @@ Node* InterpreterAssembler::CallJS(Node* function, Node* context,
Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Node* new_target, Node* first_arg,
- Node* arg_count) {
- Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
- Node* code_target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), code_target, context, arg_count,
- new_target, constructor, first_arg);
+ Node* arg_count, Node* slot_id,
+ Node* type_feedback_vector) {
+ Label call_construct(this), js_function(this), end(this);
+ Variable return_value(this, MachineRepresentation::kTagged);
+ Variable allocation_feedback(this, MachineRepresentation::kTagged);
+ allocation_feedback.Bind(UndefinedConstant());
+
+ // Slot id of 0 is used to indicate no type feedback is available.
+ STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+ Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0));
+ GotoIf(is_feedback_unavailable, &call_construct);
+
+ // Check that the constructor is not a smi.
+ Node* is_smi = WordIsSmi(constructor);
+ GotoIf(is_smi, &call_construct);
+
+ // Check that constructor is a JSFunction.
+ Node* instance_type = LoadInstanceType(constructor);
+ Node* is_js_function =
+ WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
+ BranchIf(is_js_function, &js_function, &call_construct);
+
+ Bind(&js_function);
+ {
+ // Cache the called function in a feedback vector slot. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // TODO(mythria/v8:5210): Check if it is better to mark extra_checks as a
+ // deferred block so that call_construct_function will be scheduled.
+ Label extra_checks(this), call_construct_function(this);
+
+ Node* feedback_element =
+ LoadFixedArrayElement(type_feedback_vector, slot_id);
+ Node* feedback_value = LoadWeakCellValue(feedback_element);
+ Node* is_monomorphic = WordEqual(constructor, feedback_value);
+ BranchIf(is_monomorphic, &call_construct_function, &extra_checks);
+
+ Bind(&extra_checks);
+ {
+ Label mark_megamorphic(this), initialize(this),
+ check_allocation_site(this), check_initialized(this),
+ set_alloc_feedback_and_call(this);
+ {
+ // Check if it is a megamorphic target
+ Comment("check if megamorphic");
+ Node* is_megamorphic = WordEqual(
+ feedback_element,
+ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ GotoIf(is_megamorphic, &call_construct_function);
+
+ Comment("check if weak cell");
+ Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
+ LoadRoot(Heap::kWeakCellMapRootIndex));
+ GotoUnless(is_weak_cell, &check_allocation_site);
+ // If the weak cell is cleared, we have a new chance to become
+ // monomorphic.
+ Comment("check if weak cell is cleared");
+ Node* is_smi = WordIsSmi(feedback_value);
+ BranchIf(is_smi, &initialize, &mark_megamorphic);
+ }
+
+ Bind(&check_allocation_site);
+ {
+ Comment("check if it is an allocation site");
+ Node* is_allocation_site =
+ WordEqual(LoadObjectField(feedback_element, 0),
+ LoadRoot(Heap::kAllocationSiteMapRootIndex));
+ GotoUnless(is_allocation_site, &check_initialized);
+
+ // Make sure the function is the Array() function
+ Node* context_slot =
+ LoadFixedArrayElement(LoadNativeContext(context),
+ Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* is_array_function = WordEqual(context_slot, constructor);
+ BranchIf(is_array_function, &set_alloc_feedback_and_call,
+ &mark_megamorphic);
+ }
+
+ Bind(&set_alloc_feedback_and_call);
+ {
+ allocation_feedback.Bind(feedback_element);
+ Goto(&call_construct_function);
+ }
+
+ Bind(&check_initialized);
+ {
+ // Check if it is uninitialized.
+ Comment("check if uninitialized");
+ Node* is_uninitialized = WordEqual(
+ feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
+ BranchIf(is_uninitialized, &initialize, &mark_megamorphic);
+ }
+
+ Bind(&initialize);
+ {
+ Label create_weak_cell(this), create_allocation_site(this);
+ Comment("initialize the feedback element");
+ // Check that it is the Array() function.
+ Node* context_slot =
+ LoadFixedArrayElement(LoadNativeContext(context),
+ Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* is_array_function = WordEqual(context_slot, constructor);
+ BranchIf(is_array_function, &create_allocation_site, &create_weak_cell);
+
+ Bind(&create_allocation_site);
+ {
+ Node* site = CreateAllocationSiteInFeedbackVector(
+ type_feedback_vector, SmiTag(slot_id));
+ allocation_feedback.Bind(site);
+ Goto(&call_construct_function);
+ }
+
+ Bind(&create_weak_cell);
+ {
+ CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
+ constructor);
+ Goto(&call_construct_function);
+ }
+ }
+
+ Bind(&mark_megamorphic);
+ {
+ // MegamorphicSentinel is an immortal immovable object so
+ // write-barrier is not needed.
+ Comment("transition to megamorphic");
+ DCHECK(
+ Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
+ StoreFixedArrayElement(
+ type_feedback_vector, slot_id,
+ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
+ SKIP_WRITE_BARRIER);
+ Goto(&call_construct_function);
+ }
+ }
+
+ Bind(&call_construct_function);
+ {
+ Comment("call using callConstructFunction");
+ IncrementCallCount(type_feedback_vector, slot_id);
+ Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
+ isolate(), CallableType::kJSFunction);
+ return_value.Bind(CallStub(callable_function.descriptor(),
+ HeapConstant(callable_function.code()),
+ context, arg_count, new_target, constructor,
+ allocation_feedback.value(), first_arg));
+ Goto(&end);
+ }
+ }
+
+ Bind(&call_construct);
+ {
+ Comment("call using callConstruct builtin");
+ Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(
+ isolate(), CallableType::kAny);
+ Node* code_target = HeapConstant(callable.code());
+ return_value.Bind(CallStub(callable.descriptor(), code_target, context,
+ arg_count, new_target, constructor,
+ UndefinedConstant(), first_arg));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return return_value.value();
}
Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
@@ -651,6 +947,9 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
}
void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
+ // TODO(rmcilroy): It might be worthwhile to only update the budget for
+ // backwards branches. Those are distinguishable by the {JumpLoop} bytecode.
+
Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
Node* budget_offset =
IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index b3fa42fbf6..9dda20af48 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -32,6 +32,9 @@ class InterpreterAssembler : public CodeStubAssembler {
// Returns the index immediate for bytecode operand |operand_index| in the
// current bytecode.
compiler::Node* BytecodeOperandIdx(int operand_index);
+ // Returns the UImm8 immediate for bytecode operand |operand_index| in the
+ // current bytecode.
+ compiler::Node* BytecodeOperandUImm(int operand_index);
// Returns the Imm8 immediate for bytecode operand |operand_index| in the
// current bytecode.
compiler::Node* BytecodeOperandImm(int operand_index);
@@ -53,6 +56,15 @@ class InterpreterAssembler : public CodeStubAssembler {
compiler::Node* GetContext();
void SetContext(compiler::Node* value);
+ // Context at |depth| in the context chain starting at |context|.
+ compiler::Node* GetContextAtDepth(compiler::Node* context,
+ compiler::Node* depth);
+
+ // Goto the given |target| if the context chain starting at |context| has any
+ // extensions up to the given |depth|.
+ void GotoIfHasContextExtensionUpToDepth(compiler::Node* context,
+ compiler::Node* depth, Label* target);
+
// Number of registers.
compiler::Node* RegisterCount();
@@ -92,6 +104,11 @@ class InterpreterAssembler : public CodeStubAssembler {
// Load the TypeFeedbackVector for the current function.
compiler::Node* LoadTypeFeedbackVector();
+ // Increment the call count for a CALL_IC or construct call.
+ // The call count is located at feedback_vector[slot_id + 1].
+ compiler::Node* IncrementCallCount(compiler::Node* type_feedback_vector,
+ compiler::Node* slot_id);
+
// Call JSFunction or Callable |function| with |arg_count|
// arguments (not including receiver) and the first argument
// located at |first_arg|. Type feedback is collected in the
@@ -120,7 +137,9 @@ class InterpreterAssembler : public CodeStubAssembler {
compiler::Node* context,
compiler::Node* new_target,
compiler::Node* first_arg,
- compiler::Node* arg_count);
+ compiler::Node* arg_count,
+ compiler::Node* slot_id,
+ compiler::Node* type_feedback_vector);
// Call runtime function with |arg_count| arguments and the first argument
// located at |first_arg|.
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 68f0342180..410030247f 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -9,6 +9,7 @@
#include "src/ast/prettyprinter.h"
#include "src/code-factory.h"
+#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/factory.h"
#include "src/interpreter/bytecode-flags.h"
@@ -17,7 +18,7 @@
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/log.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -150,14 +151,39 @@ int Interpreter::InterruptBudget() {
}
InterpreterCompilationJob::InterpreterCompilationJob(CompilationInfo* info)
- : CompilationJob(info, "Ignition"), generator_(info) {}
+ : CompilationJob(info->isolate(), info, "Ignition"), generator_(info) {}
InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
+ if (FLAG_print_bytecode || FLAG_print_ast) {
+ OFStream os(stdout);
+ std::unique_ptr<char[]> name = info()->GetDebugName();
+ os << "[generating bytecode for function: " << info()->GetDebugName().get()
+ << "]" << std::endl
+ << std::flush;
+ }
+
+#ifdef DEBUG
+ if (info()->parse_info() && FLAG_print_ast) {
+ OFStream os(stdout);
+ os << "--- AST ---" << std::endl
+ << AstPrinter(info()->isolate()).PrintProgram(info()->literal())
+ << std::endl
+ << std::flush;
+ }
+#endif // DEBUG
+
return SUCCEEDED;
}
InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
- generator()->GenerateBytecode();
+ // TODO(5203): These timers aren't thread safe, move to using the CompilerJob
+ // timers.
+ RuntimeCallTimerScope runtimeTimer(info()->isolate(),
+ &RuntimeCallStats::CompileIgnition);
+ TimerEventScope<TimerEventCompileIgnition> timer(info()->isolate());
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");
+
+ generator()->GenerateBytecode(stack_limit());
if (generator()->HasStackOverflow()) {
return FAILED;
@@ -182,34 +208,8 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
return SUCCEEDED;
}
-bool Interpreter::MakeBytecode(CompilationInfo* info) {
- RuntimeCallTimerScope runtimeTimer(info->isolate(),
- &RuntimeCallStats::CompileIgnition);
- TimerEventScope<TimerEventCompileIgnition> timer(info->isolate());
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- info->isolate(), &tracing::TraceEventStatsTable::CompileIgnition);
-
- if (FLAG_print_bytecode || FLAG_print_ast) {
- OFStream os(stdout);
- std::unique_ptr<char[]> name = info->GetDebugName();
- os << "[generating bytecode for function: " << info->GetDebugName().get()
- << "]" << std::endl
- << std::flush;
- }
-
-#ifdef DEBUG
- if (info->parse_info() && FLAG_print_ast) {
- OFStream os(stdout);
- os << "--- AST ---" << std::endl
- << AstPrinter(info->isolate()).PrintProgram(info->literal()) << std::endl
- << std::flush;
- }
-#endif // DEBUG
-
- InterpreterCompilationJob job(info);
- if (job.PrepareJob() != CompilationJob::SUCCEEDED) return false;
- if (job.ExecuteJob() != CompilationJob::SUCCEEDED) return false;
- return job.FinalizeJob() == CompilationJob::SUCCEEDED;
+CompilationJob* Interpreter::NewCompilationJob(CompilationInfo* info) {
+ return new InterpreterCompilationJob(info);
}
bool Interpreter::IsDispatchTableInitialized() {
@@ -421,16 +421,14 @@ void Interpreter::DoMov(InterpreterAssembler* assembler) {
__ Dispatch();
}
-Node* Interpreter::BuildLoadGlobal(Callable ic,
+Node* Interpreter::BuildLoadGlobal(Callable ic, Node* context,
+ Node* feedback_slot,
InterpreterAssembler* assembler) {
typedef LoadGlobalWithVectorDescriptor Descriptor;
- // Get the global object.
- Node* context = __ GetContext();
// Load the global via the LoadGlobalIC.
Node* code_target = __ HeapConstant(ic.code());
- Node* raw_slot = __ BytecodeOperandIdx(0);
- Node* smi_slot = __ SmiTag(raw_slot);
+ Node* smi_slot = __ SmiTag(feedback_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
return __ CallStub(ic.descriptor(), code_target, context,
Arg(Descriptor::kSlot, smi_slot),
@@ -444,7 +442,11 @@ Node* Interpreter::BuildLoadGlobal(Callable ic,
void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
- Node* result = BuildLoadGlobal(ic, assembler);
+
+ Node* context = __ GetContext();
+
+ Node* raw_slot = __ BytecodeOperandIdx(0);
+ Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -456,7 +458,11 @@ void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
- Node* result = BuildLoadGlobal(ic, assembler);
+
+ Node* context = __ GetContext();
+
+ Node* raw_slot = __ BytecodeOperandIdx(0);
+ Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
Node* destination = __ BytecodeOperandReg(1);
__ StoreRegister(result, destination);
__ Dispatch();
@@ -469,7 +475,11 @@ void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) {
void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::LoadGlobalICInOptimizedCode(isolate_, INSIDE_TYPEOF);
- Node* result = BuildLoadGlobal(ic, assembler);
+
+ Node* context = __ GetContext();
+
+ Node* raw_slot = __ BytecodeOperandIdx(0);
+ Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -520,44 +530,51 @@ compiler::Node* Interpreter::BuildLoadContextSlot(
Node* reg_index = __ BytecodeOperandReg(0);
Node* context = __ LoadRegister(reg_index);
Node* slot_index = __ BytecodeOperandIdx(1);
- return __ LoadContextSlot(context, slot_index);
+ Node* depth = __ BytecodeOperandUImm(2);
+ Node* slot_context = __ GetContextAtDepth(context, depth);
+ return __ LoadContextSlot(slot_context, slot_index);
}
-// LdaContextSlot <context> <slot_index>
+// LdaContextSlot <context> <slot_index> <depth>
//
-// Load the object in |slot_index| of |context| into the accumulator.
+// Load the object in |slot_index| of the context at |depth| in the context
+// chain starting at |context| into the accumulator.
void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
Node* result = BuildLoadContextSlot(assembler);
__ SetAccumulator(result);
__ Dispatch();
}
-// LdrContextSlot <context> <slot_index> <reg>
+// LdrContextSlot <context> <slot_index> <depth> <reg>
//
-// Load the object in <slot_index> of <context> into register <reg>.
+// Load the object in |slot_index| of the context at |depth| in the context
+// chain of |context| into register |reg|.
void Interpreter::DoLdrContextSlot(InterpreterAssembler* assembler) {
Node* result = BuildLoadContextSlot(assembler);
- Node* destination = __ BytecodeOperandReg(2);
+ Node* destination = __ BytecodeOperandReg(3);
__ StoreRegister(result, destination);
__ Dispatch();
}
-// StaContextSlot <context> <slot_index>
+// StaContextSlot <context> <slot_index> <depth>
//
-// Stores the object in the accumulator into |slot_index| of |context|.
+// Stores the object in the accumulator into |slot_index| of the context at
+// |depth| in the context chain starting at |context|.
void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
Node* reg_index = __ BytecodeOperandReg(0);
Node* context = __ LoadRegister(reg_index);
Node* slot_index = __ BytecodeOperandIdx(1);
- __ StoreContextSlot(context, slot_index, value);
+ Node* depth = __ BytecodeOperandUImm(2);
+ Node* slot_context = __ GetContextAtDepth(context, depth);
+ __ StoreContextSlot(slot_context, slot_index, value);
__ Dispatch();
}
void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id,
InterpreterAssembler* assembler) {
- Node* index = __ BytecodeOperandIdx(0);
- Node* name = __ LoadConstantPoolEntry(index);
+ Node* name_index = __ BytecodeOperandIdx(0);
+ Node* name = __ LoadConstantPoolEntry(name_index);
Node* context = __ GetContext();
Node* result = __ CallRuntime(function_id, context, name);
__ SetAccumulator(result);
@@ -580,6 +597,103 @@ void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}
+void Interpreter::DoLdaLookupContextSlot(Runtime::FunctionId function_id,
+ InterpreterAssembler* assembler) {
+ Node* context = __ GetContext();
+ Node* name_index = __ BytecodeOperandIdx(0);
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ Node* depth = __ BytecodeOperandUImm(2);
+
+ Label slowpath(assembler, Label::kDeferred);
+
+ // Check for context extensions to allow the fast path.
+ __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);
+
+ // Fast path does a normal load context.
+ {
+ Node* slot_context = __ GetContextAtDepth(context, depth);
+ Node* result = __ LoadContextSlot(slot_context, slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
+
+ // Slow path when we have to call out to the runtime.
+ __ Bind(&slowpath);
+ {
+ Node* name = __ LoadConstantPoolEntry(name_index);
+ Node* result = __ CallRuntime(function_id, context, name);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
+}
+
+// LdaLookupSlot <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically.
+void Interpreter::DoLdaLookupContextSlot(InterpreterAssembler* assembler) {
+ DoLdaLookupContextSlot(Runtime::kLoadLookupSlot, assembler);
+}
+
+// LdaLookupSlotInsideTypeof <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically without causing a NoReferenceError.
+void Interpreter::DoLdaLookupContextSlotInsideTypeof(
+ InterpreterAssembler* assembler) {
+ DoLdaLookupContextSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
+}
+
+void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id,
+ InterpreterAssembler* assembler) {
+ Node* context = __ GetContext();
+ Node* name_index = __ BytecodeOperandIdx(0);
+ Node* feedback_slot = __ BytecodeOperandIdx(1);
+ Node* depth = __ BytecodeOperandUImm(2);
+
+ Label slowpath(assembler, Label::kDeferred);
+
+ // Check for context extensions to allow the fast path
+ __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);
+
+ // Fast path does a normal load global
+ {
+ Callable ic = CodeFactory::LoadGlobalICInOptimizedCode(
+ isolate_, function_id == Runtime::kLoadLookupSlotInsideTypeof
+ ? INSIDE_TYPEOF
+ : NOT_INSIDE_TYPEOF);
+ Node* result = BuildLoadGlobal(ic, context, feedback_slot, assembler);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
+
+ // Slow path when we have to call out to the runtime
+ __ Bind(&slowpath);
+ {
+ Node* name = __ LoadConstantPoolEntry(name_index);
+ Node* result = __ CallRuntime(function_id, context, name);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
+}
+
+// LdaLookupGlobalSlot <name_index> <feedback_slot> <depth>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically.
+void Interpreter::DoLdaLookupGlobalSlot(InterpreterAssembler* assembler) {
+ DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlot, assembler);
+}
+
+// LdaLookupGlobalSlotInsideTypeof <name_index> <feedback_slot> <depth>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically without causing a NoReferenceError.
+void Interpreter::DoLdaLookupGlobalSlotInsideTypeof(
+ InterpreterAssembler* assembler) {
+ DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
+}
+
void Interpreter::DoStaLookupSlot(LanguageMode language_mode,
InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
@@ -816,6 +930,80 @@ void Interpreter::DoBinaryOpWithFeedback(InterpreterAssembler* assembler) {
__ Dispatch();
}
+template <class Generator>
+void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* lhs = __ LoadRegister(reg_index);
+ Node* rhs = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+
+ // TODO(interpreter): the only reason this check is here is because we
+ // sometimes emit comparisons that shouldn't collect feedback (e.g.
+ // try-finally blocks and generators), and we could get rid of this by
+ // introducing Smi equality tests.
+ Label skip_feedback_update(assembler);
+ __ GotoIf(__ WordEqual(slot_index, __ IntPtrConstant(0)),
+ &skip_feedback_update);
+
+ Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
+ Label lhs_is_smi(assembler), lhs_is_not_smi(assembler),
+ gather_rhs_type(assembler), do_compare(assembler);
+ __ Branch(__ WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+
+ __ Bind(&lhs_is_smi);
+ var_type_feedback.Bind(
+ __ Int32Constant(CompareOperationFeedback::kSignedSmall));
+ __ Goto(&gather_rhs_type);
+
+ __ Bind(&lhs_is_not_smi);
+ {
+ Label lhs_is_number(assembler), lhs_is_not_number(assembler);
+ Node* lhs_map = __ LoadMap(lhs);
+ __ Branch(__ WordEqual(lhs_map, __ HeapNumberMapConstant()), &lhs_is_number,
+ &lhs_is_not_number);
+
+ __ Bind(&lhs_is_number);
+ var_type_feedback.Bind(__ Int32Constant(CompareOperationFeedback::kNumber));
+ __ Goto(&gather_rhs_type);
+
+ __ Bind(&lhs_is_not_number);
+ var_type_feedback.Bind(__ Int32Constant(CompareOperationFeedback::kAny));
+ __ Goto(&do_compare);
+ }
+
+ __ Bind(&gather_rhs_type);
+ {
+ Label rhs_is_smi(assembler);
+ __ GotoIf(__ WordIsSmi(rhs), &rhs_is_smi);
+
+ Node* rhs_map = __ LoadMap(rhs);
+ Node* rhs_type =
+ __ Select(__ WordEqual(rhs_map, __ HeapNumberMapConstant()),
+ __ Int32Constant(CompareOperationFeedback::kNumber),
+ __ Int32Constant(CompareOperationFeedback::kAny));
+ var_type_feedback.Bind(__ Word32Or(var_type_feedback.value(), rhs_type));
+ __ Goto(&do_compare);
+
+ __ Bind(&rhs_is_smi);
+ var_type_feedback.Bind(
+ __ Word32Or(var_type_feedback.value(),
+ __ Int32Constant(CompareOperationFeedback::kSignedSmall)));
+ __ Goto(&do_compare);
+ }
+
+ __ Bind(&do_compare);
+ __ UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_index);
+ __ Goto(&skip_feedback_update);
+
+ __ Bind(&skip_feedback_update);
+ Node* result = Generator::Generate(assembler, lhs, rhs, context);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
// Add <src>
//
// Add register <src> to accumulator.
@@ -1227,25 +1415,29 @@ void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) {
// ToName
//
-// Cast the object referenced by the accumulator to a name.
+// Convert the object referenced by the accumulator to a name.
void Interpreter::DoToName(InterpreterAssembler* assembler) {
- Node* result = BuildUnaryOp(CodeFactory::ToName(isolate_), assembler);
+ Node* object = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* result = __ ToName(context, object);
__ StoreRegister(result, __ BytecodeOperandReg(0));
__ Dispatch();
}
// ToNumber
//
-// Cast the object referenced by the accumulator to a number.
+// Convert the object referenced by the accumulator to a number.
void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
- Node* result = BuildUnaryOp(CodeFactory::ToNumber(isolate_), assembler);
+ Node* object = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* result = __ ToNumber(context, object);
__ StoreRegister(result, __ BytecodeOperandReg(0));
__ Dispatch();
}
// ToObject
//
-// Cast the object referenced by the accumulator to a JSObject.
+// Convert the object referenced by the accumulator to a JSReceiver.
void Interpreter::DoToObject(InterpreterAssembler* assembler) {
Node* result = BuildUnaryOp(CodeFactory::ToObject(isolate_), assembler);
__ StoreRegister(result, __ BytecodeOperandReg(0));
@@ -1395,7 +1587,12 @@ void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
DoJSCall(assembler, TailCallMode::kAllow);
}
-void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
+// CallRuntime <function_id> <first_arg> <arg_count>
+//
+// Call the runtime function |function_id| with the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers.
+void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
Node* function_id = __ BytecodeOperandRuntimeId(0);
Node* first_arg_reg = __ BytecodeOperandReg(1);
Node* first_arg = __ RegisterLocation(first_arg_reg);
@@ -1406,15 +1603,6 @@ void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// CallRuntime <function_id> <first_arg> <arg_count>
-//
-// Call the runtime function |function_id| with the first argument in
-// register |first_arg| and |arg_count| arguments in subsequent
-// registers.
-void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
- DoCallRuntimeCommon(assembler);
-}
-
// InvokeIntrinsic <function_id> <first_arg> <arg_count>
//
// Implements the semantic equivalent of calling the runtime function
@@ -1432,7 +1620,13 @@ void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
__ Dispatch();
}
-void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
+// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
+//
+// Call the runtime function |function_id| which returns a pair, with the
+// first argument in register |first_arg| and |arg_count| arguments in
+// subsequent registers. Returns the result in <first_return> and
+// <first_return + 1>
+void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
// Call the runtime function.
Node* function_id = __ BytecodeOperandRuntimeId(0);
Node* first_arg_reg = __ BytecodeOperandReg(1);
@@ -1452,17 +1646,11 @@ void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
+// CallJSRuntime <context_index> <receiver> <arg_count>
//
-// Call the runtime function |function_id| which returns a pair, with the
-// first argument in register |first_arg| and |arg_count| arguments in
-// subsequent registers. Returns the result in <first_return> and
-// <first_return + 1>
-void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
- DoCallRuntimeForPairCommon(assembler);
-}
-
-void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
+// Call the JS runtime function that has the |context_index| with the receiver
+// in register |receiver| and |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
Node* context_index = __ BytecodeOperandIdx(0);
Node* receiver_reg = __ BytecodeOperandReg(1);
Node* first_arg = __ RegisterLocation(receiver_reg);
@@ -1483,15 +1671,13 @@ void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// CallJSRuntime <context_index> <receiver> <arg_count>
+// New <constructor> <first_arg> <arg_count>
//
-// Call the JS runtime function that has the |context_index| with the receiver
-// in register |receiver| and |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
- DoCallJSRuntimeCommon(assembler);
-}
-
-void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
+// Call operator new with |constructor| and the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers. The new.target is in the accumulator.
+//
+void Interpreter::DoNew(InterpreterAssembler* assembler) {
Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
Node* new_target = __ GetAccumulator();
Node* constructor_reg = __ BytecodeOperandReg(0);
@@ -1499,56 +1685,48 @@ void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
Node* first_arg_reg = __ BytecodeOperandReg(1);
Node* first_arg = __ RegisterLocation(first_arg_reg);
Node* args_count = __ BytecodeOperandCount(2);
+ Node* slot_id = __ BytecodeOperandIdx(3);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- Node* result =
- __ CallConstruct(constructor, context, new_target, first_arg, args_count);
+ Node* result = __ CallConstruct(constructor, context, new_target, first_arg,
+ args_count, slot_id, type_feedback_vector);
__ SetAccumulator(result);
__ Dispatch();
}
-// New <constructor> <first_arg> <arg_count>
-//
-// Call operator new with |constructor| and the first argument in
-// register |first_arg| and |arg_count| arguments in subsequent
-// registers. The new.target is in the accumulator.
-//
-void Interpreter::DoNew(InterpreterAssembler* assembler) {
- DoCallConstruct(assembler);
-}
-
// TestEqual <src>
//
// Test if the value in the <src> register equals the accumulator.
void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
- DoBinaryOp<EqualStub>(assembler);
+ DoCompareOpWithFeedback<EqualStub>(assembler);
}
// TestNotEqual <src>
//
// Test if the value in the <src> register is not equal to the accumulator.
void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
- DoBinaryOp<NotEqualStub>(assembler);
+ DoCompareOpWithFeedback<NotEqualStub>(assembler);
}
// TestEqualStrict <src>
//
// Test if the value in the <src> register is strictly equal to the accumulator.
void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
- DoBinaryOp<StrictEqualStub>(assembler);
+ DoCompareOpWithFeedback<StrictEqualStub>(assembler);
}
// TestLessThan <src>
//
// Test if the value in the <src> register is less than the accumulator.
void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
- DoBinaryOp<LessThanStub>(assembler);
+ DoCompareOpWithFeedback<LessThanStub>(assembler);
}
// TestGreaterThan <src>
//
// Test if the value in the <src> register is greater than the accumulator.
void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
- DoBinaryOp<GreaterThanStub>(assembler);
+ DoCompareOpWithFeedback<GreaterThanStub>(assembler);
}
// TestLessThanOrEqual <src>
@@ -1556,7 +1734,7 @@ void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
// Test if the value in the <src> register is less than or equal to the
// accumulator.
void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
- DoBinaryOp<LessThanOrEqualStub>(assembler);
+ DoCompareOpWithFeedback<LessThanOrEqualStub>(assembler);
}
// TestGreaterThanOrEqual <src>
@@ -1564,7 +1742,7 @@ void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
// Test if the value in the <src> register is greater than or equal to the
// accumulator.
void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
- DoBinaryOp<GreaterThanOrEqualStub>(assembler);
+ DoCompareOpWithFeedback<GreaterThanOrEqualStub>(assembler);
}
// TestIn <src>
@@ -1783,6 +1961,35 @@ void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
__ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}
+// JumpLoop <imm> <loop_depth>
+//
+// Jump by number of bytes represented by the immediate operand |imm|. Also
+// performs a loop nesting check and potentially triggers OSR in case the
+// current OSR level matches (or exceeds) the specified |loop_depth|.
+void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
+ Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* loop_depth = __ BytecodeOperandImm(1);
+ Node* osr_level = __ LoadOSRNestingLevel();
+
+ // Check if OSR points at the given {loop_depth} are armed by comparing it to
+ // the current {osr_level} loaded from the header of the BytecodeArray.
+ Label ok(assembler), osr_armed(assembler, Label::kDeferred);
+ Node* condition = __ Int32GreaterThanOrEqual(loop_depth, osr_level);
+ __ Branch(condition, &ok, &osr_armed);
+
+ __ Bind(&ok);
+ __ Jump(relative_jump);
+
+ __ Bind(&osr_armed);
+ {
+ Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate_);
+ Node* target = __ HeapConstant(callable.code());
+ Node* context = __ GetContext();
+ __ CallStub(callable.descriptor(), target, context);
+ __ Jump(relative_jump);
+ }
+}
+
// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
//
// Creates a regular expression literal for literal index <literal_idx> with
@@ -1804,21 +2011,47 @@ void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
// CreateArrayLiteral <element_idx> <literal_idx> <flags>
//
-// Creates an array literal for literal index <literal_idx> with flags <flags>
-// and constant elements in <element_idx>.
+// Creates an array literal for literal index <literal_idx> with
+// CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
- Node* index = __ BytecodeOperandIdx(0);
- Node* constant_elements = __ LoadConstantPoolEntry(index);
Node* literal_index_raw = __ BytecodeOperandIdx(1);
Node* literal_index = __ SmiTag(literal_index_raw);
- Node* flags_raw = __ BytecodeOperandFlag(2);
- Node* flags = __ SmiTag(flags_raw);
Node* closure = __ LoadRegister(Register::function_closure());
Node* context = __ GetContext();
- Node* result = __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
- literal_index, constant_elements, flags);
- __ SetAccumulator(result);
- __ Dispatch();
+ Node* bytecode_flags = __ BytecodeOperandFlag(2);
+
+ Label fast_shallow_clone(assembler),
+ call_runtime(assembler, Label::kDeferred);
+ Node* use_fast_shallow_clone = __ Word32And(
+ bytecode_flags,
+ __ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask));
+ __ BranchIf(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
+
+ __ Bind(&fast_shallow_clone);
+ {
+ DCHECK(FLAG_allocation_site_pretenuring);
+ Node* result = FastCloneShallowArrayStub::Generate(
+ assembler, closure, literal_index, context, &call_runtime,
+ TRACK_ALLOCATION_SITE);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
+
+ __ Bind(&call_runtime);
+ {
+ STATIC_ASSERT(CreateArrayLiteralFlags::FlagsBits::kShift == 0);
+ Node* flags_raw = __ Word32And(
+ bytecode_flags,
+ __ Int32Constant(CreateArrayLiteralFlags::FlagsBits::kMask));
+ Node* flags = __ SmiTag(flags_raw);
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant_elements = __ LoadConstantPoolEntry(index);
+ Node* result =
+ __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
+ literal_index, constant_elements, flags);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
}
// CreateObjectLiteral <element_idx> <literal_idx> <flags>
@@ -1915,19 +2148,22 @@ void Interpreter::DoCreateBlockContext(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// CreateCatchContext <exception> <index>
+// CreateCatchContext <exception> <name_idx> <scope_info_idx>
//
// Creates a new context for a catch block with the |exception| in a register,
-// the variable name at |index| and the closure in the accumulator.
+// the variable name at |name_idx|, the ScopeInfo at |scope_info_idx|, and the
+// closure in the accumulator.
void Interpreter::DoCreateCatchContext(InterpreterAssembler* assembler) {
Node* exception_reg = __ BytecodeOperandReg(0);
Node* exception = __ LoadRegister(exception_reg);
- Node* index = __ BytecodeOperandIdx(1);
- Node* name = __ LoadConstantPoolEntry(index);
+ Node* name_idx = __ BytecodeOperandIdx(1);
+ Node* name = __ LoadConstantPoolEntry(name_idx);
+ Node* scope_info_idx = __ BytecodeOperandIdx(2);
+ Node* scope_info = __ LoadConstantPoolEntry(scope_info_idx);
Node* closure = __ GetAccumulator();
Node* context = __ GetContext();
__ SetAccumulator(__ CallRuntime(Runtime::kPushCatchContext, context, name,
- exception, closure));
+ exception, scope_info, closure));
__ Dispatch();
}
@@ -1936,24 +2172,27 @@ void Interpreter::DoCreateCatchContext(InterpreterAssembler* assembler) {
// Creates a new context with number of |slots| for the function closure.
void Interpreter::DoCreateFunctionContext(InterpreterAssembler* assembler) {
Node* closure = __ LoadRegister(Register::function_closure());
- Node* slots = __ BytecodeOperandIdx(0);
+ Node* slots = __ BytecodeOperandUImm(0);
Node* context = __ GetContext();
__ SetAccumulator(
FastNewFunctionContextStub::Generate(assembler, closure, slots, context));
__ Dispatch();
}
-// CreateWithContext <register>
+// CreateWithContext <register> <scope_info_idx>
//
-// Creates a new context for a with-statement with the object in |register| and
-// the closure in the accumulator.
+// Creates a new context with the ScopeInfo at |scope_info_idx| for a
+// with-statement with the object in |register| and the closure in the
+// accumulator.
void Interpreter::DoCreateWithContext(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(reg_index);
+ Node* scope_info_idx = __ BytecodeOperandIdx(1);
+ Node* scope_info = __ LoadConstantPoolEntry(scope_info_idx);
Node* closure = __ GetAccumulator();
Node* context = __ GetContext();
- __ SetAccumulator(
- __ CallRuntime(Runtime::kPushWithContext, context, object, closure));
+ __ SetAccumulator(__ CallRuntime(Runtime::kPushWithContext, context, object,
+ scope_info, closure));
__ Dispatch();
}
@@ -2047,32 +2286,6 @@ void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
}
}
-// OsrPoll <loop_depth>
-//
-// Performs a loop nesting check and potentially triggers OSR.
-void Interpreter::DoOsrPoll(InterpreterAssembler* assembler) {
- Node* loop_depth = __ BytecodeOperandImm(0);
- Node* osr_level = __ LoadOSRNestingLevel();
-
- // Check if OSR points at the given {loop_depth} are armed by comparing it to
- // the current {osr_level} loaded from the header of the BytecodeArray.
- Label ok(assembler), osr_armed(assembler, Label::kDeferred);
- Node* condition = __ Int32GreaterThanOrEqual(loop_depth, osr_level);
- __ Branch(condition, &ok, &osr_armed);
-
- __ Bind(&ok);
- __ Dispatch();
-
- __ Bind(&osr_armed);
- {
- Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate_);
- Node* target = __ HeapConstant(callable.code());
- Node* context = __ GetContext();
- __ CallStub(callable.descriptor(), target, context);
- __ Dispatch();
- }
-}
-
// Throw
//
// Throws the exception in the accumulator.
@@ -2158,9 +2371,8 @@ void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
if (FLAG_debug_code) {
Label already_receiver(assembler), abort(assembler);
Node* instance_type = __ LoadInstanceType(receiver);
- Node* first_receiver_type = __ Int32Constant(FIRST_JS_RECEIVER_TYPE);
- __ BranchIfInt32GreaterThanOrEqual(instance_type, first_receiver_type,
- &already_receiver, &abort);
+ __ Branch(__ IsJSReceiverInstanceType(instance_type), &already_receiver,
+ &abort);
__ Bind(&abort);
{
__ Abort(kExpectedJSReceiver);
@@ -2260,10 +2472,10 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
}
}
-// ForInDone <index> <cache_length>
+// ForInContinue <index> <cache_length>
//
-// Returns true if the end of the enumerable properties has been reached.
-void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
+// Returns false if the end of the enumerable properties has been reached.
+void Interpreter::DoForInContinue(InterpreterAssembler* assembler) {
Node* index_reg = __ BytecodeOperandReg(0);
Node* index = __ LoadRegister(index_reg);
Node* cache_length_reg = __ BytecodeOperandReg(1);
@@ -2274,12 +2486,12 @@ void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
__ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
__ Bind(&if_true);
{
- __ SetAccumulator(__ BooleanConstant(true));
+ __ SetAccumulator(__ BooleanConstant(false));
__ Goto(&end);
}
__ Bind(&if_false);
{
- __ SetAccumulator(__ BooleanConstant(false));
+ __ SetAccumulator(__ BooleanConstant(true));
__ Goto(&end);
}
__ Bind(&end);
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index bbd0102999..b646bf8313 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -22,6 +22,7 @@ namespace internal {
class Isolate;
class Callable;
class CompilationInfo;
+class CompilationJob;
namespace compiler {
class Node;
@@ -42,8 +43,8 @@ class Interpreter {
// Returns the interrupt budget which should be used for the profiler counter.
static int InterruptBudget();
- // Generate bytecode for |info|.
- static bool MakeBytecode(CompilationInfo* info);
+ // Creates a compilation job which will generate bytecode for |info|.
+ static CompilationJob* NewCompilationJob(CompilationInfo* info);
// Return bytecode handler for |bytecode|.
Code* GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
@@ -55,7 +56,7 @@ class Interpreter {
void TraceCodegen(Handle<Code> code);
const char* LookupNameOfBytecodeHandler(Code* code);
- Local<v8::Object> GetDispatchCountersObject();
+ V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
Address dispatch_table_address() {
return reinterpret_cast<Address>(&dispatch_table_[0]);
@@ -83,6 +84,11 @@ class Interpreter {
template <class Generator>
void DoBinaryOpWithFeedback(InterpreterAssembler* assembler);
+ // Generates code to perform the comparison via |Generator| while gathering
+ // type feedback.
+ template <class Generator>
+ void DoCompareOpWithFeedback(InterpreterAssembler* assembler);
+
// Generates code to perform the bitwise binary operation corresponding to
// |bitwise_op| while gathering type feedback.
void DoBitwiseBinaryOp(Token::Value bitwise_op,
@@ -118,18 +124,6 @@ class Interpreter {
// Generates code to perform a JS call that collects type feedback.
void DoJSCall(InterpreterAssembler* assembler, TailCallMode tail_call_mode);
- // Generates code to perform a runtime call.
- void DoCallRuntimeCommon(InterpreterAssembler* assembler);
-
- // Generates code to perform a runtime call returning a pair.
- void DoCallRuntimeForPairCommon(InterpreterAssembler* assembler);
-
- // Generates code to perform a JS runtime call.
- void DoCallJSRuntimeCommon(InterpreterAssembler* assembler);
-
- // Generates code to perform a constructor call.
- void DoCallConstruct(InterpreterAssembler* assembler);
-
// Generates code to perform delete via function_id.
void DoDelete(Runtime::FunctionId function_id,
InterpreterAssembler* assembler);
@@ -138,18 +132,28 @@ class Interpreter {
void DoLdaLookupSlot(Runtime::FunctionId function_id,
InterpreterAssembler* assembler);
- // Generates code to perform a lookup slot store depending on |language_mode|.
+ // Generates code to perform a lookup slot load via |function_id| that can
+ // fast path to a context slot load.
+ void DoLdaLookupContextSlot(Runtime::FunctionId function_id,
+ InterpreterAssembler* assembler);
+
+ // Generates code to perform a lookup slot load via |function_id| that can
+ // fast path to a global load.
+ void DoLdaLookupGlobalSlot(Runtime::FunctionId function_id,
+ InterpreterAssembler* assembler);
+
+ // Generates code to perform a lookup slot store depending on
+ // |language_mode|.
void DoStaLookupSlot(LanguageMode language_mode,
InterpreterAssembler* assembler);
- // Generates a node with the undefined constant.
- compiler::Node* BuildLoadUndefined(InterpreterAssembler* assembler);
-
// Generates code to load a context slot.
compiler::Node* BuildLoadContextSlot(InterpreterAssembler* assembler);
// Generates code to load a global.
- compiler::Node* BuildLoadGlobal(Callable ic, InterpreterAssembler* assembler);
+ compiler::Node* BuildLoadGlobal(Callable ic, compiler::Node* context,
+ compiler::Node* feedback_slot,
+ InterpreterAssembler* assembler);
// Generates code to load a named property.
compiler::Node* BuildLoadNamedProperty(Callable ic,
diff --git a/deps/v8/src/interpreter/mkpeephole.cc b/deps/v8/src/interpreter/mkpeephole.cc
index 8e9d5fea47..270fe83ef9 100644
--- a/deps/v8/src/interpreter/mkpeephole.cc
+++ b/deps/v8/src/interpreter/mkpeephole.cc
@@ -146,6 +146,9 @@ PeepholeActionAndData PeepholeActionTableWriter::LookupActionAndData(
Bytecode::kIllegal};
}
+ // TODO(rmcilroy): Add elide for consecutive mov to and from the same
+ // register.
+
// Remove ToBoolean coercion from conditional jumps where possible.
if (Bytecodes::WritesBooleanToAccumulator(last)) {
if (Bytecodes::IsJumpIfToBoolean(current)) {
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index 5c71d9188e..34c98bba64 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -76,6 +76,11 @@ bool Isolate::is_catchable_by_javascript(Object* exception) {
return exception != heap()->termination_exception();
}
+bool Isolate::is_catchable_by_wasm(Object* exception) {
+ return is_catchable_by_javascript(exception) &&
+ (exception->IsNumber() || exception->IsSmi());
+}
+
void Isolate::FireBeforeCallEnteredCallback() {
for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
before_call_entered_callbacks_.at(i)(reinterpret_cast<v8::Isolate*>(this));
@@ -100,20 +105,6 @@ Isolate::ExceptionScope::~ExceptionScope() {
isolate_->set_pending_exception(*pending_exception_);
}
-SaveContext::SaveContext(Isolate* isolate)
- : isolate_(isolate), prev_(isolate->save_context()) {
- if (isolate->context() != NULL) {
- context_ = Handle<Context>(isolate->context());
- }
- isolate->set_save_context(this);
- c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
-}
-
-SaveContext::~SaveContext() {
- isolate_->set_context(context_.is_null() ? NULL : *context_);
- isolate_->set_save_context(prev_);
-}
-
#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
Handle<type> Isolate::name() { \
return Handle<type>(raw_native_context()->name(), this); \
@@ -147,6 +138,11 @@ bool Isolate::IsHasInstanceLookupChainIntact() {
return has_instance_cell->value() == Smi::FromInt(kArrayProtectorValid);
}
+bool Isolate::IsStringLengthOverflowIntact() {
+ PropertyCell* has_instance_cell = heap()->string_length_protector();
+ return has_instance_cell->value() == Smi::FromInt(kArrayProtectorValid);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index e14db60385..63c927b04c 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -10,7 +10,6 @@
#include <sstream>
#include "src/ast/context-slot-cache.h"
-#include "src/base/accounting-allocator.h"
#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
@@ -28,6 +27,7 @@
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/libsampler/sampler.h"
@@ -43,6 +43,7 @@
#include "src/version.h"
#include "src/vm-state-inl.h"
#include "src/wasm/wasm-module.h"
+#include "src/zone/accounting-allocator.h"
namespace v8 {
namespace internal {
@@ -315,21 +316,7 @@ void Isolate::PushStackTraceAndDie(unsigned int magic, void* ptr1, void* ptr2,
base::OS::Abort();
}
-static Handle<FixedArray> MaybeGrow(Isolate* isolate,
- Handle<FixedArray> elements,
- int cur_position, int new_size) {
- if (new_size > elements->length()) {
- int new_capacity = JSObject::NewElementsCapacity(elements->length());
- Handle<FixedArray> new_elements =
- isolate->factory()->NewFixedArrayWithHoles(new_capacity);
- for (int i = 0; i < cur_position; i++) {
- new_elements->set(i, elements->get(i));
- }
- elements = new_elements;
- }
- DCHECK(new_size <= elements->length());
- return elements;
-}
+namespace {
class StackTraceHelper {
public:
@@ -351,21 +338,17 @@ class StackTraceHelper {
break;
}
encountered_strict_function_ = false;
- sloppy_frames_ = 0;
}
+ // Poison stack frames below the first strict mode frame.
// The stack trace API should not expose receivers and function
// objects on frames deeper than the top-most one with a strict mode
- // function. The number of sloppy frames is stored as first element in
- // the result array.
- void CountSloppyFrames(JSFunction* fun) {
+ // function.
+ bool IsStrictFrame(JSFunction* fun) {
if (!encountered_strict_function_) {
- if (is_strict(fun->shared()->language_mode())) {
- encountered_strict_function_ = true;
- } else {
- sloppy_frames_++;
- }
+ encountered_strict_function_ = is_strict(fun->shared()->language_mode());
}
+ return encountered_strict_function_;
}
// Determines whether the given stack frame should be displayed in a stack
@@ -375,8 +358,6 @@ class StackTraceHelper {
IsInSameSecurityContext(fun);
}
- int sloppy_frames() const { return sloppy_frames_; }
-
private:
// This mechanism excludes a number of uninteresting frames from the stack
// trace. This can be be the first frame (which will be a builtin-exit frame
@@ -422,12 +403,9 @@ class StackTraceHelper {
const Handle<Object> caller_;
bool skip_next_frame_;
- int sloppy_frames_;
bool encountered_strict_function_;
};
-namespace {
-
// TODO(jgruber): Fix all cases in which frames give us a hole value (e.g. the
// receiver in RegExp constructor frames.
Handle<Object> TheHoleToUndefined(Isolate* isolate, Handle<Object> in) {
@@ -435,35 +413,36 @@ Handle<Object> TheHoleToUndefined(Isolate* isolate, Handle<Object> in) {
? Handle<Object>::cast(isolate->factory()->undefined_value())
: in;
}
+
+bool GetStackTraceLimit(Isolate* isolate, int* result) {
+ Handle<JSObject> error = isolate->error_function();
+
+ Handle<String> key = isolate->factory()->stackTraceLimit_string();
+ Handle<Object> stack_trace_limit = JSReceiver::GetDataProperty(error, key);
+ if (!stack_trace_limit->IsNumber()) return false;
+
+ // Ensure that limit is not negative.
+ *result = Max(FastD2IChecked(stack_trace_limit->Number()), 0);
+ return true;
}
+} // namespace
+
Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
FrameSkipMode mode,
Handle<Object> caller) {
DisallowJavascriptExecution no_js(this);
- // Get stack trace limit.
- Handle<JSObject> error = error_function();
- Handle<String> stackTraceLimit =
- factory()->InternalizeUtf8String("stackTraceLimit");
- DCHECK(!stackTraceLimit.is_null());
- Handle<Object> stack_trace_limit =
- JSReceiver::GetDataProperty(error, stackTraceLimit);
- if (!stack_trace_limit->IsNumber()) return factory()->undefined_value();
- int limit = FastD2IChecked(stack_trace_limit->Number());
- limit = Max(limit, 0); // Ensure that limit is not negative.
-
- int initial_size = Min(limit, 10);
- Handle<FixedArray> elements =
- factory()->NewFixedArrayWithHoles(initial_size * 4 + 1);
+ int limit;
+ if (!GetStackTraceLimit(this, &limit)) return factory()->undefined_value();
+
+ const int initial_size = Min(limit, 10);
+ Handle<FrameArray> elements = factory()->NewFrameArray(initial_size);
StackTraceHelper helper(this, mode, caller);
- // First element is reserved to store the number of sloppy frames.
- int cursor = 1;
- int frames_seen = 0;
- for (StackFrameIterator iter(this); !iter.done() && frames_seen < limit;
- iter.Advance()) {
+ for (StackFrameIterator iter(this);
+ !iter.done() && elements->FrameCount() < limit; iter.Advance()) {
StackFrame* frame = iter.frame();
switch (frame->type()) {
@@ -481,26 +460,27 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
// Filter out internal frames that we do not want to show.
if (!helper.IsVisibleInStackTrace(*fun)) continue;
- helper.CountSloppyFrames(*fun);
Handle<Object> recv = frames[i].receiver();
Handle<AbstractCode> abstract_code = frames[i].abstract_code();
+ const int offset = frames[i].code_offset();
+
+ bool force_constructor = false;
if (frame->type() == StackFrame::BUILTIN) {
// Help CallSite::IsConstructor correctly detect hand-written
// construct stubs.
- Code* code = Code::cast(*abstract_code);
- if (code->is_construct_stub()) {
- recv = handle(heap()->call_site_constructor_symbol(), this);
+ if (Code::cast(*abstract_code)->is_construct_stub()) {
+ force_constructor = true;
}
}
- Handle<Smi> offset(Smi::FromInt(frames[i].code_offset()), this);
-
- elements = MaybeGrow(this, elements, cursor, cursor + 4);
- elements->set(cursor++, *TheHoleToUndefined(this, recv));
- elements->set(cursor++, *fun);
- elements->set(cursor++, *abstract_code);
- elements->set(cursor++, *offset);
- frames_seen++;
+
+ int flags = 0;
+ if (helper.IsStrictFrame(*fun)) flags |= FrameArray::kIsStrict;
+ if (force_constructor) flags |= FrameArray::kForceConstructor;
+
+ elements = FrameArray::AppendJSFrame(
+ elements, TheHoleToUndefined(this, recv), fun, abstract_code,
+ offset, flags);
}
} break;
@@ -510,54 +490,49 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
// Filter out internal frames that we do not want to show.
if (!helper.IsVisibleInStackTrace(*fun)) continue;
- helper.CountSloppyFrames(*fun);
- Handle<Code> code = handle(exit_frame->LookupCode(), this);
- int offset =
+ Handle<Object> recv(exit_frame->receiver(), this);
+ Handle<Code> code(exit_frame->LookupCode(), this);
+ const int offset =
static_cast<int>(exit_frame->pc() - code->instruction_start());
- // In order to help CallSite::IsConstructor detect builtin constructors,
- // we reuse the receiver field to pass along a special symbol.
- Handle<Object> recv;
- if (exit_frame->IsConstructor()) {
- recv = factory()->call_site_constructor_symbol();
- } else {
- recv = handle(exit_frame->receiver(), this);
- }
+ int flags = 0;
+ if (helper.IsStrictFrame(*fun)) flags |= FrameArray::kIsStrict;
+ if (exit_frame->IsConstructor()) flags |= FrameArray::kForceConstructor;
- elements = MaybeGrow(this, elements, cursor, cursor + 4);
- elements->set(cursor++, *recv);
- elements->set(cursor++, *fun);
- elements->set(cursor++, *code);
- elements->set(cursor++, Smi::FromInt(offset));
- frames_seen++;
+ elements = FrameArray::AppendJSFrame(elements, recv, fun,
+ Handle<AbstractCode>::cast(code),
+ offset, flags);
} break;
case StackFrame::WASM: {
WasmFrame* wasm_frame = WasmFrame::cast(frame);
+ Handle<Object> wasm_object(wasm_frame->wasm_obj(), this);
+ const int wasm_function_index = wasm_frame->function_index();
Code* code = wasm_frame->unchecked_code();
- Handle<AbstractCode> abstract_code =
- Handle<AbstractCode>(AbstractCode::cast(code), this);
- int offset =
+ Handle<AbstractCode> abstract_code(AbstractCode::cast(code), this);
+ const int offset =
static_cast<int>(wasm_frame->pc() - code->instruction_start());
- elements = MaybeGrow(this, elements, cursor, cursor + 4);
- elements->set(cursor++, wasm_frame->wasm_obj());
- elements->set(cursor++, Smi::FromInt(wasm_frame->function_index()));
- elements->set(cursor++, *abstract_code);
- elements->set(cursor++, Smi::FromInt(offset));
- frames_seen++;
+
+ // TODO(wasm): The wasm object returned by the WasmFrame should always
+ // be a wasm object.
+ DCHECK(wasm::IsWasmObject(*wasm_object) ||
+ wasm_object->IsUndefined(this));
+
+ elements = FrameArray::AppendWasmFrame(
+ elements, wasm_object, wasm_function_index, abstract_code, offset,
+ FrameArray::kIsWasmFrame);
} break;
default:
break;
}
}
- elements->set(0, Smi::FromInt(helper.sloppy_frames()));
- elements->Shrink(cursor);
- Handle<JSArray> result = factory()->NewJSArrayWithElements(elements);
- result->set_length(Smi::FromInt(cursor));
+
+ elements->ShrinkToFit();
+
// TODO(yangguo): Queue this structured stack trace for preprocessing on GC.
- return result;
+ return factory()->NewJSArrayWithElements(elements);
}
MaybeHandle<JSReceiver> Isolate::CaptureAndSetDetailedStackTrace(
@@ -764,19 +739,6 @@ class CaptureStackTraceHelper {
Handle<String> constructor_key_;
};
-
-int PositionFromStackTrace(Handle<FixedArray> elements, int index) {
- DisallowHeapAllocation no_gc;
- Object* maybe_code = elements->get(index + 2);
- if (maybe_code->IsSmi()) {
- return Smi::cast(maybe_code)->value();
- } else {
- AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
- int code_offset = Smi::cast(elements->get(index + 3))->value();
- return abstract_code->SourcePosition(code_offset);
- }
-}
-
Handle<JSArray> Isolate::CaptureCurrentStackTrace(
int frame_limit, StackTrace::StackTraceOptions options) {
DisallowJavascriptExecution no_js(this);
@@ -963,6 +925,10 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
Object* Isolate::StackOverflow() {
+ if (FLAG_abort_on_stack_overflow) {
+ FATAL("Aborting on stack overflow");
+ }
+
DisallowJavascriptExecution no_js(this);
HandleScope scope(this);
@@ -979,7 +945,8 @@ Object* Isolate::StackOverflow() {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && FLAG_stress_compaction) {
- heap()->CollectAllGarbage(Heap::kNoGCFlags, "trigger compaction");
+ heap()->CollectAllGarbage(Heap::kNoGCFlags,
+ GarbageCollectionReason::kTesting);
}
#endif // VERIFY_HEAP
@@ -1017,6 +984,8 @@ void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
void Isolate::InvokeApiInterruptCallbacks() {
+ RuntimeCallTimerScope runtimeTimer(
+ this, &RuntimeCallStats::InvokeApiInterruptCallbacks);
// Note: callback below should be called outside of execution access lock.
while (true) {
InterruptEntry entry;
@@ -1180,8 +1149,8 @@ Object* Isolate::UnwindAndFindHandler() {
Address handler_sp = nullptr;
Address handler_fp = nullptr;
- // Special handling of termination exceptions, uncatchable by JavaScript code,
- // we unwind the handlers until the top ENTRY handler is found.
+ // Special handling of termination exceptions, uncatchable by JavaScript and
+ // Wasm code, we unwind the handlers until the top ENTRY handler is found.
bool catchable_by_js = is_catchable_by_javascript(exception);
// Compute handler and stack unwinding information by performing a full walk
@@ -1203,6 +1172,28 @@ Object* Isolate::UnwindAndFindHandler() {
break;
}
+ if (FLAG_wasm_eh_prototype) {
+ if (frame->is_wasm() && is_catchable_by_wasm(exception)) {
+ int stack_slots = 0; // Will contain stack slot count of frame.
+ WasmFrame* wasm_frame = static_cast<WasmFrame*>(frame);
+ offset = wasm_frame->LookupExceptionHandlerInTable(&stack_slots);
+ if (offset >= 0) {
+ // Compute the stack pointer from the frame pointer. This ensures that
+ // argument slots on the stack are dropped as returning would.
+ Address return_sp = frame->fp() +
+ StandardFrameConstants::kFixedFrameSizeAboveFp -
+ stack_slots * kPointerSize;
+
+ // Gather information from the frame.
+ code = frame->LookupCode();
+
+ handler_sp = return_sp;
+ handler_fp = frame->fp();
+ break;
+ }
+ }
+ }
+
// For optimized frames we perform a lookup in the handler table.
if (frame->is_optimized() && catchable_by_js) {
OptimizedFrame* js_frame = static_cast<OptimizedFrame*>(frame);
@@ -1349,6 +1340,8 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
HandlerTable::CatchPrediction prediction = PredictException(js_frame);
if (prediction == HandlerTable::DESUGARING) return CAUGHT_BY_DESUGARING;
+ if (prediction == HandlerTable::ASYNC_AWAIT) return CAUGHT_BY_ASYNC_AWAIT;
+ if (prediction == HandlerTable::PROMISE) return CAUGHT_BY_PROMISE;
if (prediction != HandlerTable::UNCAUGHT) return CAUGHT_BY_JAVASCRIPT;
}
@@ -1425,36 +1418,20 @@ Object* Isolate::PromoteScheduledException() {
void Isolate::PrintCurrentStackTrace(FILE* out) {
- StackTraceFrameIterator it(this);
- while (!it.done()) {
+ for (StackTraceFrameIterator it(this); !it.done(); it.Advance()) {
+ if (!it.is_javascript()) continue;
+
HandleScope scope(this);
- // Find code position if recorded in relocation info.
- StandardFrame* frame = it.frame();
- AbstractCode* abstract_code;
- int code_offset;
- if (frame->is_interpreted()) {
- InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
- abstract_code = AbstractCode::cast(iframe->GetBytecodeArray());
- code_offset = iframe->GetBytecodeOffset();
- } else {
- DCHECK(frame->is_java_script() || frame->is_wasm());
- Code* code = frame->LookupCode();
- abstract_code = AbstractCode::cast(code);
- code_offset = static_cast<int>(frame->pc() - code->instruction_start());
- }
- int pos = abstract_code->SourcePosition(code_offset);
- JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
- Handle<Object> pos_obj(Smi::FromInt(pos), this);
- // Fetch function and receiver.
- Handle<JSFunction> fun(js_frame->function(), this);
- Handle<Object> recv(js_frame->receiver(), this);
- // Advance to the next JavaScript frame and determine if the
- // current frame is the top-level frame.
- it.Advance();
- Handle<Object> is_top_level = factory()->ToBoolean(it.done());
- // Generate and print stack trace line.
- Handle<String> line =
- Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
+ JavaScriptFrame* frame = it.javascript_frame();
+
+ Handle<Object> receiver(frame->receiver(), this);
+ Handle<JSFunction> function(frame->function(), this);
+ Handle<AbstractCode> code(AbstractCode::cast(frame->LookupCode()), this);
+ const int offset =
+ static_cast<int>(frame->pc() - code->instruction_start());
+
+ JSStackFrame site(this, receiver, function, code, offset);
+ Handle<String> line = site.ToString().ToHandleChecked();
if (line->length() > 0) {
line->PrintOn(out);
PrintF(out, "\n");
@@ -1522,22 +1499,25 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
if (!property->IsJSArray()) return false;
Handle<JSArray> simple_stack_trace = Handle<JSArray>::cast(property);
- Handle<FixedArray> elements(FixedArray::cast(simple_stack_trace->elements()));
- int elements_limit = Smi::cast(simple_stack_trace->length())->value();
+ Handle<FrameArray> elements(FrameArray::cast(simple_stack_trace->elements()));
- for (int i = 1; i < elements_limit; i += 4) {
- Handle<Object> fun_obj = handle(elements->get(i + 1), this);
- if (fun_obj->IsSmi()) {
+ const int frame_count = elements->FrameCount();
+ for (int i = 0; i < frame_count; i++) {
+ if (elements->IsWasmFrame(i)) {
// TODO(clemensh): handle wasm frames
return false;
}
- Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
+
+ Handle<JSFunction> fun = handle(elements->Function(i), this);
if (!fun->shared()->IsSubjectToDebugging()) continue;
Object* script = fun->shared()->script();
if (script->IsScript() &&
!(Script::cast(script)->source()->IsUndefined(this))) {
- int pos = PositionFromStackTrace(elements, i);
+ AbstractCode* abstract_code = elements->Code(i);
+ const int code_offset = elements->Offset(i)->value();
+ const int pos = abstract_code->SourcePosition(code_offset);
+
Handle<Script> casted_script(Script::cast(script));
*target = MessageLocation(casted_script, pos, pos + 1);
return true;
@@ -1752,6 +1732,22 @@ void Isolate::PopPromise() {
global_handles()->Destroy(global_promise.location());
}
+bool Isolate::PromiseHasUserDefinedRejectHandler(Handle<Object> promise) {
+ Handle<JSFunction> fun = promise_has_user_defined_reject_handler();
+ Handle<Object> has_reject_handler;
+ // If we are, e.g., overflowing the stack, don't try to call out to JS
+ if (!AllowJavascriptExecution::IsAllowed(this)) return false;
+ // Call the registered function to check for a handler
+ if (Execution::TryCall(this, fun, promise, 0, NULL)
+ .ToHandle(&has_reject_handler)) {
+ return has_reject_handler->IsTrue(this);
+ }
+ // If an exception is thrown in the course of execution of this built-in
+ // function, it indicates either a bug, or a synthetic uncatchable
+ // exception in the shutdown path. In either case, it's OK to predict either
+ // way in DevTools.
+ return false;
+}
Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
Handle<Object> undefined = factory()->undefined_value();
@@ -1762,18 +1758,49 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
if (prediction == NOT_CAUGHT || prediction == CAUGHT_BY_EXTERNAL) {
return undefined;
}
+ Handle<Object> retval = undefined;
+ PromiseOnStack* promise_on_stack = tltop->promise_on_stack_;
for (JavaScriptFrameIterator it(this); !it.done(); it.Advance()) {
switch (PredictException(it.frame())) {
case HandlerTable::UNCAUGHT:
- break;
+ continue;
case HandlerTable::CAUGHT:
case HandlerTable::DESUGARING:
- return undefined;
+ if (retval->IsJSObject()) {
+ // Caught the result of an inner async/await invocation.
+ // Mark the inner promise as caught in the "synchronous case" so
+ // that Debug::OnException will see. In the synchronous case,
+ // namely in the code in an async function before the first
+ // await, the function which has this exception event has not yet
+ // returned, so the generated Promise has not yet been marked
+ // by AsyncFunctionAwaitCaught with promiseHandledHintSymbol.
+ Handle<Symbol> key = factory()->promise_handled_hint_symbol();
+ JSObject::SetProperty(Handle<JSObject>::cast(retval), key,
+ factory()->true_value(), STRICT)
+ .Assert();
+ }
+ return retval;
case HandlerTable::PROMISE:
- return tltop->promise_on_stack_->promise();
+ return promise_on_stack
+ ? Handle<Object>::cast(promise_on_stack->promise())
+ : undefined;
+ case HandlerTable::ASYNC_AWAIT: {
+ // If in the initial portion of async/await, continue the loop to pop up
+ // successive async/await stack frames until an asynchronous one with
+ // dependents is found, or a non-async stack frame is encountered, in
+ // order to handle the synchronous async/await catch prediction case:
+ // assume that async function calls are awaited.
+ if (!promise_on_stack) return retval;
+ retval = promise_on_stack->promise();
+ if (PromiseHasUserDefinedRejectHandler(retval)) {
+ return retval;
+ }
+ promise_on_stack = promise_on_stack->prev();
+ continue;
+ }
}
}
- return undefined;
+ return retval;
}
@@ -1904,13 +1931,13 @@ void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
#define TRACE_ISOLATE(tag)
#endif
-class VerboseAccountingAllocator : public base::AccountingAllocator {
+class VerboseAccountingAllocator : public AccountingAllocator {
public:
VerboseAccountingAllocator(Heap* heap, size_t sample_bytes)
: heap_(heap), last_memory_usage_(0), sample_bytes_(sample_bytes) {}
- void* Allocate(size_t size) override {
- void* memory = base::AccountingAllocator::Allocate(size);
+ v8::internal::Segment* AllocateSegment(size_t size) override {
+ v8::internal::Segment* memory = AccountingAllocator::AllocateSegment(size);
if (memory) {
size_t current = GetCurrentMemoryUsage();
if (last_memory_usage_.Value() + sample_bytes_ < current) {
@@ -1921,8 +1948,8 @@ class VerboseAccountingAllocator : public base::AccountingAllocator {
return memory;
}
- void Free(void* memory, size_t bytes) override {
- base::AccountingAllocator::Free(memory, bytes);
+ void FreeSegment(v8::internal::Segment* memory) override {
+ AccountingAllocator::FreeSegment(memory);
size_t current = GetCurrentMemoryUsage();
if (current + sample_bytes_ < last_memory_usage_.Value()) {
PrintJSON(current);
@@ -1977,9 +2004,8 @@ Isolate::Isolate(bool enable_serializer)
unicode_cache_(NULL),
allocator_(FLAG_trace_gc_object_stats
? new VerboseAccountingAllocator(&heap_, 256 * KB)
- : new base::AccountingAllocator()),
+ : new AccountingAllocator()),
runtime_zone_(new Zone(allocator_)),
- interface_descriptor_zone_(new Zone(allocator_)),
inner_pointer_to_code_cache_(NULL),
global_handles_(NULL),
eternal_handles_(NULL),
@@ -2004,8 +2030,6 @@ Isolate::Isolate(bool enable_serializer)
deferred_handles_head_(NULL),
optimizing_compile_dispatcher_(NULL),
stress_deopt_count_(0),
- virtual_handler_register_(NULL),
- virtual_slot_register_(NULL),
next_optimization_id_(0),
js_calls_from_api_counter_(0),
#if TRACE_MAPS
@@ -2258,9 +2282,6 @@ Isolate::~Isolate() {
delete runtime_zone_;
runtime_zone_ = nullptr;
- delete interface_descriptor_zone_;
- interface_descriptor_zone_ = nullptr;
-
delete allocator_;
allocator_ = nullptr;
@@ -2399,6 +2420,12 @@ bool Isolate::Init(Deserializer* des) {
return false;
}
+// Initialize the interface descriptors ahead of time.
+#define INTERFACE_DESCRIPTOR(V) \
+ { V##Descriptor(this); }
+ INTERFACE_DESCRIPTOR_LIST(INTERFACE_DESCRIPTOR)
+#undef INTERFACE_DESCRIPTOR
+
deoptimizer_data_ = new DeoptimizerData(heap()->memory_allocator());
const bool create_heap_objects = (des == NULL);
@@ -2436,13 +2463,19 @@ bool Isolate::Init(Deserializer* des) {
runtime_profiler_ = new RuntimeProfiler(this);
// If we are deserializing, read the state into the now-empty heap.
- if (!create_heap_objects) {
- des->Deserialize(this);
- }
- load_stub_cache_->Initialize();
- store_stub_cache_->Initialize();
- if (FLAG_ignition || serializer_enabled()) {
- interpreter_->Initialize();
+ {
+ AlwaysAllocateScope always_allocate(this);
+
+ if (!create_heap_objects) {
+ des->Deserialize(this);
+ }
+ load_stub_cache_->Initialize();
+ store_stub_cache_->Initialize();
+ if (FLAG_ignition || serializer_enabled()) {
+ interpreter_->Initialize();
+ }
+
+ heap_.NotifyDeserializationComplete();
}
// Finish initialization of ThreadLocal after deserialization is done.
@@ -2473,8 +2506,6 @@ bool Isolate::Init(Deserializer* des) {
time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
- heap_.NotifyDeserializationComplete();
-
if (!create_heap_objects) {
// Now that the heap is consistent, it's OK to generate the code for the
// deopt entry table that might have been referred to by optimized code in
@@ -2620,7 +2651,8 @@ void Isolate::DumpAndResetCompilationStats() {
turbo_statistics_ = nullptr;
delete hstatistics_;
hstatistics_ = nullptr;
- if (FLAG_runtime_call_stats) {
+ if (FLAG_runtime_call_stats &&
+ !TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) {
OFStream os(stdout);
counters()->runtime_call_stats()->Print(os);
counters()->runtime_call_stats()->Reset();
@@ -2823,6 +2855,15 @@ void Isolate::InvalidateArraySpeciesProtector() {
DCHECK(!IsArraySpeciesLookupChainIntact());
}
+void Isolate::InvalidateStringLengthOverflowProtector() {
+ DCHECK(factory()->string_length_protector()->value()->IsSmi());
+ DCHECK(IsStringLengthOverflowIntact());
+ PropertyCell::SetValueWithInvalidation(
+ factory()->string_length_protector(),
+ handle(Smi::FromInt(kArrayProtectorInvalid), this));
+ DCHECK(!IsStringLengthOverflowIntact());
+}
+
bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
DisallowHeapAllocation no_gc;
return IsInAnyContext(*array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
@@ -2964,9 +3005,44 @@ void Isolate::ReportPromiseReject(Handle<JSObject> promise,
v8::Utils::StackTraceToLocal(stack_trace)));
}
+void Isolate::PromiseResolveThenableJob(Handle<PromiseContainer> container,
+ MaybeHandle<Object>* result,
+ MaybeHandle<Object>* maybe_exception) {
+ if (debug()->is_active()) {
+ Handle<Object> before_debug_event(container->before_debug_event(), this);
+ if (before_debug_event->IsJSObject()) {
+ debug()->OnAsyncTaskEvent(Handle<JSObject>::cast(before_debug_event));
+ }
+ }
+
+ Handle<JSReceiver> thenable(container->thenable(), this);
+ Handle<JSFunction> resolve(container->resolve(), this);
+ Handle<JSFunction> reject(container->reject(), this);
+ Handle<JSReceiver> then(container->then(), this);
+ Handle<Object> argv[] = {resolve, reject};
+ *result = Execution::TryCall(this, then, thenable, arraysize(argv), argv,
+ maybe_exception);
+
+ Handle<Object> reason;
+ if (maybe_exception->ToHandle(&reason)) {
+ DCHECK(result->is_null());
+ Handle<Object> reason_arg[] = {reason};
+ *result =
+ Execution::TryCall(this, reject, factory()->undefined_value(),
+ arraysize(reason_arg), reason_arg, maybe_exception);
+ }
+
+ if (debug()->is_active()) {
+ Handle<Object> after_debug_event(container->after_debug_event(), this);
+ if (after_debug_event->IsJSObject()) {
+ debug()->OnAsyncTaskEvent(Handle<JSObject>::cast(after_debug_event));
+ }
+ }
+}
void Isolate::EnqueueMicrotask(Handle<Object> microtask) {
- DCHECK(microtask->IsJSFunction() || microtask->IsCallHandlerInfo());
+ DCHECK(microtask->IsJSFunction() || microtask->IsCallHandlerInfo() ||
+ microtask->IsPromiseContainer());
Handle<FixedArray> queue(heap()->microtask_queue(), this);
int num_tasks = pending_microtask_count();
DCHECK(num_tasks <= queue->length());
@@ -2995,6 +3071,8 @@ void Isolate::RunMicrotasks() {
void Isolate::RunMicrotasksInternal() {
+ if (!pending_microtask_count()) return;
+ TRACE_EVENT0("v8.execute", "RunMicrotasks");
while (pending_microtask_count() > 0) {
HandleScope scope(this);
int num_tasks = pending_microtask_count();
@@ -3006,18 +3084,41 @@ void Isolate::RunMicrotasksInternal() {
Isolate* isolate = this;
FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < num_tasks, i++, {
Handle<Object> microtask(queue->get(i), this);
- if (microtask->IsJSFunction()) {
- Handle<JSFunction> microtask_function =
- Handle<JSFunction>::cast(microtask);
+
+ if (microtask->IsCallHandlerInfo()) {
+ Handle<CallHandlerInfo> callback_info =
+ Handle<CallHandlerInfo>::cast(microtask);
+ v8::MicrotaskCallback callback =
+ v8::ToCData<v8::MicrotaskCallback>(callback_info->callback());
+ void* data = v8::ToCData<void*>(callback_info->data());
+ callback(data);
+ } else {
SaveContext save(this);
- set_context(microtask_function->context()->native_context());
+ Context* context = microtask->IsJSFunction()
+ ? Handle<JSFunction>::cast(microtask)->context()
+ : Handle<PromiseContainer>::cast(microtask)
+ ->resolve()
+ ->context();
+ set_context(context->native_context());
handle_scope_implementer_->EnterMicrotaskContext(
- handle(microtask_function->context(), this));
+ Handle<Context>(context, this));
+
+ MaybeHandle<Object> result;
MaybeHandle<Object> maybe_exception;
- MaybeHandle<Object> result = Execution::TryCall(
- this, microtask_function, factory()->undefined_value(), 0, NULL,
- &maybe_exception);
+
+ if (microtask->IsJSFunction()) {
+ Handle<JSFunction> microtask_function =
+ Handle<JSFunction>::cast(microtask);
+ result = Execution::TryCall(this, microtask_function,
+ factory()->undefined_value(), 0, NULL,
+ &maybe_exception);
+ } else {
+ PromiseResolveThenableJob(Handle<PromiseContainer>::cast(microtask),
+ &result, &maybe_exception);
+ }
+
handle_scope_implementer_->LeaveMicrotaskContext();
+
// If execution is terminating, just bail out.
if (result.is_null() && maybe_exception.is_null()) {
// Clear out any remaining callbacks in the queue.
@@ -3025,13 +3126,6 @@ void Isolate::RunMicrotasksInternal() {
set_pending_microtask_count(0);
return;
}
- } else {
- Handle<CallHandlerInfo> callback_info =
- Handle<CallHandlerInfo>::cast(microtask);
- v8::MicrotaskCallback callback =
- v8::ToCData<v8::MicrotaskCallback>(callback_info->callback());
- void* data = v8::ToCData<void*>(callback_info->data());
- callback(data);
}
});
}
@@ -3179,6 +3273,15 @@ void Isolate::IsolateInForegroundNotification() {
is_isolate_in_background_ = false;
}
+void Isolate::PrintWithTimestamp(const char* format, ...) {
+ base::OS::Print("[%d:%p] %8.0f ms: ", base::OS::GetCurrentProcessId(),
+ static_cast<void*>(this), time_millis_since_init());
+ va_list arguments;
+ va_start(arguments, format);
+ base::OS::VPrint(format, arguments);
+ va_end(arguments);
+}
+
bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
@@ -3190,6 +3293,21 @@ bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
return GetCurrentStackPosition() - gap < stack_guard->real_climit();
}
+SaveContext::SaveContext(Isolate* isolate)
+ : isolate_(isolate), prev_(isolate->save_context()) {
+ if (isolate->context() != NULL) {
+ context_ = Handle<Context>(isolate->context());
+ }
+ isolate->set_save_context(this);
+
+ c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
+}
+
+SaveContext::~SaveContext() {
+ isolate_->set_context(context_.is_null() ? NULL : *context_);
+ isolate_->set_save_context(prev_);
+}
+
#ifdef DEBUG
AssertNoContextChange::AssertNoContextChange(Isolate* isolate)
: isolate_(isolate), context_(isolate->context(), isolate) {}
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index eb1841d4d8..8d0d3b478f 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -23,13 +23,11 @@
#include "src/messages.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime/runtime.h"
-#include "src/tracing/trace-event.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace base {
-class AccountingAllocator;
class RandomNumberGenerator;
}
@@ -52,6 +50,7 @@ class Counters;
class CpuFeatures;
class CpuProfiler;
class DeoptimizerData;
+class DescriptorLookupCache;
class Deserializer;
class EmptyStatement;
class ExternalCallbackScope;
@@ -63,6 +62,7 @@ class HStatistics;
class HTracer;
class InlineRuntimeFunctionsTable;
class InnerPointerToCodeCache;
+class KeyedLookupCache;
class Logger;
class MaterializedObjectStore;
class OptimizingCompileDispatcher;
@@ -94,14 +94,6 @@ namespace interpreter {
class Interpreter;
}
-// Static indirection table for handles to constants. If a frame
-// element represents a constant, the data contains an index into
-// this table of handles to the actual constants.
-// Static indirection table for handles to constants. If a Result
-// represents a constant, the data contains an index into this table
-// of handles to the actual constants.
-typedef ZoneList<Handle<Object> > ZoneObjectList;
-
#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
do { \
Isolate* __isolate__ = (isolate); \
@@ -369,9 +361,9 @@ class ThreadLocalTop BASE_EMBEDDED {
#if USE_SIMULATOR
-#define ISOLATE_INIT_SIMULATOR_LIST(V) \
- V(bool, simulator_initialized, false) \
- V(base::HashMap*, simulator_i_cache, NULL) \
+#define ISOLATE_INIT_SIMULATOR_LIST(V) \
+ V(bool, simulator_initialized, false) \
+ V(base::CustomMatcherHashMap*, simulator_i_cache, NULL) \
V(Redirection*, simulator_redirection, NULL)
#else
@@ -629,6 +621,7 @@ class Isolate {
bool IsExternalHandlerOnTop(Object* exception);
inline bool is_catchable_by_javascript(Object* exception);
+ inline bool is_catchable_by_wasm(Object* exception);
// JS execution stack (see frames.h).
static Address c_entry_fp(ThreadLocalTop* thread) {
@@ -672,8 +665,14 @@ class Isolate {
// Push and pop a promise and the current try-catch handler.
void PushPromise(Handle<JSObject> promise);
void PopPromise();
+
+ // Return the relevant Promise that a throw/rejection pertains to, based
+ // on the contents of the Promise stack
Handle<Object> GetPromiseOnStackOnThrow();
+ // Heuristically guess whether a Promise is handled by user catch handler
+ bool PromiseHasUserDefinedRejectHandler(Handle<Object> promise);
+
class ExceptionScope {
public:
// Scope currently can only be used for regular exceptions,
@@ -750,7 +749,9 @@ class Isolate {
NOT_CAUGHT,
CAUGHT_BY_JAVASCRIPT,
CAUGHT_BY_EXTERNAL,
- CAUGHT_BY_DESUGARING
+ CAUGHT_BY_DESUGARING,
+ CAUGHT_BY_PROMISE,
+ CAUGHT_BY_ASYNC_AWAIT
};
CatchType PredictExceptionCatcher();
@@ -843,9 +844,6 @@ class Isolate {
DCHECK(counters_ != NULL);
return counters_;
}
- tracing::TraceEventStatsTable* trace_event_stats_table() {
- return &trace_event_stats_table_;
- }
RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
CompilationCache* compilation_cache() { return compilation_cache_; }
Logger* logger() {
@@ -889,7 +887,6 @@ class Isolate {
return handle_scope_implementer_;
}
Zone* runtime_zone() { return runtime_zone_; }
- Zone* interface_descriptor_zone() { return interface_descriptor_zone_; }
UnicodeCache* unicode_cache() {
return unicode_cache_;
@@ -1005,6 +1002,7 @@ class Isolate {
inline bool IsHasInstanceLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
+ inline bool IsStringLengthOverflowIntact();
// On intent to set an element in object, make sure that appropriate
// notifications occur if the set is on the elements of the array or
@@ -1023,6 +1021,7 @@ class Isolate {
void InvalidateArraySpeciesProtector();
void InvalidateHasInstanceProtector();
void InvalidateIsConcatSpreadableProtector();
+ void InvalidateStringLengthOverflowProtector();
// Returns true if array is the initial array prototype in any native context.
bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
@@ -1064,12 +1063,6 @@ class Isolate {
void* stress_deopt_count_address() { return &stress_deopt_count_; }
- void* virtual_handler_register_address() {
- return &virtual_handler_register_;
- }
-
- void* virtual_slot_register_address() { return &virtual_slot_register_; }
-
base::RandomNumberGenerator* random_number_generator();
// Given an address occupied by a live code object, return that object.
@@ -1108,6 +1101,9 @@ class Isolate {
void ReportPromiseReject(Handle<JSObject> promise, Handle<Object> value,
v8::PromiseRejectEvent event);
+ void PromiseResolveThenableJob(Handle<PromiseContainer> container,
+ MaybeHandle<Object>* result,
+ MaybeHandle<Object>* maybe_exception);
void EnqueueMicrotask(Handle<Object> microtask);
void RunMicrotasks();
bool IsRunningMicrotasks() const { return is_running_microtasks_; }
@@ -1153,7 +1149,7 @@ class Isolate {
interpreter::Interpreter* interpreter() const { return interpreter_; }
- base::AccountingAllocator* allocator() { return allocator_; }
+ AccountingAllocator* allocator() { return allocator_; }
bool IsInAnyContext(Object* object, uint32_t index);
@@ -1165,6 +1161,12 @@ class Isolate {
bool IsIsolateInBackground() { return is_isolate_in_background_; }
+ PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);
+
+#ifdef USE_SIMULATOR
+ base::Mutex* simulator_i_cache_mutex() { return &simulator_i_cache_mutex_; }
+#endif
+
protected:
explicit Isolate(bool enable_serializer);
bool IsArrayOrObjectPrototype(Object* object);
@@ -1303,7 +1305,6 @@ class Isolate {
RuntimeProfiler* runtime_profiler_;
CompilationCache* compilation_cache_;
Counters* counters_;
- tracing::TraceEventStatsTable trace_event_stats_table_;
base::RecursiveMutex break_access_;
Logger* logger_;
StackGuard stack_guard_;
@@ -1324,9 +1325,8 @@ class Isolate {
HandleScopeData handle_scope_data_;
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
- base::AccountingAllocator* allocator_;
+ AccountingAllocator* allocator_;
Zone* runtime_zone_;
- Zone* interface_descriptor_zone_;
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
GlobalHandles* global_handles_;
EternalHandles* eternal_handles_;
@@ -1407,9 +1407,6 @@ class Isolate {
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
- Address virtual_handler_register_;
- Address virtual_slot_register_;
-
int next_optimization_id_;
// Counts javascript calls from the API. Wraps around on overflow.
@@ -1443,6 +1440,10 @@ class Isolate {
v8::Isolate::AbortOnUncaughtExceptionCallback
abort_on_uncaught_exception_callback_;
+#ifdef USE_SIMULATOR
+ base::Mutex simulator_i_cache_mutex_;
+#endif
+
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class OptimizingCompileDispatcher;
@@ -1485,8 +1486,8 @@ class PromiseOnStack {
// versions of GCC. See V8 issue 122 for details.
class SaveContext BASE_EMBEDDED {
public:
- explicit inline SaveContext(Isolate* isolate);
- inline ~SaveContext();
+ explicit SaveContext(Isolate* isolate);
+ ~SaveContext();
Handle<Context> context() { return context_; }
SaveContext* prev() { return prev_; }
@@ -1496,8 +1497,6 @@ class SaveContext BASE_EMBEDDED {
return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
}
- Isolate* isolate() { return isolate_; }
-
private:
Isolate* const isolate_;
Handle<Context> context_;
diff --git a/deps/v8/src/js/async-await.js b/deps/v8/src/js/async-await.js
new file mode 100644
index 0000000000..b733f3d9fa
--- /dev/null
+++ b/deps/v8/src/js/async-await.js
@@ -0,0 +1,180 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils, extrasUtils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var AsyncFunctionNext;
+var AsyncFunctionThrow;
+var GlobalPromise;
+var IsPromise;
+var NewPromiseCapability;
+var PerformPromiseThen;
+var PromiseCreate;
+var PromiseNextMicrotaskID;
+var RejectPromise;
+var ResolvePromise;
+
+utils.Import(function(from) {
+ AsyncFunctionNext = from.AsyncFunctionNext;
+ AsyncFunctionThrow = from.AsyncFunctionThrow;
+ GlobalPromise = from.GlobalPromise;
+ IsPromise = from.IsPromise;
+ NewPromiseCapability = from.NewPromiseCapability;
+ PerformPromiseThen = from.PerformPromiseThen;
+ PromiseCreate = from.PromiseCreate;
+ PromiseNextMicrotaskID = from.PromiseNextMicrotaskID;
+ RejectPromise = from.RejectPromise;
+ ResolvePromise = from.ResolvePromise;
+});
+
+var promiseAsyncStackIDSymbol =
+ utils.ImportNow("promise_async_stack_id_symbol");
+var promiseHandledBySymbol =
+ utils.ImportNow("promise_handled_by_symbol");
+var promiseForwardingHandlerSymbol =
+ utils.ImportNow("promise_forwarding_handler_symbol");
+var promiseHandledHintSymbol =
+ utils.ImportNow("promise_handled_hint_symbol");
+var promiseHasHandlerSymbol =
+ utils.ImportNow("promise_has_handler_symbol");
+
+// -------------------------------------------------------------------
+
+function PromiseCastResolved(value) {
+ if (IsPromise(value)) {
+ return value;
+ } else {
+ var promise = PromiseCreate();
+ ResolvePromise(promise, value);
+ return promise;
+ }
+}
+
+// ES#abstract-ops-async-function-await
+// AsyncFunctionAwait ( value )
+// Shared logic for the core of await. The parser desugars
+// await awaited
+// into
+// yield AsyncFunctionAwait{Caught,Uncaught}(.generator, awaited, .promise)
+// The 'awaited' parameter is the value; the generator stands in
+// for the asyncContext, and .promise is the larger promise under
+// construction by the enclosing async function.
+function AsyncFunctionAwait(generator, awaited, outerPromise) {
+ // Promise.resolve(awaited).then(
+ // value => AsyncFunctionNext(value),
+ // error => AsyncFunctionThrow(error)
+ // );
+ var promise = PromiseCastResolved(awaited);
+
+ var onFulfilled = sentValue => {
+ %_Call(AsyncFunctionNext, generator, sentValue);
+ // The resulting Promise is a throwaway, so it doesn't matter what it
+ // resolves to. What is important is that we don't end up keeping the
+ // whole chain of intermediate Promises alive by returning the value
+ // of AsyncFunctionNext, as that would create a memory leak.
+ return;
+ };
+ var onRejected = sentError => {
+ %_Call(AsyncFunctionThrow, generator, sentError);
+ // Similarly, returning the huge Promise here would cause a long
+ // resolution chain to find what the exception to throw is, and
+ // create a similar memory leak, and it does not matter what
+ // sort of rejection this intermediate Promise becomes.
+ return;
+ }
+
+ // Just forwarding the exception, so no debugEvent for throwawayCapability
+ var throwawayCapability = NewPromiseCapability(GlobalPromise, false);
+
+ // The Promise will be thrown away and not handled, but it shouldn't trigger
+ // unhandled reject events as its work is done
+ SET_PRIVATE(throwawayCapability.promise, promiseHasHandlerSymbol, true);
+
+ if (DEBUG_IS_ACTIVE) {
+ if (IsPromise(awaited)) {
+ // Mark the reject handler callback to be a forwarding edge, rather
+ // than a meaningful catch handler
+ SET_PRIVATE(onRejected, promiseForwardingHandlerSymbol, true);
+ }
+
+ // Mark the dependency to outerPromise in case the throwaway Promise is
+ // found on the Promise stack
+ SET_PRIVATE(throwawayCapability.promise, promiseHandledBySymbol,
+ outerPromise);
+ }
+
+ PerformPromiseThen(promise, onFulfilled, onRejected, throwawayCapability);
+}
+
+// Called by the parser from the desugaring of 'await' when catch
+// prediction indicates no locally surrounding catch block
+function AsyncFunctionAwaitUncaught(generator, awaited, outerPromise) {
+ AsyncFunctionAwait(generator, awaited, outerPromise);
+}
+
+// Called by the parser from the desugaring of 'await' when catch
+// prediction indicates that there is a locally surrounding catch block
+function AsyncFunctionAwaitCaught(generator, awaited, outerPromise) {
+ if (DEBUG_IS_ACTIVE && IsPromise(awaited)) {
+ SET_PRIVATE(awaited, promiseHandledHintSymbol, true);
+ }
+ AsyncFunctionAwait(generator, awaited, outerPromise);
+}
+
+// How the parser rejects promises from async/await desugaring
+function RejectPromiseNoDebugEvent(promise, reason) {
+ return RejectPromise(promise, reason, false);
+}
+
+function AsyncFunctionPromiseCreate() {
+ var promise = PromiseCreate();
+ if (DEBUG_IS_ACTIVE) {
+ // Push the Promise under construction in an async function on
+ // the catch prediction stack to handle exceptions thrown before
+ // the first await.
+ %DebugPushPromise(promise);
+ // Assign ID and create a recurring task to save stack for future
+ // resumptions from await.
+ var id = PromiseNextMicrotaskID();
+ SET_PRIVATE(promise, promiseAsyncStackIDSymbol, id);
+ %DebugAsyncTaskEvent({
+ type: "enqueueRecurring",
+ id: id,
+ name: "async function",
+ });
+ }
+ return promise;
+}
+
+function AsyncFunctionPromiseRelease(promise) {
+ if (DEBUG_IS_ACTIVE) {
+ // Cancel
+ var id = GET_PRIVATE(promise, promiseAsyncStackIDSymbol);
+ %DebugAsyncTaskEvent({
+ type: "cancel",
+ id: id,
+ name: "async function",
+ });
+ // Pop the Promise under construction in an async function on
+ // from catch prediction stack.
+ %DebugPopPromise();
+ }
+}
+
+%InstallToContext([
+ "async_function_await_caught", AsyncFunctionAwaitCaught,
+ "async_function_await_uncaught", AsyncFunctionAwaitUncaught,
+ "reject_promise_no_debug_event", RejectPromiseNoDebugEvent,
+ "async_function_promise_create", AsyncFunctionPromiseCreate,
+ "async_function_promise_release", AsyncFunctionPromiseRelease,
+]);
+
+})
diff --git a/deps/v8/src/js/collection.js b/deps/v8/src/js/collection.js
index 83763af860..6fe880d913 100644
--- a/deps/v8/src/js/collection.js
+++ b/deps/v8/src/js/collection.js
@@ -16,7 +16,6 @@ var GlobalSet = global.Set;
var hashCodeSymbol = utils.ImportNow("hash_code_symbol");
var MathRandom;
var MapIterator;
-var NumberIsNaN;
var SetIterator;
var speciesSymbol = utils.ImportNow("species_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
@@ -24,7 +23,6 @@ var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
MathRandom = from.MathRandom;
MapIterator = from.MapIterator;
- NumberIsNaN = from.NumberIsNaN;
SetIterator = from.SetIterator;
});
@@ -42,9 +40,9 @@ function SetFindEntry(table, numBuckets, key, hash) {
if (entry === NOT_FOUND) return entry;
var candidate = ORDERED_HASH_SET_KEY_AT(table, entry, numBuckets);
if (key === candidate) return entry;
- var keyIsNaN = NumberIsNaN(key);
+ var keyIsNaN = NUMBER_IS_NAN(key);
while (true) {
- if (keyIsNaN && NumberIsNaN(candidate)) {
+ if (keyIsNaN && NUMBER_IS_NAN(candidate)) {
return entry;
}
entry = ORDERED_HASH_SET_CHAIN_AT(table, entry, numBuckets);
@@ -62,9 +60,9 @@ function MapFindEntry(table, numBuckets, key, hash) {
if (entry === NOT_FOUND) return entry;
var candidate = ORDERED_HASH_MAP_KEY_AT(table, entry, numBuckets);
if (key === candidate) return entry;
- var keyIsNaN = NumberIsNaN(key);
+ var keyIsNaN = NUMBER_IS_NAN(key);
while (true) {
- if (keyIsNaN && NumberIsNaN(candidate)) {
+ if (keyIsNaN && NUMBER_IS_NAN(candidate)) {
return entry;
}
entry = ORDERED_HASH_MAP_CHAIN_AT(table, entry, numBuckets);
diff --git a/deps/v8/src/js/datetime-format-to-parts.js b/deps/v8/src/js/datetime-format-to-parts.js
new file mode 100644
index 0000000000..3194f50672
--- /dev/null
+++ b/deps/v8/src/js/datetime-format-to-parts.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalIntl = global.Intl;
+var FormatDateToParts = utils.ImportNow("FormatDateToParts");
+
+utils.InstallFunctions(GlobalIntl.DateTimeFormat.prototype, DONT_ENUM, [
+ 'formatToParts', FormatDateToParts
+]);
+})
diff --git a/deps/v8/src/js/harmony-async-await.js b/deps/v8/src/js/harmony-async-await.js
deleted file mode 100644
index 3a48d0c100..0000000000
--- a/deps/v8/src/js/harmony-async-await.js
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils, extrasUtils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var AsyncFunctionNext;
-var AsyncFunctionThrow;
-var GlobalPromise;
-var NewPromiseCapability;
-var PerformPromiseThen;
-var PromiseCastResolved;
-
-utils.Import(function(from) {
- AsyncFunctionNext = from.AsyncFunctionNext;
- AsyncFunctionThrow = from.AsyncFunctionThrow;
- GlobalPromise = from.GlobalPromise;
- NewPromiseCapability = from.NewPromiseCapability;
- PromiseCastResolved = from.PromiseCastResolved;
- PerformPromiseThen = from.PerformPromiseThen;
-});
-
-// -------------------------------------------------------------------
-
-function AsyncFunctionAwait(generator, value) {
- // Promise.resolve(value).then(
- // value => AsyncFunctionNext(value),
- // error => AsyncFunctionThrow(error)
- // );
- var promise = PromiseCastResolved(value);
-
- var onFulfilled =
- (sentValue) => %_Call(AsyncFunctionNext, generator, sentValue);
- var onRejected =
- (sentError) => %_Call(AsyncFunctionThrow, generator, sentError);
-
- var throwawayCapability = NewPromiseCapability(GlobalPromise);
- return PerformPromiseThen(promise, onFulfilled, onRejected,
- throwawayCapability);
-}
-
-%InstallToContext([ "async_function_await", AsyncFunctionAwait ]);
-
-})
diff --git a/deps/v8/src/js/i18n.js b/deps/v8/src/js/i18n.js
index 6046a6f2f9..a397849395 100644
--- a/deps/v8/src/js/i18n.js
+++ b/deps/v8/src/js/i18n.js
@@ -19,7 +19,6 @@
var ArrayJoin;
var ArrayPush;
-var FLAG_intl_extra;
var GlobalDate = global.Date;
var GlobalNumber = global.Number;
var GlobalRegExp = global.RegExp;
@@ -29,31 +28,21 @@ var InstallGetter = utils.InstallGetter;
var InternalArray = utils.InternalArray;
var InternalRegExpMatch;
var InternalRegExpReplace
-var IsNaN;
var ObjectHasOwnProperty = utils.ImportNow("ObjectHasOwnProperty");
var OverrideFunction = utils.OverrideFunction;
var patternSymbol = utils.ImportNow("intl_pattern_symbol");
var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
var SetFunctionName = utils.SetFunctionName;
var StringIndexOf;
-var StringLastIndexOf;
-var StringSubstr;
-var StringSubstring;
+var StringSubstr = GlobalString.prototype.substr;
+var StringSubstring = GlobalString.prototype.substring;
utils.Import(function(from) {
ArrayJoin = from.ArrayJoin;
ArrayPush = from.ArrayPush;
- IsNaN = from.IsNaN;
InternalRegExpMatch = from.InternalRegExpMatch;
InternalRegExpReplace = from.InternalRegExpReplace;
StringIndexOf = from.StringIndexOf;
- StringLastIndexOf = from.StringLastIndexOf;
- StringSubstr = from.StringSubstr;
- StringSubstring = from.StringSubstring;
-});
-
-utils.ImportFromExperimental(function(from) {
- FLAG_intl_extra = from.FLAG_intl_extra;
});
// Utilities for definitions
@@ -318,7 +307,7 @@ function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
break;
}
// Truncate locale if possible, if not break.
- var pos = %_Call(StringLastIndexOf, locale, '-');
+ var pos = %StringLastIndexOf(locale, '-');
if (pos === -1) {
break;
}
@@ -441,7 +430,7 @@ function lookupMatcher(service, requestedLocales) {
return {'locale': locale, 'extension': extension, 'position': i};
}
// Truncate locale if possible.
- var pos = %_Call(StringLastIndexOf, locale, '-');
+ var pos = %StringLastIndexOf(locale, '-');
if (pos === -1) {
break;
}
@@ -1038,9 +1027,6 @@ function initializeCollator(collator, locales, options) {
// Writable, configurable and enumerable are set to false by default.
%MarkAsInitializedIntlObjectOfType(collator, 'collator', internalCollator);
collator[resolvedSymbol] = resolved;
- if (FLAG_intl_extra) {
- %object_define_property(collator, 'resolved', resolvedAccessor);
- }
return collator;
}
@@ -1282,10 +1268,6 @@ function initializeNumberFormat(numberFormat, locales, options) {
%MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat', formatter);
numberFormat[resolvedSymbol] = resolved;
- if (FLAG_intl_extra) {
- %object_define_property(resolved, 'pattern', patternAccessor);
- %object_define_property(numberFormat, 'resolved', resolvedAccessor);
- }
return numberFormat;
}
@@ -1388,14 +1370,6 @@ function formatNumber(formatter, value) {
}
-/**
- * Returns a Number that represents string value that was passed in.
- */
-function IntlParseNumber(formatter, value) {
- return %InternalNumberParse(%GetImplFromInitializedIntlObject(formatter),
- TO_STRING(value));
-}
-
AddBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1, 'numberformat');
/**
@@ -1676,10 +1650,6 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
%MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat', formatter);
dateFormat[resolvedSymbol] = resolved;
- if (FLAG_intl_extra) {
- %object_define_property(resolved, 'pattern', patternAccessor);
- %object_define_property(dateFormat, 'resolved', resolvedAccessor);
- }
return dateFormat;
}
@@ -1797,18 +1767,29 @@ function formatDate(formatter, dateValue) {
new GlobalDate(dateMs));
}
+function FormatDateToParts(dateValue) {
+ if (!IS_UNDEFINED(new.target)) {
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
+ }
+ CHECK_OBJECT_COERCIBLE(this, "Intl.DateTimeFormat.prototype.formatToParts");
+ if (!IS_OBJECT(this)) {
+ throw %make_type_error(kCalledOnNonObject, this);
+ }
+ var dateMs;
+ if (IS_UNDEFINED(dateValue)) {
+ dateMs = %DateCurrentTime();
+ } else {
+ dateMs = TO_NUMBER(dateValue);
+ }
+
+ if (!NUMBER_IS_FINITE(dateMs)) throw %make_range_error(kDateRange);
-/**
- * Returns a Date object representing the result of calling ToString(value)
- * according to the effective locale and the formatting options of this
- * DateTimeFormat.
- * Returns undefined if date string cannot be parsed.
- */
-function IntlParseDate(formatter, value) {
- return %InternalDateParse(%GetImplFromInitializedIntlObject(formatter),
- TO_STRING(value));
+ return %InternalDateFormatToParts(
+ %GetImplFromInitializedIntlObject(this), new GlobalDate(dateMs));
}
+%FunctionSetLength(FormatDateToParts, 0);
+
// 0 because date is optional argument.
AddBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0, 'dateformat');
@@ -1889,9 +1870,6 @@ function initializeBreakIterator(iterator, locales, options) {
%MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator',
internalIterator);
iterator[resolvedSymbol] = resolved;
- if (FLAG_intl_extra) {
- %object_define_property(iterator, 'resolved', resolvedAccessor);
- }
return iterator;
}
@@ -2227,7 +2205,8 @@ function toLocaleDateTime(date, locales, options, required, defaults, service) {
throw %make_type_error(kMethodInvokedOnWrongType, "Date");
}
- if (IsNaN(date)) return 'Invalid Date';
+ var dateValue = TO_NUMBER(date);
+ if (NUMBER_IS_NAN(dateValue)) return 'Invalid Date';
var internalOptions = toDateTimeOptions(options, required, defaults);
@@ -2291,10 +2270,10 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
}
);
+%FunctionRemovePrototype(FormatDateToParts);
+
utils.Export(function(to) {
- to.AddBoundMethod = AddBoundMethod;
- to.IntlParseDate = IntlParseDate;
- to.IntlParseNumber = IntlParseNumber;
+ to.FormatDateToParts = FormatDateToParts;
});
})
diff --git a/deps/v8/src/js/intl-extra.js b/deps/v8/src/js/intl-extra.js
deleted file mode 100644
index a4d22568b9..0000000000
--- a/deps/v8/src/js/intl-extra.js
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-var GlobalIntl = global.Intl;
-
-var AddBoundMethod = utils.ImportNow("AddBoundMethod");
-var IntlParseDate = utils.ImportNow("IntlParseDate");
-var IntlParseNumber = utils.ImportNow("IntlParseNumber");
-
-AddBoundMethod(GlobalIntl.DateTimeFormat, 'v8Parse', IntlParseDate, 1,
- 'dateformat');
-AddBoundMethod(GlobalIntl.NumberFormat, 'v8Parse', IntlParseNumber, 1,
- 'numberformat');
-
-})
diff --git a/deps/v8/src/js/iterator-prototype.js b/deps/v8/src/js/iterator-prototype.js
deleted file mode 100644
index 6f2501979d..0000000000
--- a/deps/v8/src/js/iterator-prototype.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
- "use strict";
- %CheckIsBootstrapping();
-
- var GlobalObject = global.Object;
- var IteratorPrototype = utils.ImportNow("IteratorPrototype");
- var iteratorSymbol = utils.ImportNow("iterator_symbol");
-
- // 25.1.2.1 %IteratorPrototype% [ @@iterator ] ( )
- function IteratorPrototypeIterator() {
- return this;
- }
-
- utils.SetFunctionName(IteratorPrototypeIterator, iteratorSymbol);
- %AddNamedProperty(IteratorPrototype, iteratorSymbol,
- IteratorPrototypeIterator, DONT_ENUM);
-})
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index bb818791a0..8a07a4cb07 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -120,7 +120,7 @@ function InstallGetterSetter(object, name, getter, setter, attributes) {
SetFunctionName(setter, name, "set");
%FunctionRemovePrototype(getter);
%FunctionRemovePrototype(setter);
- %DefineAccessorPropertyUnchecked(object, name, getter, setter, DONT_ENUM);
+ %DefineAccessorPropertyUnchecked(object, name, getter, setter, attributes);
%SetNativeFlag(getter);
%SetNativeFlag(setter);
}
@@ -181,32 +181,15 @@ function PostNatives(utils) {
// Whitelist of exports from normal natives to experimental natives and debug.
var expose_list = [
- "AddBoundMethod",
"ArrayToString",
- "AsyncFunctionNext",
- "AsyncFunctionThrow",
+ "FormatDateToParts",
"GetIterator",
"GetMethod",
- "GlobalPromise",
- "IntlParseDate",
- "IntlParseNumber",
- "IsNaN",
"MapEntries",
"MapIterator",
"MapIteratorNext",
"MaxSimple",
"MinSimple",
- "NewPromiseCapability",
- "NumberIsInteger",
- "PerformPromiseThen",
- "PromiseCastResolved",
- "PromiseThen",
- "RegExpSubclassExecJS",
- "RegExpSubclassMatch",
- "RegExpSubclassReplace",
- "RegExpSubclassSearch",
- "RegExpSubclassSplit",
- "RegExpSubclassTest",
"SetIterator",
"SetIteratorNext",
"SetValues",
@@ -218,11 +201,11 @@ function PostNatives(utils) {
// From runtime:
"is_concat_spreadable_symbol",
"iterator_symbol",
- "promise_result_symbol",
- "promise_state_symbol",
"object_freeze",
"object_is_frozen",
"object_is_sealed",
+ "promise_result_symbol",
+ "promise_state_symbol",
"reflect_apply",
"reflect_construct",
"regexp_flags_symbol",
diff --git a/deps/v8/src/js/promise.js b/deps/v8/src/js/promise.js
index b50fc80b30..793d60fb0a 100644
--- a/deps/v8/src/js/promise.js
+++ b/deps/v8/src/js/promise.js
@@ -12,8 +12,12 @@
// Imports
var InternalArray = utils.InternalArray;
-var promiseCombinedDeferredSymbol =
- utils.ImportNow("promise_combined_deferred_symbol");
+var promiseAsyncStackIDSymbol =
+ utils.ImportNow("promise_async_stack_id_symbol");
+var promiseHandledBySymbol =
+ utils.ImportNow("promise_handled_by_symbol");
+var promiseForwardingHandlerSymbol =
+ utils.ImportNow("promise_forwarding_handler_symbol");
var promiseHasHandlerSymbol =
utils.ImportNow("promise_has_handler_symbol");
var promiseRejectReactionsSymbol =
@@ -22,14 +26,18 @@ var promiseFulfillReactionsSymbol =
utils.ImportNow("promise_fulfill_reactions_symbol");
var promiseDeferredReactionsSymbol =
utils.ImportNow("promise_deferred_reactions_symbol");
+var promiseHandledHintSymbol =
+ utils.ImportNow("promise_handled_hint_symbol");
var promiseRawSymbol = utils.ImportNow("promise_raw_symbol");
var promiseStateSymbol = utils.ImportNow("promise_state_symbol");
var promiseResultSymbol = utils.ImportNow("promise_result_symbol");
var SpeciesConstructor;
var speciesSymbol = utils.ImportNow("species_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+var ObjectHasOwnProperty;
utils.Import(function(from) {
+ ObjectHasOwnProperty = from.ObjectHasOwnProperty;
SpeciesConstructor = from.SpeciesConstructor;
});
@@ -42,9 +50,13 @@ const kRejected = -1;
var lastMicrotaskId = 0;
+function PromiseNextMicrotaskID() {
+ return ++lastMicrotaskId;
+}
+
// ES#sec-createresolvingfunctions
// CreateResolvingFunctions ( promise )
-function CreateResolvingFunctions(promise) {
+function CreateResolvingFunctions(promise, debugEvent) {
var alreadyResolved = false;
// ES#sec-promise-resolve-functions
@@ -60,7 +72,7 @@ function CreateResolvingFunctions(promise) {
var reject = reason => {
if (alreadyResolved === true) return;
alreadyResolved = true;
- RejectPromise(promise, reason);
+ RejectPromise(promise, reason, debugEvent);
};
return {
@@ -83,7 +95,8 @@ var GlobalPromise = function Promise(executor) {
}
var promise = PromiseInit(%_NewObject(GlobalPromise, new.target));
- var callbacks = CreateResolvingFunctions(promise);
+ // Calling the reject function would be a new exception, so debugEvent = true
+ var callbacks = CreateResolvingFunctions(promise, true);
var debug_is_active = DEBUG_IS_ACTIVE;
try {
if (debug_is_active) %DebugPushPromise(promise);
@@ -182,9 +195,24 @@ function PromiseEnqueue(value, tasks, deferreds, status) {
}
});
if (instrumenting) {
- id = ++lastMicrotaskId;
- name = status === kFulfilled ? "Promise.resolve" : "Promise.reject";
- %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
+ // In an async function, reuse the existing stack related to the outer
+ // Promise. Otherwise, e.g. in a direct call to then, save a new stack.
+ // Promises with multiple reactions with one or more of them being async
+ // functions will not get a good stack trace, as async functions require
+ // different stacks from direct Promise use, but we save and restore a
+ // stack once for all reactions. TODO(littledan): Improve this case.
+ if (!IS_UNDEFINED(deferreds) &&
+ HAS_PRIVATE(deferreds.promise, promiseHandledBySymbol) &&
+ HAS_PRIVATE(GET_PRIVATE(deferreds.promise, promiseHandledBySymbol),
+ promiseAsyncStackIDSymbol)) {
+ id = GET_PRIVATE(GET_PRIVATE(deferreds.promise, promiseHandledBySymbol),
+ promiseAsyncStackIDSymbol);
+ name = "async function";
+ } else {
+ id = PromiseNextMicrotaskID();
+ name = status === kFulfilled ? "Promise.resolve" : "Promise.reject";
+ %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
+ }
}
}
@@ -209,16 +237,16 @@ function PromiseAttachCallbacks(promise, deferred, onResolve, onReject) {
SET_PRIVATE(promise, promiseFulfillReactionsSymbol, resolveCallbacks);
SET_PRIVATE(promise, promiseRejectReactionsSymbol, rejectCallbacks);
+ SET_PRIVATE(promise, promiseDeferredReactionsSymbol, UNDEFINED);
} else {
maybeResolveCallbacks.push(onResolve, deferred);
GET_PRIVATE(promise, promiseRejectReactionsSymbol).push(onReject, deferred);
}
}
-function PromiseIdResolveHandler(x) { return x }
-function PromiseIdRejectHandler(r) { throw r }
-
-function PromiseNopResolver() {}
+function PromiseIdResolveHandler(x) { return x; }
+function PromiseIdRejectHandler(r) { %_ReThrow(r); }
+SET_PRIVATE(PromiseIdRejectHandler, promiseForwardingHandlerSymbol, true);
// -------------------------------------------------------------------
// Define exported functions.
@@ -231,21 +259,23 @@ function IsPromise(x) {
}
function PromiseCreate() {
- return new GlobalPromise(PromiseNopResolver)
+ return PromiseInit(new GlobalPromise(promiseRawSymbol));
}
// ES#sec-promise-resolve-functions
// Promise Resolve Functions, steps 6-13
function ResolvePromise(promise, resolution) {
if (resolution === promise) {
- return RejectPromise(promise, %make_type_error(kPromiseCyclic, resolution));
+ return RejectPromise(promise,
+ %make_type_error(kPromiseCyclic, resolution),
+ true);
}
if (IS_RECEIVER(resolution)) {
// 25.4.1.3.2 steps 8-12
try {
var then = resolution.then;
} catch (e) {
- return RejectPromise(promise, e);
+ return RejectPromise(promise, e, true);
}
// Resolution is a native promise and if it's already resolved or
@@ -268,63 +298,80 @@ function ResolvePromise(promise, resolution) {
// Revoke previously triggered reject event.
%PromiseRevokeReject(resolution);
}
- RejectPromise(promise, thenableValue);
+ // Don't cause a debug event as this case is forwarding a rejection
+ RejectPromise(promise, thenableValue, false);
SET_PRIVATE(resolution, promiseHasHandlerSymbol, true);
return;
}
}
if (IS_CALLABLE(then)) {
- // PromiseResolveThenableJob
- var id;
- var name = "PromiseResolveThenableJob";
+ var callbacks = CreateResolvingFunctions(promise, false);
+ var id, before_debug_event, after_debug_event;
var instrumenting = DEBUG_IS_ACTIVE;
- %EnqueueMicrotask(function() {
- if (instrumenting) {
- %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
- }
- var callbacks = CreateResolvingFunctions(promise);
- try {
- %_Call(then, resolution, callbacks.resolve, callbacks.reject);
- } catch (e) {
- %_Call(callbacks.reject, UNDEFINED, e);
- }
- if (instrumenting) {
- %DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name });
- }
- });
if (instrumenting) {
- id = ++lastMicrotaskId;
- %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
+ if (IsPromise(resolution)) {
+ // Mark the dependency of the new promise on the resolution
+ SET_PRIVATE(resolution, promiseHandledBySymbol, promise);
+ }
+ id = PromiseNextMicrotaskID();
+ before_debug_event = {
+ type: "willHandle",
+ id: id,
+ name: "PromiseResolveThenableJob"
+ };
+ after_debug_event = {
+ type: "didHandle",
+ id: id,
+ name: "PromiseResolveThenableJob"
+ };
+ %DebugAsyncTaskEvent({
+ type: "enqueue",
+ id: id,
+ name: "PromiseResolveThenableJob"
+ });
}
+ %EnqueuePromiseResolveThenableJob(
+ resolution, then, callbacks.resolve, callbacks.reject,
+ before_debug_event, after_debug_event);
return;
}
}
- FulfillPromise(promise, kFulfilled, resolution, promiseFulfillReactionsSymbol);
+ FulfillPromise(promise, kFulfilled, resolution,
+ promiseFulfillReactionsSymbol);
}
// ES#sec-rejectpromise
// RejectPromise ( promise, reason )
-function RejectPromise(promise, reason) {
+function RejectPromise(promise, reason, debugEvent) {
// Check promise status to confirm that this reject has an effect.
// Call runtime for callbacks to the debugger or for unhandled reject.
+ // The debugEvent parameter sets whether a debug ExceptionEvent should
+ // be triggered. It should be set to false when forwarding a rejection
+ // rather than creating a new one.
if (GET_PRIVATE(promise, promiseStateSymbol) === kPending) {
- var debug_is_active = DEBUG_IS_ACTIVE;
- if (debug_is_active ||
+ // This check is redundant with checks in the runtime, but it may help
+ // avoid unnecessary runtime calls.
+ if ((debugEvent && DEBUG_IS_ACTIVE) ||
!HAS_DEFINED_PRIVATE(promise, promiseHasHandlerSymbol)) {
- %PromiseRejectEvent(promise, reason, debug_is_active);
+ %PromiseRejectEvent(promise, reason, debugEvent);
}
}
FulfillPromise(promise, kRejected, reason, promiseRejectReactionsSymbol)
}
+// Export to bindings
+function DoRejectPromise(promise, reason) {
+ return RejectPromise(promise, reason, true);
+}
+
// ES#sec-newpromisecapability
// NewPromiseCapability ( C )
-function NewPromiseCapability(C) {
+function NewPromiseCapability(C, debugEvent) {
if (C === GlobalPromise) {
// Optimized case, avoid extra closure.
- var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
- var callbacks = CreateResolvingFunctions(promise);
+ var promise = PromiseCreate();
+ var callbacks = CreateResolvingFunctions(promise, debugEvent);
return {
promise: promise,
resolve: callbacks.resolve,
@@ -355,39 +402,17 @@ function PromiseReject(r) {
if (this === GlobalPromise) {
// Optimized case, avoid extra closure.
var promise = PromiseCreateAndSet(kRejected, r);
- // The debug event for this would always be an uncaught promise reject,
- // which is usually simply noise. Do not trigger that debug event.
- %PromiseRejectEvent(promise, r, false);
+ // Trigger debug events if the debugger is on, as Promise.reject is
+ // equivalent to throwing an exception directly.
+ %PromiseRejectEventFromStack(promise, r);
return promise;
} else {
- var promiseCapability = NewPromiseCapability(this);
+ var promiseCapability = NewPromiseCapability(this, true);
%_Call(promiseCapability.reject, UNDEFINED, r);
return promiseCapability.promise;
}
}
-// Shortcut Promise.reject and Promise.resolve() implementations, used by
-// Async Functions implementation.
-function PromiseCreateRejected(r) {
- return %_Call(PromiseReject, GlobalPromise, r);
-}
-
-function PromiseCreateResolved(value) {
- var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
- var resolveResult = ResolvePromise(promise, value);
- return promise;
-}
-
-function PromiseCastResolved(value) {
- if (IsPromise(value)) {
- return value;
- } else {
- var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
- var resolveResult = ResolvePromise(promise, value);
- return promise;
- }
-}
-
function PerformPromiseThen(promise, onResolve, onReject, resultCapability) {
if (!IS_CALLABLE(onResolve)) onResolve = PromiseIdResolveHandler;
if (!IS_CALLABLE(onReject)) onReject = PromiseIdRejectHandler;
@@ -427,7 +452,9 @@ function PromiseThen(onResolve, onReject) {
}
var constructor = SpeciesConstructor(this, GlobalPromise);
- var resultCapability = NewPromiseCapability(constructor);
+ // Pass false for debugEvent so .then chaining does not trigger
+ // redundant ExceptionEvents.
+ var resultCapability = NewPromiseCapability(constructor, false);
return PerformPromiseThen(this, onResolve, onReject, resultCapability);
}
@@ -449,12 +476,13 @@ function PromiseResolve(x) {
// Avoid creating resolving functions.
if (this === GlobalPromise) {
- var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
+ var promise = PromiseCreate();
var resolveResult = ResolvePromise(promise, x);
return promise;
}
- var promiseCapability = NewPromiseCapability(this);
+ // debugEvent is not so meaningful here as it will be resolved
+ var promiseCapability = NewPromiseCapability(this, true);
var resolveResult = %_Call(promiseCapability.resolve, UNDEFINED, x);
return promiseCapability.promise;
}
@@ -466,10 +494,19 @@ function PromiseAll(iterable) {
throw %make_type_error(kCalledOnNonObject, "Promise.all");
}
- var deferred = NewPromiseCapability(this);
+ // false debugEvent so that forwarding the rejection through all does not
+ // trigger redundant ExceptionEvents
+ var deferred = NewPromiseCapability(this, false);
var resolutions = new InternalArray();
var count;
+ // For catch prediction, don't treat the .then calls as handling it;
+ // instead, recurse outwards.
+ var instrumenting = DEBUG_IS_ACTIVE;
+ if (instrumenting) {
+ SET_PRIVATE(deferred.reject, promiseForwardingHandlerSymbol, true);
+ }
+
function CreateResolveElementFunction(index, values, promiseCapability) {
var alreadyCalled = false;
return (x) => {
@@ -490,10 +527,14 @@ function PromiseAll(iterable) {
for (var value of iterable) {
var nextPromise = this.resolve(value);
++count;
- nextPromise.then(
+ var throwawayPromise = nextPromise.then(
CreateResolveElementFunction(i, resolutions, deferred),
deferred.reject);
- SET_PRIVATE(deferred.reject, promiseCombinedDeferredSymbol, deferred);
+ // For catch prediction, mark that rejections here are semantically
+ // handled by the combined Promise.
+ if (instrumenting && IsPromise(throwawayPromise)) {
+ SET_PRIVATE(throwawayPromise, promiseHandledBySymbol, deferred.promise);
+ }
++i;
}
@@ -517,11 +558,26 @@ function PromiseRace(iterable) {
throw %make_type_error(kCalledOnNonObject, PromiseRace);
}
- var deferred = NewPromiseCapability(this);
+ // false debugEvent so that forwarding the rejection through race does not
+ // trigger redundant ExceptionEvents
+ var deferred = NewPromiseCapability(this, false);
+
+ // For catch prediction, don't treat the .then calls as handling it;
+ // instead, recurse outwards.
+ var instrumenting = DEBUG_IS_ACTIVE;
+ if (instrumenting) {
+ SET_PRIVATE(deferred.reject, promiseForwardingHandlerSymbol, true);
+ }
+
try {
for (var value of iterable) {
- this.resolve(value).then(deferred.resolve, deferred.reject);
- SET_PRIVATE(deferred.reject, promiseCombinedDeferredSymbol, deferred);
+ var throwawayPromise = this.resolve(value).then(deferred.resolve,
+ deferred.reject);
+ // For catch prediction, mark that rejections here are semantically
+ // handled by the combined Promise.
+ if (instrumenting && IsPromise(throwawayPromise)) {
+ SET_PRIVATE(throwawayPromise, promiseHandledBySymbol, deferred.promise);
+ }
}
} catch (e) {
deferred.reject(e)
@@ -533,29 +589,48 @@ function PromiseRace(iterable) {
// Utility for debugger
function PromiseHasUserDefinedRejectHandlerCheck(handler, deferred) {
- if (handler !== PromiseIdRejectHandler) {
- var combinedDeferred = GET_PRIVATE(handler, promiseCombinedDeferredSymbol);
- if (IS_UNDEFINED(combinedDeferred)) return true;
- if (PromiseHasUserDefinedRejectHandlerRecursive(combinedDeferred.promise)) {
- return true;
- }
- } else if (PromiseHasUserDefinedRejectHandlerRecursive(deferred.promise)) {
- return true;
+ // Recurse to the forwarding Promise, if any. This may be due to
+ // - await reaction forwarding to the throwaway Promise, which has
+ // a dependency edge to the outer Promise.
+ // - PromiseIdResolveHandler forwarding to the output of .then
+ // - Promise.all/Promise.race forwarding to a throwaway Promise, which
+ // has a dependency edge to the generated outer Promise.
+ if (GET_PRIVATE(handler, promiseForwardingHandlerSymbol)) {
+ return PromiseHasUserDefinedRejectHandlerRecursive(deferred.promise);
}
- return false;
+
+ // Otherwise, this is a real reject handler for the Promise
+ return true;
}
function PromiseHasUserDefinedRejectHandlerRecursive(promise) {
+ // If this promise was marked as being handled by a catch block
+ // in an async function, then it has a user-defined reject handler.
+ if (GET_PRIVATE(promise, promiseHandledHintSymbol)) return true;
+
+ // If this Promise is subsumed by another Promise (a Promise resolved
+ // with another Promise, or an intermediate, hidden, throwaway Promise
+ // within async/await), then recurse on the outer Promise.
+ // In this case, the dependency is one possible way that the Promise
+ // could be resolved, so it does not subsume the other following cases.
+ var outerPromise = GET_PRIVATE(promise, promiseHandledBySymbol);
+ if (outerPromise &&
+ PromiseHasUserDefinedRejectHandlerRecursive(outerPromise)) {
+ return true;
+ }
+
var queue = GET_PRIVATE(promise, promiseRejectReactionsSymbol);
var deferreds = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
+
if (IS_UNDEFINED(queue)) return false;
+
if (!IS_ARRAY(queue)) {
return PromiseHasUserDefinedRejectHandlerCheck(queue, deferreds);
- } else {
- for (var i = 0; i < queue.length; i += 2) {
- if (PromiseHasUserDefinedRejectHandlerCheck(queue[i], queue[i + 1])) {
- return true;
- }
+ }
+
+ for (var i = 0; i < queue.length; i += 2) {
+ if (PromiseHasUserDefinedRejectHandlerCheck(queue[i], queue[i + 1])) {
+ return true;
}
}
return false;
@@ -564,6 +639,8 @@ function PromiseHasUserDefinedRejectHandlerRecursive(promise) {
// Return whether the promise will be handled by a user-defined reject
// handler somewhere down the promise chain. For this, we do a depth-first
// search for a reject handler that's not the default PromiseIdRejectHandler.
+// This function also traverses dependencies of one Promise on another,
+// set up through async/await and Promises resolved with Promises.
function PromiseHasUserDefinedRejectHandler() {
return PromiseHasUserDefinedRejectHandlerRecursive(this);
};
@@ -598,11 +675,9 @@ utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
"promise_catch", PromiseCatch,
"promise_create", PromiseCreate,
"promise_has_user_defined_reject_handler", PromiseHasUserDefinedRejectHandler,
- "promise_reject", RejectPromise,
+ "promise_reject", DoRejectPromise,
"promise_resolve", ResolvePromise,
- "promise_then", PromiseThen,
- "promise_create_rejected", PromiseCreateRejected,
- "promise_create_resolved", PromiseCreateResolved
+ "promise_then", PromiseThen
]);
// This allows extras to create promises quickly without building extra
@@ -611,16 +686,20 @@ utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
utils.InstallFunctions(extrasUtils, 0, [
"createPromise", PromiseCreate,
"resolvePromise", ResolvePromise,
- "rejectPromise", RejectPromise
+ "rejectPromise", DoRejectPromise
]);
utils.Export(function(to) {
- to.PromiseCastResolved = PromiseCastResolved;
+ to.IsPromise = IsPromise;
+ to.PromiseCreate = PromiseCreate;
to.PromiseThen = PromiseThen;
+ to.PromiseNextMicrotaskID = PromiseNextMicrotaskID;
to.GlobalPromise = GlobalPromise;
to.NewPromiseCapability = NewPromiseCapability;
to.PerformPromiseThen = PerformPromiseThen;
+ to.ResolvePromise = ResolvePromise;
+ to.RejectPromise = RejectPromise;
});
})
diff --git a/deps/v8/src/js/regexp.js b/deps/v8/src/js/regexp.js
index dbe4837c64..49da45b84c 100644
--- a/deps/v8/src/js/regexp.js
+++ b/deps/v8/src/js/regexp.js
@@ -4,20 +4,22 @@
(function(global, utils) {
+'use strict';
+
%CheckIsBootstrapping();
// -------------------------------------------------------------------
// Imports
-var ExpandReplacement;
var GlobalArray = global.Array;
var GlobalObject = global.Object;
var GlobalRegExp = global.RegExp;
-var GlobalRegExpPrototype;
+var GlobalRegExpPrototype = GlobalRegExp.prototype;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
var MaxSimple;
var MinSimple;
+var RegExpExecJS = GlobalRegExp.prototype.exec;
var matchSymbol = utils.ImportNow("match_symbol");
var replaceSymbol = utils.ImportNow("replace_symbol");
var searchSymbol = utils.ImportNow("search_symbol");
@@ -26,7 +28,6 @@ var splitSymbol = utils.ImportNow("split_symbol");
var SpeciesConstructor;
utils.Import(function(from) {
- ExpandReplacement = from.ExpandReplacement;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
SpeciesConstructor = from.SpeciesConstructor;
@@ -80,37 +81,6 @@ function PatternFlags(pattern) {
}
-// ES#sec-regexp-pattern-flags
-// RegExp ( pattern, flags )
-function RegExpConstructor(pattern, flags) {
- var newtarget = new.target;
- var pattern_is_regexp = IsRegExp(pattern);
-
- if (IS_UNDEFINED(newtarget)) {
- newtarget = GlobalRegExp;
-
- // ES6 section 21.2.3.1 step 3.b
- if (pattern_is_regexp && IS_UNDEFINED(flags) &&
- pattern.constructor === newtarget) {
- return pattern;
- }
- }
-
- if (IS_REGEXP(pattern)) {
- if (IS_UNDEFINED(flags)) flags = PatternFlags(pattern);
- pattern = REGEXP_SOURCE(pattern);
-
- } else if (pattern_is_regexp) {
- var input_pattern = pattern;
- pattern = pattern.source;
- if (IS_UNDEFINED(flags)) flags = input_pattern.flags;
- }
-
- var object = %_NewObject(GlobalRegExp, newtarget);
- return RegExpInitialize(object, pattern, flags);
-}
-
-
// ES#sec-regexp.prototype.compile RegExp.prototype.compile (pattern, flags)
function RegExpCompileJS(pattern, flags) {
if (!IS_REGEXP(this)) {
@@ -163,105 +133,6 @@ macro RETURN_NEW_RESULT_FROM_MATCH_INFO(MATCHINFO, STRING)
endmacro
-function RegExpExecNoTests(regexp, string, start) {
- // Must be called with RegExp, string and positive integer as arguments.
- var matchInfo = %_RegExpExec(regexp, string, start, RegExpLastMatchInfo);
- if (matchInfo !== null) {
- // ES6 21.2.5.2.2 step 18.
- if (REGEXP_STICKY(regexp)) regexp.lastIndex = matchInfo[CAPTURE1];
- RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, string);
- }
- regexp.lastIndex = 0;
- return null;
-}
-
-
-// ES#sec-regexp.prototype.exec
-// RegExp.prototype.exec ( string )
-function RegExpSubclassExecJS(string) {
- if (!IS_REGEXP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'RegExp.prototype.exec', this);
- }
-
- string = TO_STRING(string);
- var lastIndex = this.lastIndex;
-
- // Conversion is required by the ES2015 specification (RegExpBuiltinExec
- // algorithm, step 4) even if the value is discarded for non-global RegExps.
- var i = TO_LENGTH(lastIndex);
-
- var global = TO_BOOLEAN(REGEXP_GLOBAL(this));
- var sticky = TO_BOOLEAN(REGEXP_STICKY(this));
- var updateLastIndex = global || sticky;
- if (updateLastIndex) {
- if (i > string.length) {
- this.lastIndex = 0;
- return null;
- }
- } else {
- i = 0;
- }
-
- // matchIndices is either null or the RegExpLastMatchInfo array.
- // TODO(littledan): Whether a RegExp is sticky is compiled into the RegExp
- // itself, but ES2015 allows monkey-patching this property to differ from
- // the internal flags. If it differs, recompile a different RegExp?
- var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
-
- if (IS_NULL(matchIndices)) {
- this.lastIndex = 0;
- return null;
- }
-
- // Successful match.
- if (updateLastIndex) {
- this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
- }
- RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string);
-}
-%FunctionRemovePrototype(RegExpSubclassExecJS);
-
-
-// Legacy implementation of RegExp.prototype.exec
-function RegExpExecJS(string) {
- if (!IS_REGEXP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'RegExp.prototype.exec', this);
- }
-
- string = TO_STRING(string);
- var lastIndex = this.lastIndex;
-
- // Conversion is required by the ES2015 specification (RegExpBuiltinExec
- // algorithm, step 4) even if the value is discarded for non-global RegExps.
- var i = TO_LENGTH(lastIndex);
-
- var updateLastIndex = REGEXP_GLOBAL(this) || REGEXP_STICKY(this);
- if (updateLastIndex) {
- if (i < 0 || i > string.length) {
- this.lastIndex = 0;
- return null;
- }
- } else {
- i = 0;
- }
-
- // matchIndices is either null or the RegExpLastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
-
- if (IS_NULL(matchIndices)) {
- this.lastIndex = 0;
- return null;
- }
-
- // Successful match.
- if (updateLastIndex) {
- this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
- }
- RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string);
-}
-
// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
// Also takes an optional exec method in case our caller
@@ -282,65 +153,6 @@ function RegExpSubclassExec(regexp, string, exec) {
%SetForceInlineFlag(RegExpSubclassExec);
-// One-element cache for the simplified test regexp.
-var regexp_key;
-var regexp_val;
-
-// Legacy implementation of RegExp.prototype.test
-// Section 15.10.6.3 doesn't actually make sense, but the intention seems to be
-// that test is defined in terms of String.prototype.exec. However, it probably
-// means the original value of String.prototype.exec, which is what everybody
-// else implements.
-function RegExpTest(string) {
- if (!IS_REGEXP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'RegExp.prototype.test', this);
- }
- string = TO_STRING(string);
-
- var lastIndex = this.lastIndex;
-
- // Conversion is required by the ES2015 specification (RegExpBuiltinExec
- // algorithm, step 4) even if the value is discarded for non-global RegExps.
- var i = TO_LENGTH(lastIndex);
-
- if (REGEXP_GLOBAL(this) || REGEXP_STICKY(this)) {
- if (i < 0 || i > string.length) {
- this.lastIndex = 0;
- return false;
- }
- // matchIndices is either null or the RegExpLastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
- if (IS_NULL(matchIndices)) {
- this.lastIndex = 0;
- return false;
- }
- this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
- return true;
- } else {
- // Non-global, non-sticky regexp.
- // Remove irrelevant preceeding '.*' in a test regexp. The expression
- // checks whether this.source starts with '.*' and that the third char is
- // not a '?'. But see https://code.google.com/p/v8/issues/detail?id=3560
- var regexp = this;
- var source = REGEXP_SOURCE(regexp);
- if (source.length >= 3 &&
- %_StringCharCodeAt(source, 0) == 46 && // '.'
- %_StringCharCodeAt(source, 1) == 42 && // '*'
- %_StringCharCodeAt(source, 2) != 63) { // '?'
- regexp = TrimRegExp(regexp);
- }
- // matchIndices is either null or the RegExpLastMatchInfo array.
- var matchIndices = %_RegExpExec(regexp, string, 0, RegExpLastMatchInfo);
- if (IS_NULL(matchIndices)) {
- this.lastIndex = 0;
- return false;
- }
- return true;
- }
-}
-
-
// ES#sec-regexp.prototype.test RegExp.prototype.test ( S )
function RegExpSubclassTest(string) {
if (!IS_RECEIVER(this)) {
@@ -353,18 +165,6 @@ function RegExpSubclassTest(string) {
}
%FunctionRemovePrototype(RegExpSubclassTest);
-function TrimRegExp(regexp) {
- if (regexp_key !== regexp) {
- regexp_key = regexp;
- regexp_val =
- new GlobalRegExp(
- %_SubString(REGEXP_SOURCE(regexp), 2, REGEXP_SOURCE(regexp).length),
- (REGEXP_IGNORE_CASE(regexp) ? REGEXP_MULTILINE(regexp) ? "im" : "i"
- : REGEXP_MULTILINE(regexp) ? "m" : ""));
- }
- return regexp_val;
-}
-
function RegExpToString() {
if (!IS_RECEIVER(this)) {
@@ -383,14 +183,13 @@ function AtSurrogatePair(subject, index) {
var first = %_StringCharCodeAt(subject, index);
if (first < 0xD800 || first > 0xDBFF) return false;
var second = %_StringCharCodeAt(subject, index + 1);
- return second >= 0xDC00 || second <= 0xDFFF;
+ return second >= 0xDC00 && second <= 0xDFFF;
}
-// Legacy implementation of RegExp.prototype[Symbol.split] which
+// Fast path implementation of RegExp.prototype[Symbol.split] which
// doesn't properly call the underlying exec, @@species methods
function RegExpSplit(string, limit) {
- // TODO(yangguo): allow non-regexp receivers.
if (!IS_REGEXP(this)) {
throw %make_type_error(kIncompatibleMethodReceiver,
"RegExp.prototype.@@split", this);
@@ -473,15 +272,11 @@ function RegExpSubclassSplit(string, limit) {
var constructor = SpeciesConstructor(this, GlobalRegExp);
var flags = TO_STRING(this.flags);
- // TODO(adamk): this fast path is wrong with respect to this.global
- // and this.sticky, but hopefully the spec will remove those gets
- // and thus make the assumption of 'exec' having no side-effects
- // more correct. Also, we doesn't ensure that 'exec' is actually
- // a data property on RegExp.prototype.
- var exec;
+ // TODO(adamk): this fast path is wrong as we doesn't ensure that 'exec'
+ // is actually a data property on RegExp.prototype.
if (IS_REGEXP(this) && constructor === GlobalRegExp) {
- exec = this.exec;
- if (exec === RegExpSubclassExecJS) {
+ var exec = this.exec;
+ if (exec === RegExpExecJS) {
return %_Call(RegExpSplit, this, string, limit);
}
}
@@ -505,9 +300,7 @@ function RegExpSubclassSplit(string, limit) {
var stringIndex = prevStringIndex;
while (stringIndex < size) {
splitter.lastIndex = stringIndex;
- result = RegExpSubclassExec(splitter, string, exec);
- // Ensure exec will be read again on the next loop through.
- exec = UNDEFINED;
+ result = RegExpSubclassExec(splitter, string);
if (IS_NULL(result)) {
stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
} else {
@@ -697,6 +490,31 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
return result + %_SubString(subject, endOfMatch, subject.length);
}
+// Wraps access to matchInfo's captures into a format understood by
+// GetSubstitution.
+function MatchInfoCaptureWrapper(matches, subject) {
+ this.length = NUMBER_OF_CAPTURES(matches) >> 1;
+ this.match = matches;
+ this.subject = subject;
+}
+
+MatchInfoCaptureWrapper.prototype.at = function(ix) {
+ const match = this.match;
+ const start = match[CAPTURE(ix << 1)];
+ if (start < 0) return UNDEFINED;
+ return %_SubString(this.subject, start, match[CAPTURE((ix << 1) + 1)]);
+};
+%SetForceInlineFlag(MatchInfoCaptureWrapper.prototype.at);
+
+function ArrayCaptureWrapper(array) {
+ this.length = array.length;
+ this.array = array;
+}
+
+ArrayCaptureWrapper.prototype.at = function(ix) {
+ return this.array[ix];
+};
+%SetForceInlineFlag(ArrayCaptureWrapper.prototype.at);
function RegExpReplace(string, replace) {
if (!IS_REGEXP(this)) {
@@ -720,9 +538,17 @@ function RegExpReplace(string, replace) {
return %_SubString(subject, 0, match[CAPTURE0]) +
%_SubString(subject, match[CAPTURE1], subject.length)
}
- return ExpandReplacement(replace, subject, RegExpLastMatchInfo,
- %_SubString(subject, 0, match[CAPTURE0])) +
- %_SubString(subject, match[CAPTURE1], subject.length);
+ const captures = new MatchInfoCaptureWrapper(match, subject);
+ const start = match[CAPTURE0];
+ const end = match[CAPTURE1];
+
+ const prefix = %_SubString(subject, 0, start);
+ const matched = %_SubString(subject, start, end);
+ const suffix = %_SubString(subject, end, subject.length);
+
+ return prefix +
+ GetSubstitution(matched, subject, start, captures, replace) +
+ suffix;
}
// Global regexp search, string replace.
@@ -744,8 +570,6 @@ function RegExpReplace(string, replace) {
// GetSubstitution(matched, str, position, captures, replacement)
// Expand the $-expressions in the string and return a new string with
// the result.
-// TODO(littledan): Call this function from String.prototype.replace instead
-// of the very similar ExpandReplacement in src/js/string.js
function GetSubstitution(matched, string, position, captures, replacement) {
var matchLength = matched.length;
var stringLength = string.length;
@@ -794,7 +618,7 @@ function GetSubstitution(matched, string, position, captures, replacement) {
}
}
if (scaledIndex != 0 && scaledIndex < capturesLength) {
- var capture = captures[scaledIndex];
+ var capture = captures.at(scaledIndex);
if (!IS_UNDEFINED(capture)) result += capture;
pos += advance;
} else {
@@ -869,16 +693,12 @@ function RegExpSubclassReplace(string, replace) {
this.lastIndex = 0;
}
- // TODO(adamk): this fast path is wrong with respect to this.global
- // and this.sticky, but hopefully the spec will remove those gets
- // and thus make the assumption of 'exec' having no side-effects
- // more correct. Also, we doesn't ensure that 'exec' is actually
- // a data property on RegExp.prototype, nor does the fast path
- // correctly handle lastIndex setting.
+ // TODO(adamk): this fast path is wrong as we doesn't ensure that 'exec'
+ // is actually a data property on RegExp.prototype.
var exec;
if (IS_REGEXP(this)) {
exec = this.exec;
- if (exec === RegExpSubclassExecJS) {
+ if (exec === RegExpExecJS) {
return %_Call(RegExpReplace, this, string, replace);
}
}
@@ -922,7 +742,8 @@ function RegExpSubclassReplace(string, replace) {
replacement = %reflect_apply(replace, UNDEFINED, parameters, 0,
parameters.length);
} else {
- replacement = GetSubstitution(matched, string, position, captures,
+ const capturesWrapper = new ArrayCaptureWrapper(captures);
+ replacement = GetSubstitution(matched, string, position, capturesWrapper,
replace);
}
if (position >= nextSourcePosition) {
@@ -946,9 +767,10 @@ function RegExpSubclassSearch(string) {
}
string = TO_STRING(string);
var previousLastIndex = this.lastIndex;
- this.lastIndex = 0;
+ if (previousLastIndex != 0) this.lastIndex = 0;
var result = RegExpSubclassExec(this, string);
- this.lastIndex = previousLastIndex;
+ var currentLastIndex = this.lastIndex;
+ if (currentLastIndex != previousLastIndex) this.lastIndex = previousLastIndex;
if (IS_NULL(result)) return -1;
return result.index;
}
@@ -1035,7 +857,6 @@ function RegExpGetFlags() {
// ES6 21.2.5.4.
function RegExpGetGlobal() {
if (!IS_REGEXP(this)) {
- // TODO(littledan): Remove this RegExp compat workaround
if (this === GlobalRegExpPrototype) {
%IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
return UNDEFINED;
@@ -1050,7 +871,6 @@ function RegExpGetGlobal() {
// ES6 21.2.5.5.
function RegExpGetIgnoreCase() {
if (!IS_REGEXP(this)) {
- // TODO(littledan): Remove this RegExp compat workaround
if (this === GlobalRegExpPrototype) {
%IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
return UNDEFINED;
@@ -1064,7 +884,6 @@ function RegExpGetIgnoreCase() {
// ES6 21.2.5.7.
function RegExpGetMultiline() {
if (!IS_REGEXP(this)) {
- // TODO(littledan): Remove this RegExp compat workaround
if (this === GlobalRegExpPrototype) {
%IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
return UNDEFINED;
@@ -1078,7 +897,6 @@ function RegExpGetMultiline() {
// ES6 21.2.5.10.
function RegExpGetSource() {
if (!IS_REGEXP(this)) {
- // TODO(littledan): Remove this RegExp compat workaround
if (this === GlobalRegExpPrototype) {
%IncrementUseCounter(kRegExpPrototypeSourceGetter);
return "(?:)";
@@ -1092,8 +910,6 @@ function RegExpGetSource() {
// ES6 21.2.5.12.
function RegExpGetSticky() {
if (!IS_REGEXP(this)) {
- // Compat fix: RegExp.prototype.sticky == undefined; UseCounter tracks it
- // TODO(littledan): Remove this workaround or standardize it
if (this === GlobalRegExpPrototype) {
%IncrementUseCounter(kRegExpPrototypeStickyGetter);
return UNDEFINED;
@@ -1108,7 +924,6 @@ function RegExpGetSticky() {
// ES6 21.2.5.15.
function RegExpGetUnicode() {
if (!IS_REGEXP(this)) {
- // TODO(littledan): Remove this RegExp compat workaround
if (this === GlobalRegExpPrototype) {
%IncrementUseCounter(kRegExpPrototypeUnicodeGetter);
return UNDEFINED;
@@ -1127,17 +942,9 @@ function RegExpSpecies() {
// -------------------------------------------------------------------
-%FunctionSetInstanceClassName(GlobalRegExp, 'RegExp');
-GlobalRegExpPrototype = new GlobalObject();
-%FunctionSetPrototype(GlobalRegExp, GlobalRegExpPrototype);
-%AddNamedProperty(
- GlobalRegExp.prototype, 'constructor', GlobalRegExp, DONT_ENUM);
-%SetCode(GlobalRegExp, RegExpConstructor);
-
utils.InstallGetter(GlobalRegExp, speciesSymbol, RegExpSpecies);
utils.InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, [
- "exec", RegExpSubclassExecJS,
"test", RegExpSubclassTest,
"toString", RegExpToString,
"compile", RegExpCompileJS,
@@ -1166,11 +973,20 @@ var RegExpSetInput = function(string) {
LAST_INPUT(RegExpLastMatchInfo) = TO_STRING(string);
};
+// TODO(jgruber): All of these getters and setters were intended to be installed
+// with various attributes (e.g. DONT_ENUM | DONT_DELETE), but
+// InstallGetterSetter had a bug which ignored the passed attributes and
+// simply installed as DONT_ENUM instead. We might want to change back
+// to the intended attributes at some point.
+// On the other hand, installing attributes as DONT_ENUM matches the draft
+// specification at
+// https://github.com/claudepache/es-regexp-legacy-static-properties
+
%OptimizeObjectForAddingMultipleProperties(GlobalRegExp, 22);
utils.InstallGetterSetter(GlobalRegExp, 'input', RegExpGetInput, RegExpSetInput,
- DONT_DELETE);
+ DONT_ENUM);
utils.InstallGetterSetter(GlobalRegExp, '$_', RegExpGetInput, RegExpSetInput,
- DONT_ENUM | DONT_DELETE);
+ DONT_ENUM);
var NoOpSetter = function(ignored) {};
@@ -1178,28 +994,30 @@ var NoOpSetter = function(ignored) {};
// Static properties set by a successful match.
utils.InstallGetterSetter(GlobalRegExp, 'lastMatch', RegExpGetLastMatch,
- NoOpSetter, DONT_DELETE);
+ NoOpSetter, DONT_ENUM);
utils.InstallGetterSetter(GlobalRegExp, '$&', RegExpGetLastMatch, NoOpSetter,
- DONT_ENUM | DONT_DELETE);
+ DONT_ENUM);
utils.InstallGetterSetter(GlobalRegExp, 'lastParen', RegExpGetLastParen,
- NoOpSetter, DONT_DELETE);
+ NoOpSetter, DONT_ENUM);
utils.InstallGetterSetter(GlobalRegExp, '$+', RegExpGetLastParen, NoOpSetter,
- DONT_ENUM | DONT_DELETE);
+ DONT_ENUM);
utils.InstallGetterSetter(GlobalRegExp, 'leftContext', RegExpGetLeftContext,
- NoOpSetter, DONT_DELETE);
+ NoOpSetter, DONT_ENUM);
utils.InstallGetterSetter(GlobalRegExp, '$`', RegExpGetLeftContext, NoOpSetter,
- DONT_ENUM | DONT_DELETE);
+ DONT_ENUM);
utils.InstallGetterSetter(GlobalRegExp, 'rightContext', RegExpGetRightContext,
- NoOpSetter, DONT_DELETE);
+ NoOpSetter, DONT_ENUM);
utils.InstallGetterSetter(GlobalRegExp, "$'", RegExpGetRightContext, NoOpSetter,
- DONT_ENUM | DONT_DELETE);
+ DONT_ENUM);
for (var i = 1; i < 10; ++i) {
utils.InstallGetterSetter(GlobalRegExp, '$' + i, RegExpMakeCaptureGetter(i),
- NoOpSetter, DONT_DELETE);
+ NoOpSetter, DONT_ENUM);
}
%ToFastProperties(GlobalRegExp);
+%InstallToContext(["regexp_last_match_info", RegExpLastMatchInfo]);
+
// -------------------------------------------------------------------
// Internal
@@ -1228,13 +1046,13 @@ function InternalRegExpReplace(regexp, subject, replacement) {
// Exports
utils.Export(function(to) {
+ to.GetSubstitution = GetSubstitution;
to.InternalRegExpMatch = InternalRegExpMatch;
to.InternalRegExpReplace = InternalRegExpReplace;
to.IsRegExp = IsRegExp;
to.RegExpExec = DoRegExpExec;
to.RegExpInitialize = RegExpInitialize;
to.RegExpLastMatchInfo = RegExpLastMatchInfo;
- to.RegExpTest = RegExpTest;
});
})
diff --git a/deps/v8/src/js/string-iterator.js b/deps/v8/src/js/string-iterator.js
deleted file mode 100644
index 2319e5a679..0000000000
--- a/deps/v8/src/js/string-iterator.js
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalString = global.String;
-var IteratorPrototype = utils.ImportNow("IteratorPrototype");
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var stringIteratorIteratedStringSymbol =
- utils.ImportNow("string_iterator_iterated_string_symbol");
-var stringIteratorNextIndexSymbol =
- utils.ImportNow("string_iterator_next_index_symbol");
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-// -------------------------------------------------------------------
-
-function StringIterator() {}
-
-
-// 21.1.5.1 CreateStringIterator Abstract Operation
-function CreateStringIterator(string) {
- CHECK_OBJECT_COERCIBLE(string, 'String.prototype[Symbol.iterator]');
- var s = TO_STRING(string);
- var iterator = new StringIterator;
- SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol, s);
- SET_PRIVATE(iterator, stringIteratorNextIndexSymbol, 0);
- return iterator;
-}
-
-
-// ES6 section 21.1.5.2.1 %StringIteratorPrototype%.next ( )
-function StringIteratorNext() {
- var iterator = this;
- var value = UNDEFINED;
- var done = true;
-
- if (!IS_RECEIVER(iterator) ||
- !HAS_DEFINED_PRIVATE(iterator, stringIteratorNextIndexSymbol)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'String Iterator.prototype.next');
- }
-
- var s = GET_PRIVATE(iterator, stringIteratorIteratedStringSymbol);
- if (!IS_UNDEFINED(s)) {
- var position = GET_PRIVATE(iterator, stringIteratorNextIndexSymbol);
- var length = TO_UINT32(s.length);
- if (position >= length) {
- SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol, UNDEFINED);
- } else {
- var first = %_StringCharCodeAt(s, position);
- value = %_StringCharFromCode(first);
- done = false;
- position++;
-
- if (first >= 0xD800 && first <= 0xDBFF && position < length) {
- var second = %_StringCharCodeAt(s, position);
- if (second >= 0xDC00 && second <= 0xDFFF) {
- value += %_StringCharFromCode(second);
- position++;
- }
- }
-
- SET_PRIVATE(iterator, stringIteratorNextIndexSymbol, position);
- }
- }
- return %_CreateIterResultObject(value, done);
-}
-
-
-// 21.1.3.27 String.prototype [ @@iterator ]( )
-function StringPrototypeIterator() {
- return CreateStringIterator(this);
-}
-
-//-------------------------------------------------------------------
-
-%FunctionSetPrototype(StringIterator, {__proto__: IteratorPrototype});
-%FunctionSetInstanceClassName(StringIterator, 'String Iterator');
-
-utils.InstallFunctions(StringIterator.prototype, DONT_ENUM, [
- 'next', StringIteratorNext
-]);
-%AddNamedProperty(StringIterator.prototype, toStringTagSymbol,
- "String Iterator", READ_ONLY | DONT_ENUM);
-
-utils.SetFunctionName(StringPrototypeIterator, iteratorSymbol);
-%AddNamedProperty(GlobalString.prototype, iteratorSymbol,
- StringPrototypeIterator, DONT_ENUM);
-
-})
diff --git a/deps/v8/src/js/string.js b/deps/v8/src/js/string.js
index 38caab7b12..7c552a93a9 100644
--- a/deps/v8/src/js/string.js
+++ b/deps/v8/src/js/string.js
@@ -10,6 +10,7 @@
// Imports
var ArrayJoin;
+var GetSubstitution;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
var IsRegExp;
@@ -23,6 +24,7 @@ var splitSymbol = utils.ImportNow("split_symbol");
utils.Import(function(from) {
ArrayJoin = from.ArrayJoin;
+ GetSubstitution = from.GetSubstitution;
IsRegExp = from.IsRegExp;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
@@ -59,45 +61,6 @@ function StringIndexOf(pattern, position) { // length == 1
%FunctionSetLength(StringIndexOf, 1);
-// ECMA-262 section 15.5.4.8
-function StringLastIndexOf(pat, pos) { // length == 1
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.lastIndexOf");
-
- var sub = TO_STRING(this);
- var subLength = sub.length;
- var pat = TO_STRING(pat);
- var patLength = pat.length;
- var index = subLength - patLength;
- var position = TO_NUMBER(pos);
- if (!NUMBER_IS_NAN(position)) {
- position = TO_INTEGER(position);
- if (position < 0) {
- position = 0;
- }
- if (position + patLength < subLength) {
- index = position;
- }
- }
- if (index < 0) {
- return -1;
- }
- return %StringLastIndexOf(sub, pat, index);
-}
-
-%FunctionSetLength(StringLastIndexOf, 1);
-
-
-// ECMA-262 section 15.5.4.9
-//
-// This function is implementation specific. For now, we do not
-// do anything locale specific.
-function StringLocaleCompareJS(other) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.localeCompare");
-
- return %StringLocaleCompare(TO_STRING(this), TO_STRING(other));
-}
-
-
// ES6 21.1.3.11.
function StringMatchJS(pattern) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.match");
@@ -118,38 +81,6 @@ function StringMatchJS(pattern) {
}
-// ECMA-262 v6, section 21.1.3.12
-//
-// For now we do nothing, as proper normalization requires big tables.
-// If Intl is enabled, then i18n.js will override it and provide the the
-// proper functionality.
-function StringNormalize(formArg) { // length == 0
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
- var s = TO_STRING(this);
-
- var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING(formArg);
-
- var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
- var normalizationForm = %ArrayIndexOf(NORMALIZATION_FORMS, form, 0);
- if (normalizationForm === -1) {
- throw %make_range_error(kNormalizationForm,
- %_Call(ArrayJoin, NORMALIZATION_FORMS, ', '));
- }
-
- return s;
-}
-
-%FunctionSetLength(StringNormalize, 0);
-
-
-// This has the same size as the RegExpLastMatchInfo array, and can be used
-// for functions that expect that structure to be returned. It is used when
-// the needle is a string rather than a regexp. In this case we can't update
-// lastMatchArray without erroneously affecting the properties on the global
-// RegExp object.
-var reusableMatchInfo = [2, "", "", -1, -1];
-
-
// ES6, section 21.1.3.14
function StringReplace(search, replace) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.replace");
@@ -201,101 +132,18 @@ function StringReplace(search, replace) {
if (IS_CALLABLE(replace)) {
result += replace(search, start, subject);
} else {
- reusableMatchInfo[CAPTURE0] = start;
- reusableMatchInfo[CAPTURE1] = end;
- result = ExpandReplacement(TO_STRING(replace),
- subject,
- reusableMatchInfo,
- result);
+ // In this case, we don't have any capture groups and can get away with
+ // faking the captures object by simply setting its length to 1.
+ const captures = { length: 1 };
+ const matched = %_SubString(subject, start, end);
+ result += GetSubstitution(matched, subject, start, captures,
+ TO_STRING(replace));
}
return result + %_SubString(subject, end, subject.length);
}
-// Expand the $-expressions in the string and return a new string with
-// the result.
-function ExpandReplacement(string, subject, matchInfo, result) {
- var length = string.length;
- var next = %StringIndexOf(string, '$', 0);
- if (next < 0) {
- if (length > 0) result += string;
- return result;
- }
-
- if (next > 0) result += %_SubString(string, 0, next);
-
- while (true) {
- var expansion = '$';
- var position = next + 1;
- if (position < length) {
- var peek = %_StringCharCodeAt(string, position);
- if (peek == 36) { // $$
- ++position;
- result += '$';
- } else if (peek == 38) { // $& - match
- ++position;
- result +=
- %_SubString(subject, matchInfo[CAPTURE0], matchInfo[CAPTURE1]);
- } else if (peek == 96) { // $` - prefix
- ++position;
- result += %_SubString(subject, 0, matchInfo[CAPTURE0]);
- } else if (peek == 39) { // $' - suffix
- ++position;
- result += %_SubString(subject, matchInfo[CAPTURE1], subject.length);
- } else if (peek >= 48 && peek <= 57) {
- // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
- var scaled_index = (peek - 48) << 1;
- var advance = 1;
- var number_of_captures = NUMBER_OF_CAPTURES(matchInfo);
- if (position + 1 < string.length) {
- var next = %_StringCharCodeAt(string, position + 1);
- if (next >= 48 && next <= 57) {
- var new_scaled_index = scaled_index * 10 + ((next - 48) << 1);
- if (new_scaled_index < number_of_captures) {
- scaled_index = new_scaled_index;
- advance = 2;
- }
- }
- }
- if (scaled_index != 0 && scaled_index < number_of_captures) {
- var start = matchInfo[CAPTURE(scaled_index)];
- if (start >= 0) {
- result +=
- %_SubString(subject, start, matchInfo[CAPTURE(scaled_index + 1)]);
- }
- position += advance;
- } else {
- result += '$';
- }
- } else {
- result += '$';
- }
- } else {
- result += '$';
- }
-
- // Go the the next $ in the string.
- next = %StringIndexOf(string, '$', position);
-
- // Return if there are no more $ characters in the string. If we
- // haven't reached the end, we need to append the suffix.
- if (next < 0) {
- if (position < length) {
- result += %_SubString(string, position, length);
- }
- return result;
- }
-
- // Append substring between the previous and the next $ character.
- if (next > position) {
- result += %_SubString(string, position, next);
- }
- }
- return result;
-}
-
-
// ES6 21.1.3.15.
function StringSearch(pattern) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
@@ -390,55 +238,6 @@ function StringSplitJS(separator, limit) {
}
-// ECMA-262 section 15.5.4.15
-function StringSubstring(start, end) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.subString");
-
- var s = TO_STRING(this);
- var s_len = s.length;
-
- var start_i = TO_INTEGER(start);
- if (start_i < 0) {
- start_i = 0;
- } else if (start_i > s_len) {
- start_i = s_len;
- }
-
- var end_i = s_len;
- if (!IS_UNDEFINED(end)) {
- end_i = TO_INTEGER(end);
- if (end_i > s_len) {
- end_i = s_len;
- } else {
- if (end_i < 0) end_i = 0;
- if (start_i > end_i) {
- var tmp = end_i;
- end_i = start_i;
- start_i = tmp;
- }
- }
- }
-
- return %_SubString(s, start_i, end_i);
-}
-
-
-// ecma262/#sec-string.prototype.substr
-function StringSubstr(start, length) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.substr");
- var s = TO_STRING(this);
- var size = s.length;
- start = TO_INTEGER(start);
- length = IS_UNDEFINED(length) ? size : TO_INTEGER(length);
-
- if (start < 0) start = MaxSimple(size + start, 0);
- length = MinSimple(MaxSimple(length, 0), size - start);
-
- if (length <= 0) return '';
- return %_SubString(s, start, start + length);
-}
-
-
// ECMA-262, 15.5.4.16
function StringToLowerCaseJS() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase");
@@ -737,17 +536,12 @@ utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
"endsWith", StringEndsWith,
"includes", StringIncludes,
"indexOf", StringIndexOf,
- "lastIndexOf", StringLastIndexOf,
- "localeCompare", StringLocaleCompareJS,
"match", StringMatchJS,
- "normalize", StringNormalize,
"repeat", StringRepeat,
"replace", StringReplace,
"search", StringSearch,
"slice", StringSlice,
"split", StringSplitJS,
- "substring", StringSubstring,
- "substr", StringSubstr,
"startsWith", StringStartsWith,
"toLowerCase", StringToLowerCaseJS,
"toLocaleLowerCase", StringToLocaleLowerCase,
@@ -773,15 +567,11 @@ utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
// Exports
utils.Export(function(to) {
- to.ExpandReplacement = ExpandReplacement;
to.StringIndexOf = StringIndexOf;
- to.StringLastIndexOf = StringLastIndexOf;
to.StringMatch = StringMatchJS;
to.StringReplace = StringReplace;
to.StringSlice = StringSlice;
to.StringSplit = StringSplitJS;
- to.StringSubstr = StringSubstr;
- to.StringSubstring = StringSubstring;
});
})
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index b97a9c86ce..edb3b06a74 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -19,7 +19,6 @@ var GetMethod;
var GlobalArray = global.Array;
var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalArrayBufferPrototype = GlobalArrayBuffer.prototype;
-var GlobalDataView = global.DataView;
var GlobalObject = global.Object;
var InnerArrayCopyWithin;
var InnerArrayEvery;
@@ -35,7 +34,6 @@ var InnerArraySome;
var InnerArraySort;
var InnerArrayToLocaleString;
var InternalArray = utils.InternalArray;
-var IsNaN;
var MaxSimple;
var MinSimple;
var PackedArrayReverse;
@@ -84,7 +82,6 @@ utils.Import(function(from) {
InnerArraySome = from.InnerArraySome;
InnerArraySort = from.InnerArraySort;
InnerArrayToLocaleString = from.InnerArrayToLocaleString;
- IsNaN = from.IsNaN;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
PackedArrayReverse = from.PackedArrayReverse;
@@ -545,9 +542,9 @@ function TypedArrayComparefn(x, y) {
return -1;
} else if (x > y) {
return 1;
- } else if (IsNaN(x) && IsNaN(y)) {
- return IsNaN(y) ? 0 : 1;
- } else if (IsNaN(x)) {
+ } else if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) {
+ return NUMBER_IS_NAN(y) ? 0 : 1;
+ } else if (NUMBER_IS_NAN(x)) {
return 1;
}
return 0;
@@ -915,68 +912,4 @@ endmacro
TYPED_ARRAYS(SETUP_TYPED_ARRAY)
-// --------------------------- DataView -----------------------------
-
-macro DATA_VIEW_TYPES(FUNCTION)
- FUNCTION(Int8)
- FUNCTION(Uint8)
- FUNCTION(Int16)
- FUNCTION(Uint16)
- FUNCTION(Int32)
- FUNCTION(Uint32)
- FUNCTION(Float32)
- FUNCTION(Float64)
-endmacro
-
-
-macro DATA_VIEW_GETTER_SETTER(TYPENAME)
-function DataViewGetTYPENAMEJS(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'DataView.getTYPENAME', this);
- }
- offset = IS_UNDEFINED(offset) ? 0 : ToIndex(offset, kInvalidDataViewAccessorOffset);
- return %DataViewGetTYPENAME(this, offset, !!little_endian);
-}
-%FunctionSetLength(DataViewGetTYPENAMEJS, 1);
-
-function DataViewSetTYPENAMEJS(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'DataView.setTYPENAME', this);
- }
- offset = IS_UNDEFINED(offset) ? 0 : ToIndex(offset, kInvalidDataViewAccessorOffset);
- %DataViewSetTYPENAME(this, offset, TO_NUMBER(value), !!little_endian);
-}
-%FunctionSetLength(DataViewSetTYPENAMEJS, 2);
-endmacro
-
-DATA_VIEW_TYPES(DATA_VIEW_GETTER_SETTER)
-
-utils.InstallFunctions(GlobalDataView.prototype, DONT_ENUM, [
- "getInt8", DataViewGetInt8JS,
- "setInt8", DataViewSetInt8JS,
-
- "getUint8", DataViewGetUint8JS,
- "setUint8", DataViewSetUint8JS,
-
- "getInt16", DataViewGetInt16JS,
- "setInt16", DataViewSetInt16JS,
-
- "getUint16", DataViewGetUint16JS,
- "setUint16", DataViewSetUint16JS,
-
- "getInt32", DataViewGetInt32JS,
- "setInt32", DataViewSetInt32JS,
-
- "getUint32", DataViewGetUint32JS,
- "setUint32", DataViewSetUint32JS,
-
- "getFloat32", DataViewGetFloat32JS,
- "setFloat32", DataViewSetFloat32JS,
-
- "getFloat64", DataViewGetFloat64JS,
- "setFloat64", DataViewSetFloat64JS
-]);
-
})
diff --git a/deps/v8/src/js/v8natives.js b/deps/v8/src/js/v8natives.js
index 0c0a7925b9..93636a036b 100644
--- a/deps/v8/src/js/v8natives.js
+++ b/deps/v8/src/js/v8natives.js
@@ -18,20 +18,6 @@ var ObjectToString = utils.ImportNow("object_to_string");
// ----------------------------------------------------------------------------
-// ES6 18.2.3 isNaN(number)
-function GlobalIsNaN(number) {
- number = TO_NUMBER(number);
- return NUMBER_IS_NAN(number);
-}
-
-
-// ES6 18.2.2 isFinite(number)
-function GlobalIsFinite(number) {
- number = TO_NUMBER(number);
- return NUMBER_IS_FINITE(number);
-}
-
-
// ES6 18.2.5 parseInt(string, radix)
function GlobalParseInt(string, radix) {
if (IS_UNDEFINED(radix) || radix === 10 || radix === 0) {
@@ -91,8 +77,6 @@ utils.InstallConstants(global, [
// Set up non-enumerable function on the global object.
utils.InstallFunctions(global, DONT_ENUM, [
- "isNaN", GlobalIsNaN,
- "isFinite", GlobalIsFinite,
"parseInt", GlobalParseInt,
"parseFloat", GlobalParseFloat,
]);
@@ -207,38 +191,6 @@ utils.InstallFunctions(GlobalObject, DONT_ENUM, [
// ----------------------------------------------------------------------------
// Number
-// Harmony isFinite.
-function NumberIsFinite(number) {
- return IS_NUMBER(number) && NUMBER_IS_FINITE(number);
-}
-
-
-// Harmony isInteger
-function NumberIsInteger(number) {
- return NumberIsFinite(number) && TO_INTEGER(number) == number;
-}
-
-
-// Harmony isNaN.
-function NumberIsNaN(number) {
- return IS_NUMBER(number) && NUMBER_IS_NAN(number);
-}
-
-
-// Harmony isSafeInteger
-function NumberIsSafeInteger(number) {
- if (NumberIsFinite(number)) {
- var integral = TO_INTEGER(number);
- if (integral == number) {
- return -kMaxSafeInteger <= integral && integral <= kMaxSafeInteger;
- }
- }
- return false;
-}
-
-
-// ----------------------------------------------------------------------------
-
utils.InstallConstants(GlobalNumber, [
// ECMA-262 section 15.7.3.1.
"MAX_VALUE", 1.7976931348623157e+308,
@@ -260,15 +212,10 @@ utils.InstallConstants(GlobalNumber, [
// Harmony Number constructor additions
utils.InstallFunctions(GlobalNumber, DONT_ENUM, [
- "isFinite", NumberIsFinite,
- "isInteger", NumberIsInteger,
- "isNaN", NumberIsNaN,
- "isSafeInteger", NumberIsSafeInteger,
"parseInt", GlobalParseInt,
"parseFloat", GlobalParseFloat
]);
-%SetForceInlineFlag(NumberIsNaN);
// ----------------------------------------------------------------------------
@@ -295,9 +242,6 @@ function GetIterator(obj, method) {
utils.Export(function(to) {
to.GetIterator = GetIterator;
to.GetMethod = GetMethod;
- to.IsNaN = GlobalIsNaN;
- to.NumberIsNaN = NumberIsNaN;
- to.NumberIsInteger = NumberIsInteger;
to.ObjectHasOwnProperty = GlobalObject.prototype.hasOwnProperty;
});
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index bf2fd0d673..576100ab84 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -11,10 +11,10 @@
#include "src/field-type.h"
#include "src/messages.h"
#include "src/objects-inl.h"
-#include "src/parsing/scanner.h"
#include "src/parsing/token.h"
#include "src/property-descriptor.h"
#include "src/transitions.h"
+#include "src/unicode-cache.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 2f81248ec1..f64143ed24 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -39,9 +39,14 @@ void SetTracingController(
const int DefaultPlatform::kMaxThreadPoolSize = 8;
DefaultPlatform::DefaultPlatform()
- : initialized_(false), thread_pool_size_(0), tracing_controller_(NULL) {}
+ : initialized_(false), thread_pool_size_(0) {}
DefaultPlatform::~DefaultPlatform() {
+ if (tracing_controller_) {
+ tracing_controller_->StopTracing();
+ tracing_controller_.reset();
+ }
+
base::LockGuard<base::Mutex> guard(&lock_);
queue_.Terminate();
if (initialized_) {
@@ -63,11 +68,6 @@ DefaultPlatform::~DefaultPlatform() {
i->second.pop();
}
}
-
- if (tracing_controller_) {
- tracing_controller_->StopTracing();
- delete tracing_controller_;
- }
}
@@ -178,16 +178,17 @@ double DefaultPlatform::MonotonicallyIncreasingTime() {
static_cast<double>(base::Time::kMicrosecondsPerSecond);
}
-
uint64_t DefaultPlatform::AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int num_args,
const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values, unsigned int flags) {
+ const uint64_t* arg_values,
+ std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+ unsigned int flags) {
if (tracing_controller_) {
return tracing_controller_->AddTraceEvent(
phase, category_enabled_flag, name, scope, id, bind_id, num_args,
- arg_names, arg_types, arg_values, flags);
+ arg_names, arg_types, arg_values, arg_convertables, flags);
}
return 0;
@@ -218,12 +219,22 @@ const char* DefaultPlatform::GetCategoryGroupName(
void DefaultPlatform::SetTracingController(
tracing::TracingController* tracing_controller) {
- tracing_controller_ = tracing_controller;
+ tracing_controller_.reset(tracing_controller);
}
size_t DefaultPlatform::NumberOfAvailableBackgroundThreads() {
return static_cast<size_t>(thread_pool_size_);
}
+void DefaultPlatform::AddTraceStateObserver(TraceStateObserver* observer) {
+ if (!tracing_controller_) return;
+ tracing_controller_->AddTraceStateObserver(observer);
+}
+
+void DefaultPlatform::RemoveTraceStateObserver(TraceStateObserver* observer) {
+ if (!tracing_controller_) return;
+ tracing_controller_->RemoveTraceStateObserver(observer);
+}
+
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index 0fd7e5ad89..e36234f528 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -7,6 +7,7 @@
#include <functional>
#include <map>
+#include <memory>
#include <queue>
#include <vector>
@@ -51,16 +52,21 @@ class DefaultPlatform : public Platform {
const uint8_t* GetCategoryGroupEnabled(const char* name) override;
const char* GetCategoryGroupName(
const uint8_t* category_enabled_flag) override;
- uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
- const char* name, const char* scope, uint64_t id,
- uint64_t bind_id, int32_t num_args,
- const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values,
- unsigned int flags) override;
+ using Platform::AddTraceEvent;
+ uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values,
+ std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+ unsigned int flags) override;
void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
const char* name, uint64_t handle) override;
void SetTracingController(tracing::TracingController* tracing_controller);
+ void AddTraceStateObserver(TraceStateObserver* observer) override;
+ void RemoveTraceStateObserver(TraceStateObserver* observer) override;
+
private:
static const int kMaxThreadPoolSize;
@@ -79,7 +85,7 @@ class DefaultPlatform : public Platform {
std::priority_queue<DelayedEntry, std::vector<DelayedEntry>,
std::greater<DelayedEntry> > >
main_thread_delayed_queue_;
- tracing::TracingController* tracing_controller_;
+ std::unique_ptr<tracing::TracingController> tracing_controller_;
DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
};
diff --git a/deps/v8/src/libplatform/tracing/trace-object.cc b/deps/v8/src/libplatform/tracing/trace-object.cc
index 55be8921cb..bb4bf71390 100644
--- a/deps/v8/src/libplatform/tracing/trace-object.cc
+++ b/deps/v8/src/libplatform/tracing/trace-object.cc
@@ -5,6 +5,7 @@
#include "include/libplatform/v8-tracing.h"
#include "base/trace_event/common/trace_event_common.h"
+#include "include/v8-platform.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
@@ -30,11 +31,13 @@ V8_INLINE static void CopyTraceObjectParameter(char** buffer,
}
}
-void TraceObject::Initialize(char phase, const uint8_t* category_enabled_flag,
- const char* name, const char* scope, uint64_t id,
- uint64_t bind_id, int num_args,
- const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values, unsigned int flags) {
+void TraceObject::Initialize(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, int num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values,
+ std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+ unsigned int flags) {
pid_ = base::OS::GetCurrentProcessId();
tid_ = base::OS::GetCurrentThreadId();
phase_ = phase;
@@ -55,6 +58,8 @@ void TraceObject::Initialize(char phase, const uint8_t* category_enabled_flag,
arg_names_[i] = arg_names[i];
arg_values_[i].as_uint = arg_values[i];
arg_types_[i] = arg_types[i];
+ if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+ arg_convertables_[i] = std::move(arg_convertables[i]);
}
bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
@@ -107,8 +112,10 @@ void TraceObject::InitializeForTesting(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int num_args,
const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values, unsigned int flags, int pid, int tid,
- int64_t ts, int64_t tts, uint64_t duration, uint64_t cpu_duration) {
+ const uint64_t* arg_values,
+ std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+ unsigned int flags, int pid, int tid, int64_t ts, int64_t tts,
+ uint64_t duration, uint64_t cpu_duration) {
pid_ = pid;
tid_ = tid;
phase_ = phase;
diff --git a/deps/v8/src/libplatform/tracing/trace-writer.cc b/deps/v8/src/libplatform/tracing/trace-writer.cc
index ec95527d5f..7445087c56 100644
--- a/deps/v8/src/libplatform/tracing/trace-writer.cc
+++ b/deps/v8/src/libplatform/tracing/trace-writer.cc
@@ -7,6 +7,7 @@
#include <cmath>
#include "base/trace_event/common/trace_event_common.h"
+#include "include/v8-platform.h"
#include "src/base/platform/platform.h"
namespace v8 {
@@ -112,6 +113,12 @@ void JSONTraceWriter::AppendArgValue(uint8_t type,
}
}
+void JSONTraceWriter::AppendArgValue(ConvertableToTraceFormat* value) {
+ std::string arg_stringified;
+ value->AppendAsTraceFormat(&arg_stringified);
+ stream_ << arg_stringified;
+}
+
JSONTraceWriter::JSONTraceWriter(std::ostream& stream) : stream_(stream) {
stream_ << "{\"traceEvents\":[";
}
@@ -143,10 +150,16 @@ void JSONTraceWriter::AppendTraceEvent(TraceObject* trace_event) {
const char** arg_names = trace_event->arg_names();
const uint8_t* arg_types = trace_event->arg_types();
TraceObject::ArgValue* arg_values = trace_event->arg_values();
+ std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables =
+ trace_event->arg_convertables();
for (int i = 0; i < trace_event->num_args(); ++i) {
if (i > 0) stream_ << ",";
stream_ << "\"" << arg_names[i] << "\":";
- AppendArgValue(arg_types[i], arg_values[i]);
+ if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
+ AppendArgValue(arg_convertables[i].get());
+ } else {
+ AppendArgValue(arg_types[i], arg_values[i]);
+ }
}
stream_ << "}}";
// TODO(fmeawad): Add support for Flow Events.
diff --git a/deps/v8/src/libplatform/tracing/trace-writer.h b/deps/v8/src/libplatform/tracing/trace-writer.h
index 963fc6a64d..43d7cb6a90 100644
--- a/deps/v8/src/libplatform/tracing/trace-writer.h
+++ b/deps/v8/src/libplatform/tracing/trace-writer.h
@@ -20,6 +20,7 @@ class JSONTraceWriter : public TraceWriter {
private:
void AppendArgValue(uint8_t type, TraceObject::ArgValue value);
+ void AppendArgValue(v8::ConvertableToTraceFormat*);
std::ostream& stream_;
bool append_comma_ = false;
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index e9a21725e2..c1a4057c05 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -38,21 +38,28 @@ const int g_num_builtin_categories = 4;
// Skip default categories.
v8::base::AtomicWord g_category_index = g_num_builtin_categories;
+TracingController::TracingController() {}
+
+TracingController::~TracingController() {}
+
void TracingController::Initialize(TraceBuffer* trace_buffer) {
trace_buffer_.reset(trace_buffer);
+ mutex_.reset(new base::Mutex());
}
uint64_t TracingController::AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int num_args,
const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values, unsigned int flags) {
+ const uint64_t* arg_values,
+ std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+ unsigned int flags) {
uint64_t handle;
TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
if (trace_object) {
trace_object->Initialize(phase, category_enabled_flag, name, scope, id,
bind_id, num_args, arg_names, arg_types,
- arg_values, flags);
+ arg_values, arg_convertables, flags);
}
return handle;
}
@@ -91,13 +98,29 @@ const char* TracingController::GetCategoryGroupName(
void TracingController::StartTracing(TraceConfig* trace_config) {
trace_config_.reset(trace_config);
- mode_ = RECORDING_MODE;
- UpdateCategoryGroupEnabledFlags();
+ std::unordered_set<Platform::TraceStateObserver*> observers_copy;
+ {
+ base::LockGuard<base::Mutex> lock(mutex_.get());
+ mode_ = RECORDING_MODE;
+ UpdateCategoryGroupEnabledFlags();
+ observers_copy = observers_;
+ }
+ for (auto o : observers_copy) {
+ o->OnTraceEnabled();
+ }
}
void TracingController::StopTracing() {
mode_ = DISABLED;
UpdateCategoryGroupEnabledFlags();
+ std::unordered_set<Platform::TraceStateObserver*> observers_copy;
+ {
+ base::LockGuard<base::Mutex> lock(mutex_.get());
+ observers_copy = observers_;
+ }
+ for (auto o : observers_copy) {
+ o->OnTraceDisabled();
+ }
trace_buffer_->Flush();
}
@@ -172,6 +195,24 @@ const uint8_t* TracingController::GetCategoryGroupEnabledInternal(
return category_group_enabled;
}
+void TracingController::AddTraceStateObserver(
+ Platform::TraceStateObserver* observer) {
+ {
+ base::LockGuard<base::Mutex> lock(mutex_.get());
+ observers_.insert(observer);
+ if (mode_ != RECORDING_MODE) return;
+ }
+ // Fire the observer if recording is already in progress.
+ observer->OnTraceEnabled();
+}
+
+void TracingController::RemoveTraceStateObserver(
+ Platform::TraceStateObserver* observer) {
+ base::LockGuard<base::Mutex> lock(mutex_.get());
+ DCHECK(observers_.find(observer) != observers_.end());
+ observers_.erase(observer);
+}
+
} // namespace tracing
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 71c667f4d6..0b40972b8e 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -217,7 +217,7 @@ class Sampler::PlatformData {
class SamplerManager {
public:
- SamplerManager() : sampler_map_(base::HashMap::PointersMatch) {}
+ SamplerManager() : sampler_map_() {}
void AddSampler(Sampler* sampler) {
AtomicGuard atomic_guard(&samplers_access_counter_);
diff --git a/deps/v8/src/lookup-cache-inl.h b/deps/v8/src/lookup-cache-inl.h
new file mode 100644
index 0000000000..1998a9de9a
--- /dev/null
+++ b/deps/v8/src/lookup-cache-inl.h
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/lookup-cache.h"
+
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+int DescriptorLookupCache::Hash(Object* source, Name* name) {
+ DCHECK(name->IsUniqueName());
+ // Uses only lower 32 bits if pointers are larger.
+ uint32_t source_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
+ kPointerSizeLog2;
+ uint32_t name_hash = name->hash_field();
+ return (source_hash ^ name_hash) % kLength;
+}
+
+int DescriptorLookupCache::Lookup(Map* source, Name* name) {
+ int index = Hash(source, name);
+ Key& key = keys_[index];
+ if ((key.source == source) && (key.name == name)) return results_[index];
+ return kAbsent;
+}
+
+void DescriptorLookupCache::Update(Map* source, Name* name, int result) {
+ DCHECK(result != kAbsent);
+ int index = Hash(source, name);
+ Key& key = keys_[index];
+ key.source = source;
+ key.name = name;
+ results_[index] = result;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/lookup-cache.cc b/deps/v8/src/lookup-cache.cc
new file mode 100644
index 0000000000..18729d630d
--- /dev/null
+++ b/deps/v8/src/lookup-cache.cc
@@ -0,0 +1,84 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/lookup-cache.h"
+
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void DescriptorLookupCache::Clear() {
+ for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
+}
+
+int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
+ DisallowHeapAllocation no_gc;
+ // Uses only lower 32 bits if pointers are larger.
+ uintptr_t addr_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
+ return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
+}
+
+int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
+ DisallowHeapAllocation no_gc;
+ int index = (Hash(map, name) & kHashMask);
+ for (int i = 0; i < kEntriesPerBucket; i++) {
+ Key& key = keys_[index + i];
+ if ((key.map == *map) && key.name->Equals(*name)) {
+ return field_offsets_[index + i];
+ }
+ }
+ return kNotFound;
+}
+
+void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
+ int field_offset) {
+ DisallowHeapAllocation no_gc;
+ if (!name->IsUniqueName()) {
+ if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
+ Handle<String>::cast(name))
+ .ToHandle(&name)) {
+ return;
+ }
+ }
+ // This cache is cleared only between mark compact passes, so we expect the
+ // cache to only contain old space names.
+ DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
+
+ int index = (Hash(map, name) & kHashMask);
+ // After a GC there will be free slots, so we use them in order (this may
+ // help to get the most frequently used one in position 0).
+ for (int i = 0; i < kEntriesPerBucket; i++) {
+ Key& key = keys_[index];
+ Object* free_entry_indicator = NULL;
+ if (key.map == free_entry_indicator) {
+ key.map = *map;
+ key.name = *name;
+ field_offsets_[index + i] = field_offset;
+ return;
+ }
+ }
+ // No free entry found in this bucket, so we move them all down one and
+ // put the new entry at position zero.
+ for (int i = kEntriesPerBucket - 1; i > 0; i--) {
+ Key& key = keys_[index + i];
+ Key& key2 = keys_[index + i - 1];
+ key = key2;
+ field_offsets_[index + i] = field_offsets_[index + i - 1];
+ }
+
+ // Write the new first entry.
+ Key& key = keys_[index];
+ key.map = *map;
+ key.name = *name;
+ field_offsets_[index] = field_offset;
+}
+
+void KeyedLookupCache::Clear() {
+ for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/lookup-cache.h b/deps/v8/src/lookup-cache.h
new file mode 100644
index 0000000000..6da5e5b3d7
--- /dev/null
+++ b/deps/v8/src/lookup-cache.h
@@ -0,0 +1,117 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LOOKUP_CACHE_H_
+#define V8_LOOKUP_CACHE_H_
+
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Cache for mapping (map, property name) into descriptor index.
+// The cache contains both positive and negative results.
+// Descriptor index equals kNotFound means the property is absent.
+// Cleared at startup and prior to any gc.
+class DescriptorLookupCache {
+ public:
+ // Lookup descriptor index for (map, name).
+ // If absent, kAbsent is returned.
+ inline int Lookup(Map* source, Name* name);
+
+ // Update an element in the cache.
+ inline void Update(Map* source, Name* name, int result);
+
+ // Clear the cache.
+ void Clear();
+
+ static const int kAbsent = -2;
+
+ private:
+ DescriptorLookupCache() {
+ for (int i = 0; i < kLength; ++i) {
+ keys_[i].source = NULL;
+ keys_[i].name = NULL;
+ results_[i] = kAbsent;
+ }
+ }
+
+ static inline int Hash(Object* source, Name* name);
+
+ static const int kLength = 64;
+ struct Key {
+ Map* source;
+ Name* name;
+ };
+
+ Key keys_[kLength];
+ int results_[kLength];
+
+ friend class Isolate;
+ DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
+};
+
+// Cache for mapping (map, property name) into field offset.
+// Cleared at startup and prior to mark sweep collection.
+class KeyedLookupCache {
+ public:
+ // Lookup field offset for (map, name). If absent, -1 is returned.
+ int Lookup(Handle<Map> map, Handle<Name> name);
+
+ // Update an element in the cache.
+ void Update(Handle<Map> map, Handle<Name> name, int field_offset);
+
+ // Clear the cache.
+ void Clear();
+
+ static const int kLength = 256;
+ static const int kCapacityMask = kLength - 1;
+ static const int kMapHashShift = 5;
+ static const int kHashMask = -4; // Zero the last two bits.
+ static const int kEntriesPerBucket = 4;
+ static const int kEntryLength = 2;
+ static const int kMapIndex = 0;
+ static const int kKeyIndex = 1;
+ static const int kNotFound = -1;
+
+ // kEntriesPerBucket should be a power of 2.
+ STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
+ STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
+
+ private:
+ KeyedLookupCache() {
+ for (int i = 0; i < kLength; ++i) {
+ keys_[i].map = NULL;
+ keys_[i].name = NULL;
+ field_offsets_[i] = kNotFound;
+ }
+ }
+
+ static inline int Hash(Handle<Map> map, Handle<Name> name);
+
+ // Get the address of the keys and field_offsets arrays. Used in
+ // generated code to perform cache lookups.
+ Address keys_address() { return reinterpret_cast<Address>(&keys_); }
+
+ Address field_offsets_address() {
+ return reinterpret_cast<Address>(&field_offsets_);
+ }
+
+ struct Key {
+ Map* map;
+ Name* name;
+ };
+
+ Key keys_[kLength];
+ int field_offsets_[kLength];
+
+ friend class ExternalReference;
+ friend class Isolate;
+ DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LOOKUP_CACHE_H_
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 727465ee80..b6c0b92a17 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -13,7 +13,6 @@
namespace v8 {
namespace internal {
-
// static
LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
Handle<Object> receiver,
@@ -421,11 +420,6 @@ void LookupIterator::Delete() {
isolate_, is_prototype_map
? &RuntimeCallStats::PrototypeObject_DeleteProperty
: &RuntimeCallStats::Object_DeleteProperty);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate_,
- (is_prototype_map
- ? &tracing::TraceEventStatsTable::PrototypeObject_DeleteProperty
- : &tracing::TraceEventStatsTable::Object_DeleteProperty));
PropertyNormalizationMode mode =
is_prototype_map ? KEEP_INOBJECT_PROPERTIES : CLEAR_INOBJECT_PROPERTIES;
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index ffc7904b2a..687c677613 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -43,30 +43,26 @@ class LookupIterator final BASE_EMBEDDED {
LookupIterator(Handle<Object> receiver, Handle<Name> name,
Configuration configuration = DEFAULT)
- : configuration_(ComputeConfiguration(configuration, name)),
- interceptor_state_(InterceptorState::kUninitialized),
- property_details_(PropertyDetails::Empty()),
- isolate_(name->GetIsolate()),
- name_(isolate_->factory()->InternalizeName(name)),
- receiver_(receiver),
- initial_holder_(GetRoot(isolate_, receiver)),
- // kMaxUInt32 isn't a valid index.
- index_(kMaxUInt32),
- number_(DescriptorArray::kNotFound) {
-#ifdef DEBUG
- uint32_t index; // Assert that the name is not an array index.
- DCHECK(!name->AsArrayIndex(&index));
-#endif // DEBUG
- Start<false>();
- }
+ : LookupIterator(name->GetIsolate(), receiver, name, configuration) {}
+
+ LookupIterator(Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+ Configuration configuration = DEFAULT)
+ : LookupIterator(isolate, receiver, name, GetRoot(isolate, receiver),
+ configuration) {}
LookupIterator(Handle<Object> receiver, Handle<Name> name,
Handle<JSReceiver> holder,
Configuration configuration = DEFAULT)
+ : LookupIterator(name->GetIsolate(), receiver, name, holder,
+ configuration) {}
+
+ LookupIterator(Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+ Handle<JSReceiver> holder,
+ Configuration configuration = DEFAULT)
: configuration_(ComputeConfiguration(configuration, name)),
interceptor_state_(InterceptorState::kUninitialized),
property_details_(PropertyDetails::Empty()),
- isolate_(name->GetIsolate()),
+ isolate_(isolate),
name_(isolate_->factory()->InternalizeName(name)),
receiver_(receiver),
initial_holder_(holder),
@@ -82,18 +78,8 @@ class LookupIterator final BASE_EMBEDDED {
LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
Configuration configuration = DEFAULT)
- : configuration_(configuration),
- interceptor_state_(InterceptorState::kUninitialized),
- property_details_(PropertyDetails::Empty()),
- isolate_(isolate),
- receiver_(receiver),
- initial_holder_(GetRoot(isolate, receiver, index)),
- index_(index),
- number_(DescriptorArray::kNotFound) {
- // kMaxUInt32 isn't a valid index.
- DCHECK_NE(kMaxUInt32, index_);
- Start<true>();
- }
+ : LookupIterator(isolate, receiver, index,
+ GetRoot(isolate, receiver, index), configuration) {}
LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
Handle<JSReceiver> holder,
@@ -289,7 +275,7 @@ class LookupIterator final BASE_EMBEDDED {
MUST_USE_RESULT inline JSReceiver* NextHolder(Map* map);
template <bool is_element>
- void Start();
+ V8_EXPORT_PRIVATE void Start();
template <bool is_element>
void NextInternal(Map* map, JSReceiver* holder);
template <bool is_element>
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/machine-type.h
index bcc85b3e7c..e9605d7280 100644
--- a/deps/v8/src/machine-type.h
+++ b/deps/v8/src/machine-type.h
@@ -10,7 +10,7 @@
#include "src/base/bits.h"
#include "src/globals.h"
#include "src/signature.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -22,12 +22,14 @@ enum class MachineRepresentation : uint8_t {
kWord16,
kWord32,
kWord64,
- kFloat32,
- kFloat64, // must follow kFloat32
- kSimd128, // must follow kFloat64
kTaggedSigned,
kTaggedPointer,
- kTagged
+ kTagged,
+ // FP representations must be last, and in order of increasing size.
+ kFloat32,
+ kFloat64,
+ kSimd128,
+ kFirstFPRepresentation = kFloat32
};
const char* MachineReprToString(MachineRepresentation);
@@ -62,6 +64,8 @@ class MachineType {
MachineRepresentation representation() const { return representation_; }
MachineSemantic semantic() const { return semantic_; }
+ bool IsNone() { return representation() == MachineRepresentation::kNone; }
+
bool IsSigned() {
return semantic() == MachineSemantic::kInt32 ||
semantic() == MachineSemantic::kInt64;
@@ -119,6 +123,14 @@ class MachineType {
return MachineType(MachineRepresentation::kWord64,
MachineSemantic::kUint64);
}
+ static MachineType TaggedPointer() {
+ return MachineType(MachineRepresentation::kTaggedPointer,
+ MachineSemantic::kAny);
+ }
+ static MachineType TaggedSigned() {
+ return MachineType(MachineRepresentation::kTaggedSigned,
+ MachineSemantic::kInt32);
+ }
static MachineType AnyTagged() {
return MachineType(MachineRepresentation::kTagged, MachineSemantic::kAny);
}
@@ -161,7 +173,7 @@ class MachineType {
return MachineType(MachineRepresentation::kBit, MachineSemantic::kNone);
}
- static MachineType TypeForRepresentation(MachineRepresentation& rep,
+ static MachineType TypeForRepresentation(const MachineRepresentation& rep,
bool isSigned = true) {
switch (rep) {
case MachineRepresentation::kNone:
@@ -184,6 +196,10 @@ class MachineType {
return MachineType::Simd128();
case MachineRepresentation::kTagged:
return MachineType::AnyTagged();
+ case MachineRepresentation::kTaggedSigned:
+ return MachineType::TaggedSigned();
+ case MachineRepresentation::kTaggedPointer:
+ return MachineType::TaggedPointer();
default:
UNREACHABLE();
return MachineType::None();
@@ -204,14 +220,22 @@ V8_INLINE size_t hash_value(MachineType type) {
static_cast<size_t>(type.semantic()) * 16;
}
-std::ostream& operator<<(std::ostream& os, MachineRepresentation rep);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ MachineRepresentation rep);
std::ostream& operator<<(std::ostream& os, MachineSemantic type);
std::ostream& operator<<(std::ostream& os, MachineType type);
inline bool IsFloatingPoint(MachineRepresentation rep) {
- return rep == MachineRepresentation::kFloat32 ||
- rep == MachineRepresentation::kFloat64 ||
- rep == MachineRepresentation::kSimd128;
+ return rep >= MachineRepresentation::kFirstFPRepresentation;
+}
+
+inline bool CanBeTaggedPointer(MachineRepresentation rep) {
+ return rep == MachineRepresentation::kTagged ||
+ rep == MachineRepresentation::kTaggedPointer;
+}
+
+inline bool IsAnyTagged(MachineRepresentation rep) {
+ return CanBeTaggedPointer(rep) || rep == MachineRepresentation::kTaggedSigned;
}
// Gets the log2 of the element size in bytes of the machine type.
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 5d03318963..cc6349d73c 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -164,71 +164,63 @@ std::unique_ptr<char[]> MessageHandler::GetLocalizedMessage(
return GetMessage(isolate, data)->ToCString(DISALLOW_NULLS);
}
-
-CallSite::CallSite(Isolate* isolate, Handle<JSObject> call_site_obj)
- : isolate_(isolate) {
- Handle<Object> maybe_function = JSObject::GetDataProperty(
- call_site_obj, isolate->factory()->call_site_function_symbol());
- if (maybe_function->IsJSFunction()) {
- // javascript
- fun_ = Handle<JSFunction>::cast(maybe_function);
- receiver_ = JSObject::GetDataProperty(
- call_site_obj, isolate->factory()->call_site_receiver_symbol());
- } else {
- Handle<Object> maybe_wasm_func_index = JSObject::GetDataProperty(
- call_site_obj, isolate->factory()->call_site_wasm_func_index_symbol());
- if (!maybe_wasm_func_index->IsSmi()) {
- // invalid: neither javascript nor wasm
- return;
- }
- // wasm
- wasm_obj_ = Handle<JSObject>::cast(JSObject::GetDataProperty(
- call_site_obj, isolate->factory()->call_site_wasm_obj_symbol()));
- wasm_func_index_ = Smi::cast(*maybe_wasm_func_index)->value();
- DCHECK(static_cast<int>(wasm_func_index_) >= 0);
- }
-
- CHECK(JSObject::GetDataProperty(
- call_site_obj, isolate->factory()->call_site_position_symbol())
- ->ToInt32(&pos_));
+void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
+ int frame_ix) {
+ DCHECK(!array->IsWasmFrame(frame_ix));
+ isolate_ = isolate;
+ receiver_ = handle(array->Receiver(frame_ix), isolate);
+ function_ = handle(array->Function(frame_ix), isolate);
+ code_ = handle(array->Code(frame_ix), isolate);
+ offset_ = array->Offset(frame_ix)->value();
+
+ const int flags = array->Flags(frame_ix)->value();
+ force_constructor_ = (flags & FrameArray::kForceConstructor) != 0;
+ is_strict_ = (flags & FrameArray::kIsStrict) != 0;
}
-
-Handle<Object> CallSite::GetFileName() {
- if (!IsJavaScript()) return isolate_->factory()->null_value();
- Object* script = fun_->shared()->script();
- if (!script->IsScript()) return isolate_->factory()->null_value();
- return Handle<Object>(Script::cast(script)->name(), isolate_);
+JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
+ Handle<JSFunction> function,
+ Handle<AbstractCode> code, int offset)
+ : isolate_(isolate),
+ receiver_(receiver),
+ function_(function),
+ code_(code),
+ offset_(offset),
+ force_constructor_(false),
+ is_strict_(false) {}
+
+JSStackFrame::JSStackFrame() {}
+
+Handle<Object> JSStackFrame::GetFunction() const {
+ return Handle<Object>::cast(function_);
}
+Handle<Object> JSStackFrame::GetFileName() {
+ if (!HasScript()) return isolate_->factory()->null_value();
+ return handle(GetScript()->name(), isolate_);
+}
-Handle<Object> CallSite::GetFunctionName() {
- if (IsWasm()) {
- return wasm::GetWasmFunctionNameOrNull(isolate_, wasm_obj_,
- wasm_func_index_);
- }
- Handle<String> result = JSFunction::GetName(fun_);
+Handle<Object> JSStackFrame::GetFunctionName() {
+ Handle<String> result = JSFunction::GetName(function_);
if (result->length() != 0) return result;
- Handle<Object> script(fun_->shared()->script(), isolate_);
- if (script->IsScript() &&
- Handle<Script>::cast(script)->compilation_type() ==
- Script::COMPILATION_TYPE_EVAL) {
+ if (HasScript() &&
+ GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
return isolate_->factory()->eval_string();
}
return isolate_->factory()->null_value();
}
-Handle<Object> CallSite::GetScriptNameOrSourceUrl() {
- if (!IsJavaScript()) return isolate_->factory()->null_value();
- Object* script_obj = fun_->shared()->script();
- if (!script_obj->IsScript()) return isolate_->factory()->null_value();
- Handle<Script> script(Script::cast(script_obj), isolate_);
+Handle<Object> JSStackFrame::GetScriptNameOrSourceUrl() {
+ if (!HasScript()) return isolate_->factory()->null_value();
+ Handle<Script> script = GetScript();
Object* source_url = script->source_url();
- if (source_url->IsString()) return Handle<Object>(source_url, isolate_);
- return Handle<Object>(script->name(), isolate_);
+ return (source_url->IsString()) ? handle(source_url, isolate_)
+ : handle(script->name(), isolate_);
}
+namespace {
+
bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
Handle<JSFunction> fun,
LookupIterator::Configuration config) {
@@ -246,12 +238,13 @@ bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
return false;
}
+} // namespace
-Handle<Object> CallSite::GetMethodName() {
- if (!IsJavaScript() || receiver_->IsNull(isolate_) ||
- receiver_->IsUndefined(isolate_)) {
+Handle<Object> JSStackFrame::GetMethodName() {
+ if (receiver_->IsNull(isolate_) || receiver_->IsUndefined(isolate_)) {
return isolate_->factory()->null_value();
}
+
Handle<JSReceiver> receiver =
Object::ToObject(isolate_, receiver_).ToHandleChecked();
if (!receiver->IsJSObject()) {
@@ -259,7 +252,7 @@ Handle<Object> CallSite::GetMethodName() {
}
Handle<JSObject> obj = Handle<JSObject>::cast(receiver);
- Handle<Object> function_name(fun_->shared()->name(), isolate_);
+ Handle<Object> function_name(function_->shared()->name(), isolate_);
if (function_name->IsString()) {
Handle<String> name = Handle<String>::cast(function_name);
// ES2015 gives getters and setters name prefixes which must
@@ -268,7 +261,7 @@ Handle<Object> CallSite::GetMethodName() {
name->IsUtf8EqualTo(CStrVector("set "), true)) {
name = isolate_->factory()->NewProperSubString(name, 4, name->length());
}
- if (CheckMethodName(isolate_, obj, name, fun_,
+ if (CheckMethodName(isolate_, obj, name, function_,
LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR)) {
return name;
}
@@ -288,7 +281,7 @@ Handle<Object> CallSite::GetMethodName() {
HandleScope inner_scope(isolate_);
if (!keys->get(i)->IsName()) continue;
Handle<Name> name_key(Name::cast(keys->get(i)), isolate_);
- if (!CheckMethodName(isolate_, current_obj, name_key, fun_,
+ if (!CheckMethodName(isolate_, current_obj, name_key, function_,
LookupIterator::OWN_SKIP_INTERCEPTOR))
continue;
// Return null in case of duplicates to avoid confusion.
@@ -301,20 +294,6 @@ Handle<Object> CallSite::GetMethodName() {
return isolate_->factory()->null_value();
}
-Handle<Object> CallSite::GetTypeName() {
- // TODO(jgruber): Check for strict/constructor here as in
- // CallSitePrototypeGetThis.
-
- if (receiver_->IsNull(isolate_) || receiver_->IsUndefined(isolate_))
- return isolate_->factory()->null_value();
-
- if (receiver_->IsJSProxy()) return isolate_->factory()->Proxy_string();
-
- Handle<JSReceiver> receiver_object =
- Object::ToObject(isolate_, receiver_).ToHandleChecked();
- return JSReceiver::GetConstructorName(receiver_object);
-}
-
namespace {
Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
@@ -414,126 +393,344 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
} // namespace
-Handle<Object> CallSite::GetEvalOrigin() {
- if (IsWasm()) return isolate_->factory()->undefined_value();
- DCHECK(IsJavaScript());
+Handle<Object> JSStackFrame::GetTypeName() {
+ // TODO(jgruber): Check for strict/constructor here as in
+ // CallSitePrototypeGetThis.
+
+ if (receiver_->IsNull(isolate_) || receiver_->IsUndefined(isolate_))
+ return isolate_->factory()->null_value();
+
+ if (receiver_->IsJSProxy()) return isolate_->factory()->Proxy_string();
+
+ Handle<JSReceiver> receiver_object =
+ Object::ToObject(isolate_, receiver_).ToHandleChecked();
+ return JSReceiver::GetConstructorName(receiver_object);
+}
- Handle<Object> script = handle(fun_->shared()->script(), isolate_);
- if (!script->IsScript()) return isolate_->factory()->undefined_value();
+Handle<Object> JSStackFrame::GetEvalOrigin() {
+ if (!HasScript()) return isolate_->factory()->undefined_value();
+ return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked();
+}
- return FormatEvalOrigin(isolate_, Handle<Script>::cast(script))
- .ToHandleChecked();
+int JSStackFrame::GetLineNumber() {
+ DCHECK_LE(0, GetPosition());
+ if (HasScript()) return Script::GetLineNumber(GetScript(), GetPosition()) + 1;
+ return -1;
}
-int CallSite::GetLineNumber() {
- if (pos_ >= 0 && IsJavaScript()) {
- Handle<Object> script_obj(fun_->shared()->script(), isolate_);
- if (script_obj->IsScript()) {
- Handle<Script> script = Handle<Script>::cast(script_obj);
- return Script::GetLineNumber(script, pos_) + 1;
- }
+int JSStackFrame::GetColumnNumber() {
+ DCHECK_LE(0, GetPosition());
+ if (HasScript()) {
+ return Script::GetColumnNumber(GetScript(), GetPosition()) + 1;
}
return -1;
}
+bool JSStackFrame::IsNative() {
+ return HasScript() && GetScript()->type() == Script::TYPE_NATIVE;
+}
+
+bool JSStackFrame::IsToplevel() {
+ return receiver_->IsJSGlobalProxy() || receiver_->IsNull(isolate_) ||
+ receiver_->IsUndefined(isolate_);
+}
+
+bool JSStackFrame::IsEval() {
+ return HasScript() &&
+ GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL;
+}
+
+bool JSStackFrame::IsConstructor() {
+ if (force_constructor_) return true;
+ if (!receiver_->IsJSObject()) return false;
+ Handle<Object> constructor =
+ JSReceiver::GetDataProperty(Handle<JSObject>::cast(receiver_),
+ isolate_->factory()->constructor_string());
+ return constructor.is_identical_to(function_);
+}
+
+namespace {
-int CallSite::GetColumnNumber() {
- if (pos_ >= 0 && IsJavaScript()) {
- Handle<Object> script_obj(fun_->shared()->script(), isolate_);
- if (script_obj->IsScript()) {
- Handle<Script> script = Handle<Script>::cast(script_obj);
- return Script::GetColumnNumber(script, pos_) + 1;
+bool IsNonEmptyString(Handle<Object> object) {
+ return (object->IsString() && String::cast(*object)->length() > 0);
+}
+
+void AppendFileLocation(Isolate* isolate, JSStackFrame* call_site,
+ IncrementalStringBuilder* builder) {
+ if (call_site->IsNative()) {
+ builder->AppendCString("native");
+ return;
+ }
+
+ Handle<Object> file_name = call_site->GetScriptNameOrSourceUrl();
+ if (!file_name->IsString() && call_site->IsEval()) {
+ Handle<Object> eval_origin = call_site->GetEvalOrigin();
+ DCHECK(eval_origin->IsString());
+ builder->AppendString(Handle<String>::cast(eval_origin));
+ builder->AppendCString(", "); // Expecting source position to follow.
+ }
+
+ if (IsNonEmptyString(file_name)) {
+ builder->AppendString(Handle<String>::cast(file_name));
+ } else {
+ // Source code does not originate from a file and is not native, but we
+ // can still get the source position inside the source string, e.g. in
+ // an eval string.
+ builder->AppendCString("<anonymous>");
+ }
+
+ int line_number = call_site->GetLineNumber();
+ if (line_number != -1) {
+ builder->AppendCharacter(':');
+ Handle<String> line_string = isolate->factory()->NumberToString(
+ handle(Smi::FromInt(line_number), isolate), isolate);
+ builder->AppendString(line_string);
+
+ int column_number = call_site->GetColumnNumber();
+ if (column_number != -1) {
+ builder->AppendCharacter(':');
+ Handle<String> column_string = isolate->factory()->NumberToString(
+ handle(Smi::FromInt(column_number), isolate), isolate);
+ builder->AppendString(column_string);
}
}
- return -1;
}
+int StringIndexOf(Isolate* isolate, Handle<String> subject,
+ Handle<String> pattern) {
+ if (pattern->length() > subject->length()) return -1;
+ return String::IndexOf(isolate, subject, pattern, 0);
+}
+
+// Returns true iff
+// 1. the subject ends with '.' + pattern, or
+// 2. subject == pattern.
+bool StringEndsWithMethodName(Isolate* isolate, Handle<String> subject,
+ Handle<String> pattern) {
+ if (String::Equals(subject, pattern)) return true;
+
+ FlatStringReader subject_reader(isolate, String::Flatten(subject));
+ FlatStringReader pattern_reader(isolate, String::Flatten(pattern));
+
+ int pattern_index = pattern_reader.length() - 1;
+ int subject_index = subject_reader.length() - 1;
+ for (int i = 0; i <= pattern_reader.length(); i++) { // Iterate over len + 1.
+ if (subject_index < 0) {
+ return false;
+ }
+
+ const uc32 subject_char = subject_reader.Get(subject_index);
+ if (i == pattern_reader.length()) {
+ if (subject_char != '.') return false;
+ } else if (subject_char != pattern_reader.Get(pattern_index)) {
+ return false;
+ }
+
+ pattern_index--;
+ subject_index--;
+ }
+
+ return true;
+}
+
+void AppendMethodCall(Isolate* isolate, JSStackFrame* call_site,
+ IncrementalStringBuilder* builder) {
+ Handle<Object> type_name = call_site->GetTypeName();
+ Handle<Object> method_name = call_site->GetMethodName();
+ Handle<Object> function_name = call_site->GetFunctionName();
+
+ if (IsNonEmptyString(function_name)) {
+ Handle<String> function_string = Handle<String>::cast(function_name);
+ if (IsNonEmptyString(type_name)) {
+ Handle<String> type_string = Handle<String>::cast(type_name);
+ bool starts_with_type_name =
+ (StringIndexOf(isolate, function_string, type_string) == 0);
+ if (!starts_with_type_name) {
+ builder->AppendString(type_string);
+ builder->AppendCharacter('.');
+ }
+ }
+ builder->AppendString(function_string);
-bool CallSite::IsNative() {
- if (!IsJavaScript()) return false;
- Handle<Object> script(fun_->shared()->script(), isolate_);
- return script->IsScript() &&
- Handle<Script>::cast(script)->type() == Script::TYPE_NATIVE;
+ if (IsNonEmptyString(method_name)) {
+ Handle<String> method_string = Handle<String>::cast(method_name);
+ if (!StringEndsWithMethodName(isolate, function_string, method_string)) {
+ builder->AppendCString(" [as ");
+ builder->AppendString(method_string);
+ builder->AppendCharacter(']');
+ }
+ }
+ } else {
+ builder->AppendString(Handle<String>::cast(type_name));
+ builder->AppendCharacter('.');
+ if (IsNonEmptyString(method_name)) {
+ builder->AppendString(Handle<String>::cast(method_name));
+ } else {
+ builder->AppendCString("<anonymous>");
+ }
+ }
}
+} // namespace
-bool CallSite::IsToplevel() {
- if (IsWasm()) return false;
- return receiver_->IsJSGlobalProxy() || receiver_->IsNull(isolate_) ||
- receiver_->IsUndefined(isolate_);
+MaybeHandle<String> JSStackFrame::ToString() {
+ IncrementalStringBuilder builder(isolate_);
+
+ Handle<Object> function_name = GetFunctionName();
+
+ const bool is_toplevel = IsToplevel();
+ const bool is_constructor = IsConstructor();
+ const bool is_method_call = !(is_toplevel || is_constructor);
+
+ if (is_method_call) {
+ AppendMethodCall(isolate_, this, &builder);
+ } else if (is_constructor) {
+ builder.AppendCString("new ");
+ if (IsNonEmptyString(function_name)) {
+ builder.AppendString(Handle<String>::cast(function_name));
+ } else {
+ builder.AppendCString("<anonymous>");
+ }
+ } else if (IsNonEmptyString(function_name)) {
+ builder.AppendString(Handle<String>::cast(function_name));
+ } else {
+ AppendFileLocation(isolate_, this, &builder);
+ RETURN_RESULT(isolate_, builder.Finish(), String);
+ }
+
+ builder.AppendCString(" (");
+ AppendFileLocation(isolate_, this, &builder);
+ builder.AppendCString(")");
+
+ RETURN_RESULT(isolate_, builder.Finish(), String);
}
+int JSStackFrame::GetPosition() const { return code_->SourcePosition(offset_); }
-bool CallSite::IsEval() {
- if (!IsJavaScript()) return false;
- Handle<Object> script(fun_->shared()->script(), isolate_);
- return script->IsScript() &&
- Handle<Script>::cast(script)->compilation_type() ==
- Script::COMPILATION_TYPE_EVAL;
+bool JSStackFrame::HasScript() const {
+ return function_->shared()->script()->IsScript();
}
+Handle<Script> JSStackFrame::GetScript() const {
+ return handle(Script::cast(function_->shared()->script()), isolate_);
+}
-bool CallSite::IsConstructor() {
- // Builtin exit frames mark constructors by passing a special symbol as the
- // receiver.
- Object* ctor_symbol = isolate_->heap()->call_site_constructor_symbol();
- if (*receiver_ == ctor_symbol) return true;
- if (!IsJavaScript() || !receiver_->IsJSObject()) return false;
- Handle<Object> constructor =
- JSReceiver::GetDataProperty(Handle<JSObject>::cast(receiver_),
- isolate_->factory()->constructor_string());
- return constructor.is_identical_to(fun_);
+void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
+ int frame_ix) {
+ DCHECK(array->IsWasmFrame(frame_ix));
+ isolate_ = isolate;
+ wasm_obj_ = handle(array->WasmObject(frame_ix), isolate);
+ wasm_func_index_ = array->WasmFunctionIndex(frame_ix)->value();
+ code_ = handle(array->Code(frame_ix), isolate);
+ offset_ = array->Offset(frame_ix)->value();
+}
+
+Handle<Object> WasmStackFrame::GetFunction() const {
+ Handle<Object> obj(Smi::FromInt(wasm_func_index_), isolate_);
+ return obj;
+}
+
+Handle<Object> WasmStackFrame::GetFunctionName() {
+ return wasm::GetWasmFunctionNameOrNull(isolate_, wasm_obj_, wasm_func_index_);
+}
+
+MaybeHandle<String> WasmStackFrame::ToString() {
+ IncrementalStringBuilder builder(isolate_);
+
+ Handle<Object> name = GetFunctionName();
+ if (name->IsNull(isolate_)) {
+ builder.AppendCString("<WASM UNNAMED>");
+ } else {
+ DCHECK(name->IsString());
+ builder.AppendString(Handle<String>::cast(name));
+ }
+
+ builder.AppendCString(" (<WASM>[");
+
+ Handle<Smi> ix(Smi::FromInt(wasm_func_index_), isolate_);
+ builder.AppendString(isolate_->factory()->NumberToString(ix));
+
+ builder.AppendCString("]+");
+
+ Handle<Object> pos(Smi::FromInt(GetPosition()), isolate_);
+ builder.AppendString(isolate_->factory()->NumberToString(pos));
+ builder.AppendCString(")");
+
+ return builder.Finish();
+}
+
+int WasmStackFrame::GetPosition() const {
+ return (offset_ < 0) ? (-1 - offset_) : code_->SourcePosition(offset_);
+}
+
+Handle<Object> WasmStackFrame::Null() const {
+ return isolate_->factory()->null_value();
+}
+
+FrameArrayIterator::FrameArrayIterator(Isolate* isolate,
+ Handle<FrameArray> array, int frame_ix)
+ : isolate_(isolate), array_(array), next_frame_ix_(frame_ix) {}
+
+bool FrameArrayIterator::HasNext() const {
+ return (next_frame_ix_ < array_->FrameCount());
+}
+
+void FrameArrayIterator::Next() { next_frame_ix_++; }
+
+StackFrameBase* FrameArrayIterator::Frame() {
+ DCHECK(HasNext());
+ const int flags = array_->Flags(next_frame_ix_)->value();
+ const bool is_js_frame = (flags & FrameArray::kIsWasmFrame) == 0;
+ if (is_js_frame) {
+ js_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
+ return &js_frame_;
+ } else {
+ wasm_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
+ return &wasm_frame_;
+ }
}
namespace {
-// Convert the raw frames as written by Isolate::CaptureSimpleStackTrace into
-// a vector of JS CallSite objects.
-MaybeHandle<FixedArray> GetStackFrames(Isolate* isolate,
- Handle<Object> raw_stack) {
- DCHECK(raw_stack->IsJSArray());
- Handle<JSArray> raw_stack_array = Handle<JSArray>::cast(raw_stack);
+MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
+ Handle<FrameArray> frame_array,
+ int frame_index) {
+ Handle<JSFunction> target =
+ handle(isolate->native_context()->callsite_function(), isolate);
- DCHECK(raw_stack_array->elements()->IsFixedArray());
- Handle<FixedArray> raw_stack_elements =
- handle(FixedArray::cast(raw_stack_array->elements()), isolate);
+ Handle<JSObject> obj;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, JSObject::New(target, target),
+ Object);
- const int raw_stack_len = raw_stack_elements->length();
- DCHECK(raw_stack_len % 4 == 1); // Multiples of 4 plus sloppy frames count.
- const int frame_count = (raw_stack_len - 1) / 4;
+ Handle<Symbol> key = isolate->factory()->call_site_frame_array_symbol();
+ RETURN_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+ obj, key, frame_array, DONT_ENUM),
+ Object);
- Handle<Object> sloppy_frames_obj =
- FixedArray::get(*raw_stack_elements, 0, isolate);
- int sloppy_frames = Handle<Smi>::cast(sloppy_frames_obj)->value();
+ key = isolate->factory()->call_site_frame_index_symbol();
+ Handle<Object> value(Smi::FromInt(frame_index), isolate);
+ RETURN_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+ obj, key, value, DONT_ENUM),
+ Object);
- int dst_ix = 0;
- Handle<FixedArray> frames = isolate->factory()->NewFixedArray(frame_count);
- for (int i = 1; i < raw_stack_len; i += 4) {
- Handle<Object> recv = FixedArray::get(*raw_stack_elements, i, isolate);
- Handle<Object> fun = FixedArray::get(*raw_stack_elements, i + 1, isolate);
- Handle<AbstractCode> code = Handle<AbstractCode>::cast(
- FixedArray::get(*raw_stack_elements, i + 2, isolate));
- Handle<Smi> pc =
- Handle<Smi>::cast(FixedArray::get(*raw_stack_elements, i + 3, isolate));
-
- Handle<Object> pos =
- (fun->IsSmi() && pc->value() < 0)
- ? handle(Smi::FromInt(-1 - pc->value()), isolate)
- : handle(Smi::FromInt(code->SourcePosition(pc->value())), isolate);
-
- sloppy_frames--;
- Handle<Object> strict = isolate->factory()->ToBoolean(sloppy_frames < 0);
-
- Handle<Object> callsite;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, callsite,
- CallSiteUtils::Construct(isolate, recv, fun, pos, strict), FixedArray);
+ return obj;
+}
+
+// Convert the raw frames as written by Isolate::CaptureSimpleStackTrace into
+// a JSArray of JSCallSite objects.
+MaybeHandle<JSArray> GetStackFrames(Isolate* isolate,
+ Handle<FrameArray> elems) {
+ const int frame_count = elems->FrameCount();
- frames->set(dst_ix++, *callsite);
+ Handle<FixedArray> frames = isolate->factory()->NewFixedArray(frame_count);
+ for (int i = 0; i < frame_count; i++) {
+ Handle<Object> site;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, site,
+ ConstructCallSite(isolate, elems, i), JSArray);
+ frames->set(i, *site);
}
- DCHECK_EQ(frame_count, dst_ix);
- return frames;
+ return isolate->factory()->NewJSArrayWithElements(frames);
}
MaybeHandle<Object> AppendErrorString(Isolate* isolate, Handle<Object> error,
@@ -590,11 +787,11 @@ class PrepareStackTraceScope {
MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Handle<JSObject> error,
Handle<Object> raw_stack) {
- // Create JS CallSite objects from the raw stack frame array.
+ DCHECK(raw_stack->IsJSArray());
+ Handle<JSArray> raw_stack_array = Handle<JSArray>::cast(raw_stack);
- Handle<FixedArray> frames;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, frames,
- GetStackFrames(isolate, raw_stack), Object);
+ DCHECK(raw_stack_array->elements()->IsFixedArray());
+ Handle<FrameArray> elems(FrameArray::cast(raw_stack_array->elements()));
// If there's a user-specified "prepareStackFrames" function, call it on the
// frames and use its result.
@@ -609,12 +806,16 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
const bool in_recursion = isolate->formatting_stack_trace();
if (prepare_stack_trace->IsJSFunction() && !in_recursion) {
PrepareStackTraceScope scope(isolate);
- Handle<JSArray> array = isolate->factory()->NewJSArrayWithElements(frames);
+
+ Handle<JSArray> sites;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, sites, GetStackFrames(isolate, elems),
+ Object);
const int argc = 2;
ScopedVector<Handle<Object>> argv(argc);
+
argv[0] = error;
- argv[1] = array;
+ argv[1] = sites;
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -625,17 +826,18 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
return result;
}
+ // Otherwise, run our internal formatting logic.
+
IncrementalStringBuilder builder(isolate);
RETURN_ON_EXCEPTION(isolate, AppendErrorString(isolate, error, &builder),
Object);
- for (int i = 0; i < frames->length(); i++) {
+ for (FrameArrayIterator it(isolate, elems); it.HasNext(); it.Next()) {
builder.AppendCString("\n at ");
- Handle<Object> frame = FixedArray::get(*frames, i, isolate);
- MaybeHandle<String> maybe_frame_string =
- CallSiteUtils::ToString(isolate, frame);
+ StackFrameBase* frame = it.Frame();
+ MaybeHandle<String> maybe_frame_string = frame->ToString();
if (maybe_frame_string.is_null()) {
// CallSite.toString threw. Try to return a string representation of the
// thrown exception instead.
@@ -902,290 +1104,5 @@ MaybeHandle<Object> ErrorUtils::MakeGenericError(
no_caller, false);
}
-#define SET_CALLSITE_PROPERTY(target, key, value) \
- RETURN_ON_EXCEPTION( \
- isolate, JSObject::SetOwnPropertyIgnoreAttributes( \
- target, isolate->factory()->key(), value, DONT_ENUM), \
- Object)
-
-MaybeHandle<Object> CallSiteUtils::Construct(Isolate* isolate,
- Handle<Object> receiver,
- Handle<Object> fun,
- Handle<Object> pos,
- Handle<Object> strict_mode) {
- // Create the JS object.
-
- Handle<JSFunction> target =
- handle(isolate->native_context()->callsite_function(), isolate);
-
- Handle<JSObject> obj;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, JSObject::New(target, target),
- Object);
-
- // For wasm frames, receiver is the wasm object and fun is the function index
- // instead of an actual function.
- const bool is_wasm_object =
- receiver->IsJSObject() && wasm::IsWasmObject(JSObject::cast(*receiver));
- if (!fun->IsJSFunction() && !is_wasm_object) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kCallSiteExpectsFunction,
- Object::TypeOf(isolate, receiver),
- Object::TypeOf(isolate, fun)),
- Object);
- }
-
- if (is_wasm_object) {
- DCHECK(fun->IsSmi());
- DCHECK(wasm::GetNumberOfFunctions(JSObject::cast(*receiver)) >
- Smi::cast(*fun)->value());
-
- SET_CALLSITE_PROPERTY(obj, call_site_wasm_obj_symbol, receiver);
- SET_CALLSITE_PROPERTY(obj, call_site_wasm_func_index_symbol, fun);
- } else {
- DCHECK(fun->IsJSFunction());
- SET_CALLSITE_PROPERTY(obj, call_site_receiver_symbol, receiver);
- SET_CALLSITE_PROPERTY(obj, call_site_function_symbol, fun);
- }
-
- DCHECK(pos->IsSmi());
- SET_CALLSITE_PROPERTY(obj, call_site_position_symbol, pos);
- SET_CALLSITE_PROPERTY(
- obj, call_site_strict_symbol,
- isolate->factory()->ToBoolean(strict_mode->BooleanValue()));
-
- return obj;
-}
-
-#undef SET_CALLSITE_PROPERTY
-
-namespace {
-
-bool IsNonEmptyString(Handle<Object> object) {
- return (object->IsString() && String::cast(*object)->length() > 0);
-}
-
-MaybeHandle<JSObject> AppendWasmToString(Isolate* isolate,
- Handle<JSObject> recv,
- CallSite* call_site,
- IncrementalStringBuilder* builder) {
- Handle<Object> name = call_site->GetFunctionName();
- if (name->IsNull(isolate)) {
- builder->AppendCString("<WASM UNNAMED>");
- } else {
- DCHECK(name->IsString());
- builder->AppendString(Handle<String>::cast(name));
- }
-
- builder->AppendCString(" (<WASM>[");
-
- Handle<String> ix = isolate->factory()->NumberToString(
- handle(Smi::FromInt(call_site->wasm_func_index()), isolate));
- builder->AppendString(ix);
-
- builder->AppendCString("]+");
-
- Handle<Object> pos;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, pos, JSObject::GetProperty(
- recv, isolate->factory()->call_site_position_symbol()),
- JSObject);
- DCHECK(pos->IsNumber());
- builder->AppendString(isolate->factory()->NumberToString(pos));
- builder->AppendCString(")");
-
- return recv;
-}
-
-MaybeHandle<JSObject> AppendFileLocation(Isolate* isolate,
- Handle<JSObject> recv,
- CallSite* call_site,
- IncrementalStringBuilder* builder) {
- if (call_site->IsNative()) {
- builder->AppendCString("native");
- return recv;
- }
-
- Handle<Object> file_name = call_site->GetScriptNameOrSourceUrl();
- if (!file_name->IsString() && call_site->IsEval()) {
- Handle<Object> eval_origin = call_site->GetEvalOrigin();
- DCHECK(eval_origin->IsString());
- builder->AppendString(Handle<String>::cast(eval_origin));
- builder->AppendCString(", "); // Expecting source position to follow.
- }
-
- if (IsNonEmptyString(file_name)) {
- builder->AppendString(Handle<String>::cast(file_name));
- } else {
- // Source code does not originate from a file and is not native, but we
- // can still get the source position inside the source string, e.g. in
- // an eval string.
- builder->AppendCString("<anonymous>");
- }
-
- int line_number = call_site->GetLineNumber();
- if (line_number != -1) {
- builder->AppendCharacter(':');
- Handle<String> line_string = isolate->factory()->NumberToString(
- handle(Smi::FromInt(line_number), isolate), isolate);
- builder->AppendString(line_string);
-
- int column_number = call_site->GetColumnNumber();
- if (column_number != -1) {
- builder->AppendCharacter(':');
- Handle<String> column_string = isolate->factory()->NumberToString(
- handle(Smi::FromInt(column_number), isolate), isolate);
- builder->AppendString(column_string);
- }
- }
-
- return recv;
-}
-
-int StringIndexOf(Isolate* isolate, Handle<String> subject,
- Handle<String> pattern) {
- if (pattern->length() > subject->length()) return -1;
- return String::IndexOf(isolate, subject, pattern, 0);
-}
-
-// Returns true iff
-// 1. the subject ends with '.' + pattern, or
-// 2. subject == pattern.
-bool StringEndsWithMethodName(Isolate* isolate, Handle<String> subject,
- Handle<String> pattern) {
- if (String::Equals(subject, pattern)) return true;
-
- FlatStringReader subject_reader(isolate, String::Flatten(subject));
- FlatStringReader pattern_reader(isolate, String::Flatten(pattern));
-
- int pattern_index = pattern_reader.length() - 1;
- int subject_index = subject_reader.length() - 1;
- for (int i = 0; i <= pattern_reader.length(); i++) { // Iterate over len + 1.
- if (subject_index < 0) {
- return false;
- }
-
- const uc32 subject_char = subject_reader.Get(subject_index);
- if (i == pattern_reader.length()) {
- if (subject_char != '.') return false;
- } else if (subject_char != pattern_reader.Get(pattern_index)) {
- return false;
- }
-
- pattern_index--;
- subject_index--;
- }
-
- return true;
-}
-
-MaybeHandle<JSObject> AppendMethodCall(Isolate* isolate, Handle<JSObject> recv,
- CallSite* call_site,
- IncrementalStringBuilder* builder) {
- Handle<Object> type_name = call_site->GetTypeName();
- Handle<Object> method_name = call_site->GetMethodName();
- Handle<Object> function_name = call_site->GetFunctionName();
-
- if (IsNonEmptyString(function_name)) {
- Handle<String> function_string = Handle<String>::cast(function_name);
- if (IsNonEmptyString(type_name)) {
- Handle<String> type_string = Handle<String>::cast(type_name);
- bool starts_with_type_name =
- (StringIndexOf(isolate, function_string, type_string) == 0);
- if (!starts_with_type_name) {
- builder->AppendString(type_string);
- builder->AppendCharacter('.');
- }
- }
- builder->AppendString(function_string);
-
- if (IsNonEmptyString(method_name)) {
- Handle<String> method_string = Handle<String>::cast(method_name);
- if (!StringEndsWithMethodName(isolate, function_string, method_string)) {
- builder->AppendCString(" [as ");
- builder->AppendString(method_string);
- builder->AppendCharacter(']');
- }
- }
- } else {
- builder->AppendString(Handle<String>::cast(type_name));
- builder->AppendCharacter('.');
- if (IsNonEmptyString(method_name)) {
- builder->AppendString(Handle<String>::cast(method_name));
- } else {
- builder->AppendCString("<anonymous>");
- }
- }
-
- return recv;
-}
-
-} // namespace
-
-MaybeHandle<String> CallSiteUtils::ToString(Isolate* isolate,
- Handle<Object> receiver) {
- if (!receiver->IsJSObject()) {
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- isolate->factory()->NewStringFromAsciiChecked("toString"),
- receiver),
- String);
- }
- Handle<JSObject> recv = Handle<JSObject>::cast(receiver);
-
- if (!JSReceiver::HasOwnProperty(
- recv, isolate->factory()->call_site_position_symbol())
- .FromMaybe(false)) {
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kCallSiteMethod,
- isolate->factory()->NewStringFromAsciiChecked("toString")),
- String);
- }
-
- IncrementalStringBuilder builder(isolate);
-
- CallSite call_site(isolate, recv);
- if (call_site.IsWasm()) {
- RETURN_ON_EXCEPTION(isolate,
- AppendWasmToString(isolate, recv, &call_site, &builder),
- String);
- RETURN_RESULT(isolate, builder.Finish(), String);
- }
-
- DCHECK(!call_site.IsWasm());
- Handle<Object> function_name = call_site.GetFunctionName();
-
- const bool is_toplevel = call_site.IsToplevel();
- const bool is_constructor = call_site.IsConstructor();
- const bool is_method_call = !(is_toplevel || is_constructor);
-
- if (is_method_call) {
- RETURN_ON_EXCEPTION(
- isolate, AppendMethodCall(isolate, recv, &call_site, &builder), String);
- } else if (is_constructor) {
- builder.AppendCString("new ");
- if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
- } else {
- builder.AppendCString("<anonymous>");
- }
- } else if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
- } else {
- RETURN_ON_EXCEPTION(isolate,
- AppendFileLocation(isolate, recv, &call_site, &builder),
- String);
- RETURN_RESULT(isolate, builder.Finish(), String);
- }
-
- builder.AppendCString(" (");
- RETURN_ON_EXCEPTION(
- isolate, AppendFileLocation(isolate, recv, &call_site, &builder), String);
- builder.AppendCString(")");
-
- RETURN_RESULT(isolate, builder.Finish(), String);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index cf49ac9c5c..e7bbcc34c2 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -19,6 +19,8 @@ namespace v8 {
namespace internal {
// Forward declarations.
+class AbstractCode;
+class FrameArray;
class JSMessageObject;
class LookupIterator;
class SourceInfo;
@@ -42,38 +44,142 @@ class MessageLocation {
Handle<JSFunction> function_;
};
-
-class CallSite {
+class StackFrameBase {
public:
- CallSite(Isolate* isolate, Handle<JSObject> call_site_obj);
-
- Handle<Object> GetFileName();
- Handle<Object> GetFunctionName();
- Handle<Object> GetScriptNameOrSourceUrl();
- Handle<Object> GetMethodName();
- Handle<Object> GetTypeName();
- Handle<Object> GetEvalOrigin();
+ virtual ~StackFrameBase() {}
+
+ virtual Handle<Object> GetReceiver() const = 0;
+ virtual Handle<Object> GetFunction() const = 0;
+
+ virtual Handle<Object> GetFileName() = 0;
+ virtual Handle<Object> GetFunctionName() = 0;
+ virtual Handle<Object> GetScriptNameOrSourceUrl() = 0;
+ virtual Handle<Object> GetMethodName() = 0;
+ virtual Handle<Object> GetTypeName() = 0;
+ virtual Handle<Object> GetEvalOrigin() = 0;
+
+ virtual int GetPosition() const = 0;
// Return 1-based line number, including line offset.
- int GetLineNumber();
+ virtual int GetLineNumber() = 0;
// Return 1-based column number, including column offset if first line.
- int GetColumnNumber();
- bool IsNative();
- bool IsToplevel();
- bool IsEval();
- bool IsConstructor();
+ virtual int GetColumnNumber() = 0;
+
+ virtual bool IsNative() = 0;
+ virtual bool IsToplevel() = 0;
+ virtual bool IsEval() = 0;
+ virtual bool IsConstructor() = 0;
+ virtual bool IsStrict() const = 0;
+
+ virtual MaybeHandle<String> ToString() = 0;
+};
+
+class JSStackFrame : public StackFrameBase {
+ public:
+ JSStackFrame(Isolate* isolate, Handle<Object> receiver,
+ Handle<JSFunction> function, Handle<AbstractCode> code,
+ int offset);
+ virtual ~JSStackFrame() {}
+
+ Handle<Object> GetReceiver() const override { return receiver_; }
+ Handle<Object> GetFunction() const override;
+
+ Handle<Object> GetFileName() override;
+ Handle<Object> GetFunctionName() override;
+ Handle<Object> GetScriptNameOrSourceUrl() override;
+ Handle<Object> GetMethodName() override;
+ Handle<Object> GetTypeName() override;
+ Handle<Object> GetEvalOrigin() override;
+
+ int GetPosition() const override;
+ int GetLineNumber() override;
+ int GetColumnNumber() override;
- bool IsJavaScript() { return !fun_.is_null(); }
- bool IsWasm() { return !wasm_obj_.is_null(); }
+ bool IsNative() override;
+ bool IsToplevel() override;
+ bool IsEval() override;
+ bool IsConstructor() override;
+ bool IsStrict() const override { return is_strict_; }
- int wasm_func_index() const { return wasm_func_index_; }
+ MaybeHandle<String> ToString() override;
private:
+ JSStackFrame();
+ void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
+
+ bool HasScript() const;
+ Handle<Script> GetScript() const;
+
Isolate* isolate_;
+
Handle<Object> receiver_;
- Handle<JSFunction> fun_;
- int32_t pos_ = -1;
- Handle<JSObject> wasm_obj_;
- uint32_t wasm_func_index_ = static_cast<uint32_t>(-1);
+ Handle<JSFunction> function_;
+ Handle<AbstractCode> code_;
+ int offset_;
+
+ bool force_constructor_;
+ bool is_strict_;
+
+ friend class FrameArrayIterator;
+};
+
+class WasmStackFrame : public StackFrameBase {
+ public:
+ virtual ~WasmStackFrame() {}
+
+ Handle<Object> GetReceiver() const override { return wasm_obj_; }
+ Handle<Object> GetFunction() const override;
+
+ Handle<Object> GetFileName() override { return Null(); }
+ Handle<Object> GetFunctionName() override;
+ Handle<Object> GetScriptNameOrSourceUrl() override { return Null(); }
+ Handle<Object> GetMethodName() override { return Null(); }
+ Handle<Object> GetTypeName() override { return Null(); }
+ Handle<Object> GetEvalOrigin() override { return Null(); }
+
+ int GetPosition() const override;
+ int GetLineNumber() override { return wasm_func_index_; }
+ int GetColumnNumber() override { return -1; }
+
+ bool IsNative() override { return false; }
+ bool IsToplevel() override { return false; }
+ bool IsEval() override { return false; }
+ bool IsConstructor() override { return false; }
+ bool IsStrict() const override { return false; }
+
+ MaybeHandle<String> ToString() override;
+
+ private:
+ void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
+ Handle<Object> Null() const;
+
+ Isolate* isolate_;
+
+ Handle<Object> wasm_obj_;
+ uint32_t wasm_func_index_;
+ Handle<AbstractCode> code_;
+ int offset_;
+
+ friend class FrameArrayIterator;
+};
+
+class FrameArrayIterator {
+ public:
+ FrameArrayIterator(Isolate* isolate, Handle<FrameArray> array,
+ int frame_ix = 0);
+
+ StackFrameBase* Frame();
+
+ bool HasNext() const;
+ void Next();
+
+ private:
+ Isolate* isolate_;
+
+ Handle<FrameArray> array_;
+ int next_frame_ix_;
+
+ WasmStackFrame wasm_frame_;
+ JSStackFrame js_frame_;
};
// Determines how stack trace collection skips frames.
@@ -107,16 +213,6 @@ class ErrorUtils : public AllStatic {
Handle<Object> stack_trace);
};
-class CallSiteUtils : public AllStatic {
- public:
- static MaybeHandle<Object> Construct(Isolate* isolate,
- Handle<Object> receiver,
- Handle<Object> fun, Handle<Object> pos,
- Handle<Object> strict_mode);
-
- static MaybeHandle<String> ToString(Isolate* isolate, Handle<Object> recv);
-};
-
#define MESSAGE_TEMPLATES(T) \
/* Error */ \
T(None, "") \
@@ -158,6 +254,7 @@ class CallSiteUtils : public AllStatic {
T(ConstructorNotFunction, "Constructor % requires 'new'") \
T(ConstructorNotReceiver, "The .constructor property is not an object") \
T(CurrencyCode, "Currency code is required with currency style.") \
+ T(CyclicModuleDependency, "Detected cycle while resolving name '%'") \
T(DataViewNotArrayBuffer, \
"First argument to DataView constructor must be an ArrayBuffer") \
T(DateType, "this is not a Date object.") \
@@ -402,6 +499,7 @@ class CallSiteUtils : public AllStatic {
T(UnsupportedTimeZone, "Unsupported time zone specified %") \
T(ValueOutOfRange, "Value % out of range for % options property %") \
/* SyntaxError */ \
+ T(AmbiguousExport, "Multiple star exports provide name '%'") \
T(BadGetterArity, "Getter must not have any formal parameters.") \
T(BadSetterArity, "Setter must have exactly one formal parameter.") \
T(ConstructorIsAccessor, "Class constructor may not be an accessor") \
@@ -454,8 +552,6 @@ class CallSiteUtils : public AllStatic {
T(NoCatchOrFinally, "Missing catch or finally after try") \
T(NotIsvar, "builtin %%IS_VAR: not a variable") \
T(ParamAfterRest, "Rest parameter must be last formal parameter") \
- T(InvalidRestParameter, \
- "Rest parameter must be an identifier or destructuring pattern") \
T(PushPastSafeLength, \
"Pushing % elements on an array-like of length % " \
"is disallowed, as the total surpasses 2**53-1") \
@@ -497,19 +593,10 @@ class CallSiteUtils : public AllStatic {
T(UnexpectedEOS, "Unexpected end of input") \
T(UnexpectedFunctionSent, \
"function.sent expression is not allowed outside a generator") \
- T(UnexpectedInsideTailCall, "Unexpected expression inside tail call") \
T(UnexpectedReserved, "Unexpected reserved word") \
T(UnexpectedStrictReserved, "Unexpected strict mode reserved word") \
T(UnexpectedSuper, "'super' keyword unexpected here") \
- T(UnexpectedSloppyTailCall, \
- "Tail call expressions are not allowed in non-strict mode") \
T(UnexpectedNewTarget, "new.target expression is not allowed here") \
- T(UnexpectedTailCall, "Tail call expression is not allowed here") \
- T(UnexpectedTailCallInCatchBlock, \
- "Tail call expression in catch block when finally block is also present") \
- T(UnexpectedTailCallInForInOf, "Tail call expression in for-in/of body") \
- T(UnexpectedTailCallInTryBlock, "Tail call expression in try block") \
- T(UnexpectedTailCallOfEval, "Tail call of a direct eval is not allowed") \
T(UnexpectedTemplateString, "Unexpected template string") \
T(UnexpectedToken, "Unexpected token %") \
T(UnexpectedTokenIdentifier, "Unexpected identifier") \
@@ -517,6 +604,7 @@ class CallSiteUtils : public AllStatic {
T(UnexpectedTokenString, "Unexpected string") \
T(UnexpectedTokenRegExp, "Unexpected regular expression") \
T(UnknownLabel, "Undefined label '%'") \
+ T(UnresolvableExport, "Module does not provide an export named '%'") \
T(UnterminatedArgList, "missing ) after argument list") \
T(UnterminatedRegExp, "Invalid regular expression: missing /") \
T(UnterminatedTemplate, "Unterminated template literal") \
@@ -540,7 +628,18 @@ class CallSiteUtils : public AllStatic {
T(WasmTrapFuncInvalid, "invalid function") \
T(WasmTrapFuncSigMismatch, "function signature mismatch") \
T(WasmTrapInvalidIndex, "invalid index into function table") \
- T(WasmTrapTypeError, "invalid type")
+ T(WasmTrapTypeError, "invalid type") \
+ /* DataCloneError messages */ \
+ T(DataCloneError, "% could not be cloned.") \
+ T(DataCloneErrorNeuteredArrayBuffer, \
+ "An ArrayBuffer is neutered and could not be cloned.") \
+ T(DataCloneErrorSharedArrayBufferNotTransferred, \
+ "A SharedArrayBuffer could not be cloned. SharedArrayBuffer must be " \
+ "transferred.") \
+ T(DataCloneDeserializationError, "Unable to deserialize cloned data.") \
+ T(DataCloneDeserializationVersionError, \
+ "Unable to deserialize cloned data due to invalid or unsupported " \
+ "version.")
class MessageTemplate {
public:
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 20a8a11fbb..f5b235d1f6 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -2453,6 +2453,11 @@ void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}
+void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
+}
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft) {
@@ -2460,6 +2465,37 @@ void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}
+void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
+}
+
+void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
+}
+
+void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
+}
+
+void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
+}
+
+void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
+}
+
+void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
+}
void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, DIV_S);
@@ -2492,13 +2528,11 @@ void Assembler::mov_s(FPURegister fd, FPURegister fs) {
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
- DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
}
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
- DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 0e41671a67..e58abd8c0c 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -878,7 +878,14 @@ class Assembler : public AssemblerBase {
void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
void mul_s(FPURegister fd, FPURegister fs, FPURegister ft);
void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+ void msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+ void msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+ void maddf_s(FPURegister fd, FPURegister fs, FPURegister ft);
+ void maddf_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void msubf_s(FPURegister fd, FPURegister fs, FPURegister ft);
+ void msubf_d(FPURegister fd, FPURegister fs, FPURegister ft);
void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
void abs_s(FPURegister fd, FPURegister fs);
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 844958ec47..43e67354f2 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -1782,7 +1782,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- Label done_initialize_count, done_increment_count;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
@@ -1801,7 +1800,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Register feedback_map = t1;
Register weak_value = t4;
__ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset));
- __ Branch(&done_increment_count, eq, a1, Operand(weak_value));
+ __ Branch(&done, eq, a1, Operand(weak_value));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&done, eq, t2, Operand(at));
__ lw(feedback_map, FieldMemOperand(t2, HeapObject::kMapOffset));
@@ -1823,7 +1822,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
__ Branch(&megamorphic, ne, a1, Operand(t2));
- __ jmp(&done_increment_count);
+ __ jmp(&done);
__ bind(&miss);
@@ -1850,28 +1849,19 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
- __ Branch(&done_initialize_count);
+ __ Branch(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
- __ bind(&done_initialize_count);
- // Initialize the call counter.
- __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ li(t0, Operand(Smi::FromInt(1)));
- __ Branch(USE_DELAY_SLOT, &done);
- __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-
- __ bind(&done_increment_count);
+ __ bind(&done);
- // Increment the call count for monomorphic function calls.
+ // Increment the call count for all function calls.
__ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
__ Addu(t0, t0, Operand(Smi::FromInt(1)));
__ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-
- __ bind(&done);
}
@@ -1917,6 +1907,14 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+// Note: feedback_vector and slot are clobbered after the call.
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+ Register slot) {
+ __ Lsa(at, feedback_vector, slot, kPointerSizeLog2 - kSmiTagSize);
+ __ lw(slot, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+ __ Addu(slot, slot, Operand(Smi::FromInt(1)));
+ __ sw(slot, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// a1 - function
@@ -1929,10 +1927,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ li(a0, Operand(arg_count()));
// Increment the call count for monomorphic function calls.
- __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
- __ Addu(a3, a3, Operand(Smi::FromInt(1)));
- __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCallCount(masm, a2, a3);
__ mov(a2, t0);
__ mov(a3, a1);
@@ -1945,7 +1940,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// a1 - function
// a3 - slot id (Smi)
// a2 - vector
- Label extra_checks_or_miss, call, call_function;
+ Label extra_checks_or_miss, call, call_function, call_count_incremented;
int argc = arg_count();
ParameterCount actual(argc);
@@ -1974,13 +1969,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(a1, &extra_checks_or_miss);
+ __ bind(&call_function);
+
// Increment the call count for monomorphic function calls.
- __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
- __ Addu(a3, a3, Operand(Smi::FromInt(1)));
- __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCallCount(masm, a2, a3);
- __ bind(&call_function);
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
@@ -2021,6 +2014,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
__ bind(&call);
+ IncrementCallCount(masm, a2, a3);
+
+ __ bind(&call_count_incremented);
+
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
USE_DELAY_SLOT);
@@ -2046,11 +2043,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ lw(t1, NativeContextMemOperand());
__ Branch(&miss, ne, t0, Operand(t1));
- // Initialize the call counter.
- __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ li(t0, Operand(Smi::FromInt(1)));
- __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-
// Store the function. Use a stub since we need a frame for allocation.
// a2 - vector
// a3 - slot
@@ -2058,9 +2050,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(a2, a3);
__ Push(cp, a1);
__ CallStub(&create_stub);
__ Pop(cp, a1);
+ __ Pop(a2, a3);
}
__ Branch(&call_function);
@@ -2070,7 +2064,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
GenerateMiss(masm);
- __ Branch(&call);
+ __ Branch(&call_count_incremented);
}
@@ -2275,293 +2269,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
}
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
- // Stack frame on entry.
- // ra: return address
- // sp[0]: to
- // sp[4]: from
- // sp[8]: string
-
- // This stub is called from the native-call %_SubString(...), so
- // nothing can be assumed about the arguments. It is tested that:
- // "string" is a sequential string,
- // both "from" and "to" are smis, and
- // 0 <= from <= to <= string.length.
- // If any of these assumptions fail, we call the runtime system.
-
- const int kToOffset = 0 * kPointerSize;
- const int kFromOffset = 1 * kPointerSize;
- const int kStringOffset = 2 * kPointerSize;
-
- __ lw(a2, MemOperand(sp, kToOffset));
- __ lw(a3, MemOperand(sp, kFromOffset));
- STATIC_ASSERT(kFromOffset == kToOffset + 4);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-
- // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
- // safe in this case.
- __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
- __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
- // Both a2 and a3 are untagged integers.
-
- __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
-
- __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
- __ Subu(a2, a2, a3);
-
- // Make sure first argument is a string.
- __ lw(v0, MemOperand(sp, kStringOffset));
- __ JumpIfSmi(v0, &runtime);
- __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
- __ And(t0, a1, Operand(kIsNotStringMask));
-
- __ Branch(&runtime, ne, t0, Operand(zero_reg));
-
- Label single_char;
- __ Branch(&single_char, eq, a2, Operand(1));
-
- // Short-cut for the case of trivial substring.
- Label return_v0;
- // v0: original string
- // a2: result string length
- __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
- __ sra(t0, t0, 1);
- // Return original string.
- __ Branch(&return_v0, eq, a2, Operand(t0));
- // Longer than original string's length or negative: unsafe arguments.
- __ Branch(&runtime, hi, a2, Operand(t0));
- // Shorter than original string's length: an actual substring.
-
- // Deal with different string types: update the index if necessary
- // and put the underlying string into t1.
- // v0: original string
- // a1: instance type
- // a2: length
- // a3: from index (untagged)
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ And(t0, a1, Operand(kIsIndirectStringMask));
- __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
- // t0 is used as a scratch register and can be overwritten in either case.
- __ And(t0, a1, Operand(kSlicedNotConsMask));
- __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
- // Cons string. Check whether it is flat, then fetch first part.
- __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
- __ LoadRoot(t0, Heap::kempty_stringRootIndex);
- __ Branch(&runtime, ne, t1, Operand(t0));
- __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
- // Update instance type.
- __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
- __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
- __ sra(t0, t0, 1); // Add offset to index.
- __ Addu(a3, a3, t0);
- // Update instance type.
- __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the expected register.
- __ mov(t1, v0);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // t1: underlying subject string
- // a1: instance type of underlying subject string
- // a2: length
- // a3: adjusted start index (untagged)
- // Short slice. Copy instead of slicing.
- __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ And(t0, a1, Operand(kStringEncodingMask));
- __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
- __ AllocateOneByteSlicedString(v0, a2, t2, t3, &runtime);
- __ jmp(&set_slice_header);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
- __ bind(&set_slice_header);
- __ sll(a3, a3, 1);
- __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
- __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
- __ jmp(&return_v0);
-
- __ bind(&copy_routine);
- }
-
- // t1: underlying subject string
- // a1: instance type of underlying subject string
- // a2: length
- // a3: adjusted start index (untagged)
- Label two_byte_sequential, sequential_string, allocate_result;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t0, a1, Operand(kExternalStringTag));
- __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(t0, a1, Operand(kShortExternalStringTag));
- __ Branch(&runtime, ne, t0, Operand(zero_reg));
- __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
- // t1 already points to the first character of underlying string.
- __ jmp(&allocate_result);
-
- __ bind(&sequential_string);
- // Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&allocate_result);
- // Sequential acii string. Allocate the result.
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ And(t0, a1, Operand(kStringEncodingMask));
- __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
-
- // Allocate and copy the resulting ASCII string.
- __ AllocateOneByteString(v0, a2, t0, t2, t3, &runtime);
-
- // Locate first character of substring to copy.
- __ Addu(t1, t1, a3);
-
- // Locate first character of result.
- __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // v0: result string
- // a1: first character of result string
- // a2: result string length
- // t1: first character of substring to copy
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharacters(
- masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
- __ jmp(&return_v0);
-
- // Allocate and copy the resulting two-byte string.
- __ bind(&two_byte_sequential);
- __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
-
- // Locate first character of substring to copy.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ Lsa(t1, t1, a3, 1);
- // Locate first character of result.
- __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // v0: result string.
- // a1: first character of result.
- // a2: result length.
- // t1: first character of substring to copy.
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharacters(
- masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
-
- __ bind(&return_v0);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
- __ DropAndRet(3);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString);
-
- __ bind(&single_char);
- // v0: original string
- // a1: instance type
- // a2: length
- // a3: from index (untagged)
- __ SmiTag(a3, a3);
- StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
- RECEIVER_IS_STRING);
- generator.GenerateFast(masm);
- __ DropAndRet(3);
- generator.SkipSlow(masm, &runtime);
-}
-
-
-void ToStringStub::Generate(MacroAssembler* masm) {
- // The ToString stub takes on argument in a0.
- Label is_number;
- __ JumpIfSmi(a0, &is_number);
-
- Label not_string;
- __ GetObjectType(a0, a1, a1);
- // a0: receiver
- // a1: receiver instance type
- __ Branch(&not_string, ge, a1, Operand(FIRST_NONSTRING_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_string);
-
- Label not_heap_number;
- __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
- __ bind(&not_oddball);
-
- __ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
- // The ToName stub takes on argument in a0.
- Label is_number;
- __ JumpIfSmi(a0, &is_number);
-
- Label not_name;
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ GetObjectType(a0, a1, a1);
- // a0: receiver
- // a1: receiver instance type
- __ Branch(&not_name, gt, a1, Operand(LAST_NAME_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_name);
-
- Label not_heap_number;
- __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
- __ bind(&not_oddball);
-
- __ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToName);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -3915,7 +3622,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
__ lw(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
// Load the map into the correct register.
- DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
__ mov(feedback, too_far);
__ Addu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4624,7 +4331,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
Label too_big_for_new_space;
__ bind(&allocate);
__ Branch(&too_big_for_new_space, gt, t0,
- Operand(Page::kMaxRegularHeapObjectSize));
+ Operand(kMaxRegularHeapObjectSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(t0);
@@ -4968,8 +4675,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ Branch(&too_big_for_new_space, gt, t0,
- Operand(Page::kMaxRegularHeapObjectSize));
+ __ Branch(&too_big_for_new_space, gt, t0, Operand(kMaxRegularHeapObjectSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(t0);
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index 3afb88146d..ad97e4170c 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -123,116 +123,6 @@ int FPURegisters::Number(const char* name) {
}
-// -----------------------------------------------------------------------------
-// Instructions.
-
-bool Instruction::IsForbiddenAfterBranchInstr(Instr instr) {
- Opcode opcode = static_cast<Opcode>(instr & kOpcodeMask);
- switch (opcode) {
- case J:
- case JAL:
- case BEQ:
- case BNE:
- case BLEZ: // POP06 bgeuc/bleuc, blezalc, bgezalc
- case BGTZ: // POP07 bltuc/bgtuc, bgtzalc, bltzalc
- case BEQL:
- case BNEL:
- case BLEZL: // POP26 bgezc, blezc, bgec/blec
- case BGTZL: // POP27 bgtzc, bltzc, bltc/bgtc
- case BC:
- case BALC:
- case POP10: // beqzalc, bovc, beqc
- case POP30: // bnezalc, bnvc, bnec
- case POP66: // beqzc, jic
- case POP76: // bnezc, jialc
- return true;
- case REGIMM:
- switch (instr & kRtFieldMask) {
- case BLTZ:
- case BGEZ:
- case BLTZAL:
- case BGEZAL:
- return true;
- default:
- return false;
- }
- break;
- case SPECIAL:
- switch (instr & kFunctionFieldMask) {
- case JR:
- case JALR:
- return true;
- default:
- return false;
- }
- break;
- case COP1:
- switch (instr & kRsFieldMask) {
- case BC1:
- case BC1EQZ:
- case BC1NEZ:
- return true;
- break;
- default:
- return false;
- }
- break;
- default:
- return false;
- }
-}
-
-
-bool Instruction::IsLinkingInstruction() const {
- switch (OpcodeFieldRaw()) {
- case JAL:
- return true;
- case POP76:
- if (RsFieldRawNoAssert() == JIALC)
- return true; // JIALC
- else
- return false; // BNEZC
- case REGIMM:
- switch (RtFieldRaw()) {
- case BGEZAL:
- case BLTZAL:
- return true;
- default:
- return false;
- }
- case SPECIAL:
- switch (FunctionFieldRaw()) {
- case JALR:
- return true;
- default:
- return false;
- }
- default:
- return false;
- }
-}
-
-
-bool Instruction::IsTrap() const {
- if (OpcodeFieldRaw() != SPECIAL) {
- return false;
- } else {
- switch (FunctionFieldRaw()) {
- case BREAK:
- case TGE:
- case TGEU:
- case TLT:
- case TLTU:
- case TEQ:
- case TNE:
- return true;
- default:
- return false;
- }
- }
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 8301c5e5de..200939d65a 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -525,6 +525,8 @@ enum SecondaryField : uint32_t {
FLOOR_W_S = ((1U << 3) + 7),
RECIP_S = ((2U << 3) + 5),
RSQRT_S = ((2U << 3) + 6),
+ MADDF_S = ((3U << 3) + 0),
+ MSUBF_S = ((3U << 3) + 1),
CLASS_S = ((3U << 3) + 3),
CVT_D_S = ((4U << 3) + 1),
CVT_W_S = ((4U << 3) + 4),
@@ -550,6 +552,8 @@ enum SecondaryField : uint32_t {
FLOOR_W_D = ((1U << 3) + 7),
RECIP_D = ((2U << 3) + 5),
RSQRT_D = ((2U << 3) + 6),
+ MADDF_D = ((3U << 3) + 0),
+ MSUBF_D = ((3U << 3) + 1),
CLASS_D = ((3U << 3) + 3),
MIN = ((3U << 3) + 4),
MINA = ((3U << 3) + 5),
@@ -616,8 +620,12 @@ enum SecondaryField : uint32_t {
MOVF = ((2U << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
SELNEZ_C = ((2U << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS.
+
// COP1X Encoding of Function Field.
+ MADD_S = ((4U << 3) + 0),
MADD_D = ((4U << 3) + 1),
+ MSUB_S = ((5U << 3) + 0),
+ MSUB_D = ((5U << 3) + 1),
// PCREL Encoding of rt Field.
ADDIUPC = ((0U << 2) + 0),
@@ -858,8 +866,7 @@ static constexpr uint64_t OpcodeToBitNumber(Opcode opcode) {
return 1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift);
}
-
-class Instruction {
+class InstructionBase {
public:
enum {
kInstrSize = 4,
@@ -869,6 +876,9 @@ class Instruction {
kPCReadOffset = 0
};
+ // Instruction type.
+ enum Type { kRegisterType, kImmediateType, kJumpType, kUnsupported = -1 };
+
// Get the raw instruction bits.
inline Instr InstructionBits() const {
return *reinterpret_cast<const Instr*>(this);
@@ -889,16 +899,6 @@ class Instruction {
return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
}
- // Instruction type.
- enum Type {
- kRegisterType,
- kImmediateType,
- kJumpType,
- kUnsupported = -1
- };
-
- enum TypeChecks { NORMAL, EXTRA };
-
static constexpr uint64_t kOpcodeImmediateTypeMask =
OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
@@ -943,122 +943,140 @@ class Instruction {
FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) |
FunctionFieldToBitNumber(SELNEZ_S) | FunctionFieldToBitNumber(SYNC);
- // Get the encoding type of the instruction.
- inline Type InstructionType(TypeChecks checks = NORMAL) const;
-
// Accessors for the different named fields used in the MIPS encoding.
inline Opcode OpcodeValue() const {
return static_cast<Opcode>(
Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
}
+ inline int FunctionFieldRaw() const {
+ return InstructionBits() & kFunctionFieldMask;
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline Opcode OpcodeFieldRaw() const {
+ return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
+ }
+
+ // Safe to call within InstructionType().
+ inline int RsFieldRawNoAssert() const {
+ return InstructionBits() & kRsFieldMask;
+ }
+
+ inline int SaFieldRaw() const { return InstructionBits() & kSaFieldMask; }
+
+ // Get the encoding type of the instruction.
+ inline Type InstructionType() const;
+
+ protected:
+ InstructionBase() {}
+};
+
+template <class T>
+class InstructionGetters : public T {
+ public:
inline int RsValue() const {
- DCHECK(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return Bits(kRsShift + kRsBits - 1, kRsShift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
+ return InstructionBase::Bits(kRsShift + kRsBits - 1, kRsShift);
}
inline int RtValue() const {
- DCHECK(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return Bits(kRtShift + kRtBits - 1, kRtShift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(kRtShift + kRtBits - 1, kRtShift);
}
inline int RdValue() const {
- DCHECK(InstructionType() == kRegisterType);
- return Bits(kRdShift + kRdBits - 1, kRdShift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ return this->Bits(kRdShift + kRdBits - 1, kRdShift);
}
inline int SaValue() const {
- DCHECK(InstructionType() == kRegisterType);
- return Bits(kSaShift + kSaBits - 1, kSaShift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ return this->Bits(kSaShift + kSaBits - 1, kSaShift);
}
inline int LsaSaValue() const {
- DCHECK(InstructionType() == kRegisterType);
- return Bits(kSaShift + kLsaSaBits - 1, kSaShift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ return this->Bits(kSaShift + kLsaSaBits - 1, kSaShift);
}
inline int FunctionValue() const {
- DCHECK(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
}
inline int FdValue() const {
- return Bits(kFdShift + kFdBits - 1, kFdShift);
+ return this->Bits(kFdShift + kFdBits - 1, kFdShift);
}
inline int FsValue() const {
- return Bits(kFsShift + kFsBits - 1, kFsShift);
+ return this->Bits(kFsShift + kFsBits - 1, kFsShift);
}
inline int FtValue() const {
- return Bits(kFtShift + kFtBits - 1, kFtShift);
+ return this->Bits(kFtShift + kFtBits - 1, kFtShift);
}
inline int FrValue() const {
- return Bits(kFrShift + kFrBits -1, kFrShift);
+ return this->Bits(kFrShift + kFrBits - 1, kFrShift);
}
inline int Bp2Value() const {
- DCHECK(InstructionType() == kRegisterType);
- return Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ return this->Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift);
}
// Float Compare condition code instruction bits.
inline int FCccValue() const {
- return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
+ return this->Bits(kFCccShift + kFCccBits - 1, kFCccShift);
}
// Float Branch condition code instruction bits.
inline int FBccValue() const {
- return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
+ return this->Bits(kFBccShift + kFBccBits - 1, kFBccShift);
}
// Float Branch true/false instruction bit.
inline int FBtrueValue() const {
- return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
+ return this->Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
}
// Return the fields at their original place in the instruction encoding.
inline Opcode OpcodeFieldRaw() const {
- return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
+ return static_cast<Opcode>(this->InstructionBits() & kOpcodeMask);
}
inline int RsFieldRaw() const {
- DCHECK(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return InstructionBits() & kRsFieldMask;
- }
-
- // Same as above function, but safe to call within InstructionType().
- inline int RsFieldRawNoAssert() const {
- return InstructionBits() & kRsFieldMask;
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
+ return this->InstructionBits() & kRsFieldMask;
}
inline int RtFieldRaw() const {
- DCHECK(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return InstructionBits() & kRtFieldMask;
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
+ return this->InstructionBits() & kRtFieldMask;
}
inline int RdFieldRaw() const {
- DCHECK(InstructionType() == kRegisterType);
- return InstructionBits() & kRdFieldMask;
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ return this->InstructionBits() & kRdFieldMask;
}
inline int SaFieldRaw() const {
- return InstructionBits() & kSaFieldMask;
+ return this->InstructionBits() & kSaFieldMask;
}
inline int FunctionFieldRaw() const {
- return InstructionBits() & kFunctionFieldMask;
+ return this->InstructionBits() & kFunctionFieldMask;
}
// Get the secondary field according to the opcode.
inline int SecondaryValue() const {
- Opcode op = OpcodeFieldRaw();
+ Opcode op = this->OpcodeFieldRaw();
switch (op) {
case SPECIAL:
case SPECIAL2:
@@ -1073,34 +1091,34 @@ class Instruction {
}
inline int32_t ImmValue(int bits) const {
- DCHECK(InstructionType() == kImmediateType);
- return Bits(bits - 1, 0);
+ DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(bits - 1, 0);
}
inline int32_t Imm16Value() const {
- DCHECK(InstructionType() == kImmediateType);
- return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
+ DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
inline int32_t Imm18Value() const {
- DCHECK(InstructionType() == kImmediateType);
- return Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift);
+ DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift);
}
inline int32_t Imm19Value() const {
- DCHECK(InstructionType() == kImmediateType);
- return Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift);
+ DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift);
}
inline int32_t Imm21Value() const {
- DCHECK(InstructionType() == kImmediateType);
- return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
+ DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
}
inline int32_t Imm26Value() const {
- DCHECK((InstructionType() == kJumpType) ||
- (InstructionType() == kImmediateType));
- return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
+ DCHECK((this->InstructionType() == InstructionBase::kJumpType) ||
+ (this->InstructionType() == InstructionBase::kImmediateType));
+ return this->Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
}
static bool IsForbiddenAfterBranchInstr(Instr instr);
@@ -1108,7 +1126,7 @@ class Instruction {
// Say if the instruction should not be used in a branch delay slot or
// immediately after a compact branch.
inline bool IsForbiddenAfterBranch() const {
- return IsForbiddenAfterBranchInstr(InstructionBits());
+ return IsForbiddenAfterBranchInstr(this->InstructionBits());
}
inline bool IsForbiddenInBranchDelay() const {
@@ -1119,7 +1137,10 @@ class Instruction {
bool IsLinkingInstruction() const;
// Say if the instruction is a break or a trap.
bool IsTrap() const;
+};
+class Instruction : public InstructionGetters<InstructionBase> {
+ public:
// Instructions are read of out a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instruction.
@@ -1148,26 +1169,14 @@ const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
-
-Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
- if (checks == EXTRA) {
- if (OpcodeToBitNumber(OpcodeFieldRaw()) & kOpcodeImmediateTypeMask) {
- return kImmediateType;
- }
- }
+InstructionBase::Type InstructionBase::InstructionType() const {
switch (OpcodeFieldRaw()) {
case SPECIAL:
- if (checks == EXTRA) {
- if (FunctionFieldToBitNumber(FunctionFieldRaw()) &
- kFunctionFieldRegisterTypeMask) {
- return kRegisterType;
- } else {
- return kUnsupported;
- }
- } else {
+ if (FunctionFieldToBitNumber(FunctionFieldRaw()) &
+ kFunctionFieldRegisterTypeMask) {
return kRegisterType;
}
- break;
+ return kUnsupported;
case SPECIAL2:
switch (FunctionFieldRaw()) {
case MUL:
@@ -1222,16 +1231,124 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
return kJumpType;
default:
- if (checks == NORMAL) {
return kImmediateType;
- } else {
- return kUnsupported;
- }
}
}
#undef OpcodeToBitNumber
#undef FunctionFieldToBitNumber
+
+// -----------------------------------------------------------------------------
+// Instructions.
+
+template <class P>
+bool InstructionGetters<P>::IsLinkingInstruction() const {
+ uint32_t op = this->OpcodeFieldRaw();
+ switch (op) {
+ case JAL:
+ return true;
+ case POP76:
+ if (this->RsFieldRawNoAssert() == JIALC)
+ return true; // JIALC
+ else
+ return false; // BNEZC
+ case REGIMM:
+ switch (this->RtFieldRaw()) {
+ case BGEZAL:
+ case BLTZAL:
+ return true;
+ default:
+ return false;
+ }
+ case SPECIAL:
+ switch (this->FunctionFieldRaw()) {
+ case JALR:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+template <class P>
+bool InstructionGetters<P>::IsTrap() const {
+ if (this->OpcodeFieldRaw() != SPECIAL) {
+ return false;
+ } else {
+ switch (this->FunctionFieldRaw()) {
+ case BREAK:
+ case TGE:
+ case TGEU:
+ case TLT:
+ case TLTU:
+ case TEQ:
+ case TNE:
+ return true;
+ default:
+ return false;
+ }
+ }
+}
+
+// static
+template <class T>
+bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
+ Opcode opcode = static_cast<Opcode>(instr & kOpcodeMask);
+ switch (opcode) {
+ case J:
+ case JAL:
+ case BEQ:
+ case BNE:
+ case BLEZ: // POP06 bgeuc/bleuc, blezalc, bgezalc
+ case BGTZ: // POP07 bltuc/bgtuc, bgtzalc, bltzalc
+ case BEQL:
+ case BNEL:
+ case BLEZL: // POP26 bgezc, blezc, bgec/blec
+ case BGTZL: // POP27 bgtzc, bltzc, bltc/bgtc
+ case BC:
+ case BALC:
+ case POP10: // beqzalc, bovc, beqc
+ case POP30: // bnezalc, bnvc, bnec
+ case POP66: // beqzc, jic
+ case POP76: // bnezc, jialc
+ return true;
+ case REGIMM:
+ switch (instr & kRtFieldMask) {
+ case BLTZ:
+ case BGEZ:
+ case BLTZAL:
+ case BGEZAL:
+ return true;
+ default:
+ return false;
+ }
+ break;
+ case SPECIAL:
+ switch (instr & kFunctionFieldMask) {
+ case JR:
+ case JALR:
+ return true;
+ default:
+ return false;
+ }
+ break;
+ case COP1:
+ switch (instr & kRsFieldMask) {
+ case BC1:
+ case BC1EQZ:
+ case BC1NEZ:
+ return true;
+ break;
+ default:
+ return false;
+ }
+ break;
+ default:
+ return false;
+ }
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index bd07874bd6..f541e9143f 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -918,6 +918,12 @@ void Decoder::DecodeTypeRegisterSRsType(Instruction* instr) {
case CVT_D_S:
Format(instr, "cvt.d.'t 'fd, 'fs");
break;
+ case MADDF_S:
+ Format(instr, "maddf.s 'fd, 'fs, 'ft");
+ break;
+ case MSUBF_S:
+ Format(instr, "msubf.s 'fd, 'fs, 'ft");
+ break;
default:
Format(instr, "unknown.cop1.'t");
break;
@@ -928,7 +934,17 @@ void Decoder::DecodeTypeRegisterSRsType(Instruction* instr) {
void Decoder::DecodeTypeRegisterDRsType(Instruction* instr) {
if (!DecodeTypeRegisterRsType(instr)) {
- Format(instr, "unknown.cop1.'t");
+ switch (instr->FunctionFieldRaw()) {
+ case MADDF_D:
+ Format(instr, "maddf.d 'fd, 'fs, 'ft");
+ break;
+ case MSUBF_D:
+ Format(instr, "msubf.d 'fd, 'fs, 'ft");
+ break;
+ default:
+ Format(instr, "unknown.cop1.'t");
+ break;
+ }
}
}
@@ -1360,9 +1376,18 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
break;
case COP1X:
switch (instr->FunctionFieldRaw()) {
+ case MADD_S:
+ Format(instr, "madd.s 'fd, 'fr, 'fs, 'ft");
+ break;
case MADD_D:
Format(instr, "madd.d 'fd, 'fr, 'fs, 'ft");
break;
+ case MSUB_S:
+ Format(instr, "msub.s 'fd, 'fr, 'fs, 'ft");
+ break;
+ case MSUB_D:
+ Format(instr, "msub.d 'fd, 'fr, 'fs, 'ft");
+ break;
default:
UNREACHABLE();
}
@@ -1687,7 +1712,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%08x ",
instr->InstructionBits());
- switch (instr->InstructionType(Instruction::EXTRA)) {
+ switch (instr->InstructionType()) {
case Instruction::kRegisterType: {
DecodeTypeRegister(instr);
break;
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index bafe0b661b..aed41420d6 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -40,13 +40,9 @@ const Register StoreDescriptor::SlotRegister() { return t0; }
const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return t0; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return a3; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return t1; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return a3; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return t0; }
+const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
+const Register StoreTransitionDescriptor::MapRegister() { return t1; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
@@ -357,7 +353,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // callee
@@ -392,7 +388,19 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
a0, // argument count (not including receiver)
a3, // new target
a1, // constructor to call
- a2 // address of the first argument
+ a2, // allocation site feedback if available, undefined otherwise.
+ t4 // address of the first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a0, // argument count (not including receiver)
+ a1, // the target to call verified to be Array function
+ a2, // allocation site feedback
+ a3, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index dba1fae975..d61717d222 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -184,9 +184,7 @@ void MacroAssembler::InNewSpace(Register object,
Condition cc,
Label* branch) {
DCHECK(cc == eq || cc == ne);
- const int mask =
- 1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
- CheckPageFlag(object, scratch, mask, cc, branch);
+ CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch);
}
@@ -1126,8 +1124,13 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
- if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+ const uint32_t int16_min = std::numeric_limits<int16_t>::min();
+ if (is_uint15(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+ // Imm range is: [0, 32767].
sltiu(rd, rs, rt.imm32_);
+ } else if (is_uint15(rt.imm32_ - int16_min) && !MustUseReg(rt.rmode_)) {
+ // Imm range is: [max_unsigned-32767,max_unsigned].
+ sltiu(rd, rs, static_cast<uint16_t>(rt.imm32_));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -1915,9 +1918,12 @@ void MacroAssembler::Ins(Register rt,
}
void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
- Register scratch1 = t8;
- Register scratch2 = t9;
- if (IsMipsArchVariant(kMips32r2)) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ // r6 neg_s changes the sign for NaN-like operands as well.
+ neg_s(fd, fs);
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kLoongson));
Label is_nan, done;
Register scratch1 = t8;
Register scratch2 = t9;
@@ -1926,7 +1932,6 @@ void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
// For NaN input, neg_s will return the same NaN value,
// while the sign has to be changed separately.
neg_s(fd, fs); // In delay slot.
-
bind(&is_nan);
mfc1(scratch1, fs);
And(scratch2, scratch1, Operand(~kBinary32SignMask));
@@ -1935,27 +1940,24 @@ void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
Or(scratch2, scratch2, scratch1);
mtc1(scratch2, fd);
bind(&done);
- } else {
- mfc1(scratch1, fs);
- And(scratch2, scratch1, Operand(~kBinary32SignMask));
- And(scratch1, scratch1, Operand(kBinary32SignMask));
- Xor(scratch1, scratch1, Operand(kBinary32SignMask));
- Or(scratch2, scratch2, scratch1);
- mtc1(scratch2, fd);
}
}
void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
- Register scratch1 = t8;
- Register scratch2 = t9;
- if (IsMipsArchVariant(kMips32r2)) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ // r6 neg_d changes the sign for NaN-like operands as well.
+ neg_d(fd, fs);
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kLoongson));
Label is_nan, done;
+ Register scratch1 = t8;
+ Register scratch2 = t9;
BranchF64(nullptr, &is_nan, eq, fs, fs);
Branch(USE_DELAY_SLOT, &done);
// For NaN input, neg_d will return the same NaN value,
// while the sign has to be changed separately.
neg_d(fd, fs); // In delay slot.
-
bind(&is_nan);
Mfhc1(scratch1, fs);
And(scratch2, scratch1, Operand(~HeapNumber::kSignMask));
@@ -1964,14 +1966,6 @@ void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
Or(scratch2, scratch2, scratch1);
Mthc1(scratch2, fd);
bind(&done);
- } else {
- Move_d(fd, fs);
- Mfhc1(scratch1, fs);
- And(scratch2, scratch1, Operand(~HeapNumber::kSignMask));
- And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- Xor(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- Or(scratch2, scratch2, scratch1);
- Mthc1(scratch2, fd);
}
}
@@ -2170,7 +2164,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
// Check for unordered (NaN) cases.
if (nan) {
bool long_branch =
- nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
+ nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
if (!IsMipsArchVariant(kMips32r6)) {
if (long_branch) {
Label skip;
@@ -2209,7 +2203,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
if (target) {
bool long_branch =
- target->is_bound() ? is_near(target) : is_trampoline_emitted();
+ target->is_bound() ? !is_near(target) : is_trampoline_emitted();
if (long_branch) {
Label skip;
Condition neg_cond = NegateFpuCondition(cond);
@@ -4220,7 +4214,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
@@ -4402,7 +4396,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
void MacroAssembler::FastAllocate(int object_size, Register result,
Register scratch1, Register scratch2,
AllocationFlags flags) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
// Make object size into bytes.
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index aa5b0f9524..4024e52c6f 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -215,6 +215,18 @@ class MacroAssembler: public Assembler {
Func GetLabelFunction);
#undef COND_ARGS
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+ // below the caller's sp.
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 0) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ UNIMPLEMENTED();
+ }
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count,
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 59dc300f68..bd423996d8 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -808,8 +808,8 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
-void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
- size_t size) {
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+ void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
int intra_line = (start & CachePage::kLineMask);
start -= intra_line;
@@ -829,8 +829,10 @@ void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
}
}
-CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
- base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page) {
+ base::CustomMatcherHashMap::Entry* entry =
+ i_cache->LookupOrInsert(page, ICacheHash(page));
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
entry->value = new_page;
@@ -840,7 +842,8 @@ CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+ intptr_t start, int size) {
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
@@ -852,7 +855,8 @@ void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
-void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -885,7 +889,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
- i_cache_ = new base::HashMap(&ICacheMatch);
+ i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
@@ -997,11 +1001,12 @@ class Redirection {
// static
-void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
+ Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
- for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
- entry = i_cache->Next(entry)) {
+ for (base::CustomMatcherHashMap::Entry* entry = i_cache->Start();
+ entry != nullptr; entry = i_cache->Next(entry)) {
delete static_cast<CachePage*>(entry->value);
}
delete i_cache;
@@ -1929,16 +1934,16 @@ typedef void (*SimulatorRuntimeProfilingGetterCall)(
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime. They are also used for debugging with simulator.
-void Simulator::SoftwareInterrupt(Instruction* instr) {
+void Simulator::SoftwareInterrupt() {
// There are several instructions that could get us here,
// the break_ instruction, or several variants of traps. All
// Are "SPECIAL" class opcode, and are distinuished by function.
- int32_t func = instr->FunctionFieldRaw();
- uint32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
+ int32_t func = instr_.FunctionFieldRaw();
+ uint32_t code = (func == BREAK) ? instr_.Bits(25, 6) : -1;
// We first check if we met a call_rt_redirected.
- if (instr->InstructionBits() == rtCallRedirInstr) {
- Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ if (instr_.InstructionBits() == rtCallRedirInstr) {
+ Redirection* redirection = Redirection::FromSwiInstruction(instr_.instr());
int32_t arg0 = get_register(a0);
int32_t arg1 = get_register(a1);
int32_t arg2 = get_register(a2);
@@ -2173,7 +2178,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintWatchpoint(code);
} else {
IncreaseStopCounter(code);
- HandleStop(code, instr);
+ HandleStop(code, instr_.instr());
}
} else {
// All remaining break_ codes, and all traps are handled here.
@@ -2366,6 +2371,49 @@ static T FPUMaxA(T a, T b) {
return result;
}
+enum class KeepSign : bool { no = false, yes };
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value,
+ int>::type = 0>
+T FPUCanonalizeNaNArg(T result, T arg, KeepSign keepSign = KeepSign::no) {
+ DCHECK(std::isnan(arg));
+ T qNaN = std::numeric_limits<T>::quiet_NaN();
+ if (keepSign == KeepSign::yes) {
+ return std::copysign(qNaN, result);
+ }
+ return qNaN;
+}
+
+template <typename T>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first) {
+ if (std::isnan(first)) {
+ return FPUCanonalizeNaNArg(result, first, keepSign);
+ }
+ return result;
+}
+
+template <typename T, typename... Args>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first, Args... args) {
+ if (std::isnan(first)) {
+ return FPUCanonalizeNaNArg(result, first, keepSign);
+ }
+ return FPUCanonalizeNaNArgs(result, keepSign, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, T first, Args... args) {
+ return FPUCanonalizeOperation(f, KeepSign::no, first, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, KeepSign keepSign, T first, Args... args) {
+ T result = f(first, args...);
+ if (std::isnan(result)) {
+ result = FPUCanonalizeNaNArgs(result, keepSign, first, args...);
+ }
+ return result;
+}
+
// Handle execution based on instruction types.
void Simulator::DecodeTypeRegisterDRsType() {
@@ -2373,15 +2421,14 @@ void Simulator::DecodeTypeRegisterDRsType() {
uint32_t cc, fcsr_cc;
int64_t i64;
fs = get_fpu_register_double(fs_reg());
- ft = (get_instr()->FunctionFieldRaw() != MOVF)
- ? get_fpu_register_double(ft_reg())
- : 0.0;
+ ft = (instr_.FunctionFieldRaw() != MOVF) ? get_fpu_register_double(ft_reg())
+ : 0.0;
fd = get_fpu_register_double(fd_reg());
int64_t ft_int = bit_cast<int64_t>(ft);
int64_t fd_int = bit_cast<int64_t>(fd);
- cc = get_instr()->FCccValue();
+ cc = instr_.FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case RINT: {
DCHECK(IsMipsArchVariant(kMips32r6));
double result, temp, temp_result;
@@ -2440,7 +2487,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
case MOVN_C: {
DCHECK(IsMipsArchVariant(kMips32r2));
- int32_t rt_reg = get_instr()->RtValue();
+ int32_t rt_reg = instr_.RtValue();
int32_t rt = get_register(rt_reg);
if (rt != 0) {
set_fpu_register_double(fd_reg(), fs);
@@ -2451,7 +2498,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
// Same function field for MOVT.D and MOVF.D
uint32_t ft_cc = (ft_reg() >> 2) & 0x7;
ft_cc = get_fcsr_condition_bit(ft_cc);
- if (get_instr()->Bit(16)) { // Read Tf bit.
+ if (instr_.Bit(16)) { // Read Tf bit.
// MOVT.D
if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
} else {
@@ -2477,43 +2524,65 @@ void Simulator::DecodeTypeRegisterDRsType() {
set_fpu_register_double(fd_reg(), FPUMaxA(ft, fs));
break;
case ADD_D:
- set_fpu_register_double(fd_reg(), fs + ft);
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs + rhs; }, fs, ft));
break;
case SUB_D:
- set_fpu_register_double(fd_reg(), fs - ft);
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs - rhs; }, fs, ft));
+ break;
+ case MADDF_D:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ set_fpu_register_double(fd_reg(), fd + (fs * ft));
+ break;
+ case MSUBF_D:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ set_fpu_register_double(fd_reg(), fd - (fs * ft));
break;
case MUL_D:
- set_fpu_register_double(fd_reg(), fs * ft);
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs * rhs; }, fs, ft));
break;
case DIV_D:
- set_fpu_register_double(fd_reg(), fs / ft);
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs / rhs; }, fs, ft));
break;
case ABS_D:
- set_fpu_register_double(fd_reg(), fabs(fs));
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation([](double fs) { return FPAbs(fs); }, fs));
break;
case MOV_D:
set_fpu_register_double(fd_reg(), fs);
break;
case NEG_D:
- set_fpu_register_double(fd_reg(), -fs);
+ set_fpu_register_double(
+ fd_reg(), FPUCanonalizeOperation([](double src) { return -src; },
+ KeepSign::yes, fs));
break;
case SQRT_D:
- lazily_initialize_fast_sqrt(isolate_);
- set_fpu_register_double(fd_reg(), fast_sqrt(fs, isolate_));
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation([](double fs) { return std::sqrt(fs); }, fs));
break;
- case RSQRT_D: {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- lazily_initialize_fast_sqrt(isolate_);
- double result = 1.0 / fast_sqrt(fs, isolate_);
- set_fpu_register_double(fd_reg(), result);
+ case RSQRT_D:
+ set_fpu_register_double(
+ fd_reg(), FPUCanonalizeOperation(
+ [](double fs) { return 1.0 / std::sqrt(fs); }, fs));
break;
- }
- case RECIP_D: {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- double result = 1.0 / fs;
- set_fpu_register_double(fd_reg(), result);
+ case RECIP_D:
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation([](double fs) { return 1.0 / fs; }, fs));
break;
- }
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
@@ -2744,7 +2813,7 @@ void Simulator::DecodeTypeRegisterWRsType() {
float fs = get_fpu_register_float(fs_reg());
float ft = get_fpu_register_float(ft_reg());
int32_t alu_out = 0x12345678;
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case CVT_S_W: // Convert word to float (single).
alu_out = get_fpu_register_signed_word(fs_reg());
set_fpu_register_float(fd_reg(), static_cast<float>(alu_out));
@@ -2840,9 +2909,9 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t ft_int = bit_cast<int32_t>(ft);
int32_t fd_int = bit_cast<int32_t>(fd);
uint32_t cc, fcsr_cc;
- cc = get_instr()->FCccValue();
+ cc = instr_.FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case RINT: {
DCHECK(IsMipsArchVariant(kMips32r6));
float result, temp_result;
@@ -2882,43 +2951,65 @@ void Simulator::DecodeTypeRegisterSRsType() {
break;
}
case ADD_S:
- set_fpu_register_float(fd_reg(), fs + ft);
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; },
+ fs, ft));
break;
case SUB_S:
- set_fpu_register_float(fd_reg(), fs - ft);
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; },
+ fs, ft));
+ break;
+ case MADDF_S:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ set_fpu_register_float(fd_reg(), fd + (fs * ft));
+ break;
+ case MSUBF_S:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ set_fpu_register_float(fd_reg(), fd - (fs * ft));
break;
case MUL_S:
- set_fpu_register_float(fd_reg(), fs * ft);
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; },
+ fs, ft));
break;
case DIV_S:
- set_fpu_register_float(fd_reg(), fs / ft);
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; },
+ fs, ft));
break;
case ABS_S:
- set_fpu_register_float(fd_reg(), fabs(fs));
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float fs) { return FPAbs(fs); }, fs));
break;
case MOV_S:
set_fpu_register_float(fd_reg(), fs);
break;
case NEG_S:
- set_fpu_register_float(fd_reg(), -fs);
+ set_fpu_register_float(
+ fd_reg(), FPUCanonalizeOperation([](float src) { return -src; },
+ KeepSign::yes, fs));
break;
case SQRT_S:
- lazily_initialize_fast_sqrt(isolate_);
- set_fpu_register_float(fd_reg(), fast_sqrt(fs, isolate_));
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float src) { return std::sqrt(src); }, fs));
break;
- case RSQRT_S: {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- lazily_initialize_fast_sqrt(isolate_);
- float result = 1.0 / fast_sqrt(fs, isolate_);
- set_fpu_register_float(fd_reg(), result);
+ case RSQRT_S:
+ set_fpu_register_float(
+ fd_reg(), FPUCanonalizeOperation(
+ [](float src) { return 1.0 / std::sqrt(src); }, fs));
break;
- }
- case RECIP_S: {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- float result = 1.0 / fs;
- set_fpu_register_float(fd_reg(), result);
+ case RECIP_S:
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float src) { return 1.0 / src; }, fs));
break;
- }
case C_F_D:
set_fcsr_bit(fcsr_cc, false);
break;
@@ -3047,7 +3138,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
uint32_t ft_cc = (ft_reg() >> 2) & 0x7;
ft_cc = get_fcsr_condition_bit(ft_cc);
- if (get_instr()->Bit(16)) { // Read Tf bit.
+ if (instr_.Bit(16)) { // Read Tf bit.
// MOVT.D
if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
} else {
@@ -3209,7 +3300,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
void Simulator::DecodeTypeRegisterLRsType() {
double fs = get_fpu_register_double(fs_reg());
double ft = get_fpu_register_double(ft_reg());
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case CVT_D_L: // Mips32r2 instruction.
// Watch the signs here, we want 2 32-bit vals
// to make a sign-64.
@@ -3311,7 +3402,7 @@ void Simulator::DecodeTypeRegisterLRsType() {
void Simulator::DecodeTypeRegisterCOP1() {
- switch (get_instr()->RsFieldRaw()) {
+ switch (instr_.RsFieldRaw()) {
case CFC1:
// At the moment only FCSR is supported.
DCHECK(fs_reg() == kFCSRRegister);
@@ -3374,14 +3465,43 @@ void Simulator::DecodeTypeRegisterCOP1() {
void Simulator::DecodeTypeRegisterCOP1X() {
- switch (get_instr()->FunctionFieldRaw()) {
- case MADD_D:
+ switch (instr_.FunctionFieldRaw()) {
+ case MADD_S: {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ float fr, ft, fs;
+ fr = get_fpu_register_float(fr_reg());
+ fs = get_fpu_register_float(fs_reg());
+ ft = get_fpu_register_float(ft_reg());
+ set_fpu_register_float(fd_reg(), fs * ft + fr);
+ break;
+ }
+ case MSUB_S: {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ float fr, ft, fs;
+ fr = get_fpu_register_float(fr_reg());
+ fs = get_fpu_register_float(fs_reg());
+ ft = get_fpu_register_float(ft_reg());
+ set_fpu_register_float(fd_reg(), fs * ft - fr);
+ break;
+ }
+ case MADD_D: {
+ DCHECK(IsMipsArchVariant(kMips32r2));
double fr, ft, fs;
fr = get_fpu_register_double(fr_reg());
fs = get_fpu_register_double(fs_reg());
ft = get_fpu_register_double(ft_reg());
set_fpu_register_double(fd_reg(), fs * ft + fr);
break;
+ }
+ case MSUB_D: {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ double fr, ft, fs;
+ fr = get_fpu_register_double(fr_reg());
+ fs = get_fpu_register_double(fs_reg());
+ ft = get_fpu_register_double(ft_reg());
+ set_fpu_register_double(fd_reg(), fs * ft - fr);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -3394,7 +3514,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
uint64_t u64hilo = 0;
bool do_interrupt = false;
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case SELEQZ_S:
DCHECK(IsMipsArchVariant(kMips32r6));
set_register(rd_reg(), rt() == 0 ? rs() : 0);
@@ -3534,7 +3654,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
break;
case DIV:
if (IsMipsArchVariant(kMips32r6)) {
- switch (get_instr()->SaValue()) {
+ switch (sa()) {
case DIV_OP:
if (rs() == INT_MIN && rt() == -1) {
set_register(rd_reg(), INT_MIN);
@@ -3569,7 +3689,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
break;
case DIVU:
if (IsMipsArchVariant(kMips32r6)) {
- switch (get_instr()->SaValue()) {
+ switch (sa()) {
case DIV_OP:
if (rt_u() != 0) {
set_register(rd_reg(), rs_u() / rt_u());
@@ -3676,9 +3796,9 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
}
break;
case MOVCI: {
- uint32_t cc = get_instr()->FBccValue();
+ uint32_t cc = instr_.FBccValue();
uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
- if (get_instr()->Bit(16)) { // Read Tf bit.
+ if (instr_.Bit(16)) { // Read Tf bit.
if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
} else {
if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
@@ -3695,14 +3815,14 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
UNREACHABLE();
}
if (do_interrupt) {
- SoftwareInterrupt(get_instr());
+ SoftwareInterrupt();
}
}
void Simulator::DecodeTypeRegisterSPECIAL2() {
int32_t alu_out;
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case MUL:
// Only the lower 32 bits are kept.
alu_out = rs_u() * rt_u();
@@ -3725,7 +3845,7 @@ void Simulator::DecodeTypeRegisterSPECIAL2() {
void Simulator::DecodeTypeRegisterSPECIAL3() {
int32_t alu_out;
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case INS: { // Mips32r2 instruction.
// Interpret rd field as 5-bit msb of insert.
uint16_t msb = rd_reg();
@@ -3750,7 +3870,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
break;
}
case BSHFL: {
- int sa = get_instr()->SaFieldRaw() >> kSaShift;
+ int sa = instr_.SaFieldRaw() >> kSaShift;
switch (sa) {
case BITSWAP: {
uint32_t input = static_cast<uint32_t>(rt());
@@ -3822,7 +3942,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
break;
}
default: {
- const uint8_t bp = get_instr()->Bp2Value();
+ const uint8_t bp = instr_.Bp2Value();
sa >>= kBp2Bits;
switch (sa) {
case ALIGN: {
@@ -3850,16 +3970,9 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
}
}
-
-void Simulator::DecodeTypeRegister(Instruction* instr) {
- const Opcode op = instr->OpcodeFieldRaw();
-
- // Set up the variables if needed before executing the instruction.
- // ConfigureTypeRegister(instr);
- set_instr(instr);
-
+void Simulator::DecodeTypeRegister() {
// ---------- Execution.
- switch (op) {
+ switch (instr_.OpcodeFieldRaw()) {
case COP1:
DecodeTypeRegisterCOP1();
break;
@@ -3882,17 +3995,17 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
// Type 2: instructions using a 16, 21 or 26 bits immediate. (e.g. beq, beqc).
-void Simulator::DecodeTypeImmediate(Instruction* instr) {
+void Simulator::DecodeTypeImmediate() {
// Instruction fields.
- Opcode op = instr->OpcodeFieldRaw();
- int32_t rs_reg = instr->RsValue();
- int32_t rs = get_register(instr->RsValue());
+ Opcode op = instr_.OpcodeFieldRaw();
+ int32_t rs_reg = instr_.RsValue();
+ int32_t rs = get_register(instr_.RsValue());
uint32_t rs_u = static_cast<uint32_t>(rs);
- int32_t rt_reg = instr->RtValue(); // Destination register.
+ int32_t rt_reg = instr_.RtValue(); // Destination register.
int32_t rt = get_register(rt_reg);
- int16_t imm16 = instr->Imm16Value();
+ int16_t imm16 = instr_.Imm16Value();
- int32_t ft_reg = instr->FtValue(); // Destination register.
+ int32_t ft_reg = instr_.FtValue(); // Destination register.
// Zero extended immediate.
uint32_t oe_imm16 = 0xffff & imm16;
@@ -3912,38 +4025,36 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
int32_t addr = 0x0;
// Branch instructions common part.
- auto BranchAndLinkHelper = [this, instr, &next_pc,
- &execute_branch_delay_instruction](
- bool do_branch) {
- execute_branch_delay_instruction = true;
- int32_t current_pc = get_pc();
- if (do_branch) {
- int16_t imm16 = instr->Imm16Value();
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- set_register(31, current_pc + 2 * Instruction::kInstrSize);
- } else {
- next_pc = current_pc + 2 * Instruction::kInstrSize;
- }
- };
+ auto BranchAndLinkHelper =
+ [this, &next_pc, &execute_branch_delay_instruction](bool do_branch) {
+ execute_branch_delay_instruction = true;
+ int32_t current_pc = get_pc();
+ if (do_branch) {
+ int16_t imm16 = this->instr_.Imm16Value();
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
+ }
+ };
- auto BranchHelper = [this, instr, &next_pc,
+ auto BranchHelper = [this, &next_pc,
&execute_branch_delay_instruction](bool do_branch) {
execute_branch_delay_instruction = true;
int32_t current_pc = get_pc();
if (do_branch) {
- int16_t imm16 = instr->Imm16Value();
+ int16_t imm16 = this->instr_.Imm16Value();
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
} else {
next_pc = current_pc + 2 * Instruction::kInstrSize;
}
};
- auto BranchAndLinkCompactHelper = [this, instr, &next_pc](bool do_branch,
- int bits) {
+ auto BranchAndLinkCompactHelper = [this, &next_pc](bool do_branch, int bits) {
int32_t current_pc = get_pc();
CheckForbiddenSlot(current_pc);
if (do_branch) {
- int32_t imm = instr->ImmValue(bits);
+ int32_t imm = this->instr_.ImmValue(bits);
imm <<= 32 - bits;
imm >>= 32 - bits;
next_pc = current_pc + (imm << 2) + Instruction::kInstrSize;
@@ -3951,28 +4062,27 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
}
};
- auto BranchCompactHelper = [&next_pc, this, instr](bool do_branch, int bits) {
+ auto BranchCompactHelper = [this, &next_pc](bool do_branch, int bits) {
int32_t current_pc = get_pc();
CheckForbiddenSlot(current_pc);
if (do_branch) {
- int32_t imm = instr->ImmValue(bits);
+ int32_t imm = this->instr_.ImmValue(bits);
imm <<= 32 - bits;
imm >>= 32 - bits;
next_pc = get_pc() + (imm << 2) + Instruction::kInstrSize;
}
};
-
switch (op) {
// ------------- COP1. Coprocessor instructions.
case COP1:
- switch (instr->RsFieldRaw()) {
+ switch (instr_.RsFieldRaw()) {
case BC1: { // Branch on coprocessor condition.
// Floating point.
- uint32_t cc = instr->FBccValue();
+ uint32_t cc = instr_.FBccValue();
uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
uint32_t cc_value = test_fcsr_bit(fcsr_cc);
- bool do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
+ bool do_branch = (instr_.FBtrueValue()) ? cc_value : !cc_value;
BranchHelper(do_branch);
break;
}
@@ -3988,7 +4098,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
// ------------- REGIMM class.
case REGIMM:
- switch (instr->RtFieldRaw()) {
+ switch (instr_.RtFieldRaw()) {
case BLTZ:
BranchHelper(rs < 0);
break;
@@ -4196,7 +4306,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
set_register(rt_reg, ReadB(rs + se_imm16));
break;
case LH:
- set_register(rt_reg, ReadH(rs + se_imm16, instr));
+ set_register(rt_reg, ReadH(rs + se_imm16, instr_.instr()));
break;
case LWL: {
// al_offset is offset of the effective address within an aligned word.
@@ -4204,20 +4314,20 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint8_t byte_shift = kPointerAlignmentMask - al_offset;
uint32_t mask = (1 << byte_shift * 8) - 1;
addr = rs + se_imm16 - al_offset;
- alu_out = ReadW(addr, instr);
+ alu_out = ReadW(addr, instr_.instr());
alu_out <<= byte_shift * 8;
alu_out |= rt & mask;
set_register(rt_reg, alu_out);
break;
}
case LW:
- set_register(rt_reg, ReadW(rs + se_imm16, instr));
+ set_register(rt_reg, ReadW(rs + se_imm16, instr_.instr()));
break;
case LBU:
set_register(rt_reg, ReadBU(rs + se_imm16));
break;
case LHU:
- set_register(rt_reg, ReadHU(rs + se_imm16, instr));
+ set_register(rt_reg, ReadHU(rs + se_imm16, instr_.instr()));
break;
case LWR: {
// al_offset is offset of the effective address within an aligned word.
@@ -4225,7 +4335,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint8_t byte_shift = kPointerAlignmentMask - al_offset;
uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
addr = rs + se_imm16 - al_offset;
- alu_out = ReadW(addr, instr);
+ alu_out = ReadW(addr, instr_.instr());
alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
alu_out |= rt & mask;
set_register(rt_reg, alu_out);
@@ -4235,7 +4345,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
WriteB(rs + se_imm16, static_cast<int8_t>(rt));
break;
case SH:
- WriteH(rs + se_imm16, static_cast<uint16_t>(rt), instr);
+ WriteH(rs + se_imm16, static_cast<uint16_t>(rt), instr_.instr());
break;
case SWL: {
uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
@@ -4243,40 +4353,40 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
addr = rs + se_imm16 - al_offset;
// Value to be written in memory.
- uint32_t mem_value = ReadW(addr, instr) & mask;
+ uint32_t mem_value = ReadW(addr, instr_.instr()) & mask;
mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
- WriteW(addr, mem_value, instr);
+ WriteW(addr, mem_value, instr_.instr());
break;
}
case SW:
- WriteW(rs + se_imm16, rt, instr);
+ WriteW(rs + se_imm16, rt, instr_.instr());
break;
case SWR: {
uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
uint32_t mask = (1 << al_offset * 8) - 1;
addr = rs + se_imm16 - al_offset;
- uint32_t mem_value = ReadW(addr, instr);
+ uint32_t mem_value = ReadW(addr, instr_.instr());
mem_value = (rt << al_offset * 8) | (mem_value & mask);
- WriteW(addr, mem_value, instr);
+ WriteW(addr, mem_value, instr_.instr());
break;
}
case LWC1:
set_fpu_register_hi_word(ft_reg, 0);
- set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr));
+ set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr_.instr()));
break;
case LDC1:
- set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr));
+ set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr_.instr()));
break;
case SWC1:
- WriteW(rs + se_imm16, get_fpu_register_word(ft_reg), instr);
+ WriteW(rs + se_imm16, get_fpu_register_word(ft_reg), instr_.instr());
break;
case SDC1:
- WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr);
+ WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr_.instr());
break;
// ------------- PC-Relative instructions.
case PCREL: {
// rt field: checking 5-bits.
- int32_t imm21 = instr->Imm21Value();
+ int32_t imm21 = instr_.Imm21Value();
int32_t current_pc = get_pc();
uint8_t rt = (imm21 >> kImm16Bits);
switch (rt) {
@@ -4288,7 +4398,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
alu_out = current_pc + (se_imm16 << 16);
break;
default: {
- int32_t imm19 = instr->Imm19Value();
+ int32_t imm19 = instr_.Imm19Value();
// rt field: checking the most significant 2-bits.
rt = (imm21 >> kImm19Bits);
switch (rt) {
@@ -4336,13 +4446,15 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Type 3: instructions using a 26 bytes immediate. (e.g. j, jal).
-void Simulator::DecodeTypeJump(Instruction* instr) {
+void Simulator::DecodeTypeJump() {
+ SimInstruction simInstr = instr_;
// Get current pc.
int32_t current_pc = get_pc();
// Get unchanged bits of pc.
int32_t pc_high_bits = current_pc & 0xf0000000;
// Next pc.
- int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
+
+ int32_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
// Execute branch delay slot.
// We don't check for end_sim_pc. First it should not be met as the current pc
@@ -4353,7 +4465,7 @@ void Simulator::DecodeTypeJump(Instruction* instr) {
// Update pc and ra if necessary.
// Do this after the branch delay execution.
- if (instr->IsLinkingInstruction()) {
+ if (simInstr.IsLinkingInstruction()) {
set_register(31, current_pc + 2 * Instruction::kInstrSize);
}
set_pc(next_pc);
@@ -4375,15 +4487,16 @@ void Simulator::InstructionDecode(Instruction* instr) {
dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
}
- switch (instr->InstructionType(Instruction::TypeChecks::EXTRA)) {
+ instr_ = instr;
+ switch (instr_.InstructionType()) {
case Instruction::kRegisterType:
- DecodeTypeRegister(instr);
+ DecodeTypeRegister();
break;
case Instruction::kImmediateType:
- DecodeTypeImmediate(instr);
+ DecodeTypeImmediate();
break;
case Instruction::kJumpType:
- DecodeTypeJump(instr);
+ DecodeTypeJump();
break;
default:
UNSUPPORTED();
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 5c77756394..3795eecc78 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -113,6 +113,39 @@ class CachePage {
char validity_map_[kValidityMapSize]; // One byte per line.
};
+class SimInstructionBase : public InstructionBase {
+ public:
+ Type InstructionType() const { return type_; }
+ inline Instruction* instr() const { return instr_; }
+ inline int32_t operand() const { return operand_; }
+
+ protected:
+ SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
+ explicit SimInstructionBase(Instruction* instr) {}
+
+ int32_t operand_;
+ Instruction* instr_;
+ Type type_;
+
+ private:
+ DISALLOW_ASSIGN(SimInstructionBase);
+};
+
+class SimInstruction : public InstructionGetters<SimInstructionBase> {
+ public:
+ SimInstruction() {}
+
+ explicit SimInstruction(Instruction* instr) { *this = instr; }
+
+ SimInstruction& operator=(Instruction* instr) {
+ operand_ = *reinterpret_cast<const int32_t*>(instr);
+ instr_ = instr;
+ type_ = InstructionBase::InstructionType();
+ DCHECK(reinterpret_cast<void*>(&operand_) == this);
+ return *this;
+ }
+};
+
class Simulator {
public:
friend class MipsDebugger;
@@ -216,7 +249,7 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
- static void TearDown(base::HashMap* i_cache, Redirection* first);
+ static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
@@ -236,7 +269,8 @@ class Simulator {
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
- static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
+ static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+ size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_ra, end_sim_pc).
@@ -299,8 +333,10 @@ class Simulator {
inline int32_t SetDoubleHIW(double* addr);
inline int32_t SetDoubleLOW(double* addr);
+ SimInstruction instr_;
+
// Executing is handled based on the instruction type.
- void DecodeTypeRegister(Instruction* instr);
+ void DecodeTypeRegister();
// Functions called from DecodeTypeRegister.
void DecodeTypeRegisterCOP1();
@@ -322,39 +358,34 @@ class Simulator {
void DecodeTypeRegisterLRsType();
- Instruction* currentInstr_;
-
- inline Instruction* get_instr() const { return currentInstr_; }
- inline void set_instr(Instruction* instr) { currentInstr_ = instr; }
-
- inline int32_t rs_reg() const { return currentInstr_->RsValue(); }
+ inline int32_t rs_reg() const { return instr_.RsValue(); }
inline int32_t rs() const { return get_register(rs_reg()); }
inline uint32_t rs_u() const {
return static_cast<uint32_t>(get_register(rs_reg()));
}
- inline int32_t rt_reg() const { return currentInstr_->RtValue(); }
+ inline int32_t rt_reg() const { return instr_.RtValue(); }
inline int32_t rt() const { return get_register(rt_reg()); }
inline uint32_t rt_u() const {
return static_cast<uint32_t>(get_register(rt_reg()));
}
- inline int32_t rd_reg() const { return currentInstr_->RdValue(); }
- inline int32_t fr_reg() const { return currentInstr_->FrValue(); }
- inline int32_t fs_reg() const { return currentInstr_->FsValue(); }
- inline int32_t ft_reg() const { return currentInstr_->FtValue(); }
- inline int32_t fd_reg() const { return currentInstr_->FdValue(); }
- inline int32_t sa() const { return currentInstr_->SaValue(); }
- inline int32_t lsa_sa() const { return currentInstr_->LsaSaValue(); }
+ inline int32_t rd_reg() const { return instr_.RdValue(); }
+ inline int32_t fr_reg() const { return instr_.FrValue(); }
+ inline int32_t fs_reg() const { return instr_.FsValue(); }
+ inline int32_t ft_reg() const { return instr_.FtValue(); }
+ inline int32_t fd_reg() const { return instr_.FdValue(); }
+ inline int32_t sa() const { return instr_.SaValue(); }
+ inline int32_t lsa_sa() const { return instr_.LsaSaValue(); }
inline void SetResult(int32_t rd_reg, int32_t alu_out) {
set_register(rd_reg, alu_out);
TraceRegWr(alu_out);
}
- void DecodeTypeImmediate(Instruction* instr);
- void DecodeTypeJump(Instruction* instr);
+ void DecodeTypeImmediate();
+ void DecodeTypeJump();
// Used for breakpoints and traps.
- void SoftwareInterrupt(Instruction* instr);
+ void SoftwareInterrupt();
// Compact branch guard.
void CheckForbiddenSlot(int32_t current_pc) {
@@ -400,9 +431,12 @@ class Simulator {
}
// ICache.
- static void CheckICache(base::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
- static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
+ static void CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr);
+ static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+ int size);
+ static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page);
enum Exception {
none,
@@ -448,7 +482,7 @@ class Simulator {
char* last_debugger_input_;
// Icache simulation.
- base::HashMap* i_cache_;
+ base::CustomMatcherHashMap* i_cache_;
v8::internal::Isolate* isolate_;
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 21a243453a..b35b166a2e 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -2780,12 +2780,49 @@ void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}
+void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
+}
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r2);
GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}
+void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
+}
+
+void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
+}
+
+void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r6);
+ GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
+}
+
+void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r6);
+ GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
+}
+
+void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r6);
+ GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
+}
+
+void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r6);
+ GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
+}
void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
@@ -2818,13 +2855,11 @@ void Assembler::mov_s(FPURegister fd, FPURegister fs) {
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r2);
GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
}
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r2);
GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index e269acfc28..dc3198cd9f 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -939,7 +939,14 @@ class Assembler : public AssemblerBase {
void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
void mul_s(FPURegister fd, FPURegister fs, FPURegister ft);
void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+ void msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+ void msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+ void maddf_s(FPURegister fd, FPURegister fs, FPURegister ft);
+ void maddf_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void msubf_s(FPURegister fd, FPURegister fs, FPURegister ft);
+ void msubf_d(FPURegister fd, FPURegister fs, FPURegister ft);
void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
void abs_s(FPURegister fd, FPURegister fs);
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 4d9f1209b4..e089b54f87 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -1783,7 +1783,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- Label done_initialize_count, done_increment_count;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
@@ -1803,7 +1802,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Register feedback_map = a6;
Register weak_value = t0;
__ ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
- __ Branch(&done_increment_count, eq, a1, Operand(weak_value));
+ __ Branch(&done, eq, a1, Operand(weak_value));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&done, eq, a5, Operand(at));
__ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
@@ -1825,7 +1824,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
__ Branch(&megamorphic, ne, a1, Operand(a5));
- __ jmp(&done_increment_count);
+ __ jmp(&done);
__ bind(&miss);
@@ -1853,32 +1852,21 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
- __ Branch(&done_initialize_count);
+ __ Branch(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
- __ bind(&done_initialize_count);
- // Initialize the call counter.
-
- __ SmiScale(a4, a3, kPointerSizeLog2);
- __ Daddu(a4, a2, Operand(a4));
- __ li(a5, Operand(Smi::FromInt(1)));
- __ Branch(USE_DELAY_SLOT, &done);
- __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + kPointerSize));
-
- __ bind(&done_increment_count);
+ __ bind(&done);
- // Increment the call count for monomorphic function calls.
+ // Increment the call count for all function calls.
__ SmiScale(a4, a3, kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a4));
__ ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
__ sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
-
- __ bind(&done);
}
@@ -1965,6 +1953,15 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ bind(&exit_);
}
+// Note: feedback_vector and slot are clobbered after the call.
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+ Register slot) {
+ __ dsrl(t0, slot, 32 - kPointerSizeLog2);
+ __ Daddu(slot, feedback_vector, Operand(t0));
+ __ ld(t0, FieldMemOperand(slot, FixedArray::kHeaderSize + kPointerSize));
+ __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
+ __ sd(t0, FieldMemOperand(slot, FixedArray::kHeaderSize + kPointerSize));
+}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// a1 - function
@@ -1977,11 +1974,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ li(a0, Operand(arg_count()));
// Increment the call count for monomorphic function calls.
- __ dsrl(t0, a3, 32 - kPointerSizeLog2);
- __ Daddu(a3, a2, Operand(t0));
- __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
- __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
- __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCallCount(masm, a2, a3);
__ mov(a2, a4);
__ mov(a3, a1);
@@ -1994,7 +1987,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// a1 - function
// a3 - slot id (Smi)
// a2 - vector
- Label extra_checks_or_miss, call, call_function;
+ Label extra_checks_or_miss, call, call_function, call_count_incremented;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2024,14 +2017,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(a1, &extra_checks_or_miss);
+ __ bind(&call_function);
// Increment the call count for monomorphic function calls.
- __ dsrl(t0, a3, 32 - kPointerSizeLog2);
- __ Daddu(a3, a2, Operand(t0));
- __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
- __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
- __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCallCount(masm, a2, a3);
- __ bind(&call_function);
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
@@ -2073,6 +2062,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
__ bind(&call);
+ IncrementCallCount(masm, a2, a3);
+
+ __ bind(&call_count_incremented);
+
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
USE_DELAY_SLOT);
@@ -2098,12 +2091,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ ld(t1, NativeContextMemOperand());
__ Branch(&miss, ne, t0, Operand(t1));
- // Initialize the call counter.
- __ dsrl(at, a3, 32 - kPointerSizeLog2);
- __ Daddu(at, a2, Operand(at));
- __ li(t0, Operand(Smi::FromInt(1)));
- __ sd(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-
// Store the function. Use a stub since we need a frame for allocation.
// a2 - vector
// a3 - slot
@@ -2111,9 +2098,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(a2, a3);
__ Push(cp, a1);
__ CallStub(&create_stub);
__ Pop(cp, a1);
+ __ Pop(a2, a3);
}
__ Branch(&call_function);
@@ -2123,7 +2112,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
GenerateMiss(masm);
- __ Branch(&call);
+ __ Branch(&call_count_incremented);
}
@@ -2283,293 +2272,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
}
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
- // Stack frame on entry.
- // ra: return address
- // sp[0]: to
- // sp[4]: from
- // sp[8]: string
-
- // This stub is called from the native-call %_SubString(...), so
- // nothing can be assumed about the arguments. It is tested that:
- // "string" is a sequential string,
- // both "from" and "to" are smis, and
- // 0 <= from <= to <= string.length.
- // If any of these assumptions fail, we call the runtime system.
-
- const int kToOffset = 0 * kPointerSize;
- const int kFromOffset = 1 * kPointerSize;
- const int kStringOffset = 2 * kPointerSize;
-
- __ ld(a2, MemOperand(sp, kToOffset));
- __ ld(a3, MemOperand(sp, kFromOffset));
-
- STATIC_ASSERT(kSmiTag == 0);
-
- // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
- // safe in this case.
- __ JumpIfNotSmi(a2, &runtime);
- __ JumpIfNotSmi(a3, &runtime);
- // Both a2 and a3 are untagged integers.
-
- __ SmiUntag(a2, a2);
- __ SmiUntag(a3, a3);
- __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
-
- __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
- __ Dsubu(a2, a2, a3);
-
- // Make sure first argument is a string.
- __ ld(v0, MemOperand(sp, kStringOffset));
- __ JumpIfSmi(v0, &runtime);
- __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
- __ And(a4, a1, Operand(kIsNotStringMask));
-
- __ Branch(&runtime, ne, a4, Operand(zero_reg));
-
- Label single_char;
- __ Branch(&single_char, eq, a2, Operand(1));
-
- // Short-cut for the case of trivial substring.
- Label return_v0;
- // v0: original string
- // a2: result string length
- __ ld(a4, FieldMemOperand(v0, String::kLengthOffset));
- __ SmiUntag(a4);
- // Return original string.
- __ Branch(&return_v0, eq, a2, Operand(a4));
- // Longer than original string's length or negative: unsafe arguments.
- __ Branch(&runtime, hi, a2, Operand(a4));
- // Shorter than original string's length: an actual substring.
-
- // Deal with different string types: update the index if necessary
- // and put the underlying string into a5.
- // v0: original string
- // a1: instance type
- // a2: length
- // a3: from index (untagged)
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ And(a4, a1, Operand(kIsIndirectStringMask));
- __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, a4, Operand(zero_reg));
- // a4 is used as a scratch register and can be overwritten in either case.
- __ And(a4, a1, Operand(kSlicedNotConsMask));
- __ Branch(&sliced_string, ne, a4, Operand(zero_reg));
- // Cons string. Check whether it is flat, then fetch first part.
- __ ld(a5, FieldMemOperand(v0, ConsString::kSecondOffset));
- __ LoadRoot(a4, Heap::kempty_stringRootIndex);
- __ Branch(&runtime, ne, a5, Operand(a4));
- __ ld(a5, FieldMemOperand(v0, ConsString::kFirstOffset));
- // Update instance type.
- __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ ld(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
- __ ld(a4, FieldMemOperand(v0, SlicedString::kOffsetOffset));
- __ SmiUntag(a4); // Add offset to index.
- __ Daddu(a3, a3, a4);
- // Update instance type.
- __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the expected register.
- __ mov(a5, v0);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // a5: underlying subject string
- // a1: instance type of underlying subject string
- // a2: length
- // a3: adjusted start index (untagged)
- // Short slice. Copy instead of slicing.
- __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ And(a4, a1, Operand(kStringEncodingMask));
- __ Branch(&two_byte_slice, eq, a4, Operand(zero_reg));
- __ AllocateOneByteSlicedString(v0, a2, a6, a7, &runtime);
- __ jmp(&set_slice_header);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(v0, a2, a6, a7, &runtime);
- __ bind(&set_slice_header);
- __ SmiTag(a3);
- __ sd(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
- __ sd(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
- __ jmp(&return_v0);
-
- __ bind(&copy_routine);
- }
-
- // a5: underlying subject string
- // a1: instance type of underlying subject string
- // a2: length
- // a3: adjusted start index (untagged)
- Label two_byte_sequential, sequential_string, allocate_result;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(a4, a1, Operand(kExternalStringTag));
- __ Branch(&sequential_string, eq, a4, Operand(zero_reg));
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(a4, a1, Operand(kShortExternalStringTag));
- __ Branch(&runtime, ne, a4, Operand(zero_reg));
- __ ld(a5, FieldMemOperand(a5, ExternalString::kResourceDataOffset));
- // a5 already points to the first character of underlying string.
- __ jmp(&allocate_result);
-
- __ bind(&sequential_string);
- // Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Daddu(a5, a5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&allocate_result);
- // Sequential acii string. Allocate the result.
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ And(a4, a1, Operand(kStringEncodingMask));
- __ Branch(&two_byte_sequential, eq, a4, Operand(zero_reg));
-
- // Allocate and copy the resulting one_byte string.
- __ AllocateOneByteString(v0, a2, a4, a6, a7, &runtime);
-
- // Locate first character of substring to copy.
- __ Daddu(a5, a5, a3);
-
- // Locate first character of result.
- __ Daddu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // v0: result string
- // a1: first character of result string
- // a2: result string length
- // a5: first character of substring to copy
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharacters(
- masm, a1, a5, a2, a3, String::ONE_BYTE_ENCODING);
- __ jmp(&return_v0);
-
- // Allocate and copy the resulting two-byte string.
- __ bind(&two_byte_sequential);
- __ AllocateTwoByteString(v0, a2, a4, a6, a7, &runtime);
-
- // Locate first character of substring to copy.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ Dlsa(a5, a5, a3, 1);
- // Locate first character of result.
- __ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // v0: result string.
- // a1: first character of result.
- // a2: result length.
- // a5: first character of substring to copy.
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharacters(
- masm, a1, a5, a2, a3, String::TWO_BYTE_ENCODING);
-
- __ bind(&return_v0);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1, a3, a4);
- __ DropAndRet(3);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString);
-
- __ bind(&single_char);
- // v0: original string
- // a1: instance type
- // a2: length
- // a3: from index (untagged)
- __ SmiTag(a3);
- StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
- RECEIVER_IS_STRING);
- generator.GenerateFast(masm);
- __ DropAndRet(3);
- generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
- // The ToString stub takes on argument in a0.
- Label is_number;
- __ JumpIfSmi(a0, &is_number);
-
- Label not_string;
- __ GetObjectType(a0, a1, a1);
- // a0: receiver
- // a1: receiver instance type
- __ Branch(&not_string, ge, a1, Operand(FIRST_NONSTRING_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_string);
-
- Label not_heap_number;
- __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ ld(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
- __ bind(&not_oddball);
-
- __ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
- // The ToName stub takes on argument in a0.
- Label is_number;
- __ JumpIfSmi(a0, &is_number);
-
- Label not_name;
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ GetObjectType(a0, a1, a1);
- // a0: receiver
- // a1: receiver instance type
- __ Branch(&not_name, gt, a1, Operand(LAST_NAME_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_name);
-
- Label not_heap_number;
- __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ ld(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
- __ bind(&not_oddball);
-
- __ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToName);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -3927,7 +3629,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
__ ld(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
// Load the map into the correct register.
- DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
__ Move(feedback, too_far);
__ Daddu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(t9);
@@ -4638,7 +4340,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
Label too_big_for_new_space;
__ bind(&allocate);
__ Branch(&too_big_for_new_space, gt, a5,
- Operand(Page::kMaxRegularHeapObjectSize));
+ Operand(kMaxRegularHeapObjectSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(a0);
@@ -4993,8 +4695,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ Branch(&too_big_for_new_space, gt, a5,
- Operand(Page::kMaxRegularHeapObjectSize));
+ __ Branch(&too_big_for_new_space, gt, a5, Operand(kMaxRegularHeapObjectSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(a0);
diff --git a/deps/v8/src/mips64/constants-mips64.cc b/deps/v8/src/mips64/constants-mips64.cc
index c0e98eb623..11ae2421b0 100644
--- a/deps/v8/src/mips64/constants-mips64.cc
+++ b/deps/v8/src/mips64/constants-mips64.cc
@@ -121,118 +121,6 @@ int FPURegisters::Number(const char* name) {
// No Cregister with the reguested name found.
return kInvalidFPURegister;
}
-
-
-// -----------------------------------------------------------------------------
-// Instructions.
-
-bool Instruction::IsForbiddenAfterBranchInstr(Instr instr) {
- Opcode opcode = static_cast<Opcode>(instr & kOpcodeMask);
- switch (opcode) {
- case J:
- case JAL:
- case BEQ:
- case BNE:
- case BLEZ: // POP06 bgeuc/bleuc, blezalc, bgezalc
- case BGTZ: // POP07 bltuc/bgtuc, bgtzalc, bltzalc
- case BEQL:
- case BNEL:
- case BLEZL: // POP26 bgezc, blezc, bgec/blec
- case BGTZL: // POP27 bgtzc, bltzc, bltc/bgtc
- case BC:
- case BALC:
- case POP10: // beqzalc, bovc, beqc
- case POP30: // bnezalc, bnvc, bnec
- case POP66: // beqzc, jic
- case POP76: // bnezc, jialc
- return true;
- case REGIMM:
- switch (instr & kRtFieldMask) {
- case BLTZ:
- case BGEZ:
- case BLTZAL:
- case BGEZAL:
- return true;
- default:
- return false;
- }
- break;
- case SPECIAL:
- switch (instr & kFunctionFieldMask) {
- case JR:
- case JALR:
- return true;
- default:
- return false;
- }
- break;
- case COP1:
- switch (instr & kRsFieldMask) {
- case BC1:
- case BC1EQZ:
- case BC1NEZ:
- return true;
- break;
- default:
- return false;
- }
- break;
- default:
- return false;
- }
-}
-
-
-bool Instruction::IsLinkingInstruction() const {
- switch (OpcodeFieldRaw()) {
- case JAL:
- return true;
- case POP76:
- if (RsFieldRawNoAssert() == JIALC)
- return true; // JIALC
- else
- return false; // BNEZC
- case REGIMM:
- switch (RtFieldRaw()) {
- case BGEZAL:
- case BLTZAL:
- return true;
- default:
- return false;
- }
- case SPECIAL:
- switch (FunctionFieldRaw()) {
- case JALR:
- return true;
- default:
- return false;
- }
- default:
- return false;
- }
-}
-
-
-bool Instruction::IsTrap() const {
- if (OpcodeFieldRaw() != SPECIAL) {
- return false;
- } else {
- switch (FunctionFieldRaw()) {
- case BREAK:
- case TGE:
- case TGEU:
- case TLT:
- case TLTU:
- case TEQ:
- case TNE:
- return true;
- default:
- return false;
- }
- }
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index d2b1e92957..f96ea2340e 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -555,6 +555,8 @@ enum SecondaryField : uint32_t {
FLOOR_W_S = ((1U << 3) + 7),
RECIP_S = ((2U << 3) + 5),
RSQRT_S = ((2U << 3) + 6),
+ MADDF_S = ((3U << 3) + 0),
+ MSUBF_S = ((3U << 3) + 1),
CLASS_S = ((3U << 3) + 3),
CVT_D_S = ((4U << 3) + 1),
CVT_W_S = ((4U << 3) + 4),
@@ -579,6 +581,8 @@ enum SecondaryField : uint32_t {
FLOOR_W_D = ((1U << 3) + 7),
RECIP_D = ((2U << 3) + 5),
RSQRT_D = ((2U << 3) + 6),
+ MADDF_D = ((3U << 3) + 0),
+ MSUBF_D = ((3U << 3) + 1),
CLASS_D = ((3U << 3) + 3),
MIN = ((3U << 3) + 4),
MINA = ((3U << 3) + 5),
@@ -646,8 +650,12 @@ enum SecondaryField : uint32_t {
SELNEZ_C = ((2U << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS.
+
// COP1X Encoding of Function Field.
+ MADD_S = ((4U << 3) + 0),
MADD_D = ((4U << 3) + 1),
+ MSUB_S = ((5U << 3) + 0),
+ MSUB_D = ((5U << 3) + 1),
// PCREL Encoding of rt Field.
ADDIUPC = ((0U << 2) + 0),
@@ -891,8 +899,7 @@ static constexpr uint64_t OpcodeToBitNumber(Opcode opcode) {
return 1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift);
}
-
-class Instruction {
+class InstructionBase {
public:
enum {
kInstrSize = 4,
@@ -902,6 +909,9 @@ class Instruction {
kPCReadOffset = 0
};
+ // Instruction type.
+ enum Type { kRegisterType, kImmediateType, kJumpType, kUnsupported = -1 };
+
// Get the raw instruction bits.
inline Instr InstructionBits() const {
return *reinterpret_cast<const Instr*>(this);
@@ -922,16 +932,6 @@ class Instruction {
return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
}
- // Instruction type.
- enum Type {
- kRegisterType,
- kImmediateType,
- kJumpType,
- kUnsupported = -1
- };
-
- enum TypeChecks { NORMAL, EXTRA };
-
static constexpr uint64_t kOpcodeImmediateTypeMask =
OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
@@ -988,9 +988,6 @@ class Instruction {
FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) |
FunctionFieldToBitNumber(SELNEZ_S) | FunctionFieldToBitNumber(SYNC);
- // Get the encoding type of the instruction.
- inline Type InstructionType(TypeChecks checks = NORMAL) const;
-
// Accessors for the different named fields used in the MIPS encoding.
inline Opcode OpcodeValue() const {
@@ -998,118 +995,144 @@ class Instruction {
Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
}
+ inline int FunctionFieldRaw() const {
+ return InstructionBits() & kFunctionFieldMask;
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline Opcode OpcodeFieldRaw() const {
+ return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
+ }
+
+ // Safe to call within InstructionType().
+ inline int RsFieldRawNoAssert() const {
+ return InstructionBits() & kRsFieldMask;
+ }
+
+ inline int SaFieldRaw() const { return InstructionBits() & kSaFieldMask; }
+
+ // Get the encoding type of the instruction.
+ inline Type InstructionType() const;
+
+ protected:
+ InstructionBase() {}
+};
+
+template <class T>
+class InstructionGetters : public T {
+ public:
inline int RsValue() const {
- DCHECK(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return Bits(kRsShift + kRsBits - 1, kRsShift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(kRsShift + kRsBits - 1, kRsShift);
}
inline int RtValue() const {
- DCHECK(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return Bits(kRtShift + kRtBits - 1, kRtShift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(kRtShift + kRtBits - 1, kRtShift);
}
inline int RdValue() const {
- DCHECK(InstructionType() == kRegisterType);
- return Bits(kRdShift + kRdBits - 1, kRdShift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ return this->Bits(kRdShift + kRdBits - 1, kRdShift);
}
inline int SaValue() const {
- DCHECK(InstructionType() == kRegisterType);
- return Bits(kSaShift + kSaBits - 1, kSaShift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ return this->Bits(kSaShift + kSaBits - 1, kSaShift);
}
inline int LsaSaValue() const {
- DCHECK(InstructionType() == kRegisterType);
- return Bits(kSaShift + kLsaSaBits - 1, kSaShift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ return this->Bits(kSaShift + kLsaSaBits - 1, kSaShift);
}
inline int FunctionValue() const {
- DCHECK(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
}
inline int FdValue() const {
- return Bits(kFdShift + kFdBits - 1, kFdShift);
+ return this->Bits(kFdShift + kFdBits - 1, kFdShift);
}
inline int FsValue() const {
- return Bits(kFsShift + kFsBits - 1, kFsShift);
+ return this->Bits(kFsShift + kFsBits - 1, kFsShift);
}
inline int FtValue() const {
- return Bits(kFtShift + kFtBits - 1, kFtShift);
+ return this->Bits(kFtShift + kFtBits - 1, kFtShift);
}
inline int FrValue() const {
- return Bits(kFrShift + kFrBits -1, kFrShift);
+ return this->Bits(kFrShift + kFrBits - 1, kFrShift);
}
inline int Bp2Value() const {
- DCHECK(InstructionType() == kRegisterType);
- return Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ return this->Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift);
}
inline int Bp3Value() const {
- DCHECK(InstructionType() == kRegisterType);
- return Bits(kBp3Shift + kBp3Bits - 1, kBp3Shift);
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ return this->Bits(kBp3Shift + kBp3Bits - 1, kBp3Shift);
}
// Float Compare condition code instruction bits.
inline int FCccValue() const {
- return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
+ return this->Bits(kFCccShift + kFCccBits - 1, kFCccShift);
}
// Float Branch condition code instruction bits.
inline int FBccValue() const {
- return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
+ return this->Bits(kFBccShift + kFBccBits - 1, kFBccShift);
}
// Float Branch true/false instruction bit.
inline int FBtrueValue() const {
- return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
+ return this->Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
}
// Return the fields at their original place in the instruction encoding.
inline Opcode OpcodeFieldRaw() const {
- return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
+ return static_cast<Opcode>(this->InstructionBits() & kOpcodeMask);
}
inline int RsFieldRaw() const {
- DCHECK(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return InstructionBits() & kRsFieldMask;
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
+ return this->InstructionBits() & kRsFieldMask;
}
// Same as above function, but safe to call within InstructionType().
inline int RsFieldRawNoAssert() const {
- return InstructionBits() & kRsFieldMask;
+ return this->InstructionBits() & kRsFieldMask;
}
inline int RtFieldRaw() const {
- DCHECK(InstructionType() == kRegisterType ||
- InstructionType() == kImmediateType);
- return InstructionBits() & kRtFieldMask;
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType ||
+ this->InstructionType() == InstructionBase::kImmediateType);
+ return this->InstructionBits() & kRtFieldMask;
}
inline int RdFieldRaw() const {
- DCHECK(InstructionType() == kRegisterType);
- return InstructionBits() & kRdFieldMask;
+ DCHECK(this->InstructionType() == InstructionBase::kRegisterType);
+ return this->InstructionBits() & kRdFieldMask;
}
inline int SaFieldRaw() const {
- return InstructionBits() & kSaFieldMask;
+ return this->InstructionBits() & kSaFieldMask;
}
inline int FunctionFieldRaw() const {
- return InstructionBits() & kFunctionFieldMask;
+ return this->InstructionBits() & kFunctionFieldMask;
}
// Get the secondary field according to the opcode.
inline int SecondaryValue() const {
- Opcode op = OpcodeFieldRaw();
+ Opcode op = this->OpcodeFieldRaw();
switch (op) {
case SPECIAL:
case SPECIAL2:
@@ -1124,34 +1147,34 @@ class Instruction {
}
inline int32_t ImmValue(int bits) const {
- DCHECK(InstructionType() == kImmediateType);
- return Bits(bits - 1, 0);
+ DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(bits - 1, 0);
}
inline int32_t Imm16Value() const {
- DCHECK(InstructionType() == kImmediateType);
- return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
+ DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
inline int32_t Imm18Value() const {
- DCHECK(InstructionType() == kImmediateType);
- return Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift);
+ DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift);
}
inline int32_t Imm19Value() const {
- DCHECK(InstructionType() == kImmediateType);
- return Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift);
+ DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift);
}
inline int32_t Imm21Value() const {
- DCHECK(InstructionType() == kImmediateType);
- return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
+ DCHECK(this->InstructionType() == InstructionBase::kImmediateType);
+ return this->Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
}
inline int32_t Imm26Value() const {
- DCHECK((InstructionType() == kJumpType) ||
- (InstructionType() == kImmediateType));
- return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
+ DCHECK((this->InstructionType() == InstructionBase::kJumpType) ||
+ (this->InstructionType() == InstructionBase::kImmediateType));
+ return this->Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
}
static bool IsForbiddenAfterBranchInstr(Instr instr);
@@ -1159,14 +1182,21 @@ class Instruction {
// Say if the instruction should not be used in a branch delay slot or
// immediately after a compact branch.
inline bool IsForbiddenAfterBranch() const {
- return IsForbiddenAfterBranchInstr(InstructionBits());
+ return IsForbiddenAfterBranchInstr(this->InstructionBits());
+ }
+
+ inline bool IsForbiddenInBranchDelay() const {
+ return IsForbiddenAfterBranch();
}
// Say if the instruction 'links'. e.g. jal, bal.
bool IsLinkingInstruction() const;
// Say if the instruction is a break or a trap.
bool IsTrap() const;
+};
+class Instruction : public InstructionGetters<InstructionBase> {
+ public:
// Instructions are read of out a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instruction.
@@ -1194,26 +1224,14 @@ const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize * 2;
const int kInvalidStackOffset = -1;
const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
-
-Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
- if (checks == EXTRA) {
- if (OpcodeToBitNumber(OpcodeFieldRaw()) & kOpcodeImmediateTypeMask) {
- return kImmediateType;
- }
- }
+InstructionBase::Type InstructionBase::InstructionType() const {
switch (OpcodeFieldRaw()) {
case SPECIAL:
- if (checks == EXTRA) {
- if (FunctionFieldToBitNumber(FunctionFieldRaw()) &
- kFunctionFieldRegisterTypeMask) {
- return kRegisterType;
- } else {
- return kUnsupported;
- }
- } else {
+ if (FunctionFieldToBitNumber(FunctionFieldRaw()) &
+ kFunctionFieldRegisterTypeMask) {
return kRegisterType;
}
- break;
+ return kUnsupported;
case SPECIAL2:
switch (FunctionFieldRaw()) {
case MUL:
@@ -1290,17 +1308,123 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
return kJumpType;
default:
- if (checks == NORMAL) {
- return kImmediateType;
- } else {
- return kUnsupported;
- }
+ return kImmediateType;
}
return kUnsupported;
}
-
#undef OpcodeToBitNumber
#undef FunctionFieldToBitNumber
+
+// -----------------------------------------------------------------------------
+// Instructions.
+
+template <class P>
+bool InstructionGetters<P>::IsLinkingInstruction() const {
+ switch (OpcodeFieldRaw()) {
+ case JAL:
+ return true;
+ case POP76:
+ if (RsFieldRawNoAssert() == JIALC)
+ return true; // JIALC
+ else
+ return false; // BNEZC
+ case REGIMM:
+ switch (RtFieldRaw()) {
+ case BGEZAL:
+ case BLTZAL:
+ return true;
+ default:
+ return false;
+ }
+ case SPECIAL:
+ switch (FunctionFieldRaw()) {
+ case JALR:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+template <class P>
+bool InstructionGetters<P>::IsTrap() const {
+ if (OpcodeFieldRaw() != SPECIAL) {
+ return false;
+ } else {
+ switch (FunctionFieldRaw()) {
+ case BREAK:
+ case TGE:
+ case TGEU:
+ case TLT:
+ case TLTU:
+ case TEQ:
+ case TNE:
+ return true;
+ default:
+ return false;
+ }
+ }
+}
+
+// static
+template <class T>
+bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
+ Opcode opcode = static_cast<Opcode>(instr & kOpcodeMask);
+ switch (opcode) {
+ case J:
+ case JAL:
+ case BEQ:
+ case BNE:
+ case BLEZ: // POP06 bgeuc/bleuc, blezalc, bgezalc
+ case BGTZ: // POP07 bltuc/bgtuc, bgtzalc, bltzalc
+ case BEQL:
+ case BNEL:
+ case BLEZL: // POP26 bgezc, blezc, bgec/blec
+ case BGTZL: // POP27 bgtzc, bltzc, bltc/bgtc
+ case BC:
+ case BALC:
+ case POP10: // beqzalc, bovc, beqc
+ case POP30: // bnezalc, bnvc, bnec
+ case POP66: // beqzc, jic
+ case POP76: // bnezc, jialc
+ return true;
+ case REGIMM:
+ switch (instr & kRtFieldMask) {
+ case BLTZ:
+ case BGEZ:
+ case BLTZAL:
+ case BGEZAL:
+ return true;
+ default:
+ return false;
+ }
+ break;
+ case SPECIAL:
+ switch (instr & kFunctionFieldMask) {
+ case JR:
+ case JALR:
+ return true;
+ default:
+ return false;
+ }
+ break;
+ case COP1:
+ switch (instr & kRsFieldMask) {
+ case BC1:
+ case BC1EQZ:
+ case BC1NEZ:
+ return true;
+ break;
+ default:
+ return false;
+ }
+ break;
+ default:
+ return false;
+ }
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index 5485f3ee95..d73f22ac07 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -959,6 +959,12 @@ void Decoder::DecodeTypeRegisterSRsType(Instruction* instr) {
case CVT_D_S:
Format(instr, "cvt.d.'t 'fd, 'fs");
break;
+ case MADDF_S:
+ Format(instr, "maddf.s 'fd, 'fs, 'ft");
+ break;
+ case MSUBF_S:
+ Format(instr, "msubf.s 'fd, 'fs, 'ft");
+ break;
default:
Format(instr, "unknown.cop1.'t");
break;
@@ -969,7 +975,17 @@ void Decoder::DecodeTypeRegisterSRsType(Instruction* instr) {
void Decoder::DecodeTypeRegisterDRsType(Instruction* instr) {
if (!DecodeTypeRegisterRsType(instr)) {
- Format(instr, "unknown.cop1.'t");
+ switch (instr->FunctionFieldRaw()) {
+ case MADDF_D:
+ Format(instr, "maddf.d 'fd, 'fs, 'ft");
+ break;
+ case MSUBF_D:
+ Format(instr, "msubf.d 'fd, 'fs, 'ft");
+ break;
+ default:
+ Format(instr, "unknown.cop1.'t");
+ break;
+ }
}
}
@@ -1115,9 +1131,18 @@ void Decoder::DecodeTypeRegisterCOP1(Instruction* instr) {
void Decoder::DecodeTypeRegisterCOP1X(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
+ case MADD_S:
+ Format(instr, "madd.s 'fd, 'fr, 'fs, 'ft");
+ break;
case MADD_D:
Format(instr, "madd.d 'fd, 'fr, 'fs, 'ft");
break;
+ case MSUB_S:
+ Format(instr, "msub.s 'fd, 'fr, 'fs, 'ft");
+ break;
+ case MSUB_D:
+ Format(instr, "msub.d 'fd, 'fr, 'fs, 'ft");
+ break;
default:
UNREACHABLE();
}
@@ -1483,6 +1508,10 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
break;
}
+ case DINS: {
+ Format(instr, "dins 'rt, 'rs, 'sa, 'ss2");
+ break;
+ }
case DBSHFL: {
int sa = instr->SaFieldRaw() >> kSaShift;
switch (sa) {
@@ -1917,7 +1946,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%08x ",
instr->InstructionBits());
- switch (instr->InstructionType(Instruction::TypeChecks::EXTRA)) {
+ switch (instr->InstructionType()) {
case Instruction::kRegisterType: {
return DecodeTypeRegister(instr);
}
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 77c71aae78..e5b9c2e7bd 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -40,13 +40,9 @@ const Register StoreDescriptor::SlotRegister() { return a4; }
const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return a4; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return a3; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return a5; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return a3; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return a4; }
+const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
+const Register StoreTransitionDescriptor::MapRegister() { return a5; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
@@ -356,7 +352,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // callee
@@ -391,7 +387,19 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
a0, // argument count (not including receiver)
a3, // new target
a1, // constructor to call
- a2 // address of the first argument
+ a2, // allocation site feedback if available, undefined otherwise.
+ a4 // address of the first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a0, // argument count (not including receiver)
+ a1, // the target to call verified to be Array function
+ a2, // allocation site feedback
+ a3, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index aa0de26b88..dd12f9b51a 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -200,9 +200,7 @@ void MacroAssembler::InNewSpace(Register object,
Condition cc,
Label* branch) {
DCHECK(cc == eq || cc == ne);
- const int mask =
- 1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
- CheckPageFlag(object, scratch, mask, cc, branch);
+ CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch);
}
@@ -1260,8 +1258,13 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+ const uint64_t int16_min = std::numeric_limits<int16_t>::min();
+ if (is_uint15(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+ // Imm range is: [0, 32767].
sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
+ } else if (is_uint15(rt.imm64_ - int16_min) && !MustUseReg(rt.rmode_)) {
+ // Imm range is: [max_unsigned-32767,max_unsigned].
+ sltiu(rd, rs, static_cast<uint16_t>(rt.imm64_));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -1960,10 +1963,14 @@ void MacroAssembler::Ins(Register rt,
}
void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
- Register scratch1 = t8;
- Register scratch2 = t9;
- if (kArchVariant == kMips64r2) {
+ if (kArchVariant == kMips64r6) {
+ // r6 neg_s changes the sign for NaN-like operands as well.
+ neg_s(fd, fs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
Label is_nan, done;
+ Register scratch1 = t8;
+ Register scratch2 = t9;
BranchF32(nullptr, &is_nan, eq, fs, fs);
Branch(USE_DELAY_SLOT, &done);
// For NaN input, neg_s will return the same NaN value,
@@ -1977,21 +1984,18 @@ void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
Or(scratch2, scratch2, scratch1);
mtc1(scratch2, fd);
bind(&done);
- } else {
- mfc1(scratch1, fs);
- And(scratch2, scratch1, Operand(~kBinary32SignMask));
- And(scratch1, scratch1, Operand(kBinary32SignMask));
- Xor(scratch1, scratch1, Operand(kBinary32SignMask));
- Or(scratch2, scratch2, scratch1);
- mtc1(scratch2, fd);
}
}
void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
- Register scratch1 = t8;
- Register scratch2 = t9;
- if (kArchVariant == kMips64r2) {
+ if (kArchVariant == kMips64r6) {
+ // r6 neg_d changes the sign for NaN-like operands as well.
+ neg_d(fd, fs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
Label is_nan, done;
+ Register scratch1 = t8;
+ Register scratch2 = t9;
BranchF64(nullptr, &is_nan, eq, fs, fs);
Branch(USE_DELAY_SLOT, &done);
// For NaN input, neg_d will return the same NaN value,
@@ -2005,13 +2009,6 @@ void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
Or(scratch2, scratch2, scratch1);
dmtc1(scratch2, fd);
bind(&done);
- } else {
- dmfc1(scratch1, fs);
- And(scratch2, scratch1, Operand(~Double::kSignMask));
- And(scratch1, scratch1, Operand(Double::kSignMask));
- Xor(scratch1, scratch1, Operand(Double::kSignMask));
- Or(scratch2, scratch2, scratch1);
- dmtc1(scratch2, fd);
}
}
@@ -2387,7 +2384,8 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
DCHECK(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
- bool long_branch = nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
+ bool long_branch =
+ nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
if (kArchVariant != kMips64r6) {
if (long_branch) {
Label skip;
@@ -2427,7 +2425,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
if (target) {
bool long_branch =
- target->is_bound() ? is_near(target) : is_trampoline_emitted();
+ target->is_bound() ? !is_near(target) : is_trampoline_emitted();
if (long_branch) {
Label skip;
Condition neg_cond = NegateFpuCondition(cond);
@@ -4379,7 +4377,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -4543,7 +4541,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
void MacroAssembler::FastAllocate(int object_size, Register result,
Register scratch1, Register scratch2,
AllocationFlags flags) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK(!AreAliased(result, scratch1, scratch2, at));
// Make object size into bytes.
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index c96525c6ae..4f67d70e0c 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -243,6 +243,18 @@ class MacroAssembler: public Assembler {
Func GetLabelFunction);
#undef COND_ARGS
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+ // below the caller's sp.
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 0) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ UNIMPLEMENTED();
+ }
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count,
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 780c90c16b..02387d0f4f 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -96,7 +96,7 @@ class MipsDebugger {
void RedoBreakpoints();
};
-#define UNSUPPORTED() printf("Sim: Unsupported instruction.\n");
+inline void UNSUPPORTED() { printf("Sim: Unsupported instruction.\n"); }
void MipsDebugger::Stop(Instruction* instr) {
// Get the stop code.
@@ -741,8 +741,8 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
-void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
- size_t size) {
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+ void* start_addr, size_t size) {
int64_t start = reinterpret_cast<int64_t>(start_addr);
int64_t intra_line = (start & CachePage::kLineMask);
start -= intra_line;
@@ -762,7 +762,8 @@ void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
}
}
-CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page) {
base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
@@ -773,8 +774,8 @@ CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start,
- size_t size) {
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+ intptr_t start, size_t size) {
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
@@ -786,7 +787,8 @@ void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start,
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
-void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr) {
int64_t address = reinterpret_cast<int64_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -819,7 +821,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
- i_cache_ = new base::HashMap(&ICacheMatch);
+ i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
@@ -933,7 +935,8 @@ class Redirection {
// static
-void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
+ Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
@@ -1935,15 +1938,15 @@ typedef void (*SimulatorRuntimeProfilingGetterCall)(
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime. They are also used for debugging with simulator.
-void Simulator::SoftwareInterrupt(Instruction* instr) {
+void Simulator::SoftwareInterrupt() {
// There are several instructions that could get us here,
// the break_ instruction, or several variants of traps. All
// Are "SPECIAL" class opcode, and are distinuished by function.
- int32_t func = instr->FunctionFieldRaw();
- uint32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
+ int32_t func = instr_.FunctionFieldRaw();
+ uint32_t code = (func == BREAK) ? instr_.Bits(25, 6) : -1;
// We first check if we met a call_rt_redirected.
- if (instr->InstructionBits() == rtCallRedirInstr) {
- Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ if (instr_.InstructionBits() == rtCallRedirInstr) {
+ Redirection* redirection = Redirection::FromSwiInstruction(instr_.instr());
int64_t arg0 = get_register(a0);
int64_t arg1 = get_register(a1);
int64_t arg2 = get_register(a2);
@@ -2169,7 +2172,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintWatchpoint(code);
} else {
IncreaseStopCounter(code);
- HandleStop(code, instr);
+ HandleStop(code, instr_.instr());
}
} else {
// All remaining break_ codes, and all traps are handled here.
@@ -2364,6 +2367,49 @@ static T FPUMaxA(T a, T b) {
return result;
}
+enum class KeepSign : bool { no = false, yes };
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value,
+ int>::type = 0>
+T FPUCanonalizeNaNArg(T result, T arg, KeepSign keepSign = KeepSign::no) {
+ DCHECK(std::isnan(arg));
+ T qNaN = std::numeric_limits<T>::quiet_NaN();
+ if (keepSign == KeepSign::yes) {
+ return std::copysign(qNaN, result);
+ }
+ return qNaN;
+}
+
+template <typename T>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first) {
+ if (std::isnan(first)) {
+ return FPUCanonalizeNaNArg(result, first, keepSign);
+ }
+ return result;
+}
+
+template <typename T, typename... Args>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first, Args... args) {
+ if (std::isnan(first)) {
+ return FPUCanonalizeNaNArg(result, first, keepSign);
+ }
+ return FPUCanonalizeNaNArgs(result, keepSign, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, T first, Args... args) {
+ return FPUCanonalizeOperation(f, KeepSign::no, first, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, KeepSign keepSign, T first, Args... args) {
+ T result = f(first, args...);
+ if (std::isnan(result)) {
+ result = FPUCanonalizeNaNArgs(result, keepSign, first, args...);
+ }
+ return result;
+}
+
// Handle execution based on instruction types.
void Simulator::DecodeTypeRegisterSRsType() {
@@ -2374,9 +2420,9 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t ft_int = bit_cast<int32_t>(ft);
int32_t fd_int = bit_cast<int32_t>(fd);
uint32_t cc, fcsr_cc;
- cc = get_instr()->FCccValue();
+ cc = instr_.FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case RINT: {
DCHECK(kArchVariant == kMips64r6);
float result, temp_result;
@@ -2416,41 +2462,65 @@ void Simulator::DecodeTypeRegisterSRsType() {
break;
}
case ADD_S:
- set_fpu_register_float(fd_reg(), fs + ft);
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; },
+ fs, ft));
break;
case SUB_S:
- set_fpu_register_float(fd_reg(), fs - ft);
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; },
+ fs, ft));
+ break;
+ case MADDF_S:
+ DCHECK(kArchVariant == kMips64r6);
+ set_fpu_register_float(fd_reg(), fd + (fs * ft));
+ break;
+ case MSUBF_S:
+ DCHECK(kArchVariant == kMips64r6);
+ set_fpu_register_float(fd_reg(), fd - (fs * ft));
break;
case MUL_S:
- set_fpu_register_float(fd_reg(), fs * ft);
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; },
+ fs, ft));
break;
case DIV_S:
- set_fpu_register_float(fd_reg(), fs / ft);
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; },
+ fs, ft));
break;
case ABS_S:
- set_fpu_register_float(fd_reg(), fabs(fs));
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float fs) { return FPAbs(fs); }, fs));
break;
case MOV_S:
set_fpu_register_float(fd_reg(), fs);
break;
case NEG_S:
- set_fpu_register_float(fd_reg(), -fs);
+ set_fpu_register_float(
+ fd_reg(), FPUCanonalizeOperation([](float src) { return -src; },
+ KeepSign::yes, fs));
break;
case SQRT_S:
- lazily_initialize_fast_sqrt(isolate_);
- set_fpu_register_float(fd_reg(), fast_sqrt(fs, isolate_));
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float src) { return std::sqrt(src); }, fs));
break;
- case RSQRT_S: {
- lazily_initialize_fast_sqrt(isolate_);
- float result = 1.0 / fast_sqrt(fs, isolate_);
- set_fpu_register_float(fd_reg(), result);
+ case RSQRT_S:
+ set_fpu_register_float(
+ fd_reg(), FPUCanonalizeOperation(
+ [](float src) { return 1.0 / std::sqrt(src); }, fs));
break;
- }
- case RECIP_S: {
- float result = 1.0 / fs;
- set_fpu_register_float(fd_reg(), result);
+ case RECIP_S:
+ set_fpu_register_float(
+ fd_reg(),
+ FPUCanonalizeOperation([](float src) { return 1.0 / src; }, fs));
break;
- }
case C_F_D:
set_fcsr_bit(fcsr_cc, false);
break;
@@ -2696,7 +2766,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
uint32_t ft_cc = (ft_reg() >> 2) & 0x7;
ft_cc = get_fcsr_condition_bit(ft_cc);
- if (get_instr()->Bit(16)) { // Read Tf bit.
+ if (instr_.Bit(16)) { // Read Tf bit.
// MOVT.D
if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
} else {
@@ -2717,15 +2787,14 @@ void Simulator::DecodeTypeRegisterDRsType() {
double ft, fs, fd;
uint32_t cc, fcsr_cc;
fs = get_fpu_register_double(fs_reg());
- ft = (get_instr()->FunctionFieldRaw() != MOVF)
- ? get_fpu_register_double(ft_reg())
- : 0.0;
+ ft = (instr_.FunctionFieldRaw() != MOVF) ? get_fpu_register_double(ft_reg())
+ : 0.0;
fd = get_fpu_register_double(fd_reg());
- cc = get_instr()->FCccValue();
+ cc = instr_.FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
int64_t ft_int = bit_cast<int64_t>(ft);
int64_t fd_int = bit_cast<int64_t>(fd);
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case RINT: {
DCHECK(kArchVariant == kMips64r6);
double result, temp, temp_result;
@@ -2793,7 +2862,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
// Same function field for MOVT.D and MOVF.D
uint32_t ft_cc = (ft_reg() >> 2) & 0x7;
ft_cc = get_fcsr_condition_bit(ft_cc);
- if (get_instr()->Bit(16)) { // Read Tf bit.
+ if (instr_.Bit(16)) { // Read Tf bit.
// MOVT.D
if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
} else {
@@ -2819,41 +2888,65 @@ void Simulator::DecodeTypeRegisterDRsType() {
set_fpu_register_double(fd_reg(), FPUMax(ft, fs));
break;
case ADD_D:
- set_fpu_register_double(fd_reg(), fs + ft);
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs + rhs; }, fs, ft));
break;
case SUB_D:
- set_fpu_register_double(fd_reg(), fs - ft);
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs - rhs; }, fs, ft));
+ break;
+ case MADDF_D:
+ DCHECK(kArchVariant == kMips64r6);
+ set_fpu_register_double(fd_reg(), fd + (fs * ft));
+ break;
+ case MSUBF_D:
+ DCHECK(kArchVariant == kMips64r6);
+ set_fpu_register_double(fd_reg(), fd - (fs * ft));
break;
case MUL_D:
- set_fpu_register_double(fd_reg(), fs * ft);
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs * rhs; }, fs, ft));
break;
case DIV_D:
- set_fpu_register_double(fd_reg(), fs / ft);
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs / rhs; }, fs, ft));
break;
case ABS_D:
- set_fpu_register_double(fd_reg(), fabs(fs));
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation([](double fs) { return FPAbs(fs); }, fs));
break;
case MOV_D:
set_fpu_register_double(fd_reg(), fs);
break;
case NEG_D:
- set_fpu_register_double(fd_reg(), -fs);
+ set_fpu_register_double(
+ fd_reg(), FPUCanonalizeOperation([](double src) { return -src; },
+ KeepSign::yes, fs));
break;
case SQRT_D:
- lazily_initialize_fast_sqrt(isolate_);
- set_fpu_register_double(fd_reg(), fast_sqrt(fs, isolate_));
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation([](double fs) { return std::sqrt(fs); }, fs));
break;
- case RSQRT_D: {
- lazily_initialize_fast_sqrt(isolate_);
- double result = 1.0 / fast_sqrt(fs, isolate_);
- set_fpu_register_double(fd_reg(), result);
+ case RSQRT_D:
+ set_fpu_register_double(
+ fd_reg(), FPUCanonalizeOperation(
+ [](double fs) { return 1.0 / std::sqrt(fs); }, fs));
break;
- }
- case RECIP_D: {
- double result = 1.0 / fs;
- set_fpu_register_double(fd_reg(), result);
+ case RECIP_D:
+ set_fpu_register_double(
+ fd_reg(),
+ FPUCanonalizeOperation([](double fs) { return 1.0 / fs; }, fs));
break;
- }
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
@@ -3060,7 +3153,7 @@ void Simulator::DecodeTypeRegisterWRsType() {
float fs = get_fpu_register_float(fs_reg());
float ft = get_fpu_register_float(ft_reg());
int64_t alu_out = 0x12345678;
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case CVT_S_W: // Convert word to float (single).
alu_out = get_fpu_register_signed_word(fs_reg());
set_fpu_register_float(fd_reg(), static_cast<float>(alu_out));
@@ -3152,7 +3245,7 @@ void Simulator::DecodeTypeRegisterLRsType() {
double fs = get_fpu_register_double(fs_reg());
double ft = get_fpu_register_double(ft_reg());
int64_t i64;
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case CVT_D_L: // Mips32r2 instruction.
i64 = get_fpu_register(fs_reg());
set_fpu_register_double(fd_reg(), static_cast<double>(i64));
@@ -3241,7 +3334,7 @@ void Simulator::DecodeTypeRegisterLRsType() {
void Simulator::DecodeTypeRegisterCOP1() {
- switch (get_instr()->RsFieldRaw()) {
+ switch (instr_.RsFieldRaw()) {
case BC1: // Branch on coprocessor condition.
case BC1EQZ:
case BC1NEZ:
@@ -3304,14 +3397,43 @@ void Simulator::DecodeTypeRegisterCOP1() {
void Simulator::DecodeTypeRegisterCOP1X() {
- switch (get_instr()->FunctionFieldRaw()) {
- case MADD_D:
+ switch (instr_.FunctionFieldRaw()) {
+ case MADD_S: {
+ DCHECK(kArchVariant == kMips64r2);
+ float fr, ft, fs;
+ fr = get_fpu_register_float(fr_reg());
+ fs = get_fpu_register_float(fs_reg());
+ ft = get_fpu_register_float(ft_reg());
+ set_fpu_register_float(fd_reg(), fs * ft + fr);
+ break;
+ }
+ case MSUB_S: {
+ DCHECK(kArchVariant == kMips64r2);
+ float fr, ft, fs;
+ fr = get_fpu_register_float(fr_reg());
+ fs = get_fpu_register_float(fs_reg());
+ ft = get_fpu_register_float(ft_reg());
+ set_fpu_register_float(fd_reg(), fs * ft - fr);
+ break;
+ }
+ case MADD_D: {
+ DCHECK(kArchVariant == kMips64r2);
double fr, ft, fs;
fr = get_fpu_register_double(fr_reg());
fs = get_fpu_register_double(fs_reg());
ft = get_fpu_register_double(ft_reg());
set_fpu_register_double(fd_reg(), fs * ft + fr);
break;
+ }
+ case MSUB_D: {
+ DCHECK(kArchVariant == kMips64r2);
+ double fr, ft, fs;
+ fr = get_fpu_register_double(fr_reg());
+ fs = get_fpu_register_double(fs_reg());
+ ft = get_fpu_register_double(ft_reg());
+ set_fpu_register_double(fd_reg(), fs * ft - fr);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -3324,7 +3446,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
int64_t alu_out;
bool do_interrupt = false;
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case SELEQZ_S:
DCHECK(kArchVariant == kMips64r6);
set_register(rd_reg(), rt() == 0 ? rs() : 0);
@@ -3570,7 +3692,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
case DIV:
case DDIV: {
const int64_t int_min_value =
- get_instr()->FunctionFieldRaw() == DIV ? INT_MIN : LONG_MIN;
+ instr_.FunctionFieldRaw() == DIV ? INT_MIN : LONG_MIN;
switch (kArchVariant) {
case kMips64r2:
// Divide by zero and overflow was not checked in the
@@ -3616,7 +3738,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
case kMips64r6: {
uint32_t rt_u_32 = static_cast<uint32_t>(rt_u());
uint32_t rs_u_32 = static_cast<uint32_t>(rs_u());
- switch (get_instr()->SaValue()) {
+ switch (sa()) {
case DIV_OP:
if (rt_u_32 != 0) {
set_register(rd_reg(), rs_u_32 / rt_u_32);
@@ -3645,7 +3767,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
case DDIVU:
switch (kArchVariant) {
case kMips64r6: {
- switch (get_instr()->SaValue()) {
+ switch (instr_.SaValue()) {
case DIV_OP:
if (rt_u() != 0) {
set_register(rd_reg(), rs_u() / rt_u());
@@ -3767,9 +3889,9 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
}
break;
case MOVCI: {
- uint32_t cc = get_instr()->FBccValue();
+ uint32_t cc = instr_.FBccValue();
uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
- if (get_instr()->Bit(16)) { // Read Tf bit.
+ if (instr_.Bit(16)) { // Read Tf bit.
if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
} else {
if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
@@ -3785,14 +3907,14 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
UNREACHABLE();
}
if (do_interrupt) {
- SoftwareInterrupt(get_instr());
+ SoftwareInterrupt();
}
}
void Simulator::DecodeTypeRegisterSPECIAL2() {
int64_t alu_out;
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case MUL:
alu_out = static_cast<int32_t>(rs_u()) * static_cast<int32_t>(rt_u());
SetResult(rd_reg(), alu_out);
@@ -3821,7 +3943,7 @@ void Simulator::DecodeTypeRegisterSPECIAL2() {
void Simulator::DecodeTypeRegisterSPECIAL3() {
int64_t alu_out;
- switch (get_instr()->FunctionFieldRaw()) {
+ switch (instr_.FunctionFieldRaw()) {
case INS: { // Mips64r2 instruction.
// Interpret rd field as 5-bit msb of insert.
uint16_t msb = rd_reg();
@@ -3890,7 +4012,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
break;
}
case BSHFL: {
- int32_t sa = get_instr()->SaFieldRaw() >> kSaShift;
+ int32_t sa = instr_.SaFieldRaw() >> kSaShift;
switch (sa) {
case BITSWAP: {
uint32_t input = static_cast<uint32_t>(rt());
@@ -3968,7 +4090,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
break;
}
default: {
- const uint8_t bp2 = get_instr()->Bp2Value();
+ const uint8_t bp2 = instr_.Bp2Value();
sa >>= kBp2Bits;
switch (sa) {
case ALIGN: {
@@ -3993,7 +4115,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
break;
}
case DBSHFL: {
- int32_t sa = get_instr()->SaFieldRaw() >> kSaShift;
+ int32_t sa = instr_.SaFieldRaw() >> kSaShift;
switch (sa) {
case DBITSWAP: {
switch (sa) {
@@ -4067,7 +4189,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
break;
}
default: {
- const uint8_t bp3 = get_instr()->Bp3Value();
+ const uint8_t bp3 = instr_.Bp3Value();
sa >>= kBp3Bits;
switch (sa) {
case DALIGN: {
@@ -4096,12 +4218,9 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
}
}
-
-void Simulator::DecodeTypeRegister(Instruction* instr) {
- set_instr(instr);
-
+void Simulator::DecodeTypeRegister() {
// ---------- Execution.
- switch (instr->OpcodeFieldRaw()) {
+ switch (instr_.OpcodeFieldRaw()) {
case COP1:
DecodeTypeRegisterCOP1();
break;
@@ -4127,18 +4246,18 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
// Type 2: instructions using a 16, 21 or 26 bits immediate. (e.g. beq, beqc).
-void Simulator::DecodeTypeImmediate(Instruction* instr) {
+void Simulator::DecodeTypeImmediate() {
// Instruction fields.
- Opcode op = instr->OpcodeFieldRaw();
- int32_t rs_reg = instr->RsValue();
- int64_t rs = get_register(instr->RsValue());
+ Opcode op = instr_.OpcodeFieldRaw();
+ int32_t rs_reg = instr_.RsValue();
+ int64_t rs = get_register(instr_.RsValue());
uint64_t rs_u = static_cast<uint64_t>(rs);
- int32_t rt_reg = instr->RtValue(); // Destination register.
+ int32_t rt_reg = instr_.RtValue(); // Destination register.
int64_t rt = get_register(rt_reg);
- int16_t imm16 = instr->Imm16Value();
- int32_t imm18 = instr->Imm18Value();
+ int16_t imm16 = instr_.Imm16Value();
+ int32_t imm18 = instr_.Imm18Value();
- int32_t ft_reg = instr->FtValue(); // Destination register.
+ int32_t ft_reg = instr_.FtValue(); // Destination register.
// Zero extended immediate.
uint64_t oe_imm16 = 0xffff & imm16;
@@ -4163,38 +4282,36 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
const int kInt64AlignmentMask = sizeof(uint64_t) - 1;
// Branch instructions common part.
- auto BranchAndLinkHelper = [this, instr, &next_pc,
- &execute_branch_delay_instruction](
- bool do_branch) {
- execute_branch_delay_instruction = true;
- int64_t current_pc = get_pc();
- if (do_branch) {
- int16_t imm16 = instr->Imm16Value();
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- set_register(31, current_pc + 2 * Instruction::kInstrSize);
- } else {
- next_pc = current_pc + 2 * Instruction::kInstrSize;
- }
- };
+ auto BranchAndLinkHelper =
+ [this, &next_pc, &execute_branch_delay_instruction](bool do_branch) {
+ execute_branch_delay_instruction = true;
+ int64_t current_pc = get_pc();
+ if (do_branch) {
+ int16_t imm16 = instr_.Imm16Value();
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
+ }
+ };
- auto BranchHelper = [this, instr, &next_pc,
+ auto BranchHelper = [this, &next_pc,
&execute_branch_delay_instruction](bool do_branch) {
execute_branch_delay_instruction = true;
int64_t current_pc = get_pc();
if (do_branch) {
- int16_t imm16 = instr->Imm16Value();
+ int16_t imm16 = instr_.Imm16Value();
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
} else {
next_pc = current_pc + 2 * Instruction::kInstrSize;
}
};
- auto BranchAndLinkCompactHelper = [this, instr, &next_pc](bool do_branch,
- int bits) {
+ auto BranchAndLinkCompactHelper = [this, &next_pc](bool do_branch, int bits) {
int64_t current_pc = get_pc();
CheckForbiddenSlot(current_pc);
if (do_branch) {
- int32_t imm = instr->ImmValue(bits);
+ int32_t imm = instr_.ImmValue(bits);
imm <<= 32 - bits;
imm >>= 32 - bits;
next_pc = current_pc + (imm << 2) + Instruction::kInstrSize;
@@ -4202,11 +4319,11 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
}
};
- auto BranchCompactHelper = [&next_pc, this, instr](bool do_branch, int bits) {
+ auto BranchCompactHelper = [this, &next_pc](bool do_branch, int bits) {
int64_t current_pc = get_pc();
CheckForbiddenSlot(current_pc);
if (do_branch) {
- int32_t imm = instr->ImmValue(bits);
+ int32_t imm = instr_.ImmValue(bits);
imm <<= 32 - bits;
imm >>= 32 - bits;
next_pc = get_pc() + (imm << 2) + Instruction::kInstrSize;
@@ -4216,12 +4333,12 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
switch (op) {
// ------------- COP1. Coprocessor instructions.
case COP1:
- switch (instr->RsFieldRaw()) {
+ switch (instr_.RsFieldRaw()) {
case BC1: { // Branch on coprocessor condition.
- uint32_t cc = instr->FBccValue();
+ uint32_t cc = instr_.FBccValue();
uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
uint32_t cc_value = test_fcsr_bit(fcsr_cc);
- bool do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
+ bool do_branch = (instr_.FBtrueValue()) ? cc_value : !cc_value;
BranchHelper(do_branch);
break;
}
@@ -4237,7 +4354,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
// ------------- REGIMM class.
case REGIMM:
- switch (instr->RtFieldRaw()) {
+ switch (instr_.RtFieldRaw()) {
case BLTZ:
BranchHelper(rs < 0);
break;
@@ -4455,7 +4572,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
set_register(rt_reg, ReadB(rs + se_imm16));
break;
case LH:
- set_register(rt_reg, ReadH(rs + se_imm16, instr));
+ set_register(rt_reg, ReadH(rs + se_imm16, instr_.instr()));
break;
case LWL: {
// al_offset is offset of the effective address within an aligned word.
@@ -4463,26 +4580,26 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint8_t byte_shift = kInt32AlignmentMask - al_offset;
uint32_t mask = (1 << byte_shift * 8) - 1;
addr = rs + se_imm16 - al_offset;
- int32_t val = ReadW(addr, instr);
+ int32_t val = ReadW(addr, instr_.instr());
val <<= byte_shift * 8;
val |= rt & mask;
set_register(rt_reg, static_cast<int64_t>(val));
break;
}
case LW:
- set_register(rt_reg, ReadW(rs + se_imm16, instr));
+ set_register(rt_reg, ReadW(rs + se_imm16, instr_.instr()));
break;
case LWU:
- set_register(rt_reg, ReadWU(rs + se_imm16, instr));
+ set_register(rt_reg, ReadWU(rs + se_imm16, instr_.instr()));
break;
case LD:
- set_register(rt_reg, Read2W(rs + se_imm16, instr));
+ set_register(rt_reg, Read2W(rs + se_imm16, instr_.instr()));
break;
case LBU:
set_register(rt_reg, ReadBU(rs + se_imm16));
break;
case LHU:
- set_register(rt_reg, ReadHU(rs + se_imm16, instr));
+ set_register(rt_reg, ReadHU(rs + se_imm16, instr_.instr()));
break;
case LWR: {
// al_offset is offset of the effective address within an aligned word.
@@ -4490,7 +4607,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint8_t byte_shift = kInt32AlignmentMask - al_offset;
uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
addr = rs + se_imm16 - al_offset;
- alu_out = ReadW(addr, instr);
+ alu_out = ReadW(addr, instr_.instr());
alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
alu_out |= rt & mask;
set_register(rt_reg, alu_out);
@@ -4502,7 +4619,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint8_t byte_shift = kInt64AlignmentMask - al_offset;
uint64_t mask = (1UL << byte_shift * 8) - 1;
addr = rs + se_imm16 - al_offset;
- alu_out = Read2W(addr, instr);
+ alu_out = Read2W(addr, instr_.instr());
alu_out <<= byte_shift * 8;
alu_out |= rt & mask;
set_register(rt_reg, alu_out);
@@ -4514,7 +4631,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint8_t byte_shift = kInt64AlignmentMask - al_offset;
uint64_t mask = al_offset ? (~0UL << (byte_shift + 1) * 8) : 0UL;
addr = rs + se_imm16 - al_offset;
- alu_out = Read2W(addr, instr);
+ alu_out = Read2W(addr, instr_.instr());
alu_out = alu_out >> al_offset * 8;
alu_out |= rt & mask;
set_register(rt_reg, alu_out);
@@ -4524,31 +4641,31 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
WriteB(rs + se_imm16, static_cast<int8_t>(rt));
break;
case SH:
- WriteH(rs + se_imm16, static_cast<uint16_t>(rt), instr);
+ WriteH(rs + se_imm16, static_cast<uint16_t>(rt), instr_.instr());
break;
case SWL: {
uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
uint8_t byte_shift = kInt32AlignmentMask - al_offset;
uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
addr = rs + se_imm16 - al_offset;
- uint64_t mem_value = ReadW(addr, instr) & mask;
+ uint64_t mem_value = ReadW(addr, instr_.instr()) & mask;
mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
- WriteW(addr, static_cast<int32_t>(mem_value), instr);
+ WriteW(addr, static_cast<int32_t>(mem_value), instr_.instr());
break;
}
case SW:
- WriteW(rs + se_imm16, static_cast<int32_t>(rt), instr);
+ WriteW(rs + se_imm16, static_cast<int32_t>(rt), instr_.instr());
break;
case SD:
- Write2W(rs + se_imm16, rt, instr);
+ Write2W(rs + se_imm16, rt, instr_.instr());
break;
case SWR: {
uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
uint32_t mask = (1 << al_offset * 8) - 1;
addr = rs + se_imm16 - al_offset;
- uint64_t mem_value = ReadW(addr, instr);
+ uint64_t mem_value = ReadW(addr, instr_.instr());
mem_value = (rt << al_offset * 8) | (mem_value & mask);
- WriteW(addr, static_cast<int32_t>(mem_value), instr);
+ WriteW(addr, static_cast<int32_t>(mem_value), instr_.instr());
break;
}
case SDL: {
@@ -4556,39 +4673,39 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint8_t byte_shift = kInt64AlignmentMask - al_offset;
uint64_t mask = byte_shift ? (~0UL << (al_offset + 1) * 8) : 0;
addr = rs + se_imm16 - al_offset;
- uint64_t mem_value = Read2W(addr, instr) & mask;
+ uint64_t mem_value = Read2W(addr, instr_.instr()) & mask;
mem_value |= rt >> byte_shift * 8;
- Write2W(addr, mem_value, instr);
+ Write2W(addr, mem_value, instr_.instr());
break;
}
case SDR: {
uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
uint64_t mask = (1UL << al_offset * 8) - 1;
addr = rs + se_imm16 - al_offset;
- uint64_t mem_value = Read2W(addr, instr);
+ uint64_t mem_value = Read2W(addr, instr_.instr());
mem_value = (rt << al_offset * 8) | (mem_value & mask);
- Write2W(addr, mem_value, instr);
+ Write2W(addr, mem_value, instr_.instr());
break;
}
case LWC1:
set_fpu_register(ft_reg, kFPUInvalidResult); // Trash upper 32 bits.
- set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr));
+ set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr_.instr()));
break;
case LDC1:
- set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr));
+ set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr_.instr()));
break;
case SWC1: {
int32_t alu_out_32 = static_cast<int32_t>(get_fpu_register(ft_reg));
- WriteW(rs + se_imm16, alu_out_32, instr);
+ WriteW(rs + se_imm16, alu_out_32, instr_.instr());
break;
}
case SDC1:
- WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr);
+ WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr_.instr());
break;
// ------------- PC-Relative instructions.
case PCREL: {
// rt field: checking 5-bits.
- int32_t imm21 = instr->Imm21Value();
+ int32_t imm21 = instr_.Imm21Value();
int64_t current_pc = get_pc();
uint8_t rt = (imm21 >> kImm16Bits);
switch (rt) {
@@ -4600,14 +4717,14 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
alu_out = current_pc + (se_imm16 << 16);
break;
default: {
- int32_t imm19 = instr->Imm19Value();
+ int32_t imm19 = instr_.Imm19Value();
// rt field: checking the most significant 3-bits.
rt = (imm21 >> kImm18Bits);
switch (rt) {
case LDPC:
addr =
(current_pc & static_cast<int64_t>(~0x7)) + (se_imm18 << 3);
- alu_out = Read2W(addr, instr);
+ alu_out = Read2W(addr, instr_.instr());
break;
default: {
// rt field: checking the most significant 2-bits.
@@ -4671,13 +4788,14 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Type 3: instructions using a 26 bytes immediate. (e.g. j, jal).
-void Simulator::DecodeTypeJump(Instruction* instr) {
+void Simulator::DecodeTypeJump() {
+ SimInstruction simInstr = instr_;
// Get current pc.
int64_t current_pc = get_pc();
// Get unchanged bits of pc.
int64_t pc_high_bits = current_pc & 0xfffffffff0000000;
// Next pc.
- int64_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
+ int64_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
// Execute branch delay slot.
// We don't check for end_sim_pc. First it should not be met as the current pc
@@ -4688,7 +4806,7 @@ void Simulator::DecodeTypeJump(Instruction* instr) {
// Update pc and ra if necessary.
// Do this after the branch delay execution.
- if (instr->IsLinkingInstruction()) {
+ if (simInstr.IsLinkingInstruction()) {
set_register(31, current_pc + 2 * Instruction::kInstrSize);
}
set_pc(next_pc);
@@ -4713,15 +4831,16 @@ void Simulator::InstructionDecode(Instruction* instr) {
dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
}
- switch (instr->InstructionType(Instruction::TypeChecks::EXTRA)) {
+ instr_ = instr;
+ switch (instr_.InstructionType()) {
case Instruction::kRegisterType:
- DecodeTypeRegister(instr);
+ DecodeTypeRegister();
break;
case Instruction::kImmediateType:
- DecodeTypeImmediate(instr);
+ DecodeTypeImmediate();
break;
case Instruction::kJumpType:
- DecodeTypeJump(instr);
+ DecodeTypeJump();
break;
default:
UNSUPPORTED();
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index cd606e2402..df98465c24 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -122,6 +122,39 @@ class CachePage {
char validity_map_[kValidityMapSize]; // One byte per line.
};
+class SimInstructionBase : public InstructionBase {
+ public:
+ Type InstructionType() const { return type_; }
+ inline Instruction* instr() const { return instr_; }
+ inline int32_t operand() const { return operand_; }
+
+ protected:
+ SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
+ explicit SimInstructionBase(Instruction* instr) {}
+
+ int32_t operand_;
+ Instruction* instr_;
+ Type type_;
+
+ private:
+ DISALLOW_ASSIGN(SimInstructionBase);
+};
+
+class SimInstruction : public InstructionGetters<SimInstructionBase> {
+ public:
+ SimInstruction() {}
+
+ explicit SimInstruction(Instruction* instr) { *this = instr; }
+
+ SimInstruction& operator=(Instruction* instr) {
+ operand_ = *reinterpret_cast<const int32_t*>(instr);
+ instr_ = instr;
+ type_ = InstructionBase::InstructionType();
+ DCHECK(reinterpret_cast<void*>(&operand_) == this);
+ return *this;
+ }
+};
+
class Simulator {
public:
friend class MipsDebugger;
@@ -226,7 +259,7 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
- static void TearDown(base::HashMap* i_cache, Redirection* first);
+ static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
@@ -246,7 +279,8 @@ class Simulator {
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
- static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
+ static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+ size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_ra, end_sim_pc).
@@ -314,6 +348,8 @@ class Simulator {
inline int32_t SetDoubleHIW(double* addr);
inline int32_t SetDoubleLOW(double* addr);
+ SimInstruction instr_;
+
// functions called from DecodeTypeRegister.
void DecodeTypeRegisterCOP1();
@@ -335,40 +371,36 @@ class Simulator {
void DecodeTypeRegisterLRsType();
// Executing is handled based on the instruction type.
- void DecodeTypeRegister(Instruction* instr);
-
- Instruction* currentInstr_;
- inline Instruction* get_instr() const { return currentInstr_; }
- inline void set_instr(Instruction* instr) { currentInstr_ = instr; }
+ void DecodeTypeRegister();
- inline int32_t rs_reg() const { return currentInstr_->RsValue(); }
+ inline int32_t rs_reg() const { return instr_.RsValue(); }
inline int64_t rs() const { return get_register(rs_reg()); }
inline uint64_t rs_u() const {
return static_cast<uint64_t>(get_register(rs_reg()));
}
- inline int32_t rt_reg() const { return currentInstr_->RtValue(); }
+ inline int32_t rt_reg() const { return instr_.RtValue(); }
inline int64_t rt() const { return get_register(rt_reg()); }
inline uint64_t rt_u() const {
return static_cast<uint64_t>(get_register(rt_reg()));
}
- inline int32_t rd_reg() const { return currentInstr_->RdValue(); }
- inline int32_t fr_reg() const { return currentInstr_->FrValue(); }
- inline int32_t fs_reg() const { return currentInstr_->FsValue(); }
- inline int32_t ft_reg() const { return currentInstr_->FtValue(); }
- inline int32_t fd_reg() const { return currentInstr_->FdValue(); }
- inline int32_t sa() const { return currentInstr_->SaValue(); }
- inline int32_t lsa_sa() const { return currentInstr_->LsaSaValue(); }
+ inline int32_t rd_reg() const { return instr_.RdValue(); }
+ inline int32_t fr_reg() const { return instr_.FrValue(); }
+ inline int32_t fs_reg() const { return instr_.FsValue(); }
+ inline int32_t ft_reg() const { return instr_.FtValue(); }
+ inline int32_t fd_reg() const { return instr_.FdValue(); }
+ inline int32_t sa() const { return instr_.SaValue(); }
+ inline int32_t lsa_sa() const { return instr_.LsaSaValue(); }
inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
set_register(rd_reg, alu_out);
TraceRegWr(alu_out);
}
- void DecodeTypeImmediate(Instruction* instr);
- void DecodeTypeJump(Instruction* instr);
+ void DecodeTypeImmediate();
+ void DecodeTypeJump();
// Used for breakpoints and traps.
- void SoftwareInterrupt(Instruction* instr);
+ void SoftwareInterrupt();
// Compact branch guard.
void CheckForbiddenSlot(int64_t current_pc) {
@@ -414,9 +446,12 @@ class Simulator {
}
// ICache.
- static void CheckICache(base::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(base::HashMap* i_cache, intptr_t start, size_t size);
- static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
+ static void CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr);
+ static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+ size_t size);
+ static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page);
enum Exception {
none,
@@ -461,7 +496,7 @@ class Simulator {
char* last_debugger_input_;
// Icache simulation.
- base::HashMap* i_cache_;
+ base::CustomMatcherHashMap* i_cache_;
v8::internal::Isolate* isolate_;
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index ccee37b962..0252b64650 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -465,7 +465,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_PROMISE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
- case JS_MODULE_TYPE:
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
case JS_ARRAY_TYPE:
@@ -475,6 +474,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_MAP_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
+ case JS_STRING_ITERATOR_TYPE:
case JS_REGEXP_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 7d426a045e..3c43f23074 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -152,6 +152,9 @@ void HeapObject::HeapObjectVerify() {
case JS_MAP_ITERATOR_TYPE:
JSMapIterator::cast(this)->JSMapIteratorVerify();
break;
+ case JS_STRING_ITERATOR_TYPE:
+ JSStringIterator::cast(this)->JSStringIteratorVerify();
+ break;
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(this)->JSWeakMapVerify();
break;
@@ -562,6 +565,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
VerifyObjectField(kOptimizedCodeMapOffset);
VerifyObjectField(kFeedbackMetadataOffset);
VerifyObjectField(kScopeInfoOffset);
+ VerifyObjectField(kOuterScopeInfoOffset);
VerifyObjectField(kInstanceClassNameOffset);
CHECK(function_data()->IsUndefined(GetIsolate()) || IsApiFunction() ||
HasBytecodeArray() || HasAsmWasmData());
@@ -778,6 +782,14 @@ void JSWeakMap::JSWeakMapVerify() {
CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
}
+void JSStringIterator::JSStringIteratorVerify() {
+ CHECK(IsJSStringIterator());
+ JSObjectVerify();
+ CHECK(string()->IsString());
+
+ CHECK_GE(index(), 0);
+ CHECK_LE(index(), String::kMaxLength);
+}
void JSWeakSet::JSWeakSetVerify() {
CHECK(IsJSWeakSet());
@@ -831,7 +843,6 @@ void JSRegExp::JSRegExpVerify() {
}
}
-
void JSProxy::JSProxyVerify() {
CHECK(IsJSProxy());
VerifyPointer(target());
@@ -877,9 +888,7 @@ void JSTypedArray::JSTypedArrayVerify() {
CHECK(IsJSTypedArray());
JSArrayBufferViewVerify();
VerifyPointer(raw_length());
- CHECK(raw_length()->IsSmi() || raw_length()->IsHeapNumber() ||
- raw_length()->IsUndefined(GetIsolate()));
-
+ CHECK(raw_length()->IsSmi() || raw_length()->IsUndefined(GetIsolate()));
VerifyPointer(elements());
}
@@ -900,6 +909,27 @@ void Box::BoxVerify() {
value()->ObjectVerify();
}
+void PromiseContainer::PromiseContainerVerify() {
+ CHECK(IsPromiseContainer());
+ thenable()->ObjectVerify();
+ then()->ObjectVerify();
+ resolve()->ObjectVerify();
+ reject()->ObjectVerify();
+ before_debug_event()->ObjectVerify();
+ after_debug_event()->ObjectVerify();
+}
+
+void Module::ModuleVerify() {
+ CHECK(IsModule());
+ CHECK(code()->IsSharedFunctionInfo() || code()->IsJSFunction());
+ code()->ObjectVerify();
+ exports()->ObjectVerify();
+ requested_modules()->ObjectVerify();
+ VerifySmiField(kFlagsOffset);
+ embedder_data()->ObjectVerify();
+ CHECK(shared()->name()->IsSymbol());
+ // TODO(neis): Check more.
+}
void PrototypeInfo::PrototypeInfoVerify() {
CHECK(IsPrototypeInfo());
@@ -911,10 +941,8 @@ void PrototypeInfo::PrototypeInfoVerify() {
CHECK(validity_cell()->IsCell() || validity_cell()->IsSmi());
}
-
-void SloppyBlockWithEvalContextExtension::
- SloppyBlockWithEvalContextExtensionVerify() {
- CHECK(IsSloppyBlockWithEvalContextExtension());
+void ContextExtension::ContextExtensionVerify() {
+ CHECK(IsContextExtension());
VerifyObjectField(kScopeInfoOffset);
VerifyObjectField(kExtensionOffset);
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 3d82bf8205..af1261538e 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -27,6 +27,7 @@
#include "src/isolate.h"
#include "src/keys.h"
#include "src/layout-descriptor-inl.h"
+#include "src/lookup-cache-inl.h"
#include "src/lookup.h"
#include "src/objects.h"
#include "src/property.h"
@@ -700,6 +701,7 @@ TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
+TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
bool HeapObject::IsJSWeakCollection() const {
return IsJSWeakMap() || IsJSWeakSet();
@@ -709,6 +711,8 @@ bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }
bool HeapObject::IsDescriptorArray() const { return IsFixedArray(); }
+bool HeapObject::IsFrameArray() const { return IsFixedArray(); }
+
bool HeapObject::IsArrayList() const { return IsFixedArray(); }
bool Object::IsLayoutDescriptor() const {
@@ -790,6 +794,13 @@ bool HeapObject::IsScopeInfo() const {
return map() == GetHeap()->scope_info_map();
}
+bool HeapObject::IsModuleInfoEntry() const {
+ return map() == GetHeap()->module_info_entry_map();
+}
+
+bool HeapObject::IsModuleInfo() const {
+ return map() == GetHeap()->module_info_map();
+}
TYPE_CHECKER(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)
TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
@@ -2103,6 +2114,8 @@ int JSObject::GetHeaderSize(InstanceType type) {
return JSArgumentsObject::kHeaderSize;
case JS_ERROR_TYPE:
return JSObject::kHeaderSize;
+ case JS_STRING_ITERATOR_TYPE:
+ return JSStringIterator::kSize;
default:
UNREACHABLE();
return 0;
@@ -2610,6 +2623,29 @@ Object** FixedArray::RawFieldOfElementAt(int index) {
return HeapObject::RawField(this, OffsetOfElementAt(index));
}
+#define DEFINE_FRAME_ARRAY_ACCESSORS(name, type) \
+ type* FrameArray::name(int frame_ix) const { \
+ Object* obj = \
+ get(kFirstIndex + frame_ix * kElementsPerFrame + k##name##Offset); \
+ return type::cast(obj); \
+ } \
+ \
+ void FrameArray::Set##name(int frame_ix, type* value) { \
+ set(kFirstIndex + frame_ix * kElementsPerFrame + k##name##Offset, value); \
+ }
+FRAME_ARRAY_FIELD_LIST(DEFINE_FRAME_ARRAY_ACCESSORS)
+#undef DEFINE_FRAME_ARRAY_ACCESSORS
+
+bool FrameArray::IsWasmFrame(int frame_ix) const {
+ const int flags = Flags(frame_ix)->value();
+ return (flags & kIsWasmFrame) != 0;
+}
+
+int FrameArray::FrameCount() const {
+ const int frame_count = Smi::cast(get(kFrameCountIndex))->value();
+ DCHECK_LE(0, frame_count);
+ return frame_count;
+}
bool DescriptorArray::IsEmpty() {
DCHECK(length() >= kFirstIndex ||
@@ -3223,6 +3259,7 @@ CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(Float32x4)
CAST_ACCESSOR(Foreign)
+CAST_ACCESSOR(FrameArray)
CAST_ACCESSOR(GlobalDictionary)
CAST_ACCESSOR(HandlerTable)
CAST_ACCESSOR(HeapObject)
@@ -3248,6 +3285,7 @@ CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSSetIterator)
+CAST_ACCESSOR(JSStringIterator)
CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSValue)
CAST_ACCESSOR(JSWeakCollection)
@@ -3255,6 +3293,8 @@ CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(LayoutDescriptor)
CAST_ACCESSOR(Map)
+CAST_ACCESSOR(ModuleInfoEntry)
+CAST_ACCESSOR(ModuleInfo)
CAST_ACCESSOR(Name)
CAST_ACCESSOR(NameDictionary)
CAST_ACCESSOR(NormalizedMapCache)
@@ -5614,6 +5654,13 @@ ACCESSORS(AccessorInfo, data, Object, kDataOffset)
ACCESSORS(Box, value, Object, kValueOffset)
+ACCESSORS(PromiseContainer, thenable, JSReceiver, kThenableOffset)
+ACCESSORS(PromiseContainer, then, JSReceiver, kThenOffset)
+ACCESSORS(PromiseContainer, resolve, JSFunction, kResolveOffset)
+ACCESSORS(PromiseContainer, reject, JSFunction, kRejectOffset)
+ACCESSORS(PromiseContainer, before_debug_event, Object, kBeforeDebugEventOffset)
+ACCESSORS(PromiseContainer, after_debug_event, Object, kAfterDebugEventOffset)
+
Map* PrototypeInfo::ObjectCreateMap() {
return Map::cast(WeakCell::cast(object_create_map())->value());
}
@@ -5662,10 +5709,26 @@ ACCESSORS(PrototypeInfo, validity_cell, Object, kValidityCellOffset)
SMI_ACCESSORS(PrototypeInfo, bit_field, kBitFieldOffset)
BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
-ACCESSORS(SloppyBlockWithEvalContextExtension, scope_info, ScopeInfo,
- kScopeInfoOffset)
-ACCESSORS(SloppyBlockWithEvalContextExtension, extension, JSObject,
- kExtensionOffset)
+ACCESSORS(ContextExtension, scope_info, ScopeInfo, kScopeInfoOffset)
+ACCESSORS(ContextExtension, extension, Object, kExtensionOffset)
+
+ACCESSORS(Module, code, Object, kCodeOffset)
+ACCESSORS(Module, exports, ObjectHashTable, kExportsOffset)
+ACCESSORS(Module, requested_modules, FixedArray, kRequestedModulesOffset)
+SMI_ACCESSORS(Module, flags, kFlagsOffset)
+BOOL_ACCESSORS(Module, flags, evaluated, kEvaluatedBit)
+ACCESSORS(Module, embedder_data, Object, kEmbedderDataOffset)
+
+SharedFunctionInfo* Module::shared() const {
+ return code()->IsSharedFunctionInfo() ? SharedFunctionInfo::cast(code())
+ : JSFunction::cast(code())->shared();
+}
+
+ModuleInfo* Module::info() const {
+ return shared()->scope_info()->ModuleDescriptorInfo();
+}
+
+uint32_t Module::Hash() const { return Symbol::cast(shared()->name())->Hash(); }
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -5679,8 +5742,10 @@ ACCESSORS(AccessCheckInfo, data, Object, kDataOffset)
ACCESSORS(InterceptorInfo, getter, Object, kGetterOffset)
ACCESSORS(InterceptorInfo, setter, Object, kSetterOffset)
ACCESSORS(InterceptorInfo, query, Object, kQueryOffset)
+ACCESSORS(InterceptorInfo, descriptor, Object, kDescriptorOffset)
ACCESSORS(InterceptorInfo, deleter, Object, kDeleterOffset)
ACCESSORS(InterceptorInfo, enumerator, Object, kEnumeratorOffset)
+ACCESSORS(InterceptorInfo, definer, Object, kDefinerOffset)
ACCESSORS(InterceptorInfo, data, Object, kDataOffset)
SMI_ACCESSORS(InterceptorInfo, flags, kFlagsOffset)
BOOL_ACCESSORS(InterceptorInfo, flags, can_intercept_symbols,
@@ -6031,8 +6096,7 @@ void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
set_compiler_hints(hints);
}
-
-FunctionKind SharedFunctionInfo::kind() {
+FunctionKind SharedFunctionInfo::kind() const {
return FunctionKindBits::decode(compiler_hints());
}
@@ -6057,23 +6121,12 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_crankshaft,
kDontCrankshaft)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_arrow, kIsArrow)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_async, kIsAsyncFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_concise_method,
- kIsConciseMethod)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_getter_function,
- kIsGetterFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_setter_function,
- kIsSetterFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_default_constructor,
- kIsDefaultConstructor)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
kIsAsmWasmBroken)
-
-inline bool SharedFunctionInfo::is_resumable() const {
- return is_generator() || is_async();
-}
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, requires_class_field_init,
+ kRequiresClassFieldInit)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_class_field_initializer,
+ kIsClassFieldInitializer)
bool Script::HasValidSource() {
Object* src = this->source();
@@ -6155,6 +6208,9 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
mode);
}
+ACCESSORS(SharedFunctionInfo, outer_scope_info, HeapObject,
+ kOuterScopeInfoOffset)
+
bool SharedFunctionInfo::is_compiled() const {
Builtins* builtins = GetIsolate()->builtins();
DCHECK(code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent));
@@ -7890,6 +7946,44 @@ bool ScopeInfo::HasSimpleParameters() {
FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(SCOPE_INFO_FIELD_ACCESSORS)
#undef SCOPE_INFO_FIELD_ACCESSORS
+Object* ModuleInfoEntry::export_name() const { return get(kExportNameIndex); }
+
+Object* ModuleInfoEntry::local_name() const { return get(kLocalNameIndex); }
+
+Object* ModuleInfoEntry::import_name() const { return get(kImportNameIndex); }
+
+Object* ModuleInfoEntry::module_request() const {
+ return get(kModuleRequestIndex);
+}
+
+FixedArray* ModuleInfo::module_requests() const {
+ return FixedArray::cast(get(kModuleRequestsIndex));
+}
+
+FixedArray* ModuleInfo::special_exports() const {
+ return FixedArray::cast(get(kSpecialExportsIndex));
+}
+
+FixedArray* ModuleInfo::regular_exports() const {
+ return FixedArray::cast(get(kRegularExportsIndex));
+}
+
+FixedArray* ModuleInfo::regular_imports() const {
+ return FixedArray::cast(get(kRegularImportsIndex));
+}
+
+FixedArray* ModuleInfo::namespace_imports() const {
+ return FixedArray::cast(get(kNamespaceImportsIndex));
+}
+
+#ifdef DEBUG
+bool ModuleInfo::Equals(ModuleInfo* other) const {
+ return regular_exports() == other->regular_exports() &&
+ regular_imports() == other->regular_imports() &&
+ special_exports() == other->special_exports() &&
+ namespace_imports() == other->namespace_imports();
+}
+#endif
void Map::ClearCodeCache(Heap* heap) {
// No write barrier is needed since empty_fixed_array is not in new space.
@@ -8176,6 +8270,12 @@ static inline Handle<Object> MakeEntryPair(Isolate* isolate, Handle<Name> key,
FAST_ELEMENTS, 2);
}
+ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
+ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
+
+ACCESSORS(JSStringIterator, string, String, kStringOffset)
+SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
+
#undef TYPE_CHECKER
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 6f1f746e5e..9054371e84 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -28,7 +28,8 @@ void Object::Print() {
void Object::Print(std::ostream& os) { // NOLINT
if (IsSmi()) {
- Smi::cast(this)->SmiPrint(os);
+ os << "Smi: " << std::hex << "0x" << Smi::cast(this)->value();
+ os << std::dec << " (" << Smi::cast(this)->value() << ")\n";
} else {
HeapObject::cast(this)->HeapObjectPrint(os);
}
@@ -52,6 +53,7 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
HandleScope scope(GetIsolate());
if (instance_type < FIRST_NONSTRING_TYPE) {
String::cast(this)->StringPrint(os);
+ os << "\n";
return;
}
@@ -318,18 +320,37 @@ void JSObject::PrintProperties(std::ostream& os) { // NOLINT
}
}
+namespace {
+
+template <class T>
+double GetScalarElement(T* array, int index) {
+ return array->get_scalar(index);
+}
+
+double GetScalarElement(FixedDoubleArray* array, int index) {
+ if (array->is_the_hole(index)) return bit_cast<double>(kHoleNanInt64);
+ return array->get_scalar(index);
+}
+
+bool is_the_hole(double maybe_hole) {
+ return bit_cast<uint64_t>(maybe_hole) == kHoleNanInt64;
+}
+
+} // namespace
+
template <class T, bool print_the_hole>
static void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
T* array = T::cast(object);
if (array->length() == 0) return;
int previous_index = 0;
- double previous_value = array->get_scalar(0);
+ double previous_value = GetScalarElement(array, 0);
double value = 0.0;
int i;
for (i = 1; i <= array->length(); i++) {
- if (i < array->length()) value = array->get_scalar(i);
+ if (i < array->length()) value = GetScalarElement(array, i);
bool values_are_nan = std::isnan(previous_value) && std::isnan(value);
- if ((previous_value == value || values_are_nan) && i != array->length()) {
+ if (i != array->length() && (previous_value == value || values_are_nan) &&
+ is_the_hole(previous_value) == is_the_hole(value)) {
continue;
}
os << "\n";
@@ -339,8 +360,7 @@ static void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
ss << '-' << (i - 1);
}
os << std::setw(12) << ss.str() << ": ";
- if (print_the_hole &&
- FixedDoubleArray::cast(object)->is_the_hole(previous_index)) {
+ if (print_the_hole && is_the_hole(previous_value)) {
os << "<the_hole>";
} else {
os << previous_value;
@@ -390,22 +410,12 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
break;
}
-#define PRINT_ELEMENTS(Kind, Type) \
- case Kind: { \
- DoPrintElements<Type, false>(os, elements()); \
- break; \
+#define PRINT_ELEMENTS(Type, type, TYPE, elementType, size) \
+ case TYPE##_ELEMENTS: { \
+ DoPrintElements<Fixed##Type##Array, false>(os, elements()); \
+ break; \
}
-
- PRINT_ELEMENTS(UINT8_ELEMENTS, FixedUint8Array)
- PRINT_ELEMENTS(UINT8_CLAMPED_ELEMENTS, FixedUint8ClampedArray)
- PRINT_ELEMENTS(INT8_ELEMENTS, FixedInt8Array)
- PRINT_ELEMENTS(UINT16_ELEMENTS, FixedUint16Array)
- PRINT_ELEMENTS(INT16_ELEMENTS, FixedInt16Array)
- PRINT_ELEMENTS(UINT32_ELEMENTS, FixedUint32Array)
- PRINT_ELEMENTS(INT32_ELEMENTS, FixedInt32Array)
- PRINT_ELEMENTS(FLOAT32_ELEMENTS, FixedFloat32Array)
- PRINT_ELEMENTS(FLOAT64_ELEMENTS, FixedFloat64Array)
-
+ TYPED_ARRAYS(PRINT_ELEMENTS)
#undef PRINT_ELEMENTS
case DICTIONARY_ELEMENTS:
@@ -732,6 +742,16 @@ void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) { // NOLINT
os << Code::ICState2String(nexus.StateFromFeedback());
break;
}
+ case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC: {
+ BinaryOpICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
+ CompareICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
case FeedbackVectorSlotKind::GENERAL:
break;
case FeedbackVectorSlotKind::INVALID:
@@ -911,7 +931,7 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSArrayBuffer");
os << "\n - backing_store = " << backing_store();
os << "\n - byte_length = " << Brief(byte_length());
- if (was_neutered()) os << " - neutered\n";
+ if (was_neutered()) os << "\n - neutered";
JSObjectPrintBody(os, this, !was_neutered());
}
@@ -922,7 +942,7 @@ void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
os << "\n - byte_offset = " << Brief(byte_offset());
os << "\n - byte_length = " << Brief(byte_length());
os << "\n - length = " << Brief(length());
- if (WasNeutered()) os << " - neutered\n";
+ if (WasNeutered()) os << "\n - neutered";
JSObjectPrintBody(os, this, !WasNeutered());
}
@@ -932,7 +952,7 @@ void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
os << "\n - buffer =" << Brief(buffer());
os << "\n - byte_offset = " << Brief(byte_offset());
os << "\n - byte_length = " << Brief(byte_length());
- if (WasNeutered()) os << " - neutered\n";
+ if (WasNeutered()) os << "\n - neutered";
JSObjectPrintBody(os, this, !WasNeutered());
}
@@ -954,9 +974,9 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << "\n - name = " << Brief(shared()->name());
os << "\n - formal_parameter_count = "
<< shared()->internal_formal_parameter_count();
- if (shared()->is_generator()) {
+ if (IsGeneratorFunction(shared()->kind())) {
os << "\n - generator";
- } else if (shared()->is_async()) {
+ } else if (IsAsyncFunction(shared()->kind())) {
os << "\n - async";
}
os << "\n - context = " << Brief(context());
@@ -1127,6 +1147,26 @@ void Box::BoxPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+void PromiseContainer::PromiseContainerPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PromiseContainer");
+ os << "\n - thenable: " << Brief(thenable());
+ os << "\n - then: " << Brief(then());
+ os << "\n - resolve: " << Brief(resolve());
+ os << "\n - reject: " << Brief(reject());
+ os << "\n - before debug event: " << Brief(before_debug_event());
+ os << "\n - after debug event: " << Brief(after_debug_event());
+ os << "\n";
+}
+
+void Module::ModulePrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "Module");
+ os << "\n - code: " << Brief(code());
+ os << "\n - exports: " << Brief(exports());
+ os << "\n - requested_modules: " << Brief(requested_modules());
+ os << "\n - evaluated: " << evaluated();
+ os << "\n - embedder_data: " << Brief(embedder_data());
+ os << "\n";
+}
void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PrototypeInfo");
@@ -1136,10 +1176,8 @@ void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-
-void SloppyBlockWithEvalContextExtension::
- SloppyBlockWithEvalContextExtensionPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "SloppyBlockWithEvalContextExtension");
+void ContextExtension::ContextExtensionPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "ContextExtension");
os << "\n - scope_info: " << Brief(scope_info());
os << "\n - extension: " << Brief(extension());
os << "\n";
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 00721c2d1b..44271db9fb 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -8,6 +8,8 @@
#include <iomanip>
#include <memory>
#include <sstream>
+#include <unordered_map>
+#include <unordered_set>
#include "src/objects-inl.h"
@@ -60,7 +62,7 @@
#include "src/string-stream.h"
#include "src/utils.h"
#include "src/wasm/wasm-module.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
#ifdef ENABLE_DISASSEMBLER
#include "src/disasm.h"
@@ -995,12 +997,12 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it) {
case LookupIterator::ACCESSOR:
return GetPropertyWithAccessor(it);
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return ReadAbsentProperty(it);
+ return it->isolate()->factory()->undefined_value();
case LookupIterator::DATA:
return it->GetDataValue();
}
}
- return ReadAbsentProperty(it);
+ return it->isolate()->factory()->undefined_value();
}
@@ -1349,7 +1351,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
Object::DONT_THROW);
Handle<Object> result = args.Call(call_fun, name);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (result.is_null()) return ReadAbsentProperty(isolate, receiver, name);
+ if (result.is_null()) return isolate->factory()->undefined_value();
// Rebox handle before return.
return handle(*result, isolate);
}
@@ -1366,7 +1368,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
receiver, Handle<JSReceiver>::cast(getter));
}
// Getter is not a function.
- return ReadAbsentProperty(isolate, receiver, it->GetName());
+ return isolate->factory()->undefined_value();
}
// static
@@ -1677,6 +1679,71 @@ Maybe<bool> SetPropertyWithInterceptorInternal(
return Just(result);
}
+Maybe<bool> DefinePropertyWithInterceptorInternal(
+ LookupIterator* it, Handle<InterceptorInfo> interceptor,
+ Object::ShouldThrow should_throw, PropertyDescriptor& desc) {
+ Isolate* isolate = it->isolate();
+ // Make sure that the top context does not change when doing callbacks or
+ // interceptor calls.
+ AssertNoContextChange ncc(isolate);
+
+ if (interceptor->definer()->IsUndefined(isolate)) return Just(false);
+
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ bool result;
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Nothing<bool>());
+ }
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, should_throw);
+
+ std::unique_ptr<v8::PropertyDescriptor> descriptor(
+ new v8::PropertyDescriptor());
+ if (PropertyDescriptor::IsAccessorDescriptor(&desc)) {
+ descriptor.reset(new v8::PropertyDescriptor(
+ v8::Utils::ToLocal(desc.get()), v8::Utils::ToLocal(desc.set())));
+ } else if (PropertyDescriptor::IsDataDescriptor(&desc)) {
+ if (desc.has_writable()) {
+ descriptor.reset(new v8::PropertyDescriptor(
+ v8::Utils::ToLocal(desc.value()), desc.writable()));
+ } else {
+ descriptor.reset(
+ new v8::PropertyDescriptor(v8::Utils::ToLocal(desc.value())));
+ }
+ }
+ if (desc.has_enumerable()) {
+ descriptor->set_enumerable(desc.enumerable());
+ }
+ if (desc.has_configurable()) {
+ descriptor->set_configurable(desc.configurable());
+ }
+
+ if (it->IsElement()) {
+ uint32_t index = it->index();
+ v8::IndexedPropertyDefinerCallback definer =
+ v8::ToCData<v8::IndexedPropertyDefinerCallback>(interceptor->definer());
+ result = !args.Call(definer, index, *descriptor).is_null();
+ } else {
+ Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
+
+ if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
+ return Just(false);
+ }
+
+ v8::GenericNamedPropertyDefinerCallback definer =
+ v8::ToCData<v8::GenericNamedPropertyDefinerCallback>(
+ interceptor->definer());
+ result = !args.Call(definer, name, *descriptor).is_null();
+ }
+
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+ return Just(result);
+}
+
} // namespace
MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
@@ -2415,10 +2482,6 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JS Generator>");
break;
}
- case JS_MODULE_TYPE: {
- accumulator->Add("<JS Module>");
- break;
- }
// All other JSObjects are rather similar to each other (JSObject,
// JSGlobalProxy, JSGlobalObject, JSUndetectable, JSValue).
default: {
@@ -3449,9 +3512,16 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
// Ensure that in-object space of slow-mode object does not contain random
// garbage.
int inobject_properties = new_map->GetInObjectProperties();
- for (int i = 0; i < inobject_properties; i++) {
- FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
- object->RawFastPropertyAtPut(index, Smi::FromInt(0));
+ if (inobject_properties) {
+ Heap* heap = isolate->heap();
+ heap->ClearRecordedSlotRange(
+ object->address() + map->GetInObjectPropertyOffset(0),
+ object->address() + new_instance_size);
+
+ for (int i = 0; i < inobject_properties; i++) {
+ FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
+ object->RawFastPropertyAtPut(index, Smi::FromInt(0));
+ }
}
isolate->counters()->props_to_dictionary()->Increment();
@@ -4576,13 +4646,6 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
if (result.IsNothing() || result.FromJust()) return result;
// Interceptor modified the store target but failed to set the
// property.
- // TODO(jochen): Remove after we've identified the faulty interceptor.
- if (!store_target_map.is_null() &&
- *store_target_map != it->GetStoreTarget()->map()) {
- it->isolate()->PushStackTraceAndDie(
- 0xabababaa, v8::ToCData<void*>(it->GetInterceptor()->setter()),
- nullptr, 0xabababab);
- }
Utils::ApiCheck(store_target_map.is_null() ||
*store_target_map == it->GetStoreTarget()->map(),
it->IsElement() ? "v8::IndexedPropertySetterCallback"
@@ -4761,17 +4824,6 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
return AddDataProperty(&own_lookup, value, NONE, should_throw, store_mode);
}
-MaybeHandle<Object> Object::ReadAbsentProperty(LookupIterator* it) {
- return it->isolate()->factory()->undefined_value();
-}
-
-MaybeHandle<Object> Object::ReadAbsentProperty(Isolate* isolate,
- Handle<Object> receiver,
- Handle<Object> name) {
- return isolate->factory()->undefined_value();
-}
-
-
Maybe<bool> Object::CannotCreateProperty(Isolate* isolate,
Handle<Object> receiver,
Handle<Object> name,
@@ -6542,6 +6594,34 @@ Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(Isolate* isolate,
it.Next();
}
+ // Handle interceptor
+ if (it.state() == LookupIterator::INTERCEPTOR) {
+ Handle<Map> store_target_map;
+ if (it.GetReceiver()->IsJSObject()) {
+ store_target_map = handle(it.GetStoreTarget()->map(), it.isolate());
+ }
+ if (it.HolderIsReceiverOrHiddenPrototype()) {
+ Maybe<bool> result = DefinePropertyWithInterceptorInternal(
+ &it, it.GetInterceptor(), should_throw, *desc);
+ if (result.IsNothing() || result.FromJust()) {
+ return result;
+ }
+ // Interceptor modified the store target but failed to set the
+ // property.
+ if (!store_target_map.is_null() &&
+ *store_target_map != it.GetStoreTarget()->map()) {
+ it.isolate()->PushStackTraceAndDie(
+ 0xabababaa, v8::ToCData<void*>(it.GetInterceptor()->definer()),
+ nullptr, 0xabababab);
+ }
+ Utils::ApiCheck(store_target_map.is_null() ||
+ *store_target_map == it.GetStoreTarget()->map(),
+ it.IsElement() ? "v8::IndexedPropertyDefinerCallback"
+ : "v8::NamedPropertyDefinerCallback",
+ "Interceptor silently changed store target.");
+ }
+ }
+
return OrdinaryDefineOwnProperty(&it, desc, should_throw);
}
@@ -7261,6 +7341,57 @@ Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(Isolate* isolate,
return GetOwnPropertyDescriptor(&it, desc);
}
+namespace {
+
+Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
+ PropertyDescriptor* desc) {
+ if (it->state() == LookupIterator::INTERCEPTOR) {
+ Isolate* isolate = it->isolate();
+ Handle<InterceptorInfo> interceptor = it->GetInterceptor();
+ if (!interceptor->descriptor()->IsUndefined(isolate)) {
+ Handle<Object> result;
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, receiver, Object::ConvertReceiver(isolate, receiver),
+ Nothing<bool>());
+ }
+
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, Object::DONT_THROW);
+ if (it->IsElement()) {
+ uint32_t index = it->index();
+ v8::IndexedPropertyDescriptorCallback descriptorCallback =
+ v8::ToCData<v8::IndexedPropertyDescriptorCallback>(
+ interceptor->descriptor());
+
+ result = args.Call(descriptorCallback, index);
+ } else {
+ Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
+ v8::GenericNamedPropertyDescriptorCallback descriptorCallback =
+ v8::ToCData<v8::GenericNamedPropertyDescriptorCallback>(
+ interceptor->descriptor());
+ result = args.Call(descriptorCallback, name);
+ }
+ if (!result.is_null()) {
+ // Request successfully intercepted, try to set the property
+ // descriptor.
+ Utils::ApiCheck(
+ PropertyDescriptor::ToPropertyDescriptor(isolate, result, desc),
+ it->IsElement() ? "v8::IndexedPropertyDescriptorCallback"
+ : "v8::NamedPropertyDescriptorCallback",
+ "Invalid property descriptor.");
+
+ return Just(true);
+ }
+ }
+ }
+ return Just(false);
+}
+} // namespace
// ES6 9.1.5.1
// Returns true on success, false if the property didn't exist, nothing if
@@ -7275,6 +7406,13 @@ Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
it->GetName(), desc);
}
+ Maybe<bool> intercepted = GetPropertyDescriptorWithInterceptor(it, desc);
+ MAYBE_RETURN(intercepted, Nothing<bool>());
+ if (intercepted.FromJust()) {
+ return Just(true);
+ }
+
+ // Request was not intercepted, continue as normal.
// 1. (Assert)
// 2. If O does not have an own property with key P, return undefined.
Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(it);
@@ -9367,12 +9505,6 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
*map, map->is_prototype_map()
? &RuntimeCallStats::PrototypeMap_TransitionToDataProperty
: &RuntimeCallStats::Map_TransitionToDataProperty);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- map->GetIsolate(),
- (map->is_prototype_map()
- ? &tracing::TraceEventStatsTable::
- PrototypeMap_TransitionToDataProperty
- : &tracing::TraceEventStatsTable::Map_TransitionToDataProperty))
DCHECK(name->IsUniqueName());
DCHECK(!map->is_dictionary_map());
@@ -9459,12 +9591,6 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
map->is_prototype_map()
? &RuntimeCallStats::PrototypeMap_TransitionToAccessorProperty
: &RuntimeCallStats::Map_TransitionToAccessorProperty);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate,
- (map->is_prototype_map()
- ? &tracing::TraceEventStatsTable::
- PrototypeMap_TransitionToAccessorProperty
- : &tracing::TraceEventStatsTable::Map_TransitionToAccessorProperty));
// At least one of the accessors needs to be a new value.
DCHECK(!getter->IsNull(isolate) || !setter->IsNull(isolate));
@@ -10177,22 +10303,76 @@ bool ArrayList::IsFull() {
return kFirstIndex + Length() == capacity;
}
+namespace {
-Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
+Handle<FixedArray> EnsureSpaceInFixedArray(Handle<FixedArray> array,
+ int length) {
int capacity = array->length();
- bool empty = (capacity == 0);
- if (capacity < kFirstIndex + length) {
+ if (capacity < length) {
Isolate* isolate = array->GetIsolate();
- int new_capacity = kFirstIndex + length;
+ int new_capacity = length;
new_capacity = new_capacity + Max(new_capacity / 2, 2);
int grow_by = new_capacity - capacity;
array = Handle<ArrayList>::cast(
isolate->factory()->CopyFixedArrayAndGrow(array, grow_by));
- if (empty) array->SetLength(0);
}
return array;
}
+} // namespace
+
+Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
+ const bool empty = (array->length() == 0);
+ auto ret = Handle<ArrayList>::cast(
+ EnsureSpaceInFixedArray(array, kFirstIndex + length));
+ if (empty) ret->SetLength(0);
+ return ret;
+}
+
+// static
+Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
+ Handle<Object> receiver,
+ Handle<JSFunction> function,
+ Handle<AbstractCode> code,
+ int offset, int flags) {
+ const int frame_count = in->FrameCount();
+ const int new_length = LengthFor(frame_count + 1);
+ Handle<FrameArray> array = EnsureSpace(in, new_length);
+ array->SetReceiver(frame_count, *receiver);
+ array->SetFunction(frame_count, *function);
+ array->SetCode(frame_count, *code);
+ array->SetOffset(frame_count, Smi::FromInt(offset));
+ array->SetFlags(frame_count, Smi::FromInt(flags));
+ array->set(kFrameCountIndex, Smi::FromInt(frame_count + 1));
+ return array;
+}
+
+// static
+Handle<FrameArray> FrameArray::AppendWasmFrame(Handle<FrameArray> in,
+ Handle<Object> wasm_object,
+ int wasm_function_index,
+ Handle<AbstractCode> code,
+ int offset, int flags) {
+ const int frame_count = in->FrameCount();
+ const int new_length = LengthFor(frame_count + 1);
+ Handle<FrameArray> array = EnsureSpace(in, new_length);
+ array->SetWasmObject(frame_count, *wasm_object);
+ array->SetWasmFunctionIndex(frame_count, Smi::FromInt(wasm_function_index));
+ array->SetCode(frame_count, *code);
+ array->SetOffset(frame_count, Smi::FromInt(offset));
+ array->SetFlags(frame_count, Smi::FromInt(flags));
+ array->set(kFrameCountIndex, Smi::FromInt(frame_count + 1));
+ return array;
+}
+
+void FrameArray::ShrinkToFit() { Shrink(LengthFor(FrameCount())); }
+
+// static
+Handle<FrameArray> FrameArray::EnsureSpace(Handle<FrameArray> array,
+ int length) {
+ return Handle<FrameArray>::cast(EnsureSpaceInFixedArray(array, length));
+}
+
Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
int number_of_descriptors,
int slack,
@@ -10919,7 +11099,7 @@ String* ConsStringIterator::NextLeaf(bool* blew_stack) {
if ((type & kStringRepresentationMask) != kConsStringTag) {
AdjustMaximumDepth();
int length = string->length();
- DCHECK(length != 0);
+ if (length == 0) break; // Skip empty left-hand sides of ConsStrings.
consumed_ += length;
return string;
}
@@ -11461,6 +11641,118 @@ int String::IndexOf(Isolate* isolate, Handle<String> sub, Handle<String> pat,
return SearchString(isolate, seq_sub.ToUC16Vector(), pat_vector, start_index);
}
+namespace { // for String.Prototype.lastIndexOf
+
+template <typename schar, typename pchar>
+int StringMatchBackwards(Vector<const schar> subject,
+ Vector<const pchar> pattern, int idx) {
+ int pattern_length = pattern.length();
+ DCHECK(pattern_length >= 1);
+ DCHECK(idx + pattern_length <= subject.length());
+
+ if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
+ for (int i = 0; i < pattern_length; i++) {
+ uc16 c = pattern[i];
+ if (c > String::kMaxOneByteCharCode) {
+ return -1;
+ }
+ }
+ }
+
+ pchar pattern_first_char = pattern[0];
+ for (int i = idx; i >= 0; i--) {
+ if (subject[i] != pattern_first_char) continue;
+ int j = 1;
+ while (j < pattern_length) {
+ if (pattern[j] != subject[i + j]) {
+ break;
+ }
+ j++;
+ }
+ if (j == pattern_length) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+} // namespace
+
+Object* String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
+ Handle<Object> search, Handle<Object> position) {
+ if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "String.prototype.lastIndexOf")));
+ }
+ Handle<String> receiver_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver_string,
+ Object::ToString(isolate, receiver));
+
+ Handle<String> search_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
+ Object::ToString(isolate, search));
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
+ Object::ToNumber(position));
+
+ uint32_t start_index;
+
+ if (position->IsNaN()) {
+ start_index = receiver_string->length();
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
+ Object::ToInteger(isolate, position));
+
+ double position_number = std::max(position->Number(), 0.0);
+ position_number = std::min(position_number,
+ static_cast<double>(receiver_string->length()));
+ start_index = static_cast<uint32_t>(position_number);
+ }
+
+ uint32_t pattern_length = search_string->length();
+ uint32_t receiver_length = receiver_string->length();
+
+ if (start_index + pattern_length > receiver_length) {
+ start_index = receiver_length - pattern_length;
+ }
+
+ if (pattern_length == 0) {
+ return Smi::FromInt(start_index);
+ }
+
+ receiver_string = String::Flatten(receiver_string);
+ search_string = String::Flatten(search_string);
+
+ int last_index = -1;
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
+
+ String::FlatContent receiver_content = receiver_string->GetFlatContent();
+ String::FlatContent search_content = search_string->GetFlatContent();
+
+ if (search_content.IsOneByte()) {
+ Vector<const uint8_t> pat_vector = search_content.ToOneByteVector();
+ if (receiver_content.IsOneByte()) {
+ last_index = StringMatchBackwards(receiver_content.ToOneByteVector(),
+ pat_vector, start_index);
+ } else {
+ last_index = StringMatchBackwards(receiver_content.ToUC16Vector(),
+ pat_vector, start_index);
+ }
+ } else {
+ Vector<const uc16> pat_vector = search_content.ToUC16Vector();
+ if (receiver_content.IsOneByte()) {
+ last_index = StringMatchBackwards(receiver_content.ToOneByteVector(),
+ pat_vector, start_index);
+ } else {
+ last_index = StringMatchBackwards(receiver_content.ToUC16Vector(),
+ pat_vector, start_index);
+ }
+ }
+ return Smi::FromInt(last_index);
+}
+
bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
int slen = length();
// Can't check exact length equality, but we can check bounds.
@@ -12361,8 +12653,6 @@ Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
PrototypeOptimizationMode proto_mode) {
RuntimeCallTimerScope stats_scope(*map, &RuntimeCallStats::Map_SetPrototype);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- map->GetIsolate(), &tracing::TraceEventStatsTable::Map_SetPrototype);
bool is_hidden = false;
if (prototype->IsJSObject()) {
@@ -12562,7 +12852,6 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_MAP_ITERATOR_TYPE:
case JS_MAP_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
- case JS_MODULE_TYPE:
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_ARGUMENTS_TYPE:
@@ -12620,7 +12909,8 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
- DCHECK(function->IsConstructor() || function->shared()->is_resumable());
+ DCHECK(function->IsConstructor() ||
+ IsResumableFunction(function->shared()->kind()));
if (function->has_initial_map()) return;
Isolate* isolate = function->GetIsolate();
@@ -12631,7 +12921,7 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
// First create a new map with the size and number of in-object properties
// suggested by the function.
InstanceType instance_type;
- if (function->shared()->is_resumable()) {
+ if (IsResumableFunction(function->shared()->kind())) {
instance_type = JS_GENERATOR_OBJECT_TYPE;
} else {
instance_type = JS_OBJECT_TYPE;
@@ -12862,17 +13152,18 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
}
IncrementalStringBuilder builder(isolate);
- if (!shared_info->is_arrow()) {
- if (shared_info->is_concise_method()) {
- if (shared_info->is_generator()) {
+ FunctionKind kind = shared_info->kind();
+ if (!IsArrowFunction(kind)) {
+ if (IsConciseMethod(kind)) {
+ if (IsGeneratorFunction(kind)) {
builder.AppendCharacter('*');
- } else if (shared_info->is_async()) {
+ } else if (IsAsyncFunction(kind)) {
builder.AppendCString("async ");
}
} else {
- if (shared_info->is_generator()) {
+ if (IsGeneratorFunction(kind)) {
builder.AppendCString("function* ");
- } else if (shared_info->is_async()) {
+ } else if (IsAsyncFunction(kind)) {
builder.AppendCString("async function ");
} else {
builder.AppendCString("function ");
@@ -13455,9 +13746,9 @@ void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
void SharedFunctionInfo::InitFromFunctionLiteral(
Handle<SharedFunctionInfo> shared_info, FunctionLiteral* lit) {
- // When adding fields here, make sure Scope::AnalyzePartially is updated
- // accordingly.
- shared_info->set_length(lit->scope()->default_function_length());
+ // When adding fields here, make sure DeclarationScope::AnalyzePartially is
+ // updated accordingly.
+ shared_info->set_length(lit->scope()->arity());
shared_info->set_internal_formal_parameter_count(lit->parameter_count());
shared_info->set_function_token_position(lit->function_token_position());
shared_info->set_start_position(lit->start_position());
@@ -13481,6 +13772,9 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
}
shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
shared_info->set_asm_function(lit->scope()->asm_function());
+ shared_info->set_requires_class_field_init(lit->requires_class_field_init());
+ shared_info->set_is_class_field_initializer(
+ lit->is_class_field_initializer());
SetExpectedNofPropertiesFromEstimate(shared_info, lit);
}
@@ -15433,10 +15727,11 @@ bool AllocationSite::IsNestedSite() {
return false;
}
-
-void AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
+template <AllocationSiteUpdateMode update_or_check>
+bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
ElementsKind to_kind) {
Isolate* isolate = site->GetIsolate();
+ bool result = false;
if (site->SitePointsToLiteral() && site->transition_info()->IsJSArray()) {
Handle<JSArray> transition_info =
@@ -15452,6 +15747,9 @@ void AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
uint32_t length = 0;
CHECK(transition_info->length()->ToArrayLength(&length));
if (length <= kMaximumArrayBytesToPretransition) {
+ if (update_or_check == AllocationSiteUpdateMode::kCheckOnly) {
+ return true;
+ }
if (FLAG_trace_track_allocation_sites) {
bool is_nested = site->IsNestedSite();
PrintF(
@@ -15464,6 +15762,7 @@ void AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
JSObject::TransitionElementsKind(transition_info, to_kind);
site->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
+ result = true;
}
}
} else {
@@ -15473,6 +15772,7 @@ void AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
to_kind = GetHoleyElementsKind(to_kind);
}
if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
+ if (update_or_check == AllocationSiteUpdateMode::kCheckOnly) return true;
if (FLAG_trace_track_allocation_sites) {
PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
reinterpret_cast<void*>(*site),
@@ -15482,8 +15782,10 @@ void AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
site->SetElementsKind(to_kind);
site->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
+ result = true;
}
}
+ return result;
}
@@ -15499,13 +15801,13 @@ const char* AllocationSite::PretenureDecisionName(PretenureDecision decision) {
return NULL;
}
-
-void JSObject::UpdateAllocationSite(Handle<JSObject> object,
+template <AllocationSiteUpdateMode update_or_check>
+bool JSObject::UpdateAllocationSite(Handle<JSObject> object,
ElementsKind to_kind) {
- if (!object->IsJSArray()) return;
+ if (!object->IsJSArray()) return false;
Heap* heap = object->GetHeap();
- if (!heap->InNewSpace(*object)) return;
+ if (!heap->InNewSpace(*object)) return false;
Handle<AllocationSite> site;
{
@@ -15513,14 +15815,21 @@ void JSObject::UpdateAllocationSite(Handle<JSObject> object,
AllocationMemento* memento =
heap->FindAllocationMemento<Heap::kForRuntime>(*object);
- if (memento == NULL) return;
+ if (memento == NULL) return false;
// Walk through to the Allocation Site
site = handle(memento->GetAllocationSite());
}
- AllocationSite::DigestTransitionFeedback(site, to_kind);
+ return AllocationSite::DigestTransitionFeedback<update_or_check>(site,
+ to_kind);
}
+template bool
+JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kCheckOnly>(
+ Handle<JSObject> object, ElementsKind to_kind);
+
+template bool JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kUpdate>(
+ Handle<JSObject> object, ElementsKind to_kind);
void JSObject::TransitionElementsKind(Handle<JSObject> object,
ElementsKind to_kind) {
@@ -15729,7 +16038,7 @@ Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
}
int FixedArrayBase::GetMaxLengthForNewSpaceAllocation(ElementsKind kind) {
- return ((Page::kMaxRegularHeapObjectSize - FixedArrayBase::kHeaderSize) >>
+ return ((kMaxRegularHeapObjectSize - FixedArrayBase::kHeaderSize) >>
ElementsKindToShiftSize(kind));
}
@@ -17984,7 +18293,8 @@ Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
if (capacity > ObjectHashTable::kMaxCapacity) {
for (size_t i = 0; i < 2; ++i) {
isolate->heap()->CollectAllGarbage(
- Heap::kFinalizeIncrementalMarkingMask, "full object hash table");
+ Heap::kFinalizeIncrementalMarkingMask,
+ GarbageCollectionReason::kFullHashtable);
}
table->Rehash(isolate->factory()->undefined_value());
}
@@ -19281,5 +19591,359 @@ bool JSReceiver::HasProxyInPrototype(Isolate* isolate) {
return false;
}
+namespace {
+
+template <typename T>
+struct HandleValueHash {
+ V8_INLINE size_t operator()(Handle<T> handle) const { return handle->Hash(); }
+};
+
+struct ModuleHandleEqual {
+ V8_INLINE bool operator()(Handle<Module> lhs, Handle<Module> rhs) const {
+ return *lhs == *rhs;
+ }
+};
+
+struct StringHandleEqual {
+ V8_INLINE bool operator()(Handle<String> lhs, Handle<String> rhs) const {
+ return lhs->Equals(*rhs);
+ }
+};
+
+class UnorderedStringSet
+ : public std::unordered_set<Handle<String>, HandleValueHash<String>,
+ StringHandleEqual,
+ zone_allocator<Handle<String>>> {
+ public:
+ explicit UnorderedStringSet(Zone* zone)
+ : std::unordered_set<Handle<String>, HandleValueHash<String>,
+ StringHandleEqual, zone_allocator<Handle<String>>>(
+ 2 /* bucket count */, HandleValueHash<String>(),
+ StringHandleEqual(), zone_allocator<Handle<String>>(zone)) {}
+};
+
+} // anonymous namespace
+
+class Module::ResolveSet
+ : public std::unordered_map<
+ Handle<Module>, UnorderedStringSet*, HandleValueHash<Module>,
+ ModuleHandleEqual, zone_allocator<std::pair<const Handle<Module>,
+ UnorderedStringSet*>>> {
+ public:
+ explicit ResolveSet(Zone* zone)
+ : std::unordered_map<Handle<Module>, UnorderedStringSet*,
+ HandleValueHash<Module>, ModuleHandleEqual,
+ zone_allocator<std::pair<const Handle<Module>,
+ UnorderedStringSet*>>>(
+ 2 /* bucket count */, HandleValueHash<Module>(),
+ ModuleHandleEqual(),
+ zone_allocator<
+ std::pair<const Handle<Module>, UnorderedStringSet*>>(zone)),
+ zone_(zone) {}
+
+ Zone* zone() const { return zone_; }
+
+ private:
+ Zone* zone_;
+};
+
+void Module::CreateIndirectExport(Handle<Module> module, Handle<String> name,
+ Handle<ModuleInfoEntry> entry) {
+ Isolate* isolate = module->GetIsolate();
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ DCHECK(exports->Lookup(name)->IsTheHole(isolate));
+ exports = ObjectHashTable::Put(exports, name, entry);
+ module->set_exports(*exports);
+}
+
+void Module::CreateExport(Handle<Module> module, Handle<FixedArray> names) {
+ DCHECK_LT(0, names->length());
+ Isolate* isolate = module->GetIsolate();
+ Handle<Cell> cell =
+ isolate->factory()->NewCell(isolate->factory()->undefined_value());
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ for (int i = 0, n = names->length(); i < n; ++i) {
+ Handle<String> name(String::cast(names->get(i)), isolate);
+ DCHECK(exports->Lookup(name)->IsTheHole(isolate));
+ exports = ObjectHashTable::Put(exports, name, cell);
+ }
+ module->set_exports(*exports);
+}
+
+void Module::StoreExport(Handle<Module> module, Handle<String> name,
+ Handle<Object> value) {
+ Handle<Cell> cell(Cell::cast(module->exports()->Lookup(name)));
+ cell->set_value(*value);
+}
+
+Handle<Object> Module::LoadExport(Handle<Module> module, Handle<String> name) {
+ Isolate* isolate = module->GetIsolate();
+ Handle<Object> object(module->exports()->Lookup(name), isolate);
+
+ // TODO(neis): Namespace imports are not yet implemented. Trying to use this
+ // feature may crash here.
+ if (!object->IsCell()) UNIMPLEMENTED();
+
+ return handle(Handle<Cell>::cast(object)->value(), isolate);
+}
+
+Handle<Object> Module::LoadImport(Handle<Module> module, Handle<String> name,
+ int module_request) {
+ Isolate* isolate = module->GetIsolate();
+ Handle<Module> requested_module(
+ Module::cast(module->requested_modules()->get(module_request)), isolate);
+ return Module::LoadExport(requested_module, name);
+}
+
+MaybeHandle<Cell> Module::ResolveImport(Handle<Module> module,
+ Handle<String> name, int module_request,
+ bool must_resolve,
+ Module::ResolveSet* resolve_set) {
+ Isolate* isolate = module->GetIsolate();
+ Handle<Module> requested_module(
+ Module::cast(module->requested_modules()->get(module_request)), isolate);
+ return Module::ResolveExport(requested_module, name, must_resolve,
+ resolve_set);
+}
+
+MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
+ Handle<String> name, bool must_resolve,
+ Module::ResolveSet* resolve_set) {
+ Isolate* isolate = module->GetIsolate();
+ Handle<Object> object(module->exports()->Lookup(name), isolate);
+ if (object->IsCell()) {
+ // Already resolved (e.g. because it's a local export).
+ return Handle<Cell>::cast(object);
+ }
+
+ // Check for cycle before recursing.
+ {
+ // Attempt insertion with a null string set.
+ auto result = resolve_set->insert({module, nullptr});
+ UnorderedStringSet*& name_set = result.first->second;
+ if (result.second) {
+ // |module| wasn't in the map previously, so allocate a new name set.
+ Zone* zone = resolve_set->zone();
+ name_set =
+ new (zone->New(sizeof(UnorderedStringSet))) UnorderedStringSet(zone);
+ } else if (name_set->count(name)) {
+ // Cycle detected.
+ if (must_resolve) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewSyntaxError(MessageTemplate::kCyclicModuleDependency, name),
+ Cell);
+ }
+ return MaybeHandle<Cell>();
+ }
+ name_set->insert(name);
+ }
+
+ if (object->IsModuleInfoEntry()) {
+ // Not yet resolved indirect export.
+ Handle<ModuleInfoEntry> entry = Handle<ModuleInfoEntry>::cast(object);
+ int module_request = Smi::cast(entry->module_request())->value();
+ Handle<String> import_name(String::cast(entry->import_name()), isolate);
+
+ Handle<Cell> cell;
+ if (!ResolveImport(module, import_name, module_request, true, resolve_set)
+ .ToHandle(&cell)) {
+ DCHECK(isolate->has_pending_exception());
+ return MaybeHandle<Cell>();
+ }
+
+ // The export table may have changed but the entry in question should be
+ // unchanged.
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ DCHECK(exports->Lookup(name)->IsModuleInfoEntry());
+
+ exports = ObjectHashTable::Put(exports, name, cell);
+ module->set_exports(*exports);
+ return cell;
+ }
+
+ DCHECK(object->IsTheHole(isolate));
+ return Module::ResolveExportUsingStarExports(module, name, must_resolve,
+ resolve_set);
+}
+
+MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
+ Handle<Module> module, Handle<String> name, bool must_resolve,
+ Module::ResolveSet* resolve_set) {
+ Isolate* isolate = module->GetIsolate();
+ if (!name->Equals(isolate->heap()->default_string())) {
+ // Go through all star exports looking for the given name. If multiple star
+ // exports provide the name, make sure they all map it to the same cell.
+ Handle<Cell> unique_cell;
+ Handle<FixedArray> special_exports(module->info()->special_exports(),
+ isolate);
+ for (int i = 0, n = special_exports->length(); i < n; ++i) {
+ i::Handle<i::ModuleInfoEntry> entry(
+ i::ModuleInfoEntry::cast(special_exports->get(i)), isolate);
+ if (!entry->export_name()->IsUndefined(isolate)) {
+ continue; // Indirect export.
+ }
+ int module_request = Smi::cast(entry->module_request())->value();
+
+ Handle<Cell> cell;
+ if (ResolveImport(module, name, module_request, false, resolve_set)
+ .ToHandle(&cell)) {
+ if (unique_cell.is_null()) unique_cell = cell;
+ if (*unique_cell != *cell) {
+ THROW_NEW_ERROR(
+ isolate, NewSyntaxError(MessageTemplate::kAmbiguousExport, name),
+ Cell);
+ }
+ } else if (isolate->has_pending_exception()) {
+ return MaybeHandle<Cell>();
+ }
+ }
+
+ if (!unique_cell.is_null()) {
+ // Found a unique star export for this name.
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ DCHECK(exports->Lookup(name)->IsTheHole(isolate));
+ exports = ObjectHashTable::Put(exports, name, unique_cell);
+ module->set_exports(*exports);
+ return unique_cell;
+ }
+ }
+
+ // Unresolvable.
+ if (must_resolve) {
+ THROW_NEW_ERROR(isolate,
+ NewSyntaxError(MessageTemplate::kUnresolvableExport, name),
+ Cell);
+ }
+ return MaybeHandle<Cell>();
+}
+
+bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
+ v8::Module::ResolveCallback callback,
+ v8::Local<v8::Value> callback_data) {
+ // Already instantiated.
+ if (module->code()->IsJSFunction()) return true;
+
+ Isolate* isolate = module->GetIsolate();
+ Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(module->code()),
+ isolate);
+ Handle<JSFunction> function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared,
+ handle(Utils::OpenHandle(*context)->native_context(), isolate));
+ module->set_code(*function);
+
+ Handle<ModuleInfo> module_info(shared->scope_info()->ModuleDescriptorInfo(),
+ isolate);
+
+ // Set up local exports.
+ Handle<FixedArray> regular_exports(module_info->regular_exports(), isolate);
+ for (int i = 0, n = regular_exports->length(); i < n; i += 2) {
+ Handle<FixedArray> export_names(
+ FixedArray::cast(regular_exports->get(i + 1)), isolate);
+ CreateExport(module, export_names);
+ }
+
+ // Partially set up indirect exports.
+ // For each indirect export, we create the appropriate slot in the export
+ // table and store its ModuleInfoEntry there. When we later find the correct
+ // Cell in the module that actually provides the value, we replace the
+ // ModuleInfoEntry by that Cell (see ResolveExport).
+ Handle<FixedArray> special_exports(module_info->special_exports(), isolate);
+ for (int i = 0, n = special_exports->length(); i < n; ++i) {
+ Handle<ModuleInfoEntry> entry(
+ ModuleInfoEntry::cast(special_exports->get(i)), isolate);
+ Handle<Object> export_name(entry->export_name(), isolate);
+ if (export_name->IsUndefined(isolate)) continue; // Star export.
+ CreateIndirectExport(module, Handle<String>::cast(export_name), entry);
+ }
+
+ Handle<FixedArray> module_requests(module_info->module_requests(), isolate);
+ for (int i = 0, length = module_requests->length(); i < length; ++i) {
+ Handle<String> specifier(String::cast(module_requests->get(i)), isolate);
+ v8::Local<v8::Module> api_requested_module;
+ // TODO(adamk): Revisit these failure cases once d8 knows how to
+ // persist a module_map across multiple top-level module loads, as
+ // the current module is left in a "half-instantiated" state.
+ if (!callback(context, v8::Utils::ToLocal(specifier),
+ v8::Utils::ToLocal(module), callback_data)
+ .ToLocal(&api_requested_module)) {
+ // TODO(adamk): Give this a better error message. But this is a
+ // misuse of the API anyway.
+ isolate->ThrowIllegalOperation();
+ return false;
+ }
+ Handle<Module> requested_module = Utils::OpenHandle(*api_requested_module);
+ module->requested_modules()->set(i, *requested_module);
+ if (!Instantiate(requested_module, context, callback, callback_data)) {
+ return false;
+ }
+ }
+
+ Zone zone(isolate->allocator());
+
+ // Resolve imports.
+ Handle<FixedArray> regular_imports(module_info->regular_imports(), isolate);
+ for (int i = 0, n = regular_imports->length(); i < n; ++i) {
+ Handle<ModuleInfoEntry> entry(
+ ModuleInfoEntry::cast(regular_imports->get(i)), isolate);
+ Handle<String> name(String::cast(entry->import_name()), isolate);
+ int module_request = Smi::cast(entry->module_request())->value();
+ ResolveSet resolve_set(&zone);
+ if (ResolveImport(module, name, module_request, true, &resolve_set)
+ .is_null()) {
+ return false;
+ }
+ }
+
+ // Resolve indirect exports.
+ for (int i = 0, n = special_exports->length(); i < n; ++i) {
+ Handle<ModuleInfoEntry> entry(
+ ModuleInfoEntry::cast(special_exports->get(i)), isolate);
+ Handle<Object> name(entry->export_name(), isolate);
+ if (name->IsUndefined(isolate)) continue; // Star export.
+ ResolveSet resolve_set(&zone);
+ if (ResolveExport(module, Handle<String>::cast(name), true, &resolve_set)
+ .is_null()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+MaybeHandle<Object> Module::Evaluate(Handle<Module> module) {
+ DCHECK(module->code()->IsJSFunction()); // Instantiated.
+
+ Isolate* isolate = module->GetIsolate();
+
+ // Each module can only be evaluated once.
+ if (module->evaluated()) return isolate->factory()->undefined_value();
+ module->set_evaluated(true);
+
+ // Initialization.
+ Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
+ DCHECK_EQ(MODULE_SCOPE, function->shared()->scope_info()->scope_type());
+ Handle<Object> receiver = isolate->factory()->undefined_value();
+ Handle<Object> argv[] = {module};
+ Handle<Object> generator;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, generator,
+ Execution::Call(isolate, function, receiver, arraysize(argv), argv),
+ Object);
+
+ // Recursion.
+ Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
+ for (int i = 0, length = requested_modules->length(); i < length; ++i) {
+ Handle<Module> import(Module::cast(requested_modules->get(i)), isolate);
+ RETURN_ON_EXCEPTION(isolate, Evaluate(import), Object);
+ }
+
+ // Evaluation of module body.
+ Handle<JSFunction> resume(
+ isolate->native_context()->generator_next_internal(), isolate);
+ return Execution::Call(isolate, resume, generator, 0, nullptr);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index b7c67030c5..fcc1f9457b 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -22,7 +22,7 @@
#include "src/property-details.h"
#include "src/unicode-decoder.h"
#include "src/unicode.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h" // NOLINT
@@ -57,6 +57,7 @@
// - JSCollection
// - JSSet
// - JSMap
+// - JSStringIterator
// - JSSetIterator
// - JSMapIterator
// - JSWeakCollection
@@ -76,6 +77,7 @@
// - BytecodeArray
// - FixedArray
// - DescriptorArray
+// - FrameArray
// - LiteralsArray
// - HashTable
// - Dictionary
@@ -93,6 +95,8 @@
// - TemplateList
// - TransitionArray
// - ScopeInfo
+// - ModuleInfoEntry
+// - ModuleInfo
// - ScriptContextTable
// - WeakFixedArray
// - FixedDoubleArray
@@ -150,6 +154,7 @@
// - BreakPointInfo
// - CodeCache
// - PrototypeInfo
+// - Module
// - WeakCell
//
// Formats of Object*:
@@ -392,8 +397,10 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(TYPE_FEEDBACK_INFO_TYPE) \
V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
V(BOX_TYPE) \
+ V(PROMISE_CONTAINER_TYPE) \
V(PROTOTYPE_INFO_TYPE) \
- V(SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE) \
+ V(CONTEXT_EXTENSION_TYPE) \
+ V(MODULE_TYPE) \
\
V(FIXED_ARRAY_TYPE) \
V(FIXED_DOUBLE_ARRAY_TYPE) \
@@ -409,7 +416,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_ARGUMENTS_TYPE) \
V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
V(JS_GENERATOR_OBJECT_TYPE) \
- V(JS_MODULE_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
V(JS_GLOBAL_PROXY_TYPE) \
V(JS_API_OBJECT_TYPE) \
@@ -428,6 +434,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_PROMISE_TYPE) \
V(JS_REGEXP_TYPE) \
V(JS_ERROR_TYPE) \
+ V(JS_STRING_ITERATOR_TYPE) \
\
V(JS_BOUND_FUNCTION_TYPE) \
V(JS_FUNCTION_TYPE) \
@@ -496,6 +503,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
// manually.
#define STRUCT_LIST(V) \
V(BOX, Box, box) \
+ V(PROMISE_CONTAINER, PromiseContainer, promise_container) \
V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
@@ -511,9 +519,8 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(DEBUG_INFO, DebugInfo, debug_info) \
V(BREAK_POINT_INFO, BreakPointInfo, break_point_info) \
V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
- V(SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION, \
- SloppyBlockWithEvalContextExtension, \
- sloppy_block_with_eval_context_extension)
+ V(MODULE, Module, module) \
+ V(CONTEXT_EXTENSION, ContextExtension, context_extension)
// We use the full 8 bits of the instance_type field to encode heap object
// instance types. The high-order bit (bit 7) is set if the object is not a
@@ -678,6 +685,7 @@ enum InstanceType {
TYPE_FEEDBACK_INFO_TYPE,
ALIASED_ARGUMENTS_ENTRY_TYPE,
BOX_TYPE,
+ PROMISE_CONTAINER_TYPE,
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
FIXED_ARRAY_TYPE,
@@ -687,7 +695,8 @@ enum InstanceType {
TRANSITION_ARRAY_TYPE,
PROPERTY_CELL_TYPE,
PROTOTYPE_INFO_TYPE,
- SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE,
+ CONTEXT_EXTENSION_TYPE,
+ MODULE_TYPE,
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
@@ -708,7 +717,6 @@ enum InstanceType {
JS_ARGUMENTS_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JS_GENERATOR_OBJECT_TYPE,
- JS_MODULE_TYPE,
JS_ARRAY_TYPE,
JS_ARRAY_BUFFER_TYPE,
JS_TYPED_ARRAY_TYPE,
@@ -722,6 +730,7 @@ enum InstanceType {
JS_PROMISE_TYPE,
JS_REGEXP_TYPE,
JS_ERROR_TYPE,
+ JS_STRING_ITERATOR_TYPE,
JS_BOUND_FUNCTION_TYPE,
JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
@@ -789,7 +798,6 @@ std::ostream& operator<<(std::ostream& os, InstanceType instance_type);
V(FAST_PROPERTIES_SUB_TYPE) \
V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE) \
V(HANDLER_TABLE_SUB_TYPE) \
- V(INTRINSIC_FUNCTION_NAMES_SUB_TYPE) \
V(JS_COLLECTION_SUB_TYPE) \
V(JS_WEAK_COLLECTION_SUB_TYPE) \
V(LITERALS_ARRAY_SUB_TYPE) \
@@ -862,7 +870,7 @@ enum class ComparisonResult {
INLINE(static type* cast(Object* object)); \
INLINE(static const type* cast(const Object* object));
-
+class AbstractCode;
class AccessorPair;
class AllocationSite;
class AllocationSiteCreationContext;
@@ -878,6 +886,9 @@ class LayoutDescriptor;
class LiteralsArray;
class LookupIterator;
class FieldType;
+class ModuleDescriptor;
+class ModuleInfoEntry;
+class ModuleInfo;
class ObjectHashTable;
class ObjectVisitor;
class PropertyCell;
@@ -961,6 +972,7 @@ template <class C> inline bool Is(Object* obj);
V(JSGeneratorObject) \
V(Map) \
V(DescriptorArray) \
+ V(FrameArray) \
V(TransitionArray) \
V(LiteralsArray) \
V(TypeFeedbackMetadata) \
@@ -977,6 +989,8 @@ template <class C> inline bool Is(Object* obj);
V(ScriptContextTable) \
V(NativeContext) \
V(ScopeInfo) \
+ V(ModuleInfoEntry) \
+ V(ModuleInfo) \
V(JSBoundFunction) \
V(JSFunction) \
V(Code) \
@@ -998,6 +1012,7 @@ template <class C> inline bool Is(Object* obj);
V(JSProxy) \
V(JSError) \
V(JSPromise) \
+ V(JSStringIterator) \
V(JSSet) \
V(JSMap) \
V(JSSetIterator) \
@@ -1287,7 +1302,8 @@ class Object {
MUST_USE_RESULT static MaybeHandle<Object> InstanceOf(
Isolate* isolate, Handle<Object> object, Handle<Object> callable);
- MUST_USE_RESULT static MaybeHandle<Object> GetProperty(LookupIterator* it);
+ V8_EXPORT_PRIVATE MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
+ LookupIterator* it);
// ES6 [[Set]] (when passed DONT_THROW)
// Invariants for this and related functions (unless stated otherwise):
@@ -1313,10 +1329,6 @@ class Object {
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
StoreFromKeyed store_mode);
- MUST_USE_RESULT static MaybeHandle<Object> ReadAbsentProperty(
- LookupIterator* it);
- MUST_USE_RESULT static MaybeHandle<Object> ReadAbsentProperty(
- Isolate* isolate, Handle<Object> receiver, Handle<Object> name);
MUST_USE_RESULT static Maybe<bool> CannotCreateProperty(
Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
Handle<Object> value, ShouldThrow should_throw);
@@ -1848,6 +1860,8 @@ enum class KeyCollectionMode {
static_cast<int>(v8::KeyCollectionMode::kIncludePrototypes)
};
+enum class AllocationSiteUpdateMode { kUpdate, kCheckOnly };
+
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
class JSReceiver: public HeapObject {
@@ -1952,7 +1966,7 @@ class JSReceiver: public HeapObject {
PropertyDescriptor* desc, PropertyDescriptor* current,
ShouldThrow should_throw, Handle<Name> property_name = Handle<Name>());
- MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
+ V8_EXPORT_PRIVATE MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
PropertyDescriptor* desc);
MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
@@ -2060,7 +2074,7 @@ class JSObject: public JSReceiver {
// [elements]: The elements (properties with names that are integers).
//
// Elements can be in two general modes: fast and slow. Each mode
- // corrensponds to a set of object representations of elements that
+ // corresponds to a set of object representations of elements that
// have something in common.
//
// In the fast mode elements is a FixedArray and so each element can
@@ -2298,7 +2312,9 @@ class JSObject: public JSReceiver {
}
// These methods do not perform access checks!
- static void UpdateAllocationSite(Handle<JSObject> object,
+ template <AllocationSiteUpdateMode update_or_check =
+ AllocationSiteUpdateMode::kUpdate>
+ static bool UpdateAllocationSite(Handle<JSObject> object,
ElementsKind to_kind);
// Lookup interceptors are used for handling properties controlled by host
@@ -2604,6 +2620,10 @@ class JSDataPropertyDescriptor: public JSObject {
// as specified by ES6 section 25.1.1.3 The IteratorResult Interface
class JSIteratorResult: public JSObject {
public:
+ DECL_ACCESSORS(value, Object)
+
+ DECL_ACCESSORS(done, Object)
+
// Offsets of object fields.
static const int kValueOffset = JSObject::kHeaderSize;
static const int kDoneOffset = kValueOffset + kPointerSize;
@@ -2895,7 +2915,6 @@ class WeakFixedArray : public FixedArray {
DISALLOW_IMPLICIT_CONSTRUCTORS(WeakFixedArray);
};
-
// Generic array grows dynamically with O(1) amortized insertion.
class ArrayList : public FixedArray {
public:
@@ -2925,6 +2944,82 @@ class ArrayList : public FixedArray {
DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
};
+#define FRAME_ARRAY_FIELD_LIST(V) \
+ V(WasmObject, Object) \
+ V(WasmFunctionIndex, Smi) \
+ V(Receiver, Object) \
+ V(Function, JSFunction) \
+ V(Code, AbstractCode) \
+ V(Offset, Smi) \
+ V(Flags, Smi)
+
+// Container object for data collected during simple stack trace captures.
+class FrameArray : public FixedArray {
+ public:
+#define DECLARE_FRAME_ARRAY_ACCESSORS(name, type) \
+ inline type* name(int frame_ix) const; \
+ inline void Set##name(int frame_ix, type* value);
+ FRAME_ARRAY_FIELD_LIST(DECLARE_FRAME_ARRAY_ACCESSORS)
+#undef DECLARE_FRAME_ARRAY_ACCESSORS
+
+ inline bool IsWasmFrame(int frame_ix) const;
+ inline int FrameCount() const;
+
+ void ShrinkToFit();
+
+ // Flags.
+ static const int kIsWasmFrame = 1 << 0;
+ static const int kIsStrict = 1 << 1;
+ static const int kForceConstructor = 1 << 2;
+
+ static Handle<FrameArray> AppendJSFrame(Handle<FrameArray> in,
+ Handle<Object> receiver,
+ Handle<JSFunction> function,
+ Handle<AbstractCode> code, int offset,
+ int flags);
+ static Handle<FrameArray> AppendWasmFrame(Handle<FrameArray> in,
+ Handle<Object> wasm_object,
+ int wasm_function_index,
+ Handle<AbstractCode> code,
+ int offset, int flags);
+
+ DECLARE_CAST(FrameArray)
+
+ private:
+ // The underlying fixed array embodies a captured stack trace. Frame i
+ // occupies indices
+ //
+ // kFirstIndex + 1 + [i * kElementsPerFrame, (i + 1) * kElementsPerFrame[,
+ //
+ // with internal offsets as below:
+
+ static const int kWasmObjectOffset = 0;
+ static const int kWasmFunctionIndexOffset = 1;
+
+ static const int kReceiverOffset = 0;
+ static const int kFunctionOffset = 1;
+
+ static const int kCodeOffset = 2;
+ static const int kOffsetOffset = 3;
+
+ static const int kFlagsOffset = 4;
+
+ static const int kElementsPerFrame = 5;
+
+ // Array layout indices.
+
+ static const int kFrameCountIndex = 0;
+ static const int kFirstIndex = 1;
+
+ static int LengthFor(int frame_count) {
+ return kFirstIndex + frame_count * kElementsPerFrame;
+ }
+
+ static Handle<FrameArray> EnsureSpace(Handle<FrameArray> array, int length);
+
+ friend class Factory;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FrameArray);
+};
// DescriptorArrays are fixed arrays used to hold instance descriptors.
// The format of the these objects is:
@@ -3410,7 +3505,8 @@ class StringTable: public HashTable<StringTable,
public:
// Find string in the string table. If it is not there yet, it is
// added. The return value is the string found.
- static Handle<String> LookupString(Isolate* isolate, Handle<String> key);
+ V8_EXPORT_PRIVATE static Handle<String> LookupString(Isolate* isolate,
+ Handle<String> key);
static Handle<String> LookupKey(Isolate* isolate, HashTableKey* key);
static String* LookupKeyIfExists(Isolate* isolate, HashTableKey* key);
@@ -4236,6 +4332,8 @@ class ScopeInfo : public FixedArray {
// Return the function_name if present.
String* FunctionName();
+ ModuleInfo* ModuleDescriptorInfo();
+
// Return the name of the given parameter.
String* ParameterName(int var);
@@ -4279,15 +4377,11 @@ class ScopeInfo : public FixedArray {
VariableMode* mode, InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
- // Similar to ContextSlotIndex() but this method searches only among
- // global slots of the serialized scope info. Returns the context slot index
- // for a given slot name if the slot is present; otherwise returns a
- // value < 0. The name must be an internalized string. If the slot is present
- // and mode != NULL, sets *mode to the corresponding mode for that variable.
- static int ContextGlobalSlotIndex(Handle<ScopeInfo> scope_info,
- Handle<String> name, VariableMode* mode,
- InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag);
+ // Lookup metadata of a MODULE-allocated variable. Return a negative value if
+ // there is no module variable with the given name.
+ int ModuleIndex(Handle<String> name, VariableMode* mode,
+ InitializationFlag* init_flag,
+ MaybeAssignedFlag* maybe_assigned_flag);
// Lookup the name of a certain context slot by its index.
String* ContextSlotName(int slot_index);
@@ -4301,7 +4395,7 @@ class ScopeInfo : public FixedArray {
// slot index if the function name is present and context-allocated (named
// function expressions, only), otherwise returns a value < 0. The name
// must be an internalized string.
- int FunctionContextSlotIndex(String* name, VariableMode* mode);
+ int FunctionContextSlotIndex(String* name);
// Lookup support for serialized scope info. Returns the receiver context
// slot index if scope has a "this" binding, and the binding is
@@ -4310,7 +4404,27 @@ class ScopeInfo : public FixedArray {
FunctionKind function_kind();
- static Handle<ScopeInfo> Create(Isolate* isolate, Zone* zone, Scope* scope);
+ // Returns true if this ScopeInfo is linked to a outer ScopeInfo.
+ bool HasOuterScopeInfo();
+
+ // Returns true if this ScopeInfo was created for a debug-evaluate scope.
+ bool IsDebugEvaluateScope();
+
+ // Can be used to mark a ScopeInfo that looks like a with-scope as actually
+ // being a debug-evaluate scope.
+ void SetIsDebugEvaluateScope();
+
+ // Return the outer ScopeInfo if present.
+ ScopeInfo* OuterScopeInfo();
+
+#ifdef DEBUG
+ bool Equals(ScopeInfo* other) const;
+#endif
+
+ static Handle<ScopeInfo> Create(Isolate* isolate, Zone* zone, Scope* scope,
+ MaybeHandle<ScopeInfo> outer_scope);
+ static Handle<ScopeInfo> CreateForWithScope(
+ Isolate* isolate, MaybeHandle<ScopeInfo> outer_scope);
static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
// Serializes empty scope info.
@@ -4322,18 +4436,16 @@ class ScopeInfo : public FixedArray {
// The layout of the static part of a ScopeInfo is as follows. Each entry is
// numeric and occupies one array slot.
- // 1. A set of properties of the scope
- // 2. The number of parameters. This only applies to function scopes. For
- // non-function scopes this is 0.
- // 3. The number of non-parameter variables allocated on the stack.
- // 4. The number of non-parameter and parameter variables allocated in the
- // context.
+// 1. A set of properties of the scope.
+// 2. The number of parameters. For non-function scopes this is 0.
+// 3. The number of non-parameter variables allocated on the stack.
+// 4. The number of non-parameter and parameter variables allocated in the
+// context.
#define FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(V) \
V(Flags) \
V(ParameterCount) \
V(StackLocalCount) \
- V(ContextLocalCount) \
- V(ContextGlobalCount)
+ V(ContextLocalCount)
#define FIELD_ACCESSORS(name) \
inline void Set##name(int value); \
@@ -4350,7 +4462,7 @@ class ScopeInfo : public FixedArray {
private:
// The layout of the variable part of a ScopeInfo is as follows:
- // 1. ParameterEntries:
+ // 1. ParameterNames:
// This part stores the names of the parameters for function scopes. One
// slot is used per parameter, so in total this part occupies
// ParameterCount() slots in the array. For other scopes than function
@@ -4358,40 +4470,48 @@ class ScopeInfo : public FixedArray {
// 2. StackLocalFirstSlot:
// Index of a first stack slot for stack local. Stack locals belonging to
// this scope are located on a stack at slots starting from this index.
- // 3. StackLocalEntries:
+ // 3. StackLocalNames:
// Contains the names of local variables that are allocated on the stack,
- // in increasing order of the stack slot index. First local variable has
- // a stack slot index defined in StackLocalFirstSlot (point 2 above).
+ // in increasing order of the stack slot index. First local variable has a
+ // stack slot index defined in StackLocalFirstSlot (point 2 above).
// One slot is used per stack local, so in total this part occupies
// StackLocalCount() slots in the array.
- // 4. ContextLocalNameEntries:
+ // 4. ContextLocalNames:
// Contains the names of local variables and parameters that are allocated
// in the context. They are stored in increasing order of the context slot
// index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
// context local, so in total this part occupies ContextLocalCount() slots
// in the array.
- // 5. ContextLocalInfoEntries:
+ // 5. ContextLocalInfos:
// Contains the variable modes and initialization flags corresponding to
- // the context locals in ContextLocalNameEntries. One slot is used per
+ // the context locals in ContextLocalNames. One slot is used per
// context local, so in total this part occupies ContextLocalCount()
// slots in the array.
- // 6. RecieverEntryIndex:
+ // 6. ReceiverInfo:
// If the scope binds a "this" value, one slot is reserved to hold the
// context or stack slot index for the variable.
- // 7. FunctionNameEntryIndex:
+ // 7. FunctionNameInfo:
// If the scope belongs to a named function expression this part contains
// information about the function variable. It always occupies two array
// slots: a. The name of the function variable.
// b. The context or stack slot index for the variable.
- int ParameterEntriesIndex();
+ // 8. OuterScopeInfoIndex:
+ // The outer scope's ScopeInfo or the hole if there's none.
+ // 9. ModuleInfo, ModuleVariableCount, and ModuleVariables:
+ // For a module scope, this part contains the ModuleInfo, the number of
+ // MODULE-allocated variables, and the metadata of those variables. For
+ // non-module scopes it is empty.
+ int ParameterNamesIndex();
int StackLocalFirstSlotIndex();
- int StackLocalEntriesIndex();
- int ContextLocalNameEntriesIndex();
- int ContextGlobalNameEntriesIndex();
- int ContextLocalInfoEntriesIndex();
- int ContextGlobalInfoEntriesIndex();
- int ReceiverEntryIndex();
- int FunctionNameEntryIndex();
+ int StackLocalNamesIndex();
+ int ContextLocalNamesIndex();
+ int ContextLocalInfosIndex();
+ int ReceiverInfoIndex();
+ int FunctionNameInfoIndex();
+ int OuterScopeInfoIndex();
+ int ModuleInfoIndex();
+ int ModuleVariableCountIndex();
+ int ModuleVariablesIndex();
int Lookup(Handle<String> name, int start, int end, VariableMode* mode,
VariableLocation* location, InitializationFlag* init_flag,
@@ -4416,26 +4536,77 @@ class ScopeInfo : public FixedArray {
: public BitField<bool, ReceiverVariableField::kNext, 1> {};
class FunctionVariableField
: public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
- class FunctionVariableMode
- : public BitField<VariableMode, FunctionVariableField::kNext, 3> {};
- class AsmModuleField : public BitField<bool, FunctionVariableMode::kNext, 1> {
- };
+ class AsmModuleField
+ : public BitField<bool, FunctionVariableField::kNext, 1> {};
class AsmFunctionField : public BitField<bool, AsmModuleField::kNext, 1> {};
class HasSimpleParametersField
: public BitField<bool, AsmFunctionField::kNext, 1> {};
class FunctionKindField
- : public BitField<FunctionKind, HasSimpleParametersField::kNext, 9> {};
+ : public BitField<FunctionKind, HasSimpleParametersField::kNext, 10> {};
+ class HasOuterScopeInfoField
+ : public BitField<bool, FunctionKindField::kNext, 1> {};
+ class IsDebugEvaluateScopeField
+ : public BitField<bool, HasOuterScopeInfoField::kNext, 1> {};
- // BitFields representing the encoded information for context locals in the
- // ContextLocalInfoEntries part.
- class ContextLocalMode: public BitField<VariableMode, 0, 3> {};
- class ContextLocalInitFlag: public BitField<InitializationFlag, 3, 1> {};
- class ContextLocalMaybeAssignedFlag
- : public BitField<MaybeAssignedFlag, 4, 1> {};
+ // Properties of variables.
+ class VariableModeField : public BitField<VariableMode, 0, 3> {};
+ class InitFlagField : public BitField<InitializationFlag, 3, 1> {};
+ class MaybeAssignedFlagField : public BitField<MaybeAssignedFlag, 4, 1> {};
friend class ScopeIterator;
};
+class ModuleInfoEntry : public FixedArray {
+ public:
+ DECLARE_CAST(ModuleInfoEntry)
+ static Handle<ModuleInfoEntry> New(Isolate* isolate,
+ Handle<Object> export_name,
+ Handle<Object> local_name,
+ Handle<Object> import_name,
+ Handle<Object> module_request);
+ inline Object* export_name() const;
+ inline Object* local_name() const;
+ inline Object* import_name() const;
+ inline Object* module_request() const;
+
+ private:
+ friend class Factory;
+ enum {
+ kExportNameIndex,
+ kLocalNameIndex,
+ kImportNameIndex,
+ kModuleRequestIndex,
+ kLength
+ };
+};
+
+// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
+class ModuleInfo : public FixedArray {
+ public:
+ DECLARE_CAST(ModuleInfo)
+ static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
+ ModuleDescriptor* descr);
+ inline FixedArray* module_requests() const;
+ inline FixedArray* special_exports() const;
+ inline FixedArray* regular_exports() const;
+ inline FixedArray* namespace_imports() const;
+ inline FixedArray* regular_imports() const;
+
+#ifdef DEBUG
+ inline bool Equals(ModuleInfo* other) const;
+#endif
+
+ private:
+ friend class Factory;
+ enum {
+ kModuleRequestsIndex,
+ kSpecialExportsIndex,
+ kRegularExportsIndex,
+ kNamespaceImportsIndex,
+ kRegularImportsIndex,
+ kLength
+ };
+};
// The cache for maps used by normalized (dictionary mode) objects.
// Such maps do not have property descriptors, so a typical program
@@ -4487,6 +4658,9 @@ class HandlerTable : public FixedArray {
// catching are part of a desugaring and should therefore not
// be visible to the user (we won't notify the debugger of such
// exceptions).
+ ASYNC_AWAIT, // The exception will be caught and cause a promise rejection
+ // in the desugaring of an async function, so special
+ // async/await handling in the debugger can take place.
};
// Getters for handler table based on ranges.
@@ -4539,8 +4713,8 @@ class HandlerTable : public FixedArray {
static const int kReturnEntrySize = 2;
// Encoding of the {handler} field.
- class HandlerPredictionField : public BitField<CatchPrediction, 0, 2> {};
- class HandlerOffsetField : public BitField<int, 2, 30> {};
+ class HandlerPredictionField : public BitField<CatchPrediction, 0, 3> {};
+ class HandlerOffsetField : public BitField<int, 3, 29> {};
};
// ByteArray represents fixed sized byte arrays. Used for the relocation info
@@ -4684,6 +4858,13 @@ class BytecodeArray : public FixedArrayBase {
// Maximal length of a single BytecodeArray.
static const int kMaxLength = kMaxSize - kHeaderSize;
+ static const int kPointerFieldsBeginOffset = kConstantPoolOffset;
+ static const int kPointerFieldsEndOffset = kFrameSizeOffset;
+
+ typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
+ kPointerFieldsEndOffset, kHeaderSize>
+ MarkingBodyDescriptor;
+
class BodyDescriptor;
private:
@@ -4721,6 +4902,7 @@ class FreeSpace: public HeapObject {
// Size is smi tagged when it is stored.
static const int kSizeOffset = HeapObject::kHeaderSize;
static const int kNextOffset = POINTER_SIZE_ALIGN(kSizeOffset + kPointerSize);
+ static const int kSize = kNextOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
@@ -6226,6 +6408,7 @@ class Map: public HeapObject {
inline bool IsJSFunctionMap();
inline bool IsStringMap();
inline bool IsJSProxyMap();
+ inline bool IsModuleMap();
inline bool IsJSGlobalProxyMap();
inline bool IsJSGlobalObjectMap();
inline bool IsJSTypedArrayMap();
@@ -6482,6 +6665,34 @@ class Struct: public HeapObject {
DECLARE_CAST(Struct)
};
+// A container struct to hold state required for
+// PromiseResolveThenableJob. {before, after}_debug_event could
+// potentially be undefined if the debugger is turned off.
+class PromiseContainer : public Struct {
+ public:
+ DECL_ACCESSORS(thenable, JSReceiver)
+ DECL_ACCESSORS(then, JSReceiver)
+ DECL_ACCESSORS(resolve, JSFunction)
+ DECL_ACCESSORS(reject, JSFunction)
+ DECL_ACCESSORS(before_debug_event, Object)
+ DECL_ACCESSORS(after_debug_event, Object)
+
+ static const int kThenableOffset = Struct::kHeaderSize;
+ static const int kThenOffset = kThenableOffset + kPointerSize;
+ static const int kResolveOffset = kThenOffset + kPointerSize;
+ static const int kRejectOffset = kResolveOffset + kPointerSize;
+ static const int kBeforeDebugEventOffset = kRejectOffset + kPointerSize;
+ static const int kAfterDebugEventOffset =
+ kBeforeDebugEventOffset + kPointerSize;
+ static const int kSize = kAfterDebugEventOffset + kPointerSize;
+
+ DECLARE_CAST(PromiseContainer)
+ DECLARE_PRINTER(PromiseContainer)
+ DECLARE_VERIFIER(PromiseContainer)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseContainer);
+};
// A simple one-element struct, useful where smis need to be boxed.
class Box : public Struct {
@@ -6559,28 +6770,29 @@ class PrototypeInfo : public Struct {
// Pair used to store both a ScopeInfo and an extension object in the extension
-// slot of a block context. Needed in the rare case where a declaration block
-// scope (a "varblock" as used to desugar parameter destructuring) also contains
-// a sloppy direct eval. (In no other case both are needed at the same time.)
-class SloppyBlockWithEvalContextExtension : public Struct {
+// slot of a block, catch, or with context. Needed in the rare case where a
+// declaration block scope (a "varblock" as used to desugar parameter
+// destructuring) also contains a sloppy direct eval, or for with and catch
+// scopes. (In no other case both are needed at the same time.)
+class ContextExtension : public Struct {
public:
// [scope_info]: Scope info.
DECL_ACCESSORS(scope_info, ScopeInfo)
// [extension]: Extension object.
- DECL_ACCESSORS(extension, JSObject)
+ DECL_ACCESSORS(extension, Object)
- DECLARE_CAST(SloppyBlockWithEvalContextExtension)
+ DECLARE_CAST(ContextExtension)
// Dispatched behavior.
- DECLARE_PRINTER(SloppyBlockWithEvalContextExtension)
- DECLARE_VERIFIER(SloppyBlockWithEvalContextExtension)
+ DECLARE_PRINTER(ContextExtension)
+ DECLARE_VERIFIER(ContextExtension)
static const int kScopeInfoOffset = HeapObject::kHeaderSize;
static const int kExtensionOffset = kScopeInfoOffset + kPointerSize;
static const int kSize = kExtensionOffset + kPointerSize;
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SloppyBlockWithEvalContextExtension);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ContextExtension);
};
@@ -6803,6 +7015,15 @@ class Script: public Struct {
V(Array.prototype, push, ArrayPush) \
V(Array.prototype, pop, ArrayPop) \
V(Array.prototype, shift, ArrayShift) \
+ V(Date.prototype, getDate, DateGetDate) \
+ V(Date.prototype, getDay, DateGetDay) \
+ V(Date.prototype, getFullYear, DateGetFullYear) \
+ V(Date.prototype, getHours, DateGetHours) \
+ V(Date.prototype, getMilliseconds, DateGetMilliseconds) \
+ V(Date.prototype, getMinutes, DateGetMinutes) \
+ V(Date.prototype, getMonth, DateGetMonth) \
+ V(Date.prototype, getSeconds, DateGetSeconds) \
+ V(Date.prototype, getTime, DateGetTime) \
V(Function.prototype, apply, FunctionApply) \
V(Function.prototype, call, FunctionCall) \
V(Object.prototype, hasOwnProperty, ObjectHasOwnProperty) \
@@ -6847,6 +7068,10 @@ class Script: public Struct {
V(Math, clz32, MathClz32) \
V(Math, fround, MathFround) \
V(Math, trunc, MathTrunc) \
+ V(Number, isFinite, NumberIsFinite) \
+ V(Number, isInteger, NumberIsInteger) \
+ V(Number, isNaN, NumberIsNaN) \
+ V(Number, isSafeInteger, NumberIsSafeInteger) \
V(Number, parseInt, NumberParseInt) \
V(Number.prototype, toString, NumberToString)
@@ -6869,16 +7094,20 @@ enum BuiltinFunctionId {
kDataViewBuffer,
kDataViewByteLength,
kDataViewByteOffset,
+ kFunctionHasInstance,
kGlobalDecodeURI,
kGlobalDecodeURIComponent,
kGlobalEncodeURI,
kGlobalEncodeURIComponent,
kGlobalEscape,
kGlobalUnescape,
+ kGlobalIsFinite,
+ kGlobalIsNaN,
kTypedArrayByteLength,
kTypedArrayByteOffset,
kTypedArrayLength,
kSharedArrayBufferByteLength,
+ kStringIteratorNext,
};
@@ -6984,6 +7213,10 @@ class SharedFunctionInfo: public HeapObject {
// [scope_info]: Scope info.
DECL_ACCESSORS(scope_info, ScopeInfo)
+ // The outer scope info for the purpose of parsing this function, or the hole
+ // value if it isn't yet known.
+ DECL_ACCESSORS(outer_scope_info, HeapObject)
+
// [construct stub]: Code stub for constructing instances of this function.
DECL_ACCESSORS(construct_stub, Code)
@@ -7191,30 +7424,11 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that code for this function cannot be flushed.
DECL_BOOLEAN_ACCESSORS(dont_flush)
- // Indicates that this function is a generator.
- DECL_BOOLEAN_ACCESSORS(is_generator)
-
- // Indicates that this function is an async function.
- DECL_BOOLEAN_ACCESSORS(is_async)
-
- // Indicates that this function can be suspended, either via YieldExpressions
- // or AwaitExpressions.
- inline bool is_resumable() const;
-
- // Indicates that this function is an arrow function.
- DECL_BOOLEAN_ACCESSORS(is_arrow)
-
- // Indicates that this function is a concise method.
- DECL_BOOLEAN_ACCESSORS(is_concise_method)
-
- // Indicates that this function is a getter.
- DECL_BOOLEAN_ACCESSORS(is_getter_function)
-
- // Indicates that this function is a setter.
- DECL_BOOLEAN_ACCESSORS(is_setter_function)
-
- // Indicates that this function is a default constructor.
- DECL_BOOLEAN_ACCESSORS(is_default_constructor)
+ // Indicates that this is a constructor for a base class with instance fields.
+ DECL_BOOLEAN_ACCESSORS(requires_class_field_init)
+ // Indicates that this is a synthesized function to set up class instance
+ // fields.
+ DECL_BOOLEAN_ACCESSORS(is_class_field_initializer)
// Indicates that this function is an asm function.
DECL_BOOLEAN_ACCESSORS(asm_function)
@@ -7231,7 +7445,7 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that asm->wasm conversion failed and should not be re-attempted.
DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
- inline FunctionKind kind();
+ inline FunctionKind kind() const;
inline void set_kind(FunctionKind kind);
// Indicates whether or not the code in the shared function support
@@ -7331,11 +7545,12 @@ class SharedFunctionInfo: public HeapObject {
// Layout description.
// Pointer fields.
- static const int kNameOffset = HeapObject::kHeaderSize;
- static const int kCodeOffset = kNameOffset + kPointerSize;
- static const int kOptimizedCodeMapOffset = kCodeOffset + kPointerSize;
+ static const int kCodeOffset = HeapObject::kHeaderSize;
+ static const int kNameOffset = kCodeOffset + kPointerSize;
+ static const int kOptimizedCodeMapOffset = kNameOffset + kPointerSize;
static const int kScopeInfoOffset = kOptimizedCodeMapOffset + kPointerSize;
- static const int kConstructStubOffset = kScopeInfoOffset + kPointerSize;
+ static const int kOuterScopeInfoOffset = kScopeInfoOffset + kPointerSize;
+ static const int kConstructStubOffset = kOuterScopeInfoOffset + kPointerSize;
static const int kInstanceClassNameOffset =
kConstructStubOffset + kPointerSize;
static const int kFunctionDataOffset =
@@ -7457,9 +7672,12 @@ class SharedFunctionInfo: public HeapObject {
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
+ typedef FixedBodyDescriptor<kCodeOffset,
+ kLastPointerFieldOffset + kPointerSize, kSize>
+ BodyDescriptor;
typedef FixedBodyDescriptor<kNameOffset,
- kLastPointerFieldOffset + kPointerSize,
- kSize> BodyDescriptor;
+ kLastPointerFieldOffset + kPointerSize, kSize>
+ BodyDescriptorWeakCode;
// Bit positions in start_position_and_type.
// The source code start position is in the 30 most significant bits of
@@ -7491,38 +7709,19 @@ class SharedFunctionInfo: public HeapObject {
kDontFlush,
// byte 2
kFunctionKind,
- kIsArrow = kFunctionKind,
- kIsGenerator,
- kIsConciseMethod,
- kIsDefaultConstructor,
- kIsSubclassConstructor,
- kIsBaseConstructor,
- kIsGetterFunction,
- kIsSetterFunction,
+ // rest of byte 2 and first two bits of byte 3 are used by FunctionKind
// byte 3
- kIsAsyncFunction,
- kDeserialized,
+ kDeserialized = kFunctionKind + 10,
kIsDeclaration,
kIsAsmWasmBroken,
+ kRequiresClassFieldInit,
+ kIsClassFieldInitializer,
kCompilerHintsCount, // Pseudo entry
};
// kFunctionKind has to be byte-aligned
STATIC_ASSERT((kFunctionKind % kBitsPerByte) == 0);
-// Make sure that FunctionKind and byte 2 are in sync:
-#define ASSERT_FUNCTION_KIND_ORDER(functionKind, compilerFunctionKind) \
- STATIC_ASSERT(FunctionKind::functionKind == \
- 1 << (compilerFunctionKind - kFunctionKind))
- ASSERT_FUNCTION_KIND_ORDER(kArrowFunction, kIsArrow);
- ASSERT_FUNCTION_KIND_ORDER(kGeneratorFunction, kIsGenerator);
- ASSERT_FUNCTION_KIND_ORDER(kConciseMethod, kIsConciseMethod);
- ASSERT_FUNCTION_KIND_ORDER(kDefaultConstructor, kIsDefaultConstructor);
- ASSERT_FUNCTION_KIND_ORDER(kSubclassConstructor, kIsSubclassConstructor);
- ASSERT_FUNCTION_KIND_ORDER(kBaseConstructor, kIsBaseConstructor);
- ASSERT_FUNCTION_KIND_ORDER(kGetterFunction, kIsGetterFunction);
- ASSERT_FUNCTION_KIND_ORDER(kSetterFunction, kIsSetterFunction);
-#undef ASSERT_FUNCTION_KIND_ORDER
-
- class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 9> {};
+
+ class FunctionKindBits : public BitField<FunctionKind, kFunctionKind, 10> {};
class DeoptCountBits : public BitField<int, 0, 4> {};
class OptReenableTriesBits : public BitField<int, 4, 18> {};
@@ -7554,21 +7753,10 @@ class SharedFunctionInfo: public HeapObject {
static const int kHasDuplicateParametersBit =
kHasDuplicateParameters + kCompilerHintsSmiTagSize;
- static const int kIsArrowBit = kIsArrow + kCompilerHintsSmiTagSize;
- static const int kIsGeneratorBit = kIsGenerator + kCompilerHintsSmiTagSize;
- static const int kIsConciseMethodBit =
- kIsConciseMethod + kCompilerHintsSmiTagSize;
- static const int kIsAsyncFunctionBit =
- kIsAsyncFunction + kCompilerHintsSmiTagSize;
-
- static const int kAccessorFunctionBits =
- FunctionKind::kAccessorFunction
- << (kFunctionKind + kCompilerHintsSmiTagSize);
- static const int kClassConstructorBits =
- FunctionKind::kClassConstructor
- << (kFunctionKind + kCompilerHintsSmiTagSize);
- static const int kFunctionKindMaskBits = FunctionKindBits::kMask
- << kCompilerHintsSmiTagSize;
+ static const int kFunctionKindShift =
+ kFunctionKind + kCompilerHintsSmiTagSize;
+ static const int kAllFunctionKindBitsMask = FunctionKindBits::kMask
+ << kCompilerHintsSmiTagSize;
// Constants for optimizing codegen for strict mode function and
// native tests.
@@ -7687,6 +7875,100 @@ class JSGeneratorObject: public JSObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
};
+// A Module object is a mapping from export names to cells
+// This is still very much in flux.
+class Module : public Struct {
+ public:
+ DECLARE_CAST(Module)
+ DECLARE_VERIFIER(Module)
+ DECLARE_PRINTER(Module)
+
+ // The code representing this Module, either a
+ // SharedFunctionInfo or a JSFunction depending
+ // on whether it's been instantiated.
+ DECL_ACCESSORS(code, Object)
+
+ DECL_ACCESSORS(exports, ObjectHashTable)
+
+ // [[RequestedModules]]: Modules imported or re-exported by this module.
+ // Corresponds 1-to-1 to the module specifier strings in
+ // ModuleInfo::module_requests.
+ DECL_ACCESSORS(requested_modules, FixedArray)
+
+ // [[Evaluated]]: Whether this module has been evaluated. Modules
+ // are only evaluated a single time.
+ DECL_BOOLEAN_ACCESSORS(evaluated)
+
+ // Storage for [[Evaluated]]
+ DECL_INT_ACCESSORS(flags)
+
+ // Embedder-specified data
+ DECL_ACCESSORS(embedder_data, Object)
+
+ // Get the SharedFunctionInfo associated with the code.
+ inline SharedFunctionInfo* shared() const;
+
+ // Get the ModuleInfo associated with the code.
+ inline ModuleInfo* info() const;
+
+ // Compute a hash for this object.
+ inline uint32_t Hash() const;
+
+ // Implementation of spec operation ModuleDeclarationInstantiation.
+ // Returns false if an exception occurred during instantiation, true
+ // otherwise.
+ static MUST_USE_RESULT bool Instantiate(Handle<Module> module,
+ v8::Local<v8::Context> context,
+ v8::Module::ResolveCallback callback,
+ v8::Local<v8::Value> callback_data);
+
+ // Implementation of spec operation ModuleEvaluation.
+ static MUST_USE_RESULT MaybeHandle<Object> Evaluate(Handle<Module> module);
+
+ static Handle<Object> LoadExport(Handle<Module> module, Handle<String> name);
+ static void StoreExport(Handle<Module> module, Handle<String> name,
+ Handle<Object> value);
+
+ static Handle<Object> LoadImport(Handle<Module> module, Handle<String> name,
+ int module_request);
+
+ static const int kCodeOffset = HeapObject::kHeaderSize;
+ static const int kExportsOffset = kCodeOffset + kPointerSize;
+ static const int kRequestedModulesOffset = kExportsOffset + kPointerSize;
+ static const int kFlagsOffset = kRequestedModulesOffset + kPointerSize;
+ static const int kEmbedderDataOffset = kFlagsOffset + kPointerSize;
+ static const int kSize = kEmbedderDataOffset + kPointerSize;
+
+ private:
+ enum { kEvaluatedBit };
+
+ static void CreateExport(Handle<Module> module, Handle<FixedArray> names);
+ static void CreateIndirectExport(Handle<Module> module, Handle<String> name,
+ Handle<ModuleInfoEntry> entry);
+
+ // The [must_resolve] argument indicates whether or not an exception should be
+ // thrown in case the module does not provide an export named [name]
+ // (including when a cycle is detected). An exception is always thrown in the
+ // case of conflicting star exports.
+ //
+ // If [must_resolve] is true, a null result indicates an exception. If
+ // [must_resolve] is false, a null result may or may not indicate an
+ // exception (so check manually!).
+ class ResolveSet;
+ static MUST_USE_RESULT MaybeHandle<Cell> ResolveExport(
+ Handle<Module> module, Handle<String> name, bool must_resolve,
+ ResolveSet* resolve_set);
+ static MUST_USE_RESULT MaybeHandle<Cell> ResolveImport(
+ Handle<Module> module, Handle<String> name, int module_request,
+ bool must_resolve, ResolveSet* resolve_set);
+
+ // Helper for ResolveExport.
+ static MUST_USE_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
+ Handle<Module> module, Handle<String> name, bool must_resolve,
+ ResolveSet* resolve_set);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Module);
+};
// JSBoundFunction describes a bound function exotic object.
class JSBoundFunction : public JSObject {
@@ -8219,7 +8501,8 @@ class JSRegExp: public JSObject {
DECL_ACCESSORS(flags, Object)
DECL_ACCESSORS(source, Object)
- static MaybeHandle<JSRegExp> New(Handle<String> source, Flags flags);
+ V8_EXPORT_PRIVATE static MaybeHandle<JSRegExp> New(Handle<String> source,
+ Flags flags);
static Handle<JSRegExp> Copy(Handle<JSRegExp> regexp);
static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
@@ -8585,7 +8868,9 @@ class AllocationSite: public Struct {
inline bool SitePointsToLiteral();
- static void DigestTransitionFeedback(Handle<AllocationSite> site,
+ template <AllocationSiteUpdateMode update_or_check =
+ AllocationSiteUpdateMode::kUpdate>
+ static bool DigestTransitionFeedback(Handle<AllocationSite> site,
ElementsKind to_kind);
DECLARE_PRINTER(AllocationSite)
@@ -8612,6 +8897,10 @@ class AllocationSite: public Struct {
static const int kPointerFieldsBeginOffset = kTransitionInfoOffset;
static const int kPointerFieldsEndOffset = kWeakNextOffset;
+ typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
+ kPointerFieldsEndOffset, kSize>
+ MarkingBodyDescriptor;
+
// For other visitors, use the fixed body descriptor below.
typedef FixedBodyDescriptor<HeapObject::kHeaderSize, kSize, kSize>
BodyDescriptor;
@@ -8674,8 +8963,7 @@ class AliasedArgumentsEntry: public Struct {
enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
-
-class StringHasher {
+class V8_EXPORT_PRIVATE StringHasher {
public:
explicit inline StringHasher(int length, uint32_t seed);
@@ -9123,6 +9411,9 @@ class String: public Name {
static int IndexOf(Isolate* isolate, Handle<String> sub, Handle<String> pat,
int start_index);
+ static Object* LastIndexOf(Isolate* isolate, Handle<Object> receiver,
+ Handle<Object> search, Handle<Object> position);
+
// String equality operations.
inline bool Equals(String* other);
inline static bool Equals(Handle<String> one, Handle<String> two);
@@ -9295,7 +9586,7 @@ class String: public Name {
static bool SlowEquals(Handle<String> one, Handle<String> two);
// Slow case of AsArrayIndex.
- bool SlowAsArrayIndex(uint32_t* index);
+ V8_EXPORT_PRIVATE bool SlowAsArrayIndex(uint32_t* index);
// Compute and set the hash code.
uint32_t ComputeAndSetHash();
@@ -9860,9 +10151,6 @@ class PropertyCell : public HeapObject {
static const int kDependentCodeOffset = kValueOffset + kPointerSize;
static const int kSize = kDependentCodeOffset + kPointerSize;
- static const int kPointerFieldsBeginOffset = kValueOffset;
- static const int kPointerFieldsEndOffset = kSize;
-
typedef FixedBodyDescriptor<kValueOffset,
kSize,
kSize> BodyDescriptor;
@@ -10055,6 +10343,28 @@ class JSMap : public JSCollection {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
};
+class JSStringIterator : public JSObject {
+ public:
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSStringIterator)
+ DECLARE_VERIFIER(JSStringIterator)
+
+ DECLARE_CAST(JSStringIterator)
+
+ // [string]: the [[IteratedString]] internal field.
+ DECL_ACCESSORS(string, String)
+
+ // [index]: The [[StringIteratorNextIndex]] internal field.
+ inline int index() const;
+ inline void set_index(int value);
+
+ static const int kStringOffset = JSObject::kHeaderSize;
+ static const int kNextIndexOffset = kStringOffset + kPointerSize;
+ static const int kSize = kNextIndexOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSStringIterator);
+};
// OrderedHashTableIterator is an iterator that iterates over the keys and
// values of an OrderedHashTable.
@@ -10480,12 +10790,9 @@ class JSArray: public JSObject {
static const int kLengthOffset = JSObject::kHeaderSize;
static const int kSize = kLengthOffset + kPointerSize;
- // 600 * KB is the Page::kMaxRegularHeapObjectSize defined in spaces.h which
- // we do not want to include in objects.h
- // Note that Page::kMaxRegularHeapObjectSize has to be in sync with
- // kInitialMaxFastElementArray which is checked in a DCHECK in heap.cc.
static const int kInitialMaxFastElementArray =
- (600 * KB - FixedArray::kHeaderSize - kSize - AllocationMemento::kSize) /
+ (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - kSize -
+ AllocationMemento::kSize) /
kPointerSize;
private:
@@ -10684,8 +10991,10 @@ class InterceptorInfo: public Struct {
DECL_ACCESSORS(getter, Object)
DECL_ACCESSORS(setter, Object)
DECL_ACCESSORS(query, Object)
+ DECL_ACCESSORS(descriptor, Object)
DECL_ACCESSORS(deleter, Object)
DECL_ACCESSORS(enumerator, Object)
+ DECL_ACCESSORS(definer, Object)
DECL_ACCESSORS(data, Object)
DECL_BOOLEAN_ACCESSORS(can_intercept_symbols)
DECL_BOOLEAN_ACCESSORS(all_can_read)
@@ -10703,9 +11012,11 @@ class InterceptorInfo: public Struct {
static const int kGetterOffset = HeapObject::kHeaderSize;
static const int kSetterOffset = kGetterOffset + kPointerSize;
static const int kQueryOffset = kSetterOffset + kPointerSize;
- static const int kDeleterOffset = kQueryOffset + kPointerSize;
+ static const int kDescriptorOffset = kQueryOffset + kPointerSize;
+ static const int kDeleterOffset = kDescriptorOffset + kPointerSize;
static const int kEnumeratorOffset = kDeleterOffset + kPointerSize;
- static const int kDataOffset = kEnumeratorOffset + kPointerSize;
+ static const int kDefinerOffset = kEnumeratorOffset + kPointerSize;
+ static const int kDataOffset = kDefinerOffset + kPointerSize;
static const int kFlagsOffset = kDataOffset + kPointerSize;
static const int kSize = kFlagsOffset + kPointerSize;
diff --git a/deps/v8/src/ostreams.h b/deps/v8/src/ostreams.h
index 977b5c6f4a..dea751413e 100644
--- a/deps/v8/src/ostreams.h
+++ b/deps/v8/src/ostreams.h
@@ -13,6 +13,7 @@
#include "include/v8config.h"
#include "src/base/macros.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -33,7 +34,7 @@ class OFStreamBase : public std::streambuf {
// An output stream writing to a file.
-class OFStream : public std::ostream {
+class V8_EXPORT_PRIVATE OFStream : public std::ostream {
public:
explicit OFStream(FILE* f);
virtual ~OFStream();
diff --git a/deps/v8/src/parsing/duplicate-finder.cc b/deps/v8/src/parsing/duplicate-finder.cc
new file mode 100644
index 0000000000..6b57153f9b
--- /dev/null
+++ b/deps/v8/src/parsing/duplicate-finder.cc
@@ -0,0 +1,145 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/parsing/duplicate-finder.h"
+
+#include "src/conversions.h"
+#include "src/unicode-cache.h"
+
+namespace v8 {
+namespace internal {
+
+int DuplicateFinder::AddOneByteSymbol(Vector<const uint8_t> key, int value) {
+ return AddSymbol(key, true, value);
+}
+
+int DuplicateFinder::AddTwoByteSymbol(Vector<const uint16_t> key, int value) {
+ return AddSymbol(Vector<const uint8_t>::cast(key), false, value);
+}
+
+int DuplicateFinder::AddSymbol(Vector<const uint8_t> key, bool is_one_byte,
+ int value) {
+ uint32_t hash = Hash(key, is_one_byte);
+ byte* encoding = BackupKey(key, is_one_byte);
+ base::HashMap::Entry* entry = map_.LookupOrInsert(encoding, hash);
+ int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ entry->value =
+ reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
+ return old_value;
+}
+
+int DuplicateFinder::AddNumber(Vector<const uint8_t> key, int value) {
+ DCHECK(key.length() > 0);
+ // Quick check for already being in canonical form.
+ if (IsNumberCanonical(key)) {
+ return AddOneByteSymbol(key, value);
+ }
+
+ int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY;
+ double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
+ int length;
+ const char* string;
+ if (!std::isfinite(double_value)) {
+ string = "Infinity";
+ length = 8; // strlen("Infinity");
+ } else {
+ string = DoubleToCString(double_value,
+ Vector<char>(number_buffer_, kBufferSize));
+ length = StrLength(string);
+ }
+ return AddSymbol(
+ Vector<const byte>(reinterpret_cast<const byte*>(string), length), true,
+ value);
+}
+
+bool DuplicateFinder::IsNumberCanonical(Vector<const uint8_t> number) {
+ // Test for a safe approximation of number literals that are already
+ // in canonical form: max 15 digits, no leading zeroes, except an
+ // integer part that is a single zero, and no trailing zeros below
+ // the decimal point.
+ int pos = 0;
+ int length = number.length();
+ if (number.length() > 15) return false;
+ if (number[pos] == '0') {
+ pos++;
+ } else {
+ while (pos < length &&
+ static_cast<unsigned>(number[pos] - '0') <= ('9' - '0'))
+ pos++;
+ }
+ if (length == pos) return true;
+ if (number[pos] != '.') return false;
+ pos++;
+ bool invalid_last_digit = true;
+ while (pos < length) {
+ uint8_t digit = number[pos] - '0';
+ if (digit > '9' - '0') return false;
+ invalid_last_digit = (digit == 0);
+ pos++;
+ }
+ return !invalid_last_digit;
+}
+
+uint32_t DuplicateFinder::Hash(Vector<const uint8_t> key, bool is_one_byte) {
+ // Primitive hash function, almost identical to the one used
+ // for strings (except that it's seeded by the length and representation).
+ int length = key.length();
+ uint32_t hash = (length << 1) | (is_one_byte ? 1 : 0);
+ for (int i = 0; i < length; i++) {
+ uint32_t c = key[i];
+ hash = (hash + c) * 1025;
+ hash ^= (hash >> 6);
+ }
+ return hash;
+}
+
+bool DuplicateFinder::Match(void* first, void* second) {
+ // Decode lengths.
+ // Length + representation is encoded as base 128, most significant heptet
+ // first, with a 8th bit being non-zero while there are more heptets.
+ // The value encodes the number of bytes following, and whether the original
+ // was Latin1.
+ byte* s1 = reinterpret_cast<byte*>(first);
+ byte* s2 = reinterpret_cast<byte*>(second);
+ uint32_t length_one_byte_field = 0;
+ byte c1;
+ do {
+ c1 = *s1;
+ if (c1 != *s2) return false;
+ length_one_byte_field = (length_one_byte_field << 7) | (c1 & 0x7f);
+ s1++;
+ s2++;
+ } while ((c1 & 0x80) != 0);
+ int length = static_cast<int>(length_one_byte_field >> 1);
+ return memcmp(s1, s2, length) == 0;
+}
+
+byte* DuplicateFinder::BackupKey(Vector<const uint8_t> bytes,
+ bool is_one_byte) {
+ uint32_t one_byte_length = (bytes.length() << 1) | (is_one_byte ? 1 : 0);
+ backing_store_.StartSequence();
+ // Emit one_byte_length as base-128 encoded number, with the 7th bit set
+ // on the byte of every heptet except the last, least significant, one.
+ if (one_byte_length >= (1 << 7)) {
+ if (one_byte_length >= (1 << 14)) {
+ if (one_byte_length >= (1 << 21)) {
+ if (one_byte_length >= (1 << 28)) {
+ backing_store_.Add(
+ static_cast<uint8_t>((one_byte_length >> 28) | 0x80));
+ }
+ backing_store_.Add(
+ static_cast<uint8_t>((one_byte_length >> 21) | 0x80u));
+ }
+ backing_store_.Add(static_cast<uint8_t>((one_byte_length >> 14) | 0x80u));
+ }
+ backing_store_.Add(static_cast<uint8_t>((one_byte_length >> 7) | 0x80u));
+ }
+ backing_store_.Add(static_cast<uint8_t>(one_byte_length & 0x7f));
+
+ backing_store_.AddBlock(bytes);
+ return backing_store_.EndSequence().start();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/parsing/duplicate-finder.h b/deps/v8/src/parsing/duplicate-finder.h
new file mode 100644
index 0000000000..a3858e7c74
--- /dev/null
+++ b/deps/v8/src/parsing/duplicate-finder.h
@@ -0,0 +1,64 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_DUPLICATE_FINDER_H_
+#define V8_PARSING_DUPLICATE_FINDER_H_
+
+#include "src/base/hashmap.h"
+#include "src/collector.h"
+
+namespace v8 {
+namespace internal {
+
+class UnicodeCache;
+
+// DuplicateFinder discovers duplicate symbols.
+class DuplicateFinder {
+ public:
+ explicit DuplicateFinder(UnicodeCache* constants)
+ : unicode_constants_(constants), backing_store_(16), map_(&Match) {}
+
+ int AddOneByteSymbol(Vector<const uint8_t> key, int value);
+ int AddTwoByteSymbol(Vector<const uint16_t> key, int value);
+ // Add a a number literal by converting it (if necessary)
+ // to the string that ToString(ToNumber(literal)) would generate.
+ // and then adding that string with AddOneByteSymbol.
+ // This string is the actual value used as key in an object literal,
+ // and the one that must be different from the other keys.
+ int AddNumber(Vector<const uint8_t> key, int value);
+
+ private:
+ int AddSymbol(Vector<const uint8_t> key, bool is_one_byte, int value);
+ // Backs up the key and its length in the backing store.
+ // The backup is stored with a base 127 encoding of the
+ // length (plus a bit saying whether the string is one byte),
+ // followed by the bytes of the key.
+ uint8_t* BackupKey(Vector<const uint8_t> key, bool is_one_byte);
+
+ // Compare two encoded keys (both pointing into the backing store)
+ // for having the same base-127 encoded lengths and representation.
+ // and then having the same 'length' bytes following.
+ static bool Match(void* first, void* second);
+ // Creates a hash from a sequence of bytes.
+ static uint32_t Hash(Vector<const uint8_t> key, bool is_one_byte);
+ // Checks whether a string containing a JS number is its canonical
+ // form.
+ static bool IsNumberCanonical(Vector<const uint8_t> key);
+
+ // Size of buffer. Sufficient for using it to call DoubleToCString in
+ // from conversions.h.
+ static const int kBufferSize = 100;
+
+ UnicodeCache* unicode_constants_;
+ // Backing store used to store strings used as hashmap keys.
+ SequenceCollector<unsigned char> backing_store_;
+ base::CustomMatcherHashMap map_;
+ // Buffer used for string->number->canonical string conversions.
+ char number_buffer_[kBufferSize];
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_DUPLICATE_FINDER_H_
diff --git a/deps/v8/src/parsing/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
index 9190e18c7d..6a1fbac35a 100644
--- a/deps/v8/src/parsing/expression-classifier.h
+++ b/deps/v8/src/parsing/expression-classifier.h
@@ -7,11 +7,12 @@
#include "src/messages.h"
#include "src/parsing/scanner.h"
-#include "src/parsing/token.h"
namespace v8 {
namespace internal {
+class DuplicateFinder;
+
#define ERROR_CODES(T) \
T(ExpressionProduction, 0) \
T(FormalParameterInitializerProduction, 1) \
@@ -21,11 +22,32 @@ namespace internal {
T(StrictModeFormalParametersProduction, 5) \
T(ArrowFormalParametersProduction, 6) \
T(LetPatternProduction, 7) \
- T(ObjectLiteralProduction, 8) \
- T(TailCallExpressionProduction, 9) \
- T(AsyncArrowFormalParametersProduction, 10)
-
-template <typename Traits>
+ T(TailCallExpressionProduction, 8) \
+ T(AsyncArrowFormalParametersProduction, 9)
+
+// Expression classifiers serve two purposes:
+//
+// 1) They keep track of error messages that are pending (and other
+// related information), waiting for the parser to decide whether
+// the parsed expression is a pattern or not.
+// 2) They keep track of expressions that may need to be rewritten, if
+// the parser decides that they are not patterns. (A different
+// mechanism implements the rewriting of patterns.)
+//
+// Expression classifiers are used by the parser in a stack fashion.
+// Each new classifier is pushed on top of the stack. This happens
+// automatically by the class's constructor. While on top of the
+// stack, the classifier records pending error messages and tracks the
+// pending non-patterns of the expression that is being parsed.
+//
+// At the end of its life, a classifier is either "accumulated" to the
+// one that is below it on the stack, or is "discarded". The former
+// is achieved by calling the method Accumulate. The latter is
+// achieved automatically by the destructor, but it can happen earlier
+// by calling the method Discard. Both actions result in removing the
+// classifier from the parser's stack.
+
+template <typename Types>
class ExpressionClassifier {
public:
enum ErrorKind : unsigned {
@@ -55,51 +77,41 @@ class ExpressionClassifier {
const char* arg;
};
+ // clang-format off
enum TargetProduction : unsigned {
#define DEFINE_PRODUCTION(NAME, CODE) NAME = 1 << CODE,
ERROR_CODES(DEFINE_PRODUCTION)
#undef DEFINE_PRODUCTION
- ExpressionProductions =
- (ExpressionProduction | FormalParameterInitializerProduction |
- TailCallExpressionProduction),
- PatternProductions = (BindingPatternProduction |
- AssignmentPatternProduction | LetPatternProduction),
- FormalParametersProductions = (DistinctFormalParametersProduction |
- StrictModeFormalParametersProduction),
- AllProductions =
- (ExpressionProductions | PatternProductions |
- FormalParametersProductions | ArrowFormalParametersProduction |
- ObjectLiteralProduction | AsyncArrowFormalParametersProduction)
+#define DEFINE_ALL_PRODUCTIONS(NAME, CODE) NAME |
+ AllProductions = ERROR_CODES(DEFINE_ALL_PRODUCTIONS) /* | */ 0
+#undef DEFINE_ALL_PRODUCTIONS
};
+ // clang-format on
enum FunctionProperties : unsigned {
NonSimpleParameter = 1 << 0
};
- explicit ExpressionClassifier(const Traits* t)
- : zone_(t->zone()),
- non_patterns_to_rewrite_(t->GetNonPatternList()),
- reported_errors_(t->GetReportedErrorList()),
- duplicate_finder_(nullptr),
- invalid_productions_(0),
- function_properties_(0) {
- reported_errors_begin_ = reported_errors_end_ = reported_errors_->length();
- non_pattern_begin_ = non_patterns_to_rewrite_->length();
- }
-
- ExpressionClassifier(const Traits* t, DuplicateFinder* duplicate_finder)
- : zone_(t->zone()),
- non_patterns_to_rewrite_(t->GetNonPatternList()),
- reported_errors_(t->GetReportedErrorList()),
+ explicit ExpressionClassifier(typename Types::Base* base,
+ DuplicateFinder* duplicate_finder = nullptr)
+ : base_(base),
+ previous_(base->classifier_),
+ zone_(base->impl()->zone()),
+ non_patterns_to_rewrite_(base->impl()->GetNonPatternList()),
+ reported_errors_(base->impl()->GetReportedErrorList()),
duplicate_finder_(duplicate_finder),
invalid_productions_(0),
function_properties_(0) {
+ base->classifier_ = this;
reported_errors_begin_ = reported_errors_end_ = reported_errors_->length();
non_pattern_begin_ = non_patterns_to_rewrite_->length();
}
- ~ExpressionClassifier() { Discard(); }
+ V8_INLINE ~ExpressionClassifier() {
+ Discard();
+ if (base_->classifier_ == this) base_->classifier_ = previous_;
+ }
V8_INLINE bool is_valid(unsigned productions) const {
return (invalid_productions_ & productions) == 0;
@@ -179,14 +191,6 @@ class ExpressionClassifier {
return reported_error(kLetPatternProduction);
}
- V8_INLINE bool has_object_literal_error() const {
- return !is_valid(ObjectLiteralProduction);
- }
-
- V8_INLINE const Error& object_literal_error() const {
- return reported_error(kObjectLiteralProduction);
- }
-
V8_INLINE bool has_tail_call_expression() const {
return !is_valid(TailCallExpressionProduction);
}
@@ -295,14 +299,6 @@ class ExpressionClassifier {
Add(Error(loc, message, kLetPatternProduction, arg));
}
- void RecordObjectLiteralError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- const char* arg = nullptr) {
- if (has_object_literal_error()) return;
- invalid_productions_ |= ObjectLiteralProduction;
- Add(Error(loc, message, kObjectLiteralProduction, arg));
- }
-
void RecordTailCallExpressionError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
@@ -316,7 +312,14 @@ class ExpressionClassifier {
DCHECK_EQ(inner->reported_errors_, reported_errors_);
DCHECK_EQ(inner->reported_errors_begin_, reported_errors_end_);
DCHECK_EQ(inner->reported_errors_end_, reported_errors_->length());
- if (merge_non_patterns) MergeNonPatterns(inner);
+ DCHECK_EQ(inner->non_patterns_to_rewrite_, non_patterns_to_rewrite_);
+ DCHECK_LE(non_pattern_begin_, inner->non_pattern_begin_);
+ DCHECK_LE(inner->non_pattern_begin_, non_patterns_to_rewrite_->length());
+ // Merge non-patterns from the inner classifier, or discard them.
+ if (merge_non_patterns)
+ inner->non_pattern_begin_ = non_patterns_to_rewrite_->length();
+ else
+ non_patterns_to_rewrite_->Rewind(inner->non_pattern_begin_);
// Propagate errors from inner, but don't overwrite already recorded
// errors.
unsigned non_arrow_inner_invalid_productions =
@@ -393,10 +396,7 @@ class ExpressionClassifier {
non_patterns_to_rewrite_->Rewind(non_pattern_begin_);
}
- V8_INLINE void MergeNonPatterns(ExpressionClassifier* inner) {
- DCHECK_LE(non_pattern_begin_, inner->non_pattern_begin_);
- inner->non_pattern_begin_ = inner->non_patterns_to_rewrite_->length();
- }
+ ExpressionClassifier* previous() const { return previous_; }
private:
V8_INLINE const Error& reported_error(ErrorKind kind) const {
@@ -410,6 +410,9 @@ class ExpressionClassifier {
// We should only be looking for an error when we know that one has
// been reported. But we're not... So this is to make sure we have
// the same behaviour.
+ UNREACHABLE();
+
+ // Make MSVC happy by returning an error from this inaccessible path.
static Error none;
return none;
}
@@ -434,8 +437,10 @@ class ExpressionClassifier {
reported_errors_end_++;
}
+ typename Types::Base* base_;
+ ExpressionClassifier* previous_;
Zone* zone_;
- ZoneList<typename Traits::Type::Expression>* non_patterns_to_rewrite_;
+ ZoneList<typename Types::Expression>* non_patterns_to_rewrite_;
ZoneList<Error>* reported_errors_;
DuplicateFinder* duplicate_finder_;
// The uint16_t for non_pattern_begin_ will not be enough in the case,
@@ -456,6 +461,8 @@ class ExpressionClassifier {
// stack overflow while parsing.
uint16_t reported_errors_begin_;
uint16_t reported_errors_end_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExpressionClassifier);
};
diff --git a/deps/v8/src/parsing/func-name-inferrer.cc b/deps/v8/src/parsing/func-name-inferrer.cc
index 0821be0a68..a86e1c299f 100644
--- a/deps/v8/src/parsing/func-name-inferrer.cc
+++ b/deps/v8/src/parsing/func-name-inferrer.cc
@@ -45,9 +45,11 @@ void FuncNameInferrer::PushVariableName(const AstRawString* name) {
}
void FuncNameInferrer::RemoveAsyncKeywordFromEnd() {
- DCHECK(names_stack_.length() > 0);
- DCHECK(names_stack_.last().name->IsOneByteEqualTo("async"));
- names_stack_.RemoveLast();
+ if (IsOpen()) {
+ DCHECK(names_stack_.length() > 0);
+ DCHECK(names_stack_.last().name->IsOneByteEqualTo("async"));
+ names_stack_.RemoveLast();
+ }
}
const AstString* FuncNameInferrer::MakeNameFromStack() {
diff --git a/deps/v8/src/parsing/func-name-inferrer.h b/deps/v8/src/parsing/func-name-inferrer.h
index cffd8a8c18..cc9204bb6d 100644
--- a/deps/v8/src/parsing/func-name-inferrer.h
+++ b/deps/v8/src/parsing/func-name-inferrer.h
@@ -6,7 +6,7 @@
#define V8_PARSING_FUNC_NAME_INFERRER_H_
#include "src/handles.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/parsing/parameter-initializer-rewriter.cc b/deps/v8/src/parsing/parameter-initializer-rewriter.cc
index b12a80f9b8..73224a23d1 100644
--- a/deps/v8/src/parsing/parameter-initializer-rewriter.cc
+++ b/deps/v8/src/parsing/parameter-initializer-rewriter.cc
@@ -47,9 +47,9 @@ void Rewriter::VisitClassLiteral(ClassLiteral* class_literal) {
}
// No need to visit the constructor since it will have the class
// scope on its scope chain.
- ZoneList<ObjectLiteralProperty*>* props = class_literal->properties();
+ ZoneList<ClassLiteralProperty*>* props = class_literal->properties();
for (int i = 0; i < props->length(); ++i) {
- ObjectLiteralProperty* prop = props->at(i);
+ ClassLiteralProperty* prop = props->at(i);
if (!prop->key()->IsLiteral()) {
Visit(prop->key());
}
diff --git a/deps/v8/src/parsing/parameter-initializer-rewriter.h b/deps/v8/src/parsing/parameter-initializer-rewriter.h
index a0ff7d2b38..5e409b4fbc 100644
--- a/deps/v8/src/parsing/parameter-initializer-rewriter.h
+++ b/deps/v8/src/parsing/parameter-initializer-rewriter.h
@@ -5,7 +5,7 @@
#ifndef V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
#define V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
-#include "src/types.h"
+#include "src/ast/ast-types.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index dfec0610e1..5b9b5e4ef0 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -33,7 +33,9 @@ ParseInfo::ParseInfo(Zone* zone)
ParseInfo::ParseInfo(Zone* zone, Handle<JSFunction> function)
: ParseInfo(zone, Handle<SharedFunctionInfo>(function->shared())) {
- set_context(Handle<Context>(function->context()));
+ if (!function->context()->IsNativeContext()) {
+ set_outer_scope_info(handle(function->context()->scope_info()));
+ }
}
ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
@@ -86,17 +88,13 @@ bool ParseInfo::is_declaration() const {
return (compiler_hints_ & (1 << SharedFunctionInfo::kIsDeclaration)) != 0;
}
-bool ParseInfo::is_arrow() const {
- return (compiler_hints_ & (1 << SharedFunctionInfo::kIsArrow)) != 0;
-}
-
-bool ParseInfo::is_async() const {
- return (compiler_hints_ & (1 << SharedFunctionInfo::kIsAsyncFunction)) != 0;
+bool ParseInfo::requires_class_field_init() const {
+ return (compiler_hints_ &
+ (1 << SharedFunctionInfo::kRequiresClassFieldInit)) != 0;
}
-
-bool ParseInfo::is_default_constructor() const {
- return (compiler_hints_ & (1 << SharedFunctionInfo::kIsDefaultConstructor)) !=
- 0;
+bool ParseInfo::is_class_field_initializer() const {
+ return (compiler_hints_ &
+ (1 << SharedFunctionInfo::kIsClassFieldInitializer)) != 0;
}
FunctionKind ParseInfo::function_kind() const {
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 6176135c59..4aedae4978 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -148,9 +148,8 @@ class ParseInfo {
// Getters for individual compiler hints.
bool is_declaration() const;
- bool is_arrow() const;
- bool is_async() const;
- bool is_default_constructor() const;
+ bool requires_class_field_init() const;
+ bool is_class_field_initializer() const;
FunctionKind function_kind() const;
//--------------------------------------------------------------------------
@@ -159,11 +158,15 @@ class ParseInfo {
Isolate* isolate() const { return isolate_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_; }
Handle<Script> script() const { return script_; }
- Handle<Context> context() const { return context_; }
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info() const {
+ return maybe_outer_scope_info_;
+ }
void clear_script() { script_ = Handle<Script>::null(); }
void set_isolate(Isolate* isolate) { isolate_ = isolate; }
void set_shared_info(Handle<SharedFunctionInfo> shared) { shared_ = shared; }
- void set_context(Handle<Context> context) { context_ = context; }
+ void set_outer_scope_info(Handle<ScopeInfo> outer_scope_info) {
+ maybe_outer_scope_info_ = outer_scope_info;
+ }
void set_script(Handle<Script> script) { script_ = script; }
//--------------------------------------------------------------------------
@@ -178,7 +181,10 @@ class ParseInfo {
void ReopenHandlesInNewHandleScope() {
shared_ = Handle<SharedFunctionInfo>(*shared_);
script_ = Handle<Script>(*script_);
- context_ = Handle<Context>(*context_);
+ Handle<ScopeInfo> outer_scope_info;
+ if (maybe_outer_scope_info_.ToHandle(&outer_scope_info)) {
+ maybe_outer_scope_info_ = Handle<ScopeInfo>(*outer_scope_info);
+ }
}
#ifdef DEBUG
@@ -224,7 +230,7 @@ class ParseInfo {
Isolate* isolate_;
Handle<SharedFunctionInfo> shared_;
Handle<Script> script_;
- Handle<Context> context_;
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info_;
//----------- Inputs+Outputs of parsing and scope analysis -----------------
ScriptData** cached_data_; // used if available, populated if requested.
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index b8703d0691..1ebbee4959 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -5,6 +5,7 @@
#ifndef V8_PARSING_PARSER_BASE_H
#define V8_PARSING_PARSER_BASE_H
+#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
#include "src/base/hashmap.h"
@@ -56,59 +57,6 @@ static inline bool operator&(ParseFunctionFlags bitfield,
return static_cast<T>(bitfield) & static_cast<T>(mask);
}
-enum class MethodKind {
- kNormal = 0,
- kStatic = 1 << 0,
- kGenerator = 1 << 1,
- kStaticGenerator = kStatic | kGenerator,
- kAsync = 1 << 2,
- kStaticAsync = kStatic | kAsync,
-
- /* Any non-ordinary method kinds */
- kSpecialMask = kGenerator | kAsync
-};
-
-inline bool IsValidMethodKind(MethodKind kind) {
- return kind == MethodKind::kNormal || kind == MethodKind::kStatic ||
- kind == MethodKind::kGenerator ||
- kind == MethodKind::kStaticGenerator || kind == MethodKind::kAsync ||
- kind == MethodKind::kStaticAsync;
-}
-
-static inline MethodKind operator|(MethodKind lhs, MethodKind rhs) {
- typedef unsigned char T;
- return static_cast<MethodKind>(static_cast<T>(lhs) | static_cast<T>(rhs));
-}
-
-static inline MethodKind& operator|=(MethodKind& lhs, const MethodKind& rhs) {
- lhs = lhs | rhs;
- DCHECK(IsValidMethodKind(lhs));
- return lhs;
-}
-
-static inline bool operator&(MethodKind bitfield, MethodKind mask) {
- typedef unsigned char T;
- return static_cast<T>(bitfield) & static_cast<T>(mask);
-}
-
-inline bool IsNormalMethod(MethodKind kind) {
- return kind == MethodKind::kNormal;
-}
-
-inline bool IsSpecialMethod(MethodKind kind) {
- return kind & MethodKind::kSpecialMask;
-}
-
-inline bool IsStaticMethod(MethodKind kind) {
- return kind & MethodKind::kStatic;
-}
-
-inline bool IsGeneratorMethod(MethodKind kind) {
- return kind & MethodKind::kGenerator;
-}
-
-inline bool IsAsyncMethod(MethodKind kind) { return kind & MethodKind::kAsync; }
-
struct FormalParametersBase {
explicit FormalParametersBase(DeclarationScope* scope) : scope(scope) {}
DeclarationScope* scope;
@@ -126,8 +74,8 @@ struct FormalParametersBase {
// thus it must never be used where only a single statement
// is correct (e.g. an if statement branch w/o braces)!
-#define CHECK_OK_CUSTOM(x) ok); \
- if (!*ok) return this->x(); \
+#define CHECK_OK_CUSTOM(x, ...) ok); \
+ if (!*ok) return impl()->x(__VA_ARGS__); \
((void)0
#define DUMMY ) // to make indentation work
#undef DUMMY
@@ -140,93 +88,86 @@ struct FormalParametersBase {
// following the Curiously Recurring Template Pattern (CRTP).
// The structure of the parser objects is roughly the following:
//
-// // Common denominator, needed to avoid cyclic dependency.
-// // Instances of this template will end up with very minimal
-// // definitions, ideally containing just typedefs.
+// // A structure template containing type definitions, needed to
+// // avoid a cyclic dependency.
// template <typename Impl>
-// class ParserBaseTraits;
-
+// struct ParserTypes;
+//
// // The parser base object, which should just implement pure
// // parser behavior. The Impl parameter is the actual derived
// // class (according to CRTP), which implements impure parser
// // behavior.
// template <typename Impl>
-// class ParserBase : public ParserBaseTraits<Impl> { ... };
+// class ParserBase { ... };
//
// // And then, for each parser variant (e.g., parser, preparser, etc):
// class Parser;
//
// template <>
-// class ParserBaseTraits<Parser> { ... };
+// class ParserTypes<Parser> { ... };
//
// class Parser : public ParserBase<Parser> { ... };
//
-// TODO(nikolaos): Currently the traits objects contain many things
-// that will be moved to the implementation objects or to the parser
-// base. The following comments will have to change, when this happens.
-
-// The traits class template encapsulates the differences between
-// parser/pre-parser implementations. In particular:
-
-// - Return types: For example, Parser functions return Expression* and
-// PreParser functions return PreParserExpression.
-
-// - Creating parse tree nodes: Parser generates an AST during the recursive
-// descent. PreParser doesn't create a tree. Instead, it passes around minimal
-// data objects (PreParserExpression, PreParserIdentifier etc.) which contain
-// just enough data for the upper layer functions. PreParserFactory is
-// responsible for creating these dummy objects. It provides a similar kind of
-// interface as AstNodeFactory, so ParserBase doesn't need to care which one is
-// used.
-
-// - Miscellaneous other tasks interleaved with the recursive descent. For
-// example, Parser keeps track of which function literals should be marked as
-// pretenured, and PreParser doesn't care.
-
-// The traits are expected to contain the following typedefs:
+// The parser base object implements pure parsing, according to the
+// language grammar. Different parser implementations may exhibit
+// different parser-driven behavior that is not considered as pure
+// parsing, e.g., early error detection and reporting, AST generation, etc.
+
+// The ParserTypes structure encapsulates the differences in the
+// types used in parsing methods. E.g., Parser methods use Expression*
+// and PreParser methods use PreParserExpression. For any given parser
+// implementation class Impl, it is expected to contain the following typedefs:
+//
// template <>
-// class ParserBaseTraits<Impl> {
-// // In particular...
-// struct Type {
-// typedef GeneratorVariable;
-// typedef AstProperties;
-// typedef ExpressionClassifier;
-// // Return types for traversing functions.
-// typedef Identifier;
-// typedef Expression;
-// typedef YieldExpression;
-// typedef FunctionLiteral;
-// typedef ClassLiteral;
-// typedef Literal;
-// typedef ObjectLiteralProperty;
-// typedef ExpressionList;
-// typedef PropertyList;
-// typedef FormalParameter;
-// typedef FormalParameters;
-// typedef StatementList;
-// // For constructing objects returned by the traversing functions.
-// typedef Factory;
-// };
-// // ...
+// struct ParserTypes<Impl> {
+// // Synonyms for ParserBase<Impl> and Impl, respectively.
+// typedef Base;
+// typedef Impl;
+// // TODO(nikolaos): this one will probably go away, as it is
+// // not related to pure parsing.
+// typedef Variable;
+// // Return types for traversing functions.
+// typedef Identifier;
+// typedef Expression;
+// typedef FunctionLiteral;
+// typedef ObjectLiteralProperty;
+// typedef ClassLiteralProperty;
+// typedef ExpressionList;
+// typedef ObjectPropertyList;
+// typedef ClassPropertyList;
+// typedef FormalParameters;
+// typedef Statement;
+// typedef StatementList;
+// typedef Block;
+// typedef BreakableStatement;
+// typedef IterationStatement;
+// // For constructing objects returned by the traversing functions.
+// typedef Factory;
+// // For other implementation-specific tasks.
+// typedef Target;
+// typedef TargetScope;
// };
template <typename Impl>
-class ParserBaseTraits;
+struct ParserTypes;
template <typename Impl>
-class ParserBase : public ParserBaseTraits<Impl> {
+class ParserBase {
public:
- // Shorten type names defined by Traits.
- typedef ParserBaseTraits<Impl> Traits;
- typedef typename Traits::Type::Expression ExpressionT;
- typedef typename Traits::Type::Identifier IdentifierT;
- typedef typename Traits::Type::FormalParameter FormalParameterT;
- typedef typename Traits::Type::FormalParameters FormalParametersT;
- typedef typename Traits::Type::FunctionLiteral FunctionLiteralT;
- typedef typename Traits::Type::Literal LiteralT;
- typedef typename Traits::Type::ObjectLiteralProperty ObjectLiteralPropertyT;
- typedef typename Traits::Type::StatementList StatementListT;
- typedef typename Traits::Type::ExpressionClassifier ExpressionClassifier;
+ // Shorten type names defined by ParserTypes<Impl>.
+ typedef ParserTypes<Impl> Types;
+ typedef typename Types::Identifier IdentifierT;
+ typedef typename Types::Expression ExpressionT;
+ typedef typename Types::FunctionLiteral FunctionLiteralT;
+ typedef typename Types::ObjectLiteralProperty ObjectLiteralPropertyT;
+ typedef typename Types::ClassLiteralProperty ClassLiteralPropertyT;
+ typedef typename Types::ExpressionList ExpressionListT;
+ typedef typename Types::FormalParameters FormalParametersT;
+ typedef typename Types::Statement StatementT;
+ typedef typename Types::StatementList StatementListT;
+ typedef typename Types::Block BlockT;
+ typedef typename v8::internal::ExpressionClassifier<Types>
+ ExpressionClassifier;
// All implementation-specific methods must be called through this.
Impl* impl() { return static_cast<Impl*>(this); }
@@ -246,6 +187,7 @@ class ParserBase : public ParserBaseTraits<Impl> {
parsing_module_(false),
stack_limit_(stack_limit),
zone_(zone),
+ classifier_(nullptr),
scanner_(scanner),
stack_overflow_(false),
allow_lazy_(false),
@@ -257,7 +199,8 @@ class ParserBase : public ParserBaseTraits<Impl> {
allow_harmony_function_sent_(false),
allow_harmony_async_await_(false),
allow_harmony_restrictive_generators_(false),
- allow_harmony_trailing_commas_(false) {}
+ allow_harmony_trailing_commas_(false),
+ allow_harmony_class_fields_(false) {}
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
@@ -273,6 +216,7 @@ class ParserBase : public ParserBaseTraits<Impl> {
ALLOW_ACCESSORS(harmony_async_await);
ALLOW_ACCESSORS(harmony_restrictive_generators);
ALLOW_ACCESSORS(harmony_trailing_commas);
+ ALLOW_ACCESSORS(harmony_class_fields);
#undef ALLOW_ACCESSORS
@@ -280,7 +224,12 @@ class ParserBase : public ParserBaseTraits<Impl> {
void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
+ Zone* zone() const { return zone_; }
+
protected:
+ friend class v8::internal::ExpressionClassifier<ParserTypes<Impl>>;
+
+ // clang-format off
enum AllowRestrictedIdentifiers {
kAllowRestrictedIdentifiers,
kDontAllowRestrictedIdentifiers
@@ -291,14 +240,26 @@ class ParserBase : public ParserBaseTraits<Impl> {
PARSE_EAGERLY
};
+ enum LazyParsingResult {
+ kLazyParsingComplete,
+ kLazyParsingAborted
+ };
+
enum VariableDeclarationContext {
kStatementListItem,
kStatement,
kForStatement
};
+ enum class FunctionBodyType {
+ kNormal,
+ kSingleExpression
+ };
+ // clang-format on
+
class Checkpoint;
- class ObjectLiteralCheckerBase;
+ class ClassLiteralChecker;
+ class ObjectLiteralChecker;
// ---------------------------------------------------------------------------
// ScopeState and its subclasses implement the parser's scope stack.
@@ -333,8 +294,8 @@ class ParserBase : public ParserBaseTraits<Impl> {
// allocation.
// TODO(verwaest): Move to LazyBlockState class that only allocates the
// scope when needed.
- explicit BlockState(ScopeState** scope_stack)
- : ScopeState(scope_stack, NewScope(*scope_stack)) {}
+ explicit BlockState(Zone* zone, ScopeState** scope_stack)
+ : ScopeState(scope_stack, NewScope(zone, *scope_stack)) {}
void SetNonlinear() { this->scope()->SetNonlinear(); }
void set_start_position(int pos) { this->scope()->set_start_position(pos); }
@@ -348,9 +309,8 @@ class ParserBase : public ParserBaseTraits<Impl> {
}
private:
- Scope* NewScope(ScopeState* outer_state) {
+ Scope* NewScope(Zone* zone, ScopeState* outer_state) {
Scope* parent = outer_state->scope();
- Zone* zone = outer_state->zone();
return new (zone) Scope(zone, parent, BLOCK_SCOPE);
}
};
@@ -384,14 +344,6 @@ class ParserBase : public ParserBaseTraits<Impl> {
expressions_.Add(expr, zone_);
}
- void AddExplicitTailCall(ExpressionT expr, const Scanner::Location& loc) {
- if (!has_explicit_tail_calls()) {
- loc_ = loc;
- has_explicit_tail_calls_ = true;
- }
- expressions_.Add(expr, zone_);
- }
-
void Append(const TailCallExpressionList& other) {
if (!has_explicit_tail_calls()) {
loc_ = other.loc_;
@@ -425,9 +377,13 @@ class ParserBase : public ParserBaseTraits<Impl> {
class FunctionState final : public ScopeState {
public:
FunctionState(FunctionState** function_state_stack,
- ScopeState** scope_stack, Scope* scope, FunctionKind kind);
+ ScopeState** scope_stack, DeclarationScope* scope);
~FunctionState();
+ DeclarationScope* scope() const {
+ return ScopeState::scope()->AsDeclarationScope();
+ }
+
int NextMaterializedLiteralIndex() {
return next_materialized_literal_index_++;
}
@@ -442,24 +398,27 @@ class ParserBase : public ParserBaseTraits<Impl> {
void AddProperty() { expected_property_count_++; }
int expected_property_count() { return expected_property_count_; }
- bool is_generator() const { return IsGeneratorFunction(kind_); }
- bool is_async_function() const { return IsAsyncFunction(kind_); }
- bool is_resumable() const { return is_generator() || is_async_function(); }
-
- FunctionKind kind() const { return kind_; }
+ FunctionKind kind() const { return scope()->function_kind(); }
FunctionState* outer() const { return outer_function_state_; }
- void set_generator_object_variable(
- typename Traits::Type::GeneratorVariable* variable) {
+ void set_generator_object_variable(typename Types::Variable* variable) {
DCHECK(variable != NULL);
- DCHECK(is_resumable());
+ DCHECK(IsResumableFunction(kind()));
generator_object_variable_ = variable;
}
- typename Traits::Type::GeneratorVariable* generator_object_variable()
- const {
+ typename Types::Variable* generator_object_variable() const {
return generator_object_variable_;
}
+ void set_promise_variable(typename Types::Variable* variable) {
+ DCHECK(variable != NULL);
+ DCHECK(IsAsyncFunction(kind()));
+ promise_variable_ = variable;
+ }
+ typename Types::Variable* promise_variable() const {
+ return promise_variable_;
+ }
+
const ZoneList<DestructuringAssignment>&
destructuring_assignments_to_rewrite() const {
return destructuring_assignments_to_rewrite_;
@@ -474,14 +433,6 @@ class ParserBase : public ParserBaseTraits<Impl> {
tail_call_expressions_.AddImplicitTailCall(expression);
}
}
- void AddExplicitTailCallExpression(ExpressionT expression,
- const Scanner::Location& loc) {
- DCHECK(expression->IsCall());
- if (return_expr_context() ==
- ReturnExprContext::kInsideValidReturnStatement) {
- tail_call_expressions_.AddExplicitTailCall(expression, loc);
- }
- }
ZoneList<typename ExpressionClassifier::Error>* GetReportedErrorList() {
return &reported_errors_;
@@ -530,11 +481,13 @@ class ParserBase : public ParserBaseTraits<Impl> {
// Properties count estimation.
int expected_property_count_;
- FunctionKind kind_;
// For generators, this variable may hold the generator object. It variable
// is used by yield expressions and return statements. It is not necessary
// for generator functions to have this variable set.
Variable* generator_object_variable_;
+ // For async functions, this variable holds a temporary for the Promise
+ // being created as output of the async function.
+ Variable* promise_variable_;
FunctionState** function_state_stack_;
FunctionState* outer_function_state_;
@@ -644,8 +597,97 @@ class ParserBase : public ParserBaseTraits<Impl> {
Mode old_mode_;
};
+ struct DeclarationDescriptor {
+ enum Kind { NORMAL, PARAMETER };
+ Scope* scope;
+ Scope* hoist_scope;
+ VariableMode mode;
+ int declaration_pos;
+ int initialization_pos;
+ Kind declaration_kind;
+ };
+
+ struct DeclarationParsingResult {
+ struct Declaration {
+ Declaration(ExpressionT pattern, int initializer_position,
+ ExpressionT initializer)
+ : pattern(pattern),
+ initializer_position(initializer_position),
+ initializer(initializer) {}
+
+ ExpressionT pattern;
+ int initializer_position;
+ ExpressionT initializer;
+ };
+
+ DeclarationParsingResult()
+ : declarations(4),
+ first_initializer_loc(Scanner::Location::invalid()),
+ bindings_loc(Scanner::Location::invalid()) {}
+
+ DeclarationDescriptor descriptor;
+ List<Declaration> declarations;
+ Scanner::Location first_initializer_loc;
+ Scanner::Location bindings_loc;
+ };
+
+ struct CatchInfo {
+ public:
+ explicit CatchInfo(ParserBase* parser)
+ : name(parser->impl()->EmptyIdentifier()),
+ variable(nullptr),
+ pattern(parser->impl()->EmptyExpression()),
+ scope(nullptr),
+ init_block(parser->impl()->NullBlock()),
+ inner_block(parser->impl()->NullBlock()),
+ for_promise_reject(false),
+ bound_names(1, parser->zone()),
+ tail_call_expressions(parser->zone()) {}
+ IdentifierT name;
+ Variable* variable;
+ ExpressionT pattern;
+ Scope* scope;
+ BlockT init_block;
+ BlockT inner_block;
+ bool for_promise_reject;
+ ZoneList<const AstRawString*> bound_names;
+ TailCallExpressionList tail_call_expressions;
+ };
+
+ struct ForInfo {
+ public:
+ explicit ForInfo(ParserBase* parser)
+ : bound_names(1, parser->zone()),
+ mode(ForEachStatement::ENUMERATE),
+ each_loc(),
+ parsing_result() {}
+ ZoneList<const AstRawString*> bound_names;
+ ForEachStatement::VisitMode mode;
+ Scanner::Location each_loc;
+ DeclarationParsingResult parsing_result;
+ };
+
+ struct ClassInfo {
+ public:
+ explicit ClassInfo(ParserBase* parser)
+ : proxy(nullptr),
+ extends(parser->impl()->EmptyExpression()),
+ properties(parser->impl()->NewClassPropertyList(4)),
+ instance_field_initializers(parser->impl()->NewExpressionList(0)),
+ constructor(parser->impl()->EmptyFunctionLiteral()),
+ has_seen_constructor(false),
+ static_initializer_var(nullptr) {}
+ VariableProxy* proxy;
+ ExpressionT extends;
+ typename Types::ClassPropertyList properties;
+ ExpressionListT instance_field_initializers;
+ FunctionLiteralT constructor;
+ bool has_seen_constructor;
+ Variable* static_initializer_var;
+ };
+
DeclarationScope* NewScriptScope() const {
- return new (zone()) DeclarationScope(zone());
+ return new (zone()) DeclarationScope(zone(), ast_value_factory());
}
DeclarationScope* NewVarblockScope() const {
@@ -653,7 +695,7 @@ class ParserBase : public ParserBaseTraits<Impl> {
}
ModuleScope* NewModuleScope(DeclarationScope* parent) const {
- return new (zone()) ModuleScope(zone(), parent, ast_value_factory());
+ return new (zone()) ModuleScope(parent, ast_value_factory());
}
DeclarationScope* NewEvalScope(Scope* parent) const {
@@ -683,12 +725,18 @@ class ParserBase : public ParserBaseTraits<Impl> {
new (zone()) DeclarationScope(zone(), scope(), FUNCTION_SCOPE, kind);
// TODO(verwaest): Move into the DeclarationScope constructor.
if (!IsArrowFunction(kind)) {
- result->DeclareThis(ast_value_factory());
result->DeclareDefaultFunctionVariables(ast_value_factory());
}
return result;
}
+ V8_INLINE DeclarationScope* GetDeclarationScope() const {
+ return scope()->GetDeclarationScope();
+ }
+ V8_INLINE DeclarationScope* GetClosureScope() const {
+ return scope()->GetClosureScope();
+ }
+
Scanner* scanner() const { return scanner_; }
AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
int position() const { return scanner_->location().beg_pos; }
@@ -696,7 +744,6 @@ class ParserBase : public ParserBaseTraits<Impl> {
bool stack_overflow() const { return stack_overflow_; }
void set_stack_overflow() { stack_overflow_ = true; }
Mode mode() const { return mode_; }
- Zone* zone() const { return zone_; }
INLINE(Token::Value peek()) {
if (stack_overflow_) return Token::ILLEGAL;
@@ -761,8 +808,12 @@ class ParserBase : public ParserBaseTraits<Impl> {
Expect(Token::SEMICOLON, ok);
}
- // A dummy function, just useful as an argument to CHECK_OK_CUSTOM.
+ // Dummy functions, just useful as arguments to CHECK_OK_CUSTOM.
static void Void() {}
+ template <typename T>
+ static T Return(T result) {
+ return result;
+ }
bool is_any_identifier(Token::Value token) {
return token == Token::IDENTIFIER || token == Token::ENUM ||
@@ -796,7 +847,7 @@ class ParserBase : public ParserBaseTraits<Impl> {
}
}
- bool CheckInOrOf(ForEachStatement::VisitMode* visit_mode, bool* ok) {
+ bool CheckInOrOf(ForEachStatement::VisitMode* visit_mode) {
if (Check(Token::IN)) {
*visit_mode = ForEachStatement::ENUMERATE;
return true;
@@ -818,21 +869,19 @@ class ParserBase : public ParserBaseTraits<Impl> {
Scanner::Location octal = scanner()->octal_position();
if (octal.IsValid() && beg_pos <= octal.beg_pos &&
octal.end_pos <= end_pos) {
- ReportMessageAt(octal, message);
+ impl()->ReportMessageAt(octal, message);
scanner()->clear_octal_position();
*ok = false;
}
}
// for now, this check just collects statistics.
- void CheckDecimalLiteralWithLeadingZero(int* use_counts, int beg_pos,
- int end_pos) {
+ void CheckDecimalLiteralWithLeadingZero(int beg_pos, int end_pos) {
Scanner::Location token_location =
scanner()->decimal_with_leading_zero_position();
if (token_location.IsValid() && beg_pos <= token_location.beg_pos &&
token_location.end_pos <= end_pos) {
scanner()->clear_decimal_with_leading_zero_position();
- if (use_counts != nullptr)
- ++use_counts[v8::Isolate::kDecimalWithLeadingZeroInStrictMode];
+ impl()->CountUsage(v8::Isolate::kDecimalWithLeadingZeroInStrictMode);
}
}
@@ -846,9 +895,7 @@ class ParserBase : public ParserBaseTraits<Impl> {
ok);
}
- void CheckDestructuringElement(ExpressionT element,
- ExpressionClassifier* classifier, int beg_pos,
- int end_pos);
+ void CheckDestructuringElement(ExpressionT element, int beg_pos, int end_pos);
// Checking the name of a function literal. This has to be done after parsing
// the function, since the function can declare itself strict.
@@ -859,14 +906,14 @@ class ParserBase : public ParserBaseTraits<Impl> {
// The function name needs to be checked in strict mode.
if (is_sloppy(language_mode)) return;
- if (this->IsEvalOrArguments(function_name)) {
- Traits::ReportMessageAt(function_name_loc,
+ if (impl()->IsEvalOrArguments(function_name)) {
+ impl()->ReportMessageAt(function_name_loc,
MessageTemplate::kStrictEvalArguments);
*ok = false;
return;
}
if (function_name_validity == kFunctionNameIsStrictReserved) {
- Traits::ReportMessageAt(function_name_loc,
+ impl()->ReportMessageAt(function_name_loc,
MessageTemplate::kUnexpectedStrictReserved);
*ok = false;
return;
@@ -880,50 +927,45 @@ class ParserBase : public ParserBaseTraits<Impl> {
return Token::Precedence(token);
}
- typename Traits::Type::Factory* factory() { return &ast_node_factory_; }
+ typename Types::Factory* factory() { return &ast_node_factory_; }
DeclarationScope* GetReceiverScope() const {
return scope()->GetReceiverScope();
}
LanguageMode language_mode() { return scope()->language_mode(); }
- bool is_generator() const { return function_state_->is_generator(); }
+ void RaiseLanguageMode(LanguageMode mode) {
+ LanguageMode old = scope()->language_mode();
+ impl()->SetLanguageMode(scope(), old > mode ? old : mode);
+ }
+ bool is_generator() const {
+ return IsGeneratorFunction(function_state_->kind());
+ }
bool is_async_function() const {
- return function_state_->is_async_function();
+ return IsAsyncFunction(function_state_->kind());
+ }
+ bool is_resumable() const {
+ return IsResumableFunction(function_state_->kind());
}
- bool is_resumable() const { return function_state_->is_resumable(); }
// Report syntax errors.
- void ReportMessage(MessageTemplate::Template message, const char* arg = NULL,
- ParseErrorType error_type = kSyntaxError) {
+ void ReportMessage(MessageTemplate::Template message) {
Scanner::Location source_location = scanner()->location();
- Traits::ReportMessageAt(source_location, message, arg, error_type);
+ impl()->ReportMessageAt(source_location, message,
+ static_cast<const char*>(nullptr), kSyntaxError);
}
- void ReportMessage(MessageTemplate::Template message, const AstRawString* arg,
+ template <typename T>
+ void ReportMessage(MessageTemplate::Template message, T arg,
ParseErrorType error_type = kSyntaxError) {
Scanner::Location source_location = scanner()->location();
- Traits::ReportMessageAt(source_location, message, arg, error_type);
- }
-
- void ReportMessageAt(Scanner::Location location,
- MessageTemplate::Template message,
- const char* arg = NULL,
- ParseErrorType error_type = kSyntaxError) {
- Traits::ReportMessageAt(location, message, arg, error_type);
- }
-
- void ReportMessageAt(Scanner::Location location,
- MessageTemplate::Template message,
- const AstRawString* arg,
- ParseErrorType error_type = kSyntaxError) {
- Traits::ReportMessageAt(location, message, arg, error_type);
+ impl()->ReportMessageAt(source_location, message, arg, error_type);
}
void ReportMessageAt(Scanner::Location location,
MessageTemplate::Template message,
ParseErrorType error_type) {
- ReportMessageAt(location, message, static_cast<const char*>(nullptr),
- error_type);
+ impl()->ReportMessageAt(location, message,
+ static_cast<const char*>(nullptr), error_type);
}
void GetUnexpectedTokenMessage(
@@ -938,59 +980,47 @@ class ParserBase : public ParserBaseTraits<Impl> {
void ReportClassifierError(
const typename ExpressionClassifier::Error& error) {
- Traits::ReportMessageAt(error.location, error.message, error.arg,
+ impl()->ReportMessageAt(error.location, error.message, error.arg,
error.type);
}
- void ValidateExpression(const ExpressionClassifier* classifier, bool* ok) {
- if (!classifier->is_valid_expression() ||
- classifier->has_object_literal_error()) {
- const Scanner::Location& a = classifier->expression_error().location;
- const Scanner::Location& b =
- classifier->object_literal_error().location;
- if (a.beg_pos < 0 || (b.beg_pos >= 0 && a.beg_pos > b.beg_pos)) {
- ReportClassifierError(classifier->object_literal_error());
- } else {
- ReportClassifierError(classifier->expression_error());
- }
+ void ValidateExpression(bool* ok) {
+ if (!classifier()->is_valid_expression()) {
+ ReportClassifierError(classifier()->expression_error());
*ok = false;
}
}
- void ValidateFormalParameterInitializer(
- const ExpressionClassifier* classifier, bool* ok) {
- if (!classifier->is_valid_formal_parameter_initializer()) {
- ReportClassifierError(classifier->formal_parameter_initializer_error());
+ void ValidateFormalParameterInitializer(bool* ok) {
+ if (!classifier()->is_valid_formal_parameter_initializer()) {
+ ReportClassifierError(classifier()->formal_parameter_initializer_error());
*ok = false;
}
}
- void ValidateBindingPattern(const ExpressionClassifier* classifier,
- bool* ok) {
- if (!classifier->is_valid_binding_pattern()) {
- ReportClassifierError(classifier->binding_pattern_error());
+ void ValidateBindingPattern(bool* ok) {
+ if (!classifier()->is_valid_binding_pattern()) {
+ ReportClassifierError(classifier()->binding_pattern_error());
*ok = false;
}
}
- void ValidateAssignmentPattern(const ExpressionClassifier* classifier,
- bool* ok) {
- if (!classifier->is_valid_assignment_pattern()) {
- ReportClassifierError(classifier->assignment_pattern_error());
+ void ValidateAssignmentPattern(bool* ok) {
+ if (!classifier()->is_valid_assignment_pattern()) {
+ ReportClassifierError(classifier()->assignment_pattern_error());
*ok = false;
}
}
- void ValidateFormalParameters(const ExpressionClassifier* classifier,
- LanguageMode language_mode,
+ void ValidateFormalParameters(LanguageMode language_mode,
bool allow_duplicates, bool* ok) {
if (!allow_duplicates &&
- !classifier->is_valid_formal_parameter_list_without_duplicates()) {
- ReportClassifierError(classifier->duplicate_formal_parameter_error());
+ !classifier()->is_valid_formal_parameter_list_without_duplicates()) {
+ ReportClassifierError(classifier()->duplicate_formal_parameter_error());
*ok = false;
} else if (is_strict(language_mode) &&
- !classifier->is_valid_strict_mode_formal_parameters()) {
- ReportClassifierError(classifier->strict_mode_formal_parameter_error());
+ !classifier()->is_valid_strict_mode_formal_parameters()) {
+ ReportClassifierError(classifier()->strict_mode_formal_parameter_error());
*ok = false;
}
}
@@ -999,78 +1029,73 @@ class ParserBase : public ParserBaseTraits<Impl> {
return is_any_identifier(token) || token == Token::LPAREN;
}
- void ValidateArrowFormalParameters(const ExpressionClassifier* classifier,
- ExpressionT expr,
+ void ValidateArrowFormalParameters(ExpressionT expr,
bool parenthesized_formals, bool is_async,
bool* ok) {
- if (classifier->is_valid_binding_pattern()) {
+ if (classifier()->is_valid_binding_pattern()) {
// A simple arrow formal parameter: IDENTIFIER => BODY.
- if (!this->IsIdentifier(expr)) {
- Traits::ReportMessageAt(scanner()->location(),
+ if (!impl()->IsIdentifier(expr)) {
+ impl()->ReportMessageAt(scanner()->location(),
MessageTemplate::kUnexpectedToken,
Token::String(scanner()->current_token()));
*ok = false;
}
- } else if (!classifier->is_valid_arrow_formal_parameters()) {
+ } else if (!classifier()->is_valid_arrow_formal_parameters()) {
// If after parsing the expr, we see an error but the expression is
// neither a valid binding pattern nor a valid parenthesized formal
// parameter list, show the "arrow formal parameters" error if the formals
// started with a parenthesis, and the binding pattern error otherwise.
const typename ExpressionClassifier::Error& error =
- parenthesized_formals ? classifier->arrow_formal_parameters_error()
- : classifier->binding_pattern_error();
+ parenthesized_formals ? classifier()->arrow_formal_parameters_error()
+ : classifier()->binding_pattern_error();
ReportClassifierError(error);
*ok = false;
}
- if (is_async && !classifier->is_valid_async_arrow_formal_parameters()) {
+ if (is_async && !classifier()->is_valid_async_arrow_formal_parameters()) {
const typename ExpressionClassifier::Error& error =
- classifier->async_arrow_formal_parameters_error();
+ classifier()->async_arrow_formal_parameters_error();
ReportClassifierError(error);
*ok = false;
}
}
- void ValidateLetPattern(const ExpressionClassifier* classifier, bool* ok) {
- if (!classifier->is_valid_let_pattern()) {
- ReportClassifierError(classifier->let_pattern_error());
- *ok = false;
- }
- }
-
- void CheckNoTailCallExpressions(const ExpressionClassifier* classifier,
- bool* ok) {
- if (FLAG_harmony_explicit_tailcalls &&
- classifier->has_tail_call_expression()) {
- ReportClassifierError(classifier->tail_call_expression_error());
+ void ValidateLetPattern(bool* ok) {
+ if (!classifier()->is_valid_let_pattern()) {
+ ReportClassifierError(classifier()->let_pattern_error());
*ok = false;
}
}
- void ExpressionUnexpectedToken(ExpressionClassifier* classifier) {
+ void ExpressionUnexpectedToken() {
MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
const char* arg;
Scanner::Location location = scanner()->peek_location();
GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
- classifier->RecordExpressionError(location, message, arg);
+ classifier()->RecordExpressionError(location, message, arg);
}
- void BindingPatternUnexpectedToken(ExpressionClassifier* classifier) {
+ void BindingPatternUnexpectedToken() {
MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
const char* arg;
Scanner::Location location = scanner()->peek_location();
GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
- classifier->RecordBindingPatternError(location, message, arg);
+ classifier()->RecordBindingPatternError(location, message, arg);
}
- void ArrowFormalParametersUnexpectedToken(ExpressionClassifier* classifier) {
+ void ArrowFormalParametersUnexpectedToken() {
MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
const char* arg;
Scanner::Location location = scanner()->peek_location();
GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
- classifier->RecordArrowFormalParametersError(location, message, arg);
+ classifier()->RecordArrowFormalParametersError(location, message, arg);
}
- // Recursive descent functions:
+ // Recursive descent functions.
+ // All ParseXXX functions take as the last argument an *ok parameter
+ // which is set to false if parsing failed; it is unchanged otherwise.
+ // By making the 'exception handling' explicit, we are forced to check
+ // for failure at the call sites. The family of CHECK_OK* macros can
+ // be useful for this.
// Parses an identifier that is valid for the current scope, in particular it
// fails on strict mode future reserved keywords in a strict scope. If
@@ -1078,8 +1103,7 @@ class ParserBase : public ParserBaseTraits<Impl> {
// "arguments" as identifier even in strict mode (this is needed in cases like
// "var foo = eval;").
IdentifierT ParseIdentifier(AllowRestrictedIdentifiers, bool* ok);
- IdentifierT ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
- bool* ok);
+ IdentifierT ParseAndClassifyIdentifier(bool* ok);
// Parses an identifier or a strict mode future reserved word, and indicate
// whether it is strict mode future reserved. Allows passing in function_kind
// for the case of parsing the identifier in a function expression, where the
@@ -1098,76 +1122,173 @@ class ParserBase : public ParserBaseTraits<Impl> {
ExpressionT ParseRegExpLiteral(bool* ok);
- ExpressionT ParsePrimaryExpression(ExpressionClassifier* classifier,
- bool* is_async, bool* ok);
- ExpressionT ParsePrimaryExpression(ExpressionClassifier* classifier,
- bool* ok) {
+ ExpressionT ParsePrimaryExpression(bool* is_async, bool* ok);
+ ExpressionT ParsePrimaryExpression(bool* ok) {
bool is_async;
- return ParsePrimaryExpression(classifier, &is_async, ok);
+ return ParsePrimaryExpression(&is_async, ok);
}
- ExpressionT ParseExpression(bool accept_IN, bool* ok);
- ExpressionT ParseExpression(bool accept_IN, ExpressionClassifier* classifier,
- bool* ok);
- ExpressionT ParseArrayLiteral(ExpressionClassifier* classifier, bool* ok);
- ExpressionT ParsePropertyName(IdentifierT* name, bool* is_get, bool* is_set,
- bool* is_computed_name,
- ExpressionClassifier* classifier, bool* ok);
- ExpressionT ParseObjectLiteral(ExpressionClassifier* classifier, bool* ok);
- ObjectLiteralPropertyT ParsePropertyDefinition(
- ObjectLiteralCheckerBase* checker, bool in_class, bool has_extends,
- MethodKind kind, bool* is_computed_name, bool* has_seen_constructor,
- ExpressionClassifier* classifier, IdentifierT* name, bool* ok);
- typename Traits::Type::ExpressionList ParseArguments(
- Scanner::Location* first_spread_pos, bool maybe_arrow,
- ExpressionClassifier* classifier, bool* ok);
- typename Traits::Type::ExpressionList ParseArguments(
- Scanner::Location* first_spread_pos, ExpressionClassifier* classifier,
- bool* ok) {
- return ParseArguments(first_spread_pos, false, classifier, ok);
- }
-
- ExpressionT ParseAssignmentExpression(bool accept_IN,
- ExpressionClassifier* classifier,
- bool* ok);
- ExpressionT ParseYieldExpression(bool accept_IN,
- ExpressionClassifier* classifier, bool* ok);
- ExpressionT ParseTailCallExpression(ExpressionClassifier* classifier,
- bool* ok);
- ExpressionT ParseConditionalExpression(bool accept_IN,
- ExpressionClassifier* classifier,
- bool* ok);
- ExpressionT ParseBinaryExpression(int prec, bool accept_IN,
- ExpressionClassifier* classifier, bool* ok);
- ExpressionT ParseUnaryExpression(ExpressionClassifier* classifier, bool* ok);
- ExpressionT ParsePostfixExpression(ExpressionClassifier* classifier,
- bool* ok);
- ExpressionT ParseLeftHandSideExpression(ExpressionClassifier* classifier,
- bool* ok);
- ExpressionT ParseMemberWithNewPrefixesExpression(
- ExpressionClassifier* classifier, bool* is_async, bool* ok);
- ExpressionT ParseMemberExpression(ExpressionClassifier* classifier,
- bool* is_async, bool* ok);
- ExpressionT ParseMemberExpressionContinuation(
- ExpressionT expression, bool* is_async, ExpressionClassifier* classifier,
- bool* ok);
+
+ // This method wraps the parsing of the expression inside a new expression
+ // classifier and calls RewriteNonPattern if parsing is successful.
+ // It should be used whenever we're parsing an expression that will be
+ // used as a non-pattern (i.e., in most cases).
+ V8_INLINE ExpressionT ParseExpression(bool accept_IN, bool* ok);
+
+ // This method does not wrap the parsing of the expression inside a
+ // new expression classifier; it uses the top-level classifier instead.
+ // It should be used whenever we're parsing something with the "cover"
+ // grammar that recognizes both patterns and non-patterns (which roughly
+ // corresponds to what's inside the parentheses generated by the symbol
+ // "CoverParenthesizedExpressionAndArrowParameterList" in the ES 2017
+ // specification).
+ ExpressionT ParseExpressionCoverGrammar(bool accept_IN, bool* ok);
+
+ ExpressionT ParseArrayLiteral(bool* ok);
+
+ enum class PropertyKind {
+ kAccessorProperty,
+ kValueProperty,
+ kShorthandProperty,
+ kMethodProperty,
+ kClassField,
+ kNotSet
+ };
+
+ bool SetPropertyKindFromToken(Token::Value token, PropertyKind* kind);
+ ExpressionT ParsePropertyName(IdentifierT* name, PropertyKind* kind,
+ bool* is_generator, bool* is_get, bool* is_set,
+ bool* is_async, bool* is_computed_name,
+ bool* ok);
+ ExpressionT ParseObjectLiteral(bool* ok);
+ ClassLiteralPropertyT ParseClassPropertyDefinition(
+ ClassLiteralChecker* checker, bool has_extends, bool* is_computed_name,
+ bool* has_seen_constructor, bool* ok);
+ FunctionLiteralT ParseClassFieldForInitializer(bool has_initializer,
+ bool* ok);
+ ObjectLiteralPropertyT ParseObjectPropertyDefinition(
+ ObjectLiteralChecker* checker, bool* is_computed_name, bool* ok);
+ ExpressionListT ParseArguments(Scanner::Location* first_spread_pos,
+ bool maybe_arrow, bool* ok);
+ ExpressionListT ParseArguments(Scanner::Location* first_spread_pos,
+ bool* ok) {
+ return ParseArguments(first_spread_pos, false, ok);
+ }
+
+ ExpressionT ParseAssignmentExpression(bool accept_IN, bool* ok);
+ ExpressionT ParseYieldExpression(bool accept_IN, bool* ok);
+ ExpressionT ParseConditionalExpression(bool accept_IN, bool* ok);
+ ExpressionT ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
+ ExpressionT ParseUnaryExpression(bool* ok);
+ ExpressionT ParsePostfixExpression(bool* ok);
+ ExpressionT ParseLeftHandSideExpression(bool* ok);
+ ExpressionT ParseMemberWithNewPrefixesExpression(bool* is_async, bool* ok);
+ ExpressionT ParseMemberExpression(bool* is_async, bool* ok);
+ ExpressionT ParseMemberExpressionContinuation(ExpressionT expression,
+ bool* is_async, bool* ok);
ExpressionT ParseArrowFunctionLiteral(bool accept_IN,
const FormalParametersT& parameters,
- bool is_async,
- const ExpressionClassifier& classifier,
bool* ok);
- ExpressionT ParseTemplateLiteral(ExpressionT tag, int start,
- ExpressionClassifier* classifier, bool* ok);
+ void ParseAsyncFunctionBody(Scope* scope, StatementListT body,
+ FunctionKind kind, FunctionBodyType type,
+ bool accept_IN, int pos, bool* ok);
+ ExpressionT ParseAsyncFunctionLiteral(bool* ok);
+ ExpressionT ParseClassLiteral(IdentifierT name,
+ Scanner::Location class_name_location,
+ bool name_is_strict_reserved,
+ int class_token_pos, bool* ok);
+ ExpressionT ParseTemplateLiteral(ExpressionT tag, int start, bool* ok);
ExpressionT ParseSuperExpression(bool is_new, bool* ok);
ExpressionT ParseNewTargetExpression(bool* ok);
- void ParseFormalParameter(FormalParametersT* parameters,
- ExpressionClassifier* classifier, bool* ok);
- void ParseFormalParameterList(FormalParametersT* parameters,
- ExpressionClassifier* classifier, bool* ok);
+ void ParseFormalParameter(FormalParametersT* parameters, bool* ok);
+ void ParseFormalParameterList(FormalParametersT* parameters, bool* ok);
void CheckArityRestrictions(int param_count, FunctionKind function_type,
bool has_rest, int formals_start_pos,
int formals_end_pos, bool* ok);
+ BlockT ParseVariableDeclarations(VariableDeclarationContext var_context,
+ DeclarationParsingResult* parsing_result,
+ ZoneList<const AstRawString*>* names,
+ bool* ok);
+ StatementT ParseAsyncFunctionDeclaration(ZoneList<const AstRawString*>* names,
+ bool default_export, bool* ok);
+ StatementT ParseFunctionDeclaration(bool* ok);
+ StatementT ParseHoistableDeclaration(ZoneList<const AstRawString*>* names,
+ bool default_export, bool* ok);
+ StatementT ParseHoistableDeclaration(int pos, ParseFunctionFlags flags,
+ ZoneList<const AstRawString*>* names,
+ bool default_export, bool* ok);
+ StatementT ParseClassDeclaration(ZoneList<const AstRawString*>* names,
+ bool default_export, bool* ok);
+ StatementT ParseNativeDeclaration(bool* ok);
+
+ // Under some circumstances, we allow preparsing to abort if the preparsed
+ // function is "long and trivial", and fully parse instead. Our current
+ // definition of "long and trivial" is:
+ // - over kLazyParseTrialLimit statements
+ // - all starting with an identifier (i.e., no if, for, while, etc.)
+ static const int kLazyParseTrialLimit = 200;
+
+ // TODO(nikolaos, marja): The first argument should not really be passed
+ // by value. The method is expected to add the parsed statements to the
+ // list. This works because in the case of the parser, StatementListT is
+ // a pointer whereas the preparser does not really modify the body.
+ V8_INLINE void ParseStatementList(StatementListT body, int end_token,
+ bool* ok) {
+ LazyParsingResult result = ParseStatementList(body, end_token, false, ok);
+ USE(result);
+ DCHECK_EQ(result, kLazyParsingComplete);
+ }
+ LazyParsingResult ParseStatementList(StatementListT body, int end_token,
+ bool may_abort, bool* ok);
+ StatementT ParseStatementListItem(bool* ok);
+ StatementT ParseStatement(ZoneList<const AstRawString*>* labels,
+ AllowLabelledFunctionStatement allow_function,
+ bool* ok);
+ StatementT ParseStatementAsUnlabelled(ZoneList<const AstRawString*>* labels,
+ bool* ok);
+ BlockT ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok);
+
+ // Parse a SubStatement in strict mode, or with an extra block scope in
+ // sloppy mode to handle
+ // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
+ // The legacy parameter indicates whether function declarations are
+ // banned by the ES2015 specification in this location, and they are being
+ // permitted here to match previous V8 behavior.
+ StatementT ParseScopedStatement(ZoneList<const AstRawString*>* labels,
+ bool legacy, bool* ok);
+
+ StatementT ParseVariableStatement(VariableDeclarationContext var_context,
+ ZoneList<const AstRawString*>* names,
+ bool* ok);
+
+ // Magical syntax support.
+ ExpressionT ParseV8Intrinsic(bool* ok);
+
+ ExpressionT ParseDoExpression(bool* ok);
+
+ StatementT ParseDebuggerStatement(bool* ok);
+
+ StatementT ParseExpressionOrLabelledStatement(
+ ZoneList<const AstRawString*>* labels,
+ AllowLabelledFunctionStatement allow_function, bool* ok);
+ StatementT ParseIfStatement(ZoneList<const AstRawString*>* labels, bool* ok);
+ StatementT ParseContinueStatement(bool* ok);
+ StatementT ParseBreakStatement(ZoneList<const AstRawString*>* labels,
+ bool* ok);
+ StatementT ParseReturnStatement(bool* ok);
+ StatementT ParseWithStatement(ZoneList<const AstRawString*>* labels,
+ bool* ok);
+ StatementT ParseDoWhileStatement(ZoneList<const AstRawString*>* labels,
+ bool* ok);
+ StatementT ParseWhileStatement(ZoneList<const AstRawString*>* labels,
+ bool* ok);
+ StatementT ParseThrowStatement(bool* ok);
+ StatementT ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
+ bool* ok);
+ StatementT ParseTryStatement(bool* ok);
+ StatementT ParseForStatement(ZoneList<const AstRawString*>* labels, bool* ok);
+
bool IsNextLetKeyword();
bool IsTrivialExpression();
@@ -1184,9 +1305,9 @@ class ParserBase : public ParserBaseTraits<Impl> {
bool IsValidReferenceExpression(ExpressionT expression);
bool IsAssignableIdentifier(ExpressionT expression) {
- if (!Traits::IsIdentifier(expression)) return false;
+ if (!impl()->IsIdentifier(expression)) return false;
if (is_strict(language_mode()) &&
- Traits::IsEvalOrArguments(Traits::AsIdentifier(expression))) {
+ impl()->IsEvalOrArguments(impl()->AsIdentifier(expression))) {
return false;
}
return true;
@@ -1201,8 +1322,8 @@ class ParserBase : public ParserBaseTraits<Impl> {
// forwards the information to scope.
Call::PossiblyEval CheckPossibleEvalCall(ExpressionT expression,
Scope* scope) {
- if (Traits::IsIdentifier(expression) &&
- Traits::IsEval(Traits::AsIdentifier(expression))) {
+ if (impl()->IsIdentifier(expression) &&
+ impl()->IsEval(impl()->AsIdentifier(expression))) {
scope->RecordEvalCall();
if (is_sloppy(scope->language_mode())) {
// For sloppy scopes we also have to record the call at function level,
@@ -1214,56 +1335,33 @@ class ParserBase : public ParserBaseTraits<Impl> {
return Call::NOT_EVAL;
}
- // Used to validate property names in object literals and class literals
- enum PropertyKind {
- kAccessorProperty,
- kValueProperty,
- kMethodProperty
- };
-
- class ObjectLiteralCheckerBase {
- public:
- explicit ObjectLiteralCheckerBase(ParserBase* parser) : parser_(parser) {}
-
- virtual void CheckProperty(Token::Value property, PropertyKind type,
- MethodKind method_type,
- ExpressionClassifier* classifier, bool* ok) = 0;
-
- virtual ~ObjectLiteralCheckerBase() {}
-
- protected:
- ParserBase* parser() const { return parser_; }
- Scanner* scanner() const { return parser_->scanner(); }
-
- private:
- ParserBase* parser_;
- };
-
// Validation per ES6 object literals.
- class ObjectLiteralChecker : public ObjectLiteralCheckerBase {
+ class ObjectLiteralChecker {
public:
explicit ObjectLiteralChecker(ParserBase* parser)
- : ObjectLiteralCheckerBase(parser), has_seen_proto_(false) {}
+ : parser_(parser), has_seen_proto_(false) {}
- void CheckProperty(Token::Value property, PropertyKind type,
- MethodKind method_type, ExpressionClassifier* classifier,
- bool* ok) override;
+ void CheckDuplicateProto(Token::Value property);
private:
bool IsProto() { return this->scanner()->LiteralMatches("__proto__", 9); }
+ ParserBase* parser() const { return parser_; }
+ Scanner* scanner() const { return parser_->scanner(); }
+
+ ParserBase* parser_;
bool has_seen_proto_;
};
// Validation per ES6 class literals.
- class ClassLiteralChecker : public ObjectLiteralCheckerBase {
+ class ClassLiteralChecker {
public:
explicit ClassLiteralChecker(ParserBase* parser)
- : ObjectLiteralCheckerBase(parser), has_seen_constructor_(false) {}
+ : parser_(parser), has_seen_constructor_(false) {}
- void CheckProperty(Token::Value property, PropertyKind type,
- MethodKind method_type, ExpressionClassifier* classifier,
- bool* ok) override;
+ void CheckClassMethodName(Token::Value property, PropertyKind type,
+ bool is_generator, bool is_async, bool is_static,
+ bool* ok);
private:
bool IsConstructor() {
@@ -1273,6 +1371,10 @@ class ParserBase : public ParserBaseTraits<Impl> {
return this->scanner()->LiteralMatches("prototype", 9);
}
+ ParserBase* parser() const { return parser_; }
+ Scanner* scanner() const { return parser_->scanner(); }
+
+ ParserBase* parser_;
bool has_seen_constructor_;
};
@@ -1281,19 +1383,63 @@ class ParserBase : public ParserBaseTraits<Impl> {
}
Scope* scope() const { return scope_state_->scope(); }
+ // Stack of expression classifiers.
+ // The top of the stack is always pointed to by classifier().
+ V8_INLINE ExpressionClassifier* classifier() const {
+ DCHECK_NOT_NULL(classifier_);
+ return classifier_;
+ }
+
+ // Accumulates the classifier that is on top of the stack (inner) to
+ // the one that is right below (outer) and pops the inner.
+ V8_INLINE void Accumulate(unsigned productions,
+ bool merge_non_patterns = true) {
+ DCHECK_NOT_NULL(classifier_);
+ ExpressionClassifier* previous = classifier_->previous();
+ DCHECK_NOT_NULL(previous);
+ previous->Accumulate(classifier_, productions, merge_non_patterns);
+ classifier_ = previous;
+ }
+
+ // Pops and discards the classifier that is on top of the stack
+ // without accumulating.
+ V8_INLINE void Discard() {
+ DCHECK_NOT_NULL(classifier_);
+ classifier_->Discard();
+ classifier_ = classifier_->previous();
+ }
+
+ // Accumulate errors that can be arbitrarily deep in an expression.
+ // These correspond to the ECMAScript spec's 'Contains' operation
+ // on productions. This includes:
+ //
+ // - YieldExpression is disallowed in arrow parameters in a generator.
+ // - AwaitExpression is disallowed in arrow parameters in an async function.
+ // - AwaitExpression is disallowed in async arrow parameters.
+ //
+ V8_INLINE void AccumulateFormalParameterContainmentErrors() {
+ Accumulate(ExpressionClassifier::FormalParameterInitializerProduction |
+ ExpressionClassifier::AsyncArrowFormalParametersProduction);
+ }
+
+ // Parser base's protected field members.
+
ScopeState* scope_state_; // Scope stack.
FunctionState* function_state_; // Function state stack.
v8::Extension* extension_;
FuncNameInferrer* fni_;
AstValueFactory* ast_value_factory_; // Not owned.
- typename Traits::Type::Factory ast_node_factory_;
+ typename Types::Factory ast_node_factory_;
ParserRecorder* log_;
Mode mode_;
bool parsing_module_;
uintptr_t stack_limit_;
+ // Parser base's private field members.
+
private:
Zone* zone_;
+ ExpressionClassifier* classifier_;
Scanner* scanner_;
bool stack_overflow_;
@@ -1308,6 +1454,7 @@ class ParserBase : public ParserBaseTraits<Impl> {
bool allow_harmony_async_await_;
bool allow_harmony_restrictive_generators_;
bool allow_harmony_trailing_commas_;
+ bool allow_harmony_class_fields_;
friend class DiscardableZoneScope;
};
@@ -1315,12 +1462,12 @@ class ParserBase : public ParserBaseTraits<Impl> {
template <typename Impl>
ParserBase<Impl>::FunctionState::FunctionState(
FunctionState** function_state_stack, ScopeState** scope_stack,
- Scope* scope, FunctionKind kind)
+ DeclarationScope* scope)
: ScopeState(scope_stack, scope),
next_materialized_literal_index_(0),
expected_property_count_(0),
- kind_(kind),
- generator_object_variable_(NULL),
+ generator_object_variable_(nullptr),
+ promise_variable_(nullptr),
function_state_stack_(function_state_stack),
outer_function_state_(*function_state_stack),
destructuring_assignments_to_rewrite_(16, scope->zone()),
@@ -1413,19 +1560,18 @@ void ParserBase<Impl>::ReportUnexpectedTokenAt(
MessageTemplate::Template message) {
const char* arg;
GetUnexpectedTokenMessage(token, &message, &source_location, &arg);
- Traits::ReportMessageAt(source_location, message, arg);
+ impl()->ReportMessageAt(source_location, message, arg);
}
template <typename Impl>
typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifier(
AllowRestrictedIdentifiers allow_restricted_identifiers, bool* ok) {
ExpressionClassifier classifier(this);
- auto result =
- ParseAndClassifyIdentifier(&classifier, CHECK_OK_CUSTOM(EmptyIdentifier));
+ auto result = ParseAndClassifyIdentifier(CHECK_OK_CUSTOM(EmptyIdentifier));
if (allow_restricted_identifiers == kDontAllowRestrictedIdentifiers) {
- ValidateAssignmentPattern(&classifier, CHECK_OK_CUSTOM(EmptyIdentifier));
- ValidateBindingPattern(&classifier, CHECK_OK_CUSTOM(EmptyIdentifier));
+ ValidateAssignmentPattern(CHECK_OK_CUSTOM(EmptyIdentifier));
+ ValidateBindingPattern(CHECK_OK_CUSTOM(EmptyIdentifier));
}
return result;
@@ -1433,33 +1579,32 @@ typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifier(
template <typename Impl>
typename ParserBase<Impl>::IdentifierT
-ParserBase<Impl>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
- bool* ok) {
+ParserBase<Impl>::ParseAndClassifyIdentifier(bool* ok) {
Token::Value next = Next();
if (next == Token::IDENTIFIER || next == Token::ASYNC ||
(next == Token::AWAIT && !parsing_module_ && !is_async_function())) {
- IdentifierT name = this->GetSymbol(scanner());
+ IdentifierT name = impl()->GetSymbol();
// When this function is used to read a formal parameter, we don't always
// know whether the function is going to be strict or sloppy. Indeed for
// arrow functions we don't always know that the identifier we are reading
// is actually a formal parameter. Therefore besides the errors that we
// must detect because we know we're in strict mode, we also record any
// error that we might make in the future once we know the language mode.
- if (this->IsEvalOrArguments(name)) {
- classifier->RecordStrictModeFormalParameterError(
+ if (impl()->IsEvalOrArguments(name)) {
+ classifier()->RecordStrictModeFormalParameterError(
scanner()->location(), MessageTemplate::kStrictEvalArguments);
if (is_strict(language_mode())) {
- classifier->RecordBindingPatternError(
+ classifier()->RecordBindingPatternError(
scanner()->location(), MessageTemplate::kStrictEvalArguments);
}
} else if (next == Token::AWAIT) {
- classifier->RecordAsyncArrowFormalParametersError(
+ classifier()->RecordAsyncArrowFormalParametersError(
scanner()->location(), MessageTemplate::kAwaitBindingIdentifier);
}
- if (classifier->duplicate_finder() != nullptr &&
- scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
- classifier->RecordDuplicateFormalParameterError(scanner()->location());
+ if (classifier()->duplicate_finder() != nullptr &&
+ scanner()->FindSymbol(classifier()->duplicate_finder(), 1) != 0) {
+ classifier()->RecordDuplicateFormalParameterError(scanner()->location());
}
return name;
} else if (is_sloppy(language_mode()) &&
@@ -1467,25 +1612,25 @@ ParserBase<Impl>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
next == Token::ESCAPED_STRICT_RESERVED_WORD ||
next == Token::LET || next == Token::STATIC ||
(next == Token::YIELD && !is_generator()))) {
- classifier->RecordStrictModeFormalParameterError(
+ classifier()->RecordStrictModeFormalParameterError(
scanner()->location(), MessageTemplate::kUnexpectedStrictReserved);
if (next == Token::ESCAPED_STRICT_RESERVED_WORD &&
is_strict(language_mode())) {
ReportUnexpectedToken(next);
*ok = false;
- return Traits::EmptyIdentifier();
+ return impl()->EmptyIdentifier();
}
if (next == Token::LET ||
(next == Token::ESCAPED_STRICT_RESERVED_WORD &&
scanner()->is_literal_contextual_keyword(CStrVector("let")))) {
- classifier->RecordLetPatternError(scanner()->location(),
- MessageTemplate::kLetInLexicalBinding);
+ classifier()->RecordLetPatternError(
+ scanner()->location(), MessageTemplate::kLetInLexicalBinding);
}
- return this->GetSymbol(scanner());
+ return impl()->GetSymbol();
} else {
- this->ReportUnexpectedToken(next);
+ ReportUnexpectedToken(next);
*ok = false;
- return Traits::EmptyIdentifier();
+ return impl()->EmptyIdentifier();
}
}
@@ -1505,10 +1650,10 @@ ParserBase<Impl>::ParseIdentifierOrStrictReservedWord(
} else {
ReportUnexpectedToken(next);
*ok = false;
- return Traits::EmptyIdentifier();
+ return impl()->EmptyIdentifier();
}
- return this->GetSymbol(scanner());
+ return impl()->GetSymbol();
}
template <typename Impl>
@@ -1521,12 +1666,12 @@ typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifierName(
next != Token::FUTURE_STRICT_RESERVED_WORD &&
next != Token::ESCAPED_KEYWORD &&
next != Token::ESCAPED_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
- this->ReportUnexpectedToken(next);
+ ReportUnexpectedToken(next);
*ok = false;
- return Traits::EmptyIdentifier();
+ return impl()->EmptyIdentifier();
}
- return this->GetSymbol(scanner());
+ return impl()->GetSymbol();
}
template <typename Impl>
@@ -1537,18 +1682,18 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral(
Next();
ReportMessage(MessageTemplate::kUnterminatedRegExp);
*ok = false;
- return Traits::EmptyExpression();
+ return impl()->EmptyExpression();
}
int literal_index = function_state_->NextMaterializedLiteralIndex();
- IdentifierT js_pattern = this->GetNextSymbol(scanner());
+ IdentifierT js_pattern = impl()->GetNextSymbol();
Maybe<RegExp::Flags> flags = scanner()->ScanRegExpFlags();
if (flags.IsNothing()) {
Next();
ReportMessage(MessageTemplate::kMalformedRegExpFlags);
*ok = false;
- return Traits::EmptyExpression();
+ return impl()->EmptyExpression();
}
int js_flags = flags.FromJust();
Next();
@@ -1557,7 +1702,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral(
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
- ExpressionClassifier* classifier, bool* is_async, bool* ok) {
+ bool* is_async, bool* ok) {
// PrimaryExpression ::
// 'this'
// 'null'
@@ -1573,14 +1718,14 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
// '(' Expression ')'
// TemplateLiteral
// do Block
- // AsyncFunctionExpression
+ // AsyncFunctionLiteral
int beg_pos = peek_position();
switch (peek()) {
case Token::THIS: {
- BindingPatternUnexpectedToken(classifier);
+ BindingPatternUnexpectedToken();
Consume(Token::THIS);
- return this->ThisExpression(beg_pos);
+ return impl()->ThisExpression(beg_pos);
}
case Token::NULL_LITERAL:
@@ -1588,15 +1733,15 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
case Token::FALSE_LITERAL:
case Token::SMI:
case Token::NUMBER:
- BindingPatternUnexpectedToken(classifier);
- return this->ExpressionFromLiteral(Next(), beg_pos, scanner(), factory());
+ BindingPatternUnexpectedToken();
+ return impl()->ExpressionFromLiteral(Next(), beg_pos);
case Token::ASYNC:
if (allow_harmony_async_await() &&
!scanner()->HasAnyLineTerminatorAfterNext() &&
PeekAhead() == Token::FUNCTION) {
Consume(Token::ASYNC);
- return impl()->ParseAsyncFunctionExpression(CHECK_OK);
+ return ParseAsyncFunctionLiteral(CHECK_OK);
}
// CoverCallExpressionAndAsyncArrowHead
*is_async = true;
@@ -1609,28 +1754,28 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
case Token::ESCAPED_STRICT_RESERVED_WORD:
case Token::FUTURE_STRICT_RESERVED_WORD: {
// Using eval or arguments in this context is OK even in strict mode.
- IdentifierT name = ParseAndClassifyIdentifier(classifier, CHECK_OK);
- return this->ExpressionFromIdentifier(name, beg_pos,
- scanner()->location().end_pos);
+ IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
+ return impl()->ExpressionFromIdentifier(name, beg_pos,
+ scanner()->location().end_pos);
}
case Token::STRING: {
- BindingPatternUnexpectedToken(classifier);
+ BindingPatternUnexpectedToken();
Consume(Token::STRING);
- return this->ExpressionFromString(beg_pos, scanner(), factory());
+ return impl()->ExpressionFromString(beg_pos);
}
case Token::ASSIGN_DIV:
case Token::DIV:
- classifier->RecordBindingPatternError(
+ classifier()->RecordBindingPatternError(
scanner()->peek_location(), MessageTemplate::kUnexpectedTokenRegExp);
- return this->ParseRegExpLiteral(ok);
+ return ParseRegExpLiteral(ok);
case Token::LBRACK:
- return this->ParseArrayLiteral(classifier, ok);
+ return ParseArrayLiteral(ok);
case Token::LBRACE:
- return this->ParseObjectLiteral(classifier, ok);
+ return ParseObjectLiteral(ok);
case Token::LPAREN: {
// Arrow function formal parameters are either a single identifier or a
@@ -1638,61 +1783,34 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
// Parentheses are not valid on the LHS of a BindingPattern, so we use the
// is_valid_binding_pattern() check to detect multiple levels of
// parenthesization.
- bool pattern_error = !classifier->is_valid_binding_pattern();
- classifier->RecordPatternError(scanner()->peek_location(),
- MessageTemplate::kUnexpectedToken,
- Token::String(Token::LPAREN));
- if (pattern_error) ArrowFormalParametersUnexpectedToken(classifier);
+ bool pattern_error = !classifier()->is_valid_binding_pattern();
+ classifier()->RecordPatternError(scanner()->peek_location(),
+ MessageTemplate::kUnexpectedToken,
+ Token::String(Token::LPAREN));
+ if (pattern_error) ArrowFormalParametersUnexpectedToken();
Consume(Token::LPAREN);
if (Check(Token::RPAREN)) {
// ()=>x. The continuation that looks for the => is in
// ParseAssignmentExpression.
- classifier->RecordExpressionError(scanner()->location(),
- MessageTemplate::kUnexpectedToken,
- Token::String(Token::RPAREN));
+ classifier()->RecordExpressionError(scanner()->location(),
+ MessageTemplate::kUnexpectedToken,
+ Token::String(Token::RPAREN));
return factory()->NewEmptyParentheses(beg_pos);
- } else if (Check(Token::ELLIPSIS)) {
- // (...x)=>x. The continuation that looks for the => is in
- // ParseAssignmentExpression.
- int ellipsis_pos = position();
- int expr_pos = peek_position();
- classifier->RecordExpressionError(scanner()->location(),
- MessageTemplate::kUnexpectedToken,
- Token::String(Token::ELLIPSIS));
- classifier->RecordNonSimpleParameter();
- ExpressionClassifier binding_classifier(this);
- ExpressionT expr = this->ParseAssignmentExpression(
- true, &binding_classifier, CHECK_OK);
- classifier->Accumulate(&binding_classifier,
- ExpressionClassifier::AllProductions);
- if (!this->IsIdentifier(expr) && !IsValidPattern(expr)) {
- classifier->RecordArrowFormalParametersError(
- Scanner::Location(ellipsis_pos, scanner()->location().end_pos),
- MessageTemplate::kInvalidRestParameter);
- }
- if (peek() == Token::COMMA) {
- ReportMessageAt(scanner()->peek_location(),
- MessageTemplate::kParamAfterRest);
- *ok = false;
- return this->EmptyExpression();
- }
- Expect(Token::RPAREN, CHECK_OK);
- return factory()->NewSpread(expr, ellipsis_pos, expr_pos);
}
// Heuristically try to detect immediately called functions before
// seeing the call parentheses.
function_state_->set_next_function_is_parenthesized(peek() ==
Token::FUNCTION);
- ExpressionT expr = this->ParseExpression(true, classifier, CHECK_OK);
+ ExpressionT expr = ParseExpressionCoverGrammar(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
return expr;
}
case Token::CLASS: {
- BindingPatternUnexpectedToken(classifier);
+ BindingPatternUnexpectedToken();
Consume(Token::CLASS);
- int class_token_position = position();
- IdentifierT name = this->EmptyIdentifier();
+ int class_token_pos = position();
+ IdentifierT name = impl()->EmptyIdentifier();
bool is_strict_reserved_name = false;
Scanner::Location class_name_location = Scanner::Location::invalid();
if (peek_any_identifier()) {
@@ -1700,28 +1818,26 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
CHECK_OK);
class_name_location = scanner()->location();
}
- return impl()->ParseClassLiteral(classifier, name, class_name_location,
- is_strict_reserved_name,
- class_token_position, ok);
+ return ParseClassLiteral(name, class_name_location,
+ is_strict_reserved_name, class_token_pos, ok);
}
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL:
- BindingPatternUnexpectedToken(classifier);
- return this->ParseTemplateLiteral(Traits::NoTemplateTag(), beg_pos,
- classifier, ok);
+ BindingPatternUnexpectedToken();
+ return ParseTemplateLiteral(impl()->NoTemplateTag(), beg_pos, ok);
case Token::MOD:
if (allow_natives() || extension_ != NULL) {
- BindingPatternUnexpectedToken(classifier);
- return impl()->ParseV8Intrinsic(ok);
+ BindingPatternUnexpectedToken();
+ return ParseV8Intrinsic(ok);
}
break;
case Token::DO:
if (allow_harmony_do_expressions()) {
- BindingPatternUnexpectedToken(classifier);
- return impl()->ParseDoExpression(ok);
+ BindingPatternUnexpectedToken();
+ return ParseDoExpression(ok);
}
break;
@@ -1731,78 +1847,71 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
ReportUnexpectedToken(Next());
*ok = false;
- return this->EmptyExpression();
+ return impl()->EmptyExpression();
}
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseExpression(
bool accept_IN, bool* ok) {
ExpressionClassifier classifier(this);
- ExpressionT result = ParseExpression(accept_IN, &classifier, CHECK_OK);
- impl()->RewriteNonPattern(&classifier, CHECK_OK);
+ ExpressionT result = ParseExpressionCoverGrammar(accept_IN, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
return result;
}
template <typename Impl>
-typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseExpression(
- bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseExpressionCoverGrammar(bool accept_IN, bool* ok) {
// Expression ::
// AssignmentExpression
// Expression ',' AssignmentExpression
- ExpressionT result;
- {
+ ExpressionT result = impl()->EmptyExpression();
+ while (true) {
+ int comma_pos = position();
ExpressionClassifier binding_classifier(this);
- result = this->ParseAssignmentExpression(accept_IN, &binding_classifier,
- CHECK_OK);
- classifier->Accumulate(&binding_classifier,
- ExpressionClassifier::AllProductions);
- }
- bool is_simple_parameter_list = this->IsIdentifier(result);
- bool seen_rest = false;
- while (peek() == Token::COMMA) {
- CheckNoTailCallExpressions(classifier, CHECK_OK);
- if (seen_rest) {
- // At this point the production can't possibly be valid, but we don't know
- // which error to signal.
- classifier->RecordArrowFormalParametersError(
- scanner()->peek_location(), MessageTemplate::kParamAfterRest);
- }
- Consume(Token::COMMA);
- bool is_rest = false;
+ ExpressionT right;
+ if (Check(Token::ELLIPSIS)) {
+ // 'x, y, ...z' in CoverParenthesizedExpressionAndArrowParameterList only
+ // as the formal parameters of'(x, y, ...z) => foo', and is not itself a
+ // valid expression.
+ classifier()->RecordExpressionError(scanner()->location(),
+ MessageTemplate::kUnexpectedToken,
+ Token::String(Token::ELLIPSIS));
+ int ellipsis_pos = position();
+ int pattern_pos = peek_position();
+ ExpressionT pattern = ParsePrimaryExpression(CHECK_OK);
+ ValidateBindingPattern(CHECK_OK);
+ right = factory()->NewSpread(pattern, ellipsis_pos, pattern_pos);
+ } else {
+ right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+ }
+ // No need to accumulate binding pattern-related errors, since
+ // an Expression can't be a binding pattern anyway.
+ impl()->Accumulate(ExpressionClassifier::AllProductions &
+ ~(ExpressionClassifier::BindingPatternProduction |
+ ExpressionClassifier::LetPatternProduction));
+ if (!impl()->IsIdentifier(right)) classifier()->RecordNonSimpleParameter();
+ if (impl()->IsEmptyExpression(result)) {
+ // First time through the loop.
+ result = right;
+ } else {
+ result =
+ factory()->NewBinaryOperation(Token::COMMA, result, right, comma_pos);
+ }
+
+ if (!Check(Token::COMMA)) break;
+
+ if (right->IsSpread()) {
+ classifier()->RecordArrowFormalParametersError(
+ scanner()->location(), MessageTemplate::kParamAfterRest);
+ }
+
if (allow_harmony_trailing_commas() && peek() == Token::RPAREN &&
PeekAhead() == Token::ARROW) {
// a trailing comma is allowed at the end of an arrow parameter list
break;
- } else if (peek() == Token::ELLIPSIS) {
- // 'x, y, ...z' in CoverParenthesizedExpressionAndArrowParameterList only
- // as the formal parameters of'(x, y, ...z) => foo', and is not itself a
- // valid expression or binding pattern.
- ExpressionUnexpectedToken(classifier);
- BindingPatternUnexpectedToken(classifier);
- Consume(Token::ELLIPSIS);
- seen_rest = is_rest = true;
- }
- int pos = position(), expr_pos = peek_position();
- ExpressionClassifier binding_classifier(this);
- ExpressionT right = this->ParseAssignmentExpression(
- accept_IN, &binding_classifier, CHECK_OK);
- classifier->Accumulate(&binding_classifier,
- ExpressionClassifier::AllProductions);
- if (is_rest) {
- if (!this->IsIdentifier(right) && !IsValidPattern(right)) {
- classifier->RecordArrowFormalParametersError(
- Scanner::Location(pos, scanner()->location().end_pos),
- MessageTemplate::kInvalidRestParameter);
- }
- right = factory()->NewSpread(right, pos, expr_pos);
}
- is_simple_parameter_list =
- is_simple_parameter_list && this->IsIdentifier(right);
- result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
- }
- if (!is_simple_parameter_list || seen_rest) {
- classifier->RecordNonSimpleParameter();
}
return result;
@@ -1810,26 +1919,23 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseExpression(
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
- ExpressionClassifier* classifier, bool* ok) {
+ bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
int pos = peek_position();
- typename Traits::Type::ExpressionList values =
- this->NewExpressionList(4, zone_);
+ ExpressionListT values = impl()->NewExpressionList(4);
int first_spread_index = -1;
Expect(Token::LBRACK, CHECK_OK);
while (peek() != Token::RBRACK) {
ExpressionT elem;
if (peek() == Token::COMMA) {
- elem = this->GetLiteralTheHole(peek_position(), factory());
+ elem = impl()->GetLiteralTheHole(peek_position());
} else if (peek() == Token::ELLIPSIS) {
int start_pos = peek_position();
Consume(Token::ELLIPSIS);
int expr_pos = peek_position();
- ExpressionT argument =
- this->ParseAssignmentExpression(true, classifier, CHECK_OK);
- CheckNoTailCallExpressions(classifier, CHECK_OK);
+ ExpressionT argument = ParseAssignmentExpression(true, CHECK_OK);
elem = factory()->NewSpread(argument, start_pos, expr_pos);
if (first_spread_index < 0) {
@@ -1837,25 +1943,23 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
}
if (argument->IsAssignment()) {
- classifier->RecordPatternError(
+ classifier()->RecordPatternError(
Scanner::Location(start_pos, scanner()->location().end_pos),
MessageTemplate::kInvalidDestructuringTarget);
} else {
- CheckDestructuringElement(argument, classifier, start_pos,
+ CheckDestructuringElement(argument, start_pos,
scanner()->location().end_pos);
}
if (peek() == Token::COMMA) {
- classifier->RecordPatternError(
+ classifier()->RecordPatternError(
Scanner::Location(start_pos, scanner()->location().end_pos),
MessageTemplate::kElementAfterRest);
}
} else {
int beg_pos = peek_position();
- elem = this->ParseAssignmentExpression(true, classifier, CHECK_OK);
- CheckNoTailCallExpressions(classifier, CHECK_OK);
- CheckDestructuringElement(elem, classifier, beg_pos,
- scanner()->location().end_pos);
+ elem = ParseAssignmentExpression(true, CHECK_OK);
+ CheckDestructuringElement(elem, beg_pos, scanner()->location().end_pos);
}
values->Add(elem, zone_);
if (peek() != Token::RBRACK) {
@@ -1878,19 +1982,87 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
// to change. Also, this error message will never appear while pre-
// parsing (this is OK, as it is an implementation limitation).
ReportMessage(MessageTemplate::kTooManySpreads);
- return this->EmptyExpression();
+ return impl()->EmptyExpression();
}
}
return result;
}
template <class Impl>
+bool ParserBase<Impl>::SetPropertyKindFromToken(Token::Value token,
+ PropertyKind* kind) {
+ // This returns true, setting the property kind, iff the given token is one
+ // which must occur after a property name, indicating that the previous token
+ // was in fact a name and not a modifier (like the "get" in "get x").
+ switch (token) {
+ case Token::COLON:
+ *kind = PropertyKind::kValueProperty;
+ return true;
+ case Token::COMMA:
+ case Token::RBRACE:
+ case Token::ASSIGN:
+ *kind = PropertyKind::kShorthandProperty;
+ return true;
+ case Token::LPAREN:
+ *kind = PropertyKind::kMethodProperty;
+ return true;
+ case Token::MUL:
+ case Token::SEMICOLON:
+ *kind = PropertyKind::kClassField;
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+template <class Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
- IdentifierT* name, bool* is_get, bool* is_set, bool* is_computed_name,
- ExpressionClassifier* classifier, bool* ok) {
+ IdentifierT* name, PropertyKind* kind, bool* is_generator, bool* is_get,
+ bool* is_set, bool* is_async, bool* is_computed_name, bool* ok) {
+ DCHECK(*kind == PropertyKind::kNotSet);
+ DCHECK(!*is_generator);
+ DCHECK(!*is_get);
+ DCHECK(!*is_set);
+ DCHECK(!*is_async);
+ DCHECK(!*is_computed_name);
+
+ *is_generator = Check(Token::MUL);
+ if (*is_generator) {
+ *kind = PropertyKind::kMethodProperty;
+ }
+
Token::Value token = peek();
int pos = peek_position();
+ if (allow_harmony_async_await() && !*is_generator && token == Token::ASYNC &&
+ !scanner()->HasAnyLineTerminatorAfterNext()) {
+ Consume(Token::ASYNC);
+ token = peek();
+ if (SetPropertyKindFromToken(token, kind)) {
+ *name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'async'
+ impl()->PushLiteralName(*name);
+ return factory()->NewStringLiteral(*name, pos);
+ }
+ *kind = PropertyKind::kMethodProperty;
+ *is_async = true;
+ pos = peek_position();
+ }
+
+ if (token == Token::IDENTIFIER && !*is_generator && !*is_async) {
+ // This is checking for 'get' and 'set' in particular.
+ Consume(Token::IDENTIFIER);
+ token = peek();
+ if (SetPropertyKindFromToken(token, kind) ||
+ !scanner()->IsGetOrSet(is_get, is_set)) {
+ *name = impl()->GetSymbol();
+ impl()->PushLiteralName(*name);
+ return factory()->NewStringLiteral(*name, pos);
+ }
+ *kind = PropertyKind::kAccessorProperty;
+ pos = peek_position();
+ }
+
// For non computed property names we normalize the name a bit:
//
// "12" -> 12
@@ -1900,274 +2072,417 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
//
// This is important because we use the property name as a key in a hash
// table when we compute constant properties.
+ ExpressionT expression = impl()->EmptyExpression();
switch (token) {
case Token::STRING:
Consume(Token::STRING);
- *name = this->GetSymbol(scanner());
+ *name = impl()->GetSymbol();
break;
case Token::SMI:
Consume(Token::SMI);
- *name = this->GetNumberAsSymbol(scanner());
+ *name = impl()->GetNumberAsSymbol();
break;
case Token::NUMBER:
Consume(Token::NUMBER);
- *name = this->GetNumberAsSymbol(scanner());
+ *name = impl()->GetNumberAsSymbol();
break;
case Token::LBRACK: {
+ *name = impl()->EmptyIdentifier();
*is_computed_name = true;
Consume(Token::LBRACK);
ExpressionClassifier computed_name_classifier(this);
- ExpressionT expression =
- ParseAssignmentExpression(true, &computed_name_classifier, CHECK_OK);
- impl()->RewriteNonPattern(&computed_name_classifier, CHECK_OK);
- classifier->Accumulate(&computed_name_classifier,
- ExpressionClassifier::ExpressionProductions);
+ expression = ParseAssignmentExpression(true, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
+ impl()->AccumulateFormalParameterContainmentErrors();
Expect(Token::RBRACK, CHECK_OK);
- return expression;
+ break;
}
default:
*name = ParseIdentifierName(CHECK_OK);
- scanner()->IsGetOrSet(is_get, is_set);
break;
}
+ if (*kind == PropertyKind::kNotSet) {
+ SetPropertyKindFromToken(peek(), kind);
+ }
+
+ if (*is_computed_name) {
+ return expression;
+ }
+
+ impl()->PushLiteralName(*name);
+
uint32_t index;
- return this->IsArrayIndex(*name, &index)
+ return impl()->IsArrayIndex(*name, &index)
? factory()->NewNumberLiteral(index, pos)
: factory()->NewStringLiteral(*name, pos);
}
template <typename Impl>
-typename ParserBase<Impl>::ObjectLiteralPropertyT
-ParserBase<Impl>::ParsePropertyDefinition(
- ObjectLiteralCheckerBase* checker, bool in_class, bool has_extends,
- MethodKind method_kind, bool* is_computed_name, bool* has_seen_constructor,
- ExpressionClassifier* classifier, IdentifierT* name, bool* ok) {
- DCHECK(!in_class || IsStaticMethod(method_kind) ||
- has_seen_constructor != nullptr);
+typename ParserBase<Impl>::ClassLiteralPropertyT
+ParserBase<Impl>::ParseClassPropertyDefinition(ClassLiteralChecker* checker,
+ bool has_extends,
+ bool* is_computed_name,
+ bool* has_seen_constructor,
+ bool* ok) {
+ DCHECK(has_seen_constructor != nullptr);
bool is_get = false;
bool is_set = false;
- bool is_generator = Check(Token::MUL);
+ bool is_generator = false;
bool is_async = false;
- const bool is_static = IsStaticMethod(method_kind);
+ bool is_static = false;
+ PropertyKind kind = PropertyKind::kNotSet;
Token::Value name_token = peek();
- if (is_generator) {
- method_kind |= MethodKind::kGenerator;
- } else if (allow_harmony_async_await() && name_token == Token::ASYNC &&
- !scanner()->HasAnyLineTerminatorAfterNext() &&
- PeekAhead() != Token::LPAREN && PeekAhead()) {
- is_async = true;
+ IdentifierT name = impl()->EmptyIdentifier();
+ ExpressionT name_expression;
+ if (name_token == Token::STATIC) {
+ Consume(Token::STATIC);
+ if (peek() == Token::LPAREN) {
+ kind = PropertyKind::kMethodProperty;
+ name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
+ name_expression = factory()->NewStringLiteral(name, position());
+ } else if (peek() == Token::ASSIGN || peek() == Token::SEMICOLON ||
+ peek() == Token::RBRACE) {
+ name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
+ name_expression = factory()->NewStringLiteral(name, position());
+ } else {
+ is_static = true;
+ name_expression = ParsePropertyName(
+ &name, &kind, &is_generator, &is_get, &is_set, &is_async,
+ is_computed_name, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+ }
+ } else {
+ name_expression = ParsePropertyName(
+ &name, &kind, &is_generator, &is_get, &is_set, &is_async,
+ is_computed_name, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+ }
+
+ switch (kind) {
+ case PropertyKind::kClassField:
+ case PropertyKind::kNotSet: // This case is a name followed by a name or
+ // other property. Here we have to assume
+ // that's an uninitialized field followed by a
+ // linebreak followed by a property, with ASI
+ // adding the semicolon. If not, there will be
+ // a syntax error after parsing the first name
+ // as an uninitialized field.
+ case PropertyKind::kShorthandProperty:
+ case PropertyKind::kValueProperty:
+ if (allow_harmony_class_fields()) {
+ bool has_initializer = Check(Token::ASSIGN);
+ ExpressionT function_literal = ParseClassFieldForInitializer(
+ has_initializer, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+ ExpectSemicolon(CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+ return factory()->NewClassLiteralProperty(
+ name_expression, function_literal, ClassLiteralProperty::FIELD,
+ is_static, *is_computed_name);
+ } else {
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return impl()->EmptyClassLiteralProperty();
+ }
+
+ case PropertyKind::kMethodProperty: {
+ DCHECK(!is_get && !is_set);
+
+ // MethodDefinition
+ // PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
+ // '*' PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
+
+ if (!*is_computed_name) {
+ checker->CheckClassMethodName(
+ name_token, PropertyKind::kMethodProperty, is_generator, is_async,
+ is_static, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+ }
+
+ FunctionKind kind = is_generator
+ ? FunctionKind::kConciseGeneratorMethod
+ : is_async ? FunctionKind::kAsyncConciseMethod
+ : FunctionKind::kConciseMethod;
+
+ if (!is_static && impl()->IsConstructor(name)) {
+ *has_seen_constructor = true;
+ kind = has_extends ? FunctionKind::kSubclassConstructor
+ : FunctionKind::kBaseConstructor;
+ }
+
+ ExpressionT value = impl()->ParseFunctionLiteral(
+ name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
+ language_mode(), CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+
+ return factory()->NewClassLiteralProperty(name_expression, value,
+ ClassLiteralProperty::METHOD,
+ is_static, *is_computed_name);
+ }
+
+ case PropertyKind::kAccessorProperty: {
+ DCHECK((is_get || is_set) && !is_generator && !is_async);
+
+ if (!*is_computed_name) {
+ checker->CheckClassMethodName(
+ name_token, PropertyKind::kAccessorProperty, false, false,
+ is_static, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+ // Make sure the name expression is a string since we need a Name for
+ // Runtime_DefineAccessorPropertyUnchecked and since we can determine
+ // this statically we can skip the extra runtime check.
+ name_expression =
+ factory()->NewStringLiteral(name, name_expression->position());
+ }
+
+ FunctionKind kind = is_get ? FunctionKind::kGetterFunction
+ : FunctionKind::kSetterFunction;
+
+ FunctionLiteralT value = impl()->ParseFunctionLiteral(
+ name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
+ language_mode(), CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+
+ if (!*is_computed_name) {
+ impl()->AddAccessorPrefixToFunctionName(is_get, value, name);
+ }
+
+ return factory()->NewClassLiteralProperty(
+ name_expression, value,
+ is_get ? ClassLiteralProperty::GETTER : ClassLiteralProperty::SETTER,
+ is_static, *is_computed_name);
+ }
}
+ UNREACHABLE();
+ return impl()->EmptyClassLiteralProperty();
+}
+template <typename Impl>
+typename ParserBase<Impl>::FunctionLiteralT
+ParserBase<Impl>::ParseClassFieldForInitializer(bool has_initializer,
+ bool* ok) {
+ // Makes a concise method which evaluates and returns the initialized value
+ // (or undefined if absent).
+ FunctionKind kind = FunctionKind::kConciseMethod;
+ DeclarationScope* initializer_scope = NewFunctionScope(kind);
+ initializer_scope->set_start_position(scanner()->location().end_pos);
+ FunctionState initializer_state(&function_state_, &scope_state_,
+ initializer_scope);
+ DCHECK(scope() == initializer_scope);
+ scope()->SetLanguageMode(STRICT);
+ ExpressionClassifier expression_classifier(this);
+ ExpressionT value;
+ if (has_initializer) {
+ value = this->ParseAssignmentExpression(
+ true, CHECK_OK_CUSTOM(EmptyFunctionLiteral));
+ impl()->RewriteNonPattern(CHECK_OK_CUSTOM(EmptyFunctionLiteral));
+ } else {
+ value = factory()->NewUndefinedLiteral(kNoSourcePosition);
+ }
+ initializer_scope->set_end_position(scanner()->location().end_pos);
+ typename Types::StatementList body = impl()->NewStatementList(1);
+ body->Add(factory()->NewReturnStatement(value, kNoSourcePosition), zone());
+ FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
+ impl()->EmptyIdentifierString(), initializer_scope, body,
+ initializer_state.materialized_literal_count(),
+ initializer_state.expected_property_count(), 0,
+ FunctionLiteral::kNoDuplicateParameters,
+ FunctionLiteral::kAnonymousExpression,
+ FunctionLiteral::kShouldLazyCompile, initializer_scope->start_position());
+ function_literal->set_is_class_field_initializer(true);
+ return function_literal;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::ObjectLiteralPropertyT
+ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
+ bool* is_computed_name,
+ bool* ok) {
+ bool is_get = false;
+ bool is_set = false;
+ bool is_generator = false;
+ bool is_async = false;
+ PropertyKind kind = PropertyKind::kNotSet;
+
+ IdentifierT name = impl()->EmptyIdentifier();
+ Token::Value name_token = peek();
int next_beg_pos = scanner()->peek_location().beg_pos;
int next_end_pos = scanner()->peek_location().end_pos;
- ExpressionT name_expression =
- ParsePropertyName(name, &is_get, &is_set, is_computed_name, classifier,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- if (fni_ != nullptr && !*is_computed_name) {
- this->PushLiteralName(fni_, *name);
- }
+ ExpressionT name_expression = ParsePropertyName(
+ &name, &kind, &is_generator, &is_get, &is_set, &is_async,
+ is_computed_name, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+ switch (kind) {
+ case PropertyKind::kValueProperty: {
+ DCHECK(!is_get && !is_set && !is_generator && !is_async);
- if (!in_class && !is_generator) {
- DCHECK(!IsStaticMethod(method_kind));
- if (peek() == Token::COLON) {
- // PropertyDefinition
- // PropertyName ':' AssignmentExpression
if (!*is_computed_name) {
- checker->CheckProperty(name_token, kValueProperty, MethodKind::kNormal,
- classifier,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ checker->CheckDuplicateProto(name_token);
}
Consume(Token::COLON);
int beg_pos = peek_position();
- ExpressionT value = this->ParseAssignmentExpression(
- true, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- CheckDestructuringElement(value, classifier, beg_pos,
- scanner()->location().end_pos);
+ ExpressionT value = ParseAssignmentExpression(
+ true, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ CheckDestructuringElement(value, beg_pos, scanner()->location().end_pos);
+
+ ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
+ name_expression, value, *is_computed_name);
+
+ if (!*is_computed_name) {
+ impl()->SetFunctionNameFromPropertyName(result, name);
+ }
- return factory()->NewObjectLiteralProperty(name_expression, value,
- is_static, *is_computed_name);
+ return result;
}
- if (Token::IsIdentifier(name_token, language_mode(), this->is_generator(),
- parsing_module_ || is_async_function()) &&
- (peek() == Token::COMMA || peek() == Token::RBRACE ||
- peek() == Token::ASSIGN)) {
+ case PropertyKind::kShorthandProperty: {
// PropertyDefinition
// IdentifierReference
// CoverInitializedName
//
// CoverInitializedName
// IdentifierReference Initializer?
- if (classifier->duplicate_finder() != nullptr &&
- scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
- classifier->RecordDuplicateFormalParameterError(scanner()->location());
+ DCHECK(!is_get && !is_set && !is_generator && !is_async);
+
+ if (!Token::IsIdentifier(name_token, language_mode(),
+ this->is_generator(),
+ parsing_module_ || is_async_function())) {
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return impl()->EmptyObjectLiteralProperty();
}
- if (this->IsEvalOrArguments(*name) && is_strict(language_mode())) {
- classifier->RecordBindingPatternError(
+ DCHECK(!*is_computed_name);
+
+ if (classifier()->duplicate_finder() != nullptr &&
+ scanner()->FindSymbol(classifier()->duplicate_finder(), 1) != 0) {
+ classifier()->RecordDuplicateFormalParameterError(
+ scanner()->location());
+ }
+
+ if (impl()->IsEvalOrArguments(name) && is_strict(language_mode())) {
+ classifier()->RecordBindingPatternError(
scanner()->location(), MessageTemplate::kStrictEvalArguments);
}
if (name_token == Token::LET) {
- classifier->RecordLetPatternError(
+ classifier()->RecordLetPatternError(
scanner()->location(), MessageTemplate::kLetInLexicalBinding);
}
if (name_token == Token::AWAIT) {
DCHECK(!is_async_function());
- classifier->RecordAsyncArrowFormalParametersError(
+ classifier()->RecordAsyncArrowFormalParametersError(
Scanner::Location(next_beg_pos, next_end_pos),
MessageTemplate::kAwaitBindingIdentifier);
}
ExpressionT lhs =
- this->ExpressionFromIdentifier(*name, next_beg_pos, next_end_pos);
- CheckDestructuringElement(lhs, classifier, next_beg_pos, next_end_pos);
+ impl()->ExpressionFromIdentifier(name, next_beg_pos, next_end_pos);
+ CheckDestructuringElement(lhs, next_beg_pos, next_end_pos);
ExpressionT value;
if (peek() == Token::ASSIGN) {
Consume(Token::ASSIGN);
ExpressionClassifier rhs_classifier(this);
- ExpressionT rhs = this->ParseAssignmentExpression(
- true, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- impl()->RewriteNonPattern(&rhs_classifier,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- classifier->Accumulate(&rhs_classifier,
- ExpressionClassifier::ExpressionProductions);
+ ExpressionT rhs = ParseAssignmentExpression(
+ true, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ impl()->RewriteNonPattern(CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ impl()->AccumulateFormalParameterContainmentErrors();
value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
kNoSourcePosition);
- classifier->RecordObjectLiteralError(
+ classifier()->RecordExpressionError(
Scanner::Location(next_beg_pos, scanner()->location().end_pos),
MessageTemplate::kInvalidCoverInitializedName);
- Traits::SetFunctionNameFromIdentifierRef(rhs, lhs);
+ impl()->SetFunctionNameFromIdentifierRef(rhs, lhs);
} else {
value = lhs;
}
return factory()->NewObjectLiteralProperty(
- name_expression, value, ObjectLiteralProperty::COMPUTED, is_static,
- false);
+ name_expression, value, ObjectLiteralProperty::COMPUTED, false);
}
- }
- // Method definitions are never valid in patterns.
- classifier->RecordPatternError(
- Scanner::Location(next_beg_pos, scanner()->location().end_pos),
- MessageTemplate::kInvalidDestructuringTarget);
+ case PropertyKind::kMethodProperty: {
+ DCHECK(!is_get && !is_set);
- if (is_async && !IsSpecialMethod(method_kind)) {
- DCHECK(!is_get);
- DCHECK(!is_set);
- bool dont_care;
- name_expression = ParsePropertyName(
- name, &dont_care, &dont_care, is_computed_name, classifier,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- method_kind |= MethodKind::kAsync;
- }
-
- if (is_generator || peek() == Token::LPAREN) {
- // MethodDefinition
- // PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
- // '*' PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
- if (!*is_computed_name) {
- checker->CheckProperty(name_token, kMethodProperty, method_kind,
- classifier,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- }
-
- FunctionKind kind = is_generator
- ? FunctionKind::kConciseGeneratorMethod
- : is_async ? FunctionKind::kAsyncConciseMethod
- : FunctionKind::kConciseMethod;
-
- if (in_class && !IsStaticMethod(method_kind) &&
- this->IsConstructor(*name)) {
- *has_seen_constructor = true;
- kind = has_extends ? FunctionKind::kSubclassConstructor
- : FunctionKind::kBaseConstructor;
- }
-
- ExpressionT value = impl()->ParseFunctionLiteral(
- *name, scanner()->location(), kSkipFunctionNameCheck, kind,
- kNoSourcePosition, FunctionLiteral::kAccessorOrMethod, language_mode(),
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
-
- return factory()->NewObjectLiteralProperty(name_expression, value,
- ObjectLiteralProperty::COMPUTED,
- is_static, *is_computed_name);
- }
-
- if (in_class && name_token == Token::STATIC && IsNormalMethod(method_kind)) {
- // ClassElement (static)
- // 'static' MethodDefinition
- *name = this->EmptyIdentifier();
- ObjectLiteralPropertyT property = ParsePropertyDefinition(
- checker, true, has_extends, MethodKind::kStatic, is_computed_name,
- nullptr, classifier, name, ok);
- impl()->RewriteNonPattern(classifier, ok);
- return property;
- }
-
- if (is_get || is_set) {
- // MethodDefinition (Accessors)
- // get PropertyName '(' ')' '{' FunctionBody '}'
- // set PropertyName '(' PropertySetParameterList ')' '{' FunctionBody '}'
- *name = this->EmptyIdentifier();
- bool dont_care = false;
- name_token = peek();
+ // MethodDefinition
+ // PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
+ // '*' PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
- name_expression = ParsePropertyName(
- name, &dont_care, &dont_care, is_computed_name, classifier,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ classifier()->RecordPatternError(
+ Scanner::Location(next_beg_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
+
+ FunctionKind kind = is_generator
+ ? FunctionKind::kConciseGeneratorMethod
+ : is_async ? FunctionKind::kAsyncConciseMethod
+ : FunctionKind::kConciseMethod;
+
+ ExpressionT value = impl()->ParseFunctionLiteral(
+ name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
+ language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- if (!*is_computed_name) {
- checker->CheckProperty(name_token, kAccessorProperty, method_kind,
- classifier,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ return factory()->NewObjectLiteralProperty(
+ name_expression, value, ObjectLiteralProperty::COMPUTED,
+ *is_computed_name);
}
- typename Traits::Type::FunctionLiteral value = impl()->ParseFunctionLiteral(
- *name, scanner()->location(), kSkipFunctionNameCheck,
- is_get ? FunctionKind::kGetterFunction : FunctionKind::kSetterFunction,
- kNoSourcePosition, FunctionLiteral::kAccessorOrMethod, language_mode(),
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ case PropertyKind::kAccessorProperty: {
+ DCHECK((is_get || is_set) && !(is_set && is_get) && !is_generator &&
+ !is_async);
+
+ classifier()->RecordPatternError(
+ Scanner::Location(next_beg_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
+
+ if (!*is_computed_name) {
+ // Make sure the name expression is a string since we need a Name for
+ // Runtime_DefineAccessorPropertyUnchecked and since we can determine
+ // this statically we can skip the extra runtime check.
+ name_expression =
+ factory()->NewStringLiteral(name, name_expression->position());
+ }
+
+ FunctionKind kind = is_get ? FunctionKind::kGetterFunction
+ : FunctionKind::kSetterFunction;
- // Make sure the name expression is a string since we need a Name for
- // Runtime_DefineAccessorPropertyUnchecked and since we can determine this
- // statically we can skip the extra runtime check.
- if (!*is_computed_name) {
- name_expression =
- factory()->NewStringLiteral(*name, name_expression->position());
+ FunctionLiteralT value = impl()->ParseFunctionLiteral(
+ name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
+ language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+ if (!*is_computed_name) {
+ impl()->AddAccessorPrefixToFunctionName(is_get, value, name);
+ }
+
+ return factory()->NewObjectLiteralProperty(
+ name_expression, value, is_get ? ObjectLiteralProperty::GETTER
+ : ObjectLiteralProperty::SETTER,
+ *is_computed_name);
}
- return factory()->NewObjectLiteralProperty(
- name_expression, value,
- is_get ? ObjectLiteralProperty::GETTER : ObjectLiteralProperty::SETTER,
- is_static, *is_computed_name);
+ case PropertyKind::kClassField:
+ case PropertyKind::kNotSet:
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return impl()->EmptyObjectLiteralProperty();
}
-
- Token::Value next = Next();
- ReportUnexpectedToken(next);
- *ok = false;
- return this->EmptyObjectLiteralProperty();
+ UNREACHABLE();
+ return impl()->EmptyObjectLiteralProperty();
}
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
- ExpressionClassifier* classifier, bool* ok) {
+ bool* ok) {
// ObjectLiteral ::
// '{' (PropertyDefinition (',' PropertyDefinition)* ','? )? '}'
int pos = peek_position();
- typename Traits::Type::PropertyList properties =
- this->NewPropertyList(4, zone_);
+ typename Types::ObjectPropertyList properties =
+ impl()->NewObjectPropertyList(4);
int number_of_boilerplate_properties = 0;
bool has_computed_names = false;
ObjectLiteralChecker checker(this);
@@ -2177,20 +2492,16 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
while (peek() != Token::RBRACE) {
FuncNameInferrer::State fni_state(fni_);
- const bool in_class = false;
- const bool has_extends = false;
bool is_computed_name = false;
- IdentifierT name = this->EmptyIdentifier();
- ObjectLiteralPropertyT property = this->ParsePropertyDefinition(
- &checker, in_class, has_extends, MethodKind::kNormal, &is_computed_name,
- NULL, classifier, &name, CHECK_OK);
+ ObjectLiteralPropertyT property =
+ ParseObjectPropertyDefinition(&checker, &is_computed_name, CHECK_OK);
if (is_computed_name) {
has_computed_names = true;
}
// Count CONSTANT or COMPUTED properties to maintain the enumeration order.
- if (!has_computed_names && this->IsBoilerplateProperty(property)) {
+ if (!has_computed_names && impl()->IsBoilerplateProperty(property)) {
number_of_boilerplate_properties++;
}
properties->Add(property, zone());
@@ -2201,8 +2512,6 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
}
if (fni_ != nullptr) fni_->Infer();
-
- Traits::SetFunctionNameFromPropertyName(property, name);
}
Expect(Token::RBRACE, CHECK_OK);
@@ -2216,16 +2525,13 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
}
template <typename Impl>
-typename ParserBase<Impl>::Traits::Type::ExpressionList
-ParserBase<Impl>::ParseArguments(Scanner::Location* first_spread_arg_loc,
- bool maybe_arrow,
- ExpressionClassifier* classifier, bool* ok) {
+typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
+ Scanner::Location* first_spread_arg_loc, bool maybe_arrow, bool* ok) {
// Arguments ::
// '(' (AssignmentExpression)*[','] ')'
Scanner::Location spread_arg = Scanner::Location::invalid();
- typename Traits::Type::ExpressionList result =
- this->NewExpressionList(4, zone_);
+ ExpressionListT result = impl()->NewExpressionList(4);
Expect(Token::LPAREN, CHECK_OK_CUSTOM(NullExpressionList));
bool done = (peek() == Token::RPAREN);
bool was_unspread = false;
@@ -2235,12 +2541,10 @@ ParserBase<Impl>::ParseArguments(Scanner::Location* first_spread_arg_loc,
bool is_spread = Check(Token::ELLIPSIS);
int expr_pos = peek_position();
- ExpressionT argument = this->ParseAssignmentExpression(
- true, classifier, CHECK_OK_CUSTOM(NullExpressionList));
- CheckNoTailCallExpressions(classifier, CHECK_OK_CUSTOM(NullExpressionList));
+ ExpressionT argument =
+ ParseAssignmentExpression(true, CHECK_OK_CUSTOM(NullExpressionList));
if (!maybe_arrow) {
- impl()->RewriteNonPattern(classifier,
- CHECK_OK_CUSTOM(NullExpressionList));
+ impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpressionList));
}
if (is_spread) {
if (!spread_arg.IsValid()) {
@@ -2263,7 +2567,7 @@ ParserBase<Impl>::ParseArguments(Scanner::Location* first_spread_arg_loc,
if (result->length() > Code::kMaxArguments) {
ReportMessage(MessageTemplate::kTooManyArguments);
*ok = false;
- return this->NullExpressionList();
+ return impl()->NullExpressionList();
}
done = (peek() != Token::COMMA);
if (!done) {
@@ -2276,22 +2580,21 @@ ParserBase<Impl>::ParseArguments(Scanner::Location* first_spread_arg_loc,
}
Scanner::Location location = scanner_->location();
if (Token::RPAREN != Next()) {
- ReportMessageAt(location, MessageTemplate::kUnterminatedArgList);
+ impl()->ReportMessageAt(location, MessageTemplate::kUnterminatedArgList);
*ok = false;
- return this->NullExpressionList();
+ return impl()->NullExpressionList();
}
*first_spread_arg_loc = spread_arg;
if (!maybe_arrow || peek() != Token::ARROW) {
if (maybe_arrow) {
- impl()->RewriteNonPattern(classifier,
- CHECK_OK_CUSTOM(NullExpressionList));
+ impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpressionList));
}
if (spread_arg.IsValid()) {
// Unspread parameter sequences are translated into array literals in the
// parser. Ensure that the number of materialized literals matches between
// the parser and preparser
- Traits::MaterializeUnspreadArgumentsLiterals(unspread_sequences_count);
+ impl()->MaterializeUnspreadArgumentsLiterals(unspread_sequences_count);
}
}
@@ -2301,9 +2604,7 @@ ParserBase<Impl>::ParseArguments(Scanner::Location* first_spread_arg_loc,
// Precedence = 2
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN,
- ExpressionClassifier* classifier,
- bool* ok) {
+ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
// AssignmentExpression ::
// ConditionalExpression
// ArrowFunction
@@ -2312,13 +2613,13 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN,
int lhs_beg_pos = peek_position();
if (peek() == Token::YIELD && is_generator()) {
- return this->ParseYieldExpression(accept_IN, classifier, ok);
+ return ParseYieldExpression(accept_IN, ok);
}
FuncNameInferrer::State fni_state(fni_);
Checkpoint checkpoint(this);
- ExpressionClassifier arrow_formals_classifier(this,
- classifier->duplicate_finder());
+ ExpressionClassifier arrow_formals_classifier(
+ this, classifier()->duplicate_finder());
Scope::Snapshot scope_snapshot(scope());
@@ -2328,26 +2629,23 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN,
bool parenthesized_formals = peek() == Token::LPAREN;
if (!is_async && !parenthesized_formals) {
- ArrowFormalParametersUnexpectedToken(&arrow_formals_classifier);
+ ArrowFormalParametersUnexpectedToken();
}
// Parse a simple, faster sub-grammar (primary expression) if it's evident
// that we have only a trivial expression to parse.
ExpressionT expression;
if (IsTrivialExpression()) {
- expression = this->ParsePrimaryExpression(&arrow_formals_classifier,
- &is_async, CHECK_OK);
+ expression = ParsePrimaryExpression(&is_async, CHECK_OK);
} else {
- expression = this->ParseConditionalExpression(
- accept_IN, &arrow_formals_classifier, CHECK_OK);
+ expression = ParseConditionalExpression(accept_IN, CHECK_OK);
}
- if (is_async && this->IsIdentifier(expression) && peek_any_identifier() &&
+ if (is_async && impl()->IsIdentifier(expression) && peek_any_identifier() &&
PeekAhead() == Token::ARROW) {
// async Identifier => AsyncConciseBody
- IdentifierT name =
- ParseAndClassifyIdentifier(&arrow_formals_classifier, CHECK_OK);
- expression = this->ExpressionFromIdentifier(
+ IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
+ expression = impl()->ExpressionFromIdentifier(
name, position(), scanner()->location().end_pos, InferName::kNo);
if (fni_) {
// Remove `async` keyword from inferred name stack.
@@ -2357,26 +2655,29 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN,
if (peek() == Token::ARROW) {
Scanner::Location arrow_loc = scanner()->peek_location();
- ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
- parenthesized_formals, is_async, CHECK_OK);
+ ValidateArrowFormalParameters(expression, parenthesized_formals, is_async,
+ CHECK_OK);
// This reads strangely, but is correct: it checks whether any
// sub-expression of the parameter list failed to be a valid formal
// parameter initializer. Since YieldExpressions are banned anywhere
// in an arrow parameter list, this is correct.
// TODO(adamk): Rename "FormalParameterInitializerError" to refer to
// "YieldExpression", which is its only use.
- ValidateFormalParameterInitializer(&arrow_formals_classifier, ok);
+ ValidateFormalParameterInitializer(ok);
Scanner::Location loc(lhs_beg_pos, scanner()->location().end_pos);
DeclarationScope* scope =
- this->NewFunctionScope(is_async ? FunctionKind::kAsyncArrowFunction
- : FunctionKind::kArrowFunction);
+ NewFunctionScope(is_async ? FunctionKind::kAsyncArrowFunction
+ : FunctionKind::kArrowFunction);
// Because the arrow's parameters were parsed in the outer scope, any
// usage flags that might have been triggered there need to be copied
// to the arrow scope.
this->scope()->PropagateUsageFlagsToScope(scope);
+
+ scope_snapshot.Reparent(scope);
+
FormalParametersT parameters(scope);
- if (!arrow_formals_classifier.is_simple_parameter_list()) {
+ if (!classifier()->is_simple_parameter_list()) {
scope->SetHasNonSimpleParameters();
parameters.is_simple = false;
}
@@ -2385,18 +2686,16 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN,
scope->set_start_position(lhs_beg_pos);
Scanner::Location duplicate_loc = Scanner::Location::invalid();
- this->ParseArrowFunctionFormalParameterList(
- &parameters, expression, loc, &duplicate_loc, scope_snapshot, CHECK_OK);
+ impl()->DeclareArrowFunctionFormalParameters(&parameters, expression, loc,
+ &duplicate_loc, CHECK_OK);
if (duplicate_loc.IsValid()) {
- arrow_formals_classifier.RecordDuplicateFormalParameterError(
- duplicate_loc);
+ classifier()->RecordDuplicateFormalParameterError(duplicate_loc);
}
- expression = this->ParseArrowFunctionLiteral(
- accept_IN, parameters, is_async, arrow_formals_classifier, CHECK_OK);
- arrow_formals_classifier.Discard();
- classifier->RecordPatternError(arrow_loc,
- MessageTemplate::kUnexpectedToken,
- Token::String(Token::ARROW));
+ expression = ParseArrowFunctionLiteral(accept_IN, parameters, CHECK_OK);
+ impl()->Discard();
+ classifier()->RecordPatternError(arrow_loc,
+ MessageTemplate::kUnexpectedToken,
+ Token::String(Token::ARROW));
if (fni_ != nullptr) fni_->Infer();
@@ -2407,87 +2706,70 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN,
// form part of one. Propagate speculative formal parameter error locations
// (including those for binding patterns, since formal parameters can
// themselves contain binding patterns).
- // Do not merge pending non-pattern expressions yet!
- unsigned productions =
- ExpressionClassifier::FormalParametersProductions |
- ExpressionClassifier::AsyncArrowFormalParametersProduction |
- ExpressionClassifier::FormalParameterInitializerProduction;
+ unsigned productions = ExpressionClassifier::AllProductions &
+ ~ExpressionClassifier::ArrowFormalParametersProduction;
// Parenthesized identifiers and property references are allowed as part
- // of a larger binding pattern, even though parenthesized patterns
+ // of a larger assignment pattern, even though parenthesized patterns
// themselves are not allowed, e.g., "[(x)] = []". Only accumulate
// assignment pattern errors if the parsed expression is more complex.
- if (this->IsValidReferenceExpression(expression)) {
- productions |= ExpressionClassifier::PatternProductions &
- ~ExpressionClassifier::AssignmentPatternProduction;
- } else {
- productions |= ExpressionClassifier::PatternProductions;
+ if (IsValidReferenceExpression(expression)) {
+ productions &= ~ExpressionClassifier::AssignmentPatternProduction;
}
const bool is_destructuring_assignment =
IsValidPattern(expression) && peek() == Token::ASSIGN;
- if (!is_destructuring_assignment) {
- // This may be an expression or a pattern, so we must continue to
- // accumulate expression-related errors.
- productions |= ExpressionClassifier::ExpressionProduction |
- ExpressionClassifier::TailCallExpressionProduction |
- ExpressionClassifier::ObjectLiteralProduction;
+ if (is_destructuring_assignment) {
+ // This is definitely not an expression so don't accumulate
+ // expression-related errors.
+ productions &= ~(ExpressionClassifier::ExpressionProduction |
+ ExpressionClassifier::TailCallExpressionProduction);
}
- classifier->Accumulate(&arrow_formals_classifier, productions, false);
-
if (!Token::IsAssignmentOp(peek())) {
// Parsed conditional expression only (no assignment).
- // Now pending non-pattern expressions must be merged.
- classifier->MergeNonPatterns(&arrow_formals_classifier);
+ // Pending non-pattern expressions must be merged.
+ impl()->Accumulate(productions);
return expression;
+ } else {
+ // Pending non-pattern expressions must be discarded.
+ impl()->Accumulate(productions, false);
}
- // Now pending non-pattern expressions must be discarded.
- arrow_formals_classifier.Discard();
-
- CheckNoTailCallExpressions(classifier, CHECK_OK);
-
if (is_destructuring_assignment) {
- ValidateAssignmentPattern(classifier, CHECK_OK);
+ ValidateAssignmentPattern(CHECK_OK);
} else {
- expression = this->CheckAndRewriteReferenceExpression(
+ expression = CheckAndRewriteReferenceExpression(
expression, lhs_beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInAssignment, CHECK_OK);
}
- expression = this->MarkExpressionAsAssigned(expression);
+ expression = impl()->MarkExpressionAsAssigned(expression);
Token::Value op = Next(); // Get assignment operator.
if (op != Token::ASSIGN) {
- classifier->RecordPatternError(scanner()->location(),
- MessageTemplate::kUnexpectedToken,
- Token::String(op));
+ classifier()->RecordPatternError(scanner()->location(),
+ MessageTemplate::kUnexpectedToken,
+ Token::String(op));
}
int pos = position();
ExpressionClassifier rhs_classifier(this);
- ExpressionT right =
- this->ParseAssignmentExpression(accept_IN, &rhs_classifier, CHECK_OK);
- CheckNoTailCallExpressions(&rhs_classifier, CHECK_OK);
- impl()->RewriteNonPattern(&rhs_classifier, CHECK_OK);
- classifier->Accumulate(
- &rhs_classifier,
- ExpressionClassifier::ExpressionProductions |
- ExpressionClassifier::ObjectLiteralProduction |
- ExpressionClassifier::AsyncArrowFormalParametersProduction);
+ ExpressionT right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
+ impl()->AccumulateFormalParameterContainmentErrors();
// TODO(1231235): We try to estimate the set of properties set by
// constructors. We define a new property whenever there is an
// assignment to a property of 'this'. We should probably only add
// properties if we haven't seen them before. Otherwise we'll
// probably overestimate the number of properties.
- if (op == Token::ASSIGN && this->IsThisProperty(expression)) {
+ if (op == Token::ASSIGN && impl()->IsThisProperty(expression)) {
function_state_->AddProperty();
}
- this->CheckAssigningFunctionLiteralToProperty(expression, right);
+ impl()->CheckAssigningFunctionLiteralToProperty(expression, right);
if (fni_ != NULL) {
// Check if the right hand side is a call to avoid inferring a
@@ -2502,7 +2784,7 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN,
}
if (op == Token::ASSIGN) {
- Traits::SetFunctionNameFromIdentifierRef(right, expression);
+ impl()->SetFunctionNameFromIdentifierRef(right, expression);
}
if (op == Token::ASSIGN_EXP) {
@@ -2522,19 +2804,19 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN,
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
- bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
+ bool accept_IN, bool* ok) {
// YieldExpression ::
// 'yield' ([no line terminator] '*'? AssignmentExpression)?
int pos = peek_position();
- classifier->RecordPatternError(scanner()->peek_location(),
- MessageTemplate::kInvalidDestructuringTarget);
- classifier->RecordFormalParameterInitializerError(
+ classifier()->RecordPatternError(
+ scanner()->peek_location(), MessageTemplate::kInvalidDestructuringTarget);
+ classifier()->RecordFormalParameterInitializerError(
scanner()->peek_location(), MessageTemplate::kYieldInParameter);
Expect(Token::YIELD, CHECK_OK);
ExpressionT generator_object =
factory()->NewVariableProxy(function_state_->generator_object_variable());
// The following initialization is necessary.
- ExpressionT expression = Traits::EmptyExpression();
+ ExpressionT expression = impl()->EmptyExpression();
bool delegating = false; // yield*
if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
if (Check(Token::MUL)) delegating = true;
@@ -2553,8 +2835,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
if (!delegating) break;
// Delegating yields require an RHS; fall through.
default:
- expression = ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
+ expression = ParseAssignmentExpression(accept_IN, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
break;
}
}
@@ -2563,87 +2845,18 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
return impl()->RewriteYieldStar(generator_object, expression, pos);
}
- expression = Traits::BuildIteratorResult(expression, false);
+ expression = impl()->BuildIteratorResult(expression, false);
// Hackily disambiguate o from o.next and o [Symbol.iterator]().
// TODO(verwaest): Come up with a better solution.
- typename Traits::Type::YieldExpression yield = factory()->NewYield(
- generator_object, expression, pos, Yield::kOnExceptionThrow);
+ ExpressionT yield = factory()->NewYield(generator_object, expression, pos,
+ Yield::kOnExceptionThrow);
return yield;
}
-template <typename Impl>
-typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseTailCallExpression(ExpressionClassifier* classifier,
- bool* ok) {
- // TailCallExpression::
- // 'continue' MemberExpression Arguments
- // 'continue' CallExpression Arguments
- // 'continue' MemberExpression TemplateLiteral
- // 'continue' CallExpression TemplateLiteral
- Expect(Token::CONTINUE, CHECK_OK);
- int pos = position();
- int sub_expression_pos = peek_position();
- ExpressionT expression =
- this->ParseLeftHandSideExpression(classifier, CHECK_OK);
- CheckNoTailCallExpressions(classifier, CHECK_OK);
-
- Scanner::Location loc(pos, scanner()->location().end_pos);
- if (!expression->IsCall()) {
- Scanner::Location sub_loc(sub_expression_pos, loc.end_pos);
- ReportMessageAt(sub_loc, MessageTemplate::kUnexpectedInsideTailCall);
- *ok = false;
- return Traits::EmptyExpression();
- }
- if (Traits::IsDirectEvalCall(expression)) {
- Scanner::Location sub_loc(sub_expression_pos, loc.end_pos);
- ReportMessageAt(sub_loc, MessageTemplate::kUnexpectedTailCallOfEval);
- *ok = false;
- return Traits::EmptyExpression();
- }
- if (!is_strict(language_mode())) {
- ReportMessageAt(loc, MessageTemplate::kUnexpectedSloppyTailCall);
- *ok = false;
- return Traits::EmptyExpression();
- }
- if (is_resumable()) {
- Scanner::Location sub_loc(sub_expression_pos, loc.end_pos);
- ReportMessageAt(sub_loc, MessageTemplate::kUnexpectedTailCall);
- *ok = false;
- return Traits::EmptyExpression();
- }
- ReturnExprContext return_expr_context =
- function_state_->return_expr_context();
- if (return_expr_context != ReturnExprContext::kInsideValidReturnStatement) {
- MessageTemplate::Template msg = MessageTemplate::kNone;
- switch (return_expr_context) {
- case ReturnExprContext::kInsideValidReturnStatement:
- UNREACHABLE();
- return Traits::EmptyExpression();
- case ReturnExprContext::kInsideValidBlock:
- msg = MessageTemplate::kUnexpectedTailCall;
- break;
- case ReturnExprContext::kInsideTryBlock:
- msg = MessageTemplate::kUnexpectedTailCallInTryBlock;
- break;
- case ReturnExprContext::kInsideForInOfBody:
- msg = MessageTemplate::kUnexpectedTailCallInForInOf;
- break;
- }
- ReportMessageAt(loc, msg);
- *ok = false;
- return Traits::EmptyExpression();
- }
- classifier->RecordTailCallExpressionError(
- loc, MessageTemplate::kUnexpectedTailCall);
- function_state_->AddExplicitTailCallExpression(expression, loc);
- return expression;
-}
-
// Precedence = 3
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
- ExpressionClassifier* classifier,
bool* ok) {
// ConditionalExpression ::
// LogicalOrExpression
@@ -2651,23 +2864,20 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
int pos = peek_position();
// We start using the binary expression parser for prec >= 4 only!
- ExpressionT expression =
- this->ParseBinaryExpression(4, accept_IN, classifier, CHECK_OK);
+ ExpressionT expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
if (peek() != Token::CONDITIONAL) return expression;
- CheckNoTailCallExpressions(classifier, CHECK_OK);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
+ impl()->RewriteNonPattern(CHECK_OK);
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
Consume(Token::CONDITIONAL);
// In parsing the first assignment expression in conditional
// expressions we always accept the 'in' keyword; see ECMA-262,
// section 11.12, page 58.
- ExpressionT left = ParseAssignmentExpression(true, classifier, CHECK_OK);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
+ ExpressionT left = ParseAssignmentExpression(true, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
Expect(Token::COLON, CHECK_OK);
- ExpressionT right =
- ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
+ ExpressionT right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
return factory()->NewConditional(expression, left, right, pos);
}
@@ -2675,30 +2885,24 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
// Precedence >= 4
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
- int prec, bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
+ int prec, bool accept_IN, bool* ok) {
DCHECK(prec >= 4);
- ExpressionT x = this->ParseUnaryExpression(classifier, CHECK_OK);
+ ExpressionT x = ParseUnaryExpression(CHECK_OK);
for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
- CheckNoTailCallExpressions(classifier, CHECK_OK);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
+ impl()->RewriteNonPattern(CHECK_OK);
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
Token::Value op = Next();
int pos = position();
const bool is_right_associative = op == Token::EXP;
const int next_prec = is_right_associative ? prec1 : prec1 + 1;
- ExpressionT y =
- ParseBinaryExpression(next_prec, accept_IN, classifier, CHECK_OK);
- if (op != Token::OR && op != Token::AND) {
- CheckNoTailCallExpressions(classifier, CHECK_OK);
- }
- impl()->RewriteNonPattern(classifier, CHECK_OK);
+ ExpressionT y = ParseBinaryExpression(next_prec, accept_IN, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
- if (this->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos,
- factory())) {
+ if (impl()->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos)) {
continue;
}
@@ -2731,7 +2935,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
- ExpressionClassifier* classifier, bool* ok) {
+ bool* ok) {
// UnaryExpression ::
// PostfixExpression
// 'delete' UnaryExpression
@@ -2747,44 +2951,42 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
Token::Value op = peek();
if (Token::IsUnaryOp(op)) {
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
op = Next();
int pos = position();
- ExpressionT expression = ParseUnaryExpression(classifier, CHECK_OK);
- CheckNoTailCallExpressions(classifier, CHECK_OK);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
+ ExpressionT expression = ParseUnaryExpression(CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
if (op == Token::DELETE && is_strict(language_mode())) {
- if (this->IsIdentifier(expression)) {
+ if (impl()->IsIdentifier(expression)) {
// "delete identifier" is a syntax error in strict mode.
ReportMessage(MessageTemplate::kStrictDelete);
*ok = false;
- return this->EmptyExpression();
+ return impl()->EmptyExpression();
}
}
if (peek() == Token::EXP) {
ReportUnexpectedToken(Next());
*ok = false;
- return this->EmptyExpression();
+ return impl()->EmptyExpression();
}
- // Allow Traits do rewrite the expression.
- return this->BuildUnaryExpression(expression, op, pos, factory());
+ // Allow the parser's implementation to rewrite the expression.
+ return impl()->BuildUnaryExpression(expression, op, pos);
} else if (Token::IsCountOp(op)) {
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
op = Next();
int beg_pos = peek_position();
- ExpressionT expression = this->ParseUnaryExpression(classifier, CHECK_OK);
- CheckNoTailCallExpressions(classifier, CHECK_OK);
- expression = this->CheckAndRewriteReferenceExpression(
+ ExpressionT expression = ParseUnaryExpression(CHECK_OK);
+ expression = CheckAndRewriteReferenceExpression(
expression, beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
- this->MarkExpressionAsAssigned(expression);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
+ expression = impl()->MarkExpressionAsAssigned(expression);
+ impl()->RewriteNonPattern(CHECK_OK);
return factory()->NewCountOperation(op,
true /* prefix */,
@@ -2792,41 +2994,39 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
position());
} else if (is_async_function() && peek() == Token::AWAIT) {
- classifier->RecordFormalParameterInitializerError(
+ classifier()->RecordFormalParameterInitializerError(
scanner()->peek_location(),
MessageTemplate::kAwaitExpressionFormalParameter);
int await_pos = peek_position();
Consume(Token::AWAIT);
- ExpressionT value = ParseUnaryExpression(classifier, CHECK_OK);
+ ExpressionT value = ParseUnaryExpression(CHECK_OK);
return impl()->RewriteAwaitExpression(value, await_pos);
} else {
- return this->ParsePostfixExpression(classifier, ok);
+ return ParsePostfixExpression(ok);
}
}
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePostfixExpression(
- ExpressionClassifier* classifier, bool* ok) {
+ bool* ok) {
// PostfixExpression ::
// LeftHandSideExpression ('++' | '--')?
int lhs_beg_pos = peek_position();
- ExpressionT expression =
- this->ParseLeftHandSideExpression(classifier, CHECK_OK);
+ ExpressionT expression = ParseLeftHandSideExpression(CHECK_OK);
if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
Token::IsCountOp(peek())) {
- CheckNoTailCallExpressions(classifier, CHECK_OK);
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
- expression = this->CheckAndRewriteReferenceExpression(
+ expression = CheckAndRewriteReferenceExpression(
expression, lhs_beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
- expression = this->MarkExpressionAsAssigned(expression);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
+ expression = impl()->MarkExpressionAsAssigned(expression);
+ impl()->RewriteNonPattern(CHECK_OK);
Token::Value next = Next();
expression =
@@ -2840,40 +3040,33 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePostfixExpression(
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseLeftHandSideExpression(ExpressionClassifier* classifier,
- bool* ok) {
+ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
// LeftHandSideExpression ::
// (NewExpression | MemberExpression) ...
- if (FLAG_harmony_explicit_tailcalls && peek() == Token::CONTINUE) {
- return this->ParseTailCallExpression(classifier, ok);
- }
-
bool is_async = false;
- ExpressionT result = this->ParseMemberWithNewPrefixesExpression(
- classifier, &is_async, CHECK_OK);
+ ExpressionT result =
+ ParseMemberWithNewPrefixesExpression(&is_async, CHECK_OK);
while (true) {
switch (peek()) {
case Token::LBRACK: {
- CheckNoTailCallExpressions(classifier, CHECK_OK);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
+ impl()->RewriteNonPattern(CHECK_OK);
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
Consume(Token::LBRACK);
int pos = position();
- ExpressionT index = ParseExpression(true, classifier, CHECK_OK);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
+ ExpressionT index = ParseExpressionCoverGrammar(true, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
result = factory()->NewProperty(result, index, pos);
Expect(Token::RBRACK, CHECK_OK);
break;
}
case Token::LPAREN: {
- CheckNoTailCallExpressions(classifier, CHECK_OK);
int pos;
- impl()->RewriteNonPattern(classifier, CHECK_OK);
- BindingPatternUnexpectedToken(classifier);
+ impl()->RewriteNonPattern(CHECK_OK);
+ BindingPatternUnexpectedToken();
if (scanner()->current_token() == Token::IDENTIFIER ||
scanner()->current_token() == Token::SUPER ||
scanner()->current_token() == Token::ASYNC) {
@@ -2895,36 +3088,36 @@ ParserBase<Impl>::ParseLeftHandSideExpression(ExpressionClassifier* classifier,
}
}
Scanner::Location spread_pos;
- typename Traits::Type::ExpressionList args;
- if (V8_UNLIKELY(is_async && this->IsIdentifier(result))) {
+ ExpressionListT args;
+ if (V8_UNLIKELY(is_async && impl()->IsIdentifier(result))) {
ExpressionClassifier async_classifier(this);
- args = ParseArguments(&spread_pos, true, &async_classifier, CHECK_OK);
+ args = ParseArguments(&spread_pos, true, CHECK_OK);
if (peek() == Token::ARROW) {
if (fni_) {
fni_->RemoveAsyncKeywordFromEnd();
}
- ValidateBindingPattern(&async_classifier, CHECK_OK);
- if (!async_classifier.is_valid_async_arrow_formal_parameters()) {
+ ValidateBindingPattern(CHECK_OK);
+ ValidateFormalParameterInitializer(CHECK_OK);
+ if (!classifier()->is_valid_async_arrow_formal_parameters()) {
ReportClassifierError(
- async_classifier.async_arrow_formal_parameters_error());
+ classifier()->async_arrow_formal_parameters_error());
*ok = false;
- return this->EmptyExpression();
+ return impl()->EmptyExpression();
}
if (args->length()) {
// async ( Arguments ) => ...
- return Traits::ExpressionListToExpression(args);
+ return impl()->ExpressionListToExpression(args);
}
// async () => ...
return factory()->NewEmptyParentheses(pos);
} else {
- classifier->Accumulate(&async_classifier,
- ExpressionClassifier::AllProductions);
+ impl()->AccumulateFormalParameterContainmentErrors();
}
} else {
- args = ParseArguments(&spread_pos, false, classifier, CHECK_OK);
+ args = ParseArguments(&spread_pos, false, CHECK_OK);
}
- ArrowFormalParametersUnexpectedToken(classifier);
+ ArrowFormalParametersUnexpectedToken();
// Keep track of eval() calls since they disable all local variable
// optimizations.
@@ -2947,7 +3140,8 @@ ParserBase<Impl>::ParseLeftHandSideExpression(ExpressionClassifier* classifier,
// Explicit calls to the super constructor using super() perform an
// implicit binding assignment to the 'this' variable.
if (is_super_call) {
- ExpressionT this_expr = this->ThisExpression(pos);
+ result = impl()->RewriteSuperCall(result);
+ ExpressionT this_expr = impl()->ThisExpression(pos);
result =
factory()->NewAssignment(Token::INIT, this_expr, result, pos);
}
@@ -2957,26 +3151,24 @@ ParserBase<Impl>::ParseLeftHandSideExpression(ExpressionClassifier* classifier,
}
case Token::PERIOD: {
- CheckNoTailCallExpressions(classifier, CHECK_OK);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
+ impl()->RewriteNonPattern(CHECK_OK);
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
Consume(Token::PERIOD);
int pos = position();
IdentifierT name = ParseIdentifierName(CHECK_OK);
result = factory()->NewProperty(
result, factory()->NewStringLiteral(name, pos), pos);
- if (fni_ != NULL) this->PushLiteralName(fni_, name);
+ impl()->PushLiteralName(name);
break;
}
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL: {
- CheckNoTailCallExpressions(classifier, CHECK_OK);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
- result = ParseTemplateLiteral(result, position(), classifier, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
+ result = ParseTemplateLiteral(result, position(), CHECK_OK);
break;
}
@@ -2988,8 +3180,8 @@ ParserBase<Impl>::ParseLeftHandSideExpression(ExpressionClassifier* classifier,
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(
- ExpressionClassifier* classifier, bool* is_async, bool* ok) {
+ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(bool* is_async,
+ bool* ok) {
// NewExpression ::
// ('new')+ MemberExpression
//
@@ -3011,8 +3203,8 @@ ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(
// new new foo().bar().baz means (new (new foo()).bar()).baz
if (peek() == Token::NEW) {
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
Consume(Token::NEW);
int new_pos = position();
ExpressionT result;
@@ -3022,15 +3214,13 @@ ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(
} else if (peek() == Token::PERIOD) {
return ParseNewTargetExpression(CHECK_OK);
} else {
- result = this->ParseMemberWithNewPrefixesExpression(classifier, is_async,
- CHECK_OK);
+ result = ParseMemberWithNewPrefixesExpression(is_async, CHECK_OK);
}
- impl()->RewriteNonPattern(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
if (peek() == Token::LPAREN) {
// NewExpression with arguments.
Scanner::Location spread_pos;
- typename Traits::Type::ExpressionList args =
- this->ParseArguments(&spread_pos, classifier, CHECK_OK);
+ ExpressionListT args = ParseArguments(&spread_pos, CHECK_OK);
if (spread_pos.IsValid()) {
args = impl()->PrepareSpreadArguments(args);
@@ -3039,21 +3229,19 @@ ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(
result = factory()->NewCallNew(result, args, new_pos);
}
// The expression can still continue with . or [ after the arguments.
- result = this->ParseMemberExpressionContinuation(result, is_async,
- classifier, CHECK_OK);
+ result = ParseMemberExpressionContinuation(result, is_async, CHECK_OK);
return result;
}
// NewExpression without arguments.
- return factory()->NewCallNew(result, this->NewExpressionList(0, zone_),
- new_pos);
+ return factory()->NewCallNew(result, impl()->NewExpressionList(0), new_pos);
}
// No 'new' or 'super' keyword.
- return this->ParseMemberExpression(classifier, is_async, ok);
+ return ParseMemberExpression(is_async, ok);
}
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
- ExpressionClassifier* classifier, bool* is_async, bool* ok) {
+ bool* is_async, bool* ok) {
// MemberExpression ::
// (PrimaryExpression | FunctionLiteral | ClassLiteral)
// ('[' Expression ']' | '.' Identifier | Arguments | TemplateLiteral)*
@@ -3065,8 +3253,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
// Parse the initial primary or function expression.
ExpressionT result;
if (peek() == Token::FUNCTION) {
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
Consume(Token::FUNCTION);
int function_token_position = position();
@@ -3078,19 +3266,19 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
if (!is_generator()) {
// TODO(neis): allow escaping into closures?
- ReportMessageAt(scanner()->location(),
- MessageTemplate::kUnexpectedFunctionSent);
+ impl()->ReportMessageAt(scanner()->location(),
+ MessageTemplate::kUnexpectedFunctionSent);
*ok = false;
- return this->EmptyExpression();
+ return impl()->EmptyExpression();
}
- return this->FunctionSentExpression(factory(), pos);
+ return impl()->FunctionSentExpression(pos);
}
FunctionKind function_kind = Check(Token::MUL)
? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction;
- IdentifierT name = this->EmptyIdentifier();
+ IdentifierT name = impl()->EmptyIdentifier();
bool is_strict_reserved_name = false;
Scanner::Location function_name_location = Scanner::Location::invalid();
FunctionLiteral::FunctionType function_type =
@@ -3111,11 +3299,10 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
const bool is_new = false;
result = ParseSuperExpression(is_new, CHECK_OK);
} else {
- result = ParsePrimaryExpression(classifier, is_async, CHECK_OK);
+ result = ParsePrimaryExpression(is_async, CHECK_OK);
}
- result =
- ParseMemberExpressionContinuation(result, is_async, classifier, CHECK_OK);
+ result = ParseMemberExpressionContinuation(result, is_async, CHECK_OK);
return result;
}
@@ -3131,20 +3318,21 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
IsClassConstructor(kind)) {
if (peek() == Token::PERIOD || peek() == Token::LBRACK) {
scope->RecordSuperPropertyUsage();
- return this->NewSuperPropertyReference(factory(), pos);
+ return impl()->NewSuperPropertyReference(pos);
}
// new super() is never allowed.
// super() is only allowed in derived constructor
if (!is_new && peek() == Token::LPAREN && IsSubclassConstructor(kind)) {
// TODO(rossberg): This might not be the correct FunctionState for the
// method here.
- return this->NewSuperCallReference(factory(), pos);
+ return impl()->NewSuperCallReference(pos);
}
}
- ReportMessageAt(scanner()->location(), MessageTemplate::kUnexpectedSuper);
+ impl()->ReportMessageAt(scanner()->location(),
+ MessageTemplate::kUnexpectedSuper);
*ok = false;
- return this->EmptyExpression();
+ return impl()->EmptyExpression();
}
template <typename Impl>
@@ -3154,7 +3342,7 @@ void ParserBase<Impl>::ExpectMetaProperty(Vector<const char> property_name,
Consume(Token::PERIOD);
ExpectContextualKeyword(property_name, CHECK_OK_CUSTOM(Void));
if (scanner()->literal_contains_escapes()) {
- Traits::ReportMessageAt(
+ impl()->ReportMessageAt(
Scanner::Location(pos, scanner()->location().end_pos),
MessageTemplate::kInvalidEscapedMetaProperty, full_name);
*ok = false;
@@ -3168,63 +3356,58 @@ ParserBase<Impl>::ParseNewTargetExpression(bool* ok) {
ExpectMetaProperty(CStrVector("target"), "new.target", pos, CHECK_OK);
if (!GetReceiverScope()->is_function_scope()) {
- ReportMessageAt(scanner()->location(),
- MessageTemplate::kUnexpectedNewTarget);
+ impl()->ReportMessageAt(scanner()->location(),
+ MessageTemplate::kUnexpectedNewTarget);
*ok = false;
- return this->EmptyExpression();
+ return impl()->EmptyExpression();
}
- return this->NewTargetExpression(pos);
+ return impl()->NewTargetExpression(pos);
}
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
-ParserBase<Impl>::ParseMemberExpressionContinuation(
- ExpressionT expression, bool* is_async, ExpressionClassifier* classifier,
- bool* ok) {
+ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
+ bool* is_async, bool* ok) {
// Parses this part of MemberExpression:
// ('[' Expression ']' | '.' Identifier | TemplateLiteral)*
while (true) {
switch (peek()) {
case Token::LBRACK: {
*is_async = false;
- impl()->RewriteNonPattern(classifier, CHECK_OK);
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
+ impl()->RewriteNonPattern(CHECK_OK);
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
Consume(Token::LBRACK);
int pos = position();
- ExpressionT index = this->ParseExpression(true, classifier, CHECK_OK);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
+ ExpressionT index = ParseExpressionCoverGrammar(true, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
expression = factory()->NewProperty(expression, index, pos);
- if (fni_ != NULL) {
- this->PushPropertyName(fni_, index);
- }
+ impl()->PushPropertyName(index);
Expect(Token::RBRACK, CHECK_OK);
break;
}
case Token::PERIOD: {
*is_async = false;
- impl()->RewriteNonPattern(classifier, CHECK_OK);
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
+ impl()->RewriteNonPattern(CHECK_OK);
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
Consume(Token::PERIOD);
int pos = position();
IdentifierT name = ParseIdentifierName(CHECK_OK);
expression = factory()->NewProperty(
expression, factory()->NewStringLiteral(name, pos), pos);
- if (fni_ != NULL) {
- this->PushLiteralName(fni_, name);
- }
+ impl()->PushLiteralName(name);
break;
}
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL: {
*is_async = false;
- impl()->RewriteNonPattern(classifier, CHECK_OK);
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
+ impl()->RewriteNonPattern(CHECK_OK);
+ BindingPatternUnexpectedToken();
+ ArrowFormalParametersUnexpectedToken();
int pos;
if (scanner()->current_token() == Token::IDENTIFIER) {
pos = position();
@@ -3236,62 +3419,58 @@ ParserBase<Impl>::ParseMemberExpressionContinuation(
expression->AsFunctionLiteral()->set_should_eager_compile();
}
}
- expression =
- ParseTemplateLiteral(expression, pos, classifier, CHECK_OK);
+ expression = ParseTemplateLiteral(expression, pos, CHECK_OK);
break;
}
case Token::ILLEGAL: {
ReportUnexpectedTokenAt(scanner()->peek_location(), Token::ILLEGAL);
*ok = false;
- return this->EmptyExpression();
+ return impl()->EmptyExpression();
}
default:
return expression;
}
}
DCHECK(false);
- return this->EmptyExpression();
+ return impl()->EmptyExpression();
}
template <typename Impl>
void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters,
- ExpressionClassifier* classifier,
bool* ok) {
// FormalParameter[Yield,GeneratorParameter] :
// BindingElement[?Yield, ?GeneratorParameter]
bool is_rest = parameters->has_rest;
- ExpressionT pattern =
- ParsePrimaryExpression(classifier, CHECK_OK_CUSTOM(Void));
- ValidateBindingPattern(classifier, CHECK_OK_CUSTOM(Void));
+ ExpressionT pattern = ParsePrimaryExpression(CHECK_OK_CUSTOM(Void));
+ ValidateBindingPattern(CHECK_OK_CUSTOM(Void));
- if (!Traits::IsIdentifier(pattern)) {
+ if (!impl()->IsIdentifier(pattern)) {
parameters->is_simple = false;
- ValidateFormalParameterInitializer(classifier, CHECK_OK_CUSTOM(Void));
- classifier->RecordNonSimpleParameter();
+ ValidateFormalParameterInitializer(CHECK_OK_CUSTOM(Void));
+ classifier()->RecordNonSimpleParameter();
}
- ExpressionT initializer = Traits::EmptyExpression();
+ ExpressionT initializer = impl()->EmptyExpression();
if (!is_rest && Check(Token::ASSIGN)) {
ExpressionClassifier init_classifier(this);
- initializer = ParseAssignmentExpression(true, &init_classifier,
- CHECK_OK_CUSTOM(Void));
- impl()->RewriteNonPattern(&init_classifier, CHECK_OK_CUSTOM(Void));
- ValidateFormalParameterInitializer(&init_classifier, CHECK_OK_CUSTOM(Void));
+ initializer = ParseAssignmentExpression(true, CHECK_OK_CUSTOM(Void));
+ impl()->RewriteNonPattern(CHECK_OK_CUSTOM(Void));
+ ValidateFormalParameterInitializer(CHECK_OK_CUSTOM(Void));
parameters->is_simple = false;
- init_classifier.Discard();
- classifier->RecordNonSimpleParameter();
+ impl()->Discard();
+ classifier()->RecordNonSimpleParameter();
- Traits::SetFunctionNameFromIdentifierRef(initializer, pattern);
+ impl()->SetFunctionNameFromIdentifierRef(initializer, pattern);
}
- Traits::AddFormalParameter(parameters, pattern, initializer,
+ impl()->AddFormalParameter(parameters, pattern, initializer,
scanner()->location().end_pos, is_rest);
}
template <typename Impl>
-void ParserBase<Impl>::ParseFormalParameterList(
- FormalParametersT* parameters, ExpressionClassifier* classifier, bool* ok) {
+void ParserBase<Impl>::ParseFormalParameterList(FormalParametersT* parameters,
+ bool* ok) {
// FormalParameters[Yield] :
// [empty]
// FunctionRestParameter[?Yield]
@@ -3313,14 +3492,14 @@ void ParserBase<Impl>::ParseFormalParameterList(
return;
}
parameters->has_rest = Check(Token::ELLIPSIS);
- ParseFormalParameter(parameters, classifier, CHECK_OK_CUSTOM(Void));
+ ParseFormalParameter(parameters, CHECK_OK_CUSTOM(Void));
if (parameters->has_rest) {
parameters->is_simple = false;
- classifier->RecordNonSimpleParameter();
+ classifier()->RecordNonSimpleParameter();
if (peek() == Token::COMMA) {
- ReportMessageAt(scanner()->peek_location(),
- MessageTemplate::kParamAfterRest);
+ impl()->ReportMessageAt(scanner()->peek_location(),
+ MessageTemplate::kParamAfterRest);
*ok = false;
return;
}
@@ -3336,8 +3515,318 @@ void ParserBase<Impl>::ParseFormalParameterList(
for (int i = 0; i < parameters->Arity(); ++i) {
auto parameter = parameters->at(i);
- Traits::DeclareFormalParameter(parameters->scope, parameter, classifier);
+ impl()->DeclareFormalParameter(parameters->scope, parameter);
+ }
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
+ VariableDeclarationContext var_context,
+ DeclarationParsingResult* parsing_result,
+ ZoneList<const AstRawString*>* names, bool* ok) {
+ // VariableDeclarations ::
+ // ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
+ //
+ // ES6:
+ // FIXME(marja, nikolaos): Add an up-to-date comment about ES6 variable
+ // declaration syntax.
+
+ DCHECK_NOT_NULL(parsing_result);
+ parsing_result->descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
+ parsing_result->descriptor.declaration_pos = peek_position();
+ parsing_result->descriptor.initialization_pos = peek_position();
+
+ BlockT init_block = impl()->NullBlock();
+ if (var_context != kForStatement) {
+ init_block = factory()->NewBlock(
+ nullptr, 1, true, parsing_result->descriptor.declaration_pos);
+ }
+
+ switch (peek()) {
+ case Token::VAR:
+ parsing_result->descriptor.mode = VAR;
+ Consume(Token::VAR);
+ break;
+ case Token::CONST:
+ Consume(Token::CONST);
+ DCHECK(var_context != kStatement);
+ parsing_result->descriptor.mode = CONST;
+ break;
+ case Token::LET:
+ Consume(Token::LET);
+ DCHECK(var_context != kStatement);
+ parsing_result->descriptor.mode = LET;
+ break;
+ default:
+ UNREACHABLE(); // by current callers
+ break;
+ }
+
+ parsing_result->descriptor.scope = scope();
+ parsing_result->descriptor.hoist_scope = nullptr;
+
+ // The scope of a var/const declared variable anywhere inside a function
+ // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). The scope
+ // of a let declared variable is the scope of the immediately enclosing
+ // block.
+ int bindings_start = peek_position();
+ do {
+ // Parse binding pattern.
+ FuncNameInferrer::State fni_state(fni_);
+
+ ExpressionT pattern = impl()->EmptyExpression();
+ int decl_pos = peek_position();
+ {
+ ExpressionClassifier pattern_classifier(this);
+ pattern = ParsePrimaryExpression(CHECK_OK_CUSTOM(NullBlock));
+
+ ValidateBindingPattern(CHECK_OK_CUSTOM(NullBlock));
+ if (IsLexicalVariableMode(parsing_result->descriptor.mode)) {
+ ValidateLetPattern(CHECK_OK_CUSTOM(NullBlock));
+ }
+ }
+
+ Scanner::Location variable_loc = scanner()->location();
+ bool single_name = impl()->IsIdentifier(pattern);
+
+ if (single_name) {
+ impl()->PushVariableName(impl()->AsIdentifier(pattern));
+ }
+
+ ExpressionT value = impl()->EmptyExpression();
+ int initializer_position = kNoSourcePosition;
+ if (Check(Token::ASSIGN)) {
+ ExpressionClassifier classifier(this);
+ value = ParseAssignmentExpression(var_context != kForStatement,
+ CHECK_OK_CUSTOM(NullBlock));
+ impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullBlock));
+ variable_loc.end_pos = scanner()->location().end_pos;
+
+ if (!parsing_result->first_initializer_loc.IsValid()) {
+ parsing_result->first_initializer_loc = variable_loc;
+ }
+
+ // Don't infer if it is "a = function(){...}();"-like expression.
+ if (single_name && fni_ != nullptr) {
+ if (!value->IsCall() && !value->IsCallNew()) {
+ fni_->Infer();
+ } else {
+ fni_->RemoveLastFunction();
+ }
+ }
+
+ impl()->SetFunctionNameFromIdentifierRef(value, pattern);
+
+ // End position of the initializer is after the assignment expression.
+ initializer_position = scanner()->location().end_pos;
+ } else {
+ if (var_context != kForStatement || !PeekInOrOf()) {
+ // ES6 'const' and binding patterns require initializers.
+ if (parsing_result->descriptor.mode == CONST ||
+ !impl()->IsIdentifier(pattern)) {
+ impl()->ReportMessageAt(
+ Scanner::Location(decl_pos, scanner()->location().end_pos),
+ MessageTemplate::kDeclarationMissingInitializer,
+ !impl()->IsIdentifier(pattern) ? "destructuring" : "const");
+ *ok = false;
+ return impl()->NullBlock();
+ }
+ // 'let x' initializes 'x' to undefined.
+ if (parsing_result->descriptor.mode == LET) {
+ value = impl()->GetLiteralUndefined(position());
+ }
+ }
+
+ // End position of the initializer is after the variable.
+ initializer_position = position();
+ }
+
+ typename DeclarationParsingResult::Declaration decl(
+ pattern, initializer_position, value);
+ if (var_context == kForStatement) {
+ // Save the declaration for further handling in ParseForStatement.
+ parsing_result->declarations.Add(decl);
+ } else {
+ // Immediately declare the variable otherwise. This avoids O(N^2)
+ // behavior (where N is the number of variables in a single
+ // declaration) in the PatternRewriter having to do with removing
+ // and adding VariableProxies to the Scope (see bug 4699).
+ impl()->DeclareAndInitializeVariables(init_block,
+ &parsing_result->descriptor, &decl,
+ names, CHECK_OK_CUSTOM(NullBlock));
+ }
+ } while (Check(Token::COMMA));
+
+ parsing_result->bindings_loc =
+ Scanner::Location(bindings_start, scanner()->location().end_pos);
+
+ DCHECK(*ok);
+ return init_block;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseFunctionDeclaration(bool* ok) {
+ Consume(Token::FUNCTION);
+ int pos = position();
+ ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+ if (Check(Token::MUL)) {
+ flags |= ParseFunctionFlags::kIsGenerator;
+ if (allow_harmony_restrictive_declarations()) {
+ impl()->ReportMessageAt(scanner()->location(),
+ MessageTemplate::kGeneratorInLegacyContext);
+ *ok = false;
+ return impl()->NullStatement();
+ }
+ }
+ return ParseHoistableDeclaration(pos, flags, nullptr, false, ok);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseHoistableDeclaration(
+ ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
+ Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
+ int pos = position();
+ ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+ if (Check(Token::MUL)) {
+ flags |= ParseFunctionFlags::kIsGenerator;
+ }
+ return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseHoistableDeclaration(
+ int pos, ParseFunctionFlags flags, ZoneList<const AstRawString*>* names,
+ bool default_export, bool* ok) {
+ // FunctionDeclaration ::
+ // 'function' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
+ // 'function' '(' FormalParameters ')' '{' FunctionBody '}'
+ // GeneratorDeclaration ::
+ // 'function' '*' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
+ // 'function' '*' '(' FormalParameters ')' '{' FunctionBody '}'
+ //
+ // The anonymous forms are allowed iff [default_export] is true.
+ //
+ // 'function' and '*' (if present) have been consumed by the caller.
+
+ const bool is_generator = flags & ParseFunctionFlags::kIsGenerator;
+ const bool is_async = flags & ParseFunctionFlags::kIsAsync;
+ DCHECK(!is_generator || !is_async);
+
+ IdentifierT name;
+ FunctionNameValidity name_validity;
+ IdentifierT variable_name;
+ if (default_export && peek() == Token::LPAREN) {
+ impl()->GetDefaultStrings(&name, &variable_name);
+ name_validity = kSkipFunctionNameCheck;
+ } else {
+ bool is_strict_reserved;
+ name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved,
+ CHECK_OK_CUSTOM(NullStatement));
+ name_validity = is_strict_reserved ? kFunctionNameIsStrictReserved
+ : kFunctionNameValidityUnknown;
+ variable_name = name;
+ }
+
+ FuncNameInferrer::State fni_state(fni_);
+ impl()->PushEnclosingName(name);
+ FunctionLiteralT function = impl()->ParseFunctionLiteral(
+ name, scanner()->location(), name_validity,
+ is_generator ? FunctionKind::kGeneratorFunction
+ : is_async ? FunctionKind::kAsyncFunction
+ : FunctionKind::kNormalFunction,
+ pos, FunctionLiteral::kDeclaration, language_mode(),
+ CHECK_OK_CUSTOM(NullStatement));
+
+ return impl()->DeclareFunction(variable_name, function, pos, is_generator,
+ is_async, names, ok);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseClassDeclaration(
+ ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
+ // ClassDeclaration ::
+ // 'class' Identifier ('extends' LeftHandExpression)? '{' ClassBody '}'
+ // 'class' ('extends' LeftHandExpression)? '{' ClassBody '}'
+ //
+ // The anonymous form is allowed iff [default_export] is true.
+ //
+ // 'class' is expected to be consumed by the caller.
+ //
+ // A ClassDeclaration
+ //
+ // class C { ... }
+ //
+ // has the same semantics as:
+ //
+ // let C = class C { ... };
+ //
+ // so rewrite it as such.
+
+ int class_token_pos = position();
+ IdentifierT name = impl()->EmptyIdentifier();
+ bool is_strict_reserved = false;
+ IdentifierT variable_name = impl()->EmptyIdentifier();
+ if (default_export && (peek() == Token::EXTENDS || peek() == Token::LBRACE)) {
+ impl()->GetDefaultStrings(&name, &variable_name);
+ } else {
+ name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved,
+ CHECK_OK_CUSTOM(NullStatement));
+ variable_name = name;
+ }
+
+ ExpressionClassifier no_classifier(this);
+ ExpressionT value =
+ ParseClassLiteral(name, scanner()->location(), is_strict_reserved,
+ class_token_pos, CHECK_OK_CUSTOM(NullStatement));
+ int end_pos = position();
+ return impl()->DeclareClass(variable_name, value, names, class_token_pos,
+ end_pos, ok);
+}
+
+// Language extension which is only enabled for source files loaded
+// through the API's extension mechanism. A native function
+// declaration is resolved by looking up the function through a
+// callback provided by the extension.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseNativeDeclaration(
+ bool* ok) {
+ int pos = peek_position();
+ Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
+ // Allow "eval" or "arguments" for backward compatibility.
+ IdentifierT name = ParseIdentifier(kAllowRestrictedIdentifiers,
+ CHECK_OK_CUSTOM(NullStatement));
+ Expect(Token::LPAREN, CHECK_OK_CUSTOM(NullStatement));
+ if (peek() != Token::RPAREN) {
+ do {
+ ParseIdentifier(kAllowRestrictedIdentifiers,
+ CHECK_OK_CUSTOM(NullStatement));
+ } while (Check(Token::COMMA));
+ }
+ Expect(Token::RPAREN, CHECK_OK_CUSTOM(NullStatement));
+ Expect(Token::SEMICOLON, CHECK_OK_CUSTOM(NullStatement));
+ return impl()->DeclareNative(name, pos, ok);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseAsyncFunctionDeclaration(
+ ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
+ // AsyncFunctionDeclaration ::
+ // async [no LineTerminator here] function BindingIdentifier[Await]
+ // ( FormalParameters[Await] ) { AsyncFunctionBody }
+ DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
+ int pos = position();
+ if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+ *ok = false;
+ impl()->ReportUnexpectedToken(scanner()->current_token());
+ return impl()->NullStatement();
}
+ Expect(Token::FUNCTION, CHECK_OK_CUSTOM(NullStatement));
+ ParseFunctionFlags flags = ParseFunctionFlags::kIsAsync;
+ return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
}
template <typename Impl>
@@ -3348,19 +3837,22 @@ void ParserBase<Impl>::CheckArityRestrictions(int param_count,
int formals_end_pos, bool* ok) {
if (IsGetterFunction(function_kind)) {
if (param_count != 0) {
- ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
- MessageTemplate::kBadGetterArity);
+ impl()->ReportMessageAt(
+ Scanner::Location(formals_start_pos, formals_end_pos),
+ MessageTemplate::kBadGetterArity);
*ok = false;
}
} else if (IsSetterFunction(function_kind)) {
if (param_count != 1) {
- ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
- MessageTemplate::kBadSetterArity);
+ impl()->ReportMessageAt(
+ Scanner::Location(formals_start_pos, formals_end_pos),
+ MessageTemplate::kBadSetterArity);
*ok = false;
}
if (has_rest) {
- ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
- MessageTemplate::kBadSetterRestParameter);
+ impl()->ReportMessageAt(
+ Scanner::Location(formals_start_pos, formals_end_pos),
+ MessageTemplate::kBadSetterRestParameter);
*ok = false;
}
}
@@ -3412,31 +3904,33 @@ bool ParserBase<Impl>::IsTrivialExpression() {
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseArrowFunctionLiteral(
- bool accept_IN, const FormalParametersT& formal_parameters, bool is_async,
- const ExpressionClassifier& formals_classifier, bool* ok) {
+ bool accept_IN, const FormalParametersT& formal_parameters, bool* ok) {
if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
// ASI inserts `;` after arrow parameters if a line terminator is found.
// `=> ...` is never a valid expression, so report as syntax error.
// If next token is not `=>`, it's a syntax error anyways.
ReportUnexpectedTokenAt(scanner_->peek_location(), Token::ARROW);
*ok = false;
- return this->EmptyExpression();
+ return impl()->EmptyExpression();
}
- typename Traits::Type::StatementList body;
+ StatementListT body = impl()->NullStatementList();
int num_parameters = formal_parameters.scope->num_parameters();
int materialized_literal_count = -1;
int expected_property_count = -1;
- FunctionKind arrow_kind = is_async ? kAsyncArrowFunction : kArrowFunction;
+ FunctionKind kind = formal_parameters.scope->function_kind();
+ FunctionLiteral::EagerCompileHint eager_compile_hint =
+ FunctionLiteral::kShouldLazyCompile;
+ bool should_be_used_once_hint = false;
{
FunctionState function_state(&function_state_, &scope_state_,
- formal_parameters.scope, arrow_kind);
+ formal_parameters.scope);
function_state.SkipMaterializedLiterals(
formal_parameters.materialized_literals_count);
- this->ReindexLiterals(formal_parameters);
+ impl()->ReindexLiterals(formal_parameters);
Expect(Token::ARROW, CHECK_OK);
@@ -3444,20 +3938,42 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
// Multiple statement body
Consume(Token::LBRACE);
DCHECK_EQ(scope(), formal_parameters.scope);
- bool is_lazily_parsed = (mode() == PARSE_LAZILY &&
- formal_parameters.scope->AllowsLazyParsing());
+ bool is_lazily_parsed =
+ (mode() == PARSE_LAZILY &&
+ formal_parameters.scope
+ ->AllowsLazyParsingWithoutUnresolvedVariables());
+ // TODO(marja): consider lazy-parsing inner arrow functions too. is_this
+ // handling in Scope::ResolveVariable needs to change.
if (is_lazily_parsed) {
- body = this->NewStatementList(0, zone());
- impl()->SkipLazyFunctionBody(&materialized_literal_count,
- &expected_property_count, CHECK_OK);
+ Scanner::BookmarkScope bookmark(scanner());
+ bookmark.Set();
+ LazyParsingResult result = impl()->SkipLazyFunctionBody(
+ &materialized_literal_count, &expected_property_count, false, true,
+ CHECK_OK);
+ formal_parameters.scope->ResetAfterPreparsing(
+ ast_value_factory_, result == kLazyParsingAborted);
+
if (formal_parameters.materialized_literals_count > 0) {
materialized_literal_count +=
formal_parameters.materialized_literals_count;
}
- } else {
+
+ if (result == kLazyParsingAborted) {
+ bookmark.Apply();
+ // Trigger eager (re-)parsing, just below this block.
+ is_lazily_parsed = false;
+
+ // This is probably an initialization function. Inform the compiler it
+ // should also eager-compile this function, and that we expect it to
+ // be used once.
+ eager_compile_hint = FunctionLiteral::kShouldEagerCompile;
+ should_be_used_once_hint = true;
+ }
+ }
+ if (!is_lazily_parsed) {
body = impl()->ParseEagerFunctionBody(
- this->EmptyIdentifier(), kNoSourcePosition, formal_parameters,
- arrow_kind, FunctionLiteral::kAnonymousExpression, CHECK_OK);
+ impl()->EmptyIdentifier(), kNoSourcePosition, formal_parameters,
+ kind, FunctionLiteral::kAnonymousExpression, CHECK_OK);
materialized_literal_count =
function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
@@ -3469,18 +3985,18 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
function_state_->return_expr_context());
ReturnExprScope allow_tail_calls(
function_state_, ReturnExprContext::kInsideValidReturnStatement);
- body = this->NewStatementList(1, zone());
- this->AddParameterInitializationBlock(formal_parameters, body, is_async,
- CHECK_OK);
+ body = impl()->NewStatementList(1);
+ impl()->AddParameterInitializationBlock(
+ formal_parameters, body, kind == kAsyncArrowFunction, CHECK_OK);
ExpressionClassifier classifier(this);
- if (is_async) {
- impl()->ParseAsyncArrowSingleExpressionBody(body, accept_IN,
- &classifier, pos, CHECK_OK);
- impl()->RewriteNonPattern(&classifier, CHECK_OK);
+ if (kind == kAsyncArrowFunction) {
+ ParseAsyncFunctionBody(scope(), body, kAsyncArrowFunction,
+ FunctionBodyType::kSingleExpression, accept_IN,
+ pos, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
} else {
- ExpressionT expression =
- ParseAssignmentExpression(accept_IN, &classifier, CHECK_OK);
- impl()->RewriteNonPattern(&classifier, CHECK_OK);
+ ExpressionT expression = ParseAssignmentExpression(accept_IN, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
body->Add(factory()->NewReturnStatement(expression, pos), zone());
if (allow_tailcalls() && !is_sloppy(language_mode())) {
// ES6 14.6.1 Static Semantics: IsInTailPosition
@@ -3499,8 +4015,8 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
// that duplicates are not allowed. Of course, the arrow function may
// itself be strict as well.
const bool allow_duplicate_parameters = false;
- this->ValidateFormalParameters(&formals_classifier, language_mode(),
- allow_duplicate_parameters, CHECK_OK);
+ ValidateFormalParameters(language_mode(), allow_duplicate_parameters,
+ CHECK_OK);
// Validate strict mode.
if (is_strict(language_mode())) {
@@ -3513,24 +4029,141 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
}
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
- this->EmptyIdentifierString(), formal_parameters.scope, body,
+ impl()->EmptyIdentifierString(), formal_parameters.scope, body,
materialized_literal_count, expected_property_count, num_parameters,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::kAnonymousExpression,
- FunctionLiteral::kShouldLazyCompile, arrow_kind,
+ FunctionLiteral::kAnonymousExpression, eager_compile_hint,
formal_parameters.scope->start_position());
function_literal->set_function_token_position(
formal_parameters.scope->start_position());
+ if (should_be_used_once_hint) {
+ function_literal->set_should_be_used_once_hint();
+ }
- if (fni_ != NULL) this->InferFunctionName(fni_, function_literal);
+ impl()->AddFunctionForNameInference(function_literal);
return function_literal;
}
template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
+ IdentifierT name, Scanner::Location class_name_location,
+ bool name_is_strict_reserved, int class_token_pos, bool* ok) {
+ // All parts of a ClassDeclaration and ClassExpression are strict code.
+ if (name_is_strict_reserved) {
+ impl()->ReportMessageAt(class_name_location,
+ MessageTemplate::kUnexpectedStrictReserved);
+ *ok = false;
+ return impl()->EmptyExpression();
+ }
+ if (impl()->IsEvalOrArguments(name)) {
+ impl()->ReportMessageAt(class_name_location,
+ MessageTemplate::kStrictEvalArguments);
+ *ok = false;
+ return impl()->EmptyExpression();
+ }
+
+ BlockState block_state(zone(), &scope_state_);
+ RaiseLanguageMode(STRICT);
+
+ ClassInfo class_info(this);
+ impl()->DeclareClassVariable(name, block_state.scope(), &class_info,
+ class_token_pos, CHECK_OK);
+
+ if (Check(Token::EXTENDS)) {
+ block_state.set_start_position(scanner()->location().end_pos);
+ ExpressionClassifier extends_classifier(this);
+ class_info.extends = ParseLeftHandSideExpression(CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
+ impl()->AccumulateFormalParameterContainmentErrors();
+ } else {
+ block_state.set_start_position(scanner()->location().end_pos);
+ }
+
+ ClassLiteralChecker checker(this);
+
+ Expect(Token::LBRACE, CHECK_OK);
+
+ const bool has_extends = !impl()->IsEmptyExpression(class_info.extends);
+ while (peek() != Token::RBRACE) {
+ if (Check(Token::SEMICOLON)) continue;
+ FuncNameInferrer::State fni_state(fni_);
+ bool is_computed_name = false; // Classes do not care about computed
+ // property names here.
+ ExpressionClassifier property_classifier(this);
+ ClassLiteralPropertyT property = ParseClassPropertyDefinition(
+ &checker, has_extends, &is_computed_name,
+ &class_info.has_seen_constructor, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
+ impl()->AccumulateFormalParameterContainmentErrors();
+
+ impl()->DeclareClassProperty(name, property, &class_info, CHECK_OK);
+ impl()->InferFunctionName();
+ }
+
+ Expect(Token::RBRACE, CHECK_OK);
+ return impl()->RewriteClassLiteral(name, &class_info, class_token_pos, ok);
+}
+
+template <typename Impl>
+void ParserBase<Impl>::ParseAsyncFunctionBody(Scope* scope, StatementListT body,
+ FunctionKind kind,
+ FunctionBodyType body_type,
+ bool accept_IN, int pos,
+ bool* ok) {
+ scope->ForceContextAllocation();
+
+ impl()->PrepareAsyncFunctionBody(body, kind, pos);
+
+ BlockT block = factory()->NewBlock(nullptr, 8, true, kNoSourcePosition);
+
+ ExpressionT return_value = impl()->EmptyExpression();
+ if (body_type == FunctionBodyType::kNormal) {
+ ParseStatementList(block->statements(), Token::RBRACE,
+ CHECK_OK_CUSTOM(Void));
+ return_value = factory()->NewUndefinedLiteral(kNoSourcePosition);
+ } else {
+ return_value = ParseAssignmentExpression(accept_IN, CHECK_OK_CUSTOM(Void));
+ impl()->RewriteNonPattern(CHECK_OK_CUSTOM(Void));
+ }
+
+ impl()->RewriteAsyncFunctionBody(body, block, return_value,
+ CHECK_OK_CUSTOM(Void));
+ scope->set_end_position(scanner()->location().end_pos);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
+ // AsyncFunctionLiteral ::
+ // async [no LineTerminator here] function ( FormalParameters[Await] )
+ // { AsyncFunctionBody }
+ //
+ // async [no LineTerminator here] function BindingIdentifier[Await]
+ // ( FormalParameters[Await] ) { AsyncFunctionBody }
+ DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
+ int pos = position();
+ Expect(Token::FUNCTION, CHECK_OK);
+ bool is_strict_reserved = false;
+ IdentifierT name = impl()->EmptyIdentifier();
+ FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
+
+ if (peek_any_identifier()) {
+ type = FunctionLiteral::kNamedExpression;
+ name = ParseIdentifierOrStrictReservedWord(FunctionKind::kAsyncFunction,
+ &is_strict_reserved, CHECK_OK);
+ }
+ return impl()->ParseFunctionLiteral(
+ name, scanner()->location(),
+ is_strict_reserved ? kFunctionNameIsStrictReserved
+ : kFunctionNameValidityUnknown,
+ FunctionKind::kAsyncFunction, pos, type, language_mode(), CHECK_OK);
+}
+
+template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
- ExpressionT tag, int start, ExpressionClassifier* classifier, bool* ok) {
+ ExpressionT tag, int start, bool* ok) {
// A TemplateLiteral is made up of 0 or more TEMPLATE_SPAN tokens (literal
// text followed by a substitution expression), finalized by a single
// TEMPLATE_TAIL.
@@ -3569,29 +4202,28 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
CheckTemplateOctalLiteral(pos, peek_position(), CHECK_OK);
next = peek();
if (next == Token::EOS) {
- ReportMessageAt(Scanner::Location(start, peek_position()),
- MessageTemplate::kUnterminatedTemplate);
+ impl()->ReportMessageAt(Scanner::Location(start, peek_position()),
+ MessageTemplate::kUnterminatedTemplate);
*ok = false;
- return Traits::EmptyExpression();
+ return impl()->EmptyExpression();
} else if (next == Token::ILLEGAL) {
- Traits::ReportMessageAt(
+ impl()->ReportMessageAt(
Scanner::Location(position() + 1, peek_position()),
MessageTemplate::kUnexpectedToken, "ILLEGAL", kSyntaxError);
*ok = false;
- return Traits::EmptyExpression();
+ return impl()->EmptyExpression();
}
int expr_pos = peek_position();
- ExpressionT expression = this->ParseExpression(true, classifier, CHECK_OK);
- CheckNoTailCallExpressions(classifier, CHECK_OK);
- impl()->RewriteNonPattern(classifier, CHECK_OK);
+ ExpressionT expression = ParseExpressionCoverGrammar(true, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
impl()->AddTemplateExpression(&ts, expression);
if (peek() != Token::RBRACE) {
- ReportMessageAt(Scanner::Location(expr_pos, peek_position()),
- MessageTemplate::kUnterminatedTemplateExpr);
+ impl()->ReportMessageAt(Scanner::Location(expr_pos, peek_position()),
+ MessageTemplate::kUnterminatedTemplateExpr);
*ok = false;
- return Traits::EmptyExpression();
+ return impl()->EmptyExpression();
}
// If we didn't die parsing that expression, our next token should be a
@@ -3601,16 +4233,16 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
pos = position();
if (next == Token::EOS) {
- ReportMessageAt(Scanner::Location(start, pos),
- MessageTemplate::kUnterminatedTemplate);
+ impl()->ReportMessageAt(Scanner::Location(start, pos),
+ MessageTemplate::kUnterminatedTemplate);
*ok = false;
- return Traits::EmptyExpression();
+ return impl()->EmptyExpression();
} else if (next == Token::ILLEGAL) {
- Traits::ReportMessageAt(
+ impl()->ReportMessageAt(
Scanner::Location(position() + 1, peek_position()),
MessageTemplate::kUnexpectedToken, "ILLEGAL", kSyntaxError);
*ok = false;
- return Traits::EmptyExpression();
+ return impl()->EmptyExpression();
}
impl()->AddTemplateSpan(&ts, next == Token::TEMPLATE_TAIL);
@@ -3627,8 +4259,8 @@ typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, bool* ok) {
- return this->CheckAndRewriteReferenceExpression(expression, beg_pos, end_pos,
- message, kReferenceError, ok);
+ return CheckAndRewriteReferenceExpression(expression, beg_pos, end_pos,
+ message, kReferenceError, ok);
}
template <typename Impl>
@@ -3636,12 +4268,12 @@ typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, ParseErrorType type, bool* ok) {
- if (this->IsIdentifier(expression) && is_strict(language_mode()) &&
- this->IsEvalOrArguments(this->AsIdentifier(expression))) {
+ if (impl()->IsIdentifier(expression) && is_strict(language_mode()) &&
+ impl()->IsEvalOrArguments(impl()->AsIdentifier(expression))) {
ReportMessageAt(Scanner::Location(beg_pos, end_pos),
MessageTemplate::kStrictEvalArguments, kSyntaxError);
*ok = false;
- return this->EmptyExpression();
+ return impl()->EmptyExpression();
}
if (expression->IsValidReferenceExpression()) {
return expression;
@@ -3649,47 +4281,1140 @@ ParserBase<Impl>::CheckAndRewriteReferenceExpression(
if (expression->IsCall()) {
// If it is a call, make it a runtime error for legacy web compatibility.
// Rewrite `expr' to `expr[throw ReferenceError]'.
- ExpressionT error = this->NewThrowReferenceError(message, beg_pos);
+ ExpressionT error = impl()->NewThrowReferenceError(message, beg_pos);
return factory()->NewProperty(expression, error, beg_pos);
}
ReportMessageAt(Scanner::Location(beg_pos, end_pos), message, type);
*ok = false;
- return this->EmptyExpression();
+ return impl()->EmptyExpression();
}
template <typename Impl>
bool ParserBase<Impl>::IsValidReferenceExpression(ExpressionT expression) {
- return this->IsAssignableIdentifier(expression) || expression->IsProperty();
+ return IsAssignableIdentifier(expression) || expression->IsProperty();
}
template <typename Impl>
-void ParserBase<Impl>::CheckDestructuringElement(
- ExpressionT expression, ExpressionClassifier* classifier, int begin,
- int end) {
+void ParserBase<Impl>::CheckDestructuringElement(ExpressionT expression,
+ int begin, int end) {
if (!IsValidPattern(expression) && !expression->IsAssignment() &&
!IsValidReferenceExpression(expression)) {
- classifier->RecordAssignmentPatternError(
+ classifier()->RecordAssignmentPatternError(
Scanner::Location(begin, end),
MessageTemplate::kInvalidDestructuringTarget);
}
}
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseV8Intrinsic(
+ bool* ok) {
+ // CallRuntime ::
+ // '%' Identifier Arguments
+
+ int pos = peek_position();
+ Expect(Token::MOD, CHECK_OK);
+ // Allow "eval" or "arguments" for backward compatibility.
+ IdentifierT name = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
+ Scanner::Location spread_pos;
+ ExpressionClassifier classifier(this);
+ ExpressionListT args = ParseArguments(&spread_pos, CHECK_OK);
+
+ DCHECK(!spread_pos.IsValid());
+
+ return impl()->NewV8Intrinsic(name, args, pos, ok);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseDoExpression(
+ bool* ok) {
+ // AssignmentExpression ::
+ // do '{' StatementList '}'
+
+ int pos = peek_position();
+ Expect(Token::DO, CHECK_OK);
+ BlockT block = ParseBlock(nullptr, CHECK_OK);
+ return impl()->RewriteDoExpression(block, pos, ok);
+}
+// Redefinition of CHECK_OK for parsing statements.
#undef CHECK_OK
-#undef CHECK_OK_CUSTOM
+#define CHECK_OK CHECK_OK_CUSTOM(NullStatement)
+
+template <typename Impl>
+typename ParserBase<Impl>::LazyParsingResult
+ParserBase<Impl>::ParseStatementList(StatementListT body, int end_token,
+ bool may_abort, bool* ok) {
+ // StatementList ::
+ // (StatementListItem)* <end_token>
+
+ // Allocate a target stack to use for this set of source
+ // elements. This way, all scripts and functions get their own
+ // target stack thus avoiding illegal breaks and continues across
+ // functions.
+ typename Types::TargetScope target_scope(this);
+ int count_statements = 0;
+
+ DCHECK(!impl()->IsNullStatementList(body));
+ bool directive_prologue = true; // Parsing directive prologue.
+
+ while (peek() != end_token) {
+ if (directive_prologue && peek() != Token::STRING) {
+ directive_prologue = false;
+ }
+
+ bool starts_with_identifier = peek() == Token::IDENTIFIER;
+ Scanner::Location token_loc = scanner()->peek_location();
+ StatementT stat =
+ ParseStatementListItem(CHECK_OK_CUSTOM(Return, kLazyParsingComplete));
+
+ if (impl()->IsNullStatement(stat) || impl()->IsEmptyStatement(stat)) {
+ directive_prologue = false; // End of directive prologue.
+ continue;
+ }
+
+ if (directive_prologue) {
+ // The length of the token is used to distinguish between strings literals
+ // that evaluate equal to directives but contain either escape sequences
+ // (e.g., "use \x73trict") or line continuations (e.g., "use \(newline)
+ // strict").
+ if (impl()->IsUseStrictDirective(stat) &&
+ token_loc.end_pos - token_loc.beg_pos == sizeof("use strict") + 1) {
+ // Directive "use strict" (ES5 14.1).
+ RaiseLanguageMode(STRICT);
+ if (!scope()->HasSimpleParameters()) {
+ // TC39 deemed "use strict" directives to be an error when occurring
+ // in the body of a function with non-simple parameter list, on
+ // 29/7/2015. https://goo.gl/ueA7Ln
+ impl()->ReportMessageAt(
+ token_loc, MessageTemplate::kIllegalLanguageModeDirective,
+ "use strict");
+ *ok = false;
+ return kLazyParsingComplete;
+ }
+ // Because declarations in strict eval code don't leak into the scope
+ // of the eval call, it is likely that functions declared in strict
+ // eval code will be used within the eval code, so lazy parsing is
+ // probably not a win.
+ if (scope()->is_eval_scope()) mode_ = PARSE_EAGERLY;
+ } else if (impl()->IsUseAsmDirective(stat) &&
+ token_loc.end_pos - token_loc.beg_pos ==
+ sizeof("use asm") + 1) {
+ // Directive "use asm".
+ impl()->SetAsmModule();
+ } else if (impl()->IsStringLiteral(stat)) {
+ // Possibly an unknown directive.
+ // Should not change mode, but will increment usage counters
+ // as appropriate. Ditto usages below.
+ RaiseLanguageMode(SLOPPY);
+ } else {
+ // End of the directive prologue.
+ directive_prologue = false;
+ RaiseLanguageMode(SLOPPY);
+ }
+ } else {
+ RaiseLanguageMode(SLOPPY);
+ }
+
+ // If we're allowed to abort, we will do so when we see a "long and
+ // trivial" function. Our current definition of "long and trivial" is:
+ // - over kLazyParseTrialLimit statements
+ // - all starting with an identifier (i.e., no if, for, while, etc.)
+ if (may_abort) {
+ if (!starts_with_identifier) {
+ may_abort = false;
+ } else if (++count_statements > kLazyParseTrialLimit) {
+ return kLazyParsingAborted;
+ }
+ }
+
+ body->Add(stat, zone());
+ }
+ return kLazyParsingComplete;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatementListItem(
+ bool* ok) {
+ // ECMA 262 6th Edition
+ // StatementListItem[Yield, Return] :
+ // Statement[?Yield, ?Return]
+ // Declaration[?Yield]
+ //
+ // Declaration[Yield] :
+ // HoistableDeclaration[?Yield]
+ // ClassDeclaration[?Yield]
+ // LexicalDeclaration[In, ?Yield]
+ //
+ // HoistableDeclaration[Yield, Default] :
+ // FunctionDeclaration[?Yield, ?Default]
+ // GeneratorDeclaration[?Yield, ?Default]
+ //
+ // LexicalDeclaration[In, Yield] :
+ // LetOrConst BindingList[?In, ?Yield] ;
+
+ switch (peek()) {
+ case Token::FUNCTION:
+ return ParseHoistableDeclaration(nullptr, false, ok);
+ case Token::CLASS:
+ Consume(Token::CLASS);
+ return ParseClassDeclaration(nullptr, false, ok);
+ case Token::VAR:
+ case Token::CONST:
+ return ParseVariableStatement(kStatementListItem, nullptr, ok);
+ case Token::LET:
+ if (IsNextLetKeyword()) {
+ return ParseVariableStatement(kStatementListItem, nullptr, ok);
+ }
+ break;
+ case Token::ASYNC:
+ if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
+ !scanner()->HasAnyLineTerminatorAfterNext()) {
+ Consume(Token::ASYNC);
+ return ParseAsyncFunctionDeclaration(nullptr, false, ok);
+ }
+ /* falls through */
+ default:
+ break;
+ }
+ return ParseStatement(nullptr, kAllowLabelledFunctionStatement, ok);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
+ ZoneList<const AstRawString*>* labels,
+ AllowLabelledFunctionStatement allow_function, bool* ok) {
+ // Statement ::
+ // Block
+ // VariableStatement
+ // EmptyStatement
+ // ExpressionStatement
+ // IfStatement
+ // IterationStatement
+ // ContinueStatement
+ // BreakStatement
+ // ReturnStatement
+ // WithStatement
+ // LabelledStatement
+ // SwitchStatement
+ // ThrowStatement
+ // TryStatement
+ // DebuggerStatement
+
+ // Note: Since labels can only be used by 'break' and 'continue'
+ // statements, which themselves are only valid within blocks,
+ // iterations or 'switch' statements (i.e., BreakableStatements),
+ // labels can be simply ignored in all other cases; except for
+ // trivial labeled break statements 'label: break label' which is
+ // parsed into an empty statement.
+ switch (peek()) {
+ case Token::LBRACE:
+ return ParseBlock(labels, ok);
+ case Token::SEMICOLON:
+ Next();
+ return factory()->NewEmptyStatement(kNoSourcePosition);
+ case Token::IF:
+ return ParseIfStatement(labels, ok);
+ case Token::DO:
+ return ParseDoWhileStatement(labels, ok);
+ case Token::WHILE:
+ return ParseWhileStatement(labels, ok);
+ case Token::FOR:
+ return ParseForStatement(labels, ok);
+ case Token::CONTINUE:
+ case Token::BREAK:
+ case Token::RETURN:
+ case Token::THROW:
+ case Token::TRY: {
+ // These statements must have their labels preserved in an enclosing
+ // block, as the corresponding AST nodes do not currently store their
+ // labels.
+ // TODO(nikolaos, marja): Consider adding the labels to the AST nodes.
+ if (labels == nullptr) {
+ return ParseStatementAsUnlabelled(labels, ok);
+ } else {
+ BlockT result =
+ factory()->NewBlock(labels, 1, false, kNoSourcePosition);
+ typename Types::Target target(this, result);
+ StatementT statement = ParseStatementAsUnlabelled(labels, CHECK_OK);
+ result->statements()->Add(statement, zone());
+ return result;
+ }
+ }
+ case Token::WITH:
+ return ParseWithStatement(labels, ok);
+ case Token::SWITCH:
+ return ParseSwitchStatement(labels, ok);
+ case Token::FUNCTION:
+ // FunctionDeclaration only allowed as a StatementListItem, not in
+ // an arbitrary Statement position. Exceptions such as
+ // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
+ // are handled by calling ParseScopedStatement rather than
+ // ParseStatement directly.
+ impl()->ReportMessageAt(scanner()->peek_location(),
+ is_strict(language_mode())
+ ? MessageTemplate::kStrictFunction
+ : MessageTemplate::kSloppyFunction);
+ *ok = false;
+ return impl()->NullStatement();
+ case Token::DEBUGGER:
+ return ParseDebuggerStatement(ok);
+ case Token::VAR:
+ return ParseVariableStatement(kStatement, nullptr, ok);
+ default:
+ return ParseExpressionOrLabelledStatement(labels, allow_function, ok);
+ }
+}
+
+// This method parses a subset of statements (break, continue, return, throw,
+// try) which are to be grouped because they all require their labeles to be
+// preserved in an enclosing block.
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseStatementAsUnlabelled(
+ ZoneList<const AstRawString*>* labels, bool* ok) {
+ switch (peek()) {
+ case Token::CONTINUE:
+ return ParseContinueStatement(ok);
+ case Token::BREAK:
+ return ParseBreakStatement(labels, ok);
+ case Token::RETURN:
+ return ParseReturnStatement(ok);
+ case Token::THROW:
+ return ParseThrowStatement(ok);
+ case Token::TRY:
+ return ParseTryStatement(ok);
+ default:
+ UNREACHABLE();
+ return impl()->NullStatement();
+ }
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
+ ZoneList<const AstRawString*>* labels, bool* ok) {
+ // Block ::
+ // '{' StatementList '}'
+
+ // Construct block expecting 16 statements.
+ BlockT body = factory()->NewBlock(labels, 16, false, kNoSourcePosition);
+
+ // Parse the statements and collect escaping labels.
+ Expect(Token::LBRACE, CHECK_OK_CUSTOM(NullBlock));
+ {
+ BlockState block_state(zone(), &scope_state_);
+ block_state.set_start_position(scanner()->location().beg_pos);
+ typename Types::Target target(this, body);
+
+ while (peek() != Token::RBRACE) {
+ StatementT stat = ParseStatementListItem(CHECK_OK_CUSTOM(NullBlock));
+ if (!impl()->IsNullStatement(stat) && !impl()->IsEmptyStatement(stat)) {
+ body->statements()->Add(stat, zone());
+ }
+ }
+
+ Expect(Token::RBRACE, CHECK_OK_CUSTOM(NullBlock));
+ block_state.set_end_position(scanner()->location().end_pos);
+ body->set_scope(block_state.FinalizedBlockScope());
+ }
+ return body;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseScopedStatement(
+ ZoneList<const AstRawString*>* labels, bool legacy, bool* ok) {
+ if (is_strict(language_mode()) || peek() != Token::FUNCTION ||
+ (legacy && allow_harmony_restrictive_declarations())) {
+ return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
+ } else {
+ if (legacy) {
+ impl()->CountUsage(v8::Isolate::kLegacyFunctionDeclaration);
+ }
+ // Make a block around the statement for a lexical binding
+ // is introduced by a FunctionDeclaration.
+ BlockState block_state(zone(), &scope_state_);
+ block_state.set_start_position(scanner()->location().beg_pos);
+ BlockT block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
+ StatementT body = ParseFunctionDeclaration(CHECK_OK);
+ block->statements()->Add(body, zone());
+ block_state.set_end_position(scanner()->location().end_pos);
+ block->set_scope(block_state.FinalizedBlockScope());
+ return block;
+ }
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseVariableStatement(
+ VariableDeclarationContext var_context,
+ ZoneList<const AstRawString*>* names, bool* ok) {
+ // VariableStatement ::
+ // VariableDeclarations ';'
+
+ // The scope of a var declared variable anywhere inside a function
+ // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
+ // transform a source-level var declaration into a (Function) Scope
+ // declaration, and rewrite the source-level initialization into an assignment
+ // statement. We use a block to collect multiple assignments.
+ //
+ // We mark the block as initializer block because we don't want the
+ // rewriter to add a '.result' assignment to such a block (to get compliant
+ // behavior for code such as print(eval('var x = 7')), and for cosmetic
+ // reasons when pretty-printing. Also, unless an assignment (initialization)
+ // is inside an initializer block, it is ignored.
+
+ DeclarationParsingResult parsing_result;
+ StatementT result =
+ ParseVariableDeclarations(var_context, &parsing_result, names, CHECK_OK);
+ ExpectSemicolon(CHECK_OK);
+ return result;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDebuggerStatement(
+ bool* ok) {
+ // In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
+ // contexts this is used as a statement which invokes the debugger as i a
+ // break point is present.
+ // DebuggerStatement ::
+ // 'debugger' ';'
+
+ int pos = peek_position();
+ Expect(Token::DEBUGGER, CHECK_OK);
+ ExpectSemicolon(CHECK_OK);
+ return factory()->NewDebuggerStatement(pos);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseExpressionOrLabelledStatement(
+ ZoneList<const AstRawString*>* labels,
+ AllowLabelledFunctionStatement allow_function, bool* ok) {
+ // ExpressionStatement | LabelledStatement ::
+ // Expression ';'
+ // Identifier ':' Statement
+ //
+ // ExpressionStatement[Yield] :
+ // [lookahead ∉ {{, function, class, let [}] Expression[In, ?Yield] ;
+
+ int pos = peek_position();
+
+ switch (peek()) {
+ case Token::FUNCTION:
+ case Token::LBRACE:
+ UNREACHABLE(); // Always handled by the callers.
+ case Token::CLASS:
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return impl()->NullStatement();
+ default:
+ break;
+ }
+
+ bool starts_with_identifier = peek_any_identifier();
+ ExpressionT expr = ParseExpression(true, CHECK_OK);
+ if (peek() == Token::COLON && starts_with_identifier &&
+ impl()->IsIdentifier(expr)) {
+ // The whole expression was a single identifier, and not, e.g.,
+ // something starting with an identifier or a parenthesized identifier.
+ labels = impl()->DeclareLabel(labels, impl()->AsIdentifierExpression(expr),
+ CHECK_OK);
+ Consume(Token::COLON);
+ // ES#sec-labelled-function-declarations Labelled Function Declarations
+ if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
+ if (allow_function == kAllowLabelledFunctionStatement) {
+ return ParseFunctionDeclaration(ok);
+ } else {
+ return ParseScopedStatement(labels, true, ok);
+ }
+ }
+ return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
+ }
+
+ // If we have an extension, we allow a native function declaration.
+ // A native function declaration starts with "native function" with
+ // no line-terminator between the two words.
+ if (extension_ != nullptr && peek() == Token::FUNCTION &&
+ !scanner()->HasAnyLineTerminatorBeforeNext() && impl()->IsNative(expr) &&
+ !scanner()->literal_contains_escapes()) {
+ return ParseNativeDeclaration(ok);
+ }
+
+ // Parsed expression statement, followed by semicolon.
+ ExpectSemicolon(CHECK_OK);
+ return factory()->NewExpressionStatement(expr, pos);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseIfStatement(
+ ZoneList<const AstRawString*>* labels, bool* ok) {
+ // IfStatement ::
+ // 'if' '(' Expression ')' Statement ('else' Statement)?
+
+ int pos = peek_position();
+ Expect(Token::IF, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+ ExpressionT condition = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+ StatementT then_statement = ParseScopedStatement(labels, false, CHECK_OK);
+ StatementT else_statement = impl()->NullStatement();
+ if (Check(Token::ELSE)) {
+ else_statement = ParseScopedStatement(labels, false, CHECK_OK);
+ } else {
+ else_statement = factory()->NewEmptyStatement(kNoSourcePosition);
+ }
+ return factory()->NewIfStatement(condition, then_statement, else_statement,
+ pos);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseContinueStatement(
+ bool* ok) {
+ // ContinueStatement ::
+ // 'continue' Identifier? ';'
+
+ int pos = peek_position();
+ Expect(Token::CONTINUE, CHECK_OK);
+ IdentifierT label = impl()->EmptyIdentifier();
+ Token::Value tok = peek();
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() && tok != Token::SEMICOLON &&
+ tok != Token::RBRACE && tok != Token::EOS) {
+ // ECMA allows "eval" or "arguments" as labels even in strict mode.
+ label = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
+ }
+ typename Types::IterationStatement target =
+ impl()->LookupContinueTarget(label, CHECK_OK);
+ if (impl()->IsNullStatement(target)) {
+ // Illegal continue statement.
+ MessageTemplate::Template message = MessageTemplate::kIllegalContinue;
+ if (!impl()->IsEmptyIdentifier(label)) {
+ message = MessageTemplate::kUnknownLabel;
+ }
+ ReportMessage(message, label);
+ *ok = false;
+ return impl()->NullStatement();
+ }
+ ExpectSemicolon(CHECK_OK);
+ return factory()->NewContinueStatement(target, pos);
+}
template <typename Impl>
-void ParserBase<Impl>::ObjectLiteralChecker::CheckProperty(
- Token::Value property, PropertyKind type, MethodKind method_type,
- ExpressionClassifier* classifier, bool* ok) {
- DCHECK(!IsStaticMethod(method_type));
- DCHECK(!IsSpecialMethod(method_type) || type == kMethodProperty);
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseBreakStatement(
+ ZoneList<const AstRawString*>* labels, bool* ok) {
+ // BreakStatement ::
+ // 'break' Identifier? ';'
+ int pos = peek_position();
+ Expect(Token::BREAK, CHECK_OK);
+ IdentifierT label = impl()->EmptyIdentifier();
+ Token::Value tok = peek();
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() && tok != Token::SEMICOLON &&
+ tok != Token::RBRACE && tok != Token::EOS) {
+ // ECMA allows "eval" or "arguments" as labels even in strict mode.
+ label = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
+ }
+ // Parse labeled break statements that target themselves into
+ // empty statements, e.g. 'l1: l2: l3: break l2;'
+ if (!impl()->IsEmptyIdentifier(label) &&
+ impl()->ContainsLabel(labels, label)) {
+ ExpectSemicolon(CHECK_OK);
+ return factory()->NewEmptyStatement(pos);
+ }
+ typename Types::BreakableStatement target =
+ impl()->LookupBreakTarget(label, CHECK_OK);
+ if (impl()->IsNullStatement(target)) {
+ // Illegal break statement.
+ MessageTemplate::Template message = MessageTemplate::kIllegalBreak;
+ if (!impl()->IsEmptyIdentifier(label)) {
+ message = MessageTemplate::kUnknownLabel;
+ }
+ ReportMessage(message, label);
+ *ok = false;
+ return impl()->NullStatement();
+ }
+ ExpectSemicolon(CHECK_OK);
+ return factory()->NewBreakStatement(target, pos);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement(
+ bool* ok) {
+ // ReturnStatement ::
+ // 'return' [no line terminator] Expression? ';'
+
+ // Consume the return token. It is necessary to do that before
+ // reporting any errors on it, because of the way errors are
+ // reported (underlining).
+ Expect(Token::RETURN, CHECK_OK);
+ Scanner::Location loc = scanner()->location();
+
+ switch (GetDeclarationScope()->scope_type()) {
+ case SCRIPT_SCOPE:
+ case EVAL_SCOPE:
+ case MODULE_SCOPE:
+ impl()->ReportMessageAt(loc, MessageTemplate::kIllegalReturn);
+ *ok = false;
+ return impl()->NullStatement();
+ default:
+ break;
+ }
+
+ Token::Value tok = peek();
+ ExpressionT return_value = impl()->EmptyExpression();
+ if (scanner()->HasAnyLineTerminatorBeforeNext() || tok == Token::SEMICOLON ||
+ tok == Token::RBRACE || tok == Token::EOS) {
+ if (IsSubclassConstructor(function_state_->kind())) {
+ return_value = impl()->ThisExpression(loc.beg_pos);
+ } else {
+ return_value = impl()->GetLiteralUndefined(position());
+ }
+ } else {
+ if (IsSubclassConstructor(function_state_->kind())) {
+ // Because of the return code rewriting that happens in case of a subclass
+ // constructor we don't want to accept tail calls, therefore we don't set
+ // ReturnExprScope to kInsideValidReturnStatement here.
+ return_value = ParseExpression(true, CHECK_OK);
+ } else {
+ ReturnExprScope maybe_allow_tail_calls(
+ function_state_, ReturnExprContext::kInsideValidReturnStatement);
+ return_value = ParseExpression(true, CHECK_OK);
+
+ if (allow_tailcalls() && !is_sloppy(language_mode()) && !is_resumable()) {
+ // ES6 14.6.1 Static Semantics: IsInTailPosition
+ function_state_->AddImplicitTailCallExpression(return_value);
+ }
+ }
+ }
+ ExpectSemicolon(CHECK_OK);
+ return_value = impl()->RewriteReturn(return_value, loc.beg_pos);
+ return factory()->NewReturnStatement(return_value, loc.beg_pos);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWithStatement(
+ ZoneList<const AstRawString*>* labels, bool* ok) {
+ // WithStatement ::
+ // 'with' '(' Expression ')' Statement
+
+ Expect(Token::WITH, CHECK_OK);
+ int pos = position();
+
+ if (is_strict(language_mode())) {
+ ReportMessage(MessageTemplate::kStrictWith);
+ *ok = false;
+ return impl()->NullStatement();
+ }
+
+ Expect(Token::LPAREN, CHECK_OK);
+ ExpressionT expr = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+
+ Scope* with_scope = NewScope(WITH_SCOPE);
+ StatementT body = impl()->NullStatement();
+ {
+ BlockState block_state(&scope_state_, with_scope);
+ with_scope->set_start_position(scanner()->peek_location().beg_pos);
+ body = ParseScopedStatement(labels, true, CHECK_OK);
+ with_scope->set_end_position(scanner()->location().end_pos);
+ }
+ return factory()->NewWithStatement(with_scope, expr, body, pos);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
+ ZoneList<const AstRawString*>* labels, bool* ok) {
+ // DoStatement ::
+ // 'do' Statement 'while' '(' Expression ')' ';'
+
+ auto loop = factory()->NewDoWhileStatement(labels, peek_position());
+ typename Types::Target target(this, loop);
+
+ Expect(Token::DO, CHECK_OK);
+ StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
+ Expect(Token::WHILE, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+
+ ExpressionT cond = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+
+ // Allow do-statements to be terminated with and without
+ // semi-colons. This allows code such as 'do;while(0)return' to
+ // parse, which would not be the case if we had used the
+ // ExpectSemicolon() functionality here.
+ Check(Token::SEMICOLON);
+
+ loop->Initialize(cond, body);
+ return loop;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWhileStatement(
+ ZoneList<const AstRawString*>* labels, bool* ok) {
+ // WhileStatement ::
+ // 'while' '(' Expression ')' Statement
+
+ auto loop = factory()->NewWhileStatement(labels, peek_position());
+ typename Types::Target target(this, loop);
+
+ Expect(Token::WHILE, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+ ExpressionT cond = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+ StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
+
+ loop->Initialize(cond, body);
+ return loop;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseThrowStatement(
+ bool* ok) {
+ // ThrowStatement ::
+ // 'throw' Expression ';'
+
+ Expect(Token::THROW, CHECK_OK);
+ int pos = position();
+ if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+ ReportMessage(MessageTemplate::kNewlineAfterThrow);
+ *ok = false;
+ return impl()->NullStatement();
+ }
+ ExpressionT exception = ParseExpression(true, CHECK_OK);
+ ExpectSemicolon(CHECK_OK);
+
+ return impl()->NewThrowStatement(exception, pos);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
+ ZoneList<const AstRawString*>* labels, bool* ok) {
+ // SwitchStatement ::
+ // 'switch' '(' Expression ')' '{' CaseClause* '}'
+ // CaseClause ::
+ // 'case' Expression ':' StatementList
+ // 'default' ':' StatementList
+
+ int switch_pos = peek_position();
+
+ Expect(Token::SWITCH, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+ ExpressionT tag = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+
+ auto switch_statement = factory()->NewSwitchStatement(labels, switch_pos);
+
+ {
+ BlockState cases_block_state(zone(), &scope_state_);
+ cases_block_state.set_start_position(scanner()->location().beg_pos);
+ cases_block_state.SetNonlinear();
+ typename Types::Target target(this, switch_statement);
+
+ bool default_seen = false;
+ auto cases = impl()->NewCaseClauseList(4);
+ Expect(Token::LBRACE, CHECK_OK);
+ while (peek() != Token::RBRACE) {
+ // An empty label indicates the default case.
+ ExpressionT label = impl()->EmptyExpression();
+ if (Check(Token::CASE)) {
+ label = ParseExpression(true, CHECK_OK);
+ } else {
+ Expect(Token::DEFAULT, CHECK_OK);
+ if (default_seen) {
+ ReportMessage(MessageTemplate::kMultipleDefaultsInSwitch);
+ *ok = false;
+ return impl()->NullStatement();
+ }
+ default_seen = true;
+ }
+ Expect(Token::COLON, CHECK_OK);
+ int clause_pos = position();
+ StatementListT statements = impl()->NewStatementList(5);
+ while (peek() != Token::CASE && peek() != Token::DEFAULT &&
+ peek() != Token::RBRACE) {
+ StatementT stat = ParseStatementListItem(CHECK_OK);
+ statements->Add(stat, zone());
+ }
+ auto clause = factory()->NewCaseClause(label, statements, clause_pos);
+ cases->Add(clause, zone());
+ }
+ Expect(Token::RBRACE, CHECK_OK);
+
+ cases_block_state.set_end_position(scanner()->location().end_pos);
+ return impl()->RewriteSwitchStatement(
+ tag, switch_statement, cases, cases_block_state.FinalizedBlockScope());
+ }
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
+ bool* ok) {
+ // TryStatement ::
+ // 'try' Block Catch
+ // 'try' Block Finally
+ // 'try' Block Catch Finally
+ //
+ // Catch ::
+ // 'catch' '(' Identifier ')' Block
+ //
+ // Finally ::
+ // 'finally' Block
+
+ Expect(Token::TRY, CHECK_OK);
+ int pos = position();
+
+ BlockT try_block = impl()->NullBlock();
+ {
+ ReturnExprScope no_tail_calls(function_state_,
+ ReturnExprContext::kInsideTryBlock);
+ try_block = ParseBlock(nullptr, CHECK_OK);
+ }
+
+ CatchInfo catch_info(this);
+ catch_info.for_promise_reject = allow_natives() && Check(Token::MOD);
+
+ if (peek() != Token::CATCH && peek() != Token::FINALLY) {
+ ReportMessage(MessageTemplate::kNoCatchOrFinally);
+ *ok = false;
+ return impl()->NullStatement();
+ }
+
+ BlockT catch_block = impl()->NullBlock();
+ if (Check(Token::CATCH)) {
+ Expect(Token::LPAREN, CHECK_OK);
+ catch_info.scope = NewScope(CATCH_SCOPE);
+ catch_info.scope->set_start_position(scanner()->location().beg_pos);
+
+ {
+ CollectExpressionsInTailPositionToListScope
+ collect_tail_call_expressions_scope(
+ function_state_, &catch_info.tail_call_expressions);
+ BlockState catch_block_state(&scope_state_, catch_info.scope);
+
+ catch_block = factory()->NewBlock(nullptr, 16, false, kNoSourcePosition);
+
+ // Create a block scope to hold any lexical declarations created
+ // as part of destructuring the catch parameter.
+ {
+ BlockState catch_variable_block_state(zone(), &scope_state_);
+ catch_variable_block_state.set_start_position(
+ scanner()->location().beg_pos);
+ typename Types::Target target(this, catch_block);
+
+ // This does not simply call ParsePrimaryExpression to avoid
+ // ExpressionFromIdentifier from being called in the first
+ // branch, which would introduce an unresolved symbol and mess
+ // with arrow function names.
+ if (peek_any_identifier()) {
+ catch_info.name =
+ ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
+ } else {
+ ExpressionClassifier pattern_classifier(this);
+ catch_info.pattern = ParsePrimaryExpression(CHECK_OK);
+ ValidateBindingPattern(CHECK_OK);
+ }
+
+ Expect(Token::RPAREN, CHECK_OK);
+ impl()->RewriteCatchPattern(&catch_info, CHECK_OK);
+ if (!impl()->IsNullStatement(catch_info.init_block)) {
+ catch_block->statements()->Add(catch_info.init_block, zone());
+ }
+
+ catch_info.inner_block = ParseBlock(nullptr, CHECK_OK);
+ catch_block->statements()->Add(catch_info.inner_block, zone());
+ impl()->ValidateCatchBlock(catch_info, CHECK_OK);
+ catch_variable_block_state.set_end_position(
+ scanner()->location().end_pos);
+ catch_block->set_scope(
+ catch_variable_block_state.FinalizedBlockScope());
+ }
+ }
+
+ catch_info.scope->set_end_position(scanner()->location().end_pos);
+ }
+
+ BlockT finally_block = impl()->NullBlock();
+ DCHECK(peek() == Token::FINALLY || !impl()->IsNullStatement(catch_block));
+ if (Check(Token::FINALLY)) {
+ finally_block = ParseBlock(nullptr, CHECK_OK);
+ }
+
+ return impl()->RewriteTryStatement(try_block, catch_block, finally_block,
+ catch_info, pos);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
+ ZoneList<const AstRawString*>* labels, bool* ok) {
+ int stmt_pos = peek_position();
+ ForInfo for_info(this);
+ bool bound_names_are_lexical = false;
+
+ // Create an in-between scope for let-bound iteration variables.
+ BlockState for_state(zone(), &scope_state_);
+ Expect(Token::FOR, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+ for_state.set_start_position(scanner()->location().beg_pos);
+ for_state.set_is_hidden();
+
+ StatementT init = impl()->NullStatement();
+ if (peek() != Token::SEMICOLON) {
+ // An initializer is present.
+ if (peek() == Token::VAR || peek() == Token::CONST ||
+ (peek() == Token::LET && IsNextLetKeyword())) {
+ // The initializer contains declarations.
+ ParseVariableDeclarations(kForStatement, &for_info.parsing_result,
+ nullptr, CHECK_OK);
+ bound_names_are_lexical =
+ IsLexicalVariableMode(for_info.parsing_result.descriptor.mode);
+ for_info.each_loc = scanner()->location();
+
+ if (CheckInOrOf(&for_info.mode)) {
+ // Just one declaration followed by in/of.
+ if (for_info.parsing_result.declarations.length() != 1) {
+ impl()->ReportMessageAt(
+ for_info.parsing_result.bindings_loc,
+ MessageTemplate::kForInOfLoopMultiBindings,
+ ForEachStatement::VisitModeString(for_info.mode));
+ *ok = false;
+ return impl()->NullStatement();
+ }
+ if (for_info.parsing_result.first_initializer_loc.IsValid() &&
+ (is_strict(language_mode()) ||
+ for_info.mode == ForEachStatement::ITERATE ||
+ bound_names_are_lexical ||
+ !impl()->IsIdentifier(
+ for_info.parsing_result.declarations[0].pattern) ||
+ allow_harmony_for_in())) {
+ // Only increment the use count if we would have let this through
+ // without the flag.
+ if (allow_harmony_for_in()) {
+ impl()->CountUsage(v8::Isolate::kForInInitializer);
+ }
+ impl()->ReportMessageAt(
+ for_info.parsing_result.first_initializer_loc,
+ MessageTemplate::kForInOfLoopInitializer,
+ ForEachStatement::VisitModeString(for_info.mode));
+ *ok = false;
+ return impl()->NullStatement();
+ }
+
+ BlockT init_block = impl()->RewriteForVarInLegacy(for_info);
+
+ auto loop =
+ factory()->NewForEachStatement(for_info.mode, labels, stmt_pos);
+ typename Types::Target target(this, loop);
+
+ int each_keyword_pos = scanner()->location().beg_pos;
+
+ ExpressionT enumerable = impl()->EmptyExpression();
+ if (for_info.mode == ForEachStatement::ITERATE) {
+ ExpressionClassifier classifier(this);
+ enumerable = ParseAssignmentExpression(true, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
+ } else {
+ enumerable = ParseExpression(true, CHECK_OK);
+ }
+
+ Expect(Token::RPAREN, CHECK_OK);
+
+ StatementT final_loop = impl()->NullStatement();
+ {
+ ReturnExprScope no_tail_calls(function_state_,
+ ReturnExprContext::kInsideForInOfBody);
+ BlockState block_state(zone(), &scope_state_);
+ block_state.set_start_position(scanner()->location().beg_pos);
+
+ StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
+
+ BlockT body_block = impl()->NullBlock();
+ ExpressionT each_variable = impl()->EmptyExpression();
+ impl()->DesugarBindingInForEachStatement(&for_info, &body_block,
+ &each_variable, CHECK_OK);
+ body_block->statements()->Add(body, zone());
+ final_loop = impl()->InitializeForEachStatement(
+ loop, each_variable, enumerable, body_block, each_keyword_pos);
+
+ block_state.set_end_position(scanner()->location().end_pos);
+ body_block->set_scope(block_state.FinalizedBlockScope());
+ }
+
+ init_block =
+ impl()->CreateForEachStatementTDZ(init_block, for_info, ok);
+
+ for_state.set_end_position(scanner()->location().end_pos);
+ Scope* for_scope = for_state.FinalizedBlockScope();
+ // Parsed for-in loop w/ variable declarations.
+ if (!impl()->IsNullStatement(init_block)) {
+ init_block->statements()->Add(final_loop, zone());
+ init_block->set_scope(for_scope);
+ return init_block;
+ } else {
+ DCHECK_NULL(for_scope);
+ return final_loop;
+ }
+ } else {
+ // One or more declaration not followed by in/of.
+ init = impl()->BuildInitializationBlock(
+ &for_info.parsing_result,
+ bound_names_are_lexical ? &for_info.bound_names : nullptr,
+ CHECK_OK);
+ }
+ } else {
+ // The initializer does not contain declarations.
+ int lhs_beg_pos = peek_position();
+ ExpressionClassifier classifier(this);
+ ExpressionT expression = ParseExpressionCoverGrammar(false, CHECK_OK);
+ int lhs_end_pos = scanner()->location().end_pos;
+
+ bool is_for_each = CheckInOrOf(&for_info.mode);
+ bool is_destructuring = is_for_each && (expression->IsArrayLiteral() ||
+ expression->IsObjectLiteral());
+
+ if (is_destructuring) {
+ ValidateAssignmentPattern(CHECK_OK);
+ } else {
+ impl()->RewriteNonPattern(CHECK_OK);
+ }
+
+ if (is_for_each) {
+ // Initializer is reference followed by in/of.
+ if (!is_destructuring) {
+ expression = impl()->CheckAndRewriteReferenceExpression(
+ expression, lhs_beg_pos, lhs_end_pos,
+ MessageTemplate::kInvalidLhsInFor, kSyntaxError, CHECK_OK);
+ }
+
+ auto loop =
+ factory()->NewForEachStatement(for_info.mode, labels, stmt_pos);
+ typename Types::Target target(this, loop);
+
+ int each_keyword_pos = scanner()->location().beg_pos;
+
+ ExpressionT enumerable = impl()->EmptyExpression();
+ if (for_info.mode == ForEachStatement::ITERATE) {
+ ExpressionClassifier classifier(this);
+ enumerable = ParseAssignmentExpression(true, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
+ } else {
+ enumerable = ParseExpression(true, CHECK_OK);
+ }
+
+ Expect(Token::RPAREN, CHECK_OK);
+
+ {
+ ReturnExprScope no_tail_calls(function_state_,
+ ReturnExprContext::kInsideForInOfBody);
+ BlockState block_state(zone(), &scope_state_);
+ block_state.set_start_position(scanner()->location().beg_pos);
+
+ // For legacy compat reasons, give for loops similar treatment to
+ // if statements in allowing a function declaration for a body
+ StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
+ block_state.set_end_position(scanner()->location().end_pos);
+ StatementT final_loop = impl()->InitializeForEachStatement(
+ loop, expression, enumerable, body, each_keyword_pos);
+
+ Scope* for_scope = for_state.FinalizedBlockScope();
+ DCHECK_NULL(for_scope);
+ USE(for_scope);
+ Scope* block_scope = block_state.FinalizedBlockScope();
+ DCHECK_NULL(block_scope);
+ USE(block_scope);
+ return final_loop;
+ }
+ } else {
+ // Initializer is just an expression.
+ init = factory()->NewExpressionStatement(expression, lhs_beg_pos);
+ }
+ }
+ }
+
+ // Standard 'for' loop, we have parsed the initializer at this point.
+ auto loop = factory()->NewForStatement(labels, stmt_pos);
+ typename Types::Target target(this, loop);
+
+ Expect(Token::SEMICOLON, CHECK_OK);
+
+ ExpressionT cond = impl()->EmptyExpression();
+ StatementT next = impl()->NullStatement();
+ StatementT body = impl()->NullStatement();
+
+ // If there are let bindings, then condition and the next statement of the
+ // for loop must be parsed in a new scope.
+ Scope* inner_scope = scope();
+ // TODO(verwaest): Allocate this through a ScopeState as well.
+ if (bound_names_are_lexical && for_info.bound_names.length() > 0) {
+ inner_scope = NewScopeWithParent(inner_scope, BLOCK_SCOPE);
+ inner_scope->set_start_position(scanner()->location().beg_pos);
+ }
+ {
+ BlockState block_state(&scope_state_, inner_scope);
+
+ if (peek() != Token::SEMICOLON) {
+ cond = ParseExpression(true, CHECK_OK);
+ }
+ Expect(Token::SEMICOLON, CHECK_OK);
+
+ if (peek() != Token::RPAREN) {
+ ExpressionT exp = ParseExpression(true, CHECK_OK);
+ next = factory()->NewExpressionStatement(exp, exp->position());
+ }
+ Expect(Token::RPAREN, CHECK_OK);
+
+ body = ParseScopedStatement(nullptr, true, CHECK_OK);
+ }
+
+ if (bound_names_are_lexical && for_info.bound_names.length() > 0) {
+ auto result = impl()->DesugarLexicalBindingsInForStatement(
+ loop, init, cond, next, body, inner_scope, for_info, CHECK_OK);
+ for_state.set_end_position(scanner()->location().end_pos);
+ return result;
+ } else {
+ for_state.set_end_position(scanner()->location().end_pos);
+ Scope* for_scope = for_state.FinalizedBlockScope();
+ if (for_scope != nullptr) {
+ // Rewrite a for statement of the form
+ // for (const x = i; c; n) b
+ //
+ // into
+ //
+ // {
+ // const x = i;
+ // for (; c; n) b
+ // }
+ //
+ // or, desugar
+ // for (; c; n) b
+ // into
+ // {
+ // for (; c; n) b
+ // }
+ // just in case b introduces a lexical binding some other way, e.g., if b
+ // is a FunctionDeclaration.
+ BlockT block = factory()->NewBlock(nullptr, 2, false, kNoSourcePosition);
+ if (!impl()->IsNullStatement(init)) {
+ block->statements()->Add(init, zone());
+ }
+ block->statements()->Add(loop, zone());
+ block->set_scope(for_scope);
+ loop->Initialize(init, cond, next, body);
+ return block;
+ } else {
+ loop->Initialize(init, cond, next, body);
+ return loop;
+ }
+ }
+}
+
+#undef CHECK_OK
+#undef CHECK_OK_CUSTOM
+
+template <typename Impl>
+void ParserBase<Impl>::ObjectLiteralChecker::CheckDuplicateProto(
+ Token::Value property) {
if (property == Token::SMI || property == Token::NUMBER) return;
- if (type == kValueProperty && IsProto()) {
+ if (IsProto()) {
if (has_seen_proto_) {
- classifier->RecordObjectLiteralError(
+ this->parser()->classifier()->RecordExpressionError(
this->scanner()->location(), MessageTemplate::kDuplicateProto);
return;
}
@@ -3698,23 +5423,22 @@ void ParserBase<Impl>::ObjectLiteralChecker::CheckProperty(
}
template <typename Impl>
-void ParserBase<Impl>::ClassLiteralChecker::CheckProperty(
- Token::Value property, PropertyKind type, MethodKind method_type,
- ExpressionClassifier* classifier, bool* ok) {
- DCHECK(type == kMethodProperty || type == kAccessorProperty);
+void ParserBase<Impl>::ClassLiteralChecker::CheckClassMethodName(
+ Token::Value property, PropertyKind type, bool is_generator, bool is_async,
+ bool is_static, bool* ok) {
+ DCHECK(type == PropertyKind::kMethodProperty ||
+ type == PropertyKind::kAccessorProperty);
if (property == Token::SMI || property == Token::NUMBER) return;
- if (IsStaticMethod(method_type)) {
+ if (is_static) {
if (IsPrototype()) {
this->parser()->ReportMessage(MessageTemplate::kStaticPrototype);
*ok = false;
return;
}
} else if (IsConstructor()) {
- const bool is_generator = IsGeneratorMethod(method_type);
- const bool is_async = IsAsyncMethod(method_type);
- if (is_generator || is_async || type == kAccessorProperty) {
+ if (is_generator || is_async || type == PropertyKind::kAccessorProperty) {
MessageTemplate::Template msg =
is_generator ? MessageTemplate::kConstructorIsGenerator
: is_async ? MessageTemplate::kConstructorIsAsync
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index cfc2de8f38..7b88695e77 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -15,6 +15,7 @@
#include "src/base/platform/platform.h"
#include "src/char-predicates-inl.h"
#include "src/messages.h"
+#include "src/parsing/duplicate-finder.h"
#include "src/parsing/parameter-initializer-rewriter.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/rewriter.h"
@@ -121,12 +122,20 @@ class DiscardableZoneScope {
if (use_temp_zone) {
parser_->fni_ = &fni_;
parser_->zone_ = temp_zone;
+ if (parser_->reusable_preparser_ != nullptr) {
+ parser_->reusable_preparser_->zone_ = temp_zone;
+ }
}
}
- ~DiscardableZoneScope() {
+ void Reset() {
parser_->fni_ = prev_fni_;
parser_->zone_ = prev_zone_;
+ if (parser_->reusable_preparser_ != nullptr) {
+ parser_->reusable_preparser_->zone_ = prev_zone_;
+ }
+ ast_node_factory_scope_.Reset();
}
+ ~DiscardableZoneScope() { Reset(); }
private:
AstNodeFactory::BodyScope ast_node_factory_scope_;
@@ -149,9 +158,64 @@ void Parser::SetCachedData(ParseInfo* info) {
}
}
+Expression* Parser::CallClassFieldInitializer(Scope* scope,
+ Expression* this_expr) {
+ // This produces the expression
+ // `.class_field_intializer(this_expr)`, where '.class_field_intializer' is
+ // the name
+ // of a synthetic variable.
+ // 'this_expr' will be 'this' in a base constructor and the result of calling
+ // 'super' in a derived one.
+ const AstRawString* init_fn_name =
+ ast_value_factory()->dot_class_field_init_string();
+ VariableProxy* init_fn_proxy = scope->NewUnresolved(factory(), init_fn_name);
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+ args->Add(init_fn_proxy, zone());
+ args->Add(this_expr, zone());
+ return factory()->NewCallRuntime(Runtime::kInlineCall, args,
+ kNoSourcePosition);
+}
+
+Expression* Parser::RewriteSuperCall(Expression* super_call) {
+ // TODO(bakkot) find a way to avoid this for classes without fields.
+ if (!allow_harmony_class_fields()) {
+ return super_call;
+ }
+ // This turns a super call `super()` into a do expression of the form
+ // do {
+ // tmp x = super();
+ // if (.class-field-init)
+ // .class-field-init(x)
+ // x; // This isn't actually present; our do-expression representation
+ // allows specifying that the expression returns x directly.
+ // }
+ Variable* var_tmp =
+ scope()->NewTemporary(ast_value_factory()->empty_string());
+ Block* block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
+ Assignment* assignment = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(var_tmp), super_call,
+ kNoSourcePosition);
+ block->statements()->Add(
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition), zone());
+ const AstRawString* init_fn_name =
+ ast_value_factory()->dot_class_field_init_string();
+ VariableProxy* init_fn_proxy =
+ scope()->NewUnresolved(factory(), init_fn_name);
+ Expression* condition = init_fn_proxy;
+ Statement* initialize = factory()->NewExpressionStatement(
+ CallClassFieldInitializer(scope(), factory()->NewVariableProxy(var_tmp)),
+ kNoSourcePosition);
+ IfStatement* if_statement = factory()->NewIfStatement(
+ condition, initialize, factory()->NewEmptyStatement(kNoSourcePosition),
+ kNoSourcePosition);
+ block->statements()->Add(if_statement, zone());
+ return factory()->NewDoExpression(block, var_tmp, kNoSourcePosition);
+}
+
FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
- bool call_super, int pos,
- int end_pos,
+ bool call_super,
+ bool requires_class_field_init,
+ int pos, int end_pos,
LanguageMode language_mode) {
int materialized_literal_count = -1;
int expected_property_count = -1;
@@ -170,7 +234,7 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
{
FunctionState function_state(&function_state_, &scope_state_,
- function_scope, kind);
+ function_scope);
body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
if (call_super) {
@@ -204,8 +268,11 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
VariableProxy* new_target_proxy =
NewUnresolved(ast_value_factory()->new_target_string(), pos);
args->Add(new_target_proxy, zone());
- CallRuntime* call = factory()->NewCallRuntime(
+ Expression* call = factory()->NewCallRuntime(
Context::REFLECT_CONSTRUCT_INDEX, args, pos);
+ if (requires_class_field_init) {
+ call = CallClassFieldInitializer(scope(), call);
+ }
body->Add(factory()->NewReturnStatement(call, pos), zone());
}
@@ -218,7 +285,9 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
expected_property_count, parameter_count,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression,
- FunctionLiteral::kShouldLazyCompile, kind, pos);
+ FunctionLiteral::kShouldLazyCompile, pos);
+
+ function_literal->set_requires_class_field_init(requires_class_field_init);
return function_literal;
}
@@ -230,41 +299,39 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
// 'continue' statement targets). Upon construction, a new target is
// added; it is removed upon destruction.
-class Target BASE_EMBEDDED {
+class ParserTarget BASE_EMBEDDED {
public:
- Target(Target** variable, BreakableStatement* statement)
- : variable_(variable), statement_(statement), previous_(*variable) {
- *variable = this;
+ ParserTarget(ParserBase<Parser>* parser, BreakableStatement* statement)
+ : variable_(&parser->impl()->target_stack_),
+ statement_(statement),
+ previous_(parser->impl()->target_stack_) {
+ parser->impl()->target_stack_ = this;
}
- ~Target() {
- *variable_ = previous_;
- }
+ ~ParserTarget() { *variable_ = previous_; }
- Target* previous() { return previous_; }
+ ParserTarget* previous() { return previous_; }
BreakableStatement* statement() { return statement_; }
private:
- Target** variable_;
+ ParserTarget** variable_;
BreakableStatement* statement_;
- Target* previous_;
+ ParserTarget* previous_;
};
-
-class TargetScope BASE_EMBEDDED {
+class ParserTargetScope BASE_EMBEDDED {
public:
- explicit TargetScope(Target** variable)
- : variable_(variable), previous_(*variable) {
- *variable = NULL;
+ explicit ParserTargetScope(ParserBase<Parser>* parser)
+ : variable_(&parser->impl()->target_stack_),
+ previous_(parser->impl()->target_stack_) {
+ parser->impl()->target_stack_ = nullptr;
}
- ~TargetScope() {
- *variable_ = previous_;
- }
+ ~ParserTargetScope() { *variable_ = previous_; }
private:
- Target** variable_;
- Target* previous_;
+ ParserTarget** variable_;
+ ParserTarget* previous_;
};
@@ -276,17 +343,14 @@ class TargetScope BASE_EMBEDDED {
// thus it must never be used where only a single statement
// is correct (e.g. an if statement branch w/o braces)!
-#define CHECK_OK ok); \
- if (!*ok) return nullptr; \
+#define CHECK_OK_VALUE(x) ok); \
+ if (!*ok) return x; \
((void)0
#define DUMMY ) // to make indentation work
#undef DUMMY
-#define CHECK_OK_VOID ok); \
- if (!*ok) return; \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
+#define CHECK_OK CHECK_OK_VALUE(nullptr)
+#define CHECK_OK_VOID CHECK_OK_VALUE(this->Void())
#define CHECK_FAILED /**/); \
if (failed_) return nullptr; \
@@ -297,76 +361,9 @@ class TargetScope BASE_EMBEDDED {
// ----------------------------------------------------------------------------
// Implementation of Parser
-bool ParserBaseTraits<Parser>::IsEval(const AstRawString* identifier) const {
- return identifier == delegate()->ast_value_factory()->eval_string();
-}
-
-bool ParserBaseTraits<Parser>::IsArguments(
- const AstRawString* identifier) const {
- return identifier == delegate()->ast_value_factory()->arguments_string();
-}
-
-bool ParserBaseTraits<Parser>::IsEvalOrArguments(
- const AstRawString* identifier) const {
- return IsEval(identifier) || IsArguments(identifier);
-}
-
-bool ParserBaseTraits<Parser>::IsUndefined(
- const AstRawString* identifier) const {
- return identifier == delegate()->ast_value_factory()->undefined_string();
-}
-
-bool ParserBaseTraits<Parser>::IsPrototype(
- const AstRawString* identifier) const {
- return identifier == delegate()->ast_value_factory()->prototype_string();
-}
-
-bool ParserBaseTraits<Parser>::IsConstructor(
- const AstRawString* identifier) const {
- return identifier == delegate()->ast_value_factory()->constructor_string();
-}
-
-bool ParserBaseTraits<Parser>::IsThisProperty(Expression* expression) {
- DCHECK(expression != NULL);
- Property* property = expression->AsProperty();
- return property != NULL && property->obj()->IsVariableProxy() &&
- property->obj()->AsVariableProxy()->is_this();
-}
-
-bool ParserBaseTraits<Parser>::IsIdentifier(Expression* expression) {
- VariableProxy* operand = expression->AsVariableProxy();
- return operand != NULL && !operand->is_this();
-}
-
-void ParserBaseTraits<Parser>::PushPropertyName(FuncNameInferrer* fni,
- Expression* expression) {
- if (expression->IsPropertyName()) {
- fni->PushLiteralName(expression->AsLiteral()->AsRawPropertyName());
- } else {
- fni->PushLiteralName(
- delegate()->ast_value_factory()->anonymous_function_string());
- }
-}
-
-void ParserBaseTraits<Parser>::CheckAssigningFunctionLiteralToProperty(
- Expression* left, Expression* right) {
- DCHECK(left != NULL);
- if (left->IsProperty() && right->IsFunctionLiteral()) {
- right->AsFunctionLiteral()->set_pretenure();
- }
-}
-
-Expression* ParserBaseTraits<Parser>::MarkExpressionAsAssigned(
- Expression* expression) {
- VariableProxy* proxy =
- expression != NULL ? expression->AsVariableProxy() : NULL;
- if (proxy != NULL) proxy->set_is_assigned();
- return expression;
-}
-
-bool ParserBaseTraits<Parser>::ShortcutNumericLiteralBinaryExpression(
- Expression** x, Expression* y, Token::Value op, int pos,
- AstNodeFactory* factory) {
+bool Parser::ShortcutNumericLiteralBinaryExpression(Expression** x,
+ Expression* y,
+ Token::Value op, int pos) {
if ((*x)->AsLiteral() && (*x)->AsLiteral()->raw_value()->IsNumber() &&
y->AsLiteral() && y->AsLiteral()->raw_value()->IsNumber()) {
double x_val = (*x)->AsLiteral()->raw_value()->AsNumber();
@@ -376,53 +373,53 @@ bool ParserBaseTraits<Parser>::ShortcutNumericLiteralBinaryExpression(
bool has_dot = x_has_dot || y_has_dot;
switch (op) {
case Token::ADD:
- *x = factory->NewNumberLiteral(x_val + y_val, pos, has_dot);
+ *x = factory()->NewNumberLiteral(x_val + y_val, pos, has_dot);
return true;
case Token::SUB:
- *x = factory->NewNumberLiteral(x_val - y_val, pos, has_dot);
+ *x = factory()->NewNumberLiteral(x_val - y_val, pos, has_dot);
return true;
case Token::MUL:
- *x = factory->NewNumberLiteral(x_val * y_val, pos, has_dot);
+ *x = factory()->NewNumberLiteral(x_val * y_val, pos, has_dot);
return true;
case Token::DIV:
- *x = factory->NewNumberLiteral(x_val / y_val, pos, has_dot);
+ *x = factory()->NewNumberLiteral(x_val / y_val, pos, has_dot);
return true;
case Token::BIT_OR: {
int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
- *x = factory->NewNumberLiteral(value, pos, has_dot);
+ *x = factory()->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::BIT_AND: {
int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
- *x = factory->NewNumberLiteral(value, pos, has_dot);
+ *x = factory()->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::BIT_XOR: {
int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
- *x = factory->NewNumberLiteral(value, pos, has_dot);
+ *x = factory()->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::SHL: {
int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
- *x = factory->NewNumberLiteral(value, pos, has_dot);
+ *x = factory()->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::SHR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
uint32_t value = DoubleToUint32(x_val) >> shift;
- *x = factory->NewNumberLiteral(value, pos, has_dot);
+ *x = factory()->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::SAR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
- *x = factory->NewNumberLiteral(value, pos, has_dot);
+ *x = factory()->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::EXP: {
double value = Pow(x_val, y_val);
int int_value = static_cast<int>(value);
- *x = factory->NewNumberLiteral(
+ *x = factory()->NewNumberLiteral(
int_value == value && value != -0.0 ? int_value : value, pos,
has_dot);
return true;
@@ -434,15 +431,15 @@ bool ParserBaseTraits<Parser>::ShortcutNumericLiteralBinaryExpression(
return false;
}
-Expression* ParserBaseTraits<Parser>::BuildUnaryExpression(
- Expression* expression, Token::Value op, int pos, AstNodeFactory* factory) {
+Expression* Parser::BuildUnaryExpression(Expression* expression,
+ Token::Value op, int pos) {
DCHECK(expression != NULL);
if (expression->IsLiteral()) {
const AstValue* literal = expression->AsLiteral()->raw_value();
if (op == Token::NOT) {
// Convert the literal to a boolean condition and negate it.
bool condition = literal->BooleanValue();
- return factory->NewBooleanLiteral(!condition, pos);
+ return factory()->NewBooleanLiteral(!condition, pos);
} else if (literal->IsNumber()) {
// Compute some expressions involving only number literals.
double value = literal->AsNumber();
@@ -451,9 +448,10 @@ Expression* ParserBaseTraits<Parser>::BuildUnaryExpression(
case Token::ADD:
return expression;
case Token::SUB:
- return factory->NewNumberLiteral(-value, pos, has_dot);
+ return factory()->NewNumberLiteral(-value, pos, has_dot);
case Token::BIT_NOT:
- return factory->NewNumberLiteral(~DoubleToInt32(value), pos, has_dot);
+ return factory()->NewNumberLiteral(~DoubleToInt32(value), pos,
+ has_dot);
default:
break;
}
@@ -461,53 +459,33 @@ Expression* ParserBaseTraits<Parser>::BuildUnaryExpression(
}
// Desugar '+foo' => 'foo*1'
if (op == Token::ADD) {
- return factory->NewBinaryOperation(
- Token::MUL, expression, factory->NewNumberLiteral(1, pos, true), pos);
+ return factory()->NewBinaryOperation(
+ Token::MUL, expression, factory()->NewNumberLiteral(1, pos, true), pos);
}
// The same idea for '-foo' => 'foo*(-1)'.
if (op == Token::SUB) {
- return factory->NewBinaryOperation(
- Token::MUL, expression, factory->NewNumberLiteral(-1, pos), pos);
+ return factory()->NewBinaryOperation(
+ Token::MUL, expression, factory()->NewNumberLiteral(-1, pos), pos);
}
// ...and one more time for '~foo' => 'foo^(~0)'.
if (op == Token::BIT_NOT) {
- return factory->NewBinaryOperation(
- Token::BIT_XOR, expression, factory->NewNumberLiteral(~0, pos), pos);
+ return factory()->NewBinaryOperation(
+ Token::BIT_XOR, expression, factory()->NewNumberLiteral(~0, pos), pos);
}
- return factory->NewUnaryOperation(op, expression, pos);
+ return factory()->NewUnaryOperation(op, expression, pos);
}
-Expression* ParserBaseTraits<Parser>::BuildIteratorResult(Expression* value,
- bool done) {
+Expression* Parser::BuildIteratorResult(Expression* value, bool done) {
int pos = kNoSourcePosition;
- AstNodeFactory* factory = delegate()->factory();
- Zone* zone = delegate()->zone();
-
- if (value == nullptr) value = factory->NewUndefinedLiteral(pos);
-
- auto args = new (zone) ZoneList<Expression*>(2, zone);
- args->Add(value, zone);
- args->Add(factory->NewBooleanLiteral(done, pos), zone);
-
- return factory->NewCallRuntime(Runtime::kInlineCreateIterResultObject, args,
- pos);
-}
-Expression* ParserBaseTraits<Parser>::NewThrowReferenceError(
- MessageTemplate::Template message, int pos) {
- return delegate()->NewThrowError(
- Runtime::kNewReferenceError, message,
- delegate()->ast_value_factory()->empty_string(), pos);
-}
+ if (value == nullptr) value = factory()->NewUndefinedLiteral(pos);
-Expression* ParserBaseTraits<Parser>::NewThrowSyntaxError(
- MessageTemplate::Template message, const AstRawString* arg, int pos) {
- return delegate()->NewThrowError(Runtime::kNewSyntaxError, message, arg, pos);
-}
+ auto args = new (zone()) ZoneList<Expression*>(2, zone());
+ args->Add(value, zone());
+ args->Add(factory()->NewBooleanLiteral(done, pos), zone());
-Expression* ParserBaseTraits<Parser>::NewThrowTypeError(
- MessageTemplate::Template message, const AstRawString* arg, int pos) {
- return delegate()->NewThrowError(Runtime::kNewTypeError, message, arg, pos);
+ return factory()->NewCallRuntime(Runtime::kInlineCreateIterResultObject, args,
+ pos);
}
Expression* Parser::NewThrowError(Runtime::FunctionId id,
@@ -520,124 +498,62 @@ Expression* Parser::NewThrowError(Runtime::FunctionId id,
return factory()->NewThrow(call_constructor, pos);
}
-void ParserBaseTraits<Parser>::ReportMessageAt(
- Scanner::Location source_location, MessageTemplate::Template message,
- const char* arg, ParseErrorType error_type) {
- if (delegate()->stack_overflow()) {
- // Suppress the error message (syntax error or such) in the presence of a
- // stack overflow. The isolate allows only one pending exception at at time
- // and we want to report the stack overflow later.
- return;
- }
- delegate()->pending_error_handler_.ReportMessageAt(source_location.beg_pos,
- source_location.end_pos,
- message, arg, error_type);
-}
-
-void ParserBaseTraits<Parser>::ReportMessageAt(
- Scanner::Location source_location, MessageTemplate::Template message,
- const AstRawString* arg, ParseErrorType error_type) {
- if (delegate()->stack_overflow()) {
- // Suppress the error message (syntax error or such) in the presence of a
- // stack overflow. The isolate allows only one pending exception at at time
- // and we want to report the stack overflow later.
- return;
- }
- delegate()->pending_error_handler_.ReportMessageAt(source_location.beg_pos,
- source_location.end_pos,
- message, arg, error_type);
-}
-
-const AstRawString* ParserBaseTraits<Parser>::GetSymbol(
- Scanner* scanner) const {
- const AstRawString* result =
- delegate()->scanner()->CurrentSymbol(delegate()->ast_value_factory());
- DCHECK(result != NULL);
- return result;
-}
-
-const AstRawString* ParserBaseTraits<Parser>::GetNumberAsSymbol(
- Scanner* scanner) const {
- double double_value = delegate()->scanner()->DoubleValue();
- char array[100];
- const char* string = DoubleToCString(double_value, ArrayVector(array));
- return delegate()->ast_value_factory()->GetOneByteString(string);
-}
-
-const AstRawString* ParserBaseTraits<Parser>::GetNextSymbol(
- Scanner* scanner) const {
- return delegate()->scanner()->NextSymbol(delegate()->ast_value_factory());
-}
-
-Expression* ParserBaseTraits<Parser>::ThisExpression(int pos) {
- return delegate()->NewUnresolved(
- delegate()->ast_value_factory()->this_string(), pos, pos + 4,
- Variable::THIS);
-}
-
-Expression* ParserBaseTraits<Parser>::NewSuperPropertyReference(
- AstNodeFactory* factory, int pos) {
+Expression* Parser::NewSuperPropertyReference(int pos) {
// this_function[home_object_symbol]
- VariableProxy* this_function_proxy = delegate()->NewUnresolved(
- delegate()->ast_value_factory()->this_function_string(), pos);
+ VariableProxy* this_function_proxy =
+ NewUnresolved(ast_value_factory()->this_function_string(), pos);
Expression* home_object_symbol_literal =
- factory->NewSymbolLiteral("home_object_symbol", kNoSourcePosition);
- Expression* home_object = factory->NewProperty(
+ factory()->NewSymbolLiteral("home_object_symbol", kNoSourcePosition);
+ Expression* home_object = factory()->NewProperty(
this_function_proxy, home_object_symbol_literal, pos);
- return factory->NewSuperPropertyReference(
+ return factory()->NewSuperPropertyReference(
ThisExpression(pos)->AsVariableProxy(), home_object, pos);
}
-Expression* ParserBaseTraits<Parser>::NewSuperCallReference(
- AstNodeFactory* factory, int pos) {
- VariableProxy* new_target_proxy = delegate()->NewUnresolved(
- delegate()->ast_value_factory()->new_target_string(), pos);
- VariableProxy* this_function_proxy = delegate()->NewUnresolved(
- delegate()->ast_value_factory()->this_function_string(), pos);
- return factory->NewSuperCallReference(ThisExpression(pos)->AsVariableProxy(),
- new_target_proxy, this_function_proxy,
- pos);
+Expression* Parser::NewSuperCallReference(int pos) {
+ VariableProxy* new_target_proxy =
+ NewUnresolved(ast_value_factory()->new_target_string(), pos);
+ VariableProxy* this_function_proxy =
+ NewUnresolved(ast_value_factory()->this_function_string(), pos);
+ return factory()->NewSuperCallReference(
+ ThisExpression(pos)->AsVariableProxy(), new_target_proxy,
+ this_function_proxy, pos);
}
-Expression* ParserBaseTraits<Parser>::NewTargetExpression(int pos) {
+Expression* Parser::NewTargetExpression(int pos) {
static const int kNewTargetStringLength = 10;
- auto proxy = delegate()->NewUnresolved(
- delegate()->ast_value_factory()->new_target_string(), pos,
- pos + kNewTargetStringLength);
+ auto proxy = NewUnresolved(ast_value_factory()->new_target_string(), pos,
+ pos + kNewTargetStringLength);
proxy->set_is_new_target();
return proxy;
}
-Expression* ParserBaseTraits<Parser>::FunctionSentExpression(
- AstNodeFactory* factory, int pos) const {
+Expression* Parser::FunctionSentExpression(int pos) {
// We desugar function.sent into %_GeneratorGetInputOrDebugPos(generator).
- Zone* zone = delegate()->zone();
- ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(1, zone);
- VariableProxy* generator = factory->NewVariableProxy(
- delegate()->function_state_->generator_object_variable());
- args->Add(generator, zone);
- return factory->NewCallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos,
- args, pos);
-}
-
-Literal* ParserBaseTraits<Parser>::ExpressionFromLiteral(
- Token::Value token, int pos, Scanner* scanner,
- AstNodeFactory* factory) const {
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+ VariableProxy* generator =
+ factory()->NewVariableProxy(function_state_->generator_object_variable());
+ args->Add(generator, zone());
+ return factory()->NewCallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos,
+ args, pos);
+}
+
+Literal* Parser::ExpressionFromLiteral(Token::Value token, int pos) {
switch (token) {
case Token::NULL_LITERAL:
- return factory->NewNullLiteral(pos);
+ return factory()->NewNullLiteral(pos);
case Token::TRUE_LITERAL:
- return factory->NewBooleanLiteral(true, pos);
+ return factory()->NewBooleanLiteral(true, pos);
case Token::FALSE_LITERAL:
- return factory->NewBooleanLiteral(false, pos);
+ return factory()->NewBooleanLiteral(false, pos);
case Token::SMI: {
- int value = scanner->smi_value();
- return factory->NewSmiLiteral(value, pos);
+ int value = scanner()->smi_value();
+ return factory()->NewSmiLiteral(value, pos);
}
case Token::NUMBER: {
- bool has_dot = scanner->ContainsDot();
- double value = scanner->DoubleValue();
- return factory->NewNumberLiteral(value, pos, has_dot);
+ bool has_dot = scanner()->ContainsDot();
+ double value = scanner()->DoubleValue();
+ return factory()->NewNumberLiteral(value, pos, has_dot);
}
default:
DCHECK(false);
@@ -645,43 +561,74 @@ Literal* ParserBaseTraits<Parser>::ExpressionFromLiteral(
return NULL;
}
-Expression* ParserBaseTraits<Parser>::ExpressionFromIdentifier(
- const AstRawString* name, int start_position, int end_position,
- InferName infer) {
- if (infer == InferName::kYes && delegate()->fni_ != NULL) {
- delegate()->fni_->PushVariableName(name);
- }
- return delegate()->NewUnresolved(name, start_position, end_position);
-}
-
-Expression* ParserBaseTraits<Parser>::ExpressionFromString(
- int pos, Scanner* scanner, AstNodeFactory* factory) const {
- const AstRawString* symbol = GetSymbol(scanner);
- if (delegate()->fni_ != NULL) delegate()->fni_->PushLiteralName(symbol);
- return factory->NewStringLiteral(symbol, pos);
-}
-
-Expression* ParserBaseTraits<Parser>::GetIterator(Expression* iterable,
- AstNodeFactory* factory,
- int pos) {
+Expression* Parser::GetIterator(Expression* iterable, int pos) {
Expression* iterator_symbol_literal =
- factory->NewSymbolLiteral("iterator_symbol", kNoSourcePosition);
+ factory()->NewSymbolLiteral("iterator_symbol", kNoSourcePosition);
Expression* prop =
- factory->NewProperty(iterable, iterator_symbol_literal, pos);
- Zone* zone = delegate()->zone();
- ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(0, zone);
- return factory->NewCall(prop, args, pos);
-}
-
-Literal* ParserBaseTraits<Parser>::GetLiteralTheHole(
- int position, AstNodeFactory* factory) const {
- return factory->NewTheHoleLiteral(kNoSourcePosition);
+ factory()->NewProperty(iterable, iterator_symbol_literal, pos);
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(0, zone());
+ return factory()->NewCall(prop, args, pos);
}
void Parser::MarkTailPosition(Expression* expression) {
expression->MarkTail();
}
+Expression* Parser::NewV8Intrinsic(const AstRawString* name,
+ ZoneList<Expression*>* args, int pos,
+ bool* ok) {
+ if (extension_ != nullptr) {
+ // The extension structures are only accessible while parsing the
+ // very first time, not when reparsing because of lazy compilation.
+ GetClosureScope()->ForceEagerCompilation();
+ }
+
+ DCHECK(name->is_one_byte());
+ const Runtime::Function* function =
+ Runtime::FunctionForName(name->raw_data(), name->length());
+
+ if (function != nullptr) {
+ // Check for possible name clash.
+ DCHECK_EQ(Context::kNotFound,
+ Context::IntrinsicIndexForName(name->raw_data(), name->length()));
+ // Check for built-in IS_VAR macro.
+ if (function->function_id == Runtime::kIS_VAR) {
+ DCHECK_EQ(Runtime::RUNTIME, function->intrinsic_type);
+ // %IS_VAR(x) evaluates to x if x is a variable,
+ // leads to a parse error otherwise. Could be implemented as an
+ // inline function %_IS_VAR(x) to eliminate this special case.
+ if (args->length() == 1 && args->at(0)->AsVariableProxy() != nullptr) {
+ return args->at(0);
+ } else {
+ ReportMessage(MessageTemplate::kNotIsvar);
+ *ok = false;
+ return nullptr;
+ }
+ }
+
+ // Check that the expected number of arguments are being passed.
+ if (function->nargs != -1 && function->nargs != args->length()) {
+ ReportMessage(MessageTemplate::kRuntimeWrongNumArgs);
+ *ok = false;
+ return nullptr;
+ }
+
+ return factory()->NewCallRuntime(function, args, pos);
+ }
+
+ int context_index =
+ Context::IntrinsicIndexForName(name->raw_data(), name->length());
+
+ // Check that the function is defined.
+ if (context_index == Context::kNotFound) {
+ ReportMessage(MessageTemplate::kNotDefined, name);
+ *ok = false;
+ return nullptr;
+ }
+
+ return factory()->NewCallRuntime(context_index, args, pos);
+}
+
Parser::Parser(ParseInfo* info)
: ParserBase<Parser>(info->zone(), &scanner_, info->stack_limit(),
info->extension(), info->ast_value_factory(), NULL),
@@ -699,7 +646,8 @@ Parser::Parser(ParseInfo* info)
// ParseInfo during background parsing.
DCHECK(!info->script().is_null() || info->source_stream() != nullptr ||
info->character_stream() != nullptr);
- set_allow_lazy(info->allow_lazy_parsing());
+ set_allow_lazy(FLAG_lazy && info->allow_lazy_parsing() &&
+ !info->is_native() && info->extension() == nullptr);
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
set_allow_tailcalls(FLAG_harmony_tailcalls && !info->is_native() &&
info->isolate()->is_tail_call_elimination_enabled());
@@ -711,6 +659,7 @@ Parser::Parser(ParseInfo* info)
set_allow_harmony_async_await(FLAG_harmony_async_await);
set_allow_harmony_restrictive_generators(FLAG_harmony_restrictive_generators);
set_allow_harmony_trailing_commas(FLAG_harmony_trailing_commas);
+ set_allow_harmony_class_fields(FLAG_harmony_class_fields);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -725,29 +674,19 @@ Parser::Parser(ParseInfo* info)
}
void Parser::DeserializeScopeChain(
- ParseInfo* info, Handle<Context> context,
- Scope::DeserializationMode deserialization_mode) {
+ ParseInfo* info, MaybeHandle<ScopeInfo> maybe_outer_scope_info) {
DCHECK(ThreadId::Current().Equals(info->isolate()->thread_id()));
// TODO(wingo): Add an outer SCRIPT_SCOPE corresponding to the native
// context, which will have the "this" binding for script scopes.
DeclarationScope* script_scope = NewScriptScope();
info->set_script_scope(script_scope);
Scope* scope = script_scope;
- if (!context.is_null() && !context->IsNativeContext()) {
- scope = Scope::DeserializeScopeChain(info->isolate(), zone(), *context,
- script_scope, ast_value_factory(),
- deserialization_mode);
- if (info->context().is_null()) {
- DCHECK(deserialization_mode ==
- Scope::DeserializationMode::kDeserializeOffHeap);
- } else {
- // The Scope is backed up by ScopeInfo (which is in the V8 heap); this
- // means the Parser cannot operate independent of the V8 heap. Tell the
- // string table to internalize strings and values right after they're
- // created. This kind of parsing can only be done in the main thread.
- DCHECK(parsing_on_main_thread_);
- ast_value_factory()->Internalize(info->isolate());
- }
+ Handle<ScopeInfo> outer_scope_info;
+ if (maybe_outer_scope_info.ToHandle(&outer_scope_info)) {
+ scope = Scope::DeserializeScopeChain(
+ info->isolate(), zone(), *outer_scope_info, script_scope,
+ ast_value_factory(), Scope::DeserializationMode::kScopesOnly);
+ DCHECK(!info->is_module() || scope->is_module_scope());
}
original_scope_ = scope;
}
@@ -762,8 +701,7 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
HistogramTimerScope timer_scope(isolate->counters()->parse(), true);
RuntimeCallTimerScope runtime_timer(isolate, &RuntimeCallStats::Parse);
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::Parse);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.Parse");
Handle<String> source(String::cast(info->script()->source()));
isolate->counters()->total_parse_size()->Increment(source->length());
base::ElapsedTimer timer;
@@ -781,24 +719,13 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
cached_parse_data_->Initialize();
}
- DeserializeScopeChain(info, info->context(),
- Scope::DeserializationMode::kKeepScopeInfo);
+ DeserializeScopeChain(info, info->maybe_outer_scope_info());
source = String::Flatten(source);
FunctionLiteral* result;
{
- std::unique_ptr<Utf16CharacterStream> stream;
- if (source->IsExternalTwoByteString()) {
- stream.reset(new ExternalTwoByteStringUtf16CharacterStream(
- Handle<ExternalTwoByteString>::cast(source), 0, source->length()));
- } else if (source->IsExternalOneByteString()) {
- stream.reset(new ExternalOneByteStringUtf16CharacterStream(
- Handle<ExternalOneByteString>::cast(source), 0, source->length()));
- } else {
- stream.reset(
- new GenericStringUtf16CharacterStream(source, 0, source->length()));
- }
+ std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(source));
scanner_.Initialize(stream.get());
result = DoParseProgram(info);
}
@@ -835,27 +762,25 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
DCHECK_NULL(scope_state_);
DCHECK_NULL(target_stack_);
- Mode parsing_mode = FLAG_lazy && allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY;
- if (allow_natives() || extension_ != NULL) parsing_mode = PARSE_EAGERLY;
+ Mode parsing_mode = allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY;
FunctionLiteral* result = NULL;
{
Scope* outer = original_scope_;
- // If there's a chance that there's a reference to global 'this', predeclare
- // it as a dynamic global on the script scope.
- if (outer->GetReceiverScope()->is_script_scope()) {
- info->script_scope()->DeclareDynamicGlobal(
- ast_value_factory()->this_string(), Variable::THIS);
- }
- DCHECK(outer);
+ DCHECK_NOT_NULL(outer);
+ parsing_module_ = info->is_module();
if (info->is_eval()) {
if (!outer->is_script_scope() || is_strict(info->language_mode())) {
parsing_mode = PARSE_EAGERLY;
}
outer = NewEvalScope(outer);
- } else if (info->is_module()) {
+ } else if (parsing_module_) {
DCHECK_EQ(outer, info->script_scope());
outer = NewModuleScope(info->script_scope());
+ // Never do lazy parsing in modules. If we want to support this in the
+ // future, we must force context-allocation for all variables that are
+ // declared at the module level but not MODULE-allocated.
+ parsing_mode = PARSE_EAGERLY;
}
DeclarationScope* scope = outer->AsDeclarationScope();
@@ -864,14 +789,29 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
// Enter 'scope' with the given parsing mode.
ParsingModeScope parsing_mode_scope(this, parsing_mode);
- FunctionState function_state(&function_state_, &scope_state_, scope,
- kNormalFunction);
+ FunctionState function_state(&function_state_, &scope_state_, scope);
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
int beg_pos = scanner()->location().beg_pos;
- parsing_module_ = info->is_module();
if (parsing_module_) {
+ // Declare the special module parameter.
+ auto name = ast_value_factory()->empty_string();
+ bool is_duplicate;
+ bool is_rest = false;
+ bool is_optional = false;
+ auto var = scope->DeclareParameter(name, VAR, is_optional, is_rest,
+ &is_duplicate, ast_value_factory());
+ DCHECK(!is_duplicate);
+ var->AllocateTo(VariableLocation::PARAMETER, 0);
+
+ PrepareGeneratorVariables(&function_state);
+ Expression* initial_yield =
+ BuildInitialYield(kNoSourcePosition, kGeneratorFunction);
+ body->Add(
+ factory()->NewExpressionStatement(initial_yield, kNoSourcePosition),
+ zone());
+
ParseModuleItemList(body, &ok);
ok = ok &&
module()->Validate(this->scope()->AsModuleScope(),
@@ -889,7 +829,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
if (ok && is_strict(language_mode())) {
CheckStrictOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
- CheckDecimalLiteralWithLeadingZero(use_counts_, beg_pos,
+ CheckDecimalLiteralWithLeadingZero(beg_pos,
scanner()->location().end_pos);
}
if (ok && is_sloppy(language_mode())) {
@@ -897,7 +837,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
// pre-existing bindings should be made writable, enumerable and
// nonconfigurable if possible, whereas this code will leave attributes
// unchanged if the property already exists.
- InsertSloppyBlockFunctionVarBindings(scope, nullptr, &ok);
+ InsertSloppyBlockFunctionVarBindings(scope);
}
if (ok) {
CheckConflictingVarDeclarations(scope, &ok);
@@ -915,9 +855,10 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
if (ok) {
RewriteDestructuringAssignments();
+ int parameter_count = parsing_module_ ? 1 : 0;
result = factory()->NewScriptOrEvalFunctionLiteral(
scope, body, function_state.materialized_literal_count(),
- function_state.expected_property_count());
+ function_state.expected_property_count(), parameter_count);
}
}
@@ -934,8 +875,7 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info) {
DCHECK(parsing_on_main_thread_);
RuntimeCallTimerScope runtime_timer(isolate, &RuntimeCallStats::ParseLazy);
HistogramTimerScope timer_scope(isolate->counters()->parse_lazy());
- TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
- isolate, &tracing::TraceEventStatsTable::ParseLazy);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseLazy");
Handle<String> source(String::cast(info->script()->source()));
isolate->counters()->total_parse_size()->Increment(source->length());
base::ElapsedTimer timer;
@@ -943,26 +883,14 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info) {
timer.Start();
}
Handle<SharedFunctionInfo> shared_info = info->shared_info();
- DeserializeScopeChain(info, info->context(),
- Scope::DeserializationMode::kKeepScopeInfo);
+ DeserializeScopeChain(info, info->maybe_outer_scope_info());
// Initialize parser state.
source = String::Flatten(source);
FunctionLiteral* result;
{
- std::unique_ptr<Utf16CharacterStream> stream;
- if (source->IsExternalTwoByteString()) {
- stream.reset(new ExternalTwoByteStringUtf16CharacterStream(
- Handle<ExternalTwoByteString>::cast(source),
- shared_info->start_position(), shared_info->end_position()));
- } else if (source->IsExternalOneByteString()) {
- stream.reset(new ExternalOneByteStringUtf16CharacterStream(
- Handle<ExternalOneByteString>::cast(source),
- shared_info->start_position(), shared_info->end_position()));
- } else {
- stream.reset(new GenericStringUtf16CharacterStream(
- source, shared_info->start_position(), shared_info->end_position()));
- }
+ std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
+ source, shared_info->start_position(), shared_info->end_position()));
Handle<String> name(String::cast(shared_info->name()));
result =
DoParseLazy(info, ast_value_factory()->GetString(name), stream.get());
@@ -974,6 +902,8 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info) {
if (FLAG_trace_parse && result != NULL) {
double ms = timer.Elapsed().InMillisecondsF();
+ // We need to make sure that the debug-name is available.
+ ast_value_factory()->Internalize(isolate);
std::unique_ptr<char[]> name_chars = result->debug_name()->ToCString();
PrintF("[parsing function: %s - took %0.3f ms]\n", name_chars.get(), ms);
}
@@ -1010,24 +940,20 @@ FunctionLiteral* Parser::DoParseLazy(ParseInfo* info,
{
// Parse the function literal.
- Scope* scope = original_scope_;
- DCHECK(scope);
- // If there's a chance that there's a reference to global 'this', predeclare
- // it as a dynamic global on the script scope.
- if (info->is_arrow() && scope->GetReceiverScope()->is_script_scope()) {
- info->script_scope()->DeclareDynamicGlobal(
- ast_value_factory()->this_string(), Variable::THIS);
- }
- FunctionState function_state(&function_state_, &scope_state_, scope,
- info->function_kind());
- DCHECK(is_sloppy(scope->language_mode()) ||
+ Scope* outer = original_scope_;
+ DeclarationScope* outer_function = outer->GetClosureScope();
+ DCHECK(outer);
+ FunctionState function_state(&function_state_, &scope_state_,
+ outer_function);
+ BlockState block_state(&scope_state_, outer);
+ DCHECK(is_sloppy(outer->language_mode()) ||
is_strict(info->language_mode()));
FunctionLiteral::FunctionType function_type = ComputeFunctionType(info);
+ FunctionKind kind = info->function_kind();
bool ok = true;
- if (info->is_arrow()) {
- bool is_async = allow_harmony_async_await() && info->is_async();
- if (is_async) {
+ if (IsArrowFunction(kind)) {
+ if (allow_harmony_async_await() && IsAsyncFunction(kind)) {
DCHECK(!scanner()->HasAnyLineTerminatorAfterNext());
if (!Check(Token::ASYNC)) {
CHECK(stack_overflow());
@@ -1040,7 +966,7 @@ FunctionLiteral* Parser::DoParseLazy(ParseInfo* info,
}
// TODO(adamk): We should construct this scope from the ScopeInfo.
- DeclarationScope* scope = NewFunctionScope(FunctionKind::kArrowFunction);
+ DeclarationScope* scope = NewFunctionScope(kind);
// These two bits only need to be explicitly set because we're
// not passing the ScopeInfo to the Scope constructor.
@@ -1062,15 +988,12 @@ FunctionLiteral* Parser::DoParseLazy(ParseInfo* info,
BlockState block_state(&scope_state_, scope);
if (Check(Token::LPAREN)) {
// '(' StrictFormalParameters ')'
- ParseFormalParameterList(&formals, &formals_classifier, &ok);
+ ParseFormalParameterList(&formals, &ok);
if (ok) ok = Check(Token::RPAREN);
} else {
// BindingIdentifier
- ParseFormalParameter(&formals, &formals_classifier, &ok);
- if (ok) {
- DeclareFormalParameter(formals.scope, formals.at(0),
- &formals_classifier);
- }
+ ParseFormalParameter(&formals, &ok);
+ if (ok) DeclareFormalParameter(formals.scope, formals.at(0));
}
}
@@ -1078,8 +1001,7 @@ FunctionLiteral* Parser::DoParseLazy(ParseInfo* info,
checkpoint.Restore(&formals.materialized_literals_count);
// Pass `accept_IN=true` to ParseArrowFunctionLiteral --- This should
// not be observable, or else the preparser would have failed.
- Expression* expression = ParseArrowFunctionLiteral(
- true, formals, is_async, formals_classifier, &ok);
+ Expression* expression = ParseArrowFunctionLiteral(true, formals, &ok);
if (ok) {
// Scanning must end at the same position that was recorded
// previously. If not, parsing has been interrupted due to a stack
@@ -1097,16 +1019,31 @@ FunctionLiteral* Parser::DoParseLazy(ParseInfo* info,
}
}
}
- } else if (info->is_default_constructor()) {
- DCHECK_EQ(this->scope(), scope);
+ } else if (IsDefaultConstructor(kind)) {
+ DCHECK_EQ(scope(), outer);
+ bool is_subclass_constructor = IsSubclassConstructor(kind);
result = DefaultConstructor(
- raw_name, IsSubclassConstructor(info->function_kind()),
+ raw_name, is_subclass_constructor, info->requires_class_field_init(),
info->start_position(), info->end_position(), info->language_mode());
+ if (!is_subclass_constructor && info->requires_class_field_init()) {
+ result = InsertClassFieldInitializer(result);
+ }
+ } else if (info->is_class_field_initializer()) {
+ Handle<SharedFunctionInfo> shared_info = info->shared_info();
+ DCHECK(!shared_info.is_null());
+ if (shared_info->length() == 0) {
+ result = ParseClassFieldForInitializer(
+ info->start_position() != info->end_position(), &ok);
+ } else {
+ result = SynthesizeClassFieldInitializer(shared_info->length());
+ }
} else {
- result = ParseFunctionLiteral(raw_name, Scanner::Location::invalid(),
- kSkipFunctionNameCheck,
- info->function_kind(), kNoSourcePosition,
- function_type, info->language_mode(), &ok);
+ result = ParseFunctionLiteral(
+ raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck, kind,
+ kNoSourcePosition, function_type, info->language_mode(), &ok);
+ if (info->requires_class_field_init()) {
+ result = InsertClassFieldInitializer(result);
+ }
}
// Make sure the results agree.
DCHECK(ok == (result != nullptr));
@@ -1117,131 +1054,6 @@ FunctionLiteral* Parser::DoParseLazy(ParseInfo* info,
return result;
}
-
-void Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
- bool* ok) {
- // StatementList ::
- // (StatementListItem)* <end_token>
-
- // Allocate a target stack to use for this set of source
- // elements. This way, all scripts and functions get their own
- // target stack thus avoiding illegal breaks and continues across
- // functions.
- TargetScope scope(&this->target_stack_);
-
- DCHECK(body != NULL);
- bool directive_prologue = true; // Parsing directive prologue.
-
- while (peek() != end_token) {
- if (directive_prologue && peek() != Token::STRING) {
- directive_prologue = false;
- }
-
- Scanner::Location token_loc = scanner()->peek_location();
- Statement* stat = ParseStatementListItem(CHECK_OK_VOID);
- if (stat == NULL || stat->IsEmpty()) {
- directive_prologue = false; // End of directive prologue.
- continue;
- }
-
- if (directive_prologue) {
- // A shot at a directive.
- ExpressionStatement* e_stat;
- Literal* literal;
- // Still processing directive prologue?
- if ((e_stat = stat->AsExpressionStatement()) != NULL &&
- (literal = e_stat->expression()->AsLiteral()) != NULL &&
- literal->raw_value()->IsString()) {
- // Check "use strict" directive (ES5 14.1), "use asm" directive.
- bool use_strict_found =
- literal->raw_value()->AsString() ==
- ast_value_factory()->use_strict_string() &&
- token_loc.end_pos - token_loc.beg_pos ==
- ast_value_factory()->use_strict_string()->length() + 2;
- if (use_strict_found) {
- if (is_sloppy(language_mode())) {
- RaiseLanguageMode(STRICT);
- }
-
- if (!this->scope()->HasSimpleParameters()) {
- // TC39 deemed "use strict" directives to be an error when occurring
- // in the body of a function with non-simple parameter list, on
- // 29/7/2015. https://goo.gl/ueA7Ln
- const AstRawString* string = literal->raw_value()->AsString();
- ReportMessageAt(token_loc,
- MessageTemplate::kIllegalLanguageModeDirective,
- string);
- *ok = false;
- return;
- }
- // Because declarations in strict eval code don't leak into the scope
- // of the eval call, it is likely that functions declared in strict
- // eval code will be used within the eval code, so lazy parsing is
- // probably not a win.
- if (this->scope()->is_eval_scope()) mode_ = PARSE_EAGERLY;
- } else if (literal->raw_value()->AsString() ==
- ast_value_factory()->use_asm_string() &&
- token_loc.end_pos - token_loc.beg_pos ==
- ast_value_factory()->use_asm_string()->length() + 2) {
- // Store the usage count; The actual use counter on the isolate is
- // incremented after parsing is done.
- ++use_counts_[v8::Isolate::kUseAsm];
- DCHECK(this->scope()->is_declaration_scope());
- this->scope()->AsDeclarationScope()->set_asm_module();
- } else {
- // Should not change mode, but will increment UseCounter
- // if appropriate. Ditto usages below.
- RaiseLanguageMode(SLOPPY);
- }
- } else {
- // End of the directive prologue.
- directive_prologue = false;
- RaiseLanguageMode(SLOPPY);
- }
- } else {
- RaiseLanguageMode(SLOPPY);
- }
-
- body->Add(stat, zone());
- }
-}
-
-
-Statement* Parser::ParseStatementListItem(bool* ok) {
- // (Ecma 262 6th Edition, 13.1):
- // StatementListItem:
- // Statement
- // Declaration
- const Token::Value peeked = peek();
- switch (peeked) {
- case Token::FUNCTION:
- return ParseHoistableDeclaration(NULL, false, ok);
- case Token::CLASS:
- Consume(Token::CLASS);
- return ParseClassDeclaration(NULL, false, ok);
- case Token::CONST:
- return ParseVariableStatement(kStatementListItem, NULL, ok);
- case Token::VAR:
- return ParseVariableStatement(kStatementListItem, NULL, ok);
- case Token::LET:
- if (IsNextLetKeyword()) {
- return ParseVariableStatement(kStatementListItem, NULL, ok);
- }
- break;
- case Token::ASYNC:
- if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
- !scanner()->HasAnyLineTerminatorAfterNext()) {
- Consume(Token::ASYNC);
- return ParseAsyncFunctionDeclaration(NULL, false, ok);
- }
- /* falls through */
- default:
- break;
- }
- return ParseStatement(NULL, kAllowLabelledFunctionStatement, ok);
-}
-
-
Statement* Parser::ParseModuleItem(bool* ok) {
// ecma262/#prod-ModuleItem
// ModuleItem :
@@ -1285,7 +1097,7 @@ const AstRawString* Parser::ParseModuleSpecifier(bool* ok) {
// StringLiteral
Expect(Token::STRING, CHECK_OK);
- return GetSymbol(scanner());
+ return GetSymbol();
}
@@ -1413,7 +1225,7 @@ void Parser::ParseImportDeclaration(bool* ok) {
if (tok == Token::STRING) {
const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK_VOID);
ExpectSemicolon(CHECK_OK_VOID);
- module()->AddEmptyImport(module_specifier, scanner()->location(), zone());
+ module()->AddEmptyImport(module_specifier);
return;
}
@@ -1481,7 +1293,7 @@ void Parser::ParseImportDeclaration(bool* ok) {
if (named_imports != nullptr) {
if (named_imports->length() == 0) {
- module()->AddEmptyImport(module_specifier, scanner()->location(), zone());
+ module()->AddEmptyImport(module_specifier);
} else {
for (int i = 0; i < named_imports->length(); ++i) {
const NamedImport* import = named_imports->at(i);
@@ -1526,9 +1338,8 @@ Statement* Parser::ParseExportDefault(bool* ok) {
default: {
int pos = position();
ExpressionClassifier classifier(this);
- Expression* value =
- ParseAssignmentExpression(true, &classifier, CHECK_OK);
- RewriteNonPattern(&classifier, CHECK_OK);
+ Expression* value = ParseAssignmentExpression(true, CHECK_OK);
+ RewriteNonPattern(CHECK_OK);
SetFunctionName(value, ast_value_factory()->default_string());
const AstRawString* local_name =
@@ -1621,8 +1432,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
export_locations[i], zone());
}
} else if (length == 0) {
- module()->AddEmptyImport(module_specifier, scanner()->location(),
- zone());
+ module()->AddEmptyImport(module_specifier);
} else {
for (int i = 0; i < length; ++i) {
module()->AddExport(original_names[i], export_names[i],
@@ -1673,141 +1483,8 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
return result;
}
-Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
- AllowLabelledFunctionStatement allow_function,
- bool* ok) {
- // Statement ::
- // EmptyStatement
- // ...
-
- if (peek() == Token::SEMICOLON) {
- Next();
- return factory()->NewEmptyStatement(kNoSourcePosition);
- }
- return ParseSubStatement(labels, allow_function, ok);
-}
-
-Statement* Parser::ParseSubStatement(
- ZoneList<const AstRawString*>* labels,
- AllowLabelledFunctionStatement allow_function, bool* ok) {
- // Statement ::
- // Block
- // VariableStatement
- // EmptyStatement
- // ExpressionStatement
- // IfStatement
- // IterationStatement
- // ContinueStatement
- // BreakStatement
- // ReturnStatement
- // WithStatement
- // LabelledStatement
- // SwitchStatement
- // ThrowStatement
- // TryStatement
- // DebuggerStatement
-
- // Note: Since labels can only be used by 'break' and 'continue'
- // statements, which themselves are only valid within blocks,
- // iterations or 'switch' statements (i.e., BreakableStatements),
- // labels can be simply ignored in all other cases; except for
- // trivial labeled break statements 'label: break label' which is
- // parsed into an empty statement.
- switch (peek()) {
- case Token::LBRACE:
- return ParseBlock(labels, ok);
-
- case Token::SEMICOLON:
- Next();
- return factory()->NewEmptyStatement(kNoSourcePosition);
-
- case Token::IF:
- return ParseIfStatement(labels, ok);
-
- case Token::DO:
- return ParseDoWhileStatement(labels, ok);
-
- case Token::WHILE:
- return ParseWhileStatement(labels, ok);
-
- case Token::FOR:
- return ParseForStatement(labels, ok);
-
- case Token::CONTINUE:
- case Token::BREAK:
- case Token::RETURN:
- case Token::THROW:
- case Token::TRY: {
- // These statements must have their labels preserved in an enclosing
- // block
- if (labels == NULL) {
- return ParseStatementAsUnlabelled(labels, ok);
- } else {
- Block* result =
- factory()->NewBlock(labels, 1, false, kNoSourcePosition);
- Target target(&this->target_stack_, result);
- Statement* statement = ParseStatementAsUnlabelled(labels, CHECK_OK);
- if (result) result->statements()->Add(statement, zone());
- return result;
- }
- }
-
- case Token::WITH:
- return ParseWithStatement(labels, ok);
-
- case Token::SWITCH:
- return ParseSwitchStatement(labels, ok);
-
- case Token::FUNCTION:
- // FunctionDeclaration only allowed as a StatementListItem, not in
- // an arbitrary Statement position. Exceptions such as
- // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
- // are handled by calling ParseScopedStatement rather than
- // ParseSubStatement directly.
- ReportMessageAt(scanner()->peek_location(),
- is_strict(language_mode())
- ? MessageTemplate::kStrictFunction
- : MessageTemplate::kSloppyFunction);
- *ok = false;
- return nullptr;
-
- case Token::DEBUGGER:
- return ParseDebuggerStatement(ok);
-
- case Token::VAR:
- return ParseVariableStatement(kStatement, NULL, ok);
-
- default:
- return ParseExpressionOrLabelledStatement(labels, allow_function, ok);
- }
-}
-
-Statement* Parser::ParseStatementAsUnlabelled(
- ZoneList<const AstRawString*>* labels, bool* ok) {
- switch (peek()) {
- case Token::CONTINUE:
- return ParseContinueStatement(ok);
-
- case Token::BREAK:
- return ParseBreakStatement(labels, ok);
-
- case Token::RETURN:
- return ParseReturnStatement(ok);
-
- case Token::THROW:
- return ParseThrowStatement(ok);
-
- case Token::TRY:
- return ParseTryStatement(ok);
-
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
VariableProxy* Parser::NewUnresolved(const AstRawString* name, int begin_pos,
- int end_pos, Variable::Kind kind) {
+ int end_pos, VariableKind kind) {
return scope()->NewUnresolved(factory(), name, begin_pos, end_pos, kind);
}
@@ -1816,25 +1493,19 @@ VariableProxy* Parser::NewUnresolved(const AstRawString* name) {
scanner()->location().end_pos);
}
-InitializationFlag Parser::DefaultInitializationFlag(VariableMode mode) {
- DCHECK(IsDeclaredVariableMode(mode));
- return mode == VAR ? kCreatedInitialized : kNeedsInitialization;
-}
-
Declaration* Parser::DeclareVariable(const AstRawString* name,
VariableMode mode, int pos, bool* ok) {
- return DeclareVariable(name, mode, DefaultInitializationFlag(mode), pos, ok);
+ return DeclareVariable(name, mode, Variable::DefaultInitializationFlag(mode),
+ pos, ok);
}
Declaration* Parser::DeclareVariable(const AstRawString* name,
VariableMode mode, InitializationFlag init,
int pos, bool* ok) {
DCHECK_NOT_NULL(name);
- Scope* scope =
- IsLexicalVariableMode(mode) ? this->scope() : GetDeclarationScope();
- VariableProxy* proxy =
- scope->NewUnresolved(factory(), name, scanner()->location().beg_pos,
- scanner()->location().end_pos);
+ VariableProxy* proxy = factory()->NewVariableProxy(
+ name, NORMAL_VARIABLE, scanner()->location().beg_pos,
+ scanner()->location().end_pos);
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, this->scope(), pos);
Declare(declaration, DeclarationDescriptor::NORMAL, mode, init, CHECK_OK);
@@ -1845,229 +1516,67 @@ Variable* Parser::Declare(Declaration* declaration,
DeclarationDescriptor::Kind declaration_kind,
VariableMode mode, InitializationFlag init, bool* ok,
Scope* scope) {
- DCHECK(IsDeclaredVariableMode(mode) && mode != CONST_LEGACY);
-
- VariableProxy* proxy = declaration->proxy();
- DCHECK(proxy->raw_name() != NULL);
- const AstRawString* name = proxy->raw_name();
-
- if (scope == nullptr) scope = this->scope();
- if (mode == VAR) scope = scope->GetDeclarationScope();
- DCHECK(!scope->is_catch_scope());
- DCHECK(!scope->is_with_scope());
- DCHECK(scope->is_declaration_scope() ||
- (IsLexicalVariableMode(mode) && scope->is_block_scope()));
-
- bool is_function_declaration = declaration->IsFunctionDeclaration();
-
- Variable* var = NULL;
- if (scope->is_eval_scope() && is_sloppy(scope->language_mode()) &&
- mode == VAR) {
- // In a var binding in a sloppy direct eval, pollute the enclosing scope
- // with this new binding by doing the following:
- // The proxy is bound to a lookup variable to force a dynamic declaration
- // using the DeclareEvalVar or DeclareEvalFunction runtime functions.
- Variable::Kind kind = Variable::NORMAL;
- // TODO(sigurds) figure out if kNotAssigned is OK here
- var = new (zone()) Variable(scope, name, mode, kind, init, kNotAssigned);
- var->AllocateTo(VariableLocation::LOOKUP, -1);
- } else {
- // Declare the variable in the declaration scope.
- var = scope->LookupLocal(name);
- if (var == NULL) {
- // Declare the name.
- Variable::Kind kind = Variable::NORMAL;
- if (is_function_declaration) {
- kind = Variable::FUNCTION;
- }
- var = scope->DeclareLocal(name, mode, init, kind, kNotAssigned);
- } else if (IsLexicalVariableMode(mode) ||
- IsLexicalVariableMode(var->mode())) {
- // Allow duplicate function decls for web compat, see bug 4693.
- bool duplicate_allowed = false;
- if (is_sloppy(scope->language_mode()) && is_function_declaration &&
- var->is_function()) {
- DCHECK(IsLexicalVariableMode(mode) &&
- IsLexicalVariableMode(var->mode()));
- // If the duplication is allowed, then the var will show up
- // in the SloppyBlockFunctionMap and the new FunctionKind
- // will be a permitted duplicate.
- FunctionKind function_kind =
- declaration->AsFunctionDeclaration()->fun()->kind();
- duplicate_allowed =
- scope->GetDeclarationScope()->sloppy_block_function_map()->Lookup(
- const_cast<AstRawString*>(name), name->hash()) != nullptr &&
- !IsAsyncFunction(function_kind) &&
- !(allow_harmony_restrictive_generators() &&
- IsGeneratorFunction(function_kind));
- }
- if (duplicate_allowed) {
- ++use_counts_[v8::Isolate::kSloppyModeBlockScopedFunctionRedefinition];
- } else {
- // The name was declared in this scope before; check for conflicting
- // re-declarations. We have a conflict if either of the declarations
- // is not a var (in script scope, we also have to ignore legacy const
- // for compatibility). There is similar code in runtime.cc in the
- // Declare functions. The function CheckConflictingVarDeclarations
- // checks for var and let bindings from different scopes whereas this
- // is a check for conflicting declarations within the same scope. This
- // check also covers the special case
- //
- // function () { let x; { var x; } }
- //
- // because the var declaration is hoisted to the function scope where
- // 'x' is already bound.
- DCHECK(IsDeclaredVariableMode(var->mode()));
- // In harmony we treat re-declarations as early errors. See
- // ES5 16 for a definition of early errors.
- if (declaration_kind == DeclarationDescriptor::NORMAL) {
- ReportMessage(MessageTemplate::kVarRedeclaration, name);
- } else {
- ReportMessage(MessageTemplate::kParamDupe);
- }
- *ok = false;
- return nullptr;
- }
- } else if (mode == VAR) {
- var->set_maybe_assigned();
+ if (scope == nullptr) {
+ scope = this->scope();
+ }
+ bool sloppy_mode_block_scope_function_redefinition = false;
+ Variable* variable = scope->DeclareVariable(
+ declaration, mode, init, allow_harmony_restrictive_generators(),
+ &sloppy_mode_block_scope_function_redefinition, ok);
+ if (!*ok) {
+ if (declaration_kind == DeclarationDescriptor::NORMAL) {
+ ReportMessage(MessageTemplate::kVarRedeclaration,
+ declaration->proxy()->raw_name());
+ } else {
+ ReportMessage(MessageTemplate::kParamDupe);
}
+ return nullptr;
}
- DCHECK_NOT_NULL(var);
-
- // We add a declaration node for every declaration. The compiler
- // will only generate code if necessary. In particular, declarations
- // for inner local variables that do not represent functions won't
- // result in any generated code.
- //
- // This will lead to multiple declaration nodes for the
- // same variable if it is declared several times. This is not a
- // semantic issue, but it may be a performance issue since it may
- // lead to repeated DeclareEvalVar or DeclareEvalFunction calls.
- scope->AddDeclaration(declaration);
- proxy->BindTo(var);
- return var;
-}
-
-
-// Language extension which is only enabled for source files loaded
-// through the API's extension mechanism. A native function
-// declaration is resolved by looking up the function through a
-// callback provided by the extension.
-Statement* Parser::ParseNativeDeclaration(bool* ok) {
- int pos = peek_position();
- Expect(Token::FUNCTION, CHECK_OK);
- // Allow "eval" or "arguments" for backward compatibility.
- const AstRawString* name =
- ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- bool done = (peek() == Token::RPAREN);
- while (!done) {
- ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
- done = (peek() == Token::RPAREN);
- if (!done) {
- Expect(Token::COMMA, CHECK_OK);
- }
+ if (sloppy_mode_block_scope_function_redefinition) {
+ ++use_counts_[v8::Isolate::kSloppyModeBlockScopedFunctionRedefinition];
}
- Expect(Token::RPAREN, CHECK_OK);
- Expect(Token::SEMICOLON, CHECK_OK);
-
- // Make sure that the function containing the native declaration
- // isn't lazily compiled. The extension structures are only
- // accessible while parsing the first time not when reparsing
- // because of lazy compilation.
- GetClosureScope()->ForceEagerCompilation();
-
- // TODO(1240846): It's weird that native function declarations are
- // introduced dynamically when we meet their declarations, whereas
- // other functions are set up when entering the surrounding scope.
- Declaration* decl = DeclareVariable(name, VAR, pos, CHECK_OK);
- NativeFunctionLiteral* lit =
- factory()->NewNativeFunctionLiteral(name, extension_, kNoSourcePosition);
- return factory()->NewExpressionStatement(
- factory()->NewAssignment(Token::INIT, decl->proxy(), lit,
- kNoSourcePosition),
- pos);
+ return variable;
}
-Statement* Parser::ParseHoistableDeclaration(
- ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
- Expect(Token::FUNCTION, CHECK_OK);
- int pos = position();
- ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
- if (Check(Token::MUL)) {
- flags |= ParseFunctionFlags::kIsGenerator;
+Block* Parser::BuildInitializationBlock(
+ DeclarationParsingResult* parsing_result,
+ ZoneList<const AstRawString*>* names, bool* ok) {
+ Block* result = factory()->NewBlock(
+ NULL, 1, true, parsing_result->descriptor.declaration_pos);
+ for (auto declaration : parsing_result->declarations) {
+ PatternRewriter::DeclareAndInitializeVariables(
+ this, result, &(parsing_result->descriptor), &declaration, names,
+ CHECK_OK);
}
- return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
+ return result;
}
-Statement* Parser::ParseAsyncFunctionDeclaration(
- ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
- DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
- int pos = position();
- if (scanner()->HasAnyLineTerminatorBeforeNext()) {
- *ok = false;
- ReportUnexpectedToken(scanner()->current_token());
- return nullptr;
- }
- Expect(Token::FUNCTION, CHECK_OK);
- ParseFunctionFlags flags = ParseFunctionFlags::kIsAsync;
- return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
+void Parser::DeclareAndInitializeVariables(
+ Block* block, const DeclarationDescriptor* declaration_descriptor,
+ const DeclarationParsingResult::Declaration* declaration,
+ ZoneList<const AstRawString*>* names, bool* ok) {
+ DCHECK_NOT_NULL(block);
+ PatternRewriter::DeclareAndInitializeVariables(
+ this, block, declaration_descriptor, declaration, names, ok);
}
-Statement* Parser::ParseHoistableDeclaration(
- int pos, ParseFunctionFlags flags, ZoneList<const AstRawString*>* names,
- bool default_export, bool* ok) {
- // FunctionDeclaration ::
- // 'function' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
- // 'function' '(' FormalParameters ')' '{' FunctionBody '}'
- // GeneratorDeclaration ::
- // 'function' '*' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
- // 'function' '*' '(' FormalParameters ')' '{' FunctionBody '}'
- //
- // The anonymous forms are allowed iff [default_export] is true.
- //
- // 'function' and '*' (if present) have been consumed by the caller.
-
- const bool is_generator = flags & ParseFunctionFlags::kIsGenerator;
- const bool is_async = flags & ParseFunctionFlags::kIsAsync;
- DCHECK(!is_generator || !is_async);
-
- const AstRawString* name;
- FunctionNameValidity name_validity;
- const AstRawString* variable_name;
- if (default_export && peek() == Token::LPAREN) {
- name = ast_value_factory()->default_string();
- name_validity = kSkipFunctionNameCheck;
- variable_name = ast_value_factory()->star_default_star_string();
- } else {
- bool is_strict_reserved;
- name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
- name_validity = is_strict_reserved ? kFunctionNameIsStrictReserved
- : kFunctionNameValidityUnknown;
- variable_name = name;
- }
-
- FuncNameInferrer::State fni_state(fni_);
- if (fni_ != NULL) fni_->PushEnclosingName(name);
- FunctionLiteral* fun = ParseFunctionLiteral(
- name, scanner()->location(), name_validity,
- is_generator ? FunctionKind::kGeneratorFunction
- : is_async ? FunctionKind::kAsyncFunction
- : FunctionKind::kNormalFunction,
- pos, FunctionLiteral::kDeclaration, language_mode(), CHECK_OK);
-
+Statement* Parser::DeclareFunction(const AstRawString* variable_name,
+ FunctionLiteral* function, int pos,
+ bool is_generator, bool is_async,
+ ZoneList<const AstRawString*>* names,
+ bool* ok) {
// In ES6, a function behaves as a lexical binding, except in
// a script scope, or the initial scope of eval or another function.
VariableMode mode =
(!scope()->is_declaration_scope() || scope()->is_module_scope()) ? LET
: VAR;
- VariableProxy* proxy = NewUnresolved(variable_name);
+ VariableProxy* proxy =
+ factory()->NewVariableProxy(variable_name, NORMAL_VARIABLE);
Declaration* declaration =
- factory()->NewFunctionDeclaration(proxy, fun, scope(), pos);
+ factory()->NewFunctionDeclaration(proxy, function, scope(), pos);
Declare(declaration, DeclarationDescriptor::NORMAL, mode, kCreatedInitialized,
CHECK_OK);
if (names) names->Add(variable_name, zone());
- EmptyStatement* empty = factory()->NewEmptyStatement(kNoSourcePosition);
// Async functions don't undergo sloppy mode block scoped hoisting, and don't
// allow duplicates in a block. Both are represented by the
// sloppy_block_function_map. Don't add them to the map for async functions.
@@ -2076,647 +1585,144 @@ Statement* Parser::ParseHoistableDeclaration(
if (is_sloppy(language_mode()) && !scope()->is_declaration_scope() &&
!is_async && !(allow_harmony_restrictive_generators() && is_generator)) {
SloppyBlockFunctionStatement* delegate =
- factory()->NewSloppyBlockFunctionStatement(empty, scope());
+ factory()->NewSloppyBlockFunctionStatement(scope());
DeclarationScope* target_scope = GetDeclarationScope();
target_scope->DeclareSloppyBlockFunction(variable_name, delegate);
return delegate;
}
- return empty;
+ return factory()->NewEmptyStatement(kNoSourcePosition);
}
-Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
- bool default_export, bool* ok) {
- // ClassDeclaration ::
- // 'class' Identifier ('extends' LeftHandExpression)? '{' ClassBody '}'
- // 'class' ('extends' LeftHandExpression)? '{' ClassBody '}'
- //
- // The anonymous form is allowed iff [default_export] is true.
- //
- // 'class' is expected to be consumed by the caller.
- //
- // A ClassDeclaration
- //
- // class C { ... }
- //
- // has the same semantics as:
- //
- // let C = class C { ... };
- //
- // so rewrite it as such.
-
- int pos = position();
-
- const AstRawString* name;
- bool is_strict_reserved;
- const AstRawString* variable_name;
- if (default_export && (peek() == Token::EXTENDS || peek() == Token::LBRACE)) {
- name = ast_value_factory()->default_string();
- is_strict_reserved = false;
- variable_name = ast_value_factory()->star_default_star_string();
- } else {
- name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
- variable_name = name;
- }
-
- Expression* value = ParseClassLiteral(nullptr, name, scanner()->location(),
- is_strict_reserved, pos, CHECK_OK);
-
- Declaration* decl = DeclareVariable(variable_name, LET, pos, CHECK_OK);
- decl->proxy()->var()->set_initializer_position(position());
- Assignment* assignment =
- factory()->NewAssignment(Token::INIT, decl->proxy(), value, pos);
+Statement* Parser::DeclareClass(const AstRawString* variable_name,
+ Expression* value,
+ ZoneList<const AstRawString*>* names,
+ int class_token_pos, int end_pos, bool* ok) {
+ Declaration* decl =
+ DeclareVariable(variable_name, LET, class_token_pos, CHECK_OK);
+ decl->proxy()->var()->set_initializer_position(end_pos);
+ Assignment* assignment = factory()->NewAssignment(Token::INIT, decl->proxy(),
+ value, class_token_pos);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, kNoSourcePosition);
if (names) names->Add(variable_name, zone());
return assignment_statement;
}
-Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
- // The harmony mode uses block elements instead of statements.
- //
- // Block ::
- // '{' StatementList '}'
-
- // Construct block expecting 16 statements.
- Block* body = factory()->NewBlock(labels, 16, false, kNoSourcePosition);
-
- // Parse the statements and collect escaping labels.
- Expect(Token::LBRACE, CHECK_OK);
- {
- BlockState block_state(&scope_state_);
- block_state.set_start_position(scanner()->location().beg_pos);
- Target target(&this->target_stack_, body);
-
- while (peek() != Token::RBRACE) {
- Statement* stat = ParseStatementListItem(CHECK_OK);
- if (stat && !stat->IsEmpty()) {
- body->statements()->Add(stat, zone());
- }
- }
+Statement* Parser::DeclareNative(const AstRawString* name, int pos, bool* ok) {
+ // Make sure that the function containing the native declaration
+ // isn't lazily compiled. The extension structures are only
+ // accessible while parsing the first time not when reparsing
+ // because of lazy compilation.
+ GetClosureScope()->ForceEagerCompilation();
- Expect(Token::RBRACE, CHECK_OK);
- block_state.set_end_position(scanner()->location().end_pos);
- body->set_scope(block_state.FinalizedBlockScope());
- }
- return body;
+ // TODO(1240846): It's weird that native function declarations are
+ // introduced dynamically when we meet their declarations, whereas
+ // other functions are set up when entering the surrounding scope.
+ Declaration* decl = DeclareVariable(name, VAR, pos, CHECK_OK);
+ NativeFunctionLiteral* lit =
+ factory()->NewNativeFunctionLiteral(name, extension_, kNoSourcePosition);
+ return factory()->NewExpressionStatement(
+ factory()->NewAssignment(Token::INIT, decl->proxy(), lit,
+ kNoSourcePosition),
+ pos);
}
-
-Block* Parser::DeclarationParsingResult::BuildInitializationBlock(
- ZoneList<const AstRawString*>* names, bool* ok) {
- Block* result = descriptor.parser->factory()->NewBlock(
- NULL, 1, true, descriptor.declaration_pos);
- for (auto declaration : declarations) {
- PatternRewriter::DeclareAndInitializeVariables(
- result, &descriptor, &declaration, names, CHECK_OK);
+ZoneList<const AstRawString*>* Parser::DeclareLabel(
+ ZoneList<const AstRawString*>* labels, VariableProxy* var, bool* ok) {
+ const AstRawString* label = var->raw_name();
+ // TODO(1240780): We don't check for redeclaration of labels
+ // during preparsing since keeping track of the set of active
+ // labels requires nontrivial changes to the way scopes are
+ // structured. However, these are probably changes we want to
+ // make later anyway so we should go back and fix this then.
+ if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
+ ReportMessage(MessageTemplate::kLabelRedeclaration, label);
+ *ok = false;
+ return nullptr;
}
- return result;
-}
-
-
-Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
- ZoneList<const AstRawString*>* names,
- bool* ok) {
- // VariableStatement ::
- // VariableDeclarations ';'
-
- // The scope of a var declared variable anywhere inside a function
- // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
- // transform a source-level var declaration into a (Function) Scope
- // declaration, and rewrite the source-level initialization into an assignment
- // statement. We use a block to collect multiple assignments.
- //
- // We mark the block as initializer block because we don't want the
- // rewriter to add a '.result' assignment to such a block (to get compliant
- // behavior for code such as print(eval('var x = 7')), and for cosmetic
- // reasons when pretty-printing. Also, unless an assignment (initialization)
- // is inside an initializer block, it is ignored.
-
- DeclarationParsingResult parsing_result;
- Block* result =
- ParseVariableDeclarations(var_context, &parsing_result, names, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return result;
-}
-
-Block* Parser::ParseVariableDeclarations(
- VariableDeclarationContext var_context,
- DeclarationParsingResult* parsing_result,
- ZoneList<const AstRawString*>* names, bool* ok) {
- // VariableDeclarations ::
- // ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
- //
- // The ES6 Draft Rev3 specifies the following grammar for const declarations
- //
- // ConstDeclaration ::
- // const ConstBinding (',' ConstBinding)* ';'
- // ConstBinding ::
- // Identifier '=' AssignmentExpression
- //
- // TODO(ES6):
- // ConstBinding ::
- // BindingPattern '=' AssignmentExpression
-
- parsing_result->descriptor.parser = this;
- parsing_result->descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
- parsing_result->descriptor.declaration_pos = peek_position();
- parsing_result->descriptor.initialization_pos = peek_position();
- parsing_result->descriptor.mode = VAR;
-
- Block* init_block = nullptr;
- if (var_context != kForStatement) {
- init_block = factory()->NewBlock(
- NULL, 1, true, parsing_result->descriptor.declaration_pos);
- }
-
- if (peek() == Token::VAR) {
- Consume(Token::VAR);
- } else if (peek() == Token::CONST) {
- Consume(Token::CONST);
- DCHECK(var_context != kStatement);
- parsing_result->descriptor.mode = CONST;
- } else if (peek() == Token::LET) {
- Consume(Token::LET);
- DCHECK(var_context != kStatement);
- parsing_result->descriptor.mode = LET;
- } else {
- UNREACHABLE(); // by current callers
+ if (labels == nullptr) {
+ labels = new (zone()) ZoneList<const AstRawString*>(1, zone());
}
-
- parsing_result->descriptor.scope = scope();
- parsing_result->descriptor.hoist_scope = nullptr;
-
-
- bool first_declaration = true;
- int bindings_start = peek_position();
- do {
- FuncNameInferrer::State fni_state(fni_);
-
- // Parse name.
- if (!first_declaration) Consume(Token::COMMA);
-
- Expression* pattern;
- int decl_pos = peek_position();
- {
- ExpressionClassifier pattern_classifier(this);
- pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
- ValidateBindingPattern(&pattern_classifier, CHECK_OK);
- if (IsLexicalVariableMode(parsing_result->descriptor.mode)) {
- ValidateLetPattern(&pattern_classifier, CHECK_OK);
- }
- }
-
- Scanner::Location variable_loc = scanner()->location();
- const AstRawString* single_name =
- pattern->IsVariableProxy() ? pattern->AsVariableProxy()->raw_name()
- : nullptr;
- if (single_name != nullptr) {
- if (fni_ != NULL) fni_->PushVariableName(single_name);
- }
-
- Expression* value = NULL;
- int initializer_position = kNoSourcePosition;
- if (Check(Token::ASSIGN)) {
- ExpressionClassifier classifier(this);
- value = ParseAssignmentExpression(var_context != kForStatement,
- &classifier, CHECK_OK);
- RewriteNonPattern(&classifier, CHECK_OK);
- variable_loc.end_pos = scanner()->location().end_pos;
-
- if (!parsing_result->first_initializer_loc.IsValid()) {
- parsing_result->first_initializer_loc = variable_loc;
- }
-
- // Don't infer if it is "a = function(){...}();"-like expression.
- if (single_name) {
- if (fni_ != NULL && value->AsCall() == NULL &&
- value->AsCallNew() == NULL) {
- fni_->Infer();
- } else {
- fni_->RemoveLastFunction();
- }
- }
-
- ParserBaseTraits<Parser>::SetFunctionNameFromIdentifierRef(value,
- pattern);
-
- // End position of the initializer is after the assignment expression.
- initializer_position = scanner()->location().end_pos;
- } else {
- // Initializers may be either required or implied unless this is a
- // for-in/of iteration variable.
- if (var_context != kForStatement || !PeekInOrOf()) {
- // ES6 'const' and binding patterns require initializers.
- if (parsing_result->descriptor.mode == CONST ||
- !pattern->IsVariableProxy()) {
- ReportMessageAt(
- Scanner::Location(decl_pos, scanner()->location().end_pos),
- MessageTemplate::kDeclarationMissingInitializer,
- !pattern->IsVariableProxy() ? "destructuring" : "const");
- *ok = false;
- return nullptr;
- }
-
- // 'let x' initializes 'x' to undefined.
- if (parsing_result->descriptor.mode == LET) {
- value = GetLiteralUndefined(position());
- }
- }
-
- // End position of the initializer is after the variable.
- initializer_position = position();
- }
-
- DeclarationParsingResult::Declaration decl(pattern, initializer_position,
- value);
- if (var_context == kForStatement) {
- // Save the declaration for further handling in ParseForStatement.
- parsing_result->declarations.Add(decl);
- } else {
- // Immediately declare the variable otherwise. This avoids O(N^2)
- // behavior (where N is the number of variables in a single
- // declaration) in the PatternRewriter having to do with removing
- // and adding VariableProxies to the Scope (see bug 4699).
- DCHECK_NOT_NULL(init_block);
- PatternRewriter::DeclareAndInitializeVariables(
- init_block, &parsing_result->descriptor, &decl, names, CHECK_OK);
- }
- first_declaration = false;
- } while (peek() == Token::COMMA);
-
- parsing_result->bindings_loc =
- Scanner::Location(bindings_start, scanner()->location().end_pos);
-
- DCHECK(*ok);
- return init_block;
+ labels->Add(label, zone());
+ // Remove the "ghost" variable that turned out to be a label
+ // from the top scope. This way, we don't try to resolve it
+ // during the scope processing.
+ scope()->RemoveUnresolved(var);
+ return labels;
}
-
-static bool ContainsLabel(ZoneList<const AstRawString*>* labels,
- const AstRawString* label) {
- DCHECK(label != NULL);
- if (labels != NULL) {
- for (int i = labels->length(); i-- > 0; ) {
- if (labels->at(i) == label) {
- return true;
- }
+bool Parser::ContainsLabel(ZoneList<const AstRawString*>* labels,
+ const AstRawString* label) {
+ DCHECK_NOT_NULL(label);
+ if (labels != nullptr) {
+ for (int i = labels->length(); i-- > 0;) {
+ if (labels->at(i) == label) return true;
}
}
return false;
}
-Statement* Parser::ParseFunctionDeclaration(bool* ok) {
- Consume(Token::FUNCTION);
- int pos = position();
- ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
- if (Check(Token::MUL)) {
- flags |= ParseFunctionFlags::kIsGenerator;
- if (allow_harmony_restrictive_declarations()) {
- ReportMessageAt(scanner()->location(),
- MessageTemplate::kGeneratorInLegacyContext);
- *ok = false;
- return nullptr;
- }
- }
-
- return ParseHoistableDeclaration(pos, flags, nullptr, false, CHECK_OK);
-}
-
-Statement* Parser::ParseExpressionOrLabelledStatement(
- ZoneList<const AstRawString*>* labels,
- AllowLabelledFunctionStatement allow_function, bool* ok) {
- // ExpressionStatement | LabelledStatement ::
- // Expression ';'
- // Identifier ':' Statement
- //
- // ExpressionStatement[Yield] :
- // [lookahead ∉ {{, function, class, let [}] Expression[In, ?Yield] ;
-
- int pos = peek_position();
-
- switch (peek()) {
- case Token::FUNCTION:
- case Token::LBRACE:
- UNREACHABLE(); // Always handled by the callers.
- case Token::CLASS:
- ReportUnexpectedToken(Next());
- *ok = false;
- return nullptr;
- default:
- break;
- }
-
- bool starts_with_idenfifier = peek_any_identifier();
- Expression* expr = ParseExpression(true, CHECK_OK);
- if (peek() == Token::COLON && starts_with_idenfifier && expr != NULL &&
- expr->AsVariableProxy() != NULL &&
- !expr->AsVariableProxy()->is_this()) {
- // Expression is a single identifier, and not, e.g., a parenthesized
- // identifier.
- VariableProxy* var = expr->AsVariableProxy();
- const AstRawString* label = var->raw_name();
- // TODO(1240780): We don't check for redeclaration of labels
- // during preparsing since keeping track of the set of active
- // labels requires nontrivial changes to the way scopes are
- // structured. However, these are probably changes we want to
- // make later anyway so we should go back and fix this then.
- if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
- ReportMessage(MessageTemplate::kLabelRedeclaration, label);
- *ok = false;
- return NULL;
- }
- if (labels == NULL) {
- labels = new(zone()) ZoneList<const AstRawString*>(4, zone());
- }
- labels->Add(label, zone());
- // Remove the "ghost" variable that turned out to be a label
- // from the top scope. This way, we don't try to resolve it
- // during the scope processing.
- scope()->RemoveUnresolved(var);
- Expect(Token::COLON, CHECK_OK);
- // ES#sec-labelled-function-declarations Labelled Function Declarations
- if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
- if (allow_function == kAllowLabelledFunctionStatement) {
- return ParseFunctionDeclaration(ok);
- } else {
- return ParseScopedStatement(labels, true, ok);
- }
- }
- return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
- }
-
- // If we have an extension, we allow a native function declaration.
- // A native function declaration starts with "native function" with
- // no line-terminator between the two words.
- if (extension_ != NULL && peek() == Token::FUNCTION &&
- !scanner()->HasAnyLineTerminatorBeforeNext() && expr != NULL &&
- expr->AsVariableProxy() != NULL &&
- expr->AsVariableProxy()->raw_name() ==
- ast_value_factory()->native_string() &&
- !scanner()->literal_contains_escapes()) {
- return ParseNativeDeclaration(ok);
- }
-
- // Parsed expression statement, followed by semicolon.
- ExpectSemicolon(CHECK_OK);
- return factory()->NewExpressionStatement(expr, pos);
-}
-
-
-IfStatement* Parser::ParseIfStatement(ZoneList<const AstRawString*>* labels,
- bool* ok) {
- // IfStatement ::
- // 'if' '(' Expression ')' Statement ('else' Statement)?
-
- int pos = peek_position();
- Expect(Token::IF, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- Expression* condition = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- Statement* then_statement = ParseScopedStatement(labels, false, CHECK_OK);
- Statement* else_statement = NULL;
- if (peek() == Token::ELSE) {
- Next();
- else_statement = ParseScopedStatement(labels, false, CHECK_OK);
- } else {
- else_statement = factory()->NewEmptyStatement(kNoSourcePosition);
- }
- return factory()->NewIfStatement(
- condition, then_statement, else_statement, pos);
-}
-
-
-Statement* Parser::ParseContinueStatement(bool* ok) {
- // ContinueStatement ::
- // 'continue' Identifier? ';'
-
- int pos = peek_position();
- Expect(Token::CONTINUE, CHECK_OK);
- const AstRawString* label = NULL;
- Token::Value tok = peek();
- if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
- tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
- // ECMA allows "eval" or "arguments" as labels even in strict mode.
- label = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
- }
- IterationStatement* target = LookupContinueTarget(label, CHECK_OK);
- if (target == NULL) {
- // Illegal continue statement.
- MessageTemplate::Template message = MessageTemplate::kIllegalContinue;
- if (label != NULL) {
- message = MessageTemplate::kUnknownLabel;
- }
- ReportMessage(message, label);
- *ok = false;
- return NULL;
- }
- ExpectSemicolon(CHECK_OK);
- return factory()->NewContinueStatement(target, pos);
-}
-
-
-Statement* Parser::ParseBreakStatement(ZoneList<const AstRawString*>* labels,
- bool* ok) {
- // BreakStatement ::
- // 'break' Identifier? ';'
-
- int pos = peek_position();
- Expect(Token::BREAK, CHECK_OK);
- const AstRawString* label = NULL;
- Token::Value tok = peek();
- if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
- tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
- // ECMA allows "eval" or "arguments" as labels even in strict mode.
- label = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
- }
- // Parse labeled break statements that target themselves into
- // empty statements, e.g. 'l1: l2: l3: break l2;'
- if (label != NULL && ContainsLabel(labels, label)) {
- ExpectSemicolon(CHECK_OK);
- return factory()->NewEmptyStatement(pos);
- }
- BreakableStatement* target = NULL;
- target = LookupBreakTarget(label, CHECK_OK);
- if (target == NULL) {
- // Illegal break statement.
- MessageTemplate::Template message = MessageTemplate::kIllegalBreak;
- if (label != NULL) {
- message = MessageTemplate::kUnknownLabel;
- }
- ReportMessage(message, label);
- *ok = false;
- return NULL;
- }
- ExpectSemicolon(CHECK_OK);
- return factory()->NewBreakStatement(target, pos);
-}
-
-
-Statement* Parser::ParseReturnStatement(bool* ok) {
- // ReturnStatement ::
- // 'return' Expression? ';'
-
- // Consume the return token. It is necessary to do that before
- // reporting any errors on it, because of the way errors are
- // reported (underlining).
- Expect(Token::RETURN, CHECK_OK);
- Scanner::Location loc = scanner()->location();
-
- Token::Value tok = peek();
- Statement* result;
- Expression* return_value;
- if (scanner()->HasAnyLineTerminatorBeforeNext() ||
- tok == Token::SEMICOLON ||
- tok == Token::RBRACE ||
- tok == Token::EOS) {
- if (IsSubclassConstructor(function_state_->kind())) {
- return_value = ThisExpression(loc.beg_pos);
- } else {
- return_value = GetLiteralUndefined(position());
- }
- } else {
- int pos = peek_position();
-
- if (IsSubclassConstructor(function_state_->kind())) {
- // Because of the return code rewriting that happens in case of a subclass
- // constructor we don't want to accept tail calls, therefore we don't set
- // ReturnExprScope to kInsideValidReturnStatement here.
- return_value = ParseExpression(true, CHECK_OK);
-
- // For subclass constructors we need to return this in case of undefined
- // return a Smi (transformed into an exception in the ConstructStub)
- // for a non object.
- //
- // return expr;
- //
- // Is rewritten as:
- //
- // return (temp = expr) === undefined ? this :
- // %_IsJSReceiver(temp) ? temp : 1;
-
- // temp = expr
- Variable* temp = NewTemporary(ast_value_factory()->empty_string());
- Assignment* assign = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(temp), return_value, pos);
-
- // %_IsJSReceiver(temp)
- ZoneList<Expression*>* is_spec_object_args =
- new (zone()) ZoneList<Expression*>(1, zone());
- is_spec_object_args->Add(factory()->NewVariableProxy(temp), zone());
- Expression* is_spec_object_call = factory()->NewCallRuntime(
- Runtime::kInlineIsJSReceiver, is_spec_object_args, pos);
-
- // %_IsJSReceiver(temp) ? temp : 1;
- Expression* is_object_conditional = factory()->NewConditional(
- is_spec_object_call, factory()->NewVariableProxy(temp),
- factory()->NewSmiLiteral(1, pos), pos);
-
- // temp === undefined
- Expression* is_undefined = factory()->NewCompareOperation(
- Token::EQ_STRICT, assign,
- factory()->NewUndefinedLiteral(kNoSourcePosition), pos);
-
- // is_undefined ? this : is_object_conditional
- return_value = factory()->NewConditional(
- is_undefined, ThisExpression(pos), is_object_conditional, pos);
- } else {
- ReturnExprScope maybe_allow_tail_calls(
- function_state_, ReturnExprContext::kInsideValidReturnStatement);
- return_value = ParseExpression(true, CHECK_OK);
-
- if (allow_tailcalls() && !is_sloppy(language_mode()) && !is_resumable()) {
- // ES6 14.6.1 Static Semantics: IsInTailPosition
- function_state_->AddImplicitTailCallExpression(return_value);
- }
- }
+Expression* Parser::RewriteReturn(Expression* return_value, int pos) {
+ if (IsSubclassConstructor(function_state_->kind())) {
+ // For subclass constructors we need to return this in case of undefined
+ // return a Smi (transformed into an exception in the ConstructStub)
+ // for a non object.
+ //
+ // return expr;
+ //
+ // Is rewritten as:
+ //
+ // return (temp = expr) === undefined ? this :
+ // %_IsJSReceiver(temp) ? temp : 1;
+
+ // temp = expr
+ Variable* temp = NewTemporary(ast_value_factory()->empty_string());
+ Assignment* assign = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(temp), return_value, pos);
+
+ // %_IsJSReceiver(temp)
+ ZoneList<Expression*>* is_spec_object_args =
+ new (zone()) ZoneList<Expression*>(1, zone());
+ is_spec_object_args->Add(factory()->NewVariableProxy(temp), zone());
+ Expression* is_spec_object_call = factory()->NewCallRuntime(
+ Runtime::kInlineIsJSReceiver, is_spec_object_args, pos);
+
+ // %_IsJSReceiver(temp) ? temp : 1;
+ Expression* is_object_conditional = factory()->NewConditional(
+ is_spec_object_call, factory()->NewVariableProxy(temp),
+ factory()->NewSmiLiteral(1, pos), pos);
+
+ // temp === undefined
+ Expression* is_undefined = factory()->NewCompareOperation(
+ Token::EQ_STRICT, assign,
+ factory()->NewUndefinedLiteral(kNoSourcePosition), pos);
+
+ // is_undefined ? this : is_object_conditional
+ return_value = factory()->NewConditional(is_undefined, ThisExpression(pos),
+ is_object_conditional, pos);
}
- ExpectSemicolon(CHECK_OK);
-
if (is_generator()) {
return_value = BuildIteratorResult(return_value, true);
} else if (is_async_function()) {
- return_value = BuildPromiseResolve(return_value, return_value->position());
- }
-
- result = factory()->NewReturnStatement(return_value, loc.beg_pos);
-
- DeclarationScope* decl_scope = GetDeclarationScope();
- if (decl_scope->is_script_scope() || decl_scope->is_eval_scope()) {
- ReportMessageAt(loc, MessageTemplate::kIllegalReturn);
- *ok = false;
- return NULL;
+ return_value = BuildResolvePromise(return_value, return_value->position());
}
- return result;
+ return return_value;
}
-
-Statement* Parser::ParseWithStatement(ZoneList<const AstRawString*>* labels,
- bool* ok) {
- // WithStatement ::
- // 'with' '(' Expression ')' Statement
-
- Expect(Token::WITH, CHECK_OK);
- int pos = position();
-
- if (is_strict(language_mode())) {
- ReportMessage(MessageTemplate::kStrictWith);
+Expression* Parser::RewriteDoExpression(Block* body, int pos, bool* ok) {
+ Variable* result = NewTemporary(ast_value_factory()->dot_result_string());
+ DoExpression* expr = factory()->NewDoExpression(body, result, pos);
+ if (!Rewriter::Rewrite(this, GetClosureScope(), expr, ast_value_factory())) {
*ok = false;
- return NULL;
- }
-
- Expect(Token::LPAREN, CHECK_OK);
- Expression* expr = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
-
- Scope* with_scope = NewScope(WITH_SCOPE);
- Statement* body;
- {
- BlockState block_state(&scope_state_, with_scope);
- with_scope->set_start_position(scanner()->peek_location().beg_pos);
- body = ParseScopedStatement(labels, true, CHECK_OK);
- with_scope->set_end_position(scanner()->location().end_pos);
- }
- return factory()->NewWithStatement(with_scope, expr, body, pos);
-}
-
-
-CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
- // CaseClause ::
- // 'case' Expression ':' StatementList
- // 'default' ':' StatementList
-
- Expression* label = NULL; // NULL expression indicates default case
- if (peek() == Token::CASE) {
- Expect(Token::CASE, CHECK_OK);
- label = ParseExpression(true, CHECK_OK);
- } else {
- Expect(Token::DEFAULT, CHECK_OK);
- if (*default_seen_ptr) {
- ReportMessage(MessageTemplate::kMultipleDefaultsInSwitch);
- *ok = false;
- return NULL;
- }
- *default_seen_ptr = true;
- }
- Expect(Token::COLON, CHECK_OK);
- int pos = position();
- ZoneList<Statement*>* statements =
- new(zone()) ZoneList<Statement*>(5, zone());
- Statement* stat = NULL;
- while (peek() != Token::CASE &&
- peek() != Token::DEFAULT &&
- peek() != Token::RBRACE) {
- stat = ParseStatementListItem(CHECK_OK);
- statements->Add(stat, zone());
+ return nullptr;
}
- return factory()->NewCaseClause(label, statements, pos);
+ return expr;
}
-
-Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
- bool* ok) {
- // SwitchStatement ::
- // 'switch' '(' Expression ')' '{' CaseClause* '}'
+Statement* Parser::RewriteSwitchStatement(Expression* tag,
+ SwitchStatement* switch_statement,
+ ZoneList<CaseClause*>* cases,
+ Scope* scope) {
// In order to get the CaseClauses to execute in their own lexical scope,
// but without requiring downstream code to have special scope handling
// code for switch statements, desugar into blocks as follows:
@@ -2728,12 +1734,6 @@ Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
// }
Block* switch_block = factory()->NewBlock(NULL, 2, false, kNoSourcePosition);
- int switch_pos = peek_position();
-
- Expect(Token::SWITCH, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- Expression* tag = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
Variable* tag_variable =
NewTemporary(ast_value_factory()->dot_switch_tag_string());
@@ -2752,301 +1752,112 @@ Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition),
zone());
+ Expression* tag_read = factory()->NewVariableProxy(tag_variable);
+ switch_statement->Initialize(tag_read, cases);
Block* cases_block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
-
- SwitchStatement* switch_statement =
- factory()->NewSwitchStatement(labels, switch_pos);
-
- {
- BlockState cases_block_state(&scope_state_);
- cases_block_state.set_start_position(scanner()->location().beg_pos);
- cases_block_state.SetNonlinear();
- Target target(&this->target_stack_, switch_statement);
-
- Expression* tag_read = factory()->NewVariableProxy(tag_variable);
-
- bool default_seen = false;
- ZoneList<CaseClause*>* cases =
- new (zone()) ZoneList<CaseClause*>(4, zone());
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- CaseClause* clause = ParseCaseClause(&default_seen, CHECK_OK);
- cases->Add(clause, zone());
- }
- switch_statement->Initialize(tag_read, cases);
- cases_block->statements()->Add(switch_statement, zone());
- Expect(Token::RBRACE, CHECK_OK);
-
- cases_block_state.set_end_position(scanner()->location().end_pos);
- cases_block->set_scope(cases_block_state.FinalizedBlockScope());
- }
-
+ cases_block->statements()->Add(switch_statement, zone());
+ cases_block->set_scope(scope);
switch_block->statements()->Add(cases_block, zone());
-
return switch_block;
}
-
-Statement* Parser::ParseThrowStatement(bool* ok) {
- // ThrowStatement ::
- // 'throw' Expression ';'
-
- Expect(Token::THROW, CHECK_OK);
- int pos = position();
- if (scanner()->HasAnyLineTerminatorBeforeNext()) {
- ReportMessage(MessageTemplate::kNewlineAfterThrow);
- *ok = false;
- return NULL;
- }
- Expression* exception = ParseExpression(true, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
-
- return factory()->NewExpressionStatement(
- factory()->NewThrow(exception, pos), pos);
-}
-
-
-TryStatement* Parser::ParseTryStatement(bool* ok) {
- // TryStatement ::
- // 'try' Block Catch
- // 'try' Block Finally
- // 'try' Block Catch Finally
- //
- // Catch ::
- // 'catch' '(' Identifier ')' Block
- //
- // Finally ::
- // 'finally' Block
-
- Expect(Token::TRY, CHECK_OK);
- int pos = position();
-
- Block* try_block;
- {
- ReturnExprScope no_tail_calls(function_state_,
- ReturnExprContext::kInsideTryBlock);
- try_block = ParseBlock(NULL, CHECK_OK);
- }
-
- Token::Value tok = peek();
-
- bool catch_for_promise_reject = false;
- if (allow_natives() && tok == Token::MOD) {
- Consume(Token::MOD);
- catch_for_promise_reject = true;
- tok = peek();
+void Parser::RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {
+ if (catch_info->name == nullptr) {
+ DCHECK_NOT_NULL(catch_info->pattern);
+ catch_info->name = ast_value_factory()->dot_catch_string();
}
+ catch_info->variable = catch_info->scope->DeclareLocal(
+ catch_info->name, VAR, kCreatedInitialized, NORMAL_VARIABLE);
+ if (catch_info->pattern != nullptr) {
+ DeclarationDescriptor descriptor;
+ descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
+ descriptor.scope = scope();
+ descriptor.hoist_scope = nullptr;
+ descriptor.mode = LET;
+ descriptor.declaration_pos = catch_info->pattern->position();
+ descriptor.initialization_pos = catch_info->pattern->position();
- if (tok != Token::CATCH && tok != Token::FINALLY) {
- ReportMessage(MessageTemplate::kNoCatchOrFinally);
- *ok = false;
- return NULL;
- }
-
- Scope* catch_scope = NULL;
- Variable* catch_variable = NULL;
- Block* catch_block = NULL;
- TailCallExpressionList tail_call_expressions_in_catch_block(zone());
- if (tok == Token::CATCH) {
- Consume(Token::CATCH);
-
- Expect(Token::LPAREN, CHECK_OK);
- catch_scope = NewScope(CATCH_SCOPE);
- catch_scope->set_start_position(scanner()->location().beg_pos);
-
- {
- CollectExpressionsInTailPositionToListScope
- collect_tail_call_expressions_scope(
- function_state_, &tail_call_expressions_in_catch_block);
- BlockState block_state(&scope_state_, catch_scope);
-
- catch_block = factory()->NewBlock(nullptr, 16, false, kNoSourcePosition);
+ // Initializer position for variables declared by the pattern.
+ const int initializer_position = position();
- // Create a block scope to hold any lexical declarations created
- // as part of destructuring the catch parameter.
- {
- BlockState block_state(&scope_state_);
- block_state.set_start_position(scanner()->location().beg_pos);
- Target target(&this->target_stack_, catch_block);
-
- const AstRawString* name = ast_value_factory()->dot_catch_string();
- Expression* pattern = nullptr;
- if (peek_any_identifier()) {
- name = ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
- } else {
- ExpressionClassifier pattern_classifier(this);
- pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
- ValidateBindingPattern(&pattern_classifier, CHECK_OK);
- }
- catch_variable = catch_scope->DeclareLocal(
- name, VAR, kCreatedInitialized, Variable::NORMAL);
-
- Expect(Token::RPAREN, CHECK_OK);
-
- ZoneList<const AstRawString*> bound_names(1, zone());
- if (pattern != nullptr) {
- DeclarationDescriptor descriptor;
- descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
- descriptor.parser = this;
- descriptor.scope = scope();
- descriptor.hoist_scope = nullptr;
- descriptor.mode = LET;
- descriptor.declaration_pos = pattern->position();
- descriptor.initialization_pos = pattern->position();
-
- // Initializer position for variables declared by the pattern.
- const int initializer_position = position();
-
- DeclarationParsingResult::Declaration decl(
- pattern, initializer_position,
- factory()->NewVariableProxy(catch_variable));
-
- Block* init_block =
- factory()->NewBlock(nullptr, 8, true, kNoSourcePosition);
- PatternRewriter::DeclareAndInitializeVariables(
- init_block, &descriptor, &decl, &bound_names, CHECK_OK);
- catch_block->statements()->Add(init_block, zone());
- } else {
- bound_names.Add(name, zone());
- }
+ DeclarationParsingResult::Declaration decl(
+ catch_info->pattern, initializer_position,
+ factory()->NewVariableProxy(catch_info->variable));
- Block* inner_block = ParseBlock(nullptr, CHECK_OK);
- catch_block->statements()->Add(inner_block, zone());
-
- // Check for `catch(e) { let e; }` and similar errors.
- Scope* inner_block_scope = inner_block->scope();
- if (inner_block_scope != nullptr) {
- Declaration* decl =
- inner_block_scope->CheckLexDeclarationsConflictingWith(
- bound_names);
- if (decl != nullptr) {
- const AstRawString* name = decl->proxy()->raw_name();
- int position = decl->proxy()->position();
- Scanner::Location location =
- position == kNoSourcePosition
- ? Scanner::Location::invalid()
- : Scanner::Location(position, position + 1);
- ReportMessageAt(location, MessageTemplate::kVarRedeclaration, name);
- *ok = false;
- return nullptr;
- }
- }
- block_state.set_end_position(scanner()->location().end_pos);
- catch_block->set_scope(block_state.FinalizedBlockScope());
- }
+ catch_info->init_block =
+ factory()->NewBlock(nullptr, 8, true, kNoSourcePosition);
+ PatternRewriter::DeclareAndInitializeVariables(
+ this, catch_info->init_block, &descriptor, &decl,
+ &catch_info->bound_names, ok);
+ } else {
+ catch_info->bound_names.Add(catch_info->name, zone());
+ }
+}
+
+void Parser::ValidateCatchBlock(const CatchInfo& catch_info, bool* ok) {
+ // Check for `catch(e) { let e; }` and similar errors.
+ Scope* inner_block_scope = catch_info.inner_block->scope();
+ if (inner_block_scope != nullptr) {
+ Declaration* decl = inner_block_scope->CheckLexDeclarationsConflictingWith(
+ catch_info.bound_names);
+ if (decl != nullptr) {
+ const AstRawString* name = decl->proxy()->raw_name();
+ int position = decl->proxy()->position();
+ Scanner::Location location =
+ position == kNoSourcePosition
+ ? Scanner::Location::invalid()
+ : Scanner::Location(position, position + 1);
+ ReportMessageAt(location, MessageTemplate::kVarRedeclaration, name);
+ *ok = false;
}
-
- catch_scope->set_end_position(scanner()->location().end_pos);
- tok = peek();
- }
-
- Block* finally_block = NULL;
- DCHECK(tok == Token::FINALLY || catch_block != NULL);
- if (tok == Token::FINALLY) {
- Consume(Token::FINALLY);
- finally_block = ParseBlock(NULL, CHECK_OK);
}
+}
+Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
+ Block* finally_block,
+ const CatchInfo& catch_info, int pos) {
// Simplify the AST nodes by converting:
// 'try B0 catch B1 finally B2'
// to:
// 'try { try B0 catch B1 } finally B2'
- if (catch_block != NULL && finally_block != NULL) {
+ if (catch_block != nullptr && finally_block != nullptr) {
// If we have both, create an inner try/catch.
- DCHECK(catch_scope != NULL && catch_variable != NULL);
+ DCHECK_NOT_NULL(catch_info.scope);
+ DCHECK_NOT_NULL(catch_info.variable);
TryCatchStatement* statement;
- if (catch_for_promise_reject) {
+ if (catch_info.for_promise_reject) {
statement = factory()->NewTryCatchStatementForPromiseReject(
- try_block, catch_scope, catch_variable, catch_block,
+ try_block, catch_info.scope, catch_info.variable, catch_block,
kNoSourcePosition);
} else {
- statement = factory()->NewTryCatchStatement(try_block, catch_scope,
- catch_variable, catch_block,
- kNoSourcePosition);
+ statement = factory()->NewTryCatchStatement(
+ try_block, catch_info.scope, catch_info.variable, catch_block,
+ kNoSourcePosition);
}
- try_block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
+ try_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
try_block->statements()->Add(statement, zone());
- catch_block = NULL; // Clear to indicate it's been handled.
+ catch_block = nullptr; // Clear to indicate it's been handled.
}
- TryStatement* result = NULL;
- if (catch_block != NULL) {
+ if (catch_block != nullptr) {
// For a try-catch construct append return expressions from the catch block
// to the list of return expressions.
function_state_->tail_call_expressions().Append(
- tail_call_expressions_in_catch_block);
+ catch_info.tail_call_expressions);
- DCHECK(finally_block == NULL);
- DCHECK(catch_scope != NULL && catch_variable != NULL);
- result = factory()->NewTryCatchStatement(try_block, catch_scope,
- catch_variable, catch_block, pos);
+ DCHECK_NULL(finally_block);
+ DCHECK_NOT_NULL(catch_info.scope);
+ DCHECK_NOT_NULL(catch_info.variable);
+ return factory()->NewTryCatchStatement(
+ try_block, catch_info.scope, catch_info.variable, catch_block, pos);
} else {
- if (FLAG_harmony_explicit_tailcalls &&
- tail_call_expressions_in_catch_block.has_explicit_tail_calls()) {
- // TODO(ishell): update chapter number.
- // ES8 XX.YY.ZZ
- ReportMessageAt(tail_call_expressions_in_catch_block.location(),
- MessageTemplate::kUnexpectedTailCallInCatchBlock);
- *ok = false;
- return NULL;
- }
- DCHECK(finally_block != NULL);
- result = factory()->NewTryFinallyStatement(try_block, finally_block, pos);
+ DCHECK_NOT_NULL(finally_block);
+ return factory()->NewTryFinallyStatement(try_block, finally_block, pos);
}
-
- return result;
-}
-
-
-DoWhileStatement* Parser::ParseDoWhileStatement(
- ZoneList<const AstRawString*>* labels, bool* ok) {
- // DoStatement ::
- // 'do' Statement 'while' '(' Expression ')' ';'
-
- DoWhileStatement* loop =
- factory()->NewDoWhileStatement(labels, peek_position());
- Target target(&this->target_stack_, loop);
-
- Expect(Token::DO, CHECK_OK);
- Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
- Expect(Token::WHILE, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
-
- Expression* cond = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
-
- // Allow do-statements to be terminated with and without
- // semi-colons. This allows code such as 'do;while(0)return' to
- // parse, which would not be the case if we had used the
- // ExpectSemicolon() functionality here.
- if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
-
- if (loop != NULL) loop->Initialize(cond, body);
- return loop;
-}
-
-
-WhileStatement* Parser::ParseWhileStatement(
- ZoneList<const AstRawString*>* labels, bool* ok) {
- // WhileStatement ::
- // 'while' '(' Expression ')' Statement
-
- WhileStatement* loop = factory()->NewWhileStatement(labels, peek_position());
- Target target(&this->target_stack_, loop);
-
- Expect(Token::WHILE, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- Expression* cond = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
-
- if (loop != NULL) loop->Initialize(cond, body);
- return loop;
}
-
// !%_IsJSReceiver(result = iterator.next()) &&
// %ThrowIteratorResultNotAnObject(result)
Expression* Parser::BuildIteratorNextResult(Expression* iterator,
@@ -3115,6 +1926,138 @@ Statement* Parser::InitializeForEachStatement(ForEachStatement* stmt,
return stmt;
}
+// Special case for legacy for
+//
+// for (var x = initializer in enumerable) body
+//
+// An initialization block of the form
+//
+// {
+// x = initializer;
+// }
+//
+// is returned in this case. It has reserved space for two statements,
+// so that (later on during parsing), the equivalent of
+//
+// for (x in enumerable) body
+//
+// is added as a second statement to it.
+Block* Parser::RewriteForVarInLegacy(const ForInfo& for_info) {
+ const DeclarationParsingResult::Declaration& decl =
+ for_info.parsing_result.declarations[0];
+ if (!IsLexicalVariableMode(for_info.parsing_result.descriptor.mode) &&
+ decl.pattern->IsVariableProxy() && decl.initializer != nullptr) {
+ DCHECK(!allow_harmony_for_in());
+ ++use_counts_[v8::Isolate::kForInInitializer];
+ const AstRawString* name = decl.pattern->AsVariableProxy()->raw_name();
+ VariableProxy* single_var = NewUnresolved(name);
+ Block* init_block = factory()->NewBlock(
+ nullptr, 2, true, for_info.parsing_result.descriptor.declaration_pos);
+ init_block->statements()->Add(
+ factory()->NewExpressionStatement(
+ factory()->NewAssignment(Token::ASSIGN, single_var,
+ decl.initializer, kNoSourcePosition),
+ kNoSourcePosition),
+ zone());
+ return init_block;
+ }
+ return nullptr;
+}
+
+// Rewrite a for-in/of statement of the form
+//
+// for (let/const/var x in/of e) b
+//
+// into
+//
+// {
+// <let x' be a temporary variable>
+// for (x' in/of e) {
+// let/const/var x;
+// x = x';
+// b;
+// }
+// let x; // for TDZ
+// }
+void Parser::DesugarBindingInForEachStatement(ForInfo* for_info,
+ Block** body_block,
+ Expression** each_variable,
+ bool* ok) {
+ DeclarationParsingResult::Declaration& decl =
+ for_info->parsing_result.declarations[0];
+ Variable* temp = NewTemporary(ast_value_factory()->dot_for_string());
+ auto each_initialization_block =
+ factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
+ {
+ auto descriptor = for_info->parsing_result.descriptor;
+ descriptor.declaration_pos = kNoSourcePosition;
+ descriptor.initialization_pos = kNoSourcePosition;
+ decl.initializer = factory()->NewVariableProxy(temp);
+
+ bool is_for_var_of =
+ for_info->mode == ForEachStatement::ITERATE &&
+ for_info->parsing_result.descriptor.mode == VariableMode::VAR;
+
+ PatternRewriter::DeclareAndInitializeVariables(
+ this, each_initialization_block, &descriptor, &decl,
+ (IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
+ is_for_var_of)
+ ? &for_info->bound_names
+ : nullptr,
+ CHECK_OK_VOID);
+
+ // Annex B.3.5 prohibits the form
+ // `try {} catch(e) { for (var e of {}); }`
+ // So if we are parsing a statement like `for (var ... of ...)`
+ // we need to walk up the scope chain and look for catch scopes
+ // which have a simple binding, then compare their binding against
+ // all of the names declared in the init of the for-of we're
+ // parsing.
+ if (is_for_var_of) {
+ Scope* catch_scope = scope();
+ while (catch_scope != nullptr && !catch_scope->is_declaration_scope()) {
+ if (catch_scope->is_catch_scope()) {
+ auto name = catch_scope->catch_variable_name();
+ // If it's a simple binding and the name is declared in the for loop.
+ if (name != ast_value_factory()->dot_catch_string() &&
+ for_info->bound_names.Contains(name)) {
+ ReportMessageAt(for_info->parsing_result.bindings_loc,
+ MessageTemplate::kVarRedeclaration, name);
+ *ok = false;
+ return;
+ }
+ }
+ catch_scope = catch_scope->outer_scope();
+ }
+ }
+ }
+
+ *body_block = factory()->NewBlock(nullptr, 3, false, kNoSourcePosition);
+ (*body_block)->statements()->Add(each_initialization_block, zone());
+ *each_variable = factory()->NewVariableProxy(temp, for_info->each_loc.beg_pos,
+ for_info->each_loc.end_pos);
+}
+
+// Create a TDZ for any lexically-bound names in for in/of statements.
+Block* Parser::CreateForEachStatementTDZ(Block* init_block,
+ const ForInfo& for_info, bool* ok) {
+ if (IsLexicalVariableMode(for_info.parsing_result.descriptor.mode)) {
+ DCHECK_NULL(init_block);
+
+ init_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
+
+ for (int i = 0; i < for_info.bound_names.length(); ++i) {
+ // TODO(adamk): This needs to be some sort of special
+ // INTERNAL variable that's invisible to the debugger
+ // but visible to everything else.
+ Declaration* tdz_decl = DeclareVariable(for_info.bound_names[i], LET,
+ kNoSourcePosition, CHECK_OK);
+ tdz_decl->proxy()->var()->set_initializer_position(position());
+ }
+ }
+ return init_block;
+}
+
Statement* Parser::InitializeForOfStatement(ForOfStatement* for_of,
Expression* each,
Expression* iterable,
@@ -3138,8 +2081,7 @@ Statement* Parser::InitializeForOfStatement(ForOfStatement* for_of,
{
assign_iterator = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(iterator),
- GetIterator(iterable, factory(), iterable->position()),
- iterable->position());
+ GetIterator(iterable, iterable->position()), iterable->position());
}
// !%_IsJSReceiver(result = iterator.next()) &&
@@ -3240,9 +2182,8 @@ Statement* Parser::InitializeForOfStatement(ForOfStatement* for_of,
}
Statement* Parser::DesugarLexicalBindingsInForStatement(
- Scope* inner_scope, VariableMode mode, ZoneList<const AstRawString*>* names,
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
- Statement* body, bool* ok) {
+ Statement* body, Scope* inner_scope, const ForInfo& for_info, bool* ok) {
// ES6 13.7.4.8 specifies that on each loop iteration the let variables are
// copied into a new environment. Moreover, the "next" statement must be
// evaluated not in the environment of the just completed iteration but in
@@ -3280,11 +2221,11 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// }
// }
- DCHECK(names->length() > 0);
- ZoneList<Variable*> temps(names->length(), zone());
+ DCHECK(for_info.bound_names.length() > 0);
+ ZoneList<Variable*> temps(for_info.bound_names.length(), zone());
- Block* outer_block =
- factory()->NewBlock(NULL, names->length() + 4, false, kNoSourcePosition);
+ Block* outer_block = factory()->NewBlock(
+ nullptr, for_info.bound_names.length() + 4, false, kNoSourcePosition);
// Add statement: let/const x = i.
outer_block->statements()->Add(init, zone());
@@ -3293,8 +2234,8 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// For each lexical variable x:
// make statement: temp_x = x.
- for (int i = 0; i < names->length(); i++) {
- VariableProxy* proxy = NewUnresolved(names->at(i));
+ for (int i = 0; i < for_info.bound_names.length(); i++) {
+ VariableProxy* proxy = NewUnresolved(for_info.bound_names[i]);
Variable* temp = NewTemporary(temp_name);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
Assignment* assignment = factory()->NewAssignment(Token::ASSIGN, temp_proxy,
@@ -3338,14 +2279,15 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
{
BlockState block_state(&scope_state_, inner_scope);
- Block* ignore_completion_block =
- factory()->NewBlock(NULL, names->length() + 3, true, kNoSourcePosition);
- ZoneList<Variable*> inner_vars(names->length(), zone());
+ Block* ignore_completion_block = factory()->NewBlock(
+ nullptr, for_info.bound_names.length() + 3, true, kNoSourcePosition);
+ ZoneList<Variable*> inner_vars(for_info.bound_names.length(), zone());
// For each let variable x:
// make statement: let/const x = temp_x.
- for (int i = 0; i < names->length(); i++) {
- Declaration* decl =
- DeclareVariable(names->at(i), mode, kNoSourcePosition, CHECK_OK);
+ for (int i = 0; i < for_info.bound_names.length(); i++) {
+ Declaration* decl = DeclareVariable(
+ for_info.bound_names[i], for_info.parsing_result.descriptor.mode,
+ kNoSourcePosition, CHECK_OK);
inner_vars.Add(decl->proxy()->var(), zone());
VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
Assignment* assignment = factory()->NewAssignment(
@@ -3429,7 +2371,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// Make the comma-separated list of temp_x = x assignments.
int inner_var_proxy_pos = scanner()->location().beg_pos;
- for (int i = 0; i < names->length(); i++) {
+ for (int i = 0; i < for_info.bound_names.length(); i++) {
VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
VariableProxy* proxy =
factory()->NewVariableProxy(inner_vars.at(i), inner_var_proxy_pos);
@@ -3479,433 +2421,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
return outer_block;
}
-Statement* Parser::ParseScopedStatement(ZoneList<const AstRawString*>* labels,
- bool legacy, bool* ok) {
- if (is_strict(language_mode()) || peek() != Token::FUNCTION ||
- (legacy && allow_harmony_restrictive_declarations())) {
- return ParseSubStatement(labels, kDisallowLabelledFunctionStatement, ok);
- } else {
- if (legacy) {
- ++use_counts_[v8::Isolate::kLegacyFunctionDeclaration];
- }
- // Make a block around the statement for a lexical binding
- // is introduced by a FunctionDeclaration.
- BlockState block_state(&scope_state_);
- block_state.set_start_position(scanner()->location().beg_pos);
- Block* block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
- Statement* body = ParseFunctionDeclaration(CHECK_OK);
- block->statements()->Add(body, zone());
- block_state.set_end_position(scanner()->location().end_pos);
- block->set_scope(block_state.FinalizedBlockScope());
- return block;
- }
-}
-
-Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
- bool* ok) {
- int stmt_pos = peek_position();
- Statement* init = NULL;
- ZoneList<const AstRawString*> bound_names(1, zone());
- bool bound_names_are_lexical = false;
-
- // Create an in-between scope for let-bound iteration variables.
- BlockState for_state(&scope_state_);
- Expect(Token::FOR, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- for_state.set_start_position(scanner()->location().beg_pos);
- for_state.set_is_hidden();
- DeclarationParsingResult parsing_result;
- if (peek() != Token::SEMICOLON) {
- if (peek() == Token::VAR || peek() == Token::CONST ||
- (peek() == Token::LET && IsNextLetKeyword())) {
- ParseVariableDeclarations(kForStatement, &parsing_result, nullptr,
- CHECK_OK);
-
- ForEachStatement::VisitMode mode = ForEachStatement::ENUMERATE;
- int each_beg_pos = scanner()->location().beg_pos;
- int each_end_pos = scanner()->location().end_pos;
-
- if (CheckInOrOf(&mode, ok)) {
- if (!*ok) return nullptr;
- if (parsing_result.declarations.length() != 1) {
- ReportMessageAt(parsing_result.bindings_loc,
- MessageTemplate::kForInOfLoopMultiBindings,
- ForEachStatement::VisitModeString(mode));
- *ok = false;
- return nullptr;
- }
- DeclarationParsingResult::Declaration& decl =
- parsing_result.declarations[0];
- if (parsing_result.first_initializer_loc.IsValid() &&
- (is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
- IsLexicalVariableMode(parsing_result.descriptor.mode) ||
- !decl.pattern->IsVariableProxy() || allow_harmony_for_in())) {
- // Only increment the use count if we would have let this through
- // without the flag.
- if (allow_harmony_for_in()) {
- ++use_counts_[v8::Isolate::kForInInitializer];
- }
- ReportMessageAt(parsing_result.first_initializer_loc,
- MessageTemplate::kForInOfLoopInitializer,
- ForEachStatement::VisitModeString(mode));
- *ok = false;
- return nullptr;
- }
-
- Block* init_block = nullptr;
- bound_names_are_lexical =
- IsLexicalVariableMode(parsing_result.descriptor.mode);
-
- // special case for legacy for (var ... = ... in ...)
- if (!bound_names_are_lexical && decl.pattern->IsVariableProxy() &&
- decl.initializer != nullptr) {
- DCHECK(!allow_harmony_for_in());
- ++use_counts_[v8::Isolate::kForInInitializer];
- const AstRawString* name =
- decl.pattern->AsVariableProxy()->raw_name();
- VariableProxy* single_var = NewUnresolved(name);
- init_block = factory()->NewBlock(
- nullptr, 2, true, parsing_result.descriptor.declaration_pos);
- init_block->statements()->Add(
- factory()->NewExpressionStatement(
- factory()->NewAssignment(Token::ASSIGN, single_var,
- decl.initializer, kNoSourcePosition),
- kNoSourcePosition),
- zone());
- }
-
- // Rewrite a for-in/of statement of the form
- //
- // for (let/const/var x in/of e) b
- //
- // into
- //
- // {
- // <let x' be a temporary variable>
- // for (x' in/of e) {
- // let/const/var x;
- // x = x';
- // b;
- // }
- // let x; // for TDZ
- // }
-
- Variable* temp = NewTemporary(ast_value_factory()->dot_for_string());
- ForEachStatement* loop =
- factory()->NewForEachStatement(mode, labels, stmt_pos);
- Target target(&this->target_stack_, loop);
-
- int each_keyword_position = scanner()->location().beg_pos;
-
- Expression* enumerable;
- if (mode == ForEachStatement::ITERATE) {
- ExpressionClassifier classifier(this);
- enumerable = ParseAssignmentExpression(true, &classifier, CHECK_OK);
- RewriteNonPattern(&classifier, CHECK_OK);
- } else {
- enumerable = ParseExpression(true, CHECK_OK);
- }
-
- Expect(Token::RPAREN, CHECK_OK);
-
-
- Block* body_block =
- factory()->NewBlock(NULL, 3, false, kNoSourcePosition);
-
- Statement* final_loop;
- {
- ReturnExprScope no_tail_calls(function_state_,
- ReturnExprContext::kInsideForInOfBody);
- BlockState block_state(&scope_state_);
- block_state.set_start_position(scanner()->location().beg_pos);
-
- Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
-
- auto each_initialization_block =
- factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
- {
- auto descriptor = parsing_result.descriptor;
- descriptor.declaration_pos = kNoSourcePosition;
- descriptor.initialization_pos = kNoSourcePosition;
- decl.initializer = factory()->NewVariableProxy(temp);
-
- bool is_for_var_of =
- mode == ForEachStatement::ITERATE &&
- parsing_result.descriptor.mode == VariableMode::VAR;
-
- PatternRewriter::DeclareAndInitializeVariables(
- each_initialization_block, &descriptor, &decl,
- bound_names_are_lexical || is_for_var_of ? &bound_names
- : nullptr,
- CHECK_OK);
-
- // Annex B.3.5 prohibits the form
- // `try {} catch(e) { for (var e of {}); }`
- // So if we are parsing a statement like `for (var ... of ...)`
- // we need to walk up the scope chain and look for catch scopes
- // which have a simple binding, then compare their binding against
- // all of the names declared in the init of the for-of we're
- // parsing.
- if (is_for_var_of) {
- Scope* catch_scope = scope();
- while (catch_scope != nullptr &&
- !catch_scope->is_declaration_scope()) {
- if (catch_scope->is_catch_scope()) {
- auto name = catch_scope->catch_variable_name();
- if (name !=
- ast_value_factory()
- ->dot_catch_string()) { // i.e. is a simple binding
- if (bound_names.Contains(name)) {
- ReportMessageAt(parsing_result.bindings_loc,
- MessageTemplate::kVarRedeclaration, name);
- *ok = false;
- return nullptr;
- }
- }
- }
- catch_scope = catch_scope->outer_scope();
- }
- }
- }
-
- body_block->statements()->Add(each_initialization_block, zone());
- body_block->statements()->Add(body, zone());
- VariableProxy* temp_proxy =
- factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
- final_loop = InitializeForEachStatement(
- loop, temp_proxy, enumerable, body_block, each_keyword_position);
- block_state.set_end_position(scanner()->location().end_pos);
- body_block->set_scope(block_state.FinalizedBlockScope());
- }
-
- // Create a TDZ for any lexically-bound names.
- if (bound_names_are_lexical) {
- DCHECK_NULL(init_block);
-
- init_block =
- factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
-
- for (int i = 0; i < bound_names.length(); ++i) {
- // TODO(adamk): This needs to be some sort of special
- // INTERNAL variable that's invisible to the debugger
- // but visible to everything else.
- Declaration* tdz_decl = DeclareVariable(
- bound_names[i], LET, kNoSourcePosition, CHECK_OK);
- tdz_decl->proxy()->var()->set_initializer_position(position());
- }
- }
-
- for_state.set_end_position(scanner()->location().end_pos);
- Scope* for_scope = for_state.FinalizedBlockScope();
- // Parsed for-in loop w/ variable declarations.
- if (init_block != nullptr) {
- init_block->statements()->Add(final_loop, zone());
- init_block->set_scope(for_scope);
- return init_block;
- } else {
- DCHECK_NULL(for_scope);
- return final_loop;
- }
- } else {
- bound_names_are_lexical =
- IsLexicalVariableMode(parsing_result.descriptor.mode);
- init = parsing_result.BuildInitializationBlock(
- bound_names_are_lexical ? &bound_names : nullptr, CHECK_OK);
- }
- } else {
- int lhs_beg_pos = peek_position();
- ExpressionClassifier classifier(this);
- Expression* expression = ParseExpression(false, &classifier, CHECK_OK);
- int lhs_end_pos = scanner()->location().end_pos;
- ForEachStatement::VisitMode mode = ForEachStatement::ENUMERATE;
-
- bool is_for_each = CheckInOrOf(&mode, CHECK_OK);
- bool is_destructuring = is_for_each && (expression->IsArrayLiteral() ||
- expression->IsObjectLiteral());
-
- if (is_destructuring) {
- ValidateAssignmentPattern(&classifier, CHECK_OK);
- } else {
- RewriteNonPattern(&classifier, CHECK_OK);
- }
-
- if (is_for_each) {
- if (!is_destructuring) {
- expression = this->CheckAndRewriteReferenceExpression(
- expression, lhs_beg_pos, lhs_end_pos,
- MessageTemplate::kInvalidLhsInFor, kSyntaxError, CHECK_OK);
- }
-
- ForEachStatement* loop =
- factory()->NewForEachStatement(mode, labels, stmt_pos);
- Target target(&this->target_stack_, loop);
-
- int each_keyword_position = scanner()->location().beg_pos;
-
- Expression* enumerable;
- if (mode == ForEachStatement::ITERATE) {
- ExpressionClassifier classifier(this);
- enumerable = ParseAssignmentExpression(true, &classifier, CHECK_OK);
- RewriteNonPattern(&classifier, CHECK_OK);
- } else {
- enumerable = ParseExpression(true, CHECK_OK);
- }
-
- Expect(Token::RPAREN, CHECK_OK);
-
- // For legacy compat reasons, give for loops similar treatment to
- // if statements in allowing a function declaration for a body
- Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
- Statement* final_loop = InitializeForEachStatement(
- loop, expression, enumerable, body, each_keyword_position);
-
- DCHECK_NULL(for_state.FinalizedBlockScope());
- return final_loop;
-
- } else {
- init = factory()->NewExpressionStatement(expression, lhs_beg_pos);
- }
- }
- }
-
- // Standard 'for' loop
- ForStatement* loop = factory()->NewForStatement(labels, stmt_pos);
- Target target(&this->target_stack_, loop);
-
- // Parsed initializer at this point.
- Expect(Token::SEMICOLON, CHECK_OK);
-
- Expression* cond = NULL;
- Statement* next = NULL;
- Statement* body = NULL;
-
- // If there are let bindings, then condition and the next statement of the
- // for loop must be parsed in a new scope.
- Scope* inner_scope = scope();
- // TODO(verwaest): Allocate this through a ScopeState as well.
- if (bound_names_are_lexical && bound_names.length() > 0) {
- inner_scope = NewScopeWithParent(inner_scope, BLOCK_SCOPE);
- inner_scope->set_start_position(scanner()->location().beg_pos);
- }
- {
- BlockState block_state(&scope_state_, inner_scope);
-
- if (peek() != Token::SEMICOLON) {
- cond = ParseExpression(true, CHECK_OK);
- }
- Expect(Token::SEMICOLON, CHECK_OK);
-
- if (peek() != Token::RPAREN) {
- Expression* exp = ParseExpression(true, CHECK_OK);
- next = factory()->NewExpressionStatement(exp, exp->position());
- }
- Expect(Token::RPAREN, CHECK_OK);
-
- body = ParseScopedStatement(NULL, true, CHECK_OK);
- }
-
- Statement* result = NULL;
- if (bound_names_are_lexical && bound_names.length() > 0) {
- result = DesugarLexicalBindingsInForStatement(
- inner_scope, parsing_result.descriptor.mode, &bound_names, loop, init,
- cond, next, body, CHECK_OK);
- for_state.set_end_position(scanner()->location().end_pos);
- } else {
- for_state.set_end_position(scanner()->location().end_pos);
- Scope* for_scope = for_state.FinalizedBlockScope();
- if (for_scope) {
- // Rewrite a for statement of the form
- // for (const x = i; c; n) b
- //
- // into
- //
- // {
- // const x = i;
- // for (; c; n) b
- // }
- //
- // or, desugar
- // for (; c; n) b
- // into
- // {
- // for (; c; n) b
- // }
- // just in case b introduces a lexical binding some other way, e.g., if b
- // is a FunctionDeclaration.
- Block* block = factory()->NewBlock(NULL, 2, false, kNoSourcePosition);
- if (init != nullptr) {
- block->statements()->Add(init, zone());
- }
- block->statements()->Add(loop, zone());
- block->set_scope(for_scope);
- loop->Initialize(NULL, cond, next, body);
- result = block;
- } else {
- loop->Initialize(init, cond, next, body);
- result = loop;
- }
- }
- return result;
-}
-
-
-DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
- // In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
- // contexts this is used as a statement which invokes the debugger as i a
- // break point is present.
- // DebuggerStatement ::
- // 'debugger' ';'
-
- int pos = peek_position();
- Expect(Token::DEBUGGER, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return factory()->NewDebuggerStatement(pos);
-}
-
-
-bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
- if (expression->IsLiteral()) return true;
- MaterializedLiteral* lit = expression->AsMaterializedLiteral();
- return lit != NULL && lit->is_simple();
-}
-
-
-Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
- Expression* expression) {
- Factory* factory = isolate->factory();
- DCHECK(IsCompileTimeValue(expression));
- Handle<FixedArray> result = factory->NewFixedArray(2, TENURED);
- ObjectLiteral* object_literal = expression->AsObjectLiteral();
- if (object_literal != NULL) {
- DCHECK(object_literal->is_simple());
- if (object_literal->fast_elements()) {
- result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
- } else {
- result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_SLOW_ELEMENTS));
- }
- result->set(kElementsSlot, *object_literal->constant_properties());
- } else {
- ArrayLiteral* array_literal = expression->AsArrayLiteral();
- DCHECK(array_literal != NULL && array_literal->is_simple());
- result->set(kLiteralTypeSlot, Smi::FromInt(ARRAY_LITERAL));
- result->set(kElementsSlot, *array_literal->constant_elements());
- }
- return result;
-}
-
-
-CompileTimeValue::LiteralType CompileTimeValue::GetLiteralType(
- Handle<FixedArray> value) {
- Smi* literal_type = Smi::cast(value->get(kLiteralTypeSlot));
- return static_cast<LiteralType>(literal_type->value());
-}
-
-
-Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
- return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
-}
-
-void Parser::ParseArrowFunctionFormalParameters(
+void Parser::AddArrowFunctionFormalParameters(
ParserFormalParameters* parameters, Expression* expr, int end_pos,
bool* ok) {
// ArrowFunctionFormals ::
@@ -3929,8 +2445,8 @@ void Parser::ParseArrowFunctionFormalParameters(
Expression* left = binop->left();
Expression* right = binop->right();
int comma_pos = binop->position();
- ParseArrowFunctionFormalParameters(parameters, left, comma_pos,
- CHECK_OK_VOID);
+ AddArrowFunctionFormalParameters(parameters, left, comma_pos,
+ CHECK_OK_VOID);
// LHS of comma expression should be unparenthesized.
expr = right;
}
@@ -3958,80 +2474,14 @@ void Parser::ParseArrowFunctionFormalParameters(
AddFormalParameter(parameters, expr, initializer, end_pos, is_rest);
}
-void Parser::DesugarAsyncFunctionBody(const AstRawString* function_name,
- Scope* scope, ZoneList<Statement*>* body,
- ExpressionClassifier* classifier,
- FunctionKind kind,
- FunctionBodyType body_type,
- bool accept_IN, int pos, bool* ok) {
- // function async_function() {
- // try {
- // .generator_object = %CreateGeneratorObject();
- // ... function body ...
- // } catch (e) {
- // return Promise.reject(e);
- // }
- // }
- scope->ForceContextAllocation();
- Variable* temp =
- NewTemporary(ast_value_factory()->dot_generator_object_string());
- function_state_->set_generator_object_variable(temp);
-
- Expression* init_generator_variable = factory()->NewAssignment(
- Token::INIT, factory()->NewVariableProxy(temp),
- BuildCreateJSGeneratorObject(pos, kind), kNoSourcePosition);
- body->Add(factory()->NewExpressionStatement(init_generator_variable,
- kNoSourcePosition),
- zone());
-
- Block* try_block = factory()->NewBlock(NULL, 8, true, kNoSourcePosition);
-
- ZoneList<Statement*>* inner_body = try_block->statements();
-
- Expression* return_value = nullptr;
- if (body_type == FunctionBodyType::kNormal) {
- ParseStatementList(inner_body, Token::RBRACE, CHECK_OK_VOID);
- return_value = factory()->NewUndefinedLiteral(kNoSourcePosition);
- } else {
- return_value =
- ParseAssignmentExpression(accept_IN, classifier, CHECK_OK_VOID);
- RewriteNonPattern(classifier, CHECK_OK_VOID);
- }
-
- return_value = BuildPromiseResolve(return_value, return_value->position());
- inner_body->Add(
- factory()->NewReturnStatement(return_value, return_value->position()),
- zone());
- body->Add(BuildRejectPromiseOnException(try_block), zone());
- scope->set_end_position(scanner()->location().end_pos);
-}
-
-DoExpression* Parser::ParseDoExpression(bool* ok) {
- // AssignmentExpression ::
- // do '{' StatementList '}'
- int pos = peek_position();
-
- Expect(Token::DO, CHECK_OK);
- Variable* result = NewTemporary(ast_value_factory()->dot_result_string());
- Block* block = ParseBlock(nullptr, CHECK_OK);
- DoExpression* expr = factory()->NewDoExpression(block, result, pos);
- if (!Rewriter::Rewrite(this, GetClosureScope(), expr, ast_value_factory())) {
- *ok = false;
- return nullptr;
- }
- return expr;
-}
-
-void ParserBaseTraits<Parser>::ParseArrowFunctionFormalParameterList(
+void Parser::DeclareArrowFunctionFormalParameters(
ParserFormalParameters* parameters, Expression* expr,
const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
- const Scope::Snapshot& scope_snapshot, bool* ok) {
+ bool* ok) {
if (expr->IsEmptyParentheses()) return;
- delegate()->ParseArrowFunctionFormalParameters(
- parameters, expr, params_loc.end_pos, CHECK_OK_VOID);
-
- scope_snapshot.Reparent(parameters->scope);
+ AddArrowFunctionFormalParameters(parameters, expr, params_loc.end_pos,
+ CHECK_OK_VOID);
if (parameters->Arity() > Code::kMaxArguments) {
ReportMessageAt(params_loc, MessageTemplate::kMalformedArrowFunParamList);
@@ -4039,23 +2489,25 @@ void ParserBaseTraits<Parser>::ParseArrowFunctionFormalParameterList(
return;
}
- Type::ExpressionClassifier classifier(delegate());
+ ExpressionClassifier classifier(this);
if (!parameters->is_simple) {
- classifier.RecordNonSimpleParameter();
+ this->classifier()->RecordNonSimpleParameter();
}
for (int i = 0; i < parameters->Arity(); ++i) {
auto parameter = parameters->at(i);
- DeclareFormalParameter(parameters->scope, parameter, &classifier);
- if (!duplicate_loc->IsValid()) {
- *duplicate_loc = classifier.duplicate_formal_parameter_error().location;
+ DeclareFormalParameter(parameters->scope, parameter);
+ if (!this->classifier()
+ ->is_valid_formal_parameter_list_without_duplicates() &&
+ !duplicate_loc->IsValid()) {
+ *duplicate_loc =
+ this->classifier()->duplicate_formal_parameter_error().location;
}
}
DCHECK_EQ(parameters->is_simple, parameters->scope->has_simple_parameters());
}
-void ParserBaseTraits<Parser>::ReindexLiterals(
- const ParserFormalParameters& parameters) {
- if (delegate()->function_state_->materialized_literal_count() > 0) {
+void Parser::ReindexLiterals(const ParserFormalParameters& parameters) {
+ if (function_state_->materialized_literal_count() > 0) {
AstLiteralReindexer reindexer;
for (const auto p : parameters.params) {
@@ -4063,11 +2515,24 @@ void ParserBaseTraits<Parser>::ReindexLiterals(
if (p.initializer != nullptr) reindexer.Reindex(p.initializer);
}
- DCHECK(reindexer.count() <=
- delegate()->function_state_->materialized_literal_count());
+ DCHECK(reindexer.count() <= function_state_->materialized_literal_count());
}
}
+void Parser::PrepareGeneratorVariables(FunctionState* function_state) {
+ // For generators, allocating variables in contexts is currently a win
+ // because it minimizes the work needed to suspend and resume an
+ // activation. The machine code produced for generators (by full-codegen)
+ // relies on this forced context allocation, but not in an essential way.
+ scope()->ForceContextAllocation();
+
+ // Calling a generator returns a generator object. That object is stored
+ // in a temporary variable, a definition that is used by "yield"
+ // expressions.
+ Variable* temp =
+ NewTemporary(ast_value_factory()->dot_generator_object_string());
+ function_state->set_generator_object_variable(temp);
+}
FunctionLiteral* Parser::ParseFunctionLiteral(
const AstRawString* function_name, Scanner::Location function_name_location,
@@ -4119,7 +2584,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// These are all things we can know at this point, without looking at the
// function itself.
- // In addition, we need to distinguish between these cases:
+ // We separate between lazy parsing top level functions and lazy parsing inner
+ // functions, because the latter needs to do more work. In particular, we need
+ // to track unresolved variables to distinguish between these cases:
// (function foo() {
// bar = function() { return 1; }
// })();
@@ -4131,17 +2598,18 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Now foo will be parsed eagerly and compiled eagerly (optimization: assume
// parenthesis before the function means that it will be called
- // immediately). The inner function *must* be parsed eagerly to resolve the
- // possible reference to the variable in foo's scope. However, it's possible
- // that it will be compiled lazily.
-
- // To make this additional case work, both Parser and PreParser implement a
- // logic where only top-level functions will be parsed lazily.
- bool is_lazily_parsed = mode() == PARSE_LAZILY &&
- this->scope()->AllowsLazyParsing() &&
- !function_state_->next_function_is_parenthesized();
-
- // Determine whether the function body can be discarded after parsing.
+ // immediately). bar can be parsed lazily, but we need to parse it in a mode
+ // that tracks unresolved variables.
+ DCHECK_IMPLIES(mode() == PARSE_LAZILY, FLAG_lazy);
+ DCHECK_IMPLIES(mode() == PARSE_LAZILY, allow_lazy());
+ DCHECK_IMPLIES(mode() == PARSE_LAZILY, extension_ == nullptr);
+
+ bool is_lazy_top_level_function =
+ mode() == PARSE_LAZILY &&
+ eager_compile_hint == FunctionLiteral::kShouldLazyCompile &&
+ scope()->AllowsLazyParsingWithoutUnresolvedVariables();
+
+ // Determine whether we can still lazy parse the inner function.
// The preconditions are:
// - Lazy compilation has to be enabled.
// - Neither V8 natives nor native function declarations can be allowed,
@@ -4156,18 +2624,20 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// - The function literal shouldn't be hinted to eagerly compile.
// - For asm.js functions the body needs to be available when module
// validation is active, because we examine the entire module at once.
+
+ // Inner functions will be parsed using a temporary Zone. After parsing, we
+ // will migrate unresolved variable into a Scope in the main Zone.
+ // TODO(marja): Refactor parsing modes: simplify this.
bool use_temp_zone =
- !is_lazily_parsed && FLAG_lazy && !allow_natives() &&
- extension_ == NULL && allow_lazy() &&
- function_type == FunctionLiteral::kDeclaration &&
+ allow_lazy() && function_type == FunctionLiteral::kDeclaration &&
eager_compile_hint != FunctionLiteral::kShouldEagerCompile &&
!(FLAG_validate_asm && scope()->IsAsmModule());
+ bool is_lazy_inner_function =
+ use_temp_zone && FLAG_lazy_inner_functions && !is_lazy_top_level_function;
- DeclarationScope* main_scope = nullptr;
- if (use_temp_zone) {
- // This Scope lives in the main Zone; we'll migrate data into it later.
- main_scope = NewFunctionScope(kind);
- }
+ // This Scope lives in the main zone. We'll migrate data into that zone later.
+ DeclarationScope* scope = NewFunctionScope(kind);
+ SetLanguageMode(scope, language_mode);
ZoneList<Statement*>* body = nullptr;
int arity = -1;
@@ -4177,6 +2647,32 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
bool should_be_used_once_hint = false;
bool has_duplicate_parameters;
+ FunctionState function_state(&function_state_, &scope_state_, scope);
+#ifdef DEBUG
+ scope->SetScopeName(function_name);
+#endif
+
+ ExpressionClassifier formals_classifier(this, &duplicate_finder);
+
+ if (is_generator) PrepareGeneratorVariables(&function_state);
+
+ Expect(Token::LPAREN, CHECK_OK);
+ int start_position = scanner()->location().beg_pos;
+ this->scope()->set_start_position(start_position);
+ ParserFormalParameters formals(scope);
+ ParseFormalParameterList(&formals, CHECK_OK);
+ arity = formals.Arity();
+ Expect(Token::RPAREN, CHECK_OK);
+ int formals_end_position = scanner()->location().end_pos;
+
+ CheckArityRestrictions(arity, kind, formals.has_rest, start_position,
+ formals_end_position, CHECK_OK);
+ Expect(Token::LBRACE, CHECK_OK);
+ // Don't include the rest parameter into the function's formal parameter
+ // count (esp. the SharedFunctionInfo::internal_formal_parameter_count,
+ // which says whether we need to create an arguments adaptor frame).
+ if (formals.has_rest) arity--;
+
{
// Temporary zones can nest. When we migrate free variables (see below), we
// need to recreate them in the previous Zone.
@@ -4187,94 +2683,58 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// new temporary zone if the preconditions are satisfied, and ensures that
// the previous zone is always restored after parsing the body. To be able
// to do scope analysis correctly after full parsing, we migrate needed
- // information from scope into main_scope when the function has been parsed.
+ // information when the function is parsed.
Zone temp_zone(zone()->allocator());
DiscardableZoneScope zone_scope(this, &temp_zone, use_temp_zone);
-
- DeclarationScope* scope = NewFunctionScope(kind);
- SetLanguageMode(scope, language_mode);
- if (!use_temp_zone) {
- main_scope = scope;
- } else {
- DCHECK(main_scope->zone() != scope->zone());
- }
-
- FunctionState function_state(&function_state_, &scope_state_, scope, kind);
#ifdef DEBUG
- scope->SetScopeName(function_name);
+ if (use_temp_zone) scope->set_needs_migration();
#endif
- ExpressionClassifier formals_classifier(this, &duplicate_finder);
-
- if (is_generator) {
- // For generators, allocating variables in contexts is currently a win
- // because it minimizes the work needed to suspend and resume an
- // activation. The machine code produced for generators (by full-codegen)
- // relies on this forced context allocation, but not in an essential way.
- this->scope()->ForceContextAllocation();
-
- // Calling a generator returns a generator object. That object is stored
- // in a temporary variable, a definition that is used by "yield"
- // expressions. This also marks the FunctionState as a generator.
- Variable* temp =
- NewTemporary(ast_value_factory()->dot_generator_object_string());
- function_state.set_generator_object_variable(temp);
- }
- Expect(Token::LPAREN, CHECK_OK);
- int start_position = scanner()->location().beg_pos;
- this->scope()->set_start_position(start_position);
- ParserFormalParameters formals(scope);
- ParseFormalParameterList(&formals, &formals_classifier, CHECK_OK);
- arity = formals.Arity();
- Expect(Token::RPAREN, CHECK_OK);
- int formals_end_position = scanner()->location().end_pos;
-
- CheckArityRestrictions(arity, kind, formals.has_rest, start_position,
- formals_end_position, CHECK_OK);
- Expect(Token::LBRACE, CHECK_OK);
- // Don't include the rest parameter into the function's formal parameter
- // count (esp. the SharedFunctionInfo::internal_formal_parameter_count,
- // which says whether we need to create an arguments adaptor frame).
- if (formals.has_rest) arity--;
-
- // Eager or lazy parse?
- // If is_lazily_parsed, we'll parse lazy. If we can set a bookmark, we'll
- // pass it to SkipLazyFunctionBody, which may use it to abort lazy
- // parsing if it suspect that wasn't a good idea. If so, or if we didn't
- // try to lazy parse in the first place, we'll have to parse eagerly.
- Scanner::BookmarkScope bookmark(scanner());
- if (is_lazily_parsed) {
- Scanner::BookmarkScope* maybe_bookmark =
- bookmark.Set() ? &bookmark : nullptr;
- SkipLazyFunctionBody(&materialized_literal_count,
- &expected_property_count, /*CHECK_OK*/ ok,
- maybe_bookmark);
+ // Eager or lazy parse? If is_lazy_top_level_function, we'll parse
+ // lazily. We'll call SkipLazyFunctionBody, which may decide to abort lazy
+ // parsing if it suspects that wasn't a good idea. If so (in which case the
+ // parser is expected to have backtracked), or if we didn't try to lazy
+ // parse in the first place, we'll have to parse eagerly.
+ if (is_lazy_top_level_function || is_lazy_inner_function) {
+ Scanner::BookmarkScope bookmark(scanner());
+ bookmark.Set();
+ LazyParsingResult result = SkipLazyFunctionBody(
+ &materialized_literal_count, &expected_property_count,
+ is_lazy_inner_function, is_lazy_top_level_function, CHECK_OK);
materialized_literal_count += formals.materialized_literals_count +
function_state.materialized_literal_count();
- if (bookmark.HasBeenReset()) {
+ if (result == kLazyParsingAborted) {
+ DCHECK(is_lazy_top_level_function);
+ bookmark.Apply();
// Trigger eager (re-)parsing, just below this block.
- is_lazily_parsed = false;
+ is_lazy_top_level_function = false;
// This is probably an initialization function. Inform the compiler it
// should also eager-compile this function, and that we expect it to be
// used once.
eager_compile_hint = FunctionLiteral::kShouldEagerCompile;
should_be_used_once_hint = true;
+ scope->ResetAfterPreparsing(ast_value_factory(), true);
+ zone_scope.Reset();
+ use_temp_zone = false;
}
}
- if (!is_lazily_parsed) {
+
+ if (!is_lazy_top_level_function && !is_lazy_inner_function) {
body = ParseEagerFunctionBody(function_name, pos, formals, kind,
function_type, CHECK_OK);
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
- if (use_temp_zone) {
- // If the preconditions are correct the function body should never be
- // accessed, but do this anyway for better behaviour if they're wrong.
- body = nullptr;
- }
+ }
+
+ if (use_temp_zone || is_lazy_top_level_function) {
+ // If the preconditions are correct the function body should never be
+ // accessed, but do this anyway for better behaviour if they're wrong.
+ body = nullptr;
+ scope->AnalyzePartially(&previous_zone_ast_node_factory);
}
// Parsing the body may change the language mode in our scope.
@@ -4286,13 +2746,13 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_name_location, CHECK_OK);
const bool allow_duplicate_parameters =
is_sloppy(language_mode) && formals.is_simple && !IsConciseMethod(kind);
- ValidateFormalParameters(&formals_classifier, language_mode,
- allow_duplicate_parameters, CHECK_OK);
+ ValidateFormalParameters(language_mode, allow_duplicate_parameters,
+ CHECK_OK);
if (is_strict(language_mode)) {
CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
CHECK_OK);
- CheckDecimalLiteralWithLeadingZero(use_counts_, scope->start_position(),
+ CheckDecimalLiteralWithLeadingZero(scope->start_position(),
scope->end_position());
}
CheckConflictingVarDeclarations(scope, CHECK_OK);
@@ -4302,12 +2762,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
RewriteDestructuringAssignments();
}
has_duplicate_parameters =
- !formals_classifier.is_valid_formal_parameter_list_without_duplicates();
-
- if (use_temp_zone) {
- DCHECK(main_scope != scope);
- scope->AnalyzePartially(main_scope, &previous_zone_ast_node_factory);
- }
+ !classifier()->is_valid_formal_parameter_list_without_duplicates();
} // DiscardableZoneScope goes out of scope.
FunctionLiteral::ParameterFlag duplicate_parameters =
@@ -4316,53 +2771,31 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Note that the FunctionLiteral needs to be created in the main Zone again.
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
- function_name, main_scope, body, materialized_literal_count,
+ function_name, scope, body, materialized_literal_count,
expected_property_count, arity, duplicate_parameters, function_type,
- eager_compile_hint, kind, pos);
+ eager_compile_hint, pos);
function_literal->set_function_token_position(function_token_pos);
if (should_be_used_once_hint)
function_literal->set_should_be_used_once_hint();
- if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
+ if (should_infer_name) {
+ DCHECK_NOT_NULL(fni_);
+ fni_->AddFunction(function_literal);
+ }
return function_literal;
}
-Expression* Parser::ParseAsyncFunctionExpression(bool* ok) {
- // AsyncFunctionDeclaration ::
- // async [no LineTerminator here] function ( FormalParameters[Await] )
- // { AsyncFunctionBody }
- //
- // async [no LineTerminator here] function BindingIdentifier[Await]
- // ( FormalParameters[Await] ) { AsyncFunctionBody }
- DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
- int pos = position();
- Expect(Token::FUNCTION, CHECK_OK);
- bool is_strict_reserved = false;
- const AstRawString* name = nullptr;
- FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
-
- if (peek_any_identifier()) {
- type = FunctionLiteral::kNamedExpression;
- name = ParseIdentifierOrStrictReservedWord(FunctionKind::kAsyncFunction,
- &is_strict_reserved, CHECK_OK);
- }
- return ParseFunctionLiteral(name, scanner()->location(),
- is_strict_reserved ? kFunctionNameIsStrictReserved
- : kFunctionNameValidityUnknown,
- FunctionKind::kAsyncFunction, pos, type,
- language_mode(), CHECK_OK);
-}
-
-void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
- int* expected_property_count, bool* ok,
- Scanner::BookmarkScope* bookmark) {
- DCHECK_IMPLIES(bookmark, bookmark->HasBeenSet());
+Parser::LazyParsingResult Parser::SkipLazyFunctionBody(
+ int* materialized_literal_count, int* expected_property_count,
+ bool is_inner_function, bool may_abort, bool* ok) {
if (produce_cached_parse_data()) CHECK(log_);
int function_block_pos = position();
- DeclarationScope* scope = this->scope()->AsDeclarationScope();
+ DeclarationScope* scope = function_state_->scope();
DCHECK(scope->is_function_scope());
- if (consume_cached_parse_data() && !cached_parse_data_->rejected()) {
+ // Inner functions are not part of the cached data.
+ if (!is_inner_function && consume_cached_parse_data() &&
+ !cached_parse_data_->rejected()) {
// If we have cached data, we use it to skip parsing the function body. The
// data contains the information we need to construct the lazy function.
FunctionEntry entry =
@@ -4374,14 +2807,14 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
scanner()->SeekForward(entry.end_pos() - 1);
scope->set_end_position(entry.end_pos());
- Expect(Token::RBRACE, CHECK_OK_VOID);
+ Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
total_preparse_skipped_ += scope->end_position() - function_block_pos;
*materialized_literal_count = entry.literal_count();
*expected_property_count = entry.property_count();
SetLanguageMode(scope, entry.language_mode());
if (entry.uses_super_property()) scope->RecordSuperPropertyUsage();
if (entry.calls_eval()) scope->RecordEvalCall();
- return;
+ return kLazyParsingComplete;
}
cached_parse_data_->Reject();
}
@@ -4389,32 +2822,32 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
// AST. This gathers the data needed to build a lazy function.
SingletonLogger logger;
PreParser::PreParseResult result =
- ParseLazyFunctionBodyWithPreParser(&logger, bookmark);
- if (bookmark && bookmark->HasBeenReset()) {
- return; // Return immediately if pre-parser devided to abort parsing.
- }
+ ParseLazyFunctionBodyWithPreParser(&logger, is_inner_function, may_abort);
+
+ // Return immediately if pre-parser decided to abort parsing.
+ if (result == PreParser::kPreParseAbort) return kLazyParsingAborted;
if (result == PreParser::kPreParseStackOverflow) {
// Propagate stack overflow.
set_stack_overflow();
*ok = false;
- return;
+ return kLazyParsingComplete;
}
if (logger.has_error()) {
ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
logger.message(), logger.argument_opt(),
logger.error_type());
*ok = false;
- return;
+ return kLazyParsingComplete;
}
scope->set_end_position(logger.end());
- Expect(Token::RBRACE, CHECK_OK_VOID);
+ Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
total_preparse_skipped_ += scope->end_position() - function_block_pos;
*materialized_literal_count = logger.literals();
*expected_property_count = logger.properties();
SetLanguageMode(scope, logger.language_mode());
if (logger.uses_super_property()) scope->RecordSuperPropertyUsage();
if (logger.calls_eval()) scope->RecordEvalCall();
- if (produce_cached_parse_data()) {
+ if (!is_inner_function && produce_cached_parse_data()) {
DCHECK(log_);
// Position right after terminal '}'.
int body_end = scanner()->location().end_pos;
@@ -4422,6 +2855,7 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
*expected_property_count, language_mode(),
scope->uses_super_property(), scope->calls_eval());
}
+ return kLazyParsingComplete;
}
@@ -4438,9 +2872,9 @@ Statement* Parser::BuildAssertIsCoercible(Variable* var) {
Token::EQ_STRICT, factory()->NewVariableProxy(var),
factory()->NewNullLiteral(kNoSourcePosition), kNoSourcePosition),
kNoSourcePosition);
- Expression* throw_type_error = this->NewThrowTypeError(
- MessageTemplate::kNonCoercible, ast_value_factory()->empty_string(),
- kNoSourcePosition);
+ Expression* throw_type_error =
+ NewThrowTypeError(MessageTemplate::kNonCoercible,
+ ast_value_factory()->empty_string(), kNoSourcePosition);
IfStatement* if_statement = factory()->NewIfStatement(
condition,
factory()->NewExpressionStatement(throw_type_error, kNoSourcePosition),
@@ -4495,7 +2929,6 @@ Block* Parser::BuildParameterInitializationBlock(
if (parameter.is_rest && parameter.pattern->IsVariableProxy()) break;
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::PARAMETER;
- descriptor.parser = this;
descriptor.scope = scope();
descriptor.hoist_scope = nullptr;
descriptor.mode = LET;
@@ -4544,8 +2977,8 @@ Block* Parser::BuildParameterInitializationBlock(
BlockState block_state(&scope_state_, param_scope);
DeclarationParsingResult::Declaration decl(
parameter.pattern, parameter.initializer_end_position, initial_value);
- PatternRewriter::DeclareAndInitializeVariables(param_block, &descriptor,
- &decl, nullptr, CHECK_OK);
+ PatternRewriter::DeclareAndInitializeVariables(
+ this, param_block, &descriptor, &decl, nullptr, CHECK_OK);
if (param_block != init_block) {
param_scope = block_state.FinalizedBlockScope();
@@ -4558,28 +2991,74 @@ Block* Parser::BuildParameterInitializationBlock(
return init_block;
}
-Block* Parser::BuildRejectPromiseOnException(Block* block) {
- // try { <block> } catch (error) { return Promise.reject(error); }
- Block* try_block = block;
+Block* Parser::BuildRejectPromiseOnException(Block* inner_block, bool* ok) {
+ // .promise = %AsyncFunctionPromiseCreate();
+ // try {
+ // <inner_block>
+ // } catch (.catch) {
+ // %RejectPromise(.promise, .catch);
+ // return .promise;
+ // } finally {
+ // %AsyncFunctionPromiseRelease(.promise);
+ // }
+ Block* result = factory()->NewBlock(nullptr, 2, true, kNoSourcePosition);
+
+ // .promise = %AsyncFunctionPromiseCreate();
+ Statement* set_promise;
+ {
+ Expression* create_promise = factory()->NewCallRuntime(
+ Context::ASYNC_FUNCTION_PROMISE_CREATE_INDEX,
+ new (zone()) ZoneList<Expression*>(0, zone()), kNoSourcePosition);
+ Assignment* assign_promise = factory()->NewAssignment(
+ Token::INIT, factory()->NewVariableProxy(PromiseVariable()),
+ create_promise, kNoSourcePosition);
+ set_promise =
+ factory()->NewExpressionStatement(assign_promise, kNoSourcePosition);
+ }
+ result->statements()->Add(set_promise, zone());
+
+ // catch (.catch) { return %RejectPromise(.promise, .catch), .promise }
Scope* catch_scope = NewScope(CATCH_SCOPE);
catch_scope->set_is_hidden();
Variable* catch_variable =
catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
- kCreatedInitialized, Variable::NORMAL);
+ kCreatedInitialized, NORMAL_VARIABLE);
Block* catch_block = factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
- Expression* promise_reject = BuildPromiseReject(
+ Expression* promise_reject = BuildRejectPromise(
factory()->NewVariableProxy(catch_variable), kNoSourcePosition);
-
ReturnStatement* return_promise_reject =
factory()->NewReturnStatement(promise_reject, kNoSourcePosition);
catch_block->statements()->Add(return_promise_reject, zone());
- TryStatement* try_catch_statement = factory()->NewTryCatchStatement(
- try_block, catch_scope, catch_variable, catch_block, kNoSourcePosition);
- block = factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
- block->statements()->Add(try_catch_statement, zone());
- return block;
+ TryStatement* try_catch_statement =
+ factory()->NewTryCatchStatementForAsyncAwait(inner_block, catch_scope,
+ catch_variable, catch_block,
+ kNoSourcePosition);
+
+ // There is no TryCatchFinally node, so wrap it in an outer try/finally
+ Block* outer_try_block =
+ factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
+ outer_try_block->statements()->Add(try_catch_statement, zone());
+
+ // finally { %AsyncFunctionPromiseRelease(.promise) }
+ Block* finally_block =
+ factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
+ {
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
+ Expression* call_promise_release = factory()->NewCallRuntime(
+ Context::ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, args, kNoSourcePosition);
+ Statement* promise_release = factory()->NewExpressionStatement(
+ call_promise_release, kNoSourcePosition);
+ finally_block->statements()->Add(promise_release, zone());
+ }
+
+ Statement* try_finally_statement = factory()->NewTryFinallyStatement(
+ outer_try_block, finally_block, kNoSourcePosition);
+
+ result->statements()->Add(try_finally_statement, zone());
+ return result;
}
Expression* Parser::BuildCreateJSGeneratorObject(int pos, FunctionKind kind) {
@@ -4593,26 +3072,68 @@ Expression* Parser::BuildCreateJSGeneratorObject(int pos, FunctionKind kind) {
pos);
}
-Expression* Parser::BuildPromiseResolve(Expression* value, int pos) {
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+Expression* Parser::BuildResolvePromise(Expression* value, int pos) {
+ // %ResolvePromise(.promise, value), .promise
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+ args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
args->Add(value, zone());
- return factory()->NewCallRuntime(Context::PROMISE_CREATE_RESOLVED_INDEX, args,
- pos);
+ Expression* call_runtime =
+ factory()->NewCallRuntime(Context::PROMISE_RESOLVE_INDEX, args, pos);
+ return factory()->NewBinaryOperation(
+ Token::COMMA, call_runtime,
+ factory()->NewVariableProxy(PromiseVariable()), pos);
}
-Expression* Parser::BuildPromiseReject(Expression* value, int pos) {
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+Expression* Parser::BuildRejectPromise(Expression* value, int pos) {
+ // %RejectPromiseNoDebugEvent(.promise, value, true), .promise
+ // The NoDebugEvent variant disables the additional debug event for the
+ // rejection since a debug event already happened for the exception that got
+ // us here.
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+ args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
args->Add(value, zone());
- return factory()->NewCallRuntime(Context::PROMISE_CREATE_REJECTED_INDEX, args,
- pos);
+ Expression* call_runtime = factory()->NewCallRuntime(
+ Context::REJECT_PROMISE_NO_DEBUG_EVENT_INDEX, args, pos);
+ return factory()->NewBinaryOperation(
+ Token::COMMA, call_runtime,
+ factory()->NewVariableProxy(PromiseVariable()), pos);
+}
+
+Variable* Parser::PromiseVariable() {
+ // Based on the various compilation paths, there are many different code
+ // paths which may be the first to access the Promise temporary. Whichever
+ // comes first should create it and stash it in the FunctionState.
+ Variable* promise = function_state_->promise_variable();
+ if (function_state_->promise_variable() == nullptr) {
+ promise = scope()->NewTemporary(ast_value_factory()->empty_string());
+ function_state_->set_promise_variable(promise);
+ }
+ return promise;
+}
+
+Expression* Parser::BuildInitialYield(int pos, FunctionKind kind) {
+ Expression* allocation = BuildCreateJSGeneratorObject(pos, kind);
+ VariableProxy* init_proxy =
+ factory()->NewVariableProxy(function_state_->generator_object_variable());
+ Assignment* assignment = factory()->NewAssignment(
+ Token::INIT, init_proxy, allocation, kNoSourcePosition);
+ VariableProxy* get_proxy =
+ factory()->NewVariableProxy(function_state_->generator_object_variable());
+ // The position of the yield is important for reporting the exception
+ // caused by calling the .throw method on a generator suspended at the
+ // initial yield (i.e. right after generator instantiation).
+ return factory()->NewYield(get_proxy, assignment, scope()->start_position(),
+ Yield::kOnExceptionThrow);
}
ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
const AstRawString* function_name, int pos,
const ParserFormalParameters& parameters, FunctionKind kind,
FunctionLiteral::FunctionType function_type, bool* ok) {
- // Everything inside an eagerly parsed function will be parsed eagerly
- // (see comment above).
+ // Everything inside an eagerly parsed function will be parsed eagerly (see
+ // comment above). Lazy inner functions are handled separately and they won't
+ // require the mode to be PARSE_LAZILY (see ParseFunctionLiteral).
+ // TODO(marja): Refactor parsing modes: remove this.
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
ZoneList<Statement*>* result = new(zone()) ZoneList<Statement*>(8, zone());
@@ -4657,26 +3178,10 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
Block* try_block =
factory()->NewBlock(nullptr, 3, false, kNoSourcePosition);
-
- {
- Expression* allocation = BuildCreateJSGeneratorObject(pos, kind);
- VariableProxy* init_proxy = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
- Assignment* assignment = factory()->NewAssignment(
- Token::INIT, init_proxy, allocation, kNoSourcePosition);
- VariableProxy* get_proxy = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
- // The position of the yield is important for reporting the exception
- // caused by calling the .throw method on a generator suspended at the
- // initial yield (i.e. right after generator instantiation).
- Yield* yield = factory()->NewYield(get_proxy, assignment,
- scope()->start_position(),
- Yield::kOnExceptionThrow);
- try_block->statements()->Add(
- factory()->NewExpressionStatement(yield, kNoSourcePosition),
- zone());
- }
-
+ Expression* initial_yield = BuildInitialYield(pos, kind);
+ try_block->statements()->Add(
+ factory()->NewExpressionStatement(initial_yield, kNoSourcePosition),
+ zone());
ParseStatementList(try_block->statements(), Token::RBRACE, CHECK_OK);
Statement* final_return = factory()->NewReturnStatement(
@@ -4700,16 +3205,15 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
zone());
} else if (IsAsyncFunction(kind)) {
const bool accept_IN = true;
- DesugarAsyncFunctionBody(function_name, inner_scope, body, nullptr, kind,
- FunctionBodyType::kNormal, accept_IN, pos,
- CHECK_OK);
+ ParseAsyncFunctionBody(inner_scope, body, kind, FunctionBodyType::kNormal,
+ accept_IN, pos, CHECK_OK);
} else {
ParseStatementList(body, Token::RBRACE, CHECK_OK);
}
if (IsSubclassConstructor(kind)) {
- body->Add(factory()->NewReturnStatement(
- this->ThisExpression(kNoSourcePosition), kNoSourcePosition),
+ body->Add(factory()->NewReturnStatement(ThisExpression(kNoSourcePosition),
+ kNoSourcePosition),
zone());
}
}
@@ -4726,12 +3230,12 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
Block* init_block = BuildParameterInitializationBlock(parameters, CHECK_OK);
if (is_sloppy(inner_scope->language_mode())) {
- InsertSloppyBlockFunctionVarBindings(inner_scope, function_scope,
- CHECK_OK);
+ InsertSloppyBlockFunctionVarBindings(inner_scope);
}
+ // TODO(littledan): Merge the two rejection blocks into one
if (IsAsyncFunction(kind)) {
- init_block = BuildRejectPromiseOnException(init_block);
+ init_block = BuildRejectPromiseOnException(init_block, CHECK_OK);
}
DCHECK_NOT_NULL(init_block);
@@ -4748,31 +3252,42 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
} else {
DCHECK_EQ(inner_scope, function_scope);
if (is_sloppy(function_scope->language_mode())) {
- InsertSloppyBlockFunctionVarBindings(function_scope, nullptr, CHECK_OK);
+ InsertSloppyBlockFunctionVarBindings(function_scope);
}
}
+ if (!IsArrowFunction(kind)) {
+ // Declare arguments after parsing the function since lexical 'arguments'
+ // masks the arguments object. Declare arguments before declaring the
+ // function var since the arguments object masks 'function arguments'.
+ function_scope->DeclareArguments(ast_value_factory());
+ }
+
if (function_type == FunctionLiteral::kNamedExpression) {
- // Now that we know the language mode, we can create the const assignment
- // in the previously reserved spot.
- DCHECK_EQ(function_scope, scope());
- Variable* fvar = function_scope->DeclareFunctionVar(function_name);
- VariableProxy* fproxy = factory()->NewVariableProxy(fvar);
- result->Set(kFunctionNameAssignmentIndex,
- factory()->NewExpressionStatement(
- factory()->NewAssignment(Token::INIT, fproxy,
- factory()->NewThisFunction(pos),
- kNoSourcePosition),
- kNoSourcePosition));
+ Statement* statement;
+ if (function_scope->LookupLocal(function_name) == nullptr) {
+ // Now that we know the language mode, we can create the const assignment
+ // in the previously reserved spot.
+ DCHECK_EQ(function_scope, scope());
+ Variable* fvar = function_scope->DeclareFunctionVar(function_name);
+ VariableProxy* fproxy = factory()->NewVariableProxy(fvar);
+ statement = factory()->NewExpressionStatement(
+ factory()->NewAssignment(Token::INIT, fproxy,
+ factory()->NewThisFunction(pos),
+ kNoSourcePosition),
+ kNoSourcePosition);
+ } else {
+ statement = factory()->NewEmptyStatement(kNoSourcePosition);
+ }
+ result->Set(kFunctionNameAssignmentIndex, statement);
}
MarkCollectedTailCallExpressions();
return result;
}
-
PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
- SingletonLogger* logger, Scanner::BookmarkScope* bookmark) {
+ SingletonLogger* logger, bool is_inner_function, bool may_abort) {
// This function may be called on a background thread too; record only the
// main thread preparse times.
if (pre_parse_timer_ != NULL) {
@@ -4794,209 +3309,337 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
SET_ALLOW(harmony_restrictive_declarations);
SET_ALLOW(harmony_async_await);
SET_ALLOW(harmony_trailing_commas);
+ SET_ALLOW(harmony_class_fields);
#undef SET_ALLOW
}
+ // Aborting inner function preparsing would leave scopes in an inconsistent
+ // state; we don't parse inner functions in the abortable mode anyway.
+ DCHECK(!is_inner_function || !may_abort);
+
+ DeclarationScope* function_scope = function_state_->scope();
PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
- language_mode(), function_state_->kind(),
- scope()->AsDeclarationScope()->has_simple_parameters(), parsing_module_,
- logger, bookmark, use_counts_);
+ function_scope, parsing_module_, logger, is_inner_function, may_abort,
+ use_counts_);
if (pre_parse_timer_ != NULL) {
pre_parse_timer_->Stop();
}
return result;
}
-Expression* Parser::ParseClassLiteral(ExpressionClassifier* classifier,
- const AstRawString* name,
- Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos,
- bool* ok) {
- // All parts of a ClassDeclaration and ClassExpression are strict code.
- if (name_is_strict_reserved) {
- ReportMessageAt(class_name_location,
- MessageTemplate::kUnexpectedStrictReserved);
- *ok = false;
- return nullptr;
- }
- if (IsEvalOrArguments(name)) {
- ReportMessageAt(class_name_location, MessageTemplate::kStrictEvalArguments);
- *ok = false;
- return nullptr;
- }
+Expression* Parser::InstallHomeObject(Expression* function_literal,
+ Expression* home_object) {
+ Block* do_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
+ Variable* result_var =
+ scope()->NewTemporary(ast_value_factory()->empty_string());
+ DoExpression* do_expr =
+ factory()->NewDoExpression(do_block, result_var, kNoSourcePosition);
+ Assignment* init = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(result_var), function_literal,
+ kNoSourcePosition);
+ do_block->statements()->Add(
+ factory()->NewExpressionStatement(init, kNoSourcePosition), zone());
+ Property* home_object_property = factory()->NewProperty(
+ factory()->NewVariableProxy(result_var),
+ factory()->NewSymbolLiteral("home_object_symbol", kNoSourcePosition),
+ kNoSourcePosition);
+ Assignment* assignment = factory()->NewAssignment(
+ Token::ASSIGN, home_object_property, home_object, kNoSourcePosition);
+ do_block->statements()->Add(
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition), zone());
+ return do_expr;
+}
+
+const AstRawString* ClassFieldVariableName(bool is_name,
+ AstValueFactory* ast_value_factory,
+ int index) {
+ std::string name =
+ ".class-field-" + std::to_string(index) + (is_name ? "-name" : "-func");
+ return ast_value_factory->GetOneByteString(name.c_str());
+}
+
+FunctionLiteral* Parser::SynthesizeClassFieldInitializer(int count) {
+ DCHECK(count > 0);
+ // Makes a function which reads the names and initializers for each class
+ // field out of deterministically named local variables and sets each property
+ // to the result of evaluating its corresponding initializer in turn.
+
+ // This produces a function which looks like
+ // function () {
+ // this[.class-field-0-name] = .class-field-0-func();
+ // this[.class-field-1-name] = .class-field-1-func();
+ // [...]
+ // this[.class-field-n-name] = .class-field-n-func();
+ // return this;
+ // }
+ // except that it performs defineProperty, so that instead of '=' it has
+ // %DefineDataPropertyInLiteral(this, .class-field-0-name,
+ // .class-field-0-func(),
+ // DONT_ENUM, false)
- BlockState block_state(&scope_state_);
RaiseLanguageMode(STRICT);
+ FunctionKind kind = FunctionKind::kConciseMethod;
+ DeclarationScope* initializer_scope = NewFunctionScope(kind);
+ SetLanguageMode(initializer_scope, language_mode());
+ initializer_scope->set_start_position(scanner()->location().end_pos);
+ initializer_scope->set_end_position(scanner()->location().end_pos);
+ FunctionState initializer_state(&function_state_, &scope_state_,
+ initializer_scope);
+ ZoneList<Statement*>* body = new (zone()) ZoneList<Statement*>(count, zone());
+ for (int i = 0; i < count; ++i) {
+ const AstRawString* name =
+ ClassFieldVariableName(true, ast_value_factory(), i);
+ VariableProxy* name_proxy = scope()->NewUnresolved(factory(), name);
+ const AstRawString* function_name =
+ ClassFieldVariableName(false, ast_value_factory(), i);
+ VariableProxy* function_proxy =
+ scope()->NewUnresolved(factory(), function_name);
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+ args->Add(function_proxy, zone());
+ args->Add(ThisExpression(kNoSourcePosition), zone());
+ Expression* call = factory()->NewCallRuntime(Runtime::kInlineCall, args,
+ kNoSourcePosition);
+ ZoneList<Expression*>* define_property_args =
+ new (zone()) ZoneList<Expression*>(5, zone());
+ define_property_args->Add(ThisExpression(kNoSourcePosition), zone());
+ define_property_args->Add(name_proxy, zone());
+ define_property_args->Add(call, zone());
+ define_property_args->Add(
+ factory()->NewNumberLiteral(DONT_ENUM, kNoSourcePosition), zone());
+ define_property_args->Add(
+ factory()->NewNumberLiteral(
+ false, // TODO(bakkot) function name inference a la class { x =
+ // function(){}; static y = function(){}; }
+ kNoSourcePosition),
+ zone());
+ body->Add(factory()->NewExpressionStatement(
+ factory()->NewCallRuntime(
+ Runtime::kDefineDataProperty,
+ define_property_args, // TODO(bakkot) verify that this is
+ // the same as object_define_property
+ kNoSourcePosition),
+ kNoSourcePosition),
+ zone());
+ }
+ body->Add(factory()->NewReturnStatement(ThisExpression(kNoSourcePosition),
+ kNoSourcePosition),
+ zone());
+ FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
+ ast_value_factory()->empty_string(), initializer_scope, body,
+ initializer_state.materialized_literal_count(),
+ initializer_state.expected_property_count(), 0,
+ FunctionLiteral::kNoDuplicateParameters,
+ FunctionLiteral::kAnonymousExpression,
+ FunctionLiteral::kShouldLazyCompile, initializer_scope->start_position());
+ function_literal->set_is_class_field_initializer(true);
+ function_literal->scope()->set_arity(count);
+ return function_literal;
+}
+
+FunctionLiteral* Parser::InsertClassFieldInitializer(
+ FunctionLiteral* constructor) {
+ Statement* call_initializer = factory()->NewExpressionStatement(
+ CallClassFieldInitializer(
+ constructor->scope(),
+ constructor->scope()->NewUnresolved(
+ factory(), ast_value_factory()->this_string(), kNoSourcePosition,
+ kNoSourcePosition + 4, THIS_VARIABLE)),
+ kNoSourcePosition);
+ constructor->body()->InsertAt(0, call_initializer, zone());
+ return constructor;
+}
+
+// If a class name is specified, this method declares the class variable
+// and sets class_info->proxy to point to that name.
+void Parser::DeclareClassVariable(const AstRawString* name, Scope* block_scope,
+ ClassInfo* class_info, int class_token_pos,
+ bool* ok) {
#ifdef DEBUG
scope()->SetScopeName(name);
#endif
- VariableProxy* proxy = nullptr;
if (name != nullptr) {
- proxy = NewUnresolved(name);
- // TODO(verwaest): declare via block_state.
- Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, block_state.scope(), pos);
+ class_info->proxy = factory()->NewVariableProxy(name, NORMAL_VARIABLE);
+ Declaration* declaration = factory()->NewVariableDeclaration(
+ class_info->proxy, block_scope, class_token_pos);
Declare(declaration, DeclarationDescriptor::NORMAL, CONST,
- DefaultInitializationFlag(CONST), CHECK_OK);
- }
-
- Expression* extends = nullptr;
- if (Check(Token::EXTENDS)) {
- block_state.set_start_position(scanner()->location().end_pos);
- ExpressionClassifier extends_classifier(this);
- extends = ParseLeftHandSideExpression(&extends_classifier, CHECK_OK);
- CheckNoTailCallExpressions(&extends_classifier, CHECK_OK);
- RewriteNonPattern(&extends_classifier, CHECK_OK);
- if (classifier != nullptr) {
- classifier->Accumulate(&extends_classifier,
- ExpressionClassifier::ExpressionProductions);
- }
- } else {
- block_state.set_start_position(scanner()->location().end_pos);
+ Variable::DefaultInitializationFlag(CONST), ok);
+ }
+}
+
+// This method declares a property of the given class. It updates the
+// following fields of class_info, as appropriate:
+// - constructor
+// - static_initializer_var
+// - instance_field_initializers
+// - properties
+void Parser::DeclareClassProperty(const AstRawString* class_name,
+ ClassLiteralProperty* property,
+ ClassInfo* class_info, bool* ok) {
+ if (class_info->has_seen_constructor && class_info->constructor == nullptr) {
+ class_info->constructor = GetPropertyValue(property)->AsFunctionLiteral();
+ DCHECK_NOT_NULL(class_info->constructor);
+ class_info->constructor->set_raw_name(
+ class_name != nullptr ? class_name
+ : ast_value_factory()->empty_string());
+ return;
}
-
- ClassLiteralChecker checker(this);
- ZoneList<ObjectLiteral::Property*>* properties = NewPropertyList(4, zone());
- FunctionLiteral* constructor = nullptr;
- bool has_seen_constructor = false;
-
- Expect(Token::LBRACE, CHECK_OK);
-
- const bool has_extends = extends != nullptr;
- while (peek() != Token::RBRACE) {
- if (Check(Token::SEMICOLON)) continue;
- FuncNameInferrer::State fni_state(fni_);
- const bool in_class = true;
- bool is_computed_name = false; // Classes do not care about computed
- // property names here.
- ExpressionClassifier property_classifier(this);
- const AstRawString* property_name = nullptr;
- ObjectLiteral::Property* property = ParsePropertyDefinition(
- &checker, in_class, has_extends, MethodKind::kNormal, &is_computed_name,
- &has_seen_constructor, &property_classifier, &property_name, CHECK_OK);
- RewriteNonPattern(&property_classifier, CHECK_OK);
- if (classifier != nullptr) {
- classifier->Accumulate(&property_classifier,
- ExpressionClassifier::ExpressionProductions);
- }
-
- if (has_seen_constructor && constructor == nullptr) {
- constructor = GetPropertyValue(property)->AsFunctionLiteral();
- DCHECK_NOT_NULL(constructor);
- constructor->set_raw_name(
- name != nullptr ? name : ast_value_factory()->empty_string());
+ if (property->kind() == ClassLiteralProperty::FIELD) {
+ DCHECK(allow_harmony_class_fields());
+ if (property->is_static()) {
+ if (class_info->static_initializer_var == nullptr) {
+ class_info->static_initializer_var =
+ NewTemporary(ast_value_factory()->empty_string());
+ }
+ // TODO(bakkot) only do this conditionally
+ Expression* function = InstallHomeObject(
+ property->value(),
+ factory()->NewVariableProxy(class_info->static_initializer_var));
+ ZoneList<Expression*>* args =
+ new (zone()) ZoneList<Expression*>(2, zone());
+ args->Add(function, zone());
+ args->Add(factory()->NewVariableProxy(class_info->static_initializer_var),
+ zone());
+ Expression* call = factory()->NewCallRuntime(Runtime::kInlineCall, args,
+ kNoSourcePosition);
+ property->set_value(call);
} else {
- properties->Add(property, zone());
- }
-
- if (fni_ != nullptr) fni_->Infer();
-
- if (property_name != ast_value_factory()->constructor_string()) {
- SetFunctionNameFromPropertyName(property, property_name);
- }
- }
-
- Expect(Token::RBRACE, CHECK_OK);
+ // if (is_computed_name) { // TODO(bakkot) figure out why this is
+ // necessary for non-computed names in full-codegen
+ ZoneList<Expression*>* to_name_args =
+ new (zone()) ZoneList<Expression*>(1, zone());
+ to_name_args->Add(property->key(), zone());
+ property->set_key(factory()->NewCallRuntime(
+ Runtime::kToName, to_name_args, kNoSourcePosition));
+ //}
+ const AstRawString* name = ClassFieldVariableName(
+ true, ast_value_factory(),
+ class_info->instance_field_initializers->length());
+ VariableProxy* name_proxy =
+ factory()->NewVariableProxy(name, NORMAL_VARIABLE);
+ Declaration* name_declaration = factory()->NewVariableDeclaration(
+ name_proxy, scope(), kNoSourcePosition);
+ Variable* name_var =
+ Declare(name_declaration, DeclarationDescriptor::NORMAL, CONST,
+ kNeedsInitialization, ok, scope());
+ DCHECK(*ok);
+ if (!*ok) return;
+ class_info->instance_field_initializers->Add(property->value(), zone());
+ property->set_value(factory()->NewVariableProxy(name_var));
+ }
+ }
+ class_info->properties->Add(property, zone());
+}
+
+// This method rewrites a class literal into a do-expression.
+// It uses the following fields of class_info:
+// - constructor (if missing, it updates it with a default constructor)
+// - proxy
+// - extends
+// - static_initializer_var
+// - instance_field_initializers
+// - properties
+Expression* Parser::RewriteClassLiteral(const AstRawString* name,
+ ClassInfo* class_info, int pos,
+ bool* ok) {
int end_pos = scanner()->location().end_pos;
-
- if (constructor == nullptr) {
- constructor = DefaultConstructor(name, has_extends, pos, end_pos,
- block_state.language_mode());
- }
-
- // Note that we do not finalize this block scope because it is
- // used as a sentinel value indicating an anonymous class.
- block_state.set_end_position(end_pos);
-
- if (name != nullptr) {
- DCHECK_NOT_NULL(proxy);
- proxy->var()->set_initializer_position(end_pos);
- }
-
Block* do_block = factory()->NewBlock(nullptr, 1, false, pos);
Variable* result_var = NewTemporary(ast_value_factory()->empty_string());
- do_block->set_scope(block_state.FinalizedBlockScope());
DoExpression* do_expr = factory()->NewDoExpression(do_block, result_var, pos);
- ClassLiteral* class_literal = factory()->NewClassLiteral(
- proxy, extends, constructor, properties, pos, end_pos);
-
- do_block->statements()->Add(
- factory()->NewExpressionStatement(class_literal, pos), zone());
- do_expr->set_represented_function(constructor);
- Rewriter::Rewrite(this, GetClosureScope(), do_expr, ast_value_factory());
-
- return do_expr;
-}
-
-
-Expression* Parser::ParseV8Intrinsic(bool* ok) {
- // CallRuntime ::
- // '%' Identifier Arguments
+ bool has_extends = class_info->extends != nullptr;
+ bool has_instance_fields =
+ class_info->instance_field_initializers->length() > 0;
+ DCHECK(!has_instance_fields || allow_harmony_class_fields());
+ bool has_default_constructor = class_info->constructor == nullptr;
+ if (has_default_constructor) {
+ class_info->constructor =
+ DefaultConstructor(name, has_extends, has_instance_fields, pos, end_pos,
+ scope()->language_mode());
+ }
- int pos = peek_position();
- Expect(Token::MOD, CHECK_OK);
- // Allow "eval" or "arguments" for backward compatibility.
- const AstRawString* name = ParseIdentifier(kAllowRestrictedIdentifiers,
- CHECK_OK);
- Scanner::Location spread_pos;
- ExpressionClassifier classifier(this);
- ZoneList<Expression*>* args =
- ParseArguments(&spread_pos, &classifier, CHECK_OK);
+ if (has_instance_fields && !has_extends) {
+ class_info->constructor =
+ InsertClassFieldInitializer(class_info->constructor);
+ class_info->constructor->set_requires_class_field_init(true);
+ } // The derived case is handled by rewriting super calls.
- DCHECK(!spread_pos.IsValid());
+ scope()->set_end_position(end_pos);
- if (extension_ != NULL) {
- // The extension structures are only accessible while parsing the
- // very first time not when reparsing because of lazy compilation.
- GetClosureScope()->ForceEagerCompilation();
+ if (name != nullptr) {
+ DCHECK_NOT_NULL(class_info->proxy);
+ class_info->proxy->var()->set_initializer_position(end_pos);
}
- const Runtime::Function* function = Runtime::FunctionForName(name->string());
-
- if (function != NULL) {
- // Check for possible name clash.
- DCHECK_EQ(Context::kNotFound,
- Context::IntrinsicIndexForName(name->string()));
- // Check for built-in IS_VAR macro.
- if (function->function_id == Runtime::kIS_VAR) {
- DCHECK_EQ(Runtime::RUNTIME, function->intrinsic_type);
- // %IS_VAR(x) evaluates to x if x is a variable,
- // leads to a parse error otherwise. Could be implemented as an
- // inline function %_IS_VAR(x) to eliminate this special case.
- if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
- return args->at(0);
- } else {
- ReportMessage(MessageTemplate::kNotIsvar);
- *ok = false;
- return NULL;
- }
- }
-
- // Check that the expected number of arguments are being passed.
- if (function->nargs != -1 && function->nargs != args->length()) {
- ReportMessage(MessageTemplate::kRuntimeWrongNumArgs);
- *ok = false;
- return NULL;
- }
+ ClassLiteral* class_literal = factory()->NewClassLiteral(
+ class_info->proxy, class_info->extends, class_info->constructor,
+ class_info->properties, pos, end_pos);
- return factory()->NewCallRuntime(function, args, pos);
+ if (class_info->static_initializer_var != nullptr) {
+ class_literal->set_static_initializer_proxy(
+ factory()->NewVariableProxy(class_info->static_initializer_var));
}
- int context_index = Context::IntrinsicIndexForName(name->string());
-
- // Check that the function is defined.
- if (context_index == Context::kNotFound) {
- ReportMessage(MessageTemplate::kNotDefined, name);
- *ok = false;
- return NULL;
+ do_block->statements()->Add(
+ factory()->NewExpressionStatement(
+ factory()->NewAssignment(Token::ASSIGN,
+ factory()->NewVariableProxy(result_var),
+ class_literal, kNoSourcePosition),
+ pos),
+ zone());
+ if (allow_harmony_class_fields() &&
+ (has_instance_fields || (has_extends && !has_default_constructor))) {
+ // Default constructors for derived classes without fields will not try to
+ // read this variable, so there's no need to create it.
+ const AstRawString* init_fn_name =
+ ast_value_factory()->dot_class_field_init_string();
+ Variable* init_fn_var = scope()->DeclareLocal(
+ init_fn_name, CONST, kCreatedInitialized, NORMAL_VARIABLE);
+ Expression* initializer =
+ has_instance_fields
+ ? static_cast<Expression*>(SynthesizeClassFieldInitializer(
+ class_info->instance_field_initializers->length()))
+ : factory()->NewBooleanLiteral(false, kNoSourcePosition);
+ Assignment* assignment = factory()->NewAssignment(
+ Token::INIT, factory()->NewVariableProxy(init_fn_var), initializer,
+ kNoSourcePosition);
+ do_block->statements()->Add(
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition),
+ zone());
}
+ for (int i = 0; i < class_info->instance_field_initializers->length(); ++i) {
+ const AstRawString* function_name =
+ ClassFieldVariableName(false, ast_value_factory(), i);
+ VariableProxy* function_proxy =
+ factory()->NewVariableProxy(function_name, NORMAL_VARIABLE);
+ Declaration* function_declaration = factory()->NewVariableDeclaration(
+ function_proxy, scope(), kNoSourcePosition);
+ Variable* function_var =
+ Declare(function_declaration, DeclarationDescriptor::NORMAL, CONST,
+ kNeedsInitialization, ok, scope());
+ if (!*ok) return nullptr;
+ Property* prototype_property = factory()->NewProperty(
+ factory()->NewVariableProxy(result_var),
+ factory()->NewStringLiteral(ast_value_factory()->prototype_string(),
+ kNoSourcePosition),
+ kNoSourcePosition);
+ Expression* function_value = InstallHomeObject(
+ class_info->instance_field_initializers->at(i),
+ prototype_property); // TODO(bakkot) ideally this would be conditional,
+ // especially in trivial cases
+ Assignment* function_assignment = factory()->NewAssignment(
+ Token::INIT, factory()->NewVariableProxy(function_var), function_value,
+ kNoSourcePosition);
+ do_block->statements()->Add(factory()->NewExpressionStatement(
+ function_assignment, kNoSourcePosition),
+ zone());
+ }
+ do_block->set_scope(scope()->FinalizeBlockScope());
+ do_expr->set_represented_function(class_info->constructor);
- return factory()->NewCallRuntime(context_index, args, pos);
+ return do_expr;
}
-
Literal* Parser::GetLiteralUndefined(int position) {
return factory()->NewUndefinedLiteral(position);
}
@@ -5045,100 +3688,22 @@ void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
}
}
-void Parser::InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope,
- Scope* complex_params_scope,
- bool* ok) {
- // For each variable which is used as a function declaration in a sloppy
- // block,
- SloppyBlockFunctionMap* map = scope->sloppy_block_function_map();
- for (ZoneHashMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
- AstRawString* name = static_cast<AstRawString*>(p->key);
-
- // If the variable wouldn't conflict with a lexical declaration
- // or parameter,
-
- // Check if there's a conflict with a parameter.
- // This depends on the fact that functions always have a scope solely to
- // hold complex parameters, and the names local to that scope are
- // precisely the names of the parameters. IsDeclaredParameter(name) does
- // not hold for names declared by complex parameters, nor are those
- // bindings necessarily declared lexically, so we have to check for them
- // explicitly. On the other hand, if there are not complex parameters,
- // it is sufficient to just check IsDeclaredParameter.
- if (complex_params_scope != nullptr) {
- if (complex_params_scope->LookupLocal(name) != nullptr) {
- continue;
- }
- } else {
- if (scope->IsDeclaredParameter(name)) {
- continue;
- }
- }
-
- bool var_created = false;
-
- // Write in assignments to var for each block-scoped function declaration
- auto delegates = static_cast<SloppyBlockFunctionStatement*>(p->value);
-
- DeclarationScope* decl_scope = scope;
- while (decl_scope->is_eval_scope()) {
- decl_scope = decl_scope->outer_scope()->GetDeclarationScope();
- }
- Scope* outer_scope = decl_scope->outer_scope();
-
- for (SloppyBlockFunctionStatement* delegate = delegates;
- delegate != nullptr; delegate = delegate->next()) {
- // Check if there's a conflict with a lexical declaration
- Scope* query_scope = delegate->scope()->outer_scope();
- Variable* var = nullptr;
- bool should_hoist = true;
-
- // Note that we perform this loop for each delegate named 'name',
- // which may duplicate work if those delegates share scopes.
- // It is not sufficient to just do a Lookup on query_scope: for
- // example, that does not prevent hoisting of the function in
- // `{ let e; try {} catch (e) { function e(){} } }`
- do {
- var = query_scope->LookupLocal(name);
- if (var != nullptr && IsLexicalVariableMode(var->mode())) {
- should_hoist = false;
- break;
- }
- query_scope = query_scope->outer_scope();
- } while (query_scope != outer_scope);
-
- if (!should_hoist) continue;
-
- // Declare a var-style binding for the function in the outer scope
- if (!var_created) {
- var_created = true;
- VariableProxy* proxy = scope->NewUnresolved(factory(), name);
- Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, scope, kNoSourcePosition);
- Declare(declaration, DeclarationDescriptor::NORMAL, VAR,
- DefaultInitializationFlag(VAR), ok, scope);
- DCHECK(ok); // Based on the preceding check, this should not fail
- if (!ok) return;
- }
-
- // Read from the local lexical scope and write to the function scope
- VariableProxy* to = scope->NewUnresolved(factory(), name);
- VariableProxy* from = delegate->scope()->NewUnresolved(factory(), name);
- Expression* assignment =
- factory()->NewAssignment(Token::ASSIGN, to, from, kNoSourcePosition);
- Statement* statement =
- factory()->NewExpressionStatement(assignment, kNoSourcePosition);
- delegate->set_statement(statement);
- }
+void Parser::InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope) {
+ // For the outermost eval scope, we cannot hoist during parsing: let
+ // declarations in the surrounding scope may prevent hoisting, but the
+ // information is unaccessible during parsing. In this case, we hoist later in
+ // DeclarationScope::Analyze.
+ if (scope->is_eval_scope() && scope->outer_scope() == original_scope_) {
+ return;
}
+ scope->HoistSloppyBlockFunctions(factory());
}
-
// ----------------------------------------------------------------------------
// Parser support
bool Parser::TargetStackContainsLabel(const AstRawString* label) {
- for (Target* t = target_stack_; t != NULL; t = t->previous()) {
+ for (ParserTarget* t = target_stack_; t != NULL; t = t->previous()) {
if (ContainsLabel(t->statement()->labels(), label)) return true;
}
return false;
@@ -5148,7 +3713,7 @@ bool Parser::TargetStackContainsLabel(const AstRawString* label) {
BreakableStatement* Parser::LookupBreakTarget(const AstRawString* label,
bool* ok) {
bool anonymous = label == NULL;
- for (Target* t = target_stack_; t != NULL; t = t->previous()) {
+ for (ParserTarget* t = target_stack_; t != NULL; t = t->previous()) {
BreakableStatement* stat = t->statement();
if ((anonymous && stat->is_target_for_anonymous()) ||
(!anonymous && ContainsLabel(stat->labels(), label))) {
@@ -5162,7 +3727,7 @@ BreakableStatement* Parser::LookupBreakTarget(const AstRawString* label,
IterationStatement* Parser::LookupContinueTarget(const AstRawString* label,
bool* ok) {
bool anonymous = label == NULL;
- for (Target* t = target_stack_; t != NULL; t = t->previous()) {
+ for (ParserTarget* t = target_stack_; t != NULL; t = t->previous()) {
IterationStatement* stat = t->statement()->AsIterationStatement();
if (stat == NULL) continue;
@@ -5188,7 +3753,7 @@ void Parser::HandleSourceURLComments(Isolate* isolate, Handle<Script> script) {
void Parser::Internalize(Isolate* isolate, Handle<Script> script, bool error) {
- // Internalize strings.
+ // Internalize strings and values.
ast_value_factory()->Internalize(isolate);
// Error processing.
@@ -5240,12 +3805,6 @@ bool Parser::Parse(ParseInfo* info) {
DCHECK(parsing_on_main_thread_);
Isolate* isolate = info->isolate();
pre_parse_timer_ = isolate->counters()->pre_parse();
- if (FLAG_trace_parse || allow_natives() || extension_ != NULL) {
- // If intrinsics are allowed, the Parser cannot operate independent of the
- // V8 heap because of Runtime. Tell the string table to internalize strings
- // and values right after they're created.
- ast_value_factory()->Internalize(isolate);
- }
if (info->is_lazy()) {
DCHECK(!info->is_eval());
@@ -5261,7 +3820,6 @@ bool Parser::Parse(ParseInfo* info) {
info->set_literal(result);
Internalize(isolate, info->script(), result == NULL);
- DCHECK(ast_value_factory()->IsInternalized());
return (result != NULL);
}
@@ -5282,11 +3840,11 @@ void Parser::ParseOnBackground(ParseInfo* info) {
stream_ptr = info->character_stream();
} else {
DCHECK(info->character_stream() == nullptr);
- stream.reset(new ExternalStreamingStream(info->source_stream(),
- info->source_stream_encoding()));
+ stream.reset(ScannerStream::For(info->source_stream(),
+ info->source_stream_encoding()));
stream_ptr = stream.get();
}
- DCHECK(info->context().is_null() || info->context()->IsNativeContext());
+ DCHECK(info->maybe_outer_scope_info().is_null());
DCHECK(original_scope_);
@@ -5431,11 +3989,9 @@ uint32_t Parser::ComputeTemplateLiteralHash(const TemplateLiteral* lit) {
return running_hash;
}
-
-ZoneList<v8::internal::Expression*>* Parser::PrepareSpreadArguments(
- ZoneList<v8::internal::Expression*>* list) {
- ZoneList<v8::internal::Expression*>* args =
- new (zone()) ZoneList<v8::internal::Expression*>(1, zone());
+ZoneList<Expression*>* Parser::PrepareSpreadArguments(
+ ZoneList<Expression*>* list) {
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
if (list->length() == 1) {
// Spread-call with single spread argument produces an InternalArray
// containing the values from the array.
@@ -5462,8 +4018,8 @@ ZoneList<v8::internal::Expression*>* Parser::PrepareSpreadArguments(
int n = list->length();
while (i < n) {
if (!list->at(i)->IsSpread()) {
- ZoneList<v8::internal::Expression*>* unspread =
- new (zone()) ZoneList<v8::internal::Expression*>(1, zone());
+ ZoneList<Expression*>* unspread =
+ new (zone()) ZoneList<Expression*>(1, zone());
// Push array of unspread parameters
while (i < n && !list->at(i)->IsSpread()) {
@@ -5478,15 +4034,15 @@ ZoneList<v8::internal::Expression*>* Parser::PrepareSpreadArguments(
}
// Push eagerly spread argument
- ZoneList<v8::internal::Expression*>* spread_list =
- new (zone()) ZoneList<v8::internal::Expression*>(1, zone());
+ ZoneList<Expression*>* spread_list =
+ new (zone()) ZoneList<Expression*>(1, zone());
spread_list->Add(list->at(i++)->AsSpread()->expression(), zone());
args->Add(factory()->NewCallRuntime(Context::SPREAD_ITERABLE_INDEX,
spread_list, kNoSourcePosition),
zone());
}
- list = new (zone()) ZoneList<v8::internal::Expression*>(1, zone());
+ list = new (zone()) ZoneList<Expression*>(1, zone());
list->Add(factory()->NewCallRuntime(Context::SPREAD_ARGUMENTS_INDEX, args,
kNoSourcePosition),
zone());
@@ -5495,10 +4051,8 @@ ZoneList<v8::internal::Expression*>* Parser::PrepareSpreadArguments(
UNREACHABLE();
}
-
Expression* Parser::SpreadCall(Expression* function,
- ZoneList<v8::internal::Expression*>* args,
- int pos) {
+ ZoneList<Expression*>* args, int pos) {
if (function->IsSuperCallReference()) {
// Super calls
// $super_constructor = %_GetSuperConstructor(<this-function>)
@@ -5540,10 +4094,8 @@ Expression* Parser::SpreadCall(Expression* function,
}
}
-
Expression* Parser::SpreadCallNew(Expression* function,
- ZoneList<v8::internal::Expression*>* args,
- int pos) {
+ ZoneList<Expression*>* args, int pos) {
args->InsertAt(0, function, zone());
return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args, pos);
@@ -5562,90 +4114,141 @@ void Parser::SetLanguageMode(Scope* scope, LanguageMode mode) {
scope->SetLanguageMode(mode);
}
-
-void Parser::RaiseLanguageMode(LanguageMode mode) {
- LanguageMode old = scope()->language_mode();
- SetLanguageMode(scope(), old > mode ? old : mode);
+void Parser::SetAsmModule() {
+ // Store the usage count; The actual use counter on the isolate is
+ // incremented after parsing is done.
+ ++use_counts_[v8::Isolate::kUseAsm];
+ DCHECK(scope()->is_declaration_scope());
+ scope()->AsDeclarationScope()->set_asm_module();
}
void Parser::MarkCollectedTailCallExpressions() {
const ZoneList<Expression*>& tail_call_expressions =
function_state_->tail_call_expressions().expressions();
for (int i = 0; i < tail_call_expressions.length(); ++i) {
- Expression* expression = tail_call_expressions[i];
- // If only FLAG_harmony_explicit_tailcalls is enabled then expression
- // must be a Call expression.
- DCHECK(FLAG_harmony_tailcalls || !FLAG_harmony_explicit_tailcalls ||
- expression->IsCall());
- MarkTailPosition(expression);
+ MarkTailPosition(tail_call_expressions[i]);
}
}
-Expression* ParserBaseTraits<Parser>::ExpressionListToExpression(
- ZoneList<Expression*>* args) {
- AstNodeFactory* factory = delegate()->factory();
+Expression* Parser::ExpressionListToExpression(ZoneList<Expression*>* args) {
Expression* expr = args->at(0);
for (int i = 1; i < args->length(); ++i) {
- expr = factory->NewBinaryOperation(Token::COMMA, expr, args->at(i),
- expr->position());
+ expr = factory()->NewBinaryOperation(Token::COMMA, expr, args->at(i),
+ expr->position());
}
return expr;
}
+// This method intoduces the line initializing the generator object
+// when desugaring the body of async_function.
+void Parser::PrepareAsyncFunctionBody(ZoneList<Statement*>* body,
+ FunctionKind kind, int pos) {
+ // function async_function() {
+ // .generator_object = %CreateGeneratorObject();
+ // BuildRejectPromiseOnException({
+ // ... block ...
+ // return %ResolvePromise(.promise, expr), .promise;
+ // })
+ // }
+
+ Variable* temp =
+ NewTemporary(ast_value_factory()->dot_generator_object_string());
+ function_state_->set_generator_object_variable(temp);
+
+ Expression* init_generator_variable = factory()->NewAssignment(
+ Token::INIT, factory()->NewVariableProxy(temp),
+ BuildCreateJSGeneratorObject(pos, kind), kNoSourcePosition);
+ body->Add(factory()->NewExpressionStatement(init_generator_variable,
+ kNoSourcePosition),
+ zone());
+}
+
+// This method completes the desugaring of the body of async_function.
+void Parser::RewriteAsyncFunctionBody(ZoneList<Statement*>* body, Block* block,
+ Expression* return_value, bool* ok) {
+ // function async_function() {
+ // .generator_object = %CreateGeneratorObject();
+ // BuildRejectPromiseOnException({
+ // ... block ...
+ // return %ResolvePromise(.promise, expr), .promise;
+ // })
+ // }
+
+ return_value = BuildResolvePromise(return_value, return_value->position());
+ block->statements()->Add(
+ factory()->NewReturnStatement(return_value, return_value->position()),
+ zone());
+ block = BuildRejectPromiseOnException(block, CHECK_OK_VOID);
+ body->Add(block, zone());
+}
+
Expression* Parser::RewriteAwaitExpression(Expression* value, int await_pos) {
- // yield %AsyncFunctionAwait(.generator_object, <operand>)
+ // yield do {
+ // tmp = <operand>;
+ // %AsyncFunctionAwait(.generator_object, tmp, .promise);
+ // .promise
+ // }
+ // The value of the expression is returned to the caller of the async
+ // function for the first yield statement; for this, .promise is the
+ // appropriate return value, being a Promise that will be fulfilled or
+ // rejected with the appropriate value by the desugaring. Subsequent yield
+ // occurrences will return to the AsyncFunctionNext call within the
+ // implemementation of the intermediate throwaway Promise's then handler.
+ // This handler has nothing useful to do with the value, as the Promise is
+ // ignored. If we yielded the value of the throwawayPromise that
+ // AsyncFunctionAwait creates as an intermediate, it would create a memory
+ // leak; we must return .promise instead;
+ // The operand needs to be evaluated on a separate statement in order to get
+ // a break location, and the .promise needs to be read earlier so that it
+ // doesn't insert a false location.
+ // TODO(littledan): investigate why this ordering is needed in more detail.
Variable* generator_object_variable =
- delegate()->function_state_->generator_object_variable();
+ function_state_->generator_object_variable();
// If generator_object_variable is null,
+ // TODO(littledan): Is this necessary?
if (!generator_object_variable) return value;
- auto factory = delegate()->factory();
const int nopos = kNoSourcePosition;
- Variable* temp_var =
- delegate()->NewTemporary(delegate()->ast_value_factory()->empty_string());
- VariableProxy* temp_proxy = factory->NewVariableProxy(temp_var);
- Block* do_block = factory->NewBlock(nullptr, 2, false, nopos);
+ Block* do_block = factory()->NewBlock(nullptr, 2, false, nopos);
+
+ Variable* promise = PromiseVariable();
// Wrap value evaluation to provide a break location.
- Expression* value_assignment =
- factory->NewAssignment(Token::ASSIGN, temp_proxy, value, nopos);
+ Variable* temp_var = NewTemporary(ast_value_factory()->empty_string());
+ Expression* value_assignment = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(temp_var), value, nopos);
do_block->statements()->Add(
- factory->NewExpressionStatement(value_assignment, value->position()),
+ factory()->NewExpressionStatement(value_assignment, value->position()),
zone());
ZoneList<Expression*>* async_function_await_args =
- new (zone()) ZoneList<Expression*>(2, zone());
+ new (zone()) ZoneList<Expression*>(3, zone());
Expression* generator_object =
- factory->NewVariableProxy(generator_object_variable);
+ factory()->NewVariableProxy(generator_object_variable);
async_function_await_args->Add(generator_object, zone());
- async_function_await_args->Add(temp_proxy, zone());
- Expression* async_function_await = delegate()->factory()->NewCallRuntime(
- Context::ASYNC_FUNCTION_AWAIT_INDEX, async_function_await_args, nopos);
- // Wrap await to provide a break location between value evaluation and yield.
- Expression* await_assignment = factory->NewAssignment(
- Token::ASSIGN, temp_proxy, async_function_await, nopos);
+ async_function_await_args->Add(factory()->NewVariableProxy(temp_var), zone());
+ async_function_await_args->Add(factory()->NewVariableProxy(promise), zone());
+
+ // The parser emits calls to AsyncFunctionAwaitCaught, but the
+ // AstNumberingVisitor will rewrite this to AsyncFunctionAwaitUncaught
+ // if there is no local enclosing try/catch block.
+ Expression* async_function_await =
+ factory()->NewCallRuntime(Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX,
+ async_function_await_args, nopos);
do_block->statements()->Add(
- factory->NewExpressionStatement(await_assignment, await_pos), zone());
- Expression* do_expr = factory->NewDoExpression(do_block, temp_var, nopos);
-
- generator_object = factory->NewVariableProxy(generator_object_variable);
- return factory->NewYield(generator_object, do_expr, nopos,
- Yield::kOnExceptionRethrow);
-}
+ factory()->NewExpressionStatement(async_function_await, await_pos),
+ zone());
-ZoneList<Expression*>* ParserBaseTraits<Parser>::GetNonPatternList() const {
- return delegate()->function_state_->non_patterns_to_rewrite();
-}
+ // Wrap await to provide a break location between value evaluation and yield.
+ Expression* do_expr = factory()->NewDoExpression(do_block, promise, nopos);
-ZoneList<typename ParserBaseTraits<Parser>::Type::ExpressionClassifier::Error>*
-ParserBaseTraits<Parser>::GetReportedErrorList() const {
- return delegate()->function_state_->GetReportedErrorList();
+ generator_object = factory()->NewVariableProxy(generator_object_variable);
+ return factory()->NewYield(generator_object, do_expr, nopos,
+ Yield::kOnExceptionRethrow);
}
-Zone* ParserBaseTraits<Parser>::zone() const { return delegate()->zone(); }
-
class NonPatternRewriter : public AstExpressionRewriter {
public:
NonPatternRewriter(uintptr_t stack_limit, Parser* parser)
@@ -5674,7 +4277,7 @@ class NonPatternRewriter : public AstExpressionRewriter {
return false;
}
- void VisitObjectLiteralProperty(ObjectLiteralProperty* property) override {
+ void VisitLiteralProperty(LiteralProperty* property) override {
if (property == nullptr) return;
// Do not rewrite (computed) key expressions
AST_REWRITE_PROPERTY(Expression, property, value);
@@ -5683,11 +4286,10 @@ class NonPatternRewriter : public AstExpressionRewriter {
Parser* parser_;
};
-
-void Parser::RewriteNonPattern(ExpressionClassifier* classifier, bool* ok) {
- ValidateExpression(classifier, CHECK_OK_VOID);
+void Parser::RewriteNonPattern(bool* ok) {
+ ValidateExpression(CHECK_OK_VOID);
auto non_patterns_to_rewrite = function_state_->non_patterns_to_rewrite();
- int begin = classifier->GetNonPatternBegin();
+ int begin = classifier()->GetNonPatternBegin();
int end = non_patterns_to_rewrite->length();
if (begin < end) {
NonPatternRewriter rewriter(stack_limit_, this);
@@ -5711,8 +4313,11 @@ void Parser::RewriteDestructuringAssignments() {
pair.assignment->AsRewritableExpression();
DCHECK_NOT_NULL(to_rewrite);
if (!to_rewrite->is_rewritten()) {
- PatternRewriter::RewriteDestructuringAssignment(this, to_rewrite,
- pair.scope);
+ // Since this function is called at the end of parsing the program,
+ // pair.scope may already have been removed by FinalizeBlockScope in the
+ // meantime.
+ Scope* scope = pair.scope->GetUnremovedScope();
+ PatternRewriter::RewriteDestructuringAssignment(this, to_rewrite, scope);
}
}
}
@@ -5733,8 +4338,8 @@ Expression* Parser::RewriteAssignExponentiation(Expression* left,
Expression* result;
DCHECK_NOT_NULL(lhs->raw_name());
- result = this->ExpressionFromIdentifier(lhs->raw_name(), lhs->position(),
- lhs->end_position());
+ result = ExpressionFromIdentifier(lhs->raw_name(), lhs->position(),
+ lhs->end_position());
args->Add(left, zone());
args->Add(right, zone());
Expression* call =
@@ -5807,8 +4412,7 @@ Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
// ++($R.length)
if (!value->IsLiteral() ||
!value->AsLiteral()->raw_value()->IsTheHole()) {
- ZoneList<Expression*>* append_element_args =
- NewExpressionList(2, zone());
+ ZoneList<Expression*>* append_element_args = NewExpressionList(2);
append_element_args->Add(factory()->NewVariableProxy(result), zone());
append_element_args->Add(value, zone());
do_block->statements()->Add(
@@ -5837,8 +4441,7 @@ Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
// %AppendElement($R, each)
Statement* append_body;
{
- ZoneList<Expression*>* append_element_args =
- NewExpressionList(2, zone());
+ ZoneList<Expression*>* append_element_args = NewExpressionList(2);
append_element_args->Add(factory()->NewVariableProxy(result), zone());
append_element_args->Add(factory()->NewVariableProxy(each), zone());
append_body = factory()->NewExpressionStatement(
@@ -5865,7 +4468,7 @@ Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
void Parser::QueueDestructuringAssignmentForRewriting(Expression* expr) {
DCHECK(expr->IsRewritableExpression());
function_state_->AddDestructuringAssignment(
- DestructuringAssignment(expr, delegate()->scope()));
+ DestructuringAssignment(expr, scope()));
}
void Parser::QueueNonPatternForRewriting(Expression* expr, bool* ok) {
@@ -5873,43 +4476,38 @@ void Parser::QueueNonPatternForRewriting(Expression* expr, bool* ok) {
function_state_->AddNonPatternForRewriting(expr, ok);
}
-void ParserBaseTraits<Parser>::SetFunctionNameFromPropertyName(
- ObjectLiteralProperty* property, const AstRawString* name) {
- Expression* value = property->value();
+void Parser::AddAccessorPrefixToFunctionName(bool is_get,
+ FunctionLiteral* function,
+ const AstRawString* name) {
+ DCHECK_NOT_NULL(name);
+ const AstRawString* prefix = is_get ? ast_value_factory()->get_space_string()
+ : ast_value_factory()->set_space_string();
+ function->set_raw_name(ast_value_factory()->NewConsString(prefix, name));
+}
- // Computed name setting must happen at runtime.
- if (property->is_computed_name()) return;
+void Parser::SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
+ const AstRawString* name) {
+ DCHECK(property->kind() != ObjectLiteralProperty::GETTER);
+ DCHECK(property->kind() != ObjectLiteralProperty::SETTER);
- // Getter and setter names are handled here because their names
- // change in ES2015, even though they are not anonymous.
- auto function = value->AsFunctionLiteral();
- if (function != nullptr) {
- bool is_getter = property->kind() == ObjectLiteralProperty::GETTER;
- bool is_setter = property->kind() == ObjectLiteralProperty::SETTER;
- if (is_getter || is_setter) {
- DCHECK_NOT_NULL(name);
- const AstRawString* prefix =
- is_getter ? delegate()->ast_value_factory()->get_space_string()
- : delegate()->ast_value_factory()->set_space_string();
- function->set_raw_name(
- delegate()->ast_value_factory()->NewConsString(prefix, name));
- return;
- }
- }
+ // Computed name setting must happen at runtime.
+ DCHECK(!property->is_computed_name());
// Ignore "__proto__" as a name when it's being used to set the [[Prototype]]
// of an object literal.
if (property->kind() == ObjectLiteralProperty::PROTOTYPE) return;
+ Expression* value = property->value();
+
DCHECK(!value->IsAnonymousFunctionDefinition() ||
property->kind() == ObjectLiteralProperty::COMPUTED);
- delegate()->SetFunctionName(value, name);
+ SetFunctionName(value, name);
}
-void ParserBaseTraits<Parser>::SetFunctionNameFromIdentifierRef(
- Expression* value, Expression* identifier) {
+void Parser::SetFunctionNameFromIdentifierRef(Expression* value,
+ Expression* identifier) {
if (!identifier->IsVariableProxy()) return;
- delegate()->SetFunctionName(value, identifier->AsVariableProxy()->raw_name());
+ SetFunctionName(value, identifier->AsVariableProxy()->raw_name());
}
void Parser::SetFunctionName(Expression* value, const AstRawString* name) {
@@ -6050,7 +4648,7 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
Variable* var_iterator = NewTemporary(ast_value_factory()->empty_string());
Statement* get_iterator;
{
- Expression* iterator = GetIterator(iterable, factory(), nopos);
+ Expression* iterator = GetIterator(iterable, nopos);
Expression* iterator_proxy = factory()->NewVariableProxy(var_iterator);
Expression* assignment = factory()->NewAssignment(
Token::ASSIGN, iterator_proxy, iterator, nopos);
@@ -6155,7 +4753,7 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
Block* then = factory()->NewBlock(nullptr, 4 + 1, false, nopos);
BuildIteratorCloseForCompletion(
- then->statements(), var_iterator,
+ scope(), then->statements(), var_iterator,
factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos));
then->statements()->Add(throw_call, zone());
check_throw = factory()->NewIfStatement(
@@ -6259,7 +4857,7 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
// input = function.sent;
Statement* get_input;
{
- Expression* function_sent = FunctionSentExpression(factory(), nopos);
+ Expression* function_sent = FunctionSentExpression(nopos);
Expression* input_proxy = factory()->NewVariableProxy(var_input);
Expression* assignment = factory()->NewAssignment(
Token::ASSIGN, input_proxy, function_sent, nopos);
@@ -6313,9 +4911,8 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
Scope* catch_scope = NewScope(CATCH_SCOPE);
catch_scope->set_is_hidden();
const AstRawString* name = ast_value_factory()->dot_catch_string();
- Variable* catch_variable =
- catch_scope->DeclareLocal(name, VAR, kCreatedInitialized,
- Variable::NORMAL);
+ Variable* catch_variable = catch_scope->DeclareLocal(
+ name, VAR, kCreatedInitialized, NORMAL_VARIABLE);
try_catch = factory()->NewTryCatchStatementForDesugaring(
try_block, catch_scope, catch_variable, catch_block, nopos);
@@ -6524,9 +5121,9 @@ void Parser::BuildIteratorClose(ZoneList<Statement*>* statements,
statements->Add(validate_output, zone());
}
-void Parser::FinalizeIteratorUse(Variable* completion, Expression* condition,
- Variable* iter, Block* iterator_use,
- Block* target) {
+void Parser::FinalizeIteratorUse(Scope* use_scope, Variable* completion,
+ Expression* condition, Variable* iter,
+ Block* iterator_use, Block* target) {
//
// This function adds two statements to [target], corresponding to the
// following code:
@@ -6582,7 +5179,8 @@ void Parser::FinalizeIteratorUse(Variable* completion, Expression* condition,
{
Block* block = factory()->NewBlock(nullptr, 2, true, nopos);
Expression* proxy = factory()->NewVariableProxy(completion);
- BuildIteratorCloseForCompletion(block->statements(), iter, proxy);
+ BuildIteratorCloseForCompletion(use_scope, block->statements(), iter,
+ proxy);
DCHECK(block->statements()->length() == 2);
maybe_close = factory()->NewBlock(nullptr, 1, true, nopos);
@@ -6599,10 +5197,10 @@ void Parser::FinalizeIteratorUse(Variable* completion, Expression* condition,
// }
Statement* try_catch;
{
- Scope* catch_scope = NewScopeWithParent(scope(), CATCH_SCOPE);
+ Scope* catch_scope = NewScopeWithParent(use_scope, CATCH_SCOPE);
Variable* catch_variable =
catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
- kCreatedInitialized, Variable::NORMAL);
+ kCreatedInitialized, NORMAL_VARIABLE);
catch_scope->set_is_hidden();
Statement* rethrow;
@@ -6639,7 +5237,8 @@ void Parser::FinalizeIteratorUse(Variable* completion, Expression* condition,
target->statements()->Add(try_finally, zone());
}
-void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
+void Parser::BuildIteratorCloseForCompletion(Scope* scope,
+ ZoneList<Statement*>* statements,
Variable* iterator,
Expression* completion) {
//
@@ -6705,10 +5304,10 @@ void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
Block* catch_block = factory()->NewBlock(nullptr, 0, false, nopos);
- Scope* catch_scope = NewScope(CATCH_SCOPE);
+ Scope* catch_scope = NewScopeWithParent(scope, CATCH_SCOPE);
Variable* catch_variable =
catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
- kCreatedInitialized, Variable::NORMAL);
+ kCreatedInitialized, NORMAL_VARIABLE);
catch_scope->set_is_hidden();
try_call_return = factory()->NewTryCatchStatement(
@@ -6842,20 +5441,18 @@ Statement* Parser::FinalizeForOfStatement(ForOfStatement* loop,
Block* try_block = factory()->NewBlock(nullptr, 1, false, nopos);
try_block->statements()->Add(loop, zone());
- FinalizeIteratorUse(var_completion, closing_condition, loop->iterator(),
- try_block, final_loop);
+ // The scope in which the parser creates this loop.
+ Scope* loop_scope = scope()->outer_scope();
+ DCHECK_EQ(loop_scope->scope_type(), BLOCK_SCOPE);
+ DCHECK_EQ(scope()->scope_type(), BLOCK_SCOPE);
+
+ FinalizeIteratorUse(loop_scope, var_completion, closing_condition,
+ loop->iterator(), try_block, final_loop);
}
return final_loop;
}
-#ifdef DEBUG
-void Parser::Print(AstNode* node) {
- ast_value_factory()->Internalize(Isolate::Current());
- node->Print(Isolate::Current());
-}
-#endif // DEBUG
-
#undef CHECK_OK
#undef CHECK_OK_VOID
#undef CHECK_FAILED
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index b069f9af98..418bedf81b 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -21,7 +21,8 @@ namespace internal {
class ParseInfo;
class ScriptData;
-class Target;
+class ParserTarget;
+class ParserTargetScope;
class FunctionEntry BASE_EMBEDDED {
public:
@@ -138,239 +139,33 @@ struct ParserFormalParameters : FormalParametersBase {
};
template <>
-class ParserBaseTraits<Parser> {
- public:
- typedef ParserBaseTraits<Parser> ParserTraits;
-
- struct Type {
- typedef Variable GeneratorVariable;
-
- typedef v8::internal::AstProperties AstProperties;
-
- typedef v8::internal::ExpressionClassifier<ParserTraits>
- ExpressionClassifier;
-
- // Return types for traversing functions.
- typedef const AstRawString* Identifier;
- typedef v8::internal::Expression* Expression;
- typedef Yield* YieldExpression;
- typedef v8::internal::FunctionLiteral* FunctionLiteral;
- typedef v8::internal::ClassLiteral* ClassLiteral;
- typedef v8::internal::Literal* Literal;
- typedef ObjectLiteral::Property* ObjectLiteralProperty;
- typedef ZoneList<v8::internal::Expression*>* ExpressionList;
- typedef ZoneList<ObjectLiteral::Property*>* PropertyList;
- typedef ParserFormalParameters::Parameter FormalParameter;
- typedef ParserFormalParameters FormalParameters;
- typedef ZoneList<v8::internal::Statement*>* StatementList;
-
- // For constructing objects returned by the traversing functions.
- typedef AstNodeFactory Factory;
- };
-
- // TODO(nikolaos): The traits methods should not need to call methods
- // of the implementation object.
- Parser* delegate() { return reinterpret_cast<Parser*>(this); }
- const Parser* delegate() const {
- return reinterpret_cast<const Parser*>(this);
- }
-
- // Helper functions for recursive descent.
- bool IsEval(const AstRawString* identifier) const;
- bool IsArguments(const AstRawString* identifier) const;
- bool IsEvalOrArguments(const AstRawString* identifier) const;
- bool IsUndefined(const AstRawString* identifier) const;
- V8_INLINE bool IsFutureStrictReserved(const AstRawString* identifier) const;
-
- // Returns true if the expression is of type "this.foo".
- static bool IsThisProperty(Expression* expression);
-
- static bool IsIdentifier(Expression* expression);
-
- static const AstRawString* AsIdentifier(Expression* expression) {
- DCHECK(IsIdentifier(expression));
- return expression->AsVariableProxy()->raw_name();
- }
-
- bool IsPrototype(const AstRawString* identifier) const;
-
- bool IsConstructor(const AstRawString* identifier) const;
-
- bool IsDirectEvalCall(Expression* expression) const {
- if (!expression->IsCall()) return false;
- expression = expression->AsCall()->expression();
- return IsIdentifier(expression) && IsEval(AsIdentifier(expression));
- }
-
- static bool IsBoilerplateProperty(ObjectLiteral::Property* property) {
- return ObjectLiteral::IsBoilerplateProperty(property);
- }
-
- static bool IsArrayIndex(const AstRawString* string, uint32_t* index) {
- return string->AsArrayIndex(index);
- }
-
- static Expression* GetPropertyValue(ObjectLiteral::Property* property) {
- return property->value();
- }
-
- // Functions for encapsulating the differences between parsing and preparsing;
- // operations interleaved with the recursive descent.
- static void PushLiteralName(FuncNameInferrer* fni, const AstRawString* id) {
- fni->PushLiteralName(id);
- }
-
- void PushPropertyName(FuncNameInferrer* fni, Expression* expression);
-
- static void InferFunctionName(FuncNameInferrer* fni,
- FunctionLiteral* func_to_infer) {
- fni->AddFunction(func_to_infer);
- }
-
- // If we assign a function literal to a property we pretenure the
- // literal so it can be added as a constant function property.
- static void CheckAssigningFunctionLiteralToProperty(Expression* left,
- Expression* right);
-
- // Determine if the expression is a variable proxy and mark it as being used
- // in an assignment or with a increment/decrement operator.
- static Expression* MarkExpressionAsAssigned(Expression* expression);
-
- // Returns true if we have a binary expression between two numeric
- // literals. In that case, *x will be changed to an expression which is the
- // computed value.
- bool ShortcutNumericLiteralBinaryExpression(Expression** x, Expression* y,
- Token::Value op, int pos,
- AstNodeFactory* factory);
-
- // Rewrites the following types of unary expressions:
- // not <literal> -> true / false
- // + <numeric literal> -> <numeric literal>
- // - <numeric literal> -> <numeric literal with value negated>
- // ! <literal> -> true / false
- // The following rewriting rules enable the collection of type feedback
- // without any special stub and the multiplication is removed later in
- // Crankshaft's canonicalization pass.
- // + foo -> foo * 1
- // - foo -> foo * (-1)
- // ~ foo -> foo ^(~0)
- Expression* BuildUnaryExpression(Expression* expression, Token::Value op,
- int pos, AstNodeFactory* factory);
-
- Expression* BuildIteratorResult(Expression* value, bool done);
-
- // Generate AST node that throws a ReferenceError with the given type.
- Expression* NewThrowReferenceError(MessageTemplate::Template message,
- int pos);
-
- // Generate AST node that throws a SyntaxError with the given
- // type. The first argument may be null (in the handle sense) in
- // which case no arguments are passed to the constructor.
- Expression* NewThrowSyntaxError(MessageTemplate::Template message,
- const AstRawString* arg, int pos);
-
- // Generate AST node that throws a TypeError with the given
- // type. Both arguments must be non-null (in the handle sense).
- Expression* NewThrowTypeError(MessageTemplate::Template message,
- const AstRawString* arg, int pos);
-
- // Reporting errors.
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate::Template message,
- const char* arg = NULL,
- ParseErrorType error_type = kSyntaxError);
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate::Template message,
- const AstRawString* arg,
- ParseErrorType error_type = kSyntaxError);
-
- // "null" return type creators.
- static const AstRawString* EmptyIdentifier() { return nullptr; }
- static Expression* EmptyExpression() { return nullptr; }
- static Literal* EmptyLiteral() { return nullptr; }
- static ObjectLiteralProperty* EmptyObjectLiteralProperty() { return nullptr; }
- static FunctionLiteral* EmptyFunctionLiteral() { return nullptr; }
-
- // Used in error return values.
- static ZoneList<Expression*>* NullExpressionList() { return nullptr; }
-
- // Non-NULL empty string.
- V8_INLINE const AstRawString* EmptyIdentifierString() const;
-
- // Odd-ball literal creators.
- Literal* GetLiteralTheHole(int position, AstNodeFactory* factory) const;
-
- // Producing data during the recursive descent.
- const AstRawString* GetSymbol(Scanner* scanner) const;
- const AstRawString* GetNextSymbol(Scanner* scanner) const;
- const AstRawString* GetNumberAsSymbol(Scanner* scanner) const;
-
- Expression* ThisExpression(int pos = kNoSourcePosition);
- Expression* NewSuperPropertyReference(AstNodeFactory* factory, int pos);
- Expression* NewSuperCallReference(AstNodeFactory* factory, int pos);
- Expression* NewTargetExpression(int pos);
- Expression* FunctionSentExpression(AstNodeFactory* factory, int pos) const;
- Literal* ExpressionFromLiteral(Token::Value token, int pos, Scanner* scanner,
- AstNodeFactory* factory) const;
- Expression* ExpressionFromIdentifier(const AstRawString* name,
- int start_position, int end_position,
- InferName = InferName::kYes);
- Expression* ExpressionFromString(int pos, Scanner* scanner,
- AstNodeFactory* factory) const;
- Expression* GetIterator(Expression* iterable, AstNodeFactory* factory,
- int pos);
- ZoneList<v8::internal::Expression*>* NewExpressionList(int size,
- Zone* zone) const {
- return new(zone) ZoneList<v8::internal::Expression*>(size, zone);
- }
- ZoneList<ObjectLiteral::Property*>* NewPropertyList(int size,
- Zone* zone) const {
- return new(zone) ZoneList<ObjectLiteral::Property*>(size, zone);
- }
- ZoneList<v8::internal::Statement*>* NewStatementList(int size,
- Zone* zone) const {
- return new(zone) ZoneList<v8::internal::Statement*>(size, zone);
- }
-
- V8_INLINE void AddParameterInitializationBlock(
- const ParserFormalParameters& parameters,
- ZoneList<v8::internal::Statement*>* body, bool is_async, bool* ok);
-
- V8_INLINE void AddFormalParameter(ParserFormalParameters* parameters,
- Expression* pattern,
- Expression* initializer,
- int initializer_end_position, bool is_rest);
- V8_INLINE void DeclareFormalParameter(
- DeclarationScope* scope,
- const ParserFormalParameters::Parameter& parameter,
- Type::ExpressionClassifier* classifier);
- void ParseArrowFunctionFormalParameterList(
- ParserFormalParameters* parameters, Expression* params,
- const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
- const Scope::Snapshot& scope_snapshot, bool* ok);
-
- void ReindexLiterals(const ParserFormalParameters& parameters);
-
- V8_INLINE Expression* NoTemplateTag() { return NULL; }
- V8_INLINE static bool IsTaggedTemplate(const Expression* tag) {
- return tag != NULL;
- }
-
- V8_INLINE void MaterializeUnspreadArgumentsLiterals(int count) {}
-
- Expression* ExpressionListToExpression(ZoneList<Expression*>* args);
-
- void SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
- const AstRawString* name);
-
- void SetFunctionNameFromIdentifierRef(Expression* value,
- Expression* identifier);
-
- V8_INLINE ZoneList<typename Type::ExpressionClassifier::Error>*
- GetReportedErrorList() const;
- V8_INLINE Zone* zone() const;
-
- V8_INLINE ZoneList<Expression*>* GetNonPatternList() const;
+struct ParserTypes<Parser> {
+ typedef ParserBase<Parser> Base;
+ typedef Parser Impl;
+
+ typedef v8::internal::Variable Variable;
+
+ // Return types for traversing functions.
+ typedef const AstRawString* Identifier;
+ typedef v8::internal::Expression* Expression;
+ typedef v8::internal::FunctionLiteral* FunctionLiteral;
+ typedef ObjectLiteral::Property* ObjectLiteralProperty;
+ typedef ClassLiteral::Property* ClassLiteralProperty;
+ typedef ZoneList<v8::internal::Expression*>* ExpressionList;
+ typedef ZoneList<ObjectLiteral::Property*>* ObjectPropertyList;
+ typedef ZoneList<ClassLiteral::Property*>* ClassPropertyList;
+ typedef ParserFormalParameters FormalParameters;
+ typedef v8::internal::Statement* Statement;
+ typedef ZoneList<v8::internal::Statement*>* StatementList;
+ typedef v8::internal::Block* Block;
+ typedef v8::internal::BreakableStatement* BreakableStatement;
+ typedef v8::internal::IterationStatement* IterationStatement;
+
+ // For constructing objects returned by the traversing functions.
+ typedef AstNodeFactory Factory;
+
+ typedef ParserTarget Target;
+ typedef ParserTargetScope TargetScope;
};
class Parser : public ParserBase<Parser> {
@@ -390,8 +185,16 @@ class Parser : public ParserBase<Parser> {
bool Parse(ParseInfo* info);
void ParseOnBackground(ParseInfo* info);
- void DeserializeScopeChain(ParseInfo* info, Handle<Context> context,
- Scope::DeserializationMode deserialization_mode);
+ // Deserialize the scope chain prior to parsing in which the script is going
+ // to be executed. If the script is a top-level script, or the scope chain
+ // consists of only a native context, maybe_outer_scope_info should be an
+ // empty handle.
+ //
+ // This only deserializes the scope chain, but doesn't connect the scopes to
+ // their corresponding scope infos. Therefore, looking up variables in the
+ // deserialized scopes is not possible.
+ void DeserializeScopeChain(ParseInfo* info,
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info);
// Handle errors detected during parsing, move statistics to Isolate,
// internalize strings (move them to the heap).
@@ -400,9 +203,7 @@ class Parser : public ParserBase<Parser> {
private:
friend class ParserBase<Parser>;
- // TODO(nikolaos): This should not be necessary. It will be removed
- // when the traits object stops delegating to the implementation object.
- friend class ParserBaseTraits<Parser>;
+ friend class v8::internal::ExpressionClassifier<ParserTypes<Parser>>;
// Runtime encoding of different completion modes.
enum CompletionKind {
@@ -411,18 +212,12 @@ class Parser : public ParserBase<Parser> {
kAbruptCompletion
};
- enum class FunctionBodyType { kNormal, kSingleExpression };
-
- DeclarationScope* GetDeclarationScope() const {
- return scope()->GetDeclarationScope();
- }
- DeclarationScope* GetClosureScope() const {
- return scope()->GetClosureScope();
- }
Variable* NewTemporary(const AstRawString* name) {
return scope()->NewTemporary(name);
}
+ void PrepareGeneratorVariables(FunctionState* function_state);
+
// Limit the allowed number of local variables in a function. The hard limit
// is that offsets computed by FullCodeGenerator::StackOperand and similar
// functions are ints, and they should not overflow. In addition, accessing
@@ -455,12 +250,6 @@ class Parser : public ParserBase<Parser> {
return compile_options_ == ScriptCompiler::kProduceParserCache;
}
- // All ParseXXX functions take as the last argument an *ok parameter
- // which is set to false if parsing failed; it is unchanged otherwise.
- // By making the 'exception handling' explicit, we are forced to check
- // for failure at the call sites.
- void ParseStatementList(ZoneList<Statement*>* body, int end_token, bool* ok);
- Statement* ParseStatementListItem(bool* ok);
void ParseModuleItemList(ZoneList<Statement*>* body, bool* ok);
Statement* ParseModuleItem(bool* ok);
const AstRawString* ParseModuleSpecifier(bool* ok);
@@ -482,75 +271,52 @@ class Parser : public ParserBase<Parser> {
location(location) {}
};
ZoneList<const NamedImport*>* ParseNamedImports(int pos, bool* ok);
- Statement* ParseStatement(ZoneList<const AstRawString*>* labels,
- AllowLabelledFunctionStatement allow_function,
- bool* ok);
- Statement* ParseSubStatement(ZoneList<const AstRawString*>* labels,
- AllowLabelledFunctionStatement allow_function,
- bool* ok);
- Statement* ParseStatementAsUnlabelled(ZoneList<const AstRawString*>* labels,
- bool* ok);
- Statement* ParseFunctionDeclaration(bool* ok);
- Statement* ParseHoistableDeclaration(ZoneList<const AstRawString*>* names,
- bool default_export, bool* ok);
- Statement* ParseHoistableDeclaration(int pos, ParseFunctionFlags flags,
- ZoneList<const AstRawString*>* names,
- bool default_export, bool* ok);
- Statement* ParseAsyncFunctionDeclaration(ZoneList<const AstRawString*>* names,
- bool default_export, bool* ok);
- Expression* ParseAsyncFunctionExpression(bool* ok);
- Statement* ParseClassDeclaration(ZoneList<const AstRawString*>* names,
- bool default_export, bool* ok);
- Statement* ParseNativeDeclaration(bool* ok);
- Block* ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok);
- Block* ParseVariableStatement(VariableDeclarationContext var_context,
- ZoneList<const AstRawString*>* names,
- bool* ok);
- DoExpression* ParseDoExpression(bool* ok);
- Expression* ParseYieldStarExpression(bool* ok);
-
- struct DeclarationDescriptor {
- enum Kind { NORMAL, PARAMETER };
- Parser* parser;
- Scope* scope;
- Scope* hoist_scope;
- VariableMode mode;
- int declaration_pos;
- int initialization_pos;
- Kind declaration_kind;
- };
-
- struct DeclarationParsingResult {
- struct Declaration {
- Declaration(Expression* pattern, int initializer_position,
- Expression* initializer)
- : pattern(pattern),
- initializer_position(initializer_position),
- initializer(initializer) {}
-
- Expression* pattern;
- int initializer_position;
- Expression* initializer;
- };
-
- DeclarationParsingResult()
- : declarations(4),
- first_initializer_loc(Scanner::Location::invalid()),
- bindings_loc(Scanner::Location::invalid()) {}
-
- Block* BuildInitializationBlock(ZoneList<const AstRawString*>* names,
- bool* ok);
-
- DeclarationDescriptor descriptor;
- List<Declaration> declarations;
- Scanner::Location first_initializer_loc;
- Scanner::Location bindings_loc;
- };
+ Block* BuildInitializationBlock(DeclarationParsingResult* parsing_result,
+ ZoneList<const AstRawString*>* names,
+ bool* ok);
+ void DeclareAndInitializeVariables(
+ Block* block, const DeclarationDescriptor* declaration_descriptor,
+ const DeclarationParsingResult::Declaration* declaration,
+ ZoneList<const AstRawString*>* names, bool* ok);
+ ZoneList<const AstRawString*>* DeclareLabel(
+ ZoneList<const AstRawString*>* labels, VariableProxy* expr, bool* ok);
+ bool ContainsLabel(ZoneList<const AstRawString*>* labels,
+ const AstRawString* label);
+ Expression* RewriteReturn(Expression* return_value, int pos);
+ Statement* RewriteSwitchStatement(Expression* tag,
+ SwitchStatement* switch_statement,
+ ZoneList<CaseClause*>* cases, Scope* scope);
+ void RewriteCatchPattern(CatchInfo* catch_info, bool* ok);
+ void ValidateCatchBlock(const CatchInfo& catch_info, bool* ok);
+ Statement* RewriteTryStatement(Block* try_block, Block* catch_block,
+ Block* finally_block,
+ const CatchInfo& catch_info, int pos);
+
+ Statement* DeclareFunction(const AstRawString* variable_name,
+ FunctionLiteral* function, int pos,
+ bool is_generator, bool is_async,
+ ZoneList<const AstRawString*>* names, bool* ok);
+ V8_INLINE Statement* DeclareClass(const AstRawString* variable_name,
+ Expression* value,
+ ZoneList<const AstRawString*>* names,
+ int class_token_pos, int end_pos, bool* ok);
+ V8_INLINE void DeclareClassVariable(const AstRawString* name,
+ Scope* block_scope, ClassInfo* class_info,
+ int class_token_pos, bool* ok);
+ V8_INLINE void DeclareClassProperty(const AstRawString* class_name,
+ ClassLiteralProperty* property,
+ ClassInfo* class_info, bool* ok);
+ V8_INLINE Expression* RewriteClassLiteral(const AstRawString* name,
+ ClassInfo* class_info, int pos,
+ bool* ok);
+ V8_INLINE Statement* DeclareNative(const AstRawString* name, int pos,
+ bool* ok);
class PatternRewriter final : public AstVisitor<PatternRewriter> {
public:
static void DeclareAndInitializeVariables(
- Block* block, const DeclarationDescriptor* declaration_descriptor,
+ Parser* parser, Block* block,
+ const DeclarationDescriptor* declaration_descriptor,
const DeclarationParsingResult::Declaration* declaration,
ZoneList<const AstRawString*>* names, bool* ok);
@@ -627,47 +393,12 @@ class Parser : public ParserBase<Parser> {
DEFINE_AST_VISITOR_MEMBERS_WITHOUT_STACKOVERFLOW()
};
- Block* ParseVariableDeclarations(VariableDeclarationContext var_context,
- DeclarationParsingResult* parsing_result,
- ZoneList<const AstRawString*>* names,
- bool* ok);
- Statement* ParseExpressionOrLabelledStatement(
- ZoneList<const AstRawString*>* labels,
- AllowLabelledFunctionStatement allow_function, bool* ok);
- IfStatement* ParseIfStatement(ZoneList<const AstRawString*>* labels,
- bool* ok);
- Statement* ParseContinueStatement(bool* ok);
- Statement* ParseBreakStatement(ZoneList<const AstRawString*>* labels,
- bool* ok);
- Statement* ParseReturnStatement(bool* ok);
- Statement* ParseWithStatement(ZoneList<const AstRawString*>* labels,
- bool* ok);
- CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok);
- Statement* ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
- bool* ok);
- DoWhileStatement* ParseDoWhileStatement(ZoneList<const AstRawString*>* labels,
- bool* ok);
- WhileStatement* ParseWhileStatement(ZoneList<const AstRawString*>* labels,
- bool* ok);
- Statement* ParseForStatement(ZoneList<const AstRawString*>* labels, bool* ok);
- Statement* ParseThrowStatement(bool* ok);
- Expression* MakeCatchContext(Handle<String> id, VariableProxy* value);
- TryStatement* ParseTryStatement(bool* ok);
- DebuggerStatement* ParseDebuggerStatement(bool* ok);
- // Parse a SubStatement in strict mode, or with an extra block scope in
- // sloppy mode to handle
- // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
- // The legacy parameter indicates whether function declarations are
- // banned by the ES2015 specification in this location, and they are being
- // permitted here to match previous V8 behavior.
- Statement* ParseScopedStatement(ZoneList<const AstRawString*>* labels,
- bool legacy, bool* ok);
-
// !%_IsJSReceiver(result = iterator.next()) &&
// %ThrowIteratorResultNotAnObject(result)
Expression* BuildIteratorNextResult(Expression* iterator, Variable* result,
int pos);
+ Expression* GetIterator(Expression* iterable, int pos);
// Initialize the components of a for-in / for-of statement.
Statement* InitializeForEachStatement(ForEachStatement* stmt,
@@ -677,18 +408,17 @@ class Parser : public ParserBase<Parser> {
Expression* iterable, Statement* body,
bool finalize,
int next_result_pos = kNoSourcePosition);
- Statement* DesugarLexicalBindingsInForStatement(
- Scope* inner_scope, VariableMode mode,
- ZoneList<const AstRawString*>* names, ForStatement* loop, Statement* init,
- Expression* cond, Statement* next, Statement* body, bool* ok);
+ Block* RewriteForVarInLegacy(const ForInfo& for_info);
+ void DesugarBindingInForEachStatement(ForInfo* for_info, Block** body_block,
+ Expression** each_variable, bool* ok);
+ Block* CreateForEachStatementTDZ(Block* init_block, const ForInfo& for_info,
+ bool* ok);
- void DesugarAsyncFunctionBody(const AstRawString* function_name, Scope* scope,
- ZoneList<Statement*>* body,
- Type::ExpressionClassifier* classifier,
- FunctionKind kind, FunctionBodyType type,
- bool accept_IN, int pos, bool* ok);
+ Statement* DesugarLexicalBindingsInForStatement(
+ ForStatement* loop, Statement* init, Expression* cond, Statement* next,
+ Statement* body, Scope* inner_scope, const ForInfo& for_info, bool* ok);
- void RewriteDoExpression(Expression* expr, bool* ok);
+ Expression* RewriteDoExpression(Block* body, int pos, bool* ok);
FunctionLiteral* ParseFunctionLiteral(
const AstRawString* name, Scanner::Location function_name_location,
@@ -696,14 +426,10 @@ class Parser : public ParserBase<Parser> {
int function_token_position, FunctionLiteral::FunctionType type,
LanguageMode language_mode, bool* ok);
- Expression* ParseClassLiteral(ExpressionClassifier* classifier,
- const AstRawString* name,
- Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos,
- bool* ok);
-
- // Magical syntax support.
- Expression* ParseV8Intrinsic(bool* ok);
+ Expression* InstallHomeObject(Expression* function_literal,
+ Expression* home_object);
+ FunctionLiteral* SynthesizeClassFieldInitializer(int count);
+ FunctionLiteral* InsertClassFieldInitializer(FunctionLiteral* constructor);
// Get odd-ball literals.
Literal* GetLiteralUndefined(int position);
@@ -724,14 +450,11 @@ class Parser : public ParserBase<Parser> {
void InsertShadowingVarBindingInitializers(Block* block);
// Implement sloppy block-scoped functions, ES2015 Annex B 3.3
- void InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope,
- Scope* complex_params_scope,
- bool* ok);
+ void InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope);
- static InitializationFlag DefaultInitializationFlag(VariableMode mode);
VariableProxy* NewUnresolved(const AstRawString* name, int begin_pos,
int end_pos = kNoSourcePosition,
- Variable::Kind kind = Variable::NORMAL);
+ VariableKind kind = NORMAL_VARIABLE);
VariableProxy* NewUnresolved(const AstRawString* name);
Variable* Declare(Declaration* declaration,
DeclarationDescriptor::Kind declaration_kind,
@@ -750,25 +473,24 @@ class Parser : public ParserBase<Parser> {
// Factory methods.
FunctionLiteral* DefaultConstructor(const AstRawString* name, bool call_super,
- int pos, int end_pos,
- LanguageMode language_mode);
+ bool requires_class_field_init, int pos,
+ int end_pos, LanguageMode language_mode);
// Skip over a lazy function, either using cached data if we have it, or
// by parsing the function with PreParser. Consumes the ending }.
- //
- // If bookmark is set, the (pre-)parser may decide to abort skipping
+ // If may_abort == true, the (pre-)parser may decide to abort skipping
// in order to force the function to be eagerly parsed, after all.
- // In this case, it'll reset the scanner using the bookmark.
- void SkipLazyFunctionBody(int* materialized_literal_count,
- int* expected_property_count, bool* ok,
- Scanner::BookmarkScope* bookmark = nullptr);
+ LazyParsingResult SkipLazyFunctionBody(int* materialized_literal_count,
+ int* expected_property_count,
+ bool is_inner_function, bool may_abort,
+ bool* ok);
PreParser::PreParseResult ParseLazyFunctionBodyWithPreParser(
- SingletonLogger* logger, Scanner::BookmarkScope* bookmark = nullptr);
+ SingletonLogger* logger, bool is_inner_function, bool may_abort);
Block* BuildParameterInitializationBlock(
const ParserFormalParameters& parameters, bool* ok);
- Block* BuildRejectPromiseOnException(Block* block);
+ Block* BuildRejectPromiseOnException(Block* block, bool* ok);
// Consumes the ending }.
ZoneList<Statement*>* ParseEagerFunctionBody(
@@ -817,25 +539,16 @@ class Parser : public ParserBase<Parser> {
Expression* tag);
uint32_t ComputeTemplateLiteralHash(const TemplateLiteral* lit);
- void ParseAsyncArrowSingleExpressionBody(ZoneList<Statement*>* body,
- bool accept_IN,
- ExpressionClassifier* classifier,
- int pos, bool* ok) {
- DesugarAsyncFunctionBody(ast_value_factory()->empty_string(), scope(), body,
- classifier, kAsyncArrowFunction,
- FunctionBodyType::kSingleExpression, accept_IN,
- pos, ok);
- }
-
- ZoneList<v8::internal::Expression*>* PrepareSpreadArguments(
- ZoneList<v8::internal::Expression*>* list);
- Expression* SpreadCall(Expression* function,
- ZoneList<v8::internal::Expression*>* args, int pos);
- Expression* SpreadCallNew(Expression* function,
- ZoneList<v8::internal::Expression*>* args, int pos);
+ ZoneList<Expression*>* PrepareSpreadArguments(ZoneList<Expression*>* list);
+ Expression* SpreadCall(Expression* function, ZoneList<Expression*>* args,
+ int pos);
+ Expression* SpreadCallNew(Expression* function, ZoneList<Expression*>* args,
+ int pos);
+ Expression* CallClassFieldInitializer(Scope* scope, Expression* this_expr);
+ Expression* RewriteSuperCall(Expression* call_expression);
void SetLanguageMode(Scope* scope, LanguageMode mode);
- void RaiseLanguageMode(LanguageMode mode);
+ void SetAsmModule();
V8_INLINE void MarkCollectedTailCallExpressions();
V8_INLINE void MarkTailPosition(Expression* expression);
@@ -852,7 +565,7 @@ class Parser : public ParserBase<Parser> {
V8_INLINE Expression* RewriteSpreads(ArrayLiteral* lit);
// Rewrite expressions that are not used as patterns
- V8_INLINE void RewriteNonPattern(ExpressionClassifier* classifier, bool* ok);
+ V8_INLINE void RewriteNonPattern(bool* ok);
V8_INLINE void QueueDestructuringAssignmentForRewriting(
Expression* assignment);
@@ -861,153 +574,526 @@ class Parser : public ParserBase<Parser> {
friend class InitializerRewriter;
void RewriteParameterInitializer(Expression* expr, Scope* scope);
+ Expression* BuildInitialYield(int pos, FunctionKind kind);
Expression* BuildCreateJSGeneratorObject(int pos, FunctionKind kind);
- Expression* BuildPromiseResolve(Expression* value, int pos);
- Expression* BuildPromiseReject(Expression* value, int pos);
+ Expression* BuildResolvePromise(Expression* value, int pos);
+ Expression* BuildRejectPromise(Expression* value, int pos);
+ Variable* PromiseVariable();
// Generic AST generator for throwing errors from compiled code.
Expression* NewThrowError(Runtime::FunctionId function_id,
MessageTemplate::Template message,
const AstRawString* arg, int pos);
- void FinalizeIteratorUse(Variable* completion, Expression* condition,
- Variable* iter, Block* iterator_use, Block* result);
+ void FinalizeIteratorUse(Scope* use_scope, Variable* completion,
+ Expression* condition, Variable* iter,
+ Block* iterator_use, Block* result);
Statement* FinalizeForOfStatement(ForOfStatement* loop, Variable* completion,
int pos);
void BuildIteratorClose(ZoneList<Statement*>* statements, Variable* iterator,
Variable* input, Variable* output);
- void BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
+ void BuildIteratorCloseForCompletion(Scope* scope,
+ ZoneList<Statement*>* statements,
Variable* iterator,
Expression* completion);
Statement* CheckCallable(Variable* var, Expression* error, int pos);
V8_INLINE Expression* RewriteAwaitExpression(Expression* value, int pos);
+ V8_INLINE void PrepareAsyncFunctionBody(ZoneList<Statement*>* body,
+ FunctionKind kind, int pos);
+ V8_INLINE void RewriteAsyncFunctionBody(ZoneList<Statement*>* body,
+ Block* block,
+ Expression* return_value, bool* ok);
Expression* RewriteYieldStar(Expression* generator, Expression* expression,
int pos);
- void ParseArrowFunctionFormalParameters(ParserFormalParameters* parameters,
- Expression* params, int end_pos,
- bool* ok);
+ void AddArrowFunctionFormalParameters(ParserFormalParameters* parameters,
+ Expression* params, int end_pos,
+ bool* ok);
void SetFunctionName(Expression* value, const AstRawString* name);
- Scanner scanner_;
- PreParser* reusable_preparser_;
- Scope* original_scope_; // for ES5 function declarations in sloppy eval
- Target* target_stack_; // for break, continue statements
- ScriptCompiler::CompileOptions compile_options_;
- ParseData* cached_parse_data_;
+ // Helper functions for recursive descent.
+ V8_INLINE bool IsEval(const AstRawString* identifier) const {
+ return identifier == ast_value_factory()->eval_string();
+ }
- PendingCompilationErrorHandler pending_error_handler_;
+ V8_INLINE bool IsArguments(const AstRawString* identifier) const {
+ return identifier == ast_value_factory()->arguments_string();
+ }
- // Other information which will be stored in Parser and moved to Isolate after
- // parsing.
- int use_counts_[v8::Isolate::kUseCounterFeatureCount];
- int total_preparse_skipped_;
- HistogramTimer* pre_parse_timer_;
+ V8_INLINE bool IsEvalOrArguments(const AstRawString* identifier) const {
+ return IsEval(identifier) || IsArguments(identifier);
+ }
- bool parsing_on_main_thread_;
+ V8_INLINE bool IsUndefined(const AstRawString* identifier) const {
+ return identifier == ast_value_factory()->undefined_string();
+ }
-#ifdef DEBUG
- void Print(AstNode* node);
-#endif // DEBUG
-};
+ V8_INLINE bool IsFutureStrictReserved(const AstRawString* identifier) const {
+ return scanner()->IdentifierIsFutureStrictReserved(identifier);
+ }
-bool ParserBaseTraits<Parser>::IsFutureStrictReserved(
- const AstRawString* identifier) const {
- return delegate()->scanner()->IdentifierIsFutureStrictReserved(identifier);
-}
+ // Returns true if the expression is of type "this.foo".
+ V8_INLINE static bool IsThisProperty(Expression* expression) {
+ DCHECK(expression != NULL);
+ Property* property = expression->AsProperty();
+ return property != NULL && property->obj()->IsVariableProxy() &&
+ property->obj()->AsVariableProxy()->is_this();
+ }
-const AstRawString* ParserBaseTraits<Parser>::EmptyIdentifierString() const {
- return delegate()->ast_value_factory()->empty_string();
-}
+ // This returns true if the expression is an indentifier (wrapped
+ // inside a variable proxy). We exclude the case of 'this', which
+ // has been converted to a variable proxy.
+ V8_INLINE static bool IsIdentifier(Expression* expression) {
+ DCHECK_NOT_NULL(expression);
+ VariableProxy* operand = expression->AsVariableProxy();
+ return operand != nullptr && !operand->is_this();
+ }
+ V8_INLINE static const AstRawString* AsIdentifier(Expression* expression) {
+ DCHECK(IsIdentifier(expression));
+ return expression->AsVariableProxy()->raw_name();
+ }
-// Support for handling complex values (array and object literals) that
-// can be fully handled at compile time.
-class CompileTimeValue: public AllStatic {
- public:
- enum LiteralType {
- OBJECT_LITERAL_FAST_ELEMENTS,
- OBJECT_LITERAL_SLOW_ELEMENTS,
- ARRAY_LITERAL
- };
+ V8_INLINE VariableProxy* AsIdentifierExpression(Expression* expression) {
+ return expression->AsVariableProxy();
+ }
- static bool IsCompileTimeValue(Expression* expression);
+ V8_INLINE bool IsPrototype(const AstRawString* identifier) const {
+ return identifier == ast_value_factory()->prototype_string();
+ }
- // Get the value as a compile time value.
- static Handle<FixedArray> GetValue(Isolate* isolate, Expression* expression);
+ V8_INLINE bool IsConstructor(const AstRawString* identifier) const {
+ return identifier == ast_value_factory()->constructor_string();
+ }
- // Get the type of a compile time value returned by GetValue().
- static LiteralType GetLiteralType(Handle<FixedArray> value);
+ V8_INLINE bool IsDirectEvalCall(Expression* expression) const {
+ if (!expression->IsCall()) return false;
+ expression = expression->AsCall()->expression();
+ return IsIdentifier(expression) && IsEval(AsIdentifier(expression));
+ }
- // Get the elements array of a compile time value returned by GetValue().
- static Handle<FixedArray> GetElements(Handle<FixedArray> value);
+ V8_INLINE static bool IsBoilerplateProperty(
+ ObjectLiteral::Property* property) {
+ return ObjectLiteral::IsBoilerplateProperty(property);
+ }
- private:
- static const int kLiteralTypeSlot = 0;
- static const int kElementsSlot = 1;
+ V8_INLINE bool IsNative(Expression* expr) const {
+ DCHECK_NOT_NULL(expr);
+ return expr->IsVariableProxy() &&
+ expr->AsVariableProxy()->raw_name() ==
+ ast_value_factory()->native_string();
+ }
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompileTimeValue);
-};
+ V8_INLINE static bool IsArrayIndex(const AstRawString* string,
+ uint32_t* index) {
+ return string->AsArrayIndex(index);
+ }
-void ParserBaseTraits<Parser>::AddFormalParameter(
- ParserFormalParameters* parameters, Expression* pattern,
- Expression* initializer, int initializer_end_position, bool is_rest) {
- bool is_simple = pattern->IsVariableProxy() && initializer == nullptr;
- const AstRawString* name =
- is_simple ? pattern->AsVariableProxy()->raw_name()
- : delegate()->ast_value_factory()->empty_string();
- parameters->params.Add(
- ParserFormalParameters::Parameter(name, pattern, initializer,
- initializer_end_position, is_rest),
- parameters->scope->zone());
-}
-
-void ParserBaseTraits<Parser>::DeclareFormalParameter(
- DeclarationScope* scope, const ParserFormalParameters::Parameter& parameter,
- Type::ExpressionClassifier* classifier) {
- bool is_duplicate = false;
- bool is_simple = classifier->is_simple_parameter_list();
- auto name = is_simple || parameter.is_rest
- ? parameter.name
- : delegate()->ast_value_factory()->empty_string();
- auto mode = is_simple || parameter.is_rest ? VAR : TEMPORARY;
- if (!is_simple) scope->SetHasNonSimpleParameters();
- bool is_optional = parameter.initializer != nullptr;
- Variable* var =
- scope->DeclareParameter(name, mode, is_optional, parameter.is_rest,
- &is_duplicate, delegate()->ast_value_factory());
- if (is_duplicate) {
- classifier->RecordDuplicateFormalParameterError(
- delegate()->scanner()->location());
- }
- if (is_sloppy(scope->language_mode())) {
- // TODO(sigurds) Mark every parameter as maybe assigned. This is a
- // conservative approximation necessary to account for parameters
- // that are assigned via the arguments array.
- var->set_maybe_assigned();
- }
-}
-
-void ParserBaseTraits<Parser>::AddParameterInitializationBlock(
- const ParserFormalParameters& parameters,
- ZoneList<v8::internal::Statement*>* body, bool is_async, bool* ok) {
- if (!parameters.is_simple) {
- auto* init_block =
- delegate()->BuildParameterInitializationBlock(parameters, ok);
- if (!*ok) return;
+ V8_INLINE bool IsUseStrictDirective(Statement* statement) const {
+ return IsStringLiteral(statement, ast_value_factory()->use_strict_string());
+ }
+
+ V8_INLINE bool IsUseAsmDirective(Statement* statement) const {
+ return IsStringLiteral(statement, ast_value_factory()->use_asm_string());
+ }
+
+ // Returns true if the statement is an expression statement containing
+ // a single string literal. If a second argument is given, the literal
+ // is also compared with it and the result is true only if they are equal.
+ V8_INLINE bool IsStringLiteral(Statement* statement,
+ const AstRawString* arg = nullptr) const {
+ ExpressionStatement* e_stat = statement->AsExpressionStatement();
+ if (e_stat == nullptr) return false;
+ Literal* literal = e_stat->expression()->AsLiteral();
+ if (literal == nullptr || !literal->raw_value()->IsString()) return false;
+ return arg == nullptr || literal->raw_value()->AsString() == arg;
+ }
+
+ V8_INLINE static Expression* GetPropertyValue(LiteralProperty* property) {
+ return property->value();
+ }
+
+ V8_INLINE void GetDefaultStrings(
+ const AstRawString** default_string,
+ const AstRawString** star_default_star_string) {
+ *default_string = ast_value_factory()->default_string();
+ *star_default_star_string = ast_value_factory()->star_default_star_string();
+ }
+
+ // Functions for encapsulating the differences between parsing and preparsing;
+ // operations interleaved with the recursive descent.
+ V8_INLINE void PushLiteralName(const AstRawString* id) {
+ DCHECK_NOT_NULL(fni_);
+ fni_->PushLiteralName(id);
+ }
+
+ V8_INLINE void PushVariableName(const AstRawString* id) {
+ DCHECK_NOT_NULL(fni_);
+ fni_->PushVariableName(id);
+ }
+
+ V8_INLINE void PushPropertyName(Expression* expression) {
+ DCHECK_NOT_NULL(fni_);
+ if (expression->IsPropertyName()) {
+ fni_->PushLiteralName(expression->AsLiteral()->AsRawPropertyName());
+ } else {
+ fni_->PushLiteralName(ast_value_factory()->anonymous_function_string());
+ }
+ }
+
+ V8_INLINE void PushEnclosingName(const AstRawString* name) {
+ DCHECK_NOT_NULL(fni_);
+ fni_->PushEnclosingName(name);
+ }
+
+ V8_INLINE void AddFunctionForNameInference(FunctionLiteral* func_to_infer) {
+ DCHECK_NOT_NULL(fni_);
+ fni_->AddFunction(func_to_infer);
+ }
+
+ V8_INLINE void InferFunctionName() {
+ DCHECK_NOT_NULL(fni_);
+ fni_->Infer();
+ }
+
+ // If we assign a function literal to a property we pretenure the
+ // literal so it can be added as a constant function property.
+ V8_INLINE static void CheckAssigningFunctionLiteralToProperty(
+ Expression* left, Expression* right) {
+ DCHECK(left != NULL);
+ if (left->IsProperty() && right->IsFunctionLiteral()) {
+ right->AsFunctionLiteral()->set_pretenure();
+ }
+ }
+
+ // Determine if the expression is a variable proxy and mark it as being used
+ // in an assignment or with a increment/decrement operator.
+ V8_INLINE static Expression* MarkExpressionAsAssigned(
+ Expression* expression) {
+ VariableProxy* proxy =
+ expression != NULL ? expression->AsVariableProxy() : NULL;
+ if (proxy != NULL) proxy->set_is_assigned();
+ return expression;
+ }
+
+ // Returns true if we have a binary expression between two numeric
+ // literals. In that case, *x will be changed to an expression which is the
+ // computed value.
+ bool ShortcutNumericLiteralBinaryExpression(Expression** x, Expression* y,
+ Token::Value op, int pos);
+
+ // Rewrites the following types of unary expressions:
+ // not <literal> -> true / false
+ // + <numeric literal> -> <numeric literal>
+ // - <numeric literal> -> <numeric literal with value negated>
+ // ! <literal> -> true / false
+ // The following rewriting rules enable the collection of type feedback
+ // without any special stub and the multiplication is removed later in
+ // Crankshaft's canonicalization pass.
+ // + foo -> foo * 1
+ // - foo -> foo * (-1)
+ // ~ foo -> foo ^(~0)
+ Expression* BuildUnaryExpression(Expression* expression, Token::Value op,
+ int pos);
+
+ Expression* BuildIteratorResult(Expression* value, bool done);
+
+ // Generate AST node that throws a ReferenceError with the given type.
+ V8_INLINE Expression* NewThrowReferenceError(
+ MessageTemplate::Template message, int pos) {
+ return NewThrowError(Runtime::kNewReferenceError, message,
+ ast_value_factory()->empty_string(), pos);
+ }
+
+ // Generate AST node that throws a SyntaxError with the given
+ // type. The first argument may be null (in the handle sense) in
+ // which case no arguments are passed to the constructor.
+ V8_INLINE Expression* NewThrowSyntaxError(MessageTemplate::Template message,
+ const AstRawString* arg, int pos) {
+ return NewThrowError(Runtime::kNewSyntaxError, message, arg, pos);
+ }
+
+ // Generate AST node that throws a TypeError with the given
+ // type. Both arguments must be non-null (in the handle sense).
+ V8_INLINE Expression* NewThrowTypeError(MessageTemplate::Template message,
+ const AstRawString* arg, int pos) {
+ return NewThrowError(Runtime::kNewTypeError, message, arg, pos);
+ }
+
+ // Reporting errors.
+ V8_INLINE void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate::Template message,
+ const char* arg = NULL,
+ ParseErrorType error_type = kSyntaxError) {
+ if (stack_overflow()) {
+ // Suppress the error message (syntax error or such) in the presence of a
+ // stack overflow. The isolate allows only one pending exception at at
+ // time
+ // and we want to report the stack overflow later.
+ return;
+ }
+ pending_error_handler_.ReportMessageAt(source_location.beg_pos,
+ source_location.end_pos, message,
+ arg, error_type);
+ }
+
+ V8_INLINE void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate::Template message,
+ const AstRawString* arg,
+ ParseErrorType error_type = kSyntaxError) {
+ if (stack_overflow()) {
+ // Suppress the error message (syntax error or such) in the presence of a
+ // stack overflow. The isolate allows only one pending exception at at
+ // time
+ // and we want to report the stack overflow later.
+ return;
+ }
+ pending_error_handler_.ReportMessageAt(source_location.beg_pos,
+ source_location.end_pos, message,
+ arg, error_type);
+ }
+
+ // "null" return type creators.
+ V8_INLINE static const AstRawString* EmptyIdentifier() { return nullptr; }
+ V8_INLINE static bool IsEmptyIdentifier(const AstRawString* name) {
+ return name == nullptr;
+ }
+ V8_INLINE static Expression* EmptyExpression() { return nullptr; }
+ V8_INLINE static Literal* EmptyLiteral() { return nullptr; }
+ V8_INLINE static ObjectLiteralProperty* EmptyObjectLiteralProperty() {
+ return nullptr;
+ }
+ V8_INLINE static ClassLiteralProperty* EmptyClassLiteralProperty() {
+ return nullptr;
+ }
+ V8_INLINE static FunctionLiteral* EmptyFunctionLiteral() { return nullptr; }
+ V8_INLINE static Block* NullBlock() { return nullptr; }
+
+ V8_INLINE static bool IsEmptyExpression(Expression* expr) {
+ return expr == nullptr;
+ }
+
+ // Used in error return values.
+ V8_INLINE static ZoneList<Expression*>* NullExpressionList() {
+ return nullptr;
+ }
+ V8_INLINE static bool IsNullExpressionList(ZoneList<Expression*>* exprs) {
+ return exprs == nullptr;
+ }
+ V8_INLINE static ZoneList<Statement*>* NullStatementList() { return nullptr; }
+ V8_INLINE static bool IsNullStatementList(ZoneList<Statement*>* stmts) {
+ return stmts == nullptr;
+ }
+ V8_INLINE static Statement* NullStatement() { return nullptr; }
+ V8_INLINE bool IsNullStatement(Statement* stmt) { return stmt == nullptr; }
+ V8_INLINE bool IsEmptyStatement(Statement* stmt) {
+ DCHECK_NOT_NULL(stmt);
+ return stmt->IsEmpty();
+ }
+
+ // Non-NULL empty string.
+ V8_INLINE const AstRawString* EmptyIdentifierString() const {
+ return ast_value_factory()->empty_string();
+ }
+
+ // Odd-ball literal creators.
+ V8_INLINE Literal* GetLiteralTheHole(int position) {
+ return factory()->NewTheHoleLiteral(kNoSourcePosition);
+ }
+
+ // Producing data during the recursive descent.
+ V8_INLINE const AstRawString* GetSymbol() const {
+ const AstRawString* result = scanner()->CurrentSymbol(ast_value_factory());
+ DCHECK(result != NULL);
+ return result;
+ }
+
+ V8_INLINE const AstRawString* GetNextSymbol() const {
+ return scanner()->NextSymbol(ast_value_factory());
+ }
+
+ V8_INLINE const AstRawString* GetNumberAsSymbol() const {
+ double double_value = scanner()->DoubleValue();
+ char array[100];
+ const char* string = DoubleToCString(double_value, ArrayVector(array));
+ return ast_value_factory()->GetOneByteString(string);
+ }
+ V8_INLINE Expression* ThisExpression(int pos = kNoSourcePosition) {
+ return NewUnresolved(ast_value_factory()->this_string(), pos, pos + 4,
+ THIS_VARIABLE);
+ }
+
+ Expression* NewSuperPropertyReference(int pos);
+ Expression* NewSuperCallReference(int pos);
+ Expression* NewTargetExpression(int pos);
+ Expression* FunctionSentExpression(int pos);
+
+ Literal* ExpressionFromLiteral(Token::Value token, int pos);
+
+ V8_INLINE Expression* ExpressionFromIdentifier(
+ const AstRawString* name, int start_position, int end_position,
+ InferName infer = InferName::kYes) {
+ if (infer == InferName::kYes) {
+ fni_->PushVariableName(name);
+ }
+ return NewUnresolved(name, start_position, end_position);
+ }
+
+ V8_INLINE Expression* ExpressionFromString(int pos) {
+ const AstRawString* symbol = GetSymbol();
+ fni_->PushLiteralName(symbol);
+ return factory()->NewStringLiteral(symbol, pos);
+ }
+
+ V8_INLINE ZoneList<Expression*>* NewExpressionList(int size) const {
+ return new (zone()) ZoneList<Expression*>(size, zone());
+ }
+ V8_INLINE ZoneList<ObjectLiteral::Property*>* NewObjectPropertyList(
+ int size) const {
+ return new (zone()) ZoneList<ObjectLiteral::Property*>(size, zone());
+ }
+ V8_INLINE ZoneList<ClassLiteral::Property*>* NewClassPropertyList(
+ int size) const {
+ return new (zone()) ZoneList<ClassLiteral::Property*>(size, zone());
+ }
+ V8_INLINE ZoneList<Statement*>* NewStatementList(int size) const {
+ return new (zone()) ZoneList<Statement*>(size, zone());
+ }
+ V8_INLINE ZoneList<CaseClause*>* NewCaseClauseList(int size) const {
+ return new (zone()) ZoneList<CaseClause*>(size, zone());
+ }
+
+ V8_INLINE Expression* NewV8Intrinsic(const AstRawString* name,
+ ZoneList<Expression*>* args, int pos,
+ bool* ok);
+
+ V8_INLINE Statement* NewThrowStatement(Expression* exception, int pos) {
+ return factory()->NewExpressionStatement(
+ factory()->NewThrow(exception, pos), pos);
+ }
+
+ V8_INLINE void AddParameterInitializationBlock(
+ const ParserFormalParameters& parameters, ZoneList<Statement*>* body,
+ bool is_async, bool* ok) {
+ if (parameters.is_simple) return;
+ auto* init_block = BuildParameterInitializationBlock(parameters, ok);
+ if (!*ok) return;
if (is_async) {
- init_block = delegate()->BuildRejectPromiseOnException(init_block);
+ init_block = BuildRejectPromiseOnException(init_block, ok);
+ if (!*ok) return;
}
+ if (init_block != nullptr) body->Add(init_block, zone());
+ }
- if (init_block != nullptr) {
- body->Add(init_block, delegate()->zone());
+ V8_INLINE void AddFormalParameter(ParserFormalParameters* parameters,
+ Expression* pattern,
+ Expression* initializer,
+ int initializer_end_position,
+ bool is_rest) {
+ bool is_simple = pattern->IsVariableProxy() && initializer == nullptr;
+ const AstRawString* name = is_simple
+ ? pattern->AsVariableProxy()->raw_name()
+ : ast_value_factory()->empty_string();
+ parameters->params.Add(
+ ParserFormalParameters::Parameter(name, pattern, initializer,
+ initializer_end_position, is_rest),
+ parameters->scope->zone());
+ }
+
+ V8_INLINE void DeclareFormalParameter(
+ DeclarationScope* scope,
+ const ParserFormalParameters::Parameter& parameter) {
+ bool is_duplicate = false;
+ bool is_simple = classifier()->is_simple_parameter_list();
+ auto name = is_simple || parameter.is_rest
+ ? parameter.name
+ : ast_value_factory()->empty_string();
+ auto mode = is_simple || parameter.is_rest ? VAR : TEMPORARY;
+ if (!is_simple) scope->SetHasNonSimpleParameters();
+ bool is_optional = parameter.initializer != nullptr;
+ Variable* var =
+ scope->DeclareParameter(name, mode, is_optional, parameter.is_rest,
+ &is_duplicate, ast_value_factory());
+ if (is_duplicate) {
+ classifier()->RecordDuplicateFormalParameterError(scanner()->location());
+ }
+ if (is_sloppy(scope->language_mode())) {
+ // TODO(sigurds) Mark every parameter as maybe assigned. This is a
+ // conservative approximation necessary to account for parameters
+ // that are assigned via the arguments array.
+ var->set_maybe_assigned();
}
}
-}
+
+ void DeclareArrowFunctionFormalParameters(ParserFormalParameters* parameters,
+ Expression* params,
+ const Scanner::Location& params_loc,
+ Scanner::Location* duplicate_loc,
+ bool* ok);
+
+ void ReindexLiterals(const ParserFormalParameters& parameters);
+
+ V8_INLINE Expression* NoTemplateTag() { return NULL; }
+ V8_INLINE static bool IsTaggedTemplate(const Expression* tag) {
+ return tag != NULL;
+ }
+
+ V8_INLINE void MaterializeUnspreadArgumentsLiterals(int count) {}
+
+ Expression* ExpressionListToExpression(ZoneList<Expression*>* args);
+
+ void AddAccessorPrefixToFunctionName(bool is_get, FunctionLiteral* function,
+ const AstRawString* name);
+
+ void SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
+ const AstRawString* name);
+
+ void SetFunctionNameFromIdentifierRef(Expression* value,
+ Expression* identifier);
+
+ V8_INLINE ZoneList<typename ExpressionClassifier::Error>*
+ GetReportedErrorList() const {
+ return function_state_->GetReportedErrorList();
+ }
+
+ V8_INLINE ZoneList<Expression*>* GetNonPatternList() const {
+ return function_state_->non_patterns_to_rewrite();
+ }
+
+ V8_INLINE void CountUsage(v8::Isolate::UseCounterFeature feature) {
+ ++use_counts_[feature];
+ }
+
+ // Parser's private field members.
+ friend class DiscardableZoneScope; // Uses reusable_preparser_.
+ // FIXME(marja): Make reusable_preparser_ always use its own temp Zone (call
+ // DeleteAll after each function), so this won't be needed.
+
+ Scanner scanner_;
+ PreParser* reusable_preparser_;
+ Scope* original_scope_; // for ES5 function declarations in sloppy eval
+
+ friend class ParserTarget;
+ friend class ParserTargetScope;
+ ParserTarget* target_stack_; // for break, continue statements
+
+ ScriptCompiler::CompileOptions compile_options_;
+ ParseData* cached_parse_data_;
+
+ PendingCompilationErrorHandler pending_error_handler_;
+
+ // Other information which will be stored in Parser and moved to Isolate after
+ // parsing.
+ int use_counts_[v8::Isolate::kUseCounterFeatureCount];
+ int total_preparse_skipped_;
+ HistogramTimer* pre_parse_timer_;
+
+ bool parsing_on_main_thread_;
+};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index 1831a2927d..7898f87244 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -12,7 +12,8 @@ namespace v8 {
namespace internal {
void Parser::PatternRewriter::DeclareAndInitializeVariables(
- Block* block, const DeclarationDescriptor* declaration_descriptor,
+ Parser* parser, Block* block,
+ const DeclarationDescriptor* declaration_descriptor,
const DeclarationParsingResult::Declaration* declaration,
ZoneList<const AstRawString*>* names, bool* ok) {
PatternRewriter rewriter;
@@ -20,7 +21,7 @@ void Parser::PatternRewriter::DeclareAndInitializeVariables(
DCHECK(block->ignore_completion_value());
rewriter.scope_ = declaration_descriptor->scope;
- rewriter.parser_ = declaration_descriptor->parser;
+ rewriter.parser_ = parser;
rewriter.context_ = BINDING;
rewriter.pattern_ = declaration->pattern;
rewriter.initializer_position_ = declaration->initializer_position;
@@ -36,11 +37,12 @@ void Parser::PatternRewriter::DeclareAndInitializeVariables(
void Parser::PatternRewriter::RewriteDestructuringAssignment(
Parser* parser, RewritableExpression* to_rewrite, Scope* scope) {
- PatternRewriter rewriter;
-
+ DCHECK(!scope->HasBeenRemoved());
DCHECK(!to_rewrite->is_rewritten());
bool ok = true;
+
+ PatternRewriter rewriter;
rewriter.scope_ = scope;
rewriter.parser_ = parser;
rewriter.context_ = ASSIGNMENT;
@@ -139,23 +141,16 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// which the variable or constant is declared. Only function variables have
// an initial value in the declaration (because they are initialized upon
// entering the function).
- //
- // If we have a legacy const declaration, in an inner scope, the proxy
- // is always bound to the declared variable (independent of possibly
- // surrounding 'with' statements).
- // For let/const declarations in harmony mode, we can also immediately
- // pre-resolve the proxy because it resides in the same scope as the
- // declaration.
const AstRawString* name = pattern->raw_name();
- VariableProxy* proxy = descriptor_->scope->NewUnresolved(
- factory(), name, parser_->scanner()->location().beg_pos,
+ VariableProxy* proxy = factory()->NewVariableProxy(
+ name, NORMAL_VARIABLE, parser_->scanner()->location().beg_pos,
parser_->scanner()->location().end_pos);
Declaration* declaration = factory()->NewVariableDeclaration(
proxy, descriptor_->scope, descriptor_->declaration_pos);
- Variable* var = parser_->Declare(declaration, descriptor_->declaration_kind,
- descriptor_->mode,
- DefaultInitializationFlag(descriptor_->mode),
- ok_, descriptor_->hoist_scope);
+ Variable* var = parser_->Declare(
+ declaration, descriptor_->declaration_kind, descriptor_->mode,
+ Variable::DefaultInitializationFlag(descriptor_->mode), ok_,
+ descriptor_->hoist_scope);
if (!*ok_) return;
DCHECK_NOT_NULL(var);
DCHECK(proxy->is_resolved());
@@ -267,12 +262,14 @@ Variable* Parser::PatternRewriter::CreateTempVar(Expression* value) {
void Parser::PatternRewriter::VisitRewritableExpression(
RewritableExpression* node) {
// If this is not a destructuring assignment...
- if (!IsAssignmentContext() || !node->expression()->IsAssignment()) {
+ if (!IsAssignmentContext()) {
// Mark the node as rewritten to prevent redundant rewriting, and
// perform BindingPattern rewriting
DCHECK(!node->is_rewritten());
node->Rewrite(node->expression());
return Visit(node->expression());
+ } else if (!node->expression()->IsAssignment()) {
+ return Visit(node->expression());
}
if (node->is_rewritten()) return;
@@ -374,7 +371,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
auto temp = *temp_var = CreateTempVar(current_value_);
auto iterator = CreateTempVar(parser_->GetIterator(
- factory()->NewVariableProxy(temp), factory(), kNoSourcePosition));
+ factory()->NewVariableProxy(temp), kNoSourcePosition));
auto done =
CreateTempVar(factory()->NewBooleanLiteral(false, kNoSourcePosition));
auto result = CreateTempVar();
@@ -601,8 +598,9 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
Expression* closing_condition = factory()->NewUnaryOperation(
Token::NOT, factory()->NewVariableProxy(done), nopos);
- parser_->FinalizeIteratorUse(completion, closing_condition, iterator, block_,
- target);
+
+ parser_->FinalizeIteratorUse(scope(), completion, closing_condition, iterator,
+ block_, target);
block_ = target;
}
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index b1bbbf60c8..88470f7fa9 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -10,6 +10,7 @@
#include "src/conversions.h"
#include "src/globals.h"
#include "src/list.h"
+#include "src/parsing/duplicate-finder.h"
#include "src/parsing/parser-base.h"
#include "src/parsing/preparse-data-format.h"
#include "src/parsing/preparse-data.h"
@@ -28,34 +29,18 @@ namespace internal {
// thus it must never be used where only a single statement
// is correct (e.g. an if statement branch w/o braces)!
-#define CHECK_OK ok); \
- if (!*ok) return Statement::Default(); \
+#define CHECK_OK_VALUE(x) ok); \
+ if (!*ok) return x; \
((void)0
#define DUMMY ) // to make indentation work
#undef DUMMY
-// Used in functions where the return type is not ExpressionT.
-#define CHECK_OK_CUSTOM(x) ok); \
- if (!*ok) return this->x(); \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
+#define CHECK_OK CHECK_OK_VALUE(Expression::Default())
+#define CHECK_OK_VOID CHECK_OK_VALUE(this->Void())
-void ParserBaseTraits<PreParser>::ReportMessageAt(
- Scanner::Location source_location, MessageTemplate::Template message,
- const char* arg, ParseErrorType error_type) {
- delegate()->log_->LogMessage(source_location.beg_pos, source_location.end_pos,
- message, arg, error_type);
-}
+namespace {
-void ParserBaseTraits<PreParser>::ReportMessageAt(
- Scanner::Location source_location, MessageTemplate::Template message,
- const AstRawString* arg, ParseErrorType error_type) {
- UNREACHABLE();
-}
-
-PreParserIdentifier ParserBaseTraits<PreParser>::GetSymbol(
- Scanner* scanner) const {
+PreParserIdentifier GetSymbolHelper(Scanner* scanner) {
switch (scanner->current_token()) {
case Token::ENUM:
return PreParserIdentifier::Enum();
@@ -86,49 +71,51 @@ PreParserIdentifier ParserBaseTraits<PreParser>::GetSymbol(
}
}
-PreParserExpression ParserBaseTraits<PreParser>::ExpressionFromString(
- int pos, Scanner* scanner, PreParserFactory* factory) const {
- if (scanner->UnescapedLiteralMatches("use strict", 10)) {
- return PreParserExpression::UseStrictStringLiteral();
+} // unnamed namespace
+
+PreParserIdentifier PreParser::GetSymbol() const {
+ PreParserIdentifier symbol = GetSymbolHelper(scanner());
+ if (track_unresolved_variables_) {
+ const AstRawString* result = scanner()->CurrentSymbol(ast_value_factory());
+ DCHECK_NOT_NULL(result);
+ symbol.string_ = result;
}
- return PreParserExpression::StringLiteral();
+ return symbol;
}
PreParser::PreParseResult PreParser::PreParseLazyFunction(
- LanguageMode language_mode, FunctionKind kind, bool has_simple_parameters,
- bool parsing_module, ParserRecorder* log, Scanner::BookmarkScope* bookmark,
- int* use_counts) {
+ DeclarationScope* function_scope, bool parsing_module, ParserRecorder* log,
+ bool is_inner_function, bool may_abort, int* use_counts) {
+ DCHECK_EQ(FUNCTION_SCOPE, function_scope->scope_type());
parsing_module_ = parsing_module;
log_ = log;
use_counts_ = use_counts;
- // Lazy functions always have trivial outer scopes (no with/catch scopes).
+ DCHECK(!track_unresolved_variables_);
+ track_unresolved_variables_ = is_inner_function;
+
+ // The caller passes the function_scope which is not yet inserted into the
+ // scope_state_. All scopes above the function_scope are ignored by the
+ // PreParser.
DCHECK_NULL(scope_state_);
- DeclarationScope* top_scope = NewScriptScope();
- FunctionState top_state(&function_state_, &scope_state_, top_scope,
- kNormalFunction);
- scope()->SetLanguageMode(language_mode);
- DeclarationScope* function_scope = NewFunctionScope(kind);
- if (!has_simple_parameters) function_scope->SetHasNonSimpleParameters();
- FunctionState function_state(&function_state_, &scope_state_, function_scope,
- kind);
+ FunctionState function_state(&function_state_, &scope_state_, function_scope);
DCHECK_EQ(Token::LBRACE, scanner()->current_token());
bool ok = true;
int start_position = peek_position();
- ParseLazyFunctionLiteralBody(&ok, bookmark);
+ LazyParsingResult result = ParseLazyFunctionLiteralBody(may_abort, &ok);
use_counts_ = nullptr;
- if (bookmark && bookmark->HasBeenReset()) {
- // Do nothing, as we've just aborted scanning this function.
+ track_unresolved_variables_ = false;
+ if (result == kLazyParsingAborted) {
+ return kPreParseAbort;
} else if (stack_overflow()) {
return kPreParseStackOverflow;
} else if (!ok) {
ReportUnexpectedToken(scanner()->current_token());
} else {
DCHECK_EQ(Token::RBRACE, scanner()->peek());
- if (is_strict(scope()->language_mode())) {
+ if (is_strict(function_scope->language_mode())) {
int end_pos = scanner()->location().end_pos;
CheckStrictOctalLiteral(start_position, end_pos, &ok);
- CheckDecimalLiteralWithLeadingZero(use_counts, start_position, end_pos);
- if (!ok) return kPreParseSuccess;
+ CheckDecimalLiteralWithLeadingZero(start_position, end_pos);
}
}
return kPreParseSuccess;
@@ -148,908 +135,6 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-
-PreParser::Statement PreParser::ParseStatementListItem(bool* ok) {
- // ECMA 262 6th Edition
- // StatementListItem[Yield, Return] :
- // Statement[?Yield, ?Return]
- // Declaration[?Yield]
- //
- // Declaration[Yield] :
- // HoistableDeclaration[?Yield]
- // ClassDeclaration[?Yield]
- // LexicalDeclaration[In, ?Yield]
- //
- // HoistableDeclaration[Yield, Default] :
- // FunctionDeclaration[?Yield, ?Default]
- // GeneratorDeclaration[?Yield, ?Default]
- //
- // LexicalDeclaration[In, Yield] :
- // LetOrConst BindingList[?In, ?Yield] ;
-
- switch (peek()) {
- case Token::FUNCTION:
- return ParseHoistableDeclaration(ok);
- case Token::CLASS:
- return ParseClassDeclaration(ok);
- case Token::CONST:
- return ParseVariableStatement(kStatementListItem, ok);
- case Token::LET:
- if (IsNextLetKeyword()) {
- return ParseVariableStatement(kStatementListItem, ok);
- }
- break;
- case Token::ASYNC:
- if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
- !scanner()->HasAnyLineTerminatorAfterNext()) {
- Consume(Token::ASYNC);
- return ParseAsyncFunctionDeclaration(ok);
- }
- /* falls through */
- default:
- break;
- }
- return ParseStatement(kAllowLabelledFunctionStatement, ok);
-}
-
-
-void PreParser::ParseStatementList(int end_token, bool* ok,
- Scanner::BookmarkScope* bookmark) {
- // SourceElements ::
- // (Statement)* <end_token>
-
- // Bookkeeping for trial parse if bookmark is set:
- DCHECK_IMPLIES(bookmark, bookmark->HasBeenSet());
- bool maybe_reset = bookmark != nullptr;
- int count_statements = 0;
-
- bool directive_prologue = true;
- while (peek() != end_token) {
- if (directive_prologue && peek() != Token::STRING) {
- directive_prologue = false;
- }
- bool starts_with_identifier = peek() == Token::IDENTIFIER;
- Scanner::Location token_loc = scanner()->peek_location();
- Statement statement = ParseStatementListItem(CHECK_OK_CUSTOM(Void));
-
- if (directive_prologue) {
- bool use_strict_found = statement.IsUseStrictLiteral();
-
- if (use_strict_found) {
- scope()->SetLanguageMode(
- static_cast<LanguageMode>(scope()->language_mode() | STRICT));
- } else if (!statement.IsStringLiteral()) {
- directive_prologue = false;
- }
-
- if (use_strict_found && !scope()->HasSimpleParameters()) {
- // TC39 deemed "use strict" directives to be an error when occurring
- // in the body of a function with non-simple parameter list, on
- // 29/7/2015. https://goo.gl/ueA7Ln
- ReportMessageAt(token_loc,
- MessageTemplate::kIllegalLanguageModeDirective,
- "use strict");
- *ok = false;
- return;
- }
- }
-
- // If we're allowed to reset to a bookmark, we will do so when we see a long
- // and trivial function.
- // Our current definition of 'long and trivial' is:
- // - over 200 statements
- // - all starting with an identifier (i.e., no if, for, while, etc.)
- if (maybe_reset && (!starts_with_identifier ||
- ++count_statements > kLazyParseTrialLimit)) {
- if (count_statements > kLazyParseTrialLimit) {
- bookmark->Reset();
- return;
- }
- maybe_reset = false;
- }
- }
-}
-
-
-PreParser::Statement PreParser::ParseStatement(
- AllowLabelledFunctionStatement allow_function, bool* ok) {
- // Statement ::
- // EmptyStatement
- // ...
-
- if (peek() == Token::SEMICOLON) {
- Next();
- return Statement::Default();
- }
- return ParseSubStatement(allow_function, ok);
-}
-
-PreParser::Statement PreParser::ParseScopedStatement(bool legacy, bool* ok) {
- if (is_strict(language_mode()) || peek() != Token::FUNCTION ||
- (legacy && allow_harmony_restrictive_declarations())) {
- return ParseSubStatement(kDisallowLabelledFunctionStatement, ok);
- } else {
- BlockState block_state(&scope_state_);
- return ParseFunctionDeclaration(ok);
- }
-}
-
-PreParser::Statement PreParser::ParseSubStatement(
- AllowLabelledFunctionStatement allow_function, bool* ok) {
- // Statement ::
- // Block
- // VariableStatement
- // EmptyStatement
- // ExpressionStatement
- // IfStatement
- // IterationStatement
- // ContinueStatement
- // BreakStatement
- // ReturnStatement
- // WithStatement
- // LabelledStatement
- // SwitchStatement
- // ThrowStatement
- // TryStatement
- // DebuggerStatement
-
- // Note: Since labels can only be used by 'break' and 'continue'
- // statements, which themselves are only valid within blocks,
- // iterations or 'switch' statements (i.e., BreakableStatements),
- // labels can be simply ignored in all other cases; except for
- // trivial labeled break statements 'label: break label' which is
- // parsed into an empty statement.
-
- // Keep the source position of the statement
- switch (peek()) {
- case Token::LBRACE:
- return ParseBlock(ok);
-
- case Token::SEMICOLON:
- Next();
- return Statement::Default();
-
- case Token::IF:
- return ParseIfStatement(ok);
-
- case Token::DO:
- return ParseDoWhileStatement(ok);
-
- case Token::WHILE:
- return ParseWhileStatement(ok);
-
- case Token::FOR:
- return ParseForStatement(ok);
-
- case Token::CONTINUE:
- return ParseContinueStatement(ok);
-
- case Token::BREAK:
- return ParseBreakStatement(ok);
-
- case Token::RETURN:
- return ParseReturnStatement(ok);
-
- case Token::WITH:
- return ParseWithStatement(ok);
-
- case Token::SWITCH:
- return ParseSwitchStatement(ok);
-
- case Token::THROW:
- return ParseThrowStatement(ok);
-
- case Token::TRY:
- return ParseTryStatement(ok);
-
- case Token::FUNCTION:
- // FunctionDeclaration only allowed as a StatementListItem, not in
- // an arbitrary Statement position. Exceptions such as
- // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
- // are handled by calling ParseScopedStatement rather than
- // ParseSubStatement directly.
- ReportMessageAt(scanner()->peek_location(),
- is_strict(language_mode())
- ? MessageTemplate::kStrictFunction
- : MessageTemplate::kSloppyFunction);
- *ok = false;
- return Statement::Default();
-
- case Token::DEBUGGER:
- return ParseDebuggerStatement(ok);
-
- case Token::VAR:
- return ParseVariableStatement(kStatement, ok);
-
- default:
- return ParseExpressionOrLabelledStatement(allow_function, ok);
- }
-}
-
-PreParser::Statement PreParser::ParseHoistableDeclaration(
- int pos, ParseFunctionFlags flags, bool* ok) {
- const bool is_generator = flags & ParseFunctionFlags::kIsGenerator;
- const bool is_async = flags & ParseFunctionFlags::kIsAsync;
- DCHECK(!is_generator || !is_async);
-
- bool is_strict_reserved = false;
- Identifier name = ParseIdentifierOrStrictReservedWord(
- &is_strict_reserved, CHECK_OK);
-
- ParseFunctionLiteral(name, scanner()->location(),
- is_strict_reserved ? kFunctionNameIsStrictReserved
- : kFunctionNameValidityUnknown,
- is_generator ? FunctionKind::kGeneratorFunction
- : is_async ? FunctionKind::kAsyncFunction
- : FunctionKind::kNormalFunction,
- pos, FunctionLiteral::kDeclaration, language_mode(),
- CHECK_OK);
- return Statement::FunctionDeclaration();
-}
-
-PreParser::Statement PreParser::ParseAsyncFunctionDeclaration(bool* ok) {
- // AsyncFunctionDeclaration ::
- // async [no LineTerminator here] function BindingIdentifier[Await]
- // ( FormalParameters[Await] ) { AsyncFunctionBody }
- DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
- int pos = position();
- Expect(Token::FUNCTION, CHECK_OK);
- ParseFunctionFlags flags = ParseFunctionFlags::kIsAsync;
- return ParseHoistableDeclaration(pos, flags, ok);
-}
-
-PreParser::Statement PreParser::ParseHoistableDeclaration(bool* ok) {
- // FunctionDeclaration ::
- // 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
- // GeneratorDeclaration ::
- // 'function' '*' Identifier '(' FormalParameterListopt ')'
- // '{' FunctionBody '}'
-
- Expect(Token::FUNCTION, CHECK_OK);
- int pos = position();
- ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
- if (Check(Token::MUL)) {
- flags |= ParseFunctionFlags::kIsGenerator;
- }
- return ParseHoistableDeclaration(pos, flags, ok);
-}
-
-
-PreParser::Statement PreParser::ParseClassDeclaration(bool* ok) {
- Expect(Token::CLASS, CHECK_OK);
-
- int pos = position();
- bool is_strict_reserved = false;
- Identifier name =
- ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
- ParseClassLiteral(nullptr, name, scanner()->location(), is_strict_reserved,
- pos, CHECK_OK);
- return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseBlock(bool* ok) {
- // Block ::
- // '{' StatementList '}'
-
- Expect(Token::LBRACE, CHECK_OK);
- Statement final = Statement::Default();
- {
- BlockState block_state(&scope_state_);
- while (peek() != Token::RBRACE) {
- final = ParseStatementListItem(CHECK_OK);
- }
- }
- Expect(Token::RBRACE, ok);
- return final;
-}
-
-
-PreParser::Statement PreParser::ParseVariableStatement(
- VariableDeclarationContext var_context,
- bool* ok) {
- // VariableStatement ::
- // VariableDeclarations ';'
-
- Statement result = ParseVariableDeclarations(
- var_context, nullptr, nullptr, nullptr, nullptr, nullptr, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return result;
-}
-
-
-// If the variable declaration declares exactly one non-const
-// variable, then *var is set to that variable. In all other cases,
-// *var is untouched; in particular, it is the caller's responsibility
-// to initialize it properly. This mechanism is also used for the parsing
-// of 'for-in' loops.
-PreParser::Statement PreParser::ParseVariableDeclarations(
- VariableDeclarationContext var_context, int* num_decl, bool* is_lexical,
- bool* is_binding_pattern, Scanner::Location* first_initializer_loc,
- Scanner::Location* bindings_loc, bool* ok) {
- // VariableDeclarations ::
- // ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
- //
- // The ES6 Draft Rev3 specifies the following grammar for const declarations
- //
- // ConstDeclaration ::
- // const ConstBinding (',' ConstBinding)* ';'
- // ConstBinding ::
- // Identifier '=' AssignmentExpression
- //
- // TODO(ES6):
- // ConstBinding ::
- // BindingPattern '=' AssignmentExpression
- bool require_initializer = false;
- bool lexical = false;
- bool is_pattern = false;
- if (peek() == Token::VAR) {
- Consume(Token::VAR);
- } else if (peek() == Token::CONST) {
- // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
- //
- // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
- //
- // * It is a Syntax Error if the code that matches this production is not
- // contained in extended code.
- //
- // However disallowing const in sloppy mode will break compatibility with
- // existing pages. Therefore we keep allowing const with the old
- // non-harmony semantics in sloppy mode.
- Consume(Token::CONST);
- DCHECK(var_context != kStatement);
- require_initializer = true;
- lexical = true;
- } else if (peek() == Token::LET) {
- Consume(Token::LET);
- DCHECK(var_context != kStatement);
- lexical = true;
- } else {
- *ok = false;
- return Statement::Default();
- }
-
- // The scope of a var/const declared variable anywhere inside a function
- // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). The scope
- // of a let declared variable is the scope of the immediately enclosing
- // block.
- int nvars = 0; // the number of variables declared
- int bindings_start = peek_position();
- do {
- // Parse binding pattern.
- if (nvars > 0) Consume(Token::COMMA);
- int decl_pos = peek_position();
- PreParserExpression pattern = PreParserExpression::Default();
- {
- ExpressionClassifier pattern_classifier(this);
- pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
-
- ValidateBindingPattern(&pattern_classifier, CHECK_OK);
- if (lexical) {
- ValidateLetPattern(&pattern_classifier, CHECK_OK);
- }
- }
-
- is_pattern = pattern.IsObjectLiteral() || pattern.IsArrayLiteral();
-
- Scanner::Location variable_loc = scanner()->location();
- nvars++;
- if (Check(Token::ASSIGN)) {
- ExpressionClassifier classifier(this);
- ParseAssignmentExpression(var_context != kForStatement, &classifier,
- CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
-
- variable_loc.end_pos = scanner()->location().end_pos;
- if (first_initializer_loc && !first_initializer_loc->IsValid()) {
- *first_initializer_loc = variable_loc;
- }
- } else if ((require_initializer || is_pattern) &&
- (var_context != kForStatement || !PeekInOrOf())) {
- ReportMessageAt(
- Scanner::Location(decl_pos, scanner()->location().end_pos),
- MessageTemplate::kDeclarationMissingInitializer,
- is_pattern ? "destructuring" : "const");
- *ok = false;
- return Statement::Default();
- }
- } while (peek() == Token::COMMA);
-
- if (bindings_loc) {
- *bindings_loc =
- Scanner::Location(bindings_start, scanner()->location().end_pos);
- }
-
- if (num_decl != nullptr) *num_decl = nvars;
- if (is_lexical != nullptr) *is_lexical = lexical;
- if (is_binding_pattern != nullptr) *is_binding_pattern = is_pattern;
- return Statement::Default();
-}
-
-PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
- Consume(Token::FUNCTION);
- int pos = position();
- ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
- if (Check(Token::MUL)) {
- flags |= ParseFunctionFlags::kIsGenerator;
- if (allow_harmony_restrictive_declarations()) {
- ReportMessageAt(scanner()->location(),
- MessageTemplate::kGeneratorInLegacyContext);
- *ok = false;
- return Statement::Default();
- }
- }
- return ParseHoistableDeclaration(pos, flags, ok);
-}
-
-PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(
- AllowLabelledFunctionStatement allow_function, bool* ok) {
- // ExpressionStatement | LabelledStatement ::
- // Expression ';'
- // Identifier ':' Statement
-
- switch (peek()) {
- case Token::FUNCTION:
- case Token::LBRACE:
- UNREACHABLE(); // Always handled by the callers.
- case Token::CLASS:
- ReportUnexpectedToken(Next());
- *ok = false;
- return Statement::Default();
-
- default:
- break;
- }
-
- bool starts_with_identifier = peek_any_identifier();
- ExpressionClassifier classifier(this);
- Expression expr = ParseExpression(true, &classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
-
- // Even if the expression starts with an identifier, it is not necessarily an
- // identifier. For example, "foo + bar" starts with an identifier but is not
- // an identifier.
- if (starts_with_identifier && expr.IsIdentifier() && peek() == Token::COLON) {
- // Expression is a single identifier, and not, e.g., a parenthesized
- // identifier.
- DCHECK(!expr.AsIdentifier().IsEnum());
- DCHECK(!parsing_module_ || !expr.AsIdentifier().IsAwait());
- DCHECK(is_sloppy(language_mode()) ||
- !IsFutureStrictReserved(expr.AsIdentifier()));
- Consume(Token::COLON);
- // ES#sec-labelled-function-declarations Labelled Function Declarations
- if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
- if (allow_function == kAllowLabelledFunctionStatement) {
- return ParseFunctionDeclaration(ok);
- } else {
- return ParseScopedStatement(true, ok);
- }
- }
- Statement statement =
- ParseStatement(kDisallowLabelledFunctionStatement, ok);
- return statement.IsJumpStatement() ? Statement::Default() : statement;
- // Preparsing is disabled for extensions (because the extension details
- // aren't passed to lazily compiled functions), so we don't
- // accept "native function" in the preparser.
- }
- // Parsed expression statement.
- ExpectSemicolon(CHECK_OK);
- return Statement::ExpressionStatement(expr);
-}
-
-
-PreParser::Statement PreParser::ParseIfStatement(bool* ok) {
- // IfStatement ::
- // 'if' '(' Expression ')' Statement ('else' Statement)?
-
- Expect(Token::IF, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- Statement stat = ParseScopedStatement(false, CHECK_OK);
- if (peek() == Token::ELSE) {
- Next();
- Statement else_stat = ParseScopedStatement(false, CHECK_OK);
- stat = (stat.IsJumpStatement() && else_stat.IsJumpStatement()) ?
- Statement::Jump() : Statement::Default();
- } else {
- stat = Statement::Default();
- }
- return stat;
-}
-
-
-PreParser::Statement PreParser::ParseContinueStatement(bool* ok) {
- // ContinueStatement ::
- // 'continue' [no line terminator] Identifier? ';'
-
- Expect(Token::CONTINUE, CHECK_OK);
- Token::Value tok = peek();
- if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
- tok != Token::SEMICOLON &&
- tok != Token::RBRACE &&
- tok != Token::EOS) {
- // ECMA allows "eval" or "arguments" as labels even in strict mode.
- ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
- }
- ExpectSemicolon(CHECK_OK);
- return Statement::Jump();
-}
-
-
-PreParser::Statement PreParser::ParseBreakStatement(bool* ok) {
- // BreakStatement ::
- // 'break' [no line terminator] Identifier? ';'
-
- Expect(Token::BREAK, CHECK_OK);
- Token::Value tok = peek();
- if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
- tok != Token::SEMICOLON &&
- tok != Token::RBRACE &&
- tok != Token::EOS) {
- // ECMA allows "eval" or "arguments" as labels even in strict mode.
- ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
- }
- ExpectSemicolon(CHECK_OK);
- return Statement::Jump();
-}
-
-
-PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
- // ReturnStatement ::
- // 'return' [no line terminator] Expression? ';'
-
- // Consume the return token. It is necessary to do before
- // reporting any errors on it, because of the way errors are
- // reported (underlining).
- Expect(Token::RETURN, CHECK_OK);
-
- // An ECMAScript program is considered syntactically incorrect if it
- // contains a return statement that is not within the body of a
- // function. See ECMA-262, section 12.9, page 67.
- // This is not handled during preparsing.
-
- Token::Value tok = peek();
- if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
- tok != Token::SEMICOLON &&
- tok != Token::RBRACE &&
- tok != Token::EOS) {
- // Because of the return code rewriting that happens in case of a subclass
- // constructor we don't want to accept tail calls, therefore we don't set
- // ReturnExprScope to kInsideValidReturnStatement here.
- ReturnExprContext return_expr_context =
- IsSubclassConstructor(function_state_->kind())
- ? function_state_->return_expr_context()
- : ReturnExprContext::kInsideValidReturnStatement;
-
- ReturnExprScope maybe_allow_tail_calls(function_state_,
- return_expr_context);
- ParseExpression(true, CHECK_OK);
- }
- ExpectSemicolon(CHECK_OK);
- return Statement::Jump();
-}
-
-
-PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
- // WithStatement ::
- // 'with' '(' Expression ')' Statement
- Expect(Token::WITH, CHECK_OK);
- if (is_strict(language_mode())) {
- ReportMessageAt(scanner()->location(), MessageTemplate::kStrictWith);
- *ok = false;
- return Statement::Default();
- }
- Expect(Token::LPAREN, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
-
- Scope* with_scope = NewScope(WITH_SCOPE);
- BlockState block_state(&scope_state_, with_scope);
- ParseScopedStatement(true, CHECK_OK);
- return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
- // SwitchStatement ::
- // 'switch' '(' Expression ')' '{' CaseClause* '}'
-
- Expect(Token::SWITCH, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
-
- {
- BlockState cases_block_state(&scope_state_);
- Expect(Token::LBRACE, CHECK_OK);
- Token::Value token = peek();
- while (token != Token::RBRACE) {
- if (token == Token::CASE) {
- Expect(Token::CASE, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- } else {
- Expect(Token::DEFAULT, CHECK_OK);
- }
- Expect(Token::COLON, CHECK_OK);
- token = peek();
- Statement statement = Statement::Jump();
- while (token != Token::CASE &&
- token != Token::DEFAULT &&
- token != Token::RBRACE) {
- statement = ParseStatementListItem(CHECK_OK);
- token = peek();
- }
- }
- }
- Expect(Token::RBRACE, ok);
- return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) {
- // DoStatement ::
- // 'do' Statement 'while' '(' Expression ')' ';'
-
- Expect(Token::DO, CHECK_OK);
- ParseScopedStatement(true, CHECK_OK);
- Expect(Token::WHILE, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, ok);
- if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
- return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseWhileStatement(bool* ok) {
- // WhileStatement ::
- // 'while' '(' Expression ')' Statement
-
- Expect(Token::WHILE, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- ParseScopedStatement(true, ok);
- return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseForStatement(bool* ok) {
- // ForStatement ::
- // 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
-
- // Create an in-between scope for let-bound iteration variables.
- bool has_lexical = false;
-
- BlockState block_state(&scope_state_);
- Expect(Token::FOR, CHECK_OK);
- Expect(Token::LPAREN, CHECK_OK);
- if (peek() != Token::SEMICOLON) {
- ForEachStatement::VisitMode mode;
- if (peek() == Token::VAR || peek() == Token::CONST ||
- (peek() == Token::LET && IsNextLetKeyword())) {
- int decl_count;
- bool is_lexical;
- bool is_binding_pattern;
- Scanner::Location first_initializer_loc = Scanner::Location::invalid();
- Scanner::Location bindings_loc = Scanner::Location::invalid();
- ParseVariableDeclarations(kForStatement, &decl_count, &is_lexical,
- &is_binding_pattern, &first_initializer_loc,
- &bindings_loc, CHECK_OK);
- if (is_lexical) has_lexical = true;
- if (CheckInOrOf(&mode, ok)) {
- if (!*ok) return Statement::Default();
- if (decl_count != 1) {
- ReportMessageAt(bindings_loc,
- MessageTemplate::kForInOfLoopMultiBindings,
- ForEachStatement::VisitModeString(mode));
- *ok = false;
- return Statement::Default();
- }
- if (first_initializer_loc.IsValid() &&
- (is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
- is_lexical || is_binding_pattern || allow_harmony_for_in())) {
- // Only increment the use count if we would have let this through
- // without the flag.
- if (use_counts_ != nullptr && allow_harmony_for_in()) {
- ++use_counts_[v8::Isolate::kForInInitializer];
- }
- ReportMessageAt(first_initializer_loc,
- MessageTemplate::kForInOfLoopInitializer,
- ForEachStatement::VisitModeString(mode));
- *ok = false;
- return Statement::Default();
- }
-
- if (mode == ForEachStatement::ITERATE) {
- ExpressionClassifier classifier(this);
- ParseAssignmentExpression(true, &classifier, CHECK_OK);
- RewriteNonPattern(&classifier, CHECK_OK);
- } else {
- ParseExpression(true, CHECK_OK);
- }
-
- Expect(Token::RPAREN, CHECK_OK);
- {
- ReturnExprScope no_tail_calls(function_state_,
- ReturnExprContext::kInsideForInOfBody);
- ParseScopedStatement(true, CHECK_OK);
- }
- return Statement::Default();
- }
- } else {
- int lhs_beg_pos = peek_position();
- ExpressionClassifier classifier(this);
- Expression lhs = ParseExpression(false, &classifier, CHECK_OK);
- int lhs_end_pos = scanner()->location().end_pos;
- bool is_for_each = CheckInOrOf(&mode, CHECK_OK);
- bool is_destructuring = is_for_each &&
- (lhs->IsArrayLiteral() || lhs->IsObjectLiteral());
-
- if (is_destructuring) {
- ValidateAssignmentPattern(&classifier, CHECK_OK);
- } else {
- ValidateExpression(&classifier, CHECK_OK);
- }
-
- if (is_for_each) {
- if (!is_destructuring) {
- lhs = CheckAndRewriteReferenceExpression(
- lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
- kSyntaxError, CHECK_OK);
- }
-
- if (mode == ForEachStatement::ITERATE) {
- ExpressionClassifier classifier(this);
- ParseAssignmentExpression(true, &classifier, CHECK_OK);
- RewriteNonPattern(&classifier, CHECK_OK);
- } else {
- ParseExpression(true, CHECK_OK);
- }
-
- Expect(Token::RPAREN, CHECK_OK);
- {
- BlockState block_state(&scope_state_);
- ParseScopedStatement(true, CHECK_OK);
- }
- return Statement::Default();
- }
- }
- }
-
- // Parsed initializer at this point.
- Expect(Token::SEMICOLON, CHECK_OK);
-
- // If there are let bindings, then condition and the next statement of the
- // for loop must be parsed in a new scope.
- Scope* inner_scope = scope();
- // TODO(verwaest): Allocate this through a ScopeState as well.
- if (has_lexical) inner_scope = NewScopeWithParent(inner_scope, BLOCK_SCOPE);
-
- {
- BlockState block_state(&scope_state_, inner_scope);
-
- if (peek() != Token::SEMICOLON) {
- ParseExpression(true, CHECK_OK);
- }
- Expect(Token::SEMICOLON, CHECK_OK);
-
- if (peek() != Token::RPAREN) {
- ParseExpression(true, CHECK_OK);
- }
- Expect(Token::RPAREN, CHECK_OK);
-
- ParseScopedStatement(true, ok);
- }
- return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
- // ThrowStatement ::
- // 'throw' [no line terminator] Expression ';'
-
- Expect(Token::THROW, CHECK_OK);
- if (scanner()->HasAnyLineTerminatorBeforeNext()) {
- ReportMessageAt(scanner()->location(), MessageTemplate::kNewlineAfterThrow);
- *ok = false;
- return Statement::Default();
- }
- ParseExpression(true, CHECK_OK);
- ExpectSemicolon(ok);
- return Statement::Jump();
-}
-
-
-PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
- // TryStatement ::
- // 'try' Block Catch
- // 'try' Block Finally
- // 'try' Block Catch Finally
- //
- // Catch ::
- // 'catch' '(' Identifier ')' Block
- //
- // Finally ::
- // 'finally' Block
-
- Expect(Token::TRY, CHECK_OK);
-
- {
- ReturnExprScope no_tail_calls(function_state_,
- ReturnExprContext::kInsideTryBlock);
- ParseBlock(CHECK_OK);
- }
-
- Token::Value tok = peek();
- if (tok != Token::CATCH && tok != Token::FINALLY) {
- ReportMessageAt(scanner()->location(), MessageTemplate::kNoCatchOrFinally);
- *ok = false;
- return Statement::Default();
- }
- TailCallExpressionList tail_call_expressions_in_catch_block(zone());
- bool catch_block_exists = false;
- if (tok == Token::CATCH) {
- Consume(Token::CATCH);
- Expect(Token::LPAREN, CHECK_OK);
- Scope* catch_scope = NewScope(CATCH_SCOPE);
- ExpressionClassifier pattern_classifier(this);
- ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
- ValidateBindingPattern(&pattern_classifier, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- {
- CollectExpressionsInTailPositionToListScope
- collect_tail_call_expressions_scope(
- function_state_, &tail_call_expressions_in_catch_block);
- BlockState block_state(&scope_state_, catch_scope);
- {
- BlockState block_state(&scope_state_);
- ParseBlock(CHECK_OK);
- }
- }
- catch_block_exists = true;
- tok = peek();
- }
- if (tok == Token::FINALLY) {
- Consume(Token::FINALLY);
- ParseBlock(CHECK_OK);
- if (FLAG_harmony_explicit_tailcalls && catch_block_exists &&
- tail_call_expressions_in_catch_block.has_explicit_tail_calls()) {
- // TODO(ishell): update chapter number.
- // ES8 XX.YY.ZZ
- ReportMessageAt(tail_call_expressions_in_catch_block.location(),
- MessageTemplate::kUnexpectedTailCallInCatchBlock);
- *ok = false;
- return Statement::Default();
- }
- }
- return Statement::Default();
-}
-
-
-PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
- // In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
- // contexts this is used as a statement which invokes the debugger as if a
- // break point is present.
- // DebuggerStatement ::
- // 'debugger' ';'
-
- Expect(Token::DEBUGGER, CHECK_OK);
- ExpectSemicolon(ok);
- return Statement::Default();
-}
-
-
-// Redefinition of CHECK_OK for parsing expressions.
-#undef CHECK_OK
-#define CHECK_OK ok); \
- if (!*ok) return Expression::Default(); \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-
PreParser::Expression PreParser::ParseFunctionLiteral(
Identifier function_name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
@@ -1059,11 +144,11 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// '(' FormalParameterList? ')' '{' FunctionBody '}'
// Parse function body.
+ PreParserStatementList body;
bool outer_is_script_scope = scope()->is_script_scope();
DeclarationScope* function_scope = NewFunctionScope(kind);
function_scope->SetLanguageMode(language_mode);
- FunctionState function_state(&function_state_, &scope_state_, function_scope,
- kind);
+ FunctionState function_state(&function_state_, &scope_state_, function_scope);
DuplicateFinder duplicate_finder(scanner()->unicode_cache());
ExpressionClassifier formals_classifier(this, &duplicate_finder);
@@ -1071,7 +156,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
int start_position = scanner()->location().beg_pos;
function_scope->set_start_position(start_position);
PreParserFormalParameters formals(function_scope);
- ParseFormalParameterList(&formals, &formals_classifier, CHECK_OK);
+ ParseFormalParameterList(&formals, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
int formals_end_position = scanner()->location().end_pos;
@@ -1085,9 +170,9 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
Expect(Token::LBRACE, CHECK_OK);
if (is_lazily_parsed) {
- ParseLazyFunctionLiteralBody(CHECK_OK);
+ ParseLazyFunctionLiteralBody(false, CHECK_OK);
} else {
- ParseStatementList(Token::RBRACE, CHECK_OK);
+ ParseStatementList(body, Token::RBRACE, CHECK_OK);
}
Expect(Token::RBRACE, CHECK_OK);
@@ -1100,52 +185,24 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
function_name_location, CHECK_OK);
const bool allow_duplicate_parameters =
is_sloppy(language_mode) && formals.is_simple && !IsConciseMethod(kind);
- ValidateFormalParameters(&formals_classifier, language_mode,
- allow_duplicate_parameters, CHECK_OK);
+ ValidateFormalParameters(language_mode, allow_duplicate_parameters, CHECK_OK);
if (is_strict(language_mode)) {
int end_position = scanner()->location().end_pos;
CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
- CheckDecimalLiteralWithLeadingZero(use_counts_, start_position,
- end_position);
- }
-
- return Expression::Default();
-}
-
-PreParser::Expression PreParser::ParseAsyncFunctionExpression(bool* ok) {
- // AsyncFunctionDeclaration ::
- // async [no LineTerminator here] function ( FormalParameters[Await] )
- // { AsyncFunctionBody }
- //
- // async [no LineTerminator here] function BindingIdentifier[Await]
- // ( FormalParameters[Await] ) { AsyncFunctionBody }
- int pos = position();
- Expect(Token::FUNCTION, CHECK_OK);
- bool is_strict_reserved = false;
- Identifier name;
- FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
-
- if (peek_any_identifier()) {
- type = FunctionLiteral::kNamedExpression;
- name = ParseIdentifierOrStrictReservedWord(FunctionKind::kAsyncFunction,
- &is_strict_reserved, CHECK_OK);
+ CheckDecimalLiteralWithLeadingZero(start_position, end_position);
}
- ParseFunctionLiteral(name, scanner()->location(),
- is_strict_reserved ? kFunctionNameIsStrictReserved
- : kFunctionNameValidityUnknown,
- FunctionKind::kAsyncFunction, pos, type, language_mode(),
- CHECK_OK);
return Expression::Default();
}
-void PreParser::ParseLazyFunctionLiteralBody(bool* ok,
- Scanner::BookmarkScope* bookmark) {
+PreParser::LazyParsingResult PreParser::ParseLazyFunctionLiteralBody(
+ bool may_abort, bool* ok) {
int body_start = position();
- ParseStatementList(Token::RBRACE, ok, bookmark);
- if (!*ok) return;
- if (bookmark && bookmark->HasBeenReset()) return;
+ PreParserStatementList body;
+ LazyParsingResult result = ParseStatementList(
+ body, Token::RBRACE, may_abort, CHECK_OK_VALUE(kLazyParsingComplete));
+ if (result == kLazyParsingAborted) return result;
// Position right after terminal '}'.
DCHECK_EQ(Token::RBRACE, scanner()->peek());
@@ -1156,113 +213,45 @@ void PreParser::ParseLazyFunctionLiteralBody(bool* ok,
function_state_->materialized_literal_count(),
function_state_->expected_property_count(), language_mode(),
scope->uses_super_property(), scope->calls_eval());
-}
-
-PreParserExpression PreParser::ParseClassLiteral(
- ExpressionClassifier* classifier, PreParserIdentifier name,
- Scanner::Location class_name_location, bool name_is_strict_reserved,
- int pos, bool* ok) {
- // All parts of a ClassDeclaration and ClassExpression are strict code.
- if (name_is_strict_reserved) {
- ReportMessageAt(class_name_location,
- MessageTemplate::kUnexpectedStrictReserved);
- *ok = false;
- return EmptyExpression();
- }
- if (IsEvalOrArguments(name)) {
- ReportMessageAt(class_name_location, MessageTemplate::kStrictEvalArguments);
- *ok = false;
- return EmptyExpression();
- }
-
- LanguageMode class_language_mode = language_mode();
- BlockState block_state(&scope_state_);
- scope()->SetLanguageMode(
- static_cast<LanguageMode>(class_language_mode | STRICT));
- // TODO(marja): Make PreParser use scope names too.
- // this->scope()->SetScopeName(name);
-
- bool has_extends = Check(Token::EXTENDS);
- if (has_extends) {
- ExpressionClassifier extends_classifier(this);
- ParseLeftHandSideExpression(&extends_classifier, CHECK_OK);
- CheckNoTailCallExpressions(&extends_classifier, CHECK_OK);
- ValidateExpression(&extends_classifier, CHECK_OK);
- if (classifier != nullptr) {
- classifier->Accumulate(&extends_classifier,
- ExpressionClassifier::ExpressionProductions);
- }
- }
-
- ClassLiteralChecker checker(this);
- bool has_seen_constructor = false;
-
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- if (Check(Token::SEMICOLON)) continue;
- const bool in_class = true;
- bool is_computed_name = false; // Classes do not care about computed
- // property names here.
- Identifier name;
- ExpressionClassifier property_classifier(this);
- ParsePropertyDefinition(
- &checker, in_class, has_extends, MethodKind::kNormal, &is_computed_name,
- &has_seen_constructor, &property_classifier, &name, CHECK_OK);
- ValidateExpression(&property_classifier, CHECK_OK);
- if (classifier != nullptr) {
- classifier->Accumulate(&property_classifier,
- ExpressionClassifier::ExpressionProductions);
+ return kLazyParsingComplete;
+}
+
+PreParserExpression PreParser::ExpressionFromIdentifier(
+ PreParserIdentifier name, int start_position, int end_position,
+ InferName infer) {
+ if (track_unresolved_variables_) {
+ AstNodeFactory factory(ast_value_factory());
+ // Setting the Zone is necessary because zone_ might be the temp Zone, and
+ // AstValueFactory doesn't know about it.
+ factory.set_zone(zone());
+ DCHECK_NOT_NULL(name.string_);
+ scope()->NewUnresolved(&factory, name.string_, start_position, end_position,
+ NORMAL_VARIABLE);
+ }
+ return PreParserExpression::FromIdentifier(name);
+}
+
+void PreParser::DeclareAndInitializeVariables(
+ PreParserStatement block,
+ const DeclarationDescriptor* declaration_descriptor,
+ const DeclarationParsingResult::Declaration* declaration,
+ ZoneList<const AstRawString*>* names, bool* ok) {
+ if (declaration->pattern.string_) {
+ /* Mimic what Parser does when declaring variables (see
+ Parser::PatternRewriter::VisitVariableProxy).
+
+ var + no initializer -> RemoveUnresolved
+ let + no initializer -> RemoveUnresolved
+ var + initializer -> RemoveUnresolved followed by NewUnresolved
+ let + initializer -> RemoveUnresolved
+ */
+
+ if (declaration->initializer.IsEmpty() ||
+ declaration_descriptor->mode == VariableMode::LET) {
+ declaration_descriptor->scope->RemoveUnresolved(
+ declaration->pattern.string_);
}
}
-
- Expect(Token::RBRACE, CHECK_OK);
-
- return Expression::Default();
-}
-
-
-PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
- // CallRuntime ::
- // '%' Identifier Arguments
- Expect(Token::MOD, CHECK_OK);
- if (!allow_natives()) {
- *ok = false;
- return Expression::Default();
- }
- // Allow "eval" or "arguments" for backward compatibility.
- ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
- Scanner::Location spread_pos;
- ExpressionClassifier classifier(this);
- ParseArguments(&spread_pos, &classifier, ok);
- ValidateExpression(&classifier, CHECK_OK);
-
- DCHECK(!spread_pos.IsValid());
-
- return Expression::Default();
-}
-
-
-PreParserExpression PreParser::ParseDoExpression(bool* ok) {
- // AssignmentExpression ::
- // do '{' StatementList '}'
- Expect(Token::DO, CHECK_OK);
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- ParseStatementListItem(CHECK_OK);
- }
- Expect(Token::RBRACE, CHECK_OK);
- return PreParserExpression::Default();
-}
-
-void PreParser::ParseAsyncArrowSingleExpressionBody(
- PreParserStatementList body, bool accept_IN,
- ExpressionClassifier* classifier, int pos, bool* ok) {
- scope()->ForceContextAllocation();
-
- PreParserExpression return_value =
- ParseAssignmentExpression(accept_IN, classifier, CHECK_OK_CUSTOM(Void));
-
- body->Add(PreParserStatement::ExpressionStatement(return_value), zone());
}
#undef CHECK_OK
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 3f268ee14a..4b5474854c 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -6,18 +6,18 @@
#define V8_PARSING_PREPARSER_H
#include "src/ast/scopes.h"
-#include "src/bailout-reason.h"
-#include "src/base/hashmap.h"
-#include "src/messages.h"
-#include "src/parsing/expression-classifier.h"
-#include "src/parsing/func-name-inferrer.h"
#include "src/parsing/parser-base.h"
-#include "src/parsing/scanner.h"
-#include "src/parsing/token.h"
namespace v8 {
namespace internal {
+// Whereas the Parser generates AST during the recursive descent,
+// the PreParser doesn't create a tree. Instead, it passes around minimal
+// data objects (PreParserExpression, PreParserIdentifier etc.) which contain
+// just enough data for the upper layer functions. PreParserFactory is
+// responsible for creating these dummy objects. It provides a similar kind of
+// interface as AstNodeFactory, so ParserBase doesn't need to care which one is
+// used.
class PreParserIdentifier {
public:
@@ -25,6 +25,9 @@ class PreParserIdentifier {
static PreParserIdentifier Default() {
return PreParserIdentifier(kUnknownIdentifier);
}
+ static PreParserIdentifier Empty() {
+ return PreParserIdentifier(kEmptyIdentifier);
+ }
static PreParserIdentifier Eval() {
return PreParserIdentifier(kEvalIdentifier);
}
@@ -64,6 +67,7 @@ class PreParserIdentifier {
static PreParserIdentifier Async() {
return PreParserIdentifier(kAsyncIdentifier);
}
+ bool IsEmpty() const { return type_ == kEmptyIdentifier; }
bool IsEval() const { return type_ == kEvalIdentifier; }
bool IsArguments() const { return type_ == kArgumentsIdentifier; }
bool IsEvalOrArguments() const { return IsEval() || IsArguments(); }
@@ -91,6 +95,7 @@ class PreParserIdentifier {
private:
enum Type {
+ kEmptyIdentifier,
kUnknownIdentifier,
kFutureReservedIdentifier,
kFutureStrictReservedIdentifier,
@@ -107,19 +112,23 @@ class PreParserIdentifier {
kAsyncIdentifier
};
- explicit PreParserIdentifier(Type type) : type_(type) {}
+ explicit PreParserIdentifier(Type type) : type_(type), string_(nullptr) {}
Type type_;
-
+ // Only non-nullptr when PreParser.track_unresolved_variables_ is true.
+ const AstRawString* string_;
friend class PreParserExpression;
+ friend class PreParser;
};
class PreParserExpression {
public:
- PreParserExpression() : code_(TypeField::encode(kExpression)) {}
+ PreParserExpression() : code_(TypeField::encode(kEmpty)) {}
+
+ static PreParserExpression Empty() { return PreParserExpression(); }
static PreParserExpression Default() {
- return PreParserExpression();
+ return PreParserExpression(TypeField::encode(kExpression));
}
static PreParserExpression Spread(PreParserExpression expression) {
@@ -128,7 +137,8 @@ class PreParserExpression {
static PreParserExpression FromIdentifier(PreParserIdentifier id) {
return PreParserExpression(TypeField::encode(kIdentifierExpression) |
- IdentifierTypeField::encode(id.type_));
+ IdentifierTypeField::encode(id.type_),
+ id.string_);
}
static PreParserExpression BinaryOperation(PreParserExpression left,
@@ -159,6 +169,11 @@ class PreParserExpression {
IsUseStrictField::encode(true));
}
+ static PreParserExpression UseAsmStringLiteral() {
+ return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
+ IsUseAsmField::encode(true));
+ }
+
static PreParserExpression This() {
return PreParserExpression(TypeField::encode(kExpression) |
ExpressionTypeField::encode(kThisExpression));
@@ -199,6 +214,8 @@ class PreParserExpression {
ExpressionTypeField::encode(kNoTemplateTagExpression));
}
+ bool IsEmpty() const { return TypeField::decode(code_) == kEmpty; }
+
bool IsIdentifier() const {
return TypeField::decode(code_) == kIdentifierExpression;
}
@@ -230,6 +247,11 @@ class PreParserExpression {
IsUseStrictField::decode(code_);
}
+ bool IsUseAsmLiteral() const {
+ return TypeField::decode(code_) == kStringLiteralExpression &&
+ IsUseAsmField::decode(code_);
+ }
+
bool IsThis() const {
return TypeField::decode(code_) == kExpression &&
ExpressionTypeField::decode(code_) == kThisExpression;
@@ -275,7 +297,7 @@ class PreParserExpression {
ExpressionTypeField::decode(code_) == kNoTemplateTagExpression;
}
- bool IsSpreadExpression() const {
+ bool IsSpread() const {
return TypeField::decode(code_) == kSpreadExpression;
}
@@ -292,12 +314,16 @@ class PreParserExpression {
// More dummy implementations of things PreParser doesn't need to track:
void set_index(int index) {} // For YieldExpressions
void set_should_eager_compile() {}
+ void set_should_be_used_once_hint() {}
int position() const { return kNoSourcePosition; }
void set_function_token_position(int position) {}
+ void set_is_class_field_initializer(bool is_class_field_initializer) {}
+
private:
enum Type {
+ kEmpty,
kExpression,
kIdentifierExpression,
kStringLiteralExpression,
@@ -318,8 +344,9 @@ class PreParserExpression {
kAssignment
};
- explicit PreParserExpression(uint32_t expression_code)
- : code_(expression_code) {}
+ explicit PreParserExpression(uint32_t expression_code,
+ const AstRawString* string = nullptr)
+ : code_(expression_code), string_(string) {}
// The first three bits are for the Type.
typedef BitField<Type, 0, 3> TypeField;
@@ -335,11 +362,16 @@ class PreParserExpression {
// of the Type field, so they can share the storage.
typedef BitField<ExpressionType, TypeField::kNext, 3> ExpressionTypeField;
typedef BitField<bool, TypeField::kNext, 1> IsUseStrictField;
+ typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseAsmField;
typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 10>
IdentifierTypeField;
typedef BitField<bool, TypeField::kNext, 1> HasCoverInitializedNameField;
uint32_t code_;
+ // Non-nullptr if the expression is one identifier.
+ const AstRawString* string_;
+
+ friend class PreParser;
};
@@ -353,13 +385,18 @@ class PreParserList {
PreParserList* operator->() { return this; }
void Add(T, void*) { ++length_; }
int length() const { return length_; }
+ static PreParserList Null() { return PreParserList(-1); }
+ bool IsNull() const { return length_ == -1; }
+
private:
+ explicit PreParserList(int n) : length_(n) {}
int length_;
};
-
typedef PreParserList<PreParserExpression> PreParserExpressionList;
+class PreParserStatement;
+typedef PreParserList<PreParserStatement> PreParserStatementList;
class PreParserStatement {
public:
@@ -367,12 +404,16 @@ class PreParserStatement {
return PreParserStatement(kUnknownStatement);
}
- static PreParserStatement Jump() {
- return PreParserStatement(kJumpStatement);
+ static PreParserStatement Null() {
+ return PreParserStatement(kNullStatement);
}
- static PreParserStatement FunctionDeclaration() {
- return PreParserStatement(kFunctionDeclaration);
+ static PreParserStatement Empty() {
+ return PreParserStatement(kEmptyStatement);
+ }
+
+ static PreParserStatement Jump() {
+ return PreParserStatement(kJumpStatement);
}
// Creates expression statement from expression.
@@ -383,6 +424,9 @@ class PreParserStatement {
if (expression.IsUseStrictLiteral()) {
return PreParserStatement(kUseStrictExpressionStatement);
}
+ if (expression.IsUseAsmLiteral()) {
+ return PreParserStatement(kUseAsmExpressionStatement);
+ }
if (expression.IsStringLiteral()) {
return PreParserStatement(kStringLiteralExpressionStatement);
}
@@ -390,28 +434,43 @@ class PreParserStatement {
}
bool IsStringLiteral() {
- return code_ == kStringLiteralExpressionStatement || IsUseStrictLiteral();
+ return code_ == kStringLiteralExpressionStatement || IsUseStrictLiteral() ||
+ IsUseAsmLiteral();
}
bool IsUseStrictLiteral() {
return code_ == kUseStrictExpressionStatement;
}
- bool IsFunctionDeclaration() {
- return code_ == kFunctionDeclaration;
- }
+ bool IsUseAsmLiteral() { return code_ == kUseAsmExpressionStatement; }
bool IsJumpStatement() {
return code_ == kJumpStatement;
}
+ bool IsNullStatement() { return code_ == kNullStatement; }
+
+ bool IsEmptyStatement() { return code_ == kEmptyStatement; }
+
+ // Dummy implementation for making statement->somefunc() work in both Parser
+ // and PreParser.
+ PreParserStatement* operator->() { return this; }
+
+ PreParserStatementList statements() { return PreParserStatementList(); }
+ void set_scope(Scope* scope) {}
+ void Initialize(PreParserExpression cond, PreParserStatement body) {}
+ void Initialize(PreParserStatement init, PreParserExpression cond,
+ PreParserStatement next, PreParserStatement body) {}
+
private:
enum Type {
+ kNullStatement,
+ kEmptyStatement,
kUnknownStatement,
kJumpStatement,
kStringLiteralExpressionStatement,
kUseStrictExpressionStatement,
- kFunctionDeclaration
+ kUseAsmExpressionStatement,
};
explicit PreParserStatement(Type code) : code_(code) {}
@@ -419,9 +478,6 @@ class PreParserStatement {
};
-typedef PreParserList<PreParserStatement> PreParserStatementList;
-
-
class PreParserFactory {
public:
explicit PreParserFactory(void* unused_value_factory) {}
@@ -433,31 +489,34 @@ class PreParserFactory {
int pos) {
return PreParserExpression::Default();
}
+ PreParserExpression NewUndefinedLiteral(int pos) {
+ return PreParserExpression::Default();
+ }
PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
int js_flags, int literal_index,
int pos) {
return PreParserExpression::Default();
}
PreParserExpression NewArrayLiteral(PreParserExpressionList values,
- int literal_index,
- int pos) {
- return PreParserExpression::ArrayLiteral();
- }
- PreParserExpression NewArrayLiteral(PreParserExpressionList values,
int first_spread_index, int literal_index,
int pos) {
return PreParserExpression::ArrayLiteral();
}
+ PreParserExpression NewClassLiteralProperty(PreParserExpression key,
+ PreParserExpression value,
+ ClassLiteralProperty::Kind kind,
+ bool is_static,
+ bool is_computed_name) {
+ return PreParserExpression::Default();
+ }
PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
PreParserExpression value,
ObjectLiteralProperty::Kind kind,
- bool is_static,
bool is_computed_name) {
return PreParserExpression::Default();
}
PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
PreParserExpression value,
- bool is_static,
bool is_computed_name) {
return PreParserExpression::Default();
}
@@ -533,15 +592,9 @@ class PreParserFactory {
int pos) {
return PreParserExpression::Default();
}
- PreParserExpression NewCallRuntime(const AstRawString* name,
- const Runtime::Function* function,
- PreParserExpressionList arguments,
- int pos) {
- return PreParserExpression::Default();
- }
PreParserStatement NewReturnStatement(PreParserExpression expression,
int pos) {
- return PreParserStatement::Default();
+ return PreParserStatement::Jump();
}
PreParserExpression NewFunctionLiteral(
PreParserIdentifier name, Scope* scope, PreParserStatementList body,
@@ -549,8 +602,7 @@ class PreParserFactory {
int parameter_count,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
- FunctionLiteral::EagerCompileHint eager_compile_hint, FunctionKind kind,
- int position) {
+ FunctionLiteral::EagerCompileHint eager_compile_hint, int position) {
return PreParserExpression::Default();
}
@@ -563,6 +615,77 @@ class PreParserFactory {
return PreParserExpression::Default();
}
+ PreParserStatement NewEmptyStatement(int pos) {
+ return PreParserStatement::Default();
+ }
+
+ PreParserStatement NewBlock(ZoneList<const AstRawString*>* labels,
+ int capacity, bool ignore_completion_value,
+ int pos) {
+ return PreParserStatement::Default();
+ }
+
+ PreParserStatement NewDebuggerStatement(int pos) {
+ return PreParserStatement::Default();
+ }
+
+ PreParserStatement NewExpressionStatement(PreParserExpression expr, int pos) {
+ return PreParserStatement::ExpressionStatement(expr);
+ }
+
+ PreParserStatement NewIfStatement(PreParserExpression condition,
+ PreParserStatement then_statement,
+ PreParserStatement else_statement,
+ int pos) {
+ // This must return a jump statement iff both clauses are jump statements.
+ return else_statement.IsJumpStatement() ? then_statement : else_statement;
+ }
+
+ PreParserStatement NewBreakStatement(PreParserStatement target, int pos) {
+ return PreParserStatement::Jump();
+ }
+
+ PreParserStatement NewContinueStatement(PreParserStatement target, int pos) {
+ return PreParserStatement::Jump();
+ }
+
+ PreParserStatement NewWithStatement(Scope* scope,
+ PreParserExpression expression,
+ PreParserStatement statement, int pos) {
+ return PreParserStatement::Default();
+ }
+
+ PreParserStatement NewDoWhileStatement(ZoneList<const AstRawString*>* labels,
+ int pos) {
+ return PreParserStatement::Default();
+ }
+
+ PreParserStatement NewWhileStatement(ZoneList<const AstRawString*>* labels,
+ int pos) {
+ return PreParserStatement::Default();
+ }
+
+ PreParserStatement NewSwitchStatement(ZoneList<const AstRawString*>* labels,
+ int pos) {
+ return PreParserStatement::Default();
+ }
+
+ PreParserStatement NewCaseClause(PreParserExpression label,
+ PreParserStatementList statements, int pos) {
+ return PreParserStatement::Default();
+ }
+
+ PreParserStatement NewForStatement(ZoneList<const AstRawString*>* labels,
+ int pos) {
+ return PreParserStatement::Default();
+ }
+
+ PreParserStatement NewForEachStatement(ForEachStatement::VisitMode visit_mode,
+ ZoneList<const AstRawString*>* labels,
+ int pos) {
+ return PreParserStatement::Default();
+ }
+
// Return the object itself as AstVisitor and implement the needed
// dummy method right in this class.
PreParserFactory* visitor() { return this; }
@@ -585,567 +708,736 @@ struct PreParserFormalParameters : FormalParametersBase {
class PreParser;
+class PreParserTarget {
+ public:
+ PreParserTarget(ParserBase<PreParser>* preparser,
+ PreParserStatement statement) {}
+};
+
+class PreParserTargetScope {
+ public:
+ explicit PreParserTargetScope(ParserBase<PreParser>* preparser) {}
+};
+
template <>
-class ParserBaseTraits<PreParser> {
+struct ParserTypes<PreParser> {
+ typedef ParserBase<PreParser> Base;
+ typedef PreParser Impl;
+
+ // PreParser doesn't need to store generator variables.
+ typedef void Variable;
+
+ // Return types for traversing functions.
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+ typedef PreParserExpression FunctionLiteral;
+ typedef PreParserExpression ObjectLiteralProperty;
+ typedef PreParserExpression ClassLiteralProperty;
+ typedef PreParserExpressionList ExpressionList;
+ typedef PreParserExpressionList ObjectPropertyList;
+ typedef PreParserExpressionList ClassPropertyList;
+ typedef PreParserFormalParameters FormalParameters;
+ typedef PreParserStatement Statement;
+ typedef PreParserStatementList StatementList;
+ typedef PreParserStatement Block;
+ typedef PreParserStatement BreakableStatement;
+ typedef PreParserStatement IterationStatement;
+
+ // For constructing objects returned by the traversing functions.
+ typedef PreParserFactory Factory;
+
+ typedef PreParserTarget Target;
+ typedef PreParserTargetScope TargetScope;
+};
+
+
+// Preparsing checks a JavaScript program and emits preparse-data that helps
+// a later parsing to be faster.
+// See preparse-data-format.h for the data format.
+
+// The PreParser checks that the syntax follows the grammar for JavaScript,
+// and collects some information about the program along the way.
+// The grammar check is only performed in order to understand the program
+// sufficiently to deduce some information about it, that can be used
+// to speed up later parsing. Finding errors is not the goal of pre-parsing,
+// rather it is to speed up properly written and correct programs.
+// That means that contextual checks (like a label being declared where
+// it is used) are generally omitted.
+class PreParser : public ParserBase<PreParser> {
+ friend class ParserBase<PreParser>;
+ friend class v8::internal::ExpressionClassifier<ParserTypes<PreParser>>;
+
public:
- typedef ParserBaseTraits<PreParser> PreParserTraits;
-
- struct Type {
- // PreParser doesn't need to store generator variables.
- typedef void GeneratorVariable;
-
- typedef int AstProperties;
-
- typedef v8::internal::ExpressionClassifier<PreParserTraits>
- ExpressionClassifier;
-
- // Return types for traversing functions.
- typedef PreParserIdentifier Identifier;
- typedef PreParserExpression Expression;
- typedef PreParserExpression YieldExpression;
- typedef PreParserExpression FunctionLiteral;
- typedef PreParserExpression ClassLiteral;
- typedef PreParserExpression Literal;
- typedef PreParserExpression ObjectLiteralProperty;
- typedef PreParserExpressionList ExpressionList;
- typedef PreParserExpressionList PropertyList;
- typedef PreParserIdentifier FormalParameter;
- typedef PreParserFormalParameters FormalParameters;
- typedef PreParserStatementList StatementList;
-
- // For constructing objects returned by the traversing functions.
- typedef PreParserFactory Factory;
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+ typedef PreParserStatement Statement;
+
+ enum PreParseResult {
+ kPreParseStackOverflow,
+ kPreParseAbort,
+ kPreParseSuccess
};
- // TODO(nikolaos): The traits methods should not need to call methods
- // of the implementation object.
- PreParser* delegate() { return reinterpret_cast<PreParser*>(this); }
- const PreParser* delegate() const {
- return reinterpret_cast<const PreParser*>(this);
+ PreParser(Zone* zone, Scanner* scanner, AstValueFactory* ast_value_factory,
+ ParserRecorder* log, uintptr_t stack_limit)
+ : ParserBase<PreParser>(zone, scanner, stack_limit, NULL,
+ ast_value_factory, log),
+ use_counts_(nullptr),
+ track_unresolved_variables_(false) {}
+
+ // Pre-parse the program from the character stream; returns true on
+ // success (even if parsing failed, the pre-parse data successfully
+ // captured the syntax error), and false if a stack-overflow happened
+ // during parsing.
+ PreParseResult PreParseProgram(int* materialized_literals = 0,
+ bool is_module = false) {
+ DCHECK_NULL(scope_state_);
+ DeclarationScope* scope = NewScriptScope();
+
+ // ModuleDeclarationInstantiation for Source Text Module Records creates a
+ // new Module Environment Record whose outer lexical environment record is
+ // the global scope.
+ if (is_module) scope = NewModuleScope(scope);
+
+ FunctionState top_scope(&function_state_, &scope_state_, scope);
+ bool ok = true;
+ int start_position = scanner()->peek_location().beg_pos;
+ parsing_module_ = is_module;
+ PreParserStatementList body;
+ ParseStatementList(body, Token::EOS, &ok);
+ if (stack_overflow()) return kPreParseStackOverflow;
+ if (!ok) {
+ ReportUnexpectedToken(scanner()->current_token());
+ } else if (is_strict(this->scope()->language_mode())) {
+ CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
+ &ok);
+ CheckDecimalLiteralWithLeadingZero(start_position,
+ scanner()->location().end_pos);
+ }
+ if (materialized_literals) {
+ *materialized_literals = function_state_->materialized_literal_count();
+ }
+ return kPreParseSuccess;
}
+ // Parses a single function literal, from the opening parentheses before
+ // parameters to the closing brace after the body.
+ // Returns a FunctionEntry describing the body of the function in enough
+ // detail that it can be lazily compiled.
+ // The scanner is expected to have matched the "function" or "function*"
+ // keyword and parameters, and have consumed the initial '{'.
+ // At return, unless an error occurred, the scanner is positioned before the
+ // the final '}'.
+ PreParseResult PreParseLazyFunction(DeclarationScope* function_scope,
+ bool parsing_module, ParserRecorder* log,
+ bool track_unresolved_variables,
+ bool may_abort, int* use_counts);
+
+ private:
+ // These types form an algebra over syntactic categories that is just
+ // rich enough to let us recognize and propagate the constructs that
+ // are either being counted in the preparser data, or is important
+ // to throw the correct syntax error exceptions.
+
+ // All ParseXXX functions take as the last argument an *ok parameter
+ // which is set to false if parsing failed; it is unchanged otherwise.
+ // By making the 'exception handling' explicit, we are forced to check
+ // for failure at the call sites.
+
+ V8_INLINE PreParserStatementList ParseEagerFunctionBody(
+ PreParserIdentifier function_name, int pos,
+ const PreParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok);
+
+ V8_INLINE LazyParsingResult SkipLazyFunctionBody(
+ int* materialized_literal_count, int* expected_property_count,
+ bool track_unresolved_variables, bool may_abort, bool* ok) {
+ UNREACHABLE();
+ return kLazyParsingComplete;
+ }
+ Expression ParseFunctionLiteral(
+ Identifier name, Scanner::Location function_name_location,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
+ int function_token_pos, FunctionLiteral::FunctionType function_type,
+ LanguageMode language_mode, bool* ok);
+ LazyParsingResult ParseLazyFunctionLiteralBody(bool may_abort, bool* ok);
+
+ struct TemplateLiteralState {};
+
+ V8_INLINE TemplateLiteralState OpenTemplateLiteral(int pos) {
+ return TemplateLiteralState();
+ }
+ V8_INLINE void AddTemplateExpression(TemplateLiteralState* state,
+ PreParserExpression expression) {}
+ V8_INLINE void AddTemplateSpan(TemplateLiteralState* state, bool tail) {}
+ V8_INLINE PreParserExpression CloseTemplateLiteral(
+ TemplateLiteralState* state, int start, PreParserExpression tag);
+ V8_INLINE void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
+
+ V8_INLINE void SetLanguageMode(Scope* scope, LanguageMode mode) {
+ scope->SetLanguageMode(mode);
+ }
+ V8_INLINE void SetAsmModule() {}
+
+ V8_INLINE void MarkCollectedTailCallExpressions() {}
+ V8_INLINE void MarkTailPosition(PreParserExpression expression) {}
+
+ V8_INLINE PreParserExpressionList
+ PrepareSpreadArguments(PreParserExpressionList list) {
+ return list;
+ }
+
+ V8_INLINE PreParserExpression SpreadCall(PreParserExpression function,
+ PreParserExpressionList args,
+ int pos);
+ V8_INLINE PreParserExpression SpreadCallNew(PreParserExpression function,
+ PreParserExpressionList args,
+ int pos);
+
+ V8_INLINE PreParserExpression
+ RewriteSuperCall(PreParserExpression call_expression) {
+ return call_expression;
+ }
+
+ V8_INLINE void RewriteDestructuringAssignments() {}
+
+ V8_INLINE PreParserExpression RewriteExponentiation(PreParserExpression left,
+ PreParserExpression right,
+ int pos) {
+ return left;
+ }
+ V8_INLINE PreParserExpression RewriteAssignExponentiation(
+ PreParserExpression left, PreParserExpression right, int pos) {
+ return left;
+ }
+
+ V8_INLINE PreParserExpression
+ RewriteAwaitExpression(PreParserExpression value, int pos) {
+ return value;
+ }
+ V8_INLINE void PrepareAsyncFunctionBody(PreParserStatementList body,
+ FunctionKind kind, int pos) {}
+ V8_INLINE void RewriteAsyncFunctionBody(PreParserStatementList body,
+ PreParserStatement block,
+ PreParserExpression return_value,
+ bool* ok) {}
+ V8_INLINE PreParserExpression RewriteYieldStar(PreParserExpression generator,
+ PreParserExpression expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ V8_INLINE void RewriteNonPattern(bool* ok) { ValidateExpression(ok); }
+
+ void DeclareAndInitializeVariables(
+ PreParserStatement block,
+ const DeclarationDescriptor* declaration_descriptor,
+ const DeclarationParsingResult::Declaration* declaration,
+ ZoneList<const AstRawString*>* names, bool* ok);
+
+ V8_INLINE ZoneList<const AstRawString*>* DeclareLabel(
+ ZoneList<const AstRawString*>* labels, PreParserExpression expr,
+ bool* ok) {
+ DCHECK(!expr.AsIdentifier().IsEnum());
+ DCHECK(!parsing_module_ || !expr.AsIdentifier().IsAwait());
+ DCHECK(is_sloppy(language_mode()) ||
+ !IsFutureStrictReserved(expr.AsIdentifier()));
+ return labels;
+ }
+
+ // TODO(nikolaos): The preparser currently does not keep track of labels.
+ V8_INLINE bool ContainsLabel(ZoneList<const AstRawString*>* labels,
+ PreParserIdentifier label) {
+ return false;
+ }
+
+ V8_INLINE PreParserExpression RewriteReturn(PreParserExpression return_value,
+ int pos) {
+ return return_value;
+ }
+ V8_INLINE PreParserStatement RewriteSwitchStatement(
+ PreParserExpression tag, PreParserStatement switch_statement,
+ PreParserStatementList cases, Scope* scope) {
+ return PreParserStatement::Default();
+ }
+ V8_INLINE void RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {}
+ V8_INLINE void ValidateCatchBlock(const CatchInfo& catch_info, bool* ok) {}
+ V8_INLINE PreParserStatement RewriteTryStatement(
+ PreParserStatement try_block, PreParserStatement catch_block,
+ PreParserStatement finally_block, const CatchInfo& catch_info, int pos) {
+ return PreParserStatement::Default();
+ }
+
+ V8_INLINE PreParserExpression RewriteDoExpression(PreParserStatement body,
+ int pos, bool* ok) {
+ return PreParserExpression::Default();
+ }
+
+ // TODO(nikolaos): The preparser currently does not keep track of labels
+ // and targets.
+ V8_INLINE PreParserStatement LookupBreakTarget(PreParserIdentifier label,
+ bool* ok) {
+ return PreParserStatement::Default();
+ }
+ V8_INLINE PreParserStatement LookupContinueTarget(PreParserIdentifier label,
+ bool* ok) {
+ return PreParserStatement::Default();
+ }
+
+ V8_INLINE PreParserStatement DeclareFunction(
+ PreParserIdentifier variable_name, PreParserExpression function, int pos,
+ bool is_generator, bool is_async, ZoneList<const AstRawString*>* names,
+ bool* ok) {
+ return Statement::Default();
+ }
+
+ V8_INLINE PreParserStatement
+ DeclareClass(PreParserIdentifier variable_name, PreParserExpression value,
+ ZoneList<const AstRawString*>* names, int class_token_pos,
+ int end_pos, bool* ok) {
+ return PreParserStatement::Default();
+ }
+ V8_INLINE void DeclareClassVariable(PreParserIdentifier name,
+ Scope* block_scope, ClassInfo* class_info,
+ int class_token_pos, bool* ok) {}
+ V8_INLINE void DeclareClassProperty(PreParserIdentifier class_name,
+ PreParserExpression property,
+ ClassInfo* class_info, bool* ok) {}
+ V8_INLINE PreParserExpression RewriteClassLiteral(PreParserIdentifier name,
+ ClassInfo* class_info,
+ int pos, bool* ok) {
+ return PreParserExpression::Default();
+ }
+
+ V8_INLINE PreParserStatement DeclareNative(PreParserIdentifier name, int pos,
+ bool* ok) {
+ return PreParserStatement::Default();
+ }
+
+ V8_INLINE void QueueDestructuringAssignmentForRewriting(
+ PreParserExpression assignment) {}
+ V8_INLINE void QueueNonPatternForRewriting(PreParserExpression expr,
+ bool* ok) {}
+
// Helper functions for recursive descent.
- bool IsEval(PreParserIdentifier identifier) const {
+ V8_INLINE bool IsEval(PreParserIdentifier identifier) const {
return identifier.IsEval();
}
- bool IsArguments(PreParserIdentifier identifier) const {
+ V8_INLINE bool IsArguments(PreParserIdentifier identifier) const {
return identifier.IsArguments();
}
- bool IsEvalOrArguments(PreParserIdentifier identifier) const {
+ V8_INLINE bool IsEvalOrArguments(PreParserIdentifier identifier) const {
return identifier.IsEvalOrArguments();
}
- bool IsUndefined(PreParserIdentifier identifier) const {
+ V8_INLINE bool IsUndefined(PreParserIdentifier identifier) const {
return identifier.IsUndefined();
}
- bool IsAwait(PreParserIdentifier identifier) const {
+ V8_INLINE bool IsAwait(PreParserIdentifier identifier) const {
return identifier.IsAwait();
}
- bool IsFutureStrictReserved(PreParserIdentifier identifier) const {
+ V8_INLINE bool IsFutureStrictReserved(PreParserIdentifier identifier) const {
return identifier.IsFutureStrictReserved();
}
// Returns true if the expression is of type "this.foo".
- static bool IsThisProperty(PreParserExpression expression) {
+ V8_INLINE static bool IsThisProperty(PreParserExpression expression) {
return expression.IsThisProperty();
}
- static bool IsIdentifier(PreParserExpression expression) {
+ V8_INLINE static bool IsIdentifier(PreParserExpression expression) {
return expression.IsIdentifier();
}
- static PreParserIdentifier AsIdentifier(PreParserExpression expression) {
+ V8_INLINE static PreParserIdentifier AsIdentifier(
+ PreParserExpression expression) {
return expression.AsIdentifier();
}
- bool IsPrototype(PreParserIdentifier identifier) const {
+ V8_INLINE static PreParserExpression AsIdentifierExpression(
+ PreParserExpression expression) {
+ return expression;
+ }
+
+ V8_INLINE bool IsPrototype(PreParserIdentifier identifier) const {
return identifier.IsPrototype();
}
- bool IsConstructor(PreParserIdentifier identifier) const {
+ V8_INLINE bool IsConstructor(PreParserIdentifier identifier) const {
return identifier.IsConstructor();
}
- bool IsDirectEvalCall(PreParserExpression expression) const {
+ V8_INLINE bool IsDirectEvalCall(PreParserExpression expression) const {
return expression.IsDirectEvalCall();
}
- static bool IsBoilerplateProperty(PreParserExpression property) {
+ V8_INLINE static bool IsBoilerplateProperty(PreParserExpression property) {
// PreParser doesn't count boilerplate properties.
return false;
}
- static bool IsArrayIndex(PreParserIdentifier string, uint32_t* index) {
+ V8_INLINE bool IsNative(PreParserExpression expr) const {
+ // Preparsing is disabled for extensions (because the extension
+ // details aren't passed to lazily compiled functions), so we
+ // don't accept "native function" in the preparser and there is
+ // no need to keep track of "native".
return false;
}
- static PreParserExpression GetPropertyValue(PreParserExpression property) {
- return PreParserExpression::Default();
+ V8_INLINE static bool IsArrayIndex(PreParserIdentifier string,
+ uint32_t* index) {
+ return false;
}
- // Functions for encapsulating the differences between parsing and preparsing;
- // operations interleaved with the recursive descent.
- static void PushLiteralName(FuncNameInferrer* fni, PreParserIdentifier id) {
- // PreParser should not use FuncNameInferrer.
- UNREACHABLE();
+ V8_INLINE bool IsUseStrictDirective(PreParserStatement statement) const {
+ return statement.IsUseStrictLiteral();
}
- void PushPropertyName(FuncNameInferrer* fni, PreParserExpression expression) {
- // PreParser should not use FuncNameInferrer.
- UNREACHABLE();
+ V8_INLINE bool IsUseAsmDirective(PreParserStatement statement) const {
+ return statement.IsUseAsmLiteral();
}
- static void InferFunctionName(FuncNameInferrer* fni,
- PreParserExpression expression) {
- // PreParser should not use FuncNameInferrer.
- UNREACHABLE();
+ V8_INLINE bool IsStringLiteral(PreParserStatement statement) const {
+ return statement.IsStringLiteral();
+ }
+
+ V8_INLINE static PreParserExpression GetPropertyValue(
+ PreParserExpression property) {
+ return PreParserExpression::Default();
}
- static void CheckAssigningFunctionLiteralToProperty(
+ V8_INLINE static void GetDefaultStrings(
+ PreParserIdentifier* default_string,
+ PreParserIdentifier* star_default_star_string) {}
+
+ // Functions for encapsulating the differences between parsing and preparsing;
+ // operations interleaved with the recursive descent.
+ V8_INLINE static void PushLiteralName(PreParserIdentifier id) {}
+ V8_INLINE static void PushVariableName(PreParserIdentifier id) {}
+ V8_INLINE void PushPropertyName(PreParserExpression expression) {}
+ V8_INLINE void PushEnclosingName(PreParserIdentifier name) {}
+ V8_INLINE static void AddFunctionForNameInference(
+ PreParserExpression expression) {}
+ V8_INLINE static void InferFunctionName() {}
+
+ V8_INLINE static void CheckAssigningFunctionLiteralToProperty(
PreParserExpression left, PreParserExpression right) {}
- static PreParserExpression MarkExpressionAsAssigned(
+ V8_INLINE static PreParserExpression MarkExpressionAsAssigned(
PreParserExpression expression) {
// TODO(marja): To be able to produce the same errors, the preparser needs
// to start tracking which expressions are variables and which are assigned.
return expression;
}
- bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
- PreParserExpression y,
- Token::Value op, int pos,
- PreParserFactory* factory) {
+ V8_INLINE bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
+ PreParserExpression y,
+ Token::Value op,
+ int pos) {
return false;
}
- PreParserExpression BuildUnaryExpression(PreParserExpression expression,
- Token::Value op, int pos,
- PreParserFactory* factory) {
+ V8_INLINE PreParserExpression BuildUnaryExpression(
+ PreParserExpression expression, Token::Value op, int pos) {
return PreParserExpression::Default();
}
- PreParserExpression BuildIteratorResult(PreParserExpression value,
- bool done) {
+ V8_INLINE PreParserExpression BuildIteratorResult(PreParserExpression value,
+ bool done) {
return PreParserExpression::Default();
}
- PreParserExpression NewThrowReferenceError(MessageTemplate::Template message,
- int pos) {
+ V8_INLINE PreParserStatement
+ BuildInitializationBlock(DeclarationParsingResult* parsing_result,
+ ZoneList<const AstRawString*>* names, bool* ok) {
+ return PreParserStatement::Default();
+ }
+
+ V8_INLINE PreParserStatement
+ InitializeForEachStatement(PreParserStatement stmt, PreParserExpression each,
+ PreParserExpression subject,
+ PreParserStatement body, int each_keyword_pos) {
+ return stmt;
+ }
+
+ V8_INLINE PreParserStatement RewriteForVarInLegacy(const ForInfo& for_info) {
+ return PreParserStatement::Null();
+ }
+ V8_INLINE void DesugarBindingInForEachStatement(
+ ForInfo* for_info, PreParserStatement* body_block,
+ PreParserExpression* each_variable, bool* ok) {}
+ V8_INLINE PreParserStatement CreateForEachStatementTDZ(
+ PreParserStatement init_block, const ForInfo& for_info, bool* ok) {
+ return init_block;
+ }
+
+ V8_INLINE StatementT DesugarLexicalBindingsInForStatement(
+ PreParserStatement loop, PreParserStatement init,
+ PreParserExpression cond, PreParserStatement next,
+ PreParserStatement body, Scope* inner_scope, const ForInfo& for_info,
+ bool* ok) {
+ return loop;
+ }
+
+ V8_INLINE PreParserExpression
+ NewThrowReferenceError(MessageTemplate::Template message, int pos) {
return PreParserExpression::Default();
}
- PreParserExpression NewThrowSyntaxError(MessageTemplate::Template message,
- PreParserIdentifier arg, int pos) {
+ V8_INLINE PreParserExpression NewThrowSyntaxError(
+ MessageTemplate::Template message, PreParserIdentifier arg, int pos) {
return PreParserExpression::Default();
}
- PreParserExpression NewThrowTypeError(MessageTemplate::Template message,
- PreParserIdentifier arg, int pos) {
+ V8_INLINE PreParserExpression NewThrowTypeError(
+ MessageTemplate::Template message, PreParserIdentifier arg, int pos) {
return PreParserExpression::Default();
}
// Reporting errors.
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate::Template message,
- const char* arg = NULL,
- ParseErrorType error_type = kSyntaxError);
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate::Template message,
- const AstRawString* arg,
- ParseErrorType error_type = kSyntaxError);
-
- // A dummy function, just useful as an argument to CHECK_OK_CUSTOM.
- static void Void() {}
+ V8_INLINE void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate::Template message,
+ const char* arg = NULL,
+ ParseErrorType error_type = kSyntaxError) {
+ log_->LogMessage(source_location.beg_pos, source_location.end_pos, message,
+ arg, error_type);
+ }
+
+ V8_INLINE void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate::Template message,
+ PreParserIdentifier arg,
+ ParseErrorType error_type = kSyntaxError) {
+ UNREACHABLE();
+ }
// "null" return type creators.
- static PreParserIdentifier EmptyIdentifier() {
- return PreParserIdentifier::Default();
+ V8_INLINE static PreParserIdentifier EmptyIdentifier() {
+ return PreParserIdentifier::Empty();
+ }
+ V8_INLINE static bool IsEmptyIdentifier(PreParserIdentifier name) {
+ return name.IsEmpty();
+ }
+ V8_INLINE static PreParserExpression EmptyExpression() {
+ return PreParserExpression::Empty();
}
- static PreParserExpression EmptyExpression() {
+ V8_INLINE static PreParserExpression EmptyLiteral() {
return PreParserExpression::Default();
}
- static PreParserExpression EmptyLiteral() {
+ V8_INLINE static PreParserExpression EmptyObjectLiteralProperty() {
return PreParserExpression::Default();
}
- static PreParserExpression EmptyObjectLiteralProperty() {
+ V8_INLINE static PreParserExpression EmptyClassLiteralProperty() {
return PreParserExpression::Default();
}
- static PreParserExpression EmptyFunctionLiteral() {
+ V8_INLINE static PreParserExpression EmptyFunctionLiteral() {
return PreParserExpression::Default();
}
- static PreParserExpressionList NullExpressionList() {
- return PreParserExpressionList();
+ V8_INLINE static bool IsEmptyExpression(PreParserExpression expr) {
+ return expr.IsEmpty();
+ }
+
+ V8_INLINE static PreParserExpressionList NullExpressionList() {
+ return PreParserExpressionList::Null();
+ }
+
+ V8_INLINE static bool IsNullExpressionList(PreParserExpressionList exprs) {
+ return exprs.IsNull();
+ }
+
+ V8_INLINE static PreParserStatementList NullStatementList() {
+ return PreParserStatementList::Null();
}
- PreParserIdentifier EmptyIdentifierString() const {
+
+ V8_INLINE static bool IsNullStatementList(PreParserStatementList stmts) {
+ return stmts.IsNull();
+ }
+
+ V8_INLINE static PreParserStatement NullStatement() {
+ return PreParserStatement::Null();
+ }
+
+ V8_INLINE bool IsNullStatement(PreParserStatement stmt) {
+ return stmt.IsNullStatement();
+ }
+
+ V8_INLINE bool IsEmptyStatement(PreParserStatement stmt) {
+ return stmt.IsEmptyStatement();
+ }
+
+ V8_INLINE static PreParserStatement NullBlock() {
+ return PreParserStatement::Null();
+ }
+
+ V8_INLINE PreParserIdentifier EmptyIdentifierString() const {
return PreParserIdentifier::Default();
}
// Odd-ball literal creators.
- PreParserExpression GetLiteralTheHole(int position,
- PreParserFactory* factory) const {
+ V8_INLINE PreParserExpression GetLiteralTheHole(int position) {
+ return PreParserExpression::Default();
+ }
+
+ V8_INLINE PreParserExpression GetLiteralUndefined(int position) {
return PreParserExpression::Default();
}
// Producing data during the recursive descent.
- PreParserIdentifier GetSymbol(Scanner* scanner) const;
+ PreParserIdentifier GetSymbol() const;
- PreParserIdentifier GetNextSymbol(Scanner* scanner) const {
+ V8_INLINE PreParserIdentifier GetNextSymbol() const {
return PreParserIdentifier::Default();
}
- PreParserIdentifier GetNumberAsSymbol(Scanner* scanner) const {
+ V8_INLINE PreParserIdentifier GetNumberAsSymbol() const {
return PreParserIdentifier::Default();
}
- PreParserExpression ThisExpression(int pos = kNoSourcePosition) {
+ V8_INLINE PreParserExpression ThisExpression(int pos = kNoSourcePosition) {
return PreParserExpression::This();
}
- PreParserExpression NewSuperPropertyReference(PreParserFactory* factory,
- int pos) {
+ V8_INLINE PreParserExpression NewSuperPropertyReference(int pos) {
return PreParserExpression::Default();
}
- PreParserExpression NewSuperCallReference(PreParserFactory* factory,
- int pos) {
+ V8_INLINE PreParserExpression NewSuperCallReference(int pos) {
return PreParserExpression::SuperCallReference();
}
- PreParserExpression NewTargetExpression(int pos) {
+ V8_INLINE PreParserExpression NewTargetExpression(int pos) {
return PreParserExpression::Default();
}
- PreParserExpression FunctionSentExpression(PreParserFactory* factory,
- int pos) const {
+ V8_INLINE PreParserExpression FunctionSentExpression(int pos) {
return PreParserExpression::Default();
}
- PreParserExpression ExpressionFromLiteral(Token::Value token, int pos,
- Scanner* scanner,
- PreParserFactory* factory) const {
+ V8_INLINE PreParserExpression ExpressionFromLiteral(Token::Value token,
+ int pos) {
return PreParserExpression::Default();
}
- PreParserExpression ExpressionFromIdentifier(PreParserIdentifier name,
- int start_position,
- int end_position,
- InferName = InferName::kYes) {
- return PreParserExpression::FromIdentifier(name);
- }
+ PreParserExpression ExpressionFromIdentifier(
+ PreParserIdentifier name, int start_position, int end_position,
+ InferName infer = InferName::kYes);
- PreParserExpression ExpressionFromString(int pos, Scanner* scanner,
- PreParserFactory* factory) const;
+ V8_INLINE PreParserExpression ExpressionFromString(int pos) {
+ if (scanner()->UnescapedLiteralMatches("use strict", 10)) {
+ return PreParserExpression::UseStrictStringLiteral();
+ }
+ return PreParserExpression::StringLiteral();
+ }
- PreParserExpression GetIterator(PreParserExpression iterable,
- PreParserFactory* factory, int pos) {
- return PreParserExpression::Default();
+ V8_INLINE PreParserExpressionList NewExpressionList(int size) const {
+ return PreParserExpressionList();
}
- PreParserExpressionList NewExpressionList(int size, Zone* zone) const {
+ V8_INLINE PreParserExpressionList NewObjectPropertyList(int size) const {
return PreParserExpressionList();
}
- PreParserExpressionList NewPropertyList(int size, Zone* zone) const {
+ V8_INLINE PreParserExpressionList NewClassPropertyList(int size) const {
return PreParserExpressionList();
}
- PreParserStatementList NewStatementList(int size, Zone* zone) const {
+ V8_INLINE PreParserStatementList NewStatementList(int size) const {
+ return PreParserStatementList();
+ }
+
+ PreParserStatementList NewCaseClauseList(int size) {
return PreParserStatementList();
}
- void AddParameterInitializationBlock(
+ V8_INLINE PreParserExpression
+ NewV8Intrinsic(PreParserIdentifier name, PreParserExpressionList arguments,
+ int pos, bool* ok) {
+ return PreParserExpression::Default();
+ }
+
+ V8_INLINE PreParserStatement NewThrowStatement(PreParserExpression exception,
+ int pos) {
+ return PreParserStatement::Jump();
+ }
+
+ V8_INLINE void AddParameterInitializationBlock(
const PreParserFormalParameters& parameters, PreParserStatementList body,
bool is_async, bool* ok) {}
- void AddFormalParameter(PreParserFormalParameters* parameters,
- PreParserExpression pattern,
- PreParserExpression initializer,
- int initializer_end_position, bool is_rest) {
+ V8_INLINE void AddFormalParameter(PreParserFormalParameters* parameters,
+ PreParserExpression pattern,
+ PreParserExpression initializer,
+ int initializer_end_position,
+ bool is_rest) {
++parameters->arity;
}
- void DeclareFormalParameter(DeclarationScope* scope,
- PreParserIdentifier parameter,
- Type::ExpressionClassifier* classifier) {
- if (!classifier->is_simple_parameter_list()) {
+ V8_INLINE void DeclareFormalParameter(DeclarationScope* scope,
+ PreParserIdentifier parameter) {
+ if (!classifier()->is_simple_parameter_list()) {
scope->SetHasNonSimpleParameters();
}
}
- V8_INLINE void ParseArrowFunctionFormalParameterList(
+ V8_INLINE void DeclareArrowFunctionFormalParameters(
PreParserFormalParameters* parameters, PreParserExpression params,
const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
- const Scope::Snapshot& scope_snapshot, bool* ok);
+ bool* ok) {
+ // TODO(wingo): Detect duplicated identifiers in paramlists. Detect
+ // parameter lists that are too long.
+ }
- void ReindexLiterals(const PreParserFormalParameters& parameters) {}
+ V8_INLINE void ReindexLiterals(const PreParserFormalParameters& parameters) {}
V8_INLINE PreParserExpression NoTemplateTag() {
return PreParserExpression::NoTemplateTag();
}
+
V8_INLINE static bool IsTaggedTemplate(const PreParserExpression tag) {
return !tag.IsNoTemplateTag();
}
- inline void MaterializeUnspreadArgumentsLiterals(int count);
-
- inline PreParserExpression ExpressionListToExpression(
- PreParserExpressionList args) {
- return PreParserExpression::Default();
- }
-
- void SetFunctionNameFromPropertyName(PreParserExpression property,
- PreParserIdentifier name) {}
- void SetFunctionNameFromIdentifierRef(PreParserExpression value,
- PreParserExpression identifier) {}
-
- V8_INLINE ZoneList<typename Type::ExpressionClassifier::Error>*
- GetReportedErrorList() const;
- V8_INLINE Zone* zone() const;
- V8_INLINE ZoneList<PreParserExpression>* GetNonPatternList() const;
-};
-
-
-// Preparsing checks a JavaScript program and emits preparse-data that helps
-// a later parsing to be faster.
-// See preparse-data-format.h for the data format.
-
-// The PreParser checks that the syntax follows the grammar for JavaScript,
-// and collects some information about the program along the way.
-// The grammar check is only performed in order to understand the program
-// sufficiently to deduce some information about it, that can be used
-// to speed up later parsing. Finding errors is not the goal of pre-parsing,
-// rather it is to speed up properly written and correct programs.
-// That means that contextual checks (like a label being declared where
-// it is used) are generally omitted.
-class PreParser : public ParserBase<PreParser> {
- friend class ParserBase<PreParser>;
- // TODO(nikolaos): This should not be necessary. It will be removed
- // when the traits object stops delegating to the implementation object.
- friend class ParserBaseTraits<PreParser>;
-
- public:
- typedef PreParserIdentifier Identifier;
- typedef PreParserExpression Expression;
- typedef PreParserStatement Statement;
-
- enum PreParseResult {
- kPreParseStackOverflow,
- kPreParseSuccess
- };
-
- PreParser(Zone* zone, Scanner* scanner, AstValueFactory* ast_value_factory,
- ParserRecorder* log, uintptr_t stack_limit)
- : ParserBase<PreParser>(zone, scanner, stack_limit, NULL,
- ast_value_factory, log),
- use_counts_(nullptr) {}
-
- // Pre-parse the program from the character stream; returns true on
- // success (even if parsing failed, the pre-parse data successfully
- // captured the syntax error), and false if a stack-overflow happened
- // during parsing.
- PreParseResult PreParseProgram(int* materialized_literals = 0,
- bool is_module = false) {
- DCHECK_NULL(scope_state_);
- DeclarationScope* scope = NewScriptScope();
-
- // ModuleDeclarationInstantiation for Source Text Module Records creates a
- // new Module Environment Record whose outer lexical environment record is
- // the global scope.
- if (is_module) scope = NewModuleScope(scope);
-
- FunctionState top_scope(&function_state_, &scope_state_, scope,
- kNormalFunction);
- bool ok = true;
- int start_position = scanner()->peek_location().beg_pos;
- parsing_module_ = is_module;
- ParseStatementList(Token::EOS, &ok);
- if (stack_overflow()) return kPreParseStackOverflow;
- if (!ok) {
- ReportUnexpectedToken(scanner()->current_token());
- } else if (is_strict(this->scope()->language_mode())) {
- CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
- &ok);
- CheckDecimalLiteralWithLeadingZero(use_counts_, start_position,
- scanner()->location().end_pos);
- }
- if (materialized_literals) {
- *materialized_literals = function_state_->materialized_literal_count();
+ V8_INLINE void MaterializeUnspreadArgumentsLiterals(int count) {
+ for (int i = 0; i < count; ++i) {
+ function_state_->NextMaterializedLiteralIndex();
}
- return kPreParseSuccess;
- }
-
- // Parses a single function literal, from the opening parentheses before
- // parameters to the closing brace after the body.
- // Returns a FunctionEntry describing the body of the function in enough
- // detail that it can be lazily compiled.
- // The scanner is expected to have matched the "function" or "function*"
- // keyword and parameters, and have consumed the initial '{'.
- // At return, unless an error occurred, the scanner is positioned before the
- // the final '}'.
- PreParseResult PreParseLazyFunction(LanguageMode language_mode,
- FunctionKind kind,
- bool has_simple_parameters,
- bool parsing_module, ParserRecorder* log,
- Scanner::BookmarkScope* bookmark,
- int* use_counts);
-
- private:
- static const int kLazyParseTrialLimit = 200;
-
- // These types form an algebra over syntactic categories that is just
- // rich enough to let us recognize and propagate the constructs that
- // are either being counted in the preparser data, or is important
- // to throw the correct syntax error exceptions.
-
- // All ParseXXX functions take as the last argument an *ok parameter
- // which is set to false if parsing failed; it is unchanged otherwise.
- // By making the 'exception handling' explicit, we are forced to check
- // for failure at the call sites.
- Statement ParseStatementListItem(bool* ok);
- void ParseStatementList(int end_token, bool* ok,
- Scanner::BookmarkScope* bookmark = nullptr);
- Statement ParseStatement(AllowLabelledFunctionStatement allow_function,
- bool* ok);
- Statement ParseSubStatement(AllowLabelledFunctionStatement allow_function,
- bool* ok);
- Statement ParseScopedStatement(bool legacy, bool* ok);
- Statement ParseHoistableDeclaration(bool* ok);
- Statement ParseHoistableDeclaration(int pos, ParseFunctionFlags flags,
- bool* ok);
- Statement ParseFunctionDeclaration(bool* ok);
- Statement ParseAsyncFunctionDeclaration(bool* ok);
- Expression ParseAsyncFunctionExpression(bool* ok);
- Statement ParseClassDeclaration(bool* ok);
- Statement ParseBlock(bool* ok);
- Statement ParseVariableStatement(VariableDeclarationContext var_context,
- bool* ok);
- Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
- int* num_decl, bool* is_lexical,
- bool* is_binding_pattern,
- Scanner::Location* first_initializer_loc,
- Scanner::Location* bindings_loc,
- bool* ok);
- Statement ParseExpressionOrLabelledStatement(
- AllowLabelledFunctionStatement allow_function, bool* ok);
- Statement ParseIfStatement(bool* ok);
- Statement ParseContinueStatement(bool* ok);
- Statement ParseBreakStatement(bool* ok);
- Statement ParseReturnStatement(bool* ok);
- Statement ParseWithStatement(bool* ok);
- Statement ParseSwitchStatement(bool* ok);
- Statement ParseDoWhileStatement(bool* ok);
- Statement ParseWhileStatement(bool* ok);
- Statement ParseForStatement(bool* ok);
- Statement ParseThrowStatement(bool* ok);
- Statement ParseTryStatement(bool* ok);
- Statement ParseDebuggerStatement(bool* ok);
- Expression ParseConditionalExpression(bool accept_IN, bool* ok);
- Expression ParseObjectLiteral(bool* ok);
- Expression ParseV8Intrinsic(bool* ok);
- Expression ParseDoExpression(bool* ok);
-
- V8_INLINE PreParserStatementList ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos,
- const PreParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok);
-
- V8_INLINE void SkipLazyFunctionBody(
- int* materialized_literal_count, int* expected_property_count, bool* ok,
- Scanner::BookmarkScope* bookmark = nullptr) {
- UNREACHABLE();
}
- Expression ParseFunctionLiteral(
- Identifier name, Scanner::Location function_name_location,
- FunctionNameValidity function_name_validity, FunctionKind kind,
- int function_token_pos, FunctionLiteral::FunctionType function_type,
- LanguageMode language_mode, bool* ok);
- void ParseLazyFunctionLiteralBody(bool* ok,
- Scanner::BookmarkScope* bookmark = nullptr);
- PreParserExpression ParseClassLiteral(ExpressionClassifier* classifier,
- PreParserIdentifier name,
- Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos,
- bool* ok);
-
- struct TemplateLiteralState {};
-
- V8_INLINE TemplateLiteralState OpenTemplateLiteral(int pos) {
- return TemplateLiteralState();
+ V8_INLINE PreParserExpression
+ ExpressionListToExpression(PreParserExpressionList args) {
+ return PreParserExpression::Default();
}
- V8_INLINE void AddTemplateExpression(TemplateLiteralState* state,
- PreParserExpression expression) {}
- V8_INLINE void AddTemplateSpan(TemplateLiteralState* state, bool tail) {}
- V8_INLINE PreParserExpression CloseTemplateLiteral(
- TemplateLiteralState* state, int start, PreParserExpression tag);
- V8_INLINE void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
-
- V8_INLINE void MarkCollectedTailCallExpressions() {}
- V8_INLINE void MarkTailPosition(PreParserExpression expression) {}
- void ParseAsyncArrowSingleExpressionBody(PreParserStatementList body,
- bool accept_IN,
- ExpressionClassifier* classifier,
- int pos, bool* ok);
+ V8_INLINE void AddAccessorPrefixToFunctionName(bool is_get,
+ PreParserExpression function,
+ PreParserIdentifier name) {}
+ V8_INLINE void SetFunctionNameFromPropertyName(PreParserExpression property,
+ PreParserIdentifier name) {}
+ V8_INLINE void SetFunctionNameFromIdentifierRef(
+ PreParserExpression value, PreParserExpression identifier) {}
- V8_INLINE PreParserExpressionList
- PrepareSpreadArguments(PreParserExpressionList list) {
- return list;
+ V8_INLINE ZoneList<typename ExpressionClassifier::Error>*
+ GetReportedErrorList() const {
+ return function_state_->GetReportedErrorList();
}
- V8_INLINE PreParserExpression SpreadCall(PreParserExpression function,
- PreParserExpressionList args,
- int pos);
- V8_INLINE PreParserExpression SpreadCallNew(PreParserExpression function,
- PreParserExpressionList args,
- int pos);
-
- V8_INLINE void RewriteDestructuringAssignments() {}
-
- V8_INLINE PreParserExpression RewriteExponentiation(PreParserExpression left,
- PreParserExpression right,
- int pos) {
- return left;
- }
- V8_INLINE PreParserExpression RewriteAssignExponentiation(
- PreParserExpression left, PreParserExpression right, int pos) {
- return left;
+ V8_INLINE ZoneList<PreParserExpression>* GetNonPatternList() const {
+ return function_state_->non_patterns_to_rewrite();
}
- V8_INLINE PreParserExpression
- RewriteAwaitExpression(PreParserExpression value, int pos) {
- return value;
- }
- V8_INLINE PreParserExpression RewriteYieldStar(PreParserExpression generator,
- PreParserExpression expression,
- int pos) {
- return PreParserExpression::Default();
- }
- V8_INLINE void RewriteNonPattern(Type::ExpressionClassifier* classifier,
- bool* ok) {
- ValidateExpression(classifier, ok);
+ V8_INLINE void CountUsage(v8::Isolate::UseCounterFeature feature) {
+ if (use_counts_ != nullptr) ++use_counts_[feature];
}
- V8_INLINE void QueueDestructuringAssignmentForRewriting(
- PreParserExpression assignment) {}
- V8_INLINE void QueueNonPatternForRewriting(PreParserExpression expr,
- bool* ok) {}
+ // Preparser's private field members.
int* use_counts_;
+ bool track_unresolved_variables_;
};
-void ParserBaseTraits<PreParser>::MaterializeUnspreadArgumentsLiterals(
- int count) {
- for (int i = 0; i < count; ++i) {
- delegate()->function_state_->NextMaterializedLiteralIndex();
- }
-}
-
PreParserExpression PreParser::SpreadCall(PreParserExpression function,
PreParserExpressionList args,
int pos) {
@@ -1158,46 +1450,24 @@ PreParserExpression PreParser::SpreadCallNew(PreParserExpression function,
return factory()->NewCallNew(function, args, pos);
}
-void ParserBaseTraits<PreParser>::ParseArrowFunctionFormalParameterList(
- PreParserFormalParameters* parameters, PreParserExpression params,
- const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
- const Scope::Snapshot& scope_snapshot, bool* ok) {
- // TODO(wingo): Detect duplicated identifiers in paramlists. Detect parameter
- // lists that are too long.
-}
-
-ZoneList<PreParserExpression>* ParserBaseTraits<PreParser>::GetNonPatternList()
- const {
- return delegate()->function_state_->non_patterns_to_rewrite();
-}
-
-ZoneList<
- typename ParserBaseTraits<PreParser>::Type::ExpressionClassifier::Error>*
-ParserBaseTraits<PreParser>::GetReportedErrorList() const {
- return delegate()->function_state_->GetReportedErrorList();
-}
-
-Zone* ParserBaseTraits<PreParser>::zone() const {
- return delegate()->function_state_->scope()->zone();
-}
-
PreParserStatementList PreParser::ParseEagerFunctionBody(
PreParserIdentifier function_name, int pos,
const PreParserFormalParameters& parameters, FunctionKind kind,
FunctionLiteral::FunctionType function_type, bool* ok) {
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
+ PreParserStatementList result;
Scope* inner_scope = scope();
if (!parameters.is_simple) inner_scope = NewScope(BLOCK_SCOPE);
{
BlockState block_state(&scope_state_, inner_scope);
- ParseStatementList(Token::RBRACE, ok);
+ ParseStatementList(result, Token::RBRACE, ok);
if (!*ok) return PreParserStatementList();
}
Expect(Token::RBRACE, ok);
- return PreParserStatementList();
+ return result;
}
PreParserExpression PreParser::CloseTemplateLiteral(TemplateLiteralState* state,
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 51ff547017..57009bd207 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -347,10 +347,13 @@ bool Rewriter::Rewrite(ParseInfo* info) {
Variable* result = closure_scope->NewTemporary(
info->ast_value_factory()->dot_result_string());
// The name string must be internalized at this point.
+ info->ast_value_factory()->Internalize(info->isolate());
DCHECK(!result->name().is_null());
Processor processor(info->isolate(), closure_scope, result,
info->ast_value_factory());
processor.Process(body);
+ // Internalize any values created during rewriting.
+ info->ast_value_factory()->Internalize(info->isolate());
if (processor.HasStackOverflow()) return false;
if (processor.result_assigned()) {
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 7cdef87c9c..3f10cfa4c1 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -7,505 +7,676 @@
#include "include/v8.h"
#include "src/globals.h"
#include "src/handles.h"
-#include "src/list-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
#include "src/objects-inl.h"
+#include "src/parsing/scanner.h"
#include "src/unicode-inl.h"
namespace v8 {
namespace internal {
-namespace {
-
-size_t CopyUtf8CharsToUtf16Chars(uint16_t* dest, size_t length, const byte* src,
- size_t* src_pos, size_t src_length) {
- static const unibrow::uchar kMaxUtf16Character =
- unibrow::Utf16::kMaxNonSurrogateCharCode;
- size_t i = 0;
- // Because of the UTF-16 lead and trail surrogates, we stop filling the buffer
- // one character early (in the normal case), because we need to have at least
- // two free spaces in the buffer to be sure that the next character will fit.
- while (i < length - 1) {
- if (*src_pos == src_length) break;
- unibrow::uchar c = src[*src_pos];
- if (c <= unibrow::Utf8::kMaxOneByteChar) {
- *src_pos = *src_pos + 1;
- } else {
- c = unibrow::Utf8::CalculateValue(src + *src_pos, src_length - *src_pos,
- src_pos);
- }
- if (c > kMaxUtf16Character) {
- dest[i++] = unibrow::Utf16::LeadSurrogate(c);
- dest[i++] = unibrow::Utf16::TrailSurrogate(c);
- } else {
- dest[i++] = static_cast<uc16>(c);
- }
- }
- return i;
-}
-
-size_t CopyCharsHelper(uint16_t* dest, size_t length, const uint8_t* src,
- size_t* src_pos, size_t src_length,
- ScriptCompiler::StreamedSource::Encoding encoding) {
- // It's possible that this will be called with length 0, but don't assume that
- // the functions this calls handle it gracefully.
- if (length == 0) return 0;
+// ----------------------------------------------------------------------------
+// BufferedUtf16CharacterStreams
+//
+// A buffered character stream based on a random access character
+// source (ReadBlock can be called with pos() pointing to any position,
+// even positions before the current).
+class BufferedUtf16CharacterStream : public Utf16CharacterStream {
+ public:
+ BufferedUtf16CharacterStream();
- if (encoding == ScriptCompiler::StreamedSource::UTF8) {
- return CopyUtf8CharsToUtf16Chars(dest, length, src, src_pos, src_length);
- }
+ protected:
+ static const size_t kBufferSize = 512;
- size_t to_fill = length;
- if (to_fill > src_length - *src_pos) to_fill = src_length - *src_pos;
+ bool ReadBlock() override;
- if (encoding == ScriptCompiler::StreamedSource::ONE_BYTE) {
- v8::internal::CopyChars<uint8_t, uint16_t>(dest, src + *src_pos, to_fill);
- } else {
- DCHECK(encoding == ScriptCompiler::StreamedSource::TWO_BYTE);
- v8::internal::CopyChars<uint16_t, uint16_t>(
- dest, reinterpret_cast<const uint16_t*>(src + *src_pos), to_fill);
- }
- *src_pos += to_fill;
- return to_fill;
-}
+ // FillBuffer should read up to kBufferSize characters at position and store
+ // them into buffer_[0..]. It returns the number of characters stored.
+ virtual size_t FillBuffer(size_t position) = 0;
-} // namespace
+ // Fixed sized buffer that this class reads from.
+ // The base class' buffer_start_ should always point to buffer_.
+ uc16 buffer_[kBufferSize];
+};
+BufferedUtf16CharacterStream::BufferedUtf16CharacterStream()
+ : Utf16CharacterStream(buffer_, buffer_, buffer_, 0) {}
-// ----------------------------------------------------------------------------
-// BufferedUtf16CharacterStreams
+bool BufferedUtf16CharacterStream::ReadBlock() {
+ DCHECK_EQ(buffer_start_, buffer_);
-BufferedUtf16CharacterStream::BufferedUtf16CharacterStream()
- : Utf16CharacterStream(),
- pushback_limit_(NULL) {
- // Initialize buffer as being empty. First read will fill the buffer.
+ size_t position = pos();
+ buffer_pos_ = position;
buffer_cursor_ = buffer_;
- buffer_end_ = buffer_;
+ buffer_end_ = buffer_ + FillBuffer(position);
+ DCHECK_EQ(pos(), position);
+ DCHECK_LE(buffer_end_, buffer_start_ + kBufferSize);
+ return buffer_cursor_ < buffer_end_;
}
+// ----------------------------------------------------------------------------
+// GenericStringUtf16CharacterStream.
+//
+// A stream w/ a data source being a (flattened) Handle<String>.
-BufferedUtf16CharacterStream::~BufferedUtf16CharacterStream() { }
-
-void BufferedUtf16CharacterStream::PushBack(uc32 character) {
- if (character == kEndOfInput) {
- pos_--;
- return;
- }
- if (pushback_limit_ == NULL && buffer_cursor_ > buffer_) {
- // buffer_ is writable, buffer_cursor_ is const pointer.
- buffer_[--buffer_cursor_ - buffer_] = static_cast<uc16>(character);
- pos_--;
- return;
- }
- SlowPushBack(static_cast<uc16>(character));
-}
+class GenericStringUtf16CharacterStream : public BufferedUtf16CharacterStream {
+ public:
+ GenericStringUtf16CharacterStream(Handle<String> data, size_t start_position,
+ size_t end_position);
+ protected:
+ size_t FillBuffer(size_t position) override;
-void BufferedUtf16CharacterStream::SlowPushBack(uc16 character) {
- // In pushback mode, the end of the buffer contains pushback,
- // and the start of the buffer (from buffer start to pushback_limit_)
- // contains valid data that comes just after the pushback.
- // We NULL the pushback_limit_ if pushing all the way back to the
- // start of the buffer.
+ Handle<String> string_;
+ size_t length_;
+};
- if (pushback_limit_ == NULL) {
- // Enter pushback mode.
- pushback_limit_ = buffer_end_;
- buffer_end_ = buffer_ + kBufferSize;
- buffer_cursor_ = buffer_end_;
- }
- // Ensure that there is room for at least one pushback.
- DCHECK(buffer_cursor_ > buffer_);
- DCHECK(pos_ > 0);
- buffer_[--buffer_cursor_ - buffer_] = character;
- if (buffer_cursor_ == buffer_) {
- pushback_limit_ = NULL;
- } else if (buffer_cursor_ < pushback_limit_) {
- pushback_limit_ = buffer_cursor_;
- }
- pos_--;
+GenericStringUtf16CharacterStream::GenericStringUtf16CharacterStream(
+ Handle<String> data, size_t start_position, size_t end_position)
+ : string_(data), length_(end_position) {
+ DCHECK_GE(end_position, start_position);
+ DCHECK_GE(static_cast<size_t>(string_->length()),
+ end_position - start_position);
+ buffer_pos_ = start_position;
}
+size_t GenericStringUtf16CharacterStream::FillBuffer(size_t from_pos) {
+ if (from_pos >= length_) return 0;
-bool BufferedUtf16CharacterStream::ReadBlock() {
- buffer_cursor_ = buffer_;
- if (pushback_limit_ != NULL) {
- // Leave pushback mode.
- buffer_end_ = pushback_limit_;
- pushback_limit_ = NULL;
- // If there were any valid characters left at the
- // start of the buffer, use those.
- if (buffer_cursor_ < buffer_end_) return true;
- // Otherwise read a new block.
- }
- size_t length = FillBuffer(pos_);
- buffer_end_ = buffer_ + length;
- return length > 0;
+ size_t length = i::Min(kBufferSize, length_ - from_pos);
+ String::WriteToFlat<uc16>(*string_, buffer_, static_cast<int>(from_pos),
+ static_cast<int>(from_pos + length));
+ return length;
}
+// ----------------------------------------------------------------------------
+// ExternalTwoByteStringUtf16CharacterStream.
+//
+// A stream whose data source is a Handle<ExternalTwoByteString>. It avoids
+// all data copying.
-size_t BufferedUtf16CharacterStream::SlowSeekForward(size_t delta) {
- // Leave pushback mode (i.e., ignore that there might be valid data
- // in the buffer before the pushback_limit_ point).
- pushback_limit_ = NULL;
- return BufferSeekForward(delta);
-}
+class ExternalTwoByteStringUtf16CharacterStream : public Utf16CharacterStream {
+ public:
+ ExternalTwoByteStringUtf16CharacterStream(Handle<ExternalTwoByteString> data,
+ size_t start_position,
+ size_t end_position);
+ private:
+ bool ReadBlock() override;
-// ----------------------------------------------------------------------------
-// GenericStringUtf16CharacterStream
+ const uc16* raw_data_; // Pointer to the actual array of characters.
+ size_t start_pos_;
+ size_t end_pos_;
+};
+ExternalTwoByteStringUtf16CharacterStream::
+ ExternalTwoByteStringUtf16CharacterStream(
+ Handle<ExternalTwoByteString> data, size_t start_position,
+ size_t end_position)
+ : raw_data_(data->GetTwoByteData(static_cast<int>(start_position))),
+ start_pos_(start_position),
+ end_pos_(end_position) {
+ buffer_start_ = raw_data_;
+ buffer_cursor_ = raw_data_;
+ buffer_end_ = raw_data_ + (end_pos_ - start_pos_);
+ buffer_pos_ = start_pos_;
+}
-GenericStringUtf16CharacterStream::GenericStringUtf16CharacterStream(
- Handle<String> data, size_t start_position, size_t end_position)
- : string_(data), length_(end_position), bookmark_(kNoBookmark) {
- DCHECK(end_position >= start_position);
- pos_ = start_position;
+bool ExternalTwoByteStringUtf16CharacterStream::ReadBlock() {
+ size_t position = pos();
+ bool have_data = start_pos_ <= position && position < end_pos_;
+ if (have_data) {
+ buffer_pos_ = start_pos_;
+ buffer_cursor_ = raw_data_ + (position - start_pos_),
+ buffer_end_ = raw_data_ + (end_pos_ - start_pos_);
+ } else {
+ buffer_pos_ = position;
+ buffer_cursor_ = raw_data_;
+ buffer_end_ = raw_data_;
+ }
+ return have_data;
}
+// ----------------------------------------------------------------------------
+// ExternalOneByteStringUtf16CharacterStream
+//
+// A stream whose data source is a Handle<ExternalOneByteString>.
-GenericStringUtf16CharacterStream::~GenericStringUtf16CharacterStream() { }
+class ExternalOneByteStringUtf16CharacterStream
+ : public BufferedUtf16CharacterStream {
+ public:
+ ExternalOneByteStringUtf16CharacterStream(Handle<ExternalOneByteString> data,
+ size_t start_position,
+ size_t end_position);
+ // For testing:
+ ExternalOneByteStringUtf16CharacterStream(const char* data, size_t length);
-bool GenericStringUtf16CharacterStream::SetBookmark() {
- bookmark_ = pos_;
- return true;
-}
+ protected:
+ size_t FillBuffer(size_t position) override;
+ const uint8_t* raw_data_; // Pointer to the actual array of characters.
+ size_t length_;
+};
-void GenericStringUtf16CharacterStream::ResetToBookmark() {
- DCHECK(bookmark_ != kNoBookmark);
- pos_ = bookmark_;
- buffer_cursor_ = buffer_;
- buffer_end_ = buffer_ + FillBuffer(pos_);
+ExternalOneByteStringUtf16CharacterStream::
+ ExternalOneByteStringUtf16CharacterStream(
+ Handle<ExternalOneByteString> data, size_t start_position,
+ size_t end_position)
+ : raw_data_(data->GetChars()), length_(end_position) {
+ DCHECK(end_position >= start_position);
+ buffer_pos_ = start_position;
}
+ExternalOneByteStringUtf16CharacterStream::
+ ExternalOneByteStringUtf16CharacterStream(const char* data, size_t length)
+ : raw_data_(reinterpret_cast<const uint8_t*>(data)), length_(length) {}
-size_t GenericStringUtf16CharacterStream::BufferSeekForward(size_t delta) {
- size_t old_pos = pos_;
- pos_ = Min(pos_ + delta, length_);
- ReadBlock();
- return pos_ - old_pos;
+size_t ExternalOneByteStringUtf16CharacterStream::FillBuffer(size_t from_pos) {
+ if (from_pos >= length_) return 0;
+
+ size_t length = Min(kBufferSize, length_ - from_pos);
+ i::CopyCharsUnsigned(buffer_, raw_data_ + from_pos, length);
+ return length;
}
+// ----------------------------------------------------------------------------
+// Utf8ExternalStreamingStream - chunked streaming of Utf-8 data.
+//
+// This implementation is fairly complex, since data arrives in chunks which
+// may 'cut' arbitrarily into utf-8 characters. Also, seeking to a given
+// character position is tricky because the byte position cannot be dericed
+// from the character position.
+
+class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
+ public:
+ Utf8ExternalStreamingStream(
+ ScriptCompiler::ExternalSourceStream* source_stream)
+ : current_({0, {0, 0, unibrow::Utf8::Utf8IncrementalBuffer(0)}}),
+ source_stream_(source_stream) {}
+ ~Utf8ExternalStreamingStream() override {
+ for (size_t i = 0; i < chunks_.size(); i++) delete[] chunks_[i].data;
+ }
-size_t GenericStringUtf16CharacterStream::FillBuffer(size_t from_pos) {
- if (from_pos >= length_) return 0;
- size_t length = kBufferSize;
- if (from_pos + length > length_) {
- length = length_ - from_pos;
+ protected:
+ size_t FillBuffer(size_t position) override;
+
+ private:
+ // A position within the data stream. It stores:
+ // - The 'physical' position (# of bytes in the stream),
+ // - the 'logical' position (# of ucs-2 characters, also within the stream),
+ // - a possibly incomplete utf-8 char at the current 'physical' position.
+ struct StreamPosition {
+ size_t bytes;
+ size_t chars;
+ unibrow::Utf8::Utf8IncrementalBuffer incomplete_char;
+ };
+
+ // Position contains a StreamPosition and the index of the chunk the position
+ // points into. (The chunk_no could be derived from pos, but that'd be
+ // an expensive search through all chunks.)
+ struct Position {
+ size_t chunk_no;
+ StreamPosition pos;
+ };
+
+ // A chunk in the list of chunks, containing:
+ // - The chunk data (data pointer and length), and
+ // - the position at the first byte of the chunk.
+ struct Chunk {
+ const uint8_t* data;
+ size_t length;
+ StreamPosition start;
+ };
+
+ // Within the current chunk, skip forward from current_ towards position.
+ bool SkipToPosition(size_t position);
+ // Within the current chunk, fill the buffer_ (while it has capacity).
+ void FillBufferFromCurrentChunk();
+ // Fetch a new chunk (assuming current_ is at the end of the current data).
+ bool FetchChunk();
+ // Search through the chunks and set current_ to point to the given position.
+ // (This call is potentially expensive.)
+ void SearchPosition(size_t position);
+
+ std::vector<Chunk> chunks_;
+ Position current_;
+ ScriptCompiler::ExternalSourceStream* source_stream_;
+};
+
+bool Utf8ExternalStreamingStream::SkipToPosition(size_t position) {
+ DCHECK_LE(current_.pos.chars, position); // We can only skip forward.
+
+ // Already there? Then return immediately.
+ if (current_.pos.chars == position) return true;
+
+ const Chunk& chunk = chunks_[current_.chunk_no];
+ DCHECK(current_.pos.bytes >= chunk.start.bytes);
+
+ unibrow::Utf8::Utf8IncrementalBuffer incomplete_char =
+ chunk.start.incomplete_char;
+ size_t it = current_.pos.bytes - chunk.start.bytes;
+ size_t chars = chunk.start.chars;
+ while (it < chunk.length && chars < position) {
+ unibrow::uchar t =
+ unibrow::Utf8::ValueOfIncremental(chunk.data[it], &incomplete_char);
+ if (t != unibrow::Utf8::kIncomplete) {
+ chars++;
+ if (t > unibrow::Utf16::kMaxNonSurrogateCharCode) chars++;
+ }
+ it++;
}
- String::WriteToFlat<uc16>(*string_, buffer_, static_cast<int>(from_pos),
- static_cast<int>(from_pos + length));
- return length;
-}
+ current_.pos.bytes += it;
+ current_.pos.chars = chars;
+ current_.pos.incomplete_char = incomplete_char;
+ current_.chunk_no += (it == chunk.length);
-// ----------------------------------------------------------------------------
-// ExternalStreamingStream
-
-size_t ExternalStreamingStream::FillBuffer(size_t position) {
- // Ignore "position" which is the position in the decoded data. Instead,
- // ExternalStreamingStream keeps track of the position in the raw data.
- size_t data_in_buffer = 0;
- // Note that the UTF-8 decoder might not be able to fill the buffer
- // completely; it will typically leave the last character empty (see
- // Utf8ToUtf16CharacterStream::CopyChars).
- while (data_in_buffer < kBufferSize - 1) {
- if (current_data_ == NULL) {
- // GetSomeData will wait until the embedder has enough data. Here's an
- // interface between the API which uses size_t (which is the correct type
- // here) and the internal parts which use size_t.
- current_data_length_ = source_stream_->GetMoreData(&current_data_);
- current_data_offset_ = 0;
- bool data_ends = current_data_length_ == 0;
- bookmark_data_is_from_current_data_ = false;
-
- // A caveat: a data chunk might end with bytes from an incomplete UTF-8
- // character (the rest of the bytes will be in the next chunk).
- if (encoding_ == ScriptCompiler::StreamedSource::UTF8) {
- HandleUtf8SplitCharacters(&data_in_buffer);
- if (!data_ends && current_data_offset_ == current_data_length_) {
- // The data stream didn't end, but we used all the data in the
- // chunk. This will only happen when the chunk was really small. We
- // don't handle the case where a UTF-8 character is split over several
- // chunks; in that case V8 won't crash, but it will be a parse error.
- FlushCurrent();
- continue; // Request a new chunk.
- }
- }
-
- // Did the data stream end?
- if (data_ends) {
- DCHECK(utf8_split_char_buffer_length_ == 0);
- return data_in_buffer;
- }
+ return current_.pos.chars == position;
+}
+
+void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
+ DCHECK_LT(current_.chunk_no, chunks_.size());
+ DCHECK_EQ(buffer_start_, buffer_cursor_);
+ DCHECK_LT(buffer_end_ + 1, buffer_start_ + kBufferSize);
+
+ const Chunk& chunk = chunks_[current_.chunk_no];
+
+ // The buffer_ is writable, but buffer_*_ members are const. So we get a
+ // non-const pointer into buffer that points to the same char as buffer_end_.
+ uint16_t* cursor = buffer_ + (buffer_end_ - buffer_start_);
+ DCHECK_EQ(cursor, buffer_end_);
+
+ // If the current chunk is the last (empty) chunk we'll have to process
+ // any left-over, partial characters.
+ if (chunk.length == 0) {
+ unibrow::uchar t =
+ unibrow::Utf8::ValueOfIncrementalFinish(&current_.pos.incomplete_char);
+ if (t != unibrow::Utf8::kBufferEmpty) {
+ DCHECK(t < unibrow::Utf16::kMaxNonSurrogateCharCode);
+ *cursor = static_cast<uc16>(t);
+ buffer_end_++;
+ current_.pos.chars++;
}
+ return;
+ }
- // Fill the buffer from current_data_.
- size_t new_offset = 0;
- size_t new_chars_in_buffer =
- CopyCharsHelper(buffer_ + data_in_buffer, kBufferSize - data_in_buffer,
- current_data_ + current_data_offset_, &new_offset,
- current_data_length_ - current_data_offset_, encoding_);
- data_in_buffer += new_chars_in_buffer;
- current_data_offset_ += new_offset;
- DCHECK(data_in_buffer <= kBufferSize);
-
- // Did we use all the data in the data chunk?
- if (current_data_offset_ == current_data_length_) {
- FlushCurrent();
+ static const unibrow::uchar kUtf8Bom = 0xfeff;
+
+ unibrow::Utf8::Utf8IncrementalBuffer incomplete_char =
+ current_.pos.incomplete_char;
+ size_t it;
+ for (it = current_.pos.bytes - chunk.start.bytes;
+ it < chunk.length && cursor + 1 < buffer_start_ + kBufferSize; it++) {
+ unibrow::uchar t =
+ unibrow::Utf8::ValueOfIncremental(chunk.data[it], &incomplete_char);
+ if (t == unibrow::Utf8::kIncomplete) continue;
+ if (V8_LIKELY(t < kUtf8Bom)) {
+ *(cursor++) = static_cast<uc16>(t); // The by most frequent case.
+ } else if (t == kUtf8Bom && current_.pos.bytes + it == 2) {
+ // BOM detected at beginning of the stream. Don't copy it.
+ } else if (t <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ *(cursor++) = static_cast<uc16>(t);
+ } else {
+ *(cursor++) = unibrow::Utf16::LeadSurrogate(t);
+ *(cursor++) = unibrow::Utf16::TrailSurrogate(t);
}
}
- return data_in_buffer;
+
+ current_.pos.bytes = chunk.start.bytes + it;
+ current_.pos.chars += (cursor - buffer_end_);
+ current_.pos.incomplete_char = incomplete_char;
+ current_.chunk_no += (it == chunk.length);
+
+ buffer_end_ = cursor;
}
+bool Utf8ExternalStreamingStream::FetchChunk() {
+ DCHECK_EQ(current_.chunk_no, chunks_.size());
+ DCHECK(chunks_.empty() || chunks_.back().length != 0);
-bool ExternalStreamingStream::SetBookmark() {
- // Bookmarking for this stream is a bit more complex than expected, since
- // the stream state is distributed over several places:
- // - pos_ (inherited from Utf16CharacterStream)
- // - buffer_cursor_ and buffer_end_ (also from Utf16CharacterStream)
- // - buffer_ (from BufferedUtf16CharacterStream)
- // - current_data_ (+ .._offset_ and .._length) (this class)
- // - utf8_split_char_buffer_* (a partial utf8 symbol at the block boundary)
- //
- // The underlying source_stream_ instance likely could re-construct this
- // local data for us, but with the given interfaces we have no way of
- // accomplishing this. Thus, we'll have to save all data locally.
- //
- // What gets saved where:
- // - pos_ => bookmark_
- // - buffer_[buffer_cursor_ .. buffer_end_] => bookmark_buffer_
- // - current_data_[.._offset_ .. .._length_] => bookmark_data_
- // - utf8_split_char_buffer_* => bookmark_utf8_split...
- //
- // To make sure we don't unnecessarily copy data, we also maintain
- // whether bookmark_data_ contains a copy of the current current_data_
- // block. This is done with:
- // - bookmark_data_is_from_current_data_
- // - bookmark_data_offset_: offset into bookmark_data_
+ const uint8_t* chunk = nullptr;
+ size_t length = source_stream_->GetMoreData(&chunk);
+ chunks_.push_back({chunk, length, current_.pos});
+ return length > 0;
+}
+
+void Utf8ExternalStreamingStream::SearchPosition(size_t position) {
+ // If current_ already points to the right position, we're done.
//
- // Note that bookmark_data_is_from_current_data_ must be maintained
- // whenever current_data_ is updated.
-
- bookmark_ = pos_;
-
- size_t buffer_length = buffer_end_ - buffer_cursor_;
- bookmark_buffer_.Dispose();
- bookmark_buffer_ = Vector<uint16_t>::New(static_cast<int>(buffer_length));
- CopyCharsUnsigned(bookmark_buffer_.start(), buffer_cursor_, buffer_length);
-
- size_t data_length = current_data_length_ - current_data_offset_;
- size_t bookmark_data_length = static_cast<size_t>(bookmark_data_.length());
- if (bookmark_data_is_from_current_data_ &&
- data_length < bookmark_data_length) {
- // Fast case: bookmark_data_ was previously copied from the current
- // data block, and we have enough data for this bookmark.
- bookmark_data_offset_ = bookmark_data_length - data_length;
- } else {
- // Slow case: We need to copy current_data_.
- bookmark_data_.Dispose();
- bookmark_data_ = Vector<uint8_t>::New(static_cast<int>(data_length));
- CopyBytes(bookmark_data_.start(), current_data_ + current_data_offset_,
- data_length);
- bookmark_data_is_from_current_data_ = true;
- bookmark_data_offset_ = 0;
+ // This is expected to be the common case, since we typically call
+ // FillBuffer right after the current buffer.
+ if (current_.pos.chars == position) return;
+
+ // No chunks. Fetch at least one, so we can assume !chunks_.empty() below.
+ if (chunks_.empty()) {
+ DCHECK_EQ(current_.chunk_no, 0);
+ DCHECK_EQ(current_.pos.bytes, 0);
+ DCHECK_EQ(current_.pos.chars, 0);
+ FetchChunk();
}
- bookmark_utf8_split_char_buffer_length_ = utf8_split_char_buffer_length_;
- for (size_t i = 0; i < utf8_split_char_buffer_length_; i++) {
- bookmark_utf8_split_char_buffer_[i] = utf8_split_char_buffer_[i];
+ // Search for the last chunk whose start position is less or equal to
+ // position.
+ size_t chunk_no = chunks_.size() - 1;
+ while (chunk_no > 0 && chunks_[chunk_no].start.chars > position) {
+ chunk_no--;
}
- return source_stream_->SetBookmark();
-}
+ // Did we find the terminating (zero-length) chunk? Then we're seeking
+ // behind the end of the data, and position does not exist.
+ // Set current_ to point to the terminating chunk.
+ if (chunks_[chunk_no].length == 0) {
+ current_ = {chunk_no, chunks_[chunk_no].start};
+ return;
+ }
+ // Did we find the non-last chunk? Then our position must be within chunk_no.
+ if (chunk_no + 1 < chunks_.size()) {
+ // Fancy-pants optimization for ASCII chunks within a utf-8 stream.
+ // (Many web sites declare utf-8 encoding, but use only (or almost only) the
+ // ASCII subset for their JavaScript sources. We can exploit this, by
+ // checking whether the # bytes in a chunk are equal to the # chars, and if
+ // so avoid the expensive SkipToPosition.)
+ bool ascii_only_chunk =
+ (chunks_[chunk_no + 1].start.bytes - chunks_[chunk_no].start.bytes) ==
+ (chunks_[chunk_no + 1].start.chars - chunks_[chunk_no].start.chars);
+ if (ascii_only_chunk) {
+ size_t skip = position - chunks_[chunk_no].start.chars;
+ current_ = {chunk_no,
+ {chunks_[chunk_no].start.bytes + skip,
+ chunks_[chunk_no].start.chars + skip,
+ unibrow::Utf8::Utf8IncrementalBuffer(0)}};
+ } else {
+ current_ = {chunk_no, chunks_[chunk_no].start};
+ SkipToPosition(position);
+ }
-void ExternalStreamingStream::ResetToBookmark() {
- source_stream_->ResetToBookmark();
- FlushCurrent();
+ // Since position was within the chunk, SkipToPosition should have found
+ // something.
+ DCHECK_EQ(position, current_.pos.chars);
+ return;
+ }
- pos_ = bookmark_;
+ // What's left: We're in the last, non-terminating chunk. Our position
+ // may be in the chunk, but it may also be in 'future' chunks, which we'll
+ // have to obtain.
+ DCHECK_EQ(chunk_no, chunks_.size() - 1);
+ current_ = {chunk_no, chunks_[chunk_no].start};
+ bool have_more_data = true;
+ bool found = SkipToPosition(position);
+ while (have_more_data && !found) {
+ DCHECK_EQ(current_.chunk_no, chunks_.size());
+ have_more_data = FetchChunk();
+ found = have_more_data && SkipToPosition(position);
+ }
- // bookmark_data_* => current_data_*
- // (current_data_ assumes ownership of its memory.)
- current_data_offset_ = 0;
- current_data_length_ = bookmark_data_.length() - bookmark_data_offset_;
- uint8_t* data = new uint8_t[current_data_length_];
- CopyCharsUnsigned(data, bookmark_data_.begin() + bookmark_data_offset_,
- current_data_length_);
- delete[] current_data_;
- current_data_ = data;
- bookmark_data_is_from_current_data_ = true;
+ // We'll return with a postion != the desired position only if we're out
+ // of data. In that case, we'll point to the terminating chunk.
+ DCHECK_EQ(found, current_.pos.chars == position);
+ DCHECK_EQ(have_more_data, chunks_.back().length != 0);
+ DCHECK_IMPLIES(!found, !have_more_data);
+ DCHECK_IMPLIES(!found, current_.chunk_no == chunks_.size() - 1);
+}
- // bookmark_buffer_ needs to be copied to buffer_.
- CopyCharsUnsigned(buffer_, bookmark_buffer_.begin(),
- bookmark_buffer_.length());
+size_t Utf8ExternalStreamingStream::FillBuffer(size_t position) {
buffer_cursor_ = buffer_;
- buffer_end_ = buffer_ + bookmark_buffer_.length();
+ buffer_end_ = buffer_;
- // utf8 split char buffer
- utf8_split_char_buffer_length_ = bookmark_utf8_split_char_buffer_length_;
- for (size_t i = 0; i < bookmark_utf8_split_char_buffer_length_; i++) {
- utf8_split_char_buffer_[i] = bookmark_utf8_split_char_buffer_[i];
+ SearchPosition(position);
+ bool out_of_data = current_.chunk_no != chunks_.size() &&
+ chunks_[current_.chunk_no].length == 0;
+ if (out_of_data) return 0;
+
+ // Fill the buffer, until we have at least one char (or are out of data).
+ // (The embedder might give us 1-byte blocks within a utf-8 char, so we
+ // can't guarantee progress with one chunk. Thus we iterate.)
+ while (!out_of_data && buffer_cursor_ == buffer_end_) {
+ // At end of current data, but there might be more? Then fetch it.
+ if (current_.chunk_no == chunks_.size()) {
+ out_of_data = !FetchChunk();
+ }
+ FillBufferFromCurrentChunk();
}
+
+ DCHECK_EQ(current_.pos.chars - position, buffer_end_ - buffer_cursor_);
+ return buffer_end_ - buffer_cursor_;
}
+// ----------------------------------------------------------------------------
+// Chunks - helper for One- + TwoByteExternalStreamingStream
+namespace {
-void ExternalStreamingStream::FlushCurrent() {
- delete[] current_data_;
- current_data_ = NULL;
- current_data_length_ = 0;
- current_data_offset_ = 0;
- bookmark_data_is_from_current_data_ = false;
-}
+struct Chunk {
+ const uint8_t* data;
+ size_t byte_length;
+ size_t byte_pos;
+};
+typedef std::vector<struct Chunk> Chunks;
-void ExternalStreamingStream::HandleUtf8SplitCharacters(
- size_t* data_in_buffer) {
- // Note the following property of UTF-8 which makes this function possible:
- // Given any byte, we can always read its local environment (in both
- // directions) to find out the (possibly multi-byte) character it belongs
- // to. Single byte characters are of the form 0b0XXXXXXX. The first byte of a
- // multi-byte character is of the form 0b110XXXXX, 0b1110XXXX or
- // 0b11110XXX. The continuation bytes are of the form 0b10XXXXXX.
-
- // First check if we have leftover data from the last chunk.
- unibrow::uchar c;
- if (utf8_split_char_buffer_length_ > 0) {
- // Move the bytes which are part of the split character (which started in
- // the previous chunk) into utf8_split_char_buffer_. Note that the
- // continuation bytes are of the form 0b10XXXXXX, thus c >> 6 == 2.
- while (current_data_offset_ < current_data_length_ &&
- utf8_split_char_buffer_length_ < 4 &&
- (c = current_data_[current_data_offset_]) >> 6 == 2) {
- utf8_split_char_buffer_[utf8_split_char_buffer_length_] = c;
- ++utf8_split_char_buffer_length_;
- ++current_data_offset_;
- }
+void DeleteChunks(Chunks& chunks) {
+ for (size_t i = 0; i < chunks.size(); i++) delete[] chunks[i].data;
+}
- // Convert the data in utf8_split_char_buffer_.
- size_t new_offset = 0;
- size_t new_chars_in_buffer =
- CopyCharsHelper(buffer_ + *data_in_buffer,
- kBufferSize - *data_in_buffer, utf8_split_char_buffer_,
- &new_offset, utf8_split_char_buffer_length_, encoding_);
- *data_in_buffer += new_chars_in_buffer;
- // Make sure we used all the data.
- DCHECK(new_offset == utf8_split_char_buffer_length_);
- DCHECK(*data_in_buffer <= kBufferSize);
-
- utf8_split_char_buffer_length_ = 0;
+// Return the chunk index for the chunk containing position.
+// If position is behind the end of the stream, the index of the last,
+// zero-length chunk is returned.
+size_t FindChunk(Chunks& chunks, ScriptCompiler::ExternalSourceStream* source_,
+ size_t position) {
+ size_t end_pos =
+ chunks.empty() ? 0 : (chunks.back().byte_pos + chunks.back().byte_length);
+
+ // Get more data if needed. We usually won't enter the loop body.
+ bool out_of_data = !chunks.empty() && chunks.back().byte_length == 0;
+ while (!out_of_data && end_pos <= position + 1) {
+ const uint8_t* chunk = nullptr;
+ size_t len = source_->GetMoreData(&chunk);
+
+ chunks.push_back({chunk, len, end_pos});
+ end_pos += len;
+ out_of_data = (len == 0);
}
- // Move bytes which are part of an incomplete character from the end of the
- // current chunk to utf8_split_char_buffer_. They will be converted when the
- // next data chunk arrives. Note that all valid UTF-8 characters are at most 4
- // bytes long, but if the data is invalid, we can have character values bigger
- // than unibrow::Utf8::kMaxOneByteChar for more than 4 consecutive bytes.
- while (current_data_length_ > current_data_offset_ &&
- (c = current_data_[current_data_length_ - 1]) >
- unibrow::Utf8::kMaxOneByteChar &&
- utf8_split_char_buffer_length_ < 4) {
- --current_data_length_;
- ++utf8_split_char_buffer_length_;
- if (c >= (3 << 6)) {
- // 3 << 6 = 0b11000000; this is the first byte of the multi-byte
- // character. No need to copy the previous characters into the conversion
- // buffer (even if they're multi-byte).
- break;
- }
+ // Here, we should always have at least one chunk, and we either have the
+ // chunk we were looking for, or we're out of data. Also, out_of_data and
+ // end_pos are current (and designate whether we have exhausted the stream,
+ // and the length of data received so far, respectively).
+ DCHECK(!chunks.empty());
+ DCHECK_EQ(end_pos, chunks.back().byte_pos + chunks.back().byte_length);
+ DCHECK_EQ(out_of_data, chunks.back().byte_length == 0);
+ DCHECK(position < end_pos || out_of_data);
+
+ // Edge case: position is behind the end of stream: Return the last (length 0)
+ // chunk to indicate the end of the stream.
+ if (position >= end_pos) {
+ DCHECK(out_of_data);
+ return chunks.size() - 1;
}
- CHECK(utf8_split_char_buffer_length_ <= 4);
- for (size_t i = 0; i < utf8_split_char_buffer_length_; ++i) {
- utf8_split_char_buffer_[i] = current_data_[current_data_length_ + i];
+
+ // We almost always 'stream', meaning we want data from the last chunk, so
+ // let's look at chunks back-to-front.
+ size_t chunk_no = chunks.size() - 1;
+ while (chunks[chunk_no].byte_pos > position) {
+ DCHECK_NE(chunk_no, 0);
+ chunk_no--;
}
+ DCHECK_LE(chunks[chunk_no].byte_pos, position);
+ DCHECK_LT(position, chunks[chunk_no].byte_pos + chunks[chunk_no].byte_length);
+ return chunk_no;
}
+} // anonymous namespace
// ----------------------------------------------------------------------------
-// ExternalTwoByteStringUtf16CharacterStream
-
-ExternalTwoByteStringUtf16CharacterStream::
- ~ExternalTwoByteStringUtf16CharacterStream() { }
-
-ExternalTwoByteStringUtf16CharacterStream::
- ExternalTwoByteStringUtf16CharacterStream(
- Handle<ExternalTwoByteString> data, int start_position,
- int end_position)
- : raw_data_(data->GetTwoByteData(start_position)), bookmark_(kNoBookmark) {
- buffer_cursor_ = raw_data_,
- buffer_end_ = raw_data_ + (end_position - start_position);
- pos_ = start_position;
+// OneByteExternalStreamingStream
+//
+// A stream of latin-1 encoded, chunked data.
+
+class OneByteExternalStreamingStream : public BufferedUtf16CharacterStream {
+ public:
+ explicit OneByteExternalStreamingStream(
+ ScriptCompiler::ExternalSourceStream* source)
+ : source_(source) {}
+ ~OneByteExternalStreamingStream() override { DeleteChunks(chunks_); }
+
+ protected:
+ size_t FillBuffer(size_t position) override;
+
+ private:
+ Chunks chunks_;
+ ScriptCompiler::ExternalSourceStream* source_;
+};
+
+size_t OneByteExternalStreamingStream::FillBuffer(size_t position) {
+ const Chunk& chunk = chunks_[FindChunk(chunks_, source_, position)];
+ if (chunk.byte_length == 0) return 0;
+
+ size_t start_pos = position - chunk.byte_pos;
+ size_t len = i::Min(kBufferSize, chunk.byte_length - start_pos);
+ i::CopyCharsUnsigned(buffer_, chunk.data + start_pos, len);
+ return len;
}
-
-bool ExternalTwoByteStringUtf16CharacterStream::SetBookmark() {
- bookmark_ = pos_;
- return true;
+// ----------------------------------------------------------------------------
+// TwoByteExternalStreamingStream
+//
+// A stream of ucs-2 data, delivered in chunks. Chunks may be 'cut' into the
+// middle of characters (or even contain only one byte), which adds a bit
+// of complexity. This stream avoid all data copying, except for characters
+// that cross chunk boundaries.
+
+class TwoByteExternalStreamingStream : public Utf16CharacterStream {
+ public:
+ explicit TwoByteExternalStreamingStream(
+ ScriptCompiler::ExternalSourceStream* source);
+ ~TwoByteExternalStreamingStream() override;
+
+ protected:
+ bool ReadBlock() override;
+
+ Chunks chunks_;
+ ScriptCompiler::ExternalSourceStream* source_;
+ uc16 one_char_buffer_;
+};
+
+TwoByteExternalStreamingStream::TwoByteExternalStreamingStream(
+ ScriptCompiler::ExternalSourceStream* source)
+ : Utf16CharacterStream(&one_char_buffer_, &one_char_buffer_,
+ &one_char_buffer_, 0),
+ source_(source),
+ one_char_buffer_(0) {}
+
+TwoByteExternalStreamingStream::~TwoByteExternalStreamingStream() {
+ DeleteChunks(chunks_);
}
+bool TwoByteExternalStreamingStream::ReadBlock() {
+ size_t position = pos();
-void ExternalTwoByteStringUtf16CharacterStream::ResetToBookmark() {
- DCHECK(bookmark_ != kNoBookmark);
- pos_ = bookmark_;
- buffer_cursor_ = raw_data_ + bookmark_;
-}
+ // We'll search for the 2nd byte of our character, to make sure we
+ // have enough data for at least one character.
+ size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
-// ----------------------------------------------------------------------------
-// ExternalOneByteStringUtf16CharacterStream
+ // Out of data? Return 0.
+ if (chunks_[chunk_no].byte_length == 0) {
+ buffer_cursor_ = buffer_start_;
+ buffer_end_ = buffer_start_;
+ return false;
+ }
-ExternalOneByteStringUtf16CharacterStream::
- ~ExternalOneByteStringUtf16CharacterStream() {}
+ Chunk& current = chunks_[chunk_no];
+
+ // Annoying edge case: Chunks may not be 2-byte aligned, meaning that a
+ // character may be split between the previous and the current chunk.
+ // If we find such a lonely byte at the beginning of the chunk, we'll use
+ // one_char_buffer_ to hold the full character.
+ bool lonely_byte = (chunks_[chunk_no].byte_pos == (2 * position + 1));
+ if (lonely_byte) {
+ DCHECK_NE(chunk_no, 0);
+ Chunk& previous_chunk = chunks_[chunk_no - 1];
+#ifdef V8_TARGET_BIG_ENDIAN
+ uc16 character = current.data[0] |
+ previous_chunk.data[previous_chunk.byte_length - 1] << 8;
+#else
+ uc16 character = previous_chunk.data[previous_chunk.byte_length - 1] |
+ current.data[0] << 8;
+#endif
+
+ one_char_buffer_ = character;
+ buffer_pos_ = position;
+ buffer_start_ = &one_char_buffer_;
+ buffer_cursor_ = &one_char_buffer_;
+ buffer_end_ = &one_char_buffer_ + 1;
+ return true;
+ }
-ExternalOneByteStringUtf16CharacterStream::
- ExternalOneByteStringUtf16CharacterStream(
- Handle<ExternalOneByteString> data, int start_position,
- int end_position)
- : raw_data_(data->GetChars()),
- length_(end_position),
- bookmark_(kNoBookmark) {
- DCHECK(end_position >= start_position);
- pos_ = start_position;
+ // Common case: character is in current chunk.
+ DCHECK_LE(current.byte_pos, 2 * position);
+ DCHECK_LT(2 * position + 1, current.byte_pos + current.byte_length);
+
+ // Determine # of full ucs-2 chars in stream, and whether we started on an odd
+ // byte boundary.
+ bool odd_start = (current.byte_pos % 2) == 1;
+ size_t number_chars = (current.byte_length - odd_start) / 2;
+
+ // Point the buffer_*_ members into the current chunk and set buffer_cursor_
+ // to point to position. Be careful when converting the byte positions (in
+ // Chunk) to the ucs-2 character positions (in buffer_*_ members).
+ buffer_start_ = reinterpret_cast<const uint16_t*>(current.data + odd_start);
+ buffer_end_ = buffer_start_ + number_chars;
+ buffer_pos_ = (current.byte_pos + odd_start) / 2;
+ buffer_cursor_ = buffer_start_ + (position - buffer_pos_);
+ DCHECK_EQ(position, pos());
+ return true;
}
-ExternalOneByteStringUtf16CharacterStream::
- ExternalOneByteStringUtf16CharacterStream(const char* data, size_t length)
- : raw_data_(reinterpret_cast<const uint8_t*>(data)),
- length_(length),
- bookmark_(kNoBookmark) {}
+// ----------------------------------------------------------------------------
+// ScannerStream: Create stream instances.
-ExternalOneByteStringUtf16CharacterStream::
- ExternalOneByteStringUtf16CharacterStream(const char* data)
- : ExternalOneByteStringUtf16CharacterStream(data, strlen(data)) {}
+Utf16CharacterStream* ScannerStream::For(Handle<String> data) {
+ return ScannerStream::For(data, 0, data->length());
+}
-bool ExternalOneByteStringUtf16CharacterStream::SetBookmark() {
- bookmark_ = pos_;
- return true;
+Utf16CharacterStream* ScannerStream::For(Handle<String> data, int start_pos,
+ int end_pos) {
+ DCHECK(start_pos >= 0);
+ DCHECK(end_pos <= data->length());
+ if (data->IsExternalOneByteString()) {
+ return new ExternalOneByteStringUtf16CharacterStream(
+ Handle<ExternalOneByteString>::cast(data), start_pos, end_pos);
+ } else if (data->IsExternalTwoByteString()) {
+ return new ExternalTwoByteStringUtf16CharacterStream(
+ Handle<ExternalTwoByteString>::cast(data), start_pos, end_pos);
+ } else {
+ // TODO(vogelheim): Maybe call data.Flatten() first?
+ return new GenericStringUtf16CharacterStream(data, start_pos, end_pos);
+ }
}
-void ExternalOneByteStringUtf16CharacterStream::ResetToBookmark() {
- DCHECK(bookmark_ != kNoBookmark);
- pos_ = bookmark_;
- buffer_cursor_ = buffer_;
- buffer_end_ = buffer_ + FillBuffer(pos_);
+std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
+ const char* data) {
+ return ScannerStream::ForTesting(data, strlen(data));
}
-size_t ExternalOneByteStringUtf16CharacterStream::BufferSeekForward(
- size_t delta) {
- size_t old_pos = pos_;
- pos_ = Min(pos_ + delta, length_);
- ReadBlock();
- return pos_ - old_pos;
+std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
+ const char* data, size_t length) {
+ return std::unique_ptr<Utf16CharacterStream>(
+ new ExternalOneByteStringUtf16CharacterStream(data, length));
}
-size_t ExternalOneByteStringUtf16CharacterStream::FillBuffer(size_t from_pos) {
- if (from_pos >= length_) return 0;
- size_t length = Min(kBufferSize, length_ - from_pos);
- for (size_t i = 0; i < length; ++i) {
- buffer_[i] = static_cast<uc16>(raw_data_[from_pos + i]);
+Utf16CharacterStream* ScannerStream::For(
+ ScriptCompiler::ExternalSourceStream* source_stream,
+ v8::ScriptCompiler::StreamedSource::Encoding encoding) {
+ switch (encoding) {
+ case v8::ScriptCompiler::StreamedSource::TWO_BYTE:
+ return new TwoByteExternalStreamingStream(source_stream);
+ case v8::ScriptCompiler::StreamedSource::ONE_BYTE:
+ return new OneByteExternalStreamingStream(source_stream);
+ case v8::ScriptCompiler::StreamedSource::UTF8:
+ return new Utf8ExternalStreamingStream(source_stream);
}
- return length;
+ UNREACHABLE();
+ return nullptr;
}
} // namespace internal
diff --git a/deps/v8/src/parsing/scanner-character-streams.h b/deps/v8/src/parsing/scanner-character-streams.h
index 94d8284f14..ac81613ab7 100644
--- a/deps/v8/src/parsing/scanner-character-streams.h
+++ b/deps/v8/src/parsing/scanner-character-streams.h
@@ -5,187 +5,27 @@
#ifndef V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
#define V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
+#include "include/v8.h" // for v8::ScriptCompiler
#include "src/handles.h"
-#include "src/parsing/scanner.h"
-#include "src/vector.h"
namespace v8 {
namespace internal {
-// Forward declarations.
-class ExternalTwoByteString;
-class ExternalOneByteString;
+class Utf16CharacterStream;
-// A buffered character stream based on a random access character
-// source (ReadBlock can be called with pos_ pointing to any position,
-// even positions before the current).
-class BufferedUtf16CharacterStream: public Utf16CharacterStream {
+class ScannerStream {
public:
- BufferedUtf16CharacterStream();
- ~BufferedUtf16CharacterStream() override;
-
- void PushBack(uc32 character) override;
-
- protected:
- static const size_t kBufferSize = 512;
- static const size_t kPushBackStepSize = 16;
-
- size_t SlowSeekForward(size_t delta) override;
- bool ReadBlock() override;
- virtual void SlowPushBack(uc16 character);
-
- virtual size_t BufferSeekForward(size_t delta) = 0;
- virtual size_t FillBuffer(size_t position) = 0;
-
- const uc16* pushback_limit_;
- uc16 buffer_[kBufferSize];
-};
-
-
-// Generic string stream.
-class GenericStringUtf16CharacterStream: public BufferedUtf16CharacterStream {
- public:
- GenericStringUtf16CharacterStream(Handle<String> data, size_t start_position,
- size_t end_position);
- ~GenericStringUtf16CharacterStream() override;
-
- bool SetBookmark() override;
- void ResetToBookmark() override;
-
- protected:
- static const size_t kNoBookmark = -1;
-
- size_t BufferSeekForward(size_t delta) override;
- size_t FillBuffer(size_t position) override;
-
- Handle<String> string_;
- size_t length_;
- size_t bookmark_;
-};
-
-
-// ExternalStreamingStream is a wrapper around an ExternalSourceStream (see
-// include/v8.h) subclass implemented by the embedder.
-class ExternalStreamingStream : public BufferedUtf16CharacterStream {
- public:
- ExternalStreamingStream(ScriptCompiler::ExternalSourceStream* source_stream,
- v8::ScriptCompiler::StreamedSource::Encoding encoding)
- : source_stream_(source_stream),
- encoding_(encoding),
- current_data_(NULL),
- current_data_offset_(0),
- current_data_length_(0),
- utf8_split_char_buffer_length_(0),
- bookmark_(0),
- bookmark_data_is_from_current_data_(false),
- bookmark_data_offset_(0),
- bookmark_utf8_split_char_buffer_length_(0) {}
-
- ~ExternalStreamingStream() override {
- delete[] current_data_;
- bookmark_buffer_.Dispose();
- bookmark_data_.Dispose();
- }
-
- size_t BufferSeekForward(size_t delta) override {
- // We never need to seek forward when streaming scripts. We only seek
- // forward when we want to parse a function whose location we already know,
- // and when streaming, we don't know the locations of anything we haven't
- // seen yet.
- UNREACHABLE();
- return 0;
- }
-
- size_t FillBuffer(size_t position) override;
-
- bool SetBookmark() override;
- void ResetToBookmark() override;
-
- private:
- void HandleUtf8SplitCharacters(size_t* data_in_buffer);
- void FlushCurrent();
-
- ScriptCompiler::ExternalSourceStream* source_stream_;
- v8::ScriptCompiler::StreamedSource::Encoding encoding_;
- const uint8_t* current_data_;
- size_t current_data_offset_;
- size_t current_data_length_;
- // For converting UTF-8 characters which are split across two data chunks.
- uint8_t utf8_split_char_buffer_[4];
- size_t utf8_split_char_buffer_length_;
-
- // Bookmark support. See comments in ExternalStreamingStream::SetBookmark
- // for additional details.
- size_t bookmark_;
- Vector<uint16_t> bookmark_buffer_;
- Vector<uint8_t> bookmark_data_;
- bool bookmark_data_is_from_current_data_;
- size_t bookmark_data_offset_;
- uint8_t bookmark_utf8_split_char_buffer_[4];
- size_t bookmark_utf8_split_char_buffer_length_;
-};
-
-
-// UTF16 buffer to read characters from an external string.
-class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
- public:
- ExternalTwoByteStringUtf16CharacterStream(Handle<ExternalTwoByteString> data,
- int start_position,
- int end_position);
- ~ExternalTwoByteStringUtf16CharacterStream() override;
-
- void PushBack(uc32 character) override {
- DCHECK(buffer_cursor_ > raw_data_);
- pos_--;
- if (character != kEndOfInput) {
- buffer_cursor_--;
- }
- }
-
- bool SetBookmark() override;
- void ResetToBookmark() override;
-
- private:
- size_t SlowSeekForward(size_t delta) override {
- // Fast case always handles seeking.
- return 0;
- }
- bool ReadBlock() override {
- // Entire string is read at start.
- return false;
- }
- const uc16* raw_data_; // Pointer to the actual array of characters.
-
- static const size_t kNoBookmark = -1;
-
- size_t bookmark_;
-};
-
-// UTF16 buffer to read characters from an external latin1 string.
-class ExternalOneByteStringUtf16CharacterStream
- : public BufferedUtf16CharacterStream {
- public:
- ExternalOneByteStringUtf16CharacterStream(Handle<ExternalOneByteString> data,
- int start_position,
- int end_position);
- ~ExternalOneByteStringUtf16CharacterStream() override;
+ static Utf16CharacterStream* For(Handle<String> data);
+ static Utf16CharacterStream* For(Handle<String> data, int start_pos,
+ int end_pos);
+ static Utf16CharacterStream* For(
+ ScriptCompiler::ExternalSourceStream* source_stream,
+ ScriptCompiler::StreamedSource::Encoding encoding);
// For testing:
- explicit ExternalOneByteStringUtf16CharacterStream(const char* data);
- ExternalOneByteStringUtf16CharacterStream(const char* data, size_t length);
-
- bool SetBookmark() override;
- void ResetToBookmark() override;
-
- private:
- static const size_t kNoBookmark = -1;
-
- size_t BufferSeekForward(size_t delta) override;
- size_t FillBuffer(size_t position) override;
-
- const uint8_t* raw_data_; // Pointer to the actual array of characters.
- size_t length_;
- size_t bookmark_;
+ static std::unique_ptr<Utf16CharacterStream> ForTesting(const char* data);
+ static std::unique_ptr<Utf16CharacterStream> ForTesting(const char* data,
+ size_t length);
};
} // namespace internal
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 06ead2e827..e41b56fd4b 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -14,7 +14,7 @@
#include "src/char-predicates-inl.h"
#include "src/conversions-inl.h"
#include "src/list-inl.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/duplicate-finder.h" // For Scanner::FindSymbol
namespace v8 {
namespace internal {
@@ -26,25 +26,60 @@ Handle<String> Scanner::LiteralBuffer::Internalize(Isolate* isolate) const {
return isolate->factory()->InternalizeTwoByteString(two_byte_literal());
}
+// ----------------------------------------------------------------------------
+// Scanner::BookmarkScope
+
+const size_t Scanner::BookmarkScope::kBookmarkAtFirstPos =
+ std::numeric_limits<size_t>::max() - 2;
+const size_t Scanner::BookmarkScope::kNoBookmark =
+ std::numeric_limits<size_t>::max() - 1;
+const size_t Scanner::BookmarkScope::kBookmarkWasApplied =
+ std::numeric_limits<size_t>::max();
+
+void Scanner::BookmarkScope::Set() {
+ DCHECK_EQ(bookmark_, kNoBookmark);
+ DCHECK_EQ(scanner_->next_next_.token, Token::UNINITIALIZED);
+
+ // The first token is a bit special, since current_ will still be
+ // uninitialized. In this case, store kBookmarkAtFirstPos and special-case it
+ // when
+ // applying the bookmark.
+ DCHECK_IMPLIES(
+ scanner_->current_.token == Token::UNINITIALIZED,
+ scanner_->current_.location.beg_pos == scanner_->next_.location.beg_pos);
+ bookmark_ = (scanner_->current_.token == Token::UNINITIALIZED)
+ ? kBookmarkAtFirstPos
+ : scanner_->location().beg_pos;
+}
+
+void Scanner::BookmarkScope::Apply() {
+ DCHECK(HasBeenSet()); // Caller hasn't called SetBookmark.
+ if (bookmark_ == kBookmarkAtFirstPos) {
+ scanner_->SeekNext(0);
+ } else {
+ scanner_->SeekNext(bookmark_);
+ scanner_->Next();
+ DCHECK_EQ(scanner_->location().beg_pos, bookmark_);
+ }
+ bookmark_ = kBookmarkWasApplied;
+}
-// Default implementation for streams that do not support bookmarks.
-bool Utf16CharacterStream::SetBookmark() { return false; }
-void Utf16CharacterStream::ResetToBookmark() { UNREACHABLE(); }
+bool Scanner::BookmarkScope::HasBeenSet() {
+ return bookmark_ != kNoBookmark && bookmark_ != kBookmarkWasApplied;
+}
+bool Scanner::BookmarkScope::HasBeenApplied() {
+ return bookmark_ == kBookmarkWasApplied;
+}
// ----------------------------------------------------------------------------
// Scanner
Scanner::Scanner(UnicodeCache* unicode_cache)
: unicode_cache_(unicode_cache),
- bookmark_c0_(kNoBookmark),
octal_pos_(Location::invalid()),
decimal_with_leading_zero_pos_(Location::invalid()),
found_html_comment_(false) {
- bookmark_current_.literal_chars = &bookmark_current_literal_;
- bookmark_current_.raw_literal_chars = &bookmark_current_raw_literal_;
- bookmark_next_.literal_chars = &bookmark_next_literal_;
- bookmark_next_.raw_literal_chars = &bookmark_next_raw_literal_;
}
@@ -305,14 +340,14 @@ static inline bool IsLittleEndianByteOrderMark(uc32 c) {
return c == 0xFFFE;
}
-
bool Scanner::SkipWhiteSpace() {
int start_position = source_pos();
while (true) {
while (true) {
- // The unicode cache accepts unsigned inputs.
- if (c0_ < 0) break;
+ // Don't skip behind the end of input.
+ if (c0_ == kEndOfInput) break;
+
// Advance as long as character is a WhiteSpace or LineTerminator.
// Remember if the latter is the case.
if (unicode_cache_->IsLineTerminator(c0_)) {
@@ -328,25 +363,27 @@ bool Scanner::SkipWhiteSpace() {
// line (with only whitespace in front of it), we treat the rest
// of the line as a comment. This is in line with the way
// SpiderMonkey handles it.
- if (c0_ == '-' && has_line_terminator_before_next_) {
- Advance();
- if (c0_ == '-') {
- Advance();
- if (c0_ == '>') {
- // Treat the rest of the line as a comment.
- SkipSingleLineComment();
- // Continue skipping white space after the comment.
- continue;
- }
- PushBack('-'); // undo Advance()
- }
+ if (c0_ != '-' || !has_line_terminator_before_next_) break;
+
+ Advance();
+ if (c0_ != '-') {
PushBack('-'); // undo Advance()
+ break;
}
- // Return whether or not we skipped any characters.
- return source_pos() != start_position;
+
+ Advance();
+ if (c0_ != '>') {
+ PushBack2('-', '-'); // undo 2x Advance();
+ break;
+ }
+
+ // Treat the rest of the line as a comment.
+ SkipSingleLineComment();
}
-}
+ // Return whether or not we skipped any characters.
+ return source_pos() != start_position;
+}
Token::Value Scanner::SkipSingleLineComment() {
Advance();
@@ -356,7 +393,7 @@ Token::Value Scanner::SkipSingleLineComment() {
// separately by the lexical grammar and becomes part of the
// stream of input elements for the syntactic grammar (see
// ECMA-262, section 7.4).
- while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !unicode_cache_->IsLineTerminator(c0_)) {
Advance();
}
@@ -366,7 +403,7 @@ Token::Value Scanner::SkipSingleLineComment() {
Token::Value Scanner::SkipSourceURLComment() {
TryToParseSourceURLComment();
- while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !unicode_cache_->IsLineTerminator(c0_)) {
Advance();
}
@@ -377,11 +414,11 @@ Token::Value Scanner::SkipSourceURLComment() {
void Scanner::TryToParseSourceURLComment() {
// Magic comments are of the form: //[#@]\s<name>=\s*<value>\s*.* and this
// function will just return if it cannot parse a magic comment.
- if (c0_ < 0 || !unicode_cache_->IsWhiteSpace(c0_)) return;
+ if (c0_ == kEndOfInput || !unicode_cache_->IsWhiteSpace(c0_)) return;
Advance();
LiteralBuffer name;
- while (c0_ >= 0 && !unicode_cache_->IsWhiteSpaceOrLineTerminator(c0_) &&
- c0_ != '=') {
+ while (c0_ != kEndOfInput &&
+ !unicode_cache_->IsWhiteSpaceOrLineTerminator(c0_) && c0_ != '=') {
name.AddChar(c0_);
Advance();
}
@@ -399,10 +436,10 @@ void Scanner::TryToParseSourceURLComment() {
return;
Advance();
value->Reset();
- while (c0_ >= 0 && unicode_cache_->IsWhiteSpace(c0_)) {
+ while (c0_ != kEndOfInput && unicode_cache_->IsWhiteSpace(c0_)) {
Advance();
}
- while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !unicode_cache_->IsLineTerminator(c0_)) {
// Disallowed characters.
if (c0_ == '"' || c0_ == '\'') {
value->Reset();
@@ -415,7 +452,7 @@ void Scanner::TryToParseSourceURLComment() {
Advance();
}
// Allow whitespace at the end.
- while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
+ while (c0_ != kEndOfInput && !unicode_cache_->IsLineTerminator(c0_)) {
if (!unicode_cache_->IsWhiteSpace(c0_)) {
value->Reset();
break;
@@ -429,10 +466,10 @@ Token::Value Scanner::SkipMultiLineComment() {
DCHECK(c0_ == '*');
Advance();
- while (c0_ >= 0) {
+ while (c0_ != kEndOfInput) {
uc32 ch = c0_;
Advance();
- if (c0_ >= 0 && unicode_cache_->IsLineTerminator(ch)) {
+ if (c0_ != kEndOfInput && unicode_cache_->IsLineTerminator(ch)) {
// Following ECMA-262, section 7.4, a comment containing
// a newline will make the comment count as a line-terminator.
has_multiline_comment_before_next_ = true;
@@ -450,24 +487,24 @@ Token::Value Scanner::SkipMultiLineComment() {
return Token::ILLEGAL;
}
-
Token::Value Scanner::ScanHtmlComment() {
// Check for <!-- comments.
DCHECK(c0_ == '!');
Advance();
- if (c0_ == '-') {
- Advance();
- if (c0_ == '-') {
- found_html_comment_ = true;
- return SkipSingleLineComment();
- }
- PushBack('-'); // undo Advance()
+ if (c0_ != '-') {
+ PushBack('!'); // undo Advance()
+ return Token::LT;
}
- PushBack('!'); // undo Advance()
- DCHECK(c0_ == '!');
- return Token::LT;
-}
+ Advance();
+ if (c0_ != '-') {
+ PushBack2('-', '!'); // undo 2x Advance()
+ return Token::LT;
+ }
+
+ found_html_comment_ = true;
+ return SkipSingleLineComment();
+}
void Scanner::Scan() {
next_.literal_chars = NULL;
@@ -716,7 +753,7 @@ void Scanner::Scan() {
break;
default:
- if (c0_ < 0) {
+ if (c0_ == kEndOfInput) {
token = Token::EOS;
} else if (unicode_cache_->IsIdentifierStart(c0_)) {
token = ScanIdentifierOrKeyword();
@@ -790,7 +827,7 @@ void Scanner::SeekForward(int pos) {
// Positions inside the lookahead token aren't supported.
DCHECK(pos >= current_pos);
if (pos != current_pos) {
- source_->SeekForward(pos - source_->pos());
+ source_->Seek(pos);
Advance();
// This function is only called to seek to the location
// of the end of a function (at the "}" token). It doesn't matter
@@ -808,7 +845,8 @@ bool Scanner::ScanEscape() {
Advance<capture_raw>();
// Skip escaped newlines.
- if (!in_template_literal && c0_ >= 0 && unicode_cache_->IsLineTerminator(c)) {
+ if (!in_template_literal && c0_ != kEndOfInput &&
+ unicode_cache_->IsLineTerminator(c)) {
// Allow CR+LF newlines in multiline string literals.
if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance<capture_raw>();
// Allow LF+CR newlines in multiline string literals.
@@ -894,7 +932,7 @@ Token::Value Scanner::ScanString() {
HandleLeadSurrogate();
break;
}
- if (c0_ < 0 || c0_ == '\n' || c0_ == '\r') return Token::ILLEGAL;
+ if (c0_ == kEndOfInput || c0_ == '\n' || c0_ == '\r') return Token::ILLEGAL;
if (c0_ == quote) {
literal.Complete();
Advance<false, false>();
@@ -906,12 +944,12 @@ Token::Value Scanner::ScanString() {
AddLiteralChar(c);
}
- while (c0_ != quote && c0_ >= 0
- && !unicode_cache_->IsLineTerminator(c0_)) {
+ while (c0_ != quote && c0_ != kEndOfInput &&
+ !unicode_cache_->IsLineTerminator(c0_)) {
uc32 c = c0_;
Advance();
if (c == '\\') {
- if (c0_ < 0 || !ScanEscape<false, false>()) {
+ if (c0_ == kEndOfInput || !ScanEscape<false, false>()) {
return Token::ILLEGAL;
}
} else {
@@ -957,7 +995,7 @@ Token::Value Scanner::ScanTemplateSpan() {
ReduceRawLiteralLength(2);
break;
} else if (c == '\\') {
- if (c0_ > 0 && unicode_cache_->IsLineTerminator(c0_)) {
+ if (c0_ != kEndOfInput && unicode_cache_->IsLineTerminator(c0_)) {
// The TV of LineContinuation :: \ LineTerminatorSequence is the empty
// code unit sequence.
uc32 lastChar = c0_;
@@ -1155,7 +1193,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// section 7.8.3, page 17 (note that we read only one decimal digit
// if the value is 0).
if (IsDecimalDigit(c0_) ||
- (c0_ >= 0 && unicode_cache_->IsIdentifierStart(c0_)))
+ (c0_ != kEndOfInput && unicode_cache_->IsIdentifierStart(c0_)))
return Token::ILLEGAL;
literal.Complete();
@@ -1382,7 +1420,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
}
// Scan the rest of the identifier characters.
- while (c0_ >= 0 && unicode_cache_->IsIdentifierPart(c0_)) {
+ while (c0_ != kEndOfInput && unicode_cache_->IsIdentifierPart(c0_)) {
if (c0_ != '\\') {
uc32 next_char = c0_;
Advance();
@@ -1408,7 +1446,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal,
bool escaped) {
// Scan the rest of the identifier characters.
- while (c0_ >= 0 && unicode_cache_->IsIdentifierPart(c0_)) {
+ while (c0_ != kEndOfInput && unicode_cache_->IsIdentifierPart(c0_)) {
if (c0_ == '\\') {
uc32 c = ScanIdentifierUnicodeEscape();
escaped = true;
@@ -1465,10 +1503,12 @@ bool Scanner::ScanRegExpPattern() {
}
while (c0_ != '/' || in_character_class) {
- if (c0_ < 0 || unicode_cache_->IsLineTerminator(c0_)) return false;
+ if (c0_ == kEndOfInput || unicode_cache_->IsLineTerminator(c0_))
+ return false;
if (c0_ == '\\') { // Escape sequence.
AddLiteralCharAdvance();
- if (c0_ < 0 || unicode_cache_->IsLineTerminator(c0_)) return false;
+ if (c0_ == kEndOfInput || unicode_cache_->IsLineTerminator(c0_))
+ return false;
AddLiteralCharAdvance();
// If the escape allows more characters, i.e., \x??, \u????, or \c?,
// only "safe" characters are allowed (letters, digits, underscore),
@@ -1499,7 +1539,7 @@ Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
// Scan regular expression flags.
int flags = 0;
- while (c0_ >= 0 && unicode_cache_->IsIdentifierPart(c0_)) {
+ while (c0_ != kEndOfInput && unicode_cache_->IsIdentifierPart(c0_)) {
RegExp::Flags flag = RegExp::kNone;
switch (c0_) {
case 'g':
@@ -1574,202 +1614,31 @@ bool Scanner::ContainsDot() {
int Scanner::FindSymbol(DuplicateFinder* finder, int value) {
+ // TODO(vogelheim): Move this logic into the calling class; this can be fully
+ // implemented using the public interface.
if (is_literal_one_byte()) {
return finder->AddOneByteSymbol(literal_one_byte_string(), value);
}
return finder->AddTwoByteSymbol(literal_two_byte_string(), value);
}
-
-bool Scanner::SetBookmark() {
- if (c0_ != kNoBookmark && bookmark_c0_ == kNoBookmark &&
- next_next_.token == Token::UNINITIALIZED && source_->SetBookmark()) {
- bookmark_c0_ = c0_;
- CopyTokenDesc(&bookmark_current_, &current_);
- CopyTokenDesc(&bookmark_next_, &next_);
- return true;
- }
- return false;
-}
-
-
-void Scanner::ResetToBookmark() {
- DCHECK(BookmarkHasBeenSet()); // Caller hasn't called SetBookmark.
-
- source_->ResetToBookmark();
- c0_ = bookmark_c0_;
- CopyToNextTokenDesc(&bookmark_current_);
- current_ = next_;
- CopyToNextTokenDesc(&bookmark_next_);
- bookmark_c0_ = kBookmarkWasApplied;
-}
-
-
-bool Scanner::BookmarkHasBeenSet() { return bookmark_c0_ >= 0; }
-
-
-bool Scanner::BookmarkHasBeenReset() {
- return bookmark_c0_ == kBookmarkWasApplied;
-}
-
-
-void Scanner::DropBookmark() { bookmark_c0_ = kNoBookmark; }
-
-void Scanner::CopyToNextTokenDesc(TokenDesc* from) {
- StartLiteral();
- StartRawLiteral();
- CopyTokenDesc(&next_, from);
- if (next_.literal_chars->length() == 0) next_.literal_chars = nullptr;
- if (next_.raw_literal_chars->length() == 0) next_.raw_literal_chars = nullptr;
-}
-
-void Scanner::CopyTokenDesc(TokenDesc* to, TokenDesc* from) {
- DCHECK_NOT_NULL(to);
- DCHECK_NOT_NULL(from);
- to->token = from->token;
- to->location = from->location;
- to->literal_chars->CopyFrom(from->literal_chars);
- to->raw_literal_chars->CopyFrom(from->raw_literal_chars);
-}
-
-
-int DuplicateFinder::AddOneByteSymbol(Vector<const uint8_t> key, int value) {
- return AddSymbol(key, true, value);
-}
-
-
-int DuplicateFinder::AddTwoByteSymbol(Vector<const uint16_t> key, int value) {
- return AddSymbol(Vector<const uint8_t>::cast(key), false, value);
-}
-
-
-int DuplicateFinder::AddSymbol(Vector<const uint8_t> key,
- bool is_one_byte,
- int value) {
- uint32_t hash = Hash(key, is_one_byte);
- byte* encoding = BackupKey(key, is_one_byte);
- base::HashMap::Entry* entry = map_.LookupOrInsert(encoding, hash);
- int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- entry->value =
- reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
- return old_value;
-}
-
-
-int DuplicateFinder::AddNumber(Vector<const uint8_t> key, int value) {
- DCHECK(key.length() > 0);
- // Quick check for already being in canonical form.
- if (IsNumberCanonical(key)) {
- return AddOneByteSymbol(key, value);
- }
-
- int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY;
- double double_value = StringToDouble(
- unicode_constants_, key, flags, 0.0);
- int length;
- const char* string;
- if (!std::isfinite(double_value)) {
- string = "Infinity";
- length = 8; // strlen("Infinity");
- } else {
- string = DoubleToCString(double_value,
- Vector<char>(number_buffer_, kBufferSize));
- length = StrLength(string);
- }
- return AddSymbol(Vector<const byte>(reinterpret_cast<const byte*>(string),
- length), true, value);
-}
-
-
-bool DuplicateFinder::IsNumberCanonical(Vector<const uint8_t> number) {
- // Test for a safe approximation of number literals that are already
- // in canonical form: max 15 digits, no leading zeroes, except an
- // integer part that is a single zero, and no trailing zeros below
- // the decimal point.
- int pos = 0;
- int length = number.length();
- if (number.length() > 15) return false;
- if (number[pos] == '0') {
- pos++;
- } else {
- while (pos < length &&
- static_cast<unsigned>(number[pos] - '0') <= ('9' - '0')) pos++;
- }
- if (length == pos) return true;
- if (number[pos] != '.') return false;
- pos++;
- bool invalid_last_digit = true;
- while (pos < length) {
- uint8_t digit = number[pos] - '0';
- if (digit > '9' - '0') return false;
- invalid_last_digit = (digit == 0);
- pos++;
- }
- return !invalid_last_digit;
-}
-
-
-uint32_t DuplicateFinder::Hash(Vector<const uint8_t> key, bool is_one_byte) {
- // Primitive hash function, almost identical to the one used
- // for strings (except that it's seeded by the length and representation).
- int length = key.length();
- uint32_t hash = (length << 1) | (is_one_byte ? 1 : 0);
- for (int i = 0; i < length; i++) {
- uint32_t c = key[i];
- hash = (hash + c) * 1025;
- hash ^= (hash >> 6);
- }
- return hash;
-}
-
-
-bool DuplicateFinder::Match(void* first, void* second) {
- // Decode lengths.
- // Length + representation is encoded as base 128, most significant heptet
- // first, with a 8th bit being non-zero while there are more heptets.
- // The value encodes the number of bytes following, and whether the original
- // was Latin1.
- byte* s1 = reinterpret_cast<byte*>(first);
- byte* s2 = reinterpret_cast<byte*>(second);
- uint32_t length_one_byte_field = 0;
- byte c1;
- do {
- c1 = *s1;
- if (c1 != *s2) return false;
- length_one_byte_field = (length_one_byte_field << 7) | (c1 & 0x7f);
- s1++;
- s2++;
- } while ((c1 & 0x80) != 0);
- int length = static_cast<int>(length_one_byte_field >> 1);
- return memcmp(s1, s2, length) == 0;
-}
-
-
-byte* DuplicateFinder::BackupKey(Vector<const uint8_t> bytes,
- bool is_one_byte) {
- uint32_t one_byte_length = (bytes.length() << 1) | (is_one_byte ? 1 : 0);
- backing_store_.StartSequence();
- // Emit one_byte_length as base-128 encoded number, with the 7th bit set
- // on the byte of every heptet except the last, least significant, one.
- if (one_byte_length >= (1 << 7)) {
- if (one_byte_length >= (1 << 14)) {
- if (one_byte_length >= (1 << 21)) {
- if (one_byte_length >= (1 << 28)) {
- backing_store_.Add(
- static_cast<uint8_t>((one_byte_length >> 28) | 0x80));
- }
- backing_store_.Add(
- static_cast<uint8_t>((one_byte_length >> 21) | 0x80u));
- }
- backing_store_.Add(
- static_cast<uint8_t>((one_byte_length >> 14) | 0x80u));
- }
- backing_store_.Add(static_cast<uint8_t>((one_byte_length >> 7) | 0x80u));
- }
- backing_store_.Add(static_cast<uint8_t>(one_byte_length & 0x7f));
-
- backing_store_.AddBlock(bytes);
- return backing_store_.EndSequence().start();
+void Scanner::SeekNext(size_t position) {
+ // Use with care: This cleanly resets most, but not all scanner state.
+ // TODO(vogelheim): Fix this, or at least DCHECK the relevant conditions.
+
+ // To re-scan from a given character position, we need to:
+ // 1, Reset the current_, next_ and next_next_ tokens
+ // (next_ + next_next_ will be overwrittem by Next(),
+ // current_ will remain unchanged, so overwrite it fully.)
+ current_ = {{0, 0}, nullptr, nullptr, 0, Token::UNINITIALIZED};
+ next_.token = Token::UNINITIALIZED;
+ next_next_.token = Token::UNINITIALIZED;
+ // 2, reset the source to the desired position,
+ source_->Seek(position);
+ // 3, re-scan, by scanning the look-ahead char + 1 token (next_).
+ c0_ = source_->Advance();
+ Next();
+ DCHECK_EQ(next_.location.beg_pos, position);
}
} // namespace internal
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 66c6ce8133..b2b1a8a3f4 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -8,12 +8,9 @@
#define V8_PARSING_SCANNER_H_
#include "src/allocation.h"
-#include "src/base/hashmap.h"
#include "src/base/logging.h"
#include "src/char-predicates.h"
-#include "src/collector.h"
#include "src/globals.h"
-#include "src/list.h"
#include "src/messages.h"
#include "src/parsing/token.h"
#include "src/unicode-decoder.h"
@@ -25,127 +22,127 @@ namespace internal {
class AstRawString;
class AstValueFactory;
+class DuplicateFinder;
+class ExternalOneByteString;
+class ExternalTwoByteString;
class ParserRecorder;
class UnicodeCache;
-
// ---------------------------------------------------------------------
// Buffered stream of UTF-16 code units, using an internal UTF-16 buffer.
// A code unit is a 16 bit value representing either a 16 bit code point
// or one part of a surrogate pair that make a single 21 bit code point.
-
class Utf16CharacterStream {
public:
- Utf16CharacterStream() : pos_(0) { }
+ static const uc32 kEndOfInput = -1;
+
virtual ~Utf16CharacterStream() { }
// Returns and advances past the next UTF-16 code unit in the input
- // stream. If there are no more code units, it returns a negative
- // value.
+ // stream. If there are no more code units it returns kEndOfInput.
inline uc32 Advance() {
- if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
- pos_++;
+ if (V8_LIKELY(buffer_cursor_ < buffer_end_)) {
+ return static_cast<uc32>(*(buffer_cursor_++));
+ } else if (ReadBlock()) {
return static_cast<uc32>(*(buffer_cursor_++));
+ } else {
+ // Note: currently the following increment is necessary to avoid a
+ // parser problem! The scanner treats the final kEndOfInput as
+ // a code unit with a position, and does math relative to that
+ // position.
+ buffer_cursor_++;
+ return kEndOfInput;
}
- // Note: currently the following increment is necessary to avoid a
- // parser problem! The scanner treats the final kEndOfInput as
- // a code unit with a position, and does math relative to that
- // position.
- pos_++;
-
- return kEndOfInput;
- }
-
- // Return the current position in the code unit stream.
- // Starts at zero.
- inline size_t pos() const { return pos_; }
-
- // Skips forward past the next code_unit_count UTF-16 code units
- // in the input, or until the end of input if that comes sooner.
- // Returns the number of code units actually skipped. If less
- // than code_unit_count,
- inline size_t SeekForward(size_t code_unit_count) {
- size_t buffered_chars = buffer_end_ - buffer_cursor_;
- if (code_unit_count <= buffered_chars) {
- buffer_cursor_ += code_unit_count;
- pos_ += code_unit_count;
- return code_unit_count;
+ }
+
+ // Go back one by one character in the input stream.
+ // This undoes the most recent Advance().
+ inline void Back() {
+ // The common case - if the previous character is within
+ // buffer_start_ .. buffer_end_ will be handles locally.
+ // Otherwise, a new block is requested.
+ if (V8_LIKELY(buffer_cursor_ > buffer_start_)) {
+ buffer_cursor_--;
+ } else {
+ ReadBlockAt(pos() - 1);
+ }
+ }
+
+ // Go back one by two characters in the input stream. (This is the same as
+ // calling Back() twice. But Back() may - in some instances - do substantial
+ // work. Back2() guarantees this work will be done only once.)
+ inline void Back2() {
+ if (V8_LIKELY(buffer_cursor_ - 2 >= buffer_start_)) {
+ buffer_cursor_ -= 2;
+ } else {
+ ReadBlockAt(pos() - 2);
}
- return SlowSeekForward(code_unit_count);
}
- // Pushes back the most recently read UTF-16 code unit (or negative
- // value if at end of input), i.e., the value returned by the most recent
- // call to Advance.
- // Must not be used right after calling SeekForward.
- virtual void PushBack(int32_t code_unit) = 0;
+ inline size_t pos() const {
+ return buffer_pos_ + (buffer_cursor_ - buffer_start_);
+ }
- virtual bool SetBookmark();
- virtual void ResetToBookmark();
+ inline void Seek(size_t pos) {
+ if (V8_LIKELY(pos >= buffer_pos_ &&
+ pos < (buffer_pos_ + (buffer_end_ - buffer_start_)))) {
+ buffer_cursor_ = buffer_start_ + (pos - buffer_pos_);
+ } else {
+ ReadBlockAt(pos);
+ }
+ }
protected:
- static const uc32 kEndOfInput = -1;
-
- // Ensures that the buffer_cursor_ points to the code_unit at
- // position pos_ of the input, if possible. If the position
- // is at or after the end of the input, return false. If there
- // are more code_units available, return true.
+ Utf16CharacterStream(const uint16_t* buffer_start,
+ const uint16_t* buffer_cursor,
+ const uint16_t* buffer_end, size_t buffer_pos)
+ : buffer_start_(buffer_start),
+ buffer_cursor_(buffer_cursor),
+ buffer_end_(buffer_end),
+ buffer_pos_(buffer_pos) {}
+ Utf16CharacterStream() : Utf16CharacterStream(nullptr, nullptr, nullptr, 0) {}
+
+ void ReadBlockAt(size_t new_pos) {
+ // The callers of this method (Back/Back2/Seek) should handle the easy
+ // case (seeking within the current buffer), and we should only get here
+ // if we actually require new data.
+ // (This is really an efficiency check, not a correctness invariant.)
+ DCHECK(new_pos < buffer_pos_ ||
+ new_pos >= buffer_pos_ + (buffer_end_ - buffer_start_));
+
+ // Change pos() to point to new_pos.
+ buffer_pos_ = new_pos;
+ buffer_cursor_ = buffer_start_;
+ bool success = ReadBlock();
+ USE(success);
+
+ // Post-conditions: 1, on success, we should be at the right position.
+ // 2, success == we should have more characters available.
+ DCHECK_IMPLIES(success, pos() == new_pos);
+ DCHECK_EQ(success, buffer_cursor_ < buffer_end_);
+ DCHECK_EQ(success, buffer_start_ < buffer_end_);
+ }
+
+ // Read more data, and update buffer_*_ to point to it.
+ // Returns true if more data was available.
+ //
+ // ReadBlock() may modify any of the buffer_*_ members, but must sure that
+ // the result of pos() remains unaffected.
+ //
+ // Examples:
+ // - a stream could either fill a separate buffer. Then buffer_start_ and
+ // buffer_cursor_ would point to the beginning of the buffer, and
+ // buffer_pos would be the old pos().
+ // - a stream with existing buffer chunks would set buffer_start_ and
+ // buffer_end_ to cover the full chunk, and then buffer_cursor_ would
+ // point into the middle of the buffer, while buffer_pos_ would describe
+ // the start of the buffer.
virtual bool ReadBlock() = 0;
- virtual size_t SlowSeekForward(size_t code_unit_count) = 0;
+ const uint16_t* buffer_start_;
const uint16_t* buffer_cursor_;
const uint16_t* buffer_end_;
- size_t pos_;
-};
-
-
-// ---------------------------------------------------------------------
-// DuplicateFinder discovers duplicate symbols.
-
-class DuplicateFinder {
- public:
- explicit DuplicateFinder(UnicodeCache* constants)
- : unicode_constants_(constants),
- backing_store_(16),
- map_(&Match) { }
-
- int AddOneByteSymbol(Vector<const uint8_t> key, int value);
- int AddTwoByteSymbol(Vector<const uint16_t> key, int value);
- // Add a a number literal by converting it (if necessary)
- // to the string that ToString(ToNumber(literal)) would generate.
- // and then adding that string with AddOneByteSymbol.
- // This string is the actual value used as key in an object literal,
- // and the one that must be different from the other keys.
- int AddNumber(Vector<const uint8_t> key, int value);
-
- private:
- int AddSymbol(Vector<const uint8_t> key, bool is_one_byte, int value);
- // Backs up the key and its length in the backing store.
- // The backup is stored with a base 127 encoding of the
- // length (plus a bit saying whether the string is one byte),
- // followed by the bytes of the key.
- uint8_t* BackupKey(Vector<const uint8_t> key, bool is_one_byte);
-
- // Compare two encoded keys (both pointing into the backing store)
- // for having the same base-127 encoded lengths and representation.
- // and then having the same 'length' bytes following.
- static bool Match(void* first, void* second);
- // Creates a hash from a sequence of bytes.
- static uint32_t Hash(Vector<const uint8_t> key, bool is_one_byte);
- // Checks whether a string containing a JS number is its canonical
- // form.
- static bool IsNumberCanonical(Vector<const uint8_t> key);
-
- // Size of buffer. Sufficient for using it to call DoubleToCString in
- // from conversions.h.
- static const int kBufferSize = 100;
-
- UnicodeCache* unicode_constants_;
- // Backing store used to store strings used as hashmap keys.
- SequenceCollector<unsigned char> backing_store_;
- base::HashMap map_;
- // Buffer used for string->number->canonical string conversions.
- char number_buffer_[kBufferSize];
+ size_t buffer_pos_;
};
@@ -157,18 +154,24 @@ class Scanner {
// Scoped helper for a re-settable bookmark.
class BookmarkScope {
public:
- explicit BookmarkScope(Scanner* scanner) : scanner_(scanner) {
+ explicit BookmarkScope(Scanner* scanner)
+ : scanner_(scanner), bookmark_(kNoBookmark) {
DCHECK_NOT_NULL(scanner_);
}
- ~BookmarkScope() { scanner_->DropBookmark(); }
+ ~BookmarkScope() {}
- bool Set() { return scanner_->SetBookmark(); }
- void Reset() { scanner_->ResetToBookmark(); }
- bool HasBeenSet() { return scanner_->BookmarkHasBeenSet(); }
- bool HasBeenReset() { return scanner_->BookmarkHasBeenReset(); }
+ void Set();
+ void Apply();
+ bool HasBeenSet();
+ bool HasBeenApplied();
private:
+ static const size_t kNoBookmark;
+ static const size_t kBookmarkWasApplied;
+ static const size_t kBookmarkAtFirstPos;
+
Scanner* scanner_;
+ size_t bookmark_;
DISALLOW_COPY_AND_ASSIGN(BookmarkScope);
};
@@ -190,6 +193,7 @@ class Scanner {
// -1 is outside of the range of any real source code.
static const int kNoOctalLocation = -1;
+ static const uc32 kEndOfInput = Utf16CharacterStream::kEndOfInput;
explicit Scanner(UnicodeCache* scanner_contants);
@@ -251,7 +255,7 @@ class Scanner {
return LiteralMatches(data, length, false);
}
- void IsGetOrSet(bool* is_get, bool* is_set) {
+ bool IsGetOrSet(bool* is_get, bool* is_set) {
if (is_literal_one_byte() &&
literal_length() == 3 &&
!literal_contains_escapes()) {
@@ -259,7 +263,9 @@ class Scanner {
reinterpret_cast<const char*>(literal_one_byte_string().start());
*is_get = strncmp(token, "get", 3) == 0;
*is_set = !*is_get && strncmp(token, "set", 3) == 0;
+ return *is_get || *is_set;
}
+ return false;
}
int FindSymbol(DuplicateFinder* finder, int value);
@@ -418,23 +424,6 @@ class Scanner {
Handle<String> Internalize(Isolate* isolate) const;
- void CopyFrom(const LiteralBuffer* other) {
- if (other == nullptr) {
- Reset();
- } else {
- is_one_byte_ = other->is_one_byte_;
- position_ = other->position_;
- if (position_ < backing_store_.length()) {
- std::copy(other->backing_store_.begin(),
- other->backing_store_.begin() + position_,
- backing_store_.begin());
- } else {
- backing_store_.Dispose();
- backing_store_ = other->backing_store_.Clone();
- }
- }
- }
-
private:
static const int kInitialCapacity = 16;
static const int kGrowthFactory = 4;
@@ -528,15 +517,6 @@ class Scanner {
scanner_error_ = MessageTemplate::kNone;
}
- // Support BookmarkScope functionality.
- bool SetBookmark();
- void ResetToBookmark();
- bool BookmarkHasBeenSet();
- bool BookmarkHasBeenReset();
- void DropBookmark();
- void CopyToNextTokenDesc(TokenDesc* from);
- static void CopyTokenDesc(TokenDesc* to, TokenDesc* from);
-
void ReportScannerError(const Location& location,
MessageTemplate::Template error) {
if (has_error()) return;
@@ -550,6 +530,9 @@ class Scanner {
scanner_error_location_ = Location(pos, pos + 1);
}
+ // Seek to the next_ token at the given position.
+ void SeekNext(size_t position);
+
// Literal buffer support
inline void StartLiteral() {
LiteralBuffer* free_buffer =
@@ -618,7 +601,7 @@ class Scanner {
if (unibrow::Utf16::IsLeadSurrogate(c0_)) {
uc32 c1 = source_->Advance();
if (!unibrow::Utf16::IsTrailSurrogate(c1)) {
- source_->PushBack(c1);
+ source_->Back();
} else {
c0_ = unibrow::Utf16::CombineSurrogatePair(c0_, c1);
}
@@ -627,14 +610,22 @@ class Scanner {
void PushBack(uc32 ch) {
if (c0_ > static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
- source_->PushBack(unibrow::Utf16::TrailSurrogate(c0_));
- source_->PushBack(unibrow::Utf16::LeadSurrogate(c0_));
+ source_->Back2();
} else {
- source_->PushBack(c0_);
+ source_->Back();
}
c0_ = ch;
}
+ // Same as PushBack(ch1); PushBack(ch2).
+ // - Potentially more efficient as it uses Back2() on the stream.
+ // - Uses char as parameters, since we're only calling it with ASCII chars in
+ // practice. This way, we can avoid a few edge cases.
+ void PushBack2(char ch1, char ch2) {
+ source_->Back2();
+ c0_ = ch2;
+ }
+
inline Token::Value Select(Token::Value tok) {
Advance();
return tok;
@@ -790,37 +781,6 @@ class Scanner {
TokenDesc next_; // desc for next token (one token look-ahead)
TokenDesc next_next_; // desc for the token after next (after PeakAhead())
- // Variables for Scanner::BookmarkScope and the *Bookmark implementation.
- // These variables contain the scanner state when a bookmark is set.
- //
- // We will use bookmark_c0_ as a 'control' variable, where:
- // - bookmark_c0_ >= 0: A bookmark has been set and this contains c0_.
- // - bookmark_c0_ == -1: No bookmark has been set.
- // - bookmark_c0_ == -2: The bookmark has been applied (ResetToBookmark).
- //
- // Which state is being bookmarked? The parser state is distributed over
- // several variables, roughly like this:
- // ... 1234 + 5678 ..... [character stream]
- // [current_] [next_] c0_ | [scanner state]
- // So when the scanner is logically at the beginning of an expression
- // like "1234 + 4567", then:
- // - current_ contains "1234"
- // - next_ contains "+"
- // - c0_ contains ' ' (the space between "+" and "5678",
- // - the source_ character stream points to the beginning of "5678".
- // To be able to restore this state, we will keep copies of current_, next_,
- // and c0_; we'll ask the stream to bookmark itself, and we'll copy the
- // contents of current_'s and next_'s literal buffers to bookmark_*_literal_.
- static const uc32 kNoBookmark = -1;
- static const uc32 kBookmarkWasApplied = -2;
- uc32 bookmark_c0_;
- TokenDesc bookmark_current_;
- TokenDesc bookmark_next_;
- LiteralBuffer bookmark_current_literal_;
- LiteralBuffer bookmark_current_raw_literal_;
- LiteralBuffer bookmark_next_literal_;
- LiteralBuffer bookmark_next_raw_literal_;
-
// Input stream. Must be initialized to an Utf16CharacterStream.
Utf16CharacterStream* source_;
diff --git a/deps/v8/src/pending-compilation-error-handler.cc b/deps/v8/src/pending-compilation-error-handler.cc
index f1f9a20e58..3e88efc999 100644
--- a/deps/v8/src/pending-compilation-error-handler.cc
+++ b/deps/v8/src/pending-compilation-error-handler.cc
@@ -4,6 +4,7 @@
#include "src/pending-compilation-error-handler.h"
+#include "src/ast/ast-value-factory.h"
#include "src/debug/debug.h"
#include "src/handles.h"
#include "src/isolate.h"
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 6dd897b031..ce423ea53a 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -1719,7 +1719,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// r5 : feedback vector
// r6 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- Label done_initialize_count, done_increment_count;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
@@ -1742,7 +1741,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Register weak_value = r10;
__ LoadP(weak_value, FieldMemOperand(r8, WeakCell::kValueOffset));
__ cmp(r4, weak_value);
- __ beq(&done_increment_count);
+ __ beq(&done);
__ CompareRoot(r8, Heap::kmegamorphic_symbolRootIndex);
__ beq(&done);
__ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset));
@@ -1765,7 +1764,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(&megamorphic);
- __ b(&done_increment_count);
+ __ b(&done);
__ bind(&miss);
@@ -1795,32 +1794,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
- __ b(&done_initialize_count);
+ __ b(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
- __ bind(&done_initialize_count);
- // Initialize the call counter.
- __ LoadSmiLiteral(r8, Smi::FromInt(1));
- __ SmiToPtrArrayOffset(r7, r6);
- __ add(r7, r5, r7);
- __ StoreP(r8, FieldMemOperand(r7, count_offset), r0);
- __ b(&done);
-
- __ bind(&done_increment_count);
+ __ bind(&done);
- // Increment the call count for monomorphic function calls.
+ // Increment the call count for all function calls.
__ SmiToPtrArrayOffset(r8, r6);
__ add(r8, r5, r8);
__ LoadP(r7, FieldMemOperand(r8, count_offset));
__ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
__ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
-
- __ bind(&done);
}
@@ -1872,6 +1861,16 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+// Note: feedback_vector and slot are clobbered after the call.
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+ Register slot, Register temp) {
+ const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+ __ SmiToPtrArrayOffset(temp, slot);
+ __ add(feedback_vector, feedback_vector, temp);
+ __ LoadP(slot, FieldMemOperand(feedback_vector, count_offset));
+ __ AddSmiLiteral(slot, slot, Smi::FromInt(1), temp);
+ __ StoreP(slot, FieldMemOperand(feedback_vector, count_offset), temp);
+}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// r4 - function
@@ -1885,12 +1884,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ mov(r3, Operand(arg_count()));
// Increment the call count for monomorphic function calls.
- const int count_offset = FixedArray::kHeaderSize + kPointerSize;
- __ SmiToPtrArrayOffset(r8, r6);
- __ add(r5, r5, r8);
- __ LoadP(r6, FieldMemOperand(r5, count_offset));
- __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
- __ StoreP(r6, FieldMemOperand(r5, count_offset), r0);
+ IncrementCallCount(masm, r5, r6, r0);
__ mr(r5, r7);
__ mr(r6, r4);
@@ -1903,7 +1897,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// r4 - function
// r6 - slot id (Smi)
// r5 - vector
- Label extra_checks_or_miss, call, call_function;
+ Label extra_checks_or_miss, call, call_function, call_count_incremented;
int argc = arg_count();
ParameterCount actual(argc);
@@ -1934,13 +1928,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(r4, &extra_checks_or_miss);
+ __ bind(&call_function);
+
// Increment the call count for monomorphic function calls.
- const int count_offset = FixedArray::kHeaderSize + kPointerSize;
- __ LoadP(r6, FieldMemOperand(r9, count_offset));
- __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
- __ StoreP(r6, FieldMemOperand(r9, count_offset), r0);
+ IncrementCallCount(masm, r5, r6, r0);
- __ bind(&call_function);
__ mov(r3, Operand(argc));
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
@@ -1980,6 +1972,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ StoreP(ip, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
__ bind(&call);
+
+ // Increment the call count for megamorphic function calls.
+ IncrementCallCount(masm, r5, r6, r0);
+
+ __ bind(&call_count_incremented);
__ mov(r3, Operand(argc));
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -2006,10 +2003,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ cmp(r7, ip);
__ bne(&miss);
- // Initialize the call counter.
- __ LoadSmiLiteral(r8, Smi::FromInt(1));
- __ StoreP(r8, FieldMemOperand(r9, count_offset), r0);
-
// Store the function. Use a stub since we need a frame for allocation.
// r5 - vector
// r6 - slot
@@ -2017,9 +2010,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(r5);
+ __ Push(r6);
__ Push(cp, r4);
__ CallStub(&create_stub);
__ Pop(cp, r4);
+ __ Pop(r6);
+ __ Pop(r5);
}
__ b(&call_function);
@@ -2029,7 +2026,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
GenerateMiss(masm);
- __ b(&call);
+ __ b(&call_count_incremented);
}
@@ -2211,290 +2208,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
}
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // lr: return address
- // sp[0]: to
- // sp[4]: from
- // sp[8]: string
-
- // This stub is called from the native-call %_SubString(...), so
- // nothing can be assumed about the arguments. It is tested that:
- // "string" is a sequential string,
- // both "from" and "to" are smis, and
- // 0 <= from <= to <= string.length.
- // If any of these assumptions fail, we call the runtime system.
-
- const int kToOffset = 0 * kPointerSize;
- const int kFromOffset = 1 * kPointerSize;
- const int kStringOffset = 2 * kPointerSize;
-
- __ LoadP(r5, MemOperand(sp, kToOffset));
- __ LoadP(r6, MemOperand(sp, kFromOffset));
-
- // If either to or from had the smi tag bit set, then fail to generic runtime
- __ JumpIfNotSmi(r5, &runtime);
- __ JumpIfNotSmi(r6, &runtime);
- __ SmiUntag(r5);
- __ SmiUntag(r6, SetRC);
- // Both r5 and r6 are untagged integers.
-
- // We want to bailout to runtime here if From is negative.
- __ blt(&runtime, cr0); // From < 0.
-
- __ cmpl(r6, r5);
- __ bgt(&runtime); // Fail if from > to.
- __ sub(r5, r5, r6);
-
- // Make sure first argument is a string.
- __ LoadP(r3, MemOperand(sp, kStringOffset));
- __ JumpIfSmi(r3, &runtime);
- Condition is_string = masm->IsObjectStringType(r3, r4);
- __ b(NegateCondition(is_string), &runtime, cr0);
-
- Label single_char;
- __ cmpi(r5, Operand(1));
- __ b(eq, &single_char);
-
- // Short-cut for the case of trivial substring.
- Label return_r3;
- // r3: original string
- // r5: result string length
- __ LoadP(r7, FieldMemOperand(r3, String::kLengthOffset));
- __ SmiUntag(r0, r7);
- __ cmpl(r5, r0);
- // Return original string.
- __ beq(&return_r3);
- // Longer than original string's length or negative: unsafe arguments.
- __ bgt(&runtime);
- // Shorter than original string's length: an actual substring.
-
- // Deal with different string types: update the index if necessary
- // and put the underlying string into r8.
- // r3: original string
- // r4: instance type
- // r5: length
- // r6: from index (untagged)
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ andi(r0, r4, Operand(kIsIndirectStringMask));
- __ beq(&seq_or_external_string, cr0);
-
- __ andi(r0, r4, Operand(kSlicedNotConsMask));
- __ bne(&sliced_string, cr0);
- // Cons string. Check whether it is flat, then fetch first part.
- __ LoadP(r8, FieldMemOperand(r3, ConsString::kSecondOffset));
- __ CompareRoot(r8, Heap::kempty_stringRootIndex);
- __ bne(&runtime);
- __ LoadP(r8, FieldMemOperand(r3, ConsString::kFirstOffset));
- // Update instance type.
- __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset));
- __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ b(&underlying_unpacked);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ LoadP(r8, FieldMemOperand(r3, SlicedString::kParentOffset));
- __ LoadP(r7, FieldMemOperand(r3, SlicedString::kOffsetOffset));
- __ SmiUntag(r4, r7);
- __ add(r6, r6, r4); // Add offset to index.
- // Update instance type.
- __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset));
- __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ b(&underlying_unpacked);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the expected register.
- __ mr(r8, r3);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // r8: underlying subject string
- // r4: instance type of underlying subject string
- // r5: length
- // r6: adjusted start index (untagged)
- __ cmpi(r5, Operand(SlicedString::kMinLength));
- // Short slice. Copy instead of slicing.
- __ blt(&copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ andi(r0, r4, Operand(kStringEncodingMask));
- __ beq(&two_byte_slice, cr0);
- __ AllocateOneByteSlicedString(r3, r5, r9, r10, &runtime);
- __ b(&set_slice_header);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(r3, r5, r9, r10, &runtime);
- __ bind(&set_slice_header);
- __ SmiTag(r6);
- __ StoreP(r8, FieldMemOperand(r3, SlicedString::kParentOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, SlicedString::kOffsetOffset), r0);
- __ b(&return_r3);
-
- __ bind(&copy_routine);
- }
-
- // r8: underlying subject string
- // r4: instance type of underlying subject string
- // r5: length
- // r6: adjusted start index (untagged)
- Label two_byte_sequential, sequential_string, allocate_result;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ andi(r0, r4, Operand(kExternalStringTag));
- __ beq(&sequential_string, cr0);
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ andi(r0, r4, Operand(kShortExternalStringTag));
- __ bne(&runtime, cr0);
- __ LoadP(r8, FieldMemOperand(r8, ExternalString::kResourceDataOffset));
- // r8 already points to the first character of underlying string.
- __ b(&allocate_result);
-
- __ bind(&sequential_string);
- // Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ addi(r8, r8, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&allocate_result);
- // Sequential acii string. Allocate the result.
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ andi(r0, r4, Operand(kStringEncodingMask));
- __ beq(&two_byte_sequential, cr0);
-
- // Allocate and copy the resulting one-byte string.
- __ AllocateOneByteString(r3, r5, r7, r9, r10, &runtime);
-
- // Locate first character of substring to copy.
- __ add(r8, r8, r6);
- // Locate first character of result.
- __ addi(r4, r3, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // r3: result string
- // r4: first character of result string
- // r5: result string length
- // r8: first character of substring to copy
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6,
- String::ONE_BYTE_ENCODING);
- __ b(&return_r3);
-
- // Allocate and copy the resulting two-byte string.
- __ bind(&two_byte_sequential);
- __ AllocateTwoByteString(r3, r5, r7, r9, r10, &runtime);
-
- // Locate first character of substring to copy.
- __ ShiftLeftImm(r4, r6, Operand(1));
- __ add(r8, r8, r4);
- // Locate first character of result.
- __ addi(r4, r3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // r3: result string.
- // r4: first character of result.
- // r5: result length.
- // r8: first character of substring to copy.
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6,
- String::TWO_BYTE_ENCODING);
-
- __ bind(&return_r3);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1, r6, r7);
- __ Drop(3);
- __ Ret();
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString);
-
- __ bind(&single_char);
- // r3: original string
- // r4: instance type
- // r5: length
- // r6: from index (untagged)
- __ SmiTag(r6, r6);
- StringCharAtGenerator generator(r3, r6, r5, r3, &runtime, &runtime, &runtime,
- RECEIVER_IS_STRING);
- generator.GenerateFast(masm);
- __ Drop(3);
- __ Ret();
- generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
- // The ToString stub takes one argument in r3.
- Label is_number;
- __ JumpIfSmi(r3, &is_number);
-
- __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE);
- // r3: receiver
- // r4: receiver instance type
- __ Ret(lt);
-
- Label not_heap_number;
- __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
- __ bne(&not_heap_number);
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ cmpi(r4, Operand(ODDBALL_TYPE));
- __ bne(&not_oddball);
- __ LoadP(r3, FieldMemOperand(r3, Oddball::kToStringOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
- // The ToName stub takes one argument in r3.
- Label is_number;
- __ JumpIfSmi(r3, &is_number);
-
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ CompareObjectType(r3, r4, r4, LAST_NAME_TYPE);
- // r3: receiver
- // r4: receiver instance type
- __ Ret(le);
-
- Label not_heap_number;
- __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
- __ bne(&not_heap_number);
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ cmpi(r4, Operand(ODDBALL_TYPE));
- __ bne(&not_oddball);
- __ LoadP(r3, FieldMemOperand(r3, Oddball::kToStringOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kToName);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -3407,19 +3120,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_scratch;
- DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
- __ lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
- __ and_(regs_.scratch0(), regs_.object(), r0);
- __ LoadP(
- regs_.scratch1(),
- MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
- __ subi(regs_.scratch1(), regs_.scratch1(), Operand(1));
- __ StoreP(
- regs_.scratch1(),
- MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
- __ cmpi(regs_.scratch1(), Operand::Zero()); // PPC, we could do better here
- __ blt(&need_incremental);
-
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -3854,7 +3554,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
__ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
// Load the map into the correct register.
- DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
__ mr(feedback, too_far);
__ addi(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4581,7 +4281,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ Cmpi(r10, Operand(Page::kMaxRegularHeapObjectSize), r0);
+ __ Cmpi(r10, Operand(kMaxRegularHeapObjectSize), r0);
__ bgt(&too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -4972,7 +4672,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ Cmpi(r10, Operand(Page::kMaxRegularHeapObjectSize), r0);
+ __ Cmpi(r10, Operand(kMaxRegularHeapObjectSize), r0);
__ bgt(&too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index bc188f4be1..3ff0fde047 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -40,13 +40,9 @@ const Register StoreDescriptor::SlotRegister() { return r7; }
const Register StoreWithVectorDescriptor::VectorRegister() { return r6; }
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return r7; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return r6; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return r8; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return r6; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return r7; }
+const Register StoreTransitionDescriptor::VectorRegister() { return r6; }
+const Register StoreTransitionDescriptor::MapRegister() { return r8; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r5; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
@@ -355,7 +351,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // callee
@@ -390,7 +386,19 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
r3, // argument count (not including receiver)
r6, // new target
r4, // constructor to call
- r5 // address of the first argument
+ r5, // allocation site feedback if available, undefined otherwise
+ r7 // address of the first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r3, // argument count (not including receiver)
+ r4, // target to call checked to be Array function
+ r5, // allocation site feedback if available, undefined otherwise
+ r6 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 4e39d967af..9b5f80ebe9 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -282,9 +282,7 @@ void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
void MacroAssembler::InNewSpace(Register object, Register scratch,
Condition cond, Label* branch) {
DCHECK(cond == eq || cond == ne);
- const int mask =
- (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
- CheckPageFlag(object, scratch, mask, cond, branch);
+ CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
}
@@ -1814,7 +1812,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
@@ -2070,7 +2068,7 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
void MacroAssembler::FastAllocate(int object_size, Register result,
Register scratch1, Register scratch2,
AllocationFlags flags) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index cf9d4b5719..ba4d277688 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -140,6 +140,18 @@ class MacroAssembler : public Assembler {
void Ret() { blr(); }
void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+ // below the caller's sp.
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 0) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ UNIMPLEMENTED();
+ }
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count);
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 2816a87751..84fbb399b3 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -658,9 +658,8 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
-
-void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
- size_t size) {
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+ void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
int intra_line = (start & CachePage::kLineMask);
start -= intra_line;
@@ -680,8 +679,8 @@ void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
}
}
-
-CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page) {
base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
@@ -692,7 +691,8 @@ CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+ intptr_t start, int size) {
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
@@ -704,7 +704,8 @@ void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
-void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -737,7 +738,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
- i_cache_ = new base::HashMap(&ICacheMatch);
+ i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
@@ -872,7 +873,8 @@ class Redirection {
// static
-void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
+ Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index d3163e8a50..d061545099 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -217,7 +217,7 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
- static void TearDown(base::HashMap* i_cache, Redirection* first);
+ static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
@@ -239,7 +239,8 @@ class Simulator {
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
- static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
+ static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+ size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_lr, end_sim_pc).
@@ -329,9 +330,12 @@ class Simulator {
void ExecuteInstruction(Instruction* instr);
// ICache.
- static void CheckICache(base::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
- static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
+ static void CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr);
+ static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+ int size);
+ static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page);
// Runtime call support.
static void* RedirectExternalReference(
@@ -369,7 +373,7 @@ class Simulator {
char* last_debugger_input_;
// Icache simulation
- base::HashMap* i_cache_;
+ base::CustomMatcherHashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
diff --git a/deps/v8/src/profiler/OWNERS b/deps/v8/src/profiler/OWNERS
new file mode 100644
index 0000000000..87c96616bc
--- /dev/null
+++ b/deps/v8/src/profiler/OWNERS
@@ -0,0 +1 @@
+alph@chromium.org
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index d094d0ecc6..99b0b7096b 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -193,7 +193,7 @@ void AllocationTracker::DeleteFunctionInfo(FunctionInfo** info) {
AllocationTracker::AllocationTracker(HeapObjectsMap* ids, StringsStorage* names)
: ids_(ids),
names_(names),
- id_to_function_info_index_(base::HashMap::PointersMatch),
+ id_to_function_info_index_(),
info_index_for_other_state_(0) {
FunctionInfo* info = new FunctionInfo();
info->name = "(root)";
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index e3df609f89..e9ccc5703e 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -11,7 +11,6 @@
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/time.h"
-#include "src/compiler.h"
#include "src/isolate.h"
#include "src/libsampler/sampler.h"
#include "src/locked-queue.h"
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 9273168f80..d0fa2e4c1b 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -355,16 +355,8 @@ const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId =
HeapObjectsMap::kGcRootsFirstSubrootId +
VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
-
-static bool AddressesMatch(void* key1, void* key2) {
- return key1 == key2;
-}
-
-
HeapObjectsMap::HeapObjectsMap(Heap* heap)
- : next_id_(kFirstAvailableObjectId),
- entries_map_(AddressesMatch),
- heap_(heap) {
+ : next_id_(kFirstAvailableObjectId), heap_(heap) {
// This dummy element solves a problem with entries_map_.
// When we do lookup in HashMap we see no difference between two cases:
// it has an entry with NULL as the value or it has created
@@ -476,7 +468,7 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
entries_map_.occupancy());
}
heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "HeapObjectsMap::UpdateHeapObjectsMap");
+ GarbageCollectionReason::kHeapProfiler);
HeapIterator iterator(heap_);
for (HeapObject* obj = iterator.next();
obj != NULL;
@@ -704,7 +696,7 @@ size_t HeapObjectsMap::GetUsedMemorySize() const {
GetMemoryUsedByList(entries_) + GetMemoryUsedByList(time_intervals_);
}
-HeapEntriesMap::HeapEntriesMap() : entries_(base::HashMap::PointersMatch) {}
+HeapEntriesMap::HeapEntriesMap() : entries_() {}
int HeapEntriesMap::Map(HeapThing thing) {
base::HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing));
@@ -720,7 +712,7 @@ void HeapEntriesMap::Pair(HeapThing thing, int entry) {
cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
}
-HeapObjectsSet::HeapObjectsSet() : entries_(base::HashMap::PointersMatch) {}
+HeapObjectsSet::HeapObjectsSet() : entries_() {}
void HeapObjectsSet::Clear() {
entries_.Clear();
@@ -1216,8 +1208,7 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
}
if (scope_info->HasFunctionName()) {
String* name = scope_info->FunctionName();
- VariableMode mode;
- int idx = scope_info->FunctionContextSlotIndex(name, &mode);
+ int idx = scope_info->FunctionContextSlotIndex(name);
if (idx >= 0) {
SetContextReference(context, entry, name, context->get(idx),
Context::OffsetOfElementAt(idx));
@@ -1831,6 +1822,7 @@ bool V8HeapExplorer::IsEssentialObject(Object* object) {
object != heap_->empty_byte_array() &&
object != heap_->empty_fixed_array() &&
object != heap_->empty_descriptor_array() &&
+ object != heap_->empty_type_feedback_vector() &&
object != heap_->fixed_array_map() && object != heap_->cell_map() &&
object != heap_->global_property_cell_map() &&
object != heap_->shared_function_info_map() &&
@@ -2507,12 +2499,10 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
// full GC is reachable from the root when computing dominators.
// This is not true for weakly reachable objects.
// As a temporary solution we call GC twice.
- heap_->CollectAllGarbage(
- Heap::kMakeHeapIterableMask,
- "HeapSnapshotGenerator::GenerateSnapshot");
- heap_->CollectAllGarbage(
- Heap::kMakeHeapIterableMask,
- "HeapSnapshotGenerator::GenerateSnapshot");
+ heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ GarbageCollectionReason::kHeapProfiler);
+ heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ GarbageCollectionReason::kHeapProfiler);
#ifdef VERIFY_HEAP
Heap* debug_heap = heap_;
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index b870fbe324..b235ff0502 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -525,8 +525,8 @@ class NativeObjectsExplorer {
bool embedder_queried_;
HeapObjectsSet in_groups_;
// RetainedObjectInfo* -> List<HeapObject*>*
- base::HashMap objects_by_info_;
- base::HashMap native_groups_;
+ base::CustomMatcherHashMap objects_by_info_;
+ base::CustomMatcherHashMap native_groups_;
HeapEntriesAllocator* synthetic_entries_allocator_;
HeapEntriesAllocator* native_entries_allocator_;
// Used during references extraction.
@@ -613,7 +613,7 @@ class HeapSnapshotJSONSerializer {
static const int kNodeFieldsCount;
HeapSnapshot* snapshot_;
- base::HashMap strings_;
+ base::CustomMatcherHashMap strings_;
int next_node_id_;
int next_string_id_;
OutputStreamWriter* writer_;
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index b785eaaf5f..179d411429 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -8,8 +8,9 @@
#include <map>
#include "src/allocation.h"
#include "src/base/hashmap.h"
-#include "src/compiler.h"
+#include "src/log.h"
#include "src/profiler/strings-storage.h"
+#include "src/source-position.h"
namespace v8 {
namespace internal {
@@ -220,10 +221,10 @@ class ProfileNode {
CodeEntry* entry_;
unsigned self_ticks_;
// Mapping from CodeEntry* to ProfileNode*
- base::HashMap children_;
+ base::CustomMatcherHashMap children_;
List<ProfileNode*> children_list_;
unsigned id_;
- base::HashMap line_ticks_;
+ base::CustomMatcherHashMap line_ticks_;
std::vector<CpuProfileDeoptInfo> deopt_infos_;
@@ -260,7 +261,7 @@ class ProfileTree {
Isolate* isolate_;
unsigned next_function_id_;
- base::HashMap function_ids_;
+ base::CustomMatcherHashMap function_ids_;
DISALLOW_COPY_AND_ASSIGN(ProfileTree);
};
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index 7ce874e6c1..4bceac2e89 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -319,6 +319,7 @@ CodeEntry* ProfilerListener::NewCodeEntry(
}
void ProfilerListener::AddObserver(CodeEventObserver* observer) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
if (std::find(observers_.begin(), observers_.end(), observer) !=
observers_.end())
return;
@@ -326,6 +327,7 @@ void ProfilerListener::AddObserver(CodeEventObserver* observer) {
}
void ProfilerListener::RemoveObserver(CodeEventObserver* observer) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
auto it = std::find(observers_.begin(), observers_.end(), observer);
if (it == observers_.end()) return;
observers_.erase(it);
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index 7e24ceaa86..500b7ae6be 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -79,6 +79,7 @@ class ProfilerListener : public CodeEventListener {
void RecordDeoptInlinedFrames(CodeEntry* entry, AbstractCode* abstract_code);
Name* InferScriptName(Name* name, SharedFunctionInfo* info);
V8_INLINE void DispatchCodeEvent(const CodeEventsContainer& evt_rec) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
for (auto observer : observers_) {
observer->CodeEventHandler(evt_rec);
}
@@ -87,6 +88,7 @@ class ProfilerListener : public CodeEventListener {
StringsStorage function_and_resource_names_;
std::vector<CodeEntry*> code_entries_;
std::vector<CodeEventObserver*> observers_;
+ base::Mutex mutex_;
DISALLOW_COPY_AND_ASSIGN(ProfilerListener);
};
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index b4361ee849..3b2ca630ac 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -259,8 +259,8 @@ v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
if (flags_ & v8::HeapProfiler::kSamplingForceGC) {
- isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
- "SamplingHeapProfiler");
+ isolate_->heap()->CollectAllGarbage(
+ Heap::kNoGCFlags, GarbageCollectionReason::kSamplingProfiler);
}
// To resolve positions to line/column numbers, we will need to look up
// scripts. Build a map to allow fast mapping from script id to script.
diff --git a/deps/v8/src/profiler/strings-storage.h b/deps/v8/src/profiler/strings-storage.h
index f98aa5e038..f11afbd165 100644
--- a/deps/v8/src/profiler/strings-storage.h
+++ b/deps/v8/src/profiler/strings-storage.h
@@ -36,10 +36,10 @@ class StringsStorage {
static bool StringsMatch(void* key1, void* key2);
const char* AddOrDisposeString(char* str, int len);
- base::HashMap::Entry* GetEntry(const char* str, int len);
+ base::CustomMatcherHashMap::Entry* GetEntry(const char* str, int len);
uint32_t hash_seed_;
- base::HashMap names_;
+ base::CustomMatcherHashMap names_;
DISALLOW_COPY_AND_ASSIGN(StringsStorage);
};
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.cc b/deps/v8/src/profiler/tracing-cpu-profiler.cc
new file mode 100644
index 0000000000..b24ca2fd25
--- /dev/null
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.cc
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/tracing-cpu-profiler.h"
+
+#include "src/v8.h"
+
+namespace v8 {
+
+std::unique_ptr<TracingCpuProfiler> TracingCpuProfiler::Create(
+ v8::Isolate* isolate) {
+ return std::unique_ptr<TracingCpuProfiler>(
+ new internal::TracingCpuProfilerImpl(
+ reinterpret_cast<internal::Isolate*>(isolate)));
+}
+
+namespace internal {
+
+TracingCpuProfilerImpl::TracingCpuProfilerImpl(Isolate* isolate) {}
+
+TracingCpuProfilerImpl::~TracingCpuProfilerImpl() {}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.h b/deps/v8/src/profiler/tracing-cpu-profiler.h
new file mode 100644
index 0000000000..80f1bdcc92
--- /dev/null
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.h
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_TRACING_CPU_PROFILER_H
+#define V8_PROFILER_TRACING_CPU_PROFILER_H
+
+#include "include/v8-profiler.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+class TracingCpuProfilerImpl final : public TracingCpuProfiler {
+ public:
+ explicit TracingCpuProfilerImpl(Isolate*);
+ ~TracingCpuProfilerImpl();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TracingCpuProfilerImpl);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROFILER_TRACING_CPU_PROFILER_H
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 87df02d08e..d720b1c3d2 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -62,7 +62,6 @@ STATIC_ASSERT(SKIP_SYMBOLS ==
static_cast<PropertyFilter>(v8::PropertyFilter::SKIP_SYMBOLS));
class Smi;
-class Type;
class TypeInfo;
// Type of properties.
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 0fd1a76e82..96a778cfb7 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -8,7 +8,6 @@
#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
-#include "src/compiler.h"
#include "src/elements.h"
#include "src/execution.h"
#include "src/factory.h"
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index 31c427ac0a..8118889966 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -46,7 +46,7 @@ class RegExpImpl {
// See ECMA-262 section 15.10.6.2.
// This function calls the garbage collector if necessary.
- MUST_USE_RESULT static MaybeHandle<Object> Exec(
+ V8_EXPORT_PRIVATE MUST_USE_RESULT static MaybeHandle<Object> Exec(
Handle<JSRegExp> regexp, Handle<String> subject, int index,
Handle<JSObject> lastMatchInfo);
@@ -200,7 +200,7 @@ class RegExpImpl {
// is not tracked, however. As a conservative approximation we track the
// total regexp code compiled including code that has subsequently been freed
// and the total executable memory at any point.
- static const int kRegExpExecutableMemoryLimit = 16 * MB;
+ static const size_t kRegExpExecutableMemoryLimit = 16 * MB;
static const int kRegExpCompiledLimit = 1 * MB;
static const int kRegExpTooLargeToOptimize = 20 * KB;
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 406bf84233..07a8155437 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -7,8 +7,8 @@
#include "src/objects.h"
#include "src/utils.h"
-#include "src/zone-containers.h"
-#include "src/zone.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index a0b975d79e..2cf937fd9d 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -7,7 +7,7 @@
#include "src/objects.h"
#include "src/regexp/regexp-ast.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index fb05690b91..b1e640c2ec 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -9,6 +9,7 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
+#include "src/compiler.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
@@ -54,6 +55,33 @@ static const int kOSRCodeSizeAllowancePerTickIgnition =
static const int kMaxSizeEarlyOpt =
5 * FullCodeGenerator::kCodeSizeMultiplier;
+#define OPTIMIZATION_REASON_LIST(V) \
+ V(DoNotOptimize, "do not optimize") \
+ V(HotAndStable, "hot and stable") \
+ V(HotEnoughForBaseline, "hot enough for baseline") \
+ V(HotWithoutMuchTypeInfo, "not much type info but very hot") \
+ V(SmallFunction, "small function")
+
+enum class OptimizationReason : uint8_t {
+#define OPTIMIZATION_REASON_CONSTANTS(Constant, message) k##Constant,
+ OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_CONSTANTS)
+#undef OPTIMIZATION_REASON_CONSTANTS
+};
+
+char const* OptimizationReasonToString(OptimizationReason reason) {
+ static char const* reasons[] = {
+#define OPTIMIZATION_REASON_TEXTS(Constant, message) message,
+ OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_TEXTS)
+#undef OPTIMIZATION_REASON_TEXTS
+ };
+ size_t const index = static_cast<size_t>(reason);
+ DCHECK_LT(index, arraysize(reasons));
+ return reasons[index];
+}
+
+std::ostream& operator<<(std::ostream& os, OptimizationReason reason) {
+ return os << OptimizationReasonToString(reason);
+}
RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
: isolate_(isolate),
@@ -79,8 +107,15 @@ static void GetICCounts(JSFunction* function, int* ic_with_type_info_count,
// Harvest vector-ics as well
TypeFeedbackVector* vector = function->feedback_vector();
- int with = 0, gen = 0;
- vector->ComputeCounts(&with, &gen);
+ int with = 0, gen = 0, type_vector_ic_count = 0;
+ const bool is_interpreted =
+ function->shared()->code()->is_interpreter_trampoline_builtin();
+
+ vector->ComputeCounts(&with, &gen, &type_vector_ic_count, is_interpreted);
+ if (is_interpreted) {
+ DCHECK_EQ(*ic_total_count, 0);
+ *ic_total_count = type_vector_ic_count;
+ }
*ic_with_type_info_count += with;
*ic_generic_count += gen;
@@ -112,13 +147,17 @@ static void TraceRecompile(JSFunction* function, const char* reason,
}
}
-void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
- TraceRecompile(function, reason, "optimized");
+void RuntimeProfiler::Optimize(JSFunction* function,
+ OptimizationReason reason) {
+ DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
+ TraceRecompile(function, OptimizationReasonToString(reason), "optimized");
function->AttemptConcurrentOptimization();
}
-void RuntimeProfiler::Baseline(JSFunction* function, const char* reason) {
- TraceRecompile(function, reason, "baseline");
+void RuntimeProfiler::Baseline(JSFunction* function,
+ OptimizationReason reason) {
+ DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
+ TraceRecompile(function, OptimizationReasonToString(reason), "baseline");
// TODO(4280): Fix this to check function is compiled for the interpreter
// once we have a standard way to check that. For now function will only
@@ -237,9 +276,9 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
generic_percentage <= FLAG_generic_ic_threshold) {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
- Optimize(function, "hot and stable");
+ Optimize(function, OptimizationReason::kHotAndStable);
} else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
- Optimize(function, "not much type info but very hot");
+ Optimize(function, OptimizationReason::kHotWithoutMuchTypeInfo);
} else {
shared_code->set_profiler_ticks(ticks + 1);
if (FLAG_trace_opt_verbose) {
@@ -258,7 +297,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
&generic_percentage);
if (type_percentage >= FLAG_type_info_threshold &&
generic_percentage <= FLAG_generic_ic_threshold) {
- Optimize(function, "small function");
+ Optimize(function, OptimizationReason::kSmallFunction);
} else {
shared_code->set_profiler_ticks(ticks + 1);
}
@@ -271,31 +310,16 @@ void RuntimeProfiler::MaybeBaselineIgnition(JSFunction* function,
JavaScriptFrame* frame) {
if (function->IsInOptimizationQueue()) return;
- SharedFunctionInfo* shared = function->shared();
- int ticks = shared->profiler_ticks();
-
- // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
- // than kMaxToplevelSourceSize.
-
if (FLAG_always_osr) {
AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
// Fall through and do a normal baseline compile as well.
- } else if (!frame->is_optimized() &&
- (function->IsMarkedForBaseline() ||
- function->IsMarkedForOptimization() ||
- function->IsMarkedForConcurrentOptimization() ||
- function->IsOptimized())) {
- // Attempt OSR if we are still running interpreted code even though the
- // the function has long been marked or even already been optimized.
- int64_t allowance =
- kOSRCodeSizeAllowanceBaseIgnition +
- static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTickIgnition;
- if (shared->bytecode_array()->Size() <= allowance) {
- AttemptOnStackReplacement(frame);
- }
+ } else if (MaybeOSRIgnition(function, frame)) {
return;
}
+ SharedFunctionInfo* shared = function->shared();
+ int ticks = shared->profiler_ticks();
+
if (shared->optimization_disabled() &&
shared->disable_optimization_reason() == kOptimizationDisabledForTest) {
// Don't baseline functions which have been marked by NeverOptimizeFunction
@@ -304,7 +328,7 @@ void RuntimeProfiler::MaybeBaselineIgnition(JSFunction* function,
}
if (ticks >= kProfilerTicksBeforeBaseline) {
- Baseline(function, "hot enough for baseline");
+ Baseline(function, OptimizationReason::kHotEnoughForBaseline);
}
}
@@ -312,31 +336,16 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
JavaScriptFrame* frame) {
if (function->IsInOptimizationQueue()) return;
- SharedFunctionInfo* shared = function->shared();
- int ticks = shared->profiler_ticks();
-
- // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
- // than kMaxToplevelSourceSize.
-
if (FLAG_always_osr) {
AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
// Fall through and do a normal optimized compile as well.
- } else if (!frame->is_optimized() &&
- (function->IsMarkedForBaseline() ||
- function->IsMarkedForOptimization() ||
- function->IsMarkedForConcurrentOptimization() ||
- function->IsOptimized())) {
- // Attempt OSR if we are still running interpreted code even though the
- // the function has long been marked or even already been optimized.
- int64_t allowance =
- kOSRCodeSizeAllowanceBaseIgnition +
- static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTickIgnition;
- if (shared->bytecode_array()->Size() <= allowance) {
- AttemptOnStackReplacement(frame);
- }
+ } else if (MaybeOSRIgnition(function, frame)) {
return;
}
+ SharedFunctionInfo* shared = function->shared();
+ int ticks = shared->profiler_ticks();
+
if (shared->optimization_disabled()) {
if (shared->deopt_count() >= FLAG_max_opt_count) {
// If optimization was disabled due to many deoptimizations,
@@ -348,8 +357,51 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
}
return;
}
+
if (function->IsOptimized()) return;
+ OptimizationReason reason = ShouldOptimizeIgnition(function, frame);
+
+ if (reason != OptimizationReason::kDoNotOptimize) {
+ Optimize(function, reason);
+ }
+}
+
+bool RuntimeProfiler::MaybeOSRIgnition(JSFunction* function,
+ JavaScriptFrame* frame) {
+ if (!FLAG_ignition_osr) return false;
+
+ SharedFunctionInfo* shared = function->shared();
+ int ticks = shared->profiler_ticks();
+
+ // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
+ // than kMaxToplevelSourceSize.
+
+ bool osr_before_baselined = function->IsMarkedForBaseline() &&
+ ShouldOptimizeIgnition(function, frame) !=
+ OptimizationReason::kDoNotOptimize;
+ if (!frame->is_optimized() &&
+ (osr_before_baselined || function->IsMarkedForOptimization() ||
+ function->IsMarkedForConcurrentOptimization() ||
+ function->IsOptimized())) {
+ // Attempt OSR if we are still running interpreted code even though the
+ // the function has long been marked or even already been optimized.
+ int64_t allowance =
+ kOSRCodeSizeAllowanceBaseIgnition +
+ static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTickIgnition;
+ if (shared->bytecode_array()->Size() <= allowance) {
+ AttemptOnStackReplacement(frame);
+ }
+ return true;
+ }
+ return false;
+}
+
+OptimizationReason RuntimeProfiler::ShouldOptimizeIgnition(
+ JSFunction* function, JavaScriptFrame* frame) {
+ SharedFunctionInfo* shared = function->shared();
+ int ticks = shared->profiler_ticks();
+
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, generic, total, type_percentage, generic_percentage;
GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
@@ -358,9 +410,9 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
generic_percentage <= FLAG_generic_ic_threshold) {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
- Optimize(function, "hot and stable");
+ return OptimizationReason::kHotAndStable;
} else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
- Optimize(function, "not much type info but very hot");
+ return OptimizationReason::kHotWithoutMuchTypeInfo;
} else {
if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
@@ -368,10 +420,12 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
type_percentage);
}
+ return OptimizationReason::kDoNotOptimize;
}
}
// TODO(rmcilroy): Consider whether we should optimize small functions when
// they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
+ return OptimizationReason::kDoNotOptimize;
}
void RuntimeProfiler::MarkCandidatesForOptimization() {
@@ -419,6 +473,5 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
any_ic_changed_ = false;
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h
index 7f2c9024bf..5c538c488c 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/runtime-profiler.h
@@ -13,6 +13,7 @@ namespace internal {
class Isolate;
class JavaScriptFrame;
class JSFunction;
+enum class OptimizationReason : uint8_t;
class RuntimeProfiler {
public:
@@ -30,8 +31,13 @@ class RuntimeProfiler {
int frame_count);
void MaybeBaselineIgnition(JSFunction* function, JavaScriptFrame* frame);
void MaybeOptimizeIgnition(JSFunction* function, JavaScriptFrame* frame);
- void Optimize(JSFunction* function, const char* reason);
- void Baseline(JSFunction* function, const char* reason);
+ // Potentially attempts OSR from ignition and returns whether no other
+ // optimization attempts should be made.
+ bool MaybeOSRIgnition(JSFunction* function, JavaScriptFrame* frame);
+ OptimizationReason ShouldOptimizeIgnition(JSFunction* function,
+ JavaScriptFrame* frame);
+ void Optimize(JSFunction* function, OptimizationReason reason);
+ void Baseline(JSFunction* function, OptimizationReason reason);
Isolate* isolate_;
bool any_ic_changed_;
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 4b7cd39835..cbde8f372e 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -375,15 +375,9 @@ RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
uint32_t index = static_cast<uint32_t>(key);
if (index >= capacity) {
- if (object->map()->is_prototype_map() ||
- object->WouldConvertToSlowElements(index)) {
- // We don't want to allow operations that cause lazy deopt. Return a Smi
- // as a signal that optimized code should eagerly deoptimize.
+ if (!object->GetElementsAccessor()->GrowCapacity(object, index)) {
return Smi::FromInt(0);
}
-
- uint32_t new_capacity = JSObject::NewElementsCapacity(index + 1);
- object->GetElementsAccessor()->GrowCapacityAndConvert(object, new_capacity);
}
// On success, return the fixed array elements.
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 5448159513..323604ffde 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -95,7 +95,8 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate,
prototype_parent = isolate->factory()->null_value();
} else if (super_class->IsConstructor()) {
DCHECK(!super_class->IsJSFunction() ||
- !Handle<JSFunction>::cast(super_class)->shared()->is_resumable());
+ !IsResumableFunction(
+ Handle<JSFunction>::cast(super_class)->shared()->kind()));
ASSIGN_RETURN_ON_EXCEPTION(
isolate, prototype_parent,
Runtime::GetObjectProperty(isolate, super_class,
@@ -187,52 +188,65 @@ RUNTIME_FUNCTION(Runtime_DefineClass) {
end_position));
}
+namespace {
-static MaybeHandle<Object> LoadFromSuper(Isolate* isolate,
- Handle<Object> receiver,
- Handle<JSObject> home_object,
- Handle<Name> name) {
+enum class SuperMode { kLoad, kStore };
+
+MaybeHandle<JSReceiver> GetSuperHolder(
+ Isolate* isolate, Handle<Object> receiver, Handle<JSObject> home_object,
+ SuperMode mode, MaybeHandle<Name> maybe_name, uint32_t index) {
if (home_object->IsAccessCheckNeeded() &&
!isolate->MayAccess(handle(isolate->context()), home_object)) {
isolate->ReportFailedAccessCheck(home_object);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, JSReceiver);
}
PrototypeIterator iter(isolate, home_object);
Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
if (!proto->IsJSReceiver()) {
- return Object::ReadAbsentProperty(isolate, proto, name);
+ MessageTemplate::Template message =
+ mode == SuperMode::kLoad ? MessageTemplate::kNonObjectPropertyLoad
+ : MessageTemplate::kNonObjectPropertyStore;
+ Handle<Name> name;
+ if (!maybe_name.ToHandle(&name)) {
+ name = isolate->factory()->Uint32ToString(index);
+ }
+ THROW_NEW_ERROR(isolate, NewTypeError(message, name, proto), JSReceiver);
}
+ return Handle<JSReceiver>::cast(proto);
+}
- LookupIterator it(receiver, name, Handle<JSReceiver>::cast(proto));
+MaybeHandle<Object> LoadFromSuper(Isolate* isolate, Handle<Object> receiver,
+ Handle<JSObject> home_object,
+ Handle<Name> name) {
+ Handle<JSReceiver> holder;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, holder,
+ GetSuperHolder(isolate, receiver, home_object, SuperMode::kLoad, name, 0),
+ Object);
+ LookupIterator it(receiver, name, holder);
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result, Object::GetProperty(&it), Object);
return result;
}
-static MaybeHandle<Object> LoadElementFromSuper(Isolate* isolate,
- Handle<Object> receiver,
- Handle<JSObject> home_object,
- uint32_t index) {
- if (home_object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context()), home_object)) {
- isolate->ReportFailedAccessCheck(home_object);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- }
-
- PrototypeIterator iter(isolate, home_object);
- Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
- if (!proto->IsJSReceiver()) {
- Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
- return Object::ReadAbsentProperty(isolate, proto, name);
- }
-
- LookupIterator it(isolate, receiver, index, Handle<JSReceiver>::cast(proto));
+MaybeHandle<Object> LoadElementFromSuper(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<JSObject> home_object,
+ uint32_t index) {
+ Handle<JSReceiver> holder;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, holder,
+ GetSuperHolder(isolate, receiver, home_object, SuperMode::kLoad,
+ MaybeHandle<Name>(), index),
+ Object);
+ LookupIterator it(isolate, receiver, index, holder);
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result, Object::GetProperty(&it), Object);
return result;
}
+} // anonymous namespace
RUNTIME_FUNCTION(Runtime_LoadFromSuper) {
HandleScope scope(isolate);
@@ -272,50 +286,43 @@ RUNTIME_FUNCTION(Runtime_LoadKeyedFromSuper) {
LoadFromSuper(isolate, receiver, home_object, name));
}
+namespace {
-static Object* StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
- Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, LanguageMode language_mode) {
- if (home_object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context()), home_object)) {
- isolate->ReportFailedAccessCheck(home_object);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- }
-
- PrototypeIterator iter(isolate, home_object);
- Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
- if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
-
- LookupIterator it(receiver, name, Handle<JSReceiver>::cast(proto));
+MaybeHandle<Object> StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
+ Handle<Object> receiver, Handle<Name> name,
+ Handle<Object> value,
+ LanguageMode language_mode) {
+ Handle<JSReceiver> holder;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, holder,
+ GetSuperHolder(isolate, receiver, home_object,
+ SuperMode::kStore, name, 0),
+ Object);
+ LookupIterator it(receiver, name, holder);
MAYBE_RETURN(Object::SetSuperProperty(&it, value, language_mode,
Object::CERTAINLY_NOT_STORE_FROM_KEYED),
- isolate->heap()->exception());
- return *value;
+ MaybeHandle<Object>());
+ return value;
}
-
-static Object* StoreElementToSuper(Isolate* isolate,
- Handle<JSObject> home_object,
- Handle<Object> receiver, uint32_t index,
- Handle<Object> value,
- LanguageMode language_mode) {
- if (home_object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context()), home_object)) {
- isolate->ReportFailedAccessCheck(home_object);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- }
-
- PrototypeIterator iter(isolate, home_object);
- Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
- if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
-
- LookupIterator it(isolate, receiver, index, Handle<JSReceiver>::cast(proto));
+MaybeHandle<Object> StoreElementToSuper(Isolate* isolate,
+ Handle<JSObject> home_object,
+ Handle<Object> receiver, uint32_t index,
+ Handle<Object> value,
+ LanguageMode language_mode) {
+ Handle<JSReceiver> holder;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, holder,
+ GetSuperHolder(isolate, receiver, home_object, SuperMode::kStore,
+ MaybeHandle<Name>(), index),
+ Object);
+ LookupIterator it(isolate, receiver, index, holder);
MAYBE_RETURN(Object::SetSuperProperty(&it, value, language_mode,
Object::MAY_BE_STORE_FROM_KEYED),
- isolate->heap()->exception());
- return *value;
+ MaybeHandle<Object>());
+ return value;
}
+} // anonymous namespace
RUNTIME_FUNCTION(Runtime_StoreToSuper_Strict) {
HandleScope scope(isolate);
@@ -325,7 +332,8 @@ RUNTIME_FUNCTION(Runtime_StoreToSuper_Strict) {
CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
- return StoreToSuper(isolate, home_object, receiver, name, value, STRICT);
+ RETURN_RESULT_OR_FAILURE(isolate, StoreToSuper(isolate, home_object, receiver,
+ name, value, STRICT));
}
@@ -337,14 +345,13 @@ RUNTIME_FUNCTION(Runtime_StoreToSuper_Sloppy) {
CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
- return StoreToSuper(isolate, home_object, receiver, name, value, SLOPPY);
+ RETURN_RESULT_OR_FAILURE(isolate, StoreToSuper(isolate, home_object, receiver,
+ name, value, SLOPPY));
}
-
-static Object* StoreKeyedToSuper(Isolate* isolate, Handle<JSObject> home_object,
- Handle<Object> receiver, Handle<Object> key,
- Handle<Object> value,
- LanguageMode language_mode) {
+static MaybeHandle<Object> StoreKeyedToSuper(
+ Isolate* isolate, Handle<JSObject> home_object, Handle<Object> receiver,
+ Handle<Object> key, Handle<Object> value, LanguageMode language_mode) {
uint32_t index = 0;
if (key->ToArrayIndex(&index)) {
@@ -352,8 +359,8 @@ static Object* StoreKeyedToSuper(Isolate* isolate, Handle<JSObject> home_object,
language_mode);
}
Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, key));
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
+ Object);
// TODO(verwaest): Unify using LookupIterator.
if (name->AsArrayIndex(&index)) {
return StoreElementToSuper(isolate, home_object, receiver, index, value,
@@ -372,7 +379,9 @@ RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Strict) {
CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
- return StoreKeyedToSuper(isolate, home_object, receiver, key, value, STRICT);
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ StoreKeyedToSuper(isolate, home_object, receiver, key, value, STRICT));
}
@@ -384,7 +393,9 @@ RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Sloppy) {
CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 3);
- return StoreKeyedToSuper(isolate, home_object, receiver, key, value, SLOPPY);
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ StoreKeyedToSuper(isolate, home_object, receiver, key, value, SLOPPY));
}
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index b5910e4d3b..01ec73d427 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -11,6 +11,7 @@
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/interpreter/bytecode-array-iterator.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/v8threads.h"
@@ -172,6 +173,17 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
DCHECK(optimized_code->kind() == Code::OPTIMIZED_FUNCTION);
DCHECK(type == deoptimizer->bailout_type());
+ DCHECK_NULL(isolate->context());
+
+ // TODO(turbofan): For Crankshaft we restore the context before objects are
+ // being materialized, because it never de-materializes the context but it
+ // requires a context to materialize arguments objects. This is specific to
+ // Crankshaft and can be removed once only TurboFan goes through here.
+ if (!optimized_code->is_turbofanned()) {
+ JavaScriptFrameIterator top_it(isolate);
+ JavaScriptFrame* top_frame = top_it.frame();
+ isolate->set_context(Context::cast(top_frame->context()));
+ }
// Make sure to materialize objects before causing any allocation.
JavaScriptFrameIterator it(isolate);
@@ -179,9 +191,11 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
delete deoptimizer;
// Ensure the context register is updated for materialized objects.
- JavaScriptFrameIterator top_it(isolate);
- JavaScriptFrame* top_frame = top_it.frame();
- isolate->set_context(Context::cast(top_frame->context()));
+ if (optimized_code->is_turbofanned()) {
+ JavaScriptFrameIterator top_it(isolate);
+ JavaScriptFrame* top_frame = top_it.frame();
+ isolate->set_context(Context::cast(top_frame->context()));
+ }
if (type == Deoptimizer::LAZY) {
return isolate->heap()->undefined_value();
@@ -279,7 +293,20 @@ BailoutId DetermineEntryAndDisarmOSRForInterpreter(JavaScriptFrame* frame) {
// Reset the OSR loop nesting depth to disarm back edges.
bytecode->set_osr_loop_nesting_level(0);
- return BailoutId(iframe->GetBytecodeOffset());
+ // Translate the offset of the jump instruction to the jump target offset of
+ // that instruction so that the derived BailoutId points to the loop header.
+ // TODO(mstarzinger): This can be merged with {BytecodeBranchAnalysis} which
+ // already performs a pre-pass over the bytecode stream anyways.
+ int jump_offset = iframe->GetBytecodeOffset();
+ interpreter::BytecodeArrayIterator iterator(bytecode);
+ while (iterator.current_offset() + iterator.current_prefix_offset() <
+ jump_offset) {
+ iterator.Advance();
+ }
+ DCHECK(interpreter::Bytecodes::IsJump(iterator.current_bytecode()));
+ int jump_target_offset = iterator.GetJumpTargetOffset();
+
+ return BailoutId(jump_target_offset);
}
} // namespace
@@ -335,10 +362,18 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
function->shared()->increment_deopt_count();
if (result->is_turbofanned()) {
- // TurboFanned OSR code cannot be installed into the function.
- // But the function is obviously hot, so optimize it next time.
- function->ReplaceCode(
- isolate->builtins()->builtin(Builtins::kCompileOptimized));
+ // When we're waiting for concurrent optimization, set to compile on
+ // the next call - otherwise we'd run unoptimized once more
+ // and potentially compile for OSR another time as well.
+ if (function->IsMarkedForConcurrentOptimization()) {
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - Re-marking ");
+ function->PrintName();
+ PrintF(" for non-concurrent optimization]\n");
+ }
+ function->ReplaceCode(
+ isolate->builtins()->builtin(Builtins::kCompileOptimized));
+ }
} else {
// Crankshafted OSR code can be installed into the function.
function->ReplaceCode(*result);
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index a8c465a380..2d217b83f7 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -9,6 +9,7 @@
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
#include "src/debug/debug.h"
+#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
#include "src/globals.h"
#include "src/interpreter/bytecodes.h"
@@ -1521,7 +1522,8 @@ RUNTIME_FUNCTION(Runtime_GetDebugContext) {
RUNTIME_FUNCTION(Runtime_CollectGarbage) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
- isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "%CollectGarbage");
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+ GarbageCollectionReason::kRuntime);
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index 0d624e97d6..bd37cdcf2f 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -140,17 +140,6 @@ RUNTIME_FUNCTION_RETURN_TRIPLE(Runtime_ForInPrepare) {
return MakeTriple(*cache_type, *cache_array, Smi::FromInt(cache_length));
}
-
-RUNTIME_FUNCTION(Runtime_ForInDone) {
- SealHandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_SMI_ARG_CHECKED(index, 0);
- CONVERT_SMI_ARG_CHECKED(length, 1);
- DCHECK_LE(0, index);
- DCHECK_LE(index, length);
- return isolate->heap()->ToBoolean(index == length);
-}
-
RUNTIME_FUNCTION(Runtime_ForInHasProperty) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -188,15 +177,5 @@ RUNTIME_FUNCTION(Runtime_ForInNext) {
HasEnumerableProperty(isolate, receiver, key));
}
-
-RUNTIME_FUNCTION(Runtime_ForInStep) {
- SealHandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_SMI_ARG_CHECKED(index, 0);
- DCHECK_LE(0, index);
- DCHECK_LT(index, Smi::kMaxValue);
- return Smi::FromInt(index + 1);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 298f1a1d11..fa50941925 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -174,6 +174,7 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
target_shared->set_bytecode_array(source_shared->bytecode_array());
}
target_shared->set_scope_info(source_shared->scope_info());
+ target_shared->set_outer_scope_info(source_shared->outer_scope_info());
target_shared->set_length(source_shared->length());
target_shared->set_num_literals(source_shared->num_literals());
target_shared->set_feedback_metadata(source_shared->feedback_metadata());
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index dcc48c5c9e..bb63a3d0d0 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -18,7 +18,7 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
- CHECK(function->shared()->is_resumable());
+ CHECK(IsResumableFunction(function->shared()->kind()));
Handle<FixedArray> operand_stack;
if (function->shared()->HasBytecodeArray()) {
@@ -49,7 +49,7 @@ RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
JavaScriptFrameIterator stack_iterator(isolate);
JavaScriptFrame* frame = stack_iterator.frame();
- CHECK(frame->function()->shared()->is_resumable());
+ CHECK(IsResumableFunction(frame->function()->shared()->kind()));
DCHECK_EQ(frame->function(), generator_object->function());
DCHECK(frame->function()->shared()->is_compiled());
DCHECK(!frame->function()->IsOptimized());
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-i18n.cc
index c5577dadaf..cac403baca 100644
--- a/deps/v8/src/runtime/runtime-i18n.cc
+++ b/deps/v8/src/runtime/runtime-i18n.cc
@@ -25,6 +25,8 @@
#include "unicode/decimfmt.h"
#include "unicode/dtfmtsym.h"
#include "unicode/dtptngen.h"
+#include "unicode/fieldpos.h"
+#include "unicode/fpositer.h"
#include "unicode/locid.h"
#include "unicode/normalizer2.h"
#include "unicode/numfmt.h"
@@ -322,7 +324,7 @@ RUNTIME_FUNCTION(Runtime_GetImplFromInitializedIntlObject) {
Handle<Symbol> marker = isolate->factory()->intl_impl_object_symbol();
Handle<Object> impl = JSReceiver::GetDataProperty(obj, marker);
- if (impl->IsTheHole(isolate)) {
+ if (!impl->IsJSObject()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotIntlObject, obj));
}
@@ -393,6 +395,138 @@ RUNTIME_FUNCTION(Runtime_InternalDateFormat) {
result.length())));
}
+namespace {
+// The list comes from third_party/icu/source/i18n/unicode/udat.h.
+// They're mapped to DateTimeFormat components listed at
+// https://tc39.github.io/ecma402/#sec-datetimeformat-abstracts .
+
+Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
+ switch (field_id) {
+ case -1:
+ return isolate->factory()->literal_string();
+ case UDAT_YEAR_FIELD:
+ case UDAT_EXTENDED_YEAR_FIELD:
+ case UDAT_YEAR_NAME_FIELD:
+ return isolate->factory()->year_string();
+ case UDAT_MONTH_FIELD:
+ case UDAT_STANDALONE_MONTH_FIELD:
+ return isolate->factory()->month_string();
+ case UDAT_DATE_FIELD:
+ return isolate->factory()->day_string();
+ case UDAT_HOUR_OF_DAY1_FIELD:
+ case UDAT_HOUR_OF_DAY0_FIELD:
+ case UDAT_HOUR1_FIELD:
+ case UDAT_HOUR0_FIELD:
+ return isolate->factory()->hour_string();
+ case UDAT_MINUTE_FIELD:
+ return isolate->factory()->minute_string();
+ case UDAT_SECOND_FIELD:
+ return isolate->factory()->second_string();
+ case UDAT_DAY_OF_WEEK_FIELD:
+ case UDAT_DOW_LOCAL_FIELD:
+ case UDAT_STANDALONE_DAY_FIELD:
+ return isolate->factory()->weekday_string();
+ case UDAT_AM_PM_FIELD:
+ return isolate->factory()->dayperiod_string();
+ case UDAT_TIMEZONE_FIELD:
+ case UDAT_TIMEZONE_RFC_FIELD:
+ case UDAT_TIMEZONE_GENERIC_FIELD:
+ case UDAT_TIMEZONE_SPECIAL_FIELD:
+ case UDAT_TIMEZONE_LOCALIZED_GMT_OFFSET_FIELD:
+ case UDAT_TIMEZONE_ISO_FIELD:
+ case UDAT_TIMEZONE_ISO_LOCAL_FIELD:
+ return isolate->factory()->timeZoneName_string();
+ case UDAT_ERA_FIELD:
+ return isolate->factory()->era_string();
+ default:
+ // Other UDAT_*_FIELD's cannot show up because there is no way to specify
+ // them via options of Intl.DateTimeFormat.
+ UNREACHABLE();
+ // To prevent MSVC from issuing C4715 warning.
+ return Handle<String>();
+ }
+}
+
+bool AddElement(Handle<JSArray> array, int index, int32_t field_id,
+ const icu::UnicodeString& formatted, int32_t begin, int32_t end,
+ Isolate* isolate) {
+ HandleScope scope(isolate);
+ Factory* factory = isolate->factory();
+ Handle<JSObject> element = factory->NewJSObject(isolate->object_function());
+ Handle<String> value = IcuDateFieldIdToDateType(field_id, isolate);
+ JSObject::AddProperty(element, factory->type_string(), value, NONE);
+
+ icu::UnicodeString field(formatted.tempSubStringBetween(begin, end));
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, factory->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(field.getBuffer()),
+ field.length())),
+ false);
+
+ JSObject::AddProperty(element, factory->value_string(), value, NONE);
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate, JSObject::AddDataElement(array, index, element, NONE), false);
+ return true;
+}
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_InternalDateFormatToParts) {
+ HandleScope scope(isolate);
+ Factory* factory = isolate->factory();
+
+ DCHECK(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
+
+ Handle<Object> value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(date));
+
+ icu::SimpleDateFormat* date_format =
+ DateFormat::UnpackDateFormat(isolate, date_format_holder);
+ if (!date_format) return isolate->ThrowIllegalOperation();
+
+ icu::UnicodeString formatted;
+ icu::FieldPositionIterator fp_iter;
+ icu::FieldPosition fp;
+ UErrorCode status = U_ZERO_ERROR;
+ date_format->format(value->Number(), formatted, &fp_iter, status);
+ if (U_FAILURE(status)) return isolate->heap()->undefined_value();
+
+ Handle<JSArray> result = factory->NewJSArray(0);
+ int32_t length = formatted.length();
+ if (length == 0) return *result;
+
+ int index = 0;
+ int32_t previous_end_pos = 0;
+ while (fp_iter.next(fp)) {
+ int32_t begin_pos = fp.getBeginIndex();
+ int32_t end_pos = fp.getEndIndex();
+
+ if (previous_end_pos < begin_pos) {
+ if (!AddElement(result, index, -1, formatted, previous_end_pos, begin_pos,
+ isolate)) {
+ return isolate->heap()->undefined_value();
+ }
+ ++index;
+ }
+ if (!AddElement(result, index, fp.getField(), formatted, begin_pos, end_pos,
+ isolate)) {
+ return isolate->heap()->undefined_value();
+ }
+ previous_end_pos = end_pos;
+ ++index;
+ }
+ if (previous_end_pos < length) {
+ if (!AddElement(result, index, -1, formatted, previous_end_pos, length,
+ isolate)) {
+ return isolate->heap()->undefined_value();
+ }
+ }
+ JSObject::ValidateElements(result);
+ return *result;
+}
RUNTIME_FUNCTION(Runtime_InternalDateParse) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 3de0f16b1e..26882b5c83 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -120,18 +120,17 @@ RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
error, isolate->factory()->stack_trace_symbol());
// Patch the stack trace (array of <receiver, function, code, position>).
if (stack_trace_obj->IsJSArray()) {
- Handle<FixedArray> stack_elements(
- FixedArray::cast(JSArray::cast(*stack_trace_obj)->elements()));
- DCHECK_EQ(1, stack_elements->length() % 4);
- DCHECK(Code::cast(stack_elements->get(3))->kind() == Code::WASM_FUNCTION);
- DCHECK(stack_elements->get(4)->IsSmi() &&
- Smi::cast(stack_elements->get(4))->value() >= 0);
- stack_elements->set(4, Smi::FromInt(-1 - byte_offset));
+ Handle<FrameArray> stack_elements(
+ FrameArray::cast(JSArray::cast(*stack_trace_obj)->elements()));
+ DCHECK(stack_elements->Code(0)->kind() == AbstractCode::WASM_FUNCTION);
+ DCHECK(stack_elements->Offset(0)->value() >= 0);
+ stack_elements->SetOffset(0, Smi::FromInt(-1 - byte_offset));
}
- Handle<Object> detailed_stack_trace_obj = JSReceiver::GetDataProperty(
- error, isolate->factory()->detailed_stack_trace_symbol());
+
// Patch the detailed stack trace (array of JSObjects with various
// properties).
+ Handle<Object> detailed_stack_trace_obj = JSReceiver::GetDataProperty(
+ error, isolate->factory()->detailed_stack_trace_symbol());
if (detailed_stack_trace_obj->IsJSArray()) {
Handle<FixedArray> stack_elements(
FixedArray::cast(JSArray::cast(*detailed_stack_trace_obj)->elements()));
@@ -235,8 +234,7 @@ RUNTIME_FUNCTION(Runtime_ThrowIncompatibleMethodReceiver) {
RUNTIME_FUNCTION(Runtime_ThrowInvalidStringLength) {
HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidStringLength));
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
}
RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
@@ -272,23 +270,51 @@ RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
isolate, NewTypeError(MessageTemplate::kApplyNonFunction, object, type));
}
+namespace {
-RUNTIME_FUNCTION(Runtime_PromiseRejectEvent) {
- DCHECK(args.length() == 3);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- CONVERT_BOOLEAN_ARG_CHECKED(debug_event, 2);
- if (debug_event) isolate->debug()->OnPromiseReject(promise, value);
+void PromiseRejectEvent(Isolate* isolate, Handle<JSObject> promise,
+ Handle<Object> rejected_promise, Handle<Object> value,
+ bool debug_event) {
+ if (isolate->debug()->is_active() && debug_event) {
+ isolate->debug()->OnPromiseReject(rejected_promise, value);
+ }
Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
// Do not report if we actually have a handler.
if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate)) {
isolate->ReportPromiseReject(promise, value,
v8::kPromiseRejectWithNoHandler);
}
+}
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_PromiseRejectEvent) {
+ DCHECK(args.length() == 3);
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ CONVERT_BOOLEAN_ARG_CHECKED(debug_event, 2);
+
+ PromiseRejectEvent(isolate, promise, promise, value, debug_event);
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
+ DCHECK(args.length() == 2);
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+
+ Handle<Object> rejected_promise = promise;
+ if (isolate->debug()->is_active()) {
+ // If the Promise.reject call is caught, then this will return
+ // undefined, which will be interpreted by PromiseRejectEvent
+ // as being a caught exception event.
+ rejected_promise = isolate->GetPromiseOnStackOnThrow();
+ }
+ PromiseRejectEvent(isolate, promise, rejected_promise, value, true);
+ return isolate->heap()->undefined_value();
+}
RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
DCHECK(args.length() == 1);
@@ -330,7 +356,7 @@ RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) {
CONVERT_SMI_ARG_CHECKED(size, 0);
CHECK(IsAligned(size, kPointerSize));
CHECK(size > 0);
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
return *isolate->factory()->NewFillerObject(size, false, NEW_SPACE);
}
@@ -342,7 +368,7 @@ RUNTIME_FUNCTION(Runtime_AllocateInTargetSpace) {
CONVERT_SMI_ARG_CHECKED(flags, 1);
CHECK(IsAligned(size, kPointerSize));
CHECK(size > 0);
- CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(size <= kMaxRegularHeapObjectSize);
bool double_align = AllocateDoubleAlignFlag::decode(flags);
AllocationSpace space = AllocateTargetSpace::decode(flags);
return *isolate->factory()->NewFillerObject(size, double_align, space);
@@ -528,6 +554,21 @@ RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
}
}
+RUNTIME_FUNCTION(Runtime_EnqueuePromiseResolveThenableJob) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 6);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, resolution, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, then, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, resolve, 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, reject, 3);
+ CONVERT_ARG_HANDLE_CHECKED(Object, before_debug_event, 4);
+ CONVERT_ARG_HANDLE_CHECKED(Object, after_debug_event, 5);
+ Handle<PromiseContainer> container = isolate->factory()->NewPromiseContainer(
+ resolution, then, resolve, reject, before_debug_event, after_debug_event);
+ isolate->EnqueueMicrotask(container);
+ return isolate->heap()->undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index a0dd3e8de9..ebdf04ccae 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -6,8 +6,9 @@
#include "src/allocation-site-scopes.h"
#include "src/arguments.h"
+#include "src/ast/ast.h"
+#include "src/ast/compile-time-value.h"
#include "src/isolate-inl.h"
-#include "src/parsing/parser.h"
#include "src/runtime/runtime.h"
namespace v8 {
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 7908c6295c..70ed23ba61 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -677,6 +677,38 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
return *object;
}
+RUNTIME_FUNCTION(Runtime_DefineDataProperty) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 5);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
+ CONVERT_SMI_ARG_CHECKED(set_function_name, 4);
+
+ if (set_function_name) {
+ DCHECK(value->IsJSFunction());
+ JSFunction::SetName(Handle<JSFunction>::cast(value), name,
+ isolate->factory()->empty_string());
+ }
+
+ PropertyDescriptor desc;
+ desc.set_writable(!(attrs & ReadOnly));
+ desc.set_enumerable(!(attrs & DontEnum));
+ desc.set_configurable(!(attrs & DontDelete));
+ desc.set_value(value);
+
+ Maybe<bool> result = JSReceiver::DefineOwnProperty(isolate, receiver, name,
+ &desc, Object::DONT_THROW);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ if (result.IsNothing()) {
+ DCHECK(isolate->has_pending_exception());
+ return isolate->heap()->exception();
+ }
+
+ return *receiver;
+}
+
// Return property without being observable by accessors or interceptors.
RUNTIME_FUNCTION(Runtime_GetDataProperty) {
HandleScope scope(isolate);
@@ -928,5 +960,32 @@ RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
return *value;
}
+RUNTIME_FUNCTION(Runtime_LoadModuleExport) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<Module> module(isolate->context()->module());
+ return *Module::LoadExport(module, name);
+}
+
+RUNTIME_FUNCTION(Runtime_LoadModuleImport) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, module_request, 1);
+ Handle<Module> module(isolate->context()->module());
+ return *Module::LoadImport(module, name, module_request->value());
+}
+
+RUNTIME_FUNCTION(Runtime_StoreModuleExport) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ Handle<Module> module(isolate->context()->module());
+ Module::StoreExport(module, name, value);
+ return isolate->heap()->undefined_value();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index b36e5e66cb..977e6bc48f 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -794,7 +794,7 @@ RUNTIME_FUNCTION(Runtime_RegExpSource) {
return regexp->source();
}
-
+// TODO(jgruber): Remove this once all uses in regexp.js have been removed.
RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 3);
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 26bfb29d93..0c037db307 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -44,7 +44,7 @@ Object* ThrowRedeclarationError(Isolate* isolate, Handle<String> name,
Object* DeclareGlobal(
Isolate* isolate, Handle<JSGlobalObject> global, Handle<String> name,
Handle<Object> value, PropertyAttributes attr, bool is_var,
- bool is_function, RedeclarationType redeclaration_type,
+ bool is_function_declaration, RedeclarationType redeclaration_type,
Handle<TypeFeedbackVector> feedback_vector = Handle<TypeFeedbackVector>(),
FeedbackVectorSlot slot = FeedbackVectorSlot::Invalid()) {
Handle<ScriptContextTable> script_contexts(
@@ -60,7 +60,14 @@ Object* DeclareGlobal(
}
// Do the lookup own properties only, see ES5 erratum.
- LookupIterator it(global, name, global, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ LookupIterator::Configuration lookup_config(
+ LookupIterator::Configuration::OWN_SKIP_INTERCEPTOR);
+ if (is_function_declaration) {
+ // For function declarations, use the interceptor on the declaration. For
+ // non-functions, use it only on initialization.
+ lookup_config = LookupIterator::Configuration::OWN;
+ }
+ LookupIterator it(global, name, global, lookup_config);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return isolate->heap()->exception();
@@ -71,7 +78,7 @@ Object* DeclareGlobal(
// Skip var re-declarations.
if (is_var) return isolate->heap()->undefined_value();
- DCHECK(is_function);
+ DCHECK(is_function_declaration);
if ((old_attributes & DONT_DELETE) != 0) {
// Only allow reconfiguring globals to functions in user code (no
// natives, which are marked as read-only).
@@ -83,9 +90,9 @@ Object* DeclareGlobal(
if (old_details.IsReadOnly() || old_details.IsDontEnum() ||
(it.state() == LookupIterator::ACCESSOR &&
it.GetAccessors()->IsAccessorPair())) {
- // ES#sec-globaldeclarationinstantiation 5.d:
+ // ECMA-262 section 15.1.11 GlobalDeclarationInstantiation 5.d:
// If hasRestrictedGlobal is true, throw a SyntaxError exception.
- // ES#sec-evaldeclarationinstantiation 8.a.iv.1.b:
+ // ECMA-262 section 18.2.1.3 EvalDeclarationInstantiation 8.a.iv.1.b:
// If fnDefinable is false, throw a TypeError exception.
return ThrowRedeclarationError(isolate, name, redeclaration_type);
}
@@ -102,6 +109,10 @@ Object* DeclareGlobal(
if (it.state() == LookupIterator::ACCESSOR) it.Delete();
}
+ if (is_function_declaration) {
+ it.Restart();
+ }
+
// Define or redefine own property.
RETURN_FAILURE_ON_EXCEPTION(
isolate, JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attr));
@@ -294,9 +305,8 @@ Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
DCHECK(context->IsBlockContext());
object = isolate->factory()->NewJSObject(
isolate->context_extension_function());
- Handle<HeapObject> extension =
- isolate->factory()->NewSloppyBlockWithEvalContextExtension(
- handle(context->scope_info()), object);
+ Handle<HeapObject> extension = isolate->factory()->NewContextExtension(
+ handle(context->scope_info()), object);
context->set_extension(*extension);
} else {
object = handle(context->extension_object(), isolate);
@@ -665,8 +675,6 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
Handle<Context> result =
isolate->factory()->NewScriptContext(closure, scope_info);
- result->InitializeGlobalSlots();
-
DCHECK(function->context() == isolate->context());
DCHECK(*global_object == result->global_object());
@@ -691,26 +699,41 @@ RUNTIME_FUNCTION(Runtime_NewFunctionContext) {
RUNTIME_FUNCTION(Runtime_PushWithContext) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, extension_object, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
+ CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 2);
Handle<Context> current(isolate->context());
- Handle<Context> context =
- isolate->factory()->NewWithContext(function, current, extension_object);
+ Handle<Context> context = isolate->factory()->NewWithContext(
+ function, current, scope_info, extension_object);
isolate->set_context(*context);
return *context;
}
+RUNTIME_FUNCTION(Runtime_PushModuleContext) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Module, module, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
+ CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 2);
+ DCHECK(function->context() == isolate->context());
+
+ Handle<Context> context =
+ isolate->factory()->NewModuleContext(module, function, scope_info);
+ isolate->set_context(*context);
+ return *context;
+}
RUNTIME_FUNCTION(Runtime_PushCatchContext) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, thrown_object, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 2);
+ CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 3);
Handle<Context> current(isolate->context());
Handle<Context> context = isolate->factory()->NewCatchContext(
- function, current, name, thrown_object);
+ function, current, scope_info, name, thrown_object);
isolate->set_context(*context);
return *context;
}
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 517513ed4e..f5bda59b26 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -103,140 +103,12 @@ RUNTIME_FUNCTION(Runtime_StringIndexOf) {
return Smi::FromInt(position);
}
-
-template <typename schar, typename pchar>
-static int StringMatchBackwards(Vector<const schar> subject,
- Vector<const pchar> pattern, int idx) {
- int pattern_length = pattern.length();
- DCHECK(pattern_length >= 1);
- DCHECK(idx + pattern_length <= subject.length());
-
- if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
- for (int i = 0; i < pattern_length; i++) {
- uc16 c = pattern[i];
- if (c > String::kMaxOneByteCharCode) {
- return -1;
- }
- }
- }
-
- pchar pattern_first_char = pattern[0];
- for (int i = idx; i >= 0; i--) {
- if (subject[i] != pattern_first_char) continue;
- int j = 1;
- while (j < pattern_length) {
- if (pattern[j] != subject[i + j]) {
- break;
- }
- j++;
- }
- if (j == pattern_length) {
- return i;
- }
- }
- return -1;
-}
-
-
RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, index, 2);
-
- uint32_t start_index = 0;
- if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
-
- uint32_t pat_length = pat->length();
- uint32_t sub_length = sub->length();
-
- if (start_index + pat_length > sub_length) {
- start_index = sub_length - pat_length;
- }
-
- if (pat_length == 0) {
- return Smi::FromInt(start_index);
- }
-
- sub = String::Flatten(sub);
- pat = String::Flatten(pat);
-
- int position = -1;
- DisallowHeapAllocation no_gc; // ensure vectors stay valid
-
- String::FlatContent sub_content = sub->GetFlatContent();
- String::FlatContent pat_content = pat->GetFlatContent();
-
- if (pat_content.IsOneByte()) {
- Vector<const uint8_t> pat_vector = pat_content.ToOneByteVector();
- if (sub_content.IsOneByte()) {
- position = StringMatchBackwards(sub_content.ToOneByteVector(), pat_vector,
- start_index);
- } else {
- position = StringMatchBackwards(sub_content.ToUC16Vector(), pat_vector,
- start_index);
- }
- } else {
- Vector<const uc16> pat_vector = pat_content.ToUC16Vector();
- if (sub_content.IsOneByte()) {
- position = StringMatchBackwards(sub_content.ToOneByteVector(), pat_vector,
- start_index);
- } else {
- position = StringMatchBackwards(sub_content.ToUC16Vector(), pat_vector,
- start_index);
- }
- }
-
- return Smi::FromInt(position);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringLocaleCompare) {
HandleScope handle_scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
-
- if (str1.is_identical_to(str2)) return Smi::FromInt(0); // Equal.
- int str1_length = str1->length();
- int str2_length = str2->length();
-
- // Decide trivial cases without flattening.
- if (str1_length == 0) {
- if (str2_length == 0) return Smi::FromInt(0); // Equal.
- return Smi::FromInt(-str2_length);
- } else {
- if (str2_length == 0) return Smi::FromInt(str1_length);
- }
-
- int end = str1_length < str2_length ? str1_length : str2_length;
-
- // No need to flatten if we are going to find the answer on the first
- // character. At this point we know there is at least one character
- // in each string, due to the trivial case handling above.
- int d = str1->Get(0) - str2->Get(0);
- if (d != 0) return Smi::FromInt(d);
-
- str1 = String::Flatten(str1);
- str2 = String::Flatten(str2);
-
- DisallowHeapAllocation no_gc;
- String::FlatContent flat1 = str1->GetFlatContent();
- String::FlatContent flat2 = str2->GetFlatContent();
-
- for (int i = 0; i < end; i++) {
- if (flat1.Get(i) != flat2.Get(i)) {
- return Smi::FromInt(flat1.Get(i) - flat2.Get(i));
- }
- }
-
- return Smi::FromInt(str1_length - str2_length);
+ return String::LastIndexOf(isolate, args.at<Object>(0), args.at<Object>(1),
+ isolate->factory()->undefined_value());
}
-
RUNTIME_FUNCTION(Runtime_SubString) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 0d6cb0efdd..8100d2c759 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -8,6 +8,7 @@
#include "src/arguments.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
+#include "src/compiler.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
@@ -419,8 +420,8 @@ RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2 || args.length() == 3);
#ifdef DEBUG
- CONVERT_SMI_ARG_CHECKED(interval, 0);
- CONVERT_SMI_ARG_CHECKED(timeout, 1);
+ CONVERT_INT32_ARG_CHECKED(interval, 0);
+ CONVERT_INT32_ARG_CHECKED(timeout, 1);
isolate->heap()->set_allocation_timeout(timeout);
FLAG_gc_interval = interval;
if (args.length() == 3) {
@@ -456,7 +457,6 @@ RUNTIME_FUNCTION(Runtime_DebugPrint) {
}
args[0]->Print(os);
if (args[0]->IsHeapObject()) {
- os << "\n";
HeapObject::cast(args[0])->map()->Print(os);
}
#else
@@ -768,7 +768,34 @@ RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
if (!maybe_compiled_module.ToHandle(&compiled_module)) {
return isolate->heap()->undefined_value();
}
- return *wasm::CreateCompiledModuleObject(isolate, compiled_module);
+ return *wasm::CreateCompiledModuleObject(isolate, compiled_module,
+ wasm::ModuleOrigin::kWasmOrigin);
+}
+
+RUNTIME_FUNCTION(Runtime_ValidateWasmInstancesChain) {
+ HandleScope shs(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, module_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, instance_count, 1);
+ wasm::testing::ValidateInstancesChain(isolate, module_obj,
+ instance_count->value());
+ return isolate->heap()->ToBoolean(true);
+}
+
+RUNTIME_FUNCTION(Runtime_ValidateWasmModuleState) {
+ HandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, module_obj, 0);
+ wasm::testing::ValidateModuleState(isolate, module_obj);
+ return isolate->heap()->ToBoolean(true);
+}
+
+RUNTIME_FUNCTION(Runtime_ValidateWasmOrphanedInstance) {
+ HandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, instance_obj, 0);
+ wasm::testing::ValidateOrphanedInstance(isolate, instance_obj);
+ return isolate->heap()->ToBoolean(true);
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 04bf368974..ba422bf01e 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -200,7 +200,6 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
size_t length = 0;
if (source->IsJSTypedArray() &&
JSTypedArray::cast(*source)->type() == array_type) {
- length_obj = handle(JSTypedArray::cast(*source)->length(), isolate);
length = JSTypedArray::cast(*source)->length_value();
} else {
CHECK(TryNumberToSize(*length_obj, &length));
@@ -246,6 +245,7 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
Handle<Object> byte_length_obj(
isolate->factory()->NewNumberFromSize(byte_length));
holder->set_byte_length(*byte_length_obj);
+ length_obj = isolate->factory()->NewNumberFromSize(length);
holder->set_length(*length_obj);
Handle<FixedTypedArrayBase> elements =
@@ -419,217 +419,5 @@ RUNTIME_FUNCTION(Runtime_IsSharedInteger32TypedArray) {
obj->type() == kExternalInt32Array);
}
-
-inline static bool NeedToFlipBytes(bool is_little_endian) {
-#ifdef V8_TARGET_LITTLE_ENDIAN
- return !is_little_endian;
-#else
- return is_little_endian;
-#endif
-}
-
-
-template <int n>
-inline void CopyBytes(uint8_t* target, uint8_t* source) {
- for (int i = 0; i < n; i++) {
- *(target++) = *(source++);
- }
-}
-
-
-template <int n>
-inline void FlipBytes(uint8_t* target, uint8_t* source) {
- source = source + (n - 1);
- for (int i = 0; i < n; i++) {
- *(target++) = *(source--);
- }
-}
-
-
-template <typename T>
-inline static bool DataViewGetValue(Isolate* isolate,
- Handle<JSDataView> data_view,
- Handle<Object> byte_offset_obj,
- bool is_little_endian, T* result) {
- size_t byte_offset = 0;
- if (!TryNumberToSize(*byte_offset_obj, &byte_offset)) {
- return false;
- }
- Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
-
- size_t data_view_byte_offset = NumberToSize(data_view->byte_offset());
- size_t data_view_byte_length = NumberToSize(data_view->byte_length());
- if (byte_offset + sizeof(T) > data_view_byte_length ||
- byte_offset + sizeof(T) < byte_offset) { // overflow
- return false;
- }
-
- union Value {
- T data;
- uint8_t bytes[sizeof(T)];
- };
-
- Value value;
- size_t buffer_offset = data_view_byte_offset + byte_offset;
- DCHECK(NumberToSize(buffer->byte_length()) >= buffer_offset + sizeof(T));
- uint8_t* source =
- static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
- if (NeedToFlipBytes(is_little_endian)) {
- FlipBytes<sizeof(T)>(value.bytes, source);
- } else {
- CopyBytes<sizeof(T)>(value.bytes, source);
- }
- *result = value.data;
- return true;
-}
-
-
-template <typename T>
-static bool DataViewSetValue(Isolate* isolate, Handle<JSDataView> data_view,
- Handle<Object> byte_offset_obj,
- bool is_little_endian, T data) {
- size_t byte_offset = 0;
- if (!TryNumberToSize(*byte_offset_obj, &byte_offset)) {
- return false;
- }
- Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
-
- size_t data_view_byte_offset = NumberToSize(data_view->byte_offset());
- size_t data_view_byte_length = NumberToSize(data_view->byte_length());
- if (byte_offset + sizeof(T) > data_view_byte_length ||
- byte_offset + sizeof(T) < byte_offset) { // overflow
- return false;
- }
-
- union Value {
- T data;
- uint8_t bytes[sizeof(T)];
- };
-
- Value value;
- value.data = data;
- size_t buffer_offset = data_view_byte_offset + byte_offset;
- DCHECK(NumberToSize(buffer->byte_length()) >= buffer_offset + sizeof(T));
- uint8_t* target =
- static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
- if (NeedToFlipBytes(is_little_endian)) {
- FlipBytes<sizeof(T)>(target, value.bytes);
- } else {
- CopyBytes<sizeof(T)>(target, value.bytes);
- }
- return true;
-}
-
-
-#define DATA_VIEW_GETTER(TypeName, Type, Converter) \
- RUNTIME_FUNCTION(Runtime_DataViewGet##TypeName) { \
- HandleScope scope(isolate); \
- DCHECK(args.length() == 3); \
- CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1); \
- CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 2); \
- Type result; \
- if (DataViewGetValue(isolate, holder, offset, is_little_endian, \
- &result)) { \
- return *isolate->factory()->Converter(result); \
- } else { \
- THROW_NEW_ERROR_RETURN_FAILURE( \
- isolate, \
- NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset)); \
- } \
- }
-
-DATA_VIEW_GETTER(Uint8, uint8_t, NewNumberFromUint)
-DATA_VIEW_GETTER(Int8, int8_t, NewNumberFromInt)
-DATA_VIEW_GETTER(Uint16, uint16_t, NewNumberFromUint)
-DATA_VIEW_GETTER(Int16, int16_t, NewNumberFromInt)
-DATA_VIEW_GETTER(Uint32, uint32_t, NewNumberFromUint)
-DATA_VIEW_GETTER(Int32, int32_t, NewNumberFromInt)
-DATA_VIEW_GETTER(Float32, float, NewNumber)
-DATA_VIEW_GETTER(Float64, double, NewNumber)
-
-#undef DATA_VIEW_GETTER
-
-
-template <typename T>
-static T DataViewConvertValue(double value);
-
-
-template <>
-int8_t DataViewConvertValue<int8_t>(double value) {
- return static_cast<int8_t>(DoubleToInt32(value));
-}
-
-
-template <>
-int16_t DataViewConvertValue<int16_t>(double value) {
- return static_cast<int16_t>(DoubleToInt32(value));
-}
-
-
-template <>
-int32_t DataViewConvertValue<int32_t>(double value) {
- return DoubleToInt32(value);
-}
-
-
-template <>
-uint8_t DataViewConvertValue<uint8_t>(double value) {
- return static_cast<uint8_t>(DoubleToUint32(value));
-}
-
-
-template <>
-uint16_t DataViewConvertValue<uint16_t>(double value) {
- return static_cast<uint16_t>(DoubleToUint32(value));
-}
-
-
-template <>
-uint32_t DataViewConvertValue<uint32_t>(double value) {
- return DoubleToUint32(value);
-}
-
-
-template <>
-float DataViewConvertValue<float>(double value) {
- return static_cast<float>(value);
-}
-
-
-template <>
-double DataViewConvertValue<double>(double value) {
- return value;
-}
-
-
-#define DATA_VIEW_SETTER(TypeName, Type) \
- RUNTIME_FUNCTION(Runtime_DataViewSet##TypeName) { \
- HandleScope scope(isolate); \
- DCHECK(args.length() == 4); \
- CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0); \
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1); \
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2); \
- CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 3); \
- Type v = DataViewConvertValue<Type>(value->Number()); \
- if (DataViewSetValue(isolate, holder, offset, is_little_endian, v)) { \
- return isolate->heap()->undefined_value(); \
- } else { \
- THROW_NEW_ERROR_RETURN_FAILURE( \
- isolate, \
- NewRangeError(MessageTemplate::kInvalidDataViewAccessorOffset)); \
- } \
- }
-
-DATA_VIEW_SETTER(Uint8, uint8_t)
-DATA_VIEW_SETTER(Int8, int8_t)
-DATA_VIEW_SETTER(Uint16, uint16_t)
-DATA_VIEW_SETTER(Int16, int16_t)
-DATA_VIEW_SETTER(Uint32, uint32_t)
-DATA_VIEW_SETTER(Int32, int32_t)
-DATA_VIEW_SETTER(Float32, float)
-DATA_VIEW_SETTER(Float64, double)
-
-#undef DATA_VIEW_SETTER
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 37608e61cd..ab69046c45 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -18,17 +18,32 @@
namespace v8 {
namespace internal {
-namespace {
-const int kWasmMemArrayBuffer = 2;
+RUNTIME_FUNCTION(Runtime_WasmMemorySize) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+
+ Handle<JSObject> module_instance;
+ {
+ // Get the module JSObject
+ DisallowHeapAllocation no_allocation;
+ const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
+ Address pc =
+ Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
+ Code* code =
+ isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
+ Object* owning_instance = wasm::GetOwningWasmInstance(code);
+ CHECK_NOT_NULL(owning_instance);
+ module_instance = handle(JSObject::cast(owning_instance), isolate);
+ }
+ return *isolate->factory()->NewNumberFromInt(
+ wasm::GetInstanceMemorySize(isolate, module_instance));
}
RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- uint32_t delta_pages = 0;
- CHECK(args[0]->ToUint32(&delta_pages));
- Handle<JSObject> module_object;
-
+ CONVERT_UINT32_ARG_CHECKED(delta_pages, 0);
+ Handle<JSObject> module_instance;
{
// Get the module JSObject
DisallowHeapAllocation no_allocation;
@@ -37,77 +52,12 @@ RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
Code* code =
isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
- FixedArray* deopt_data = code->deoptimization_data();
- DCHECK(deopt_data->length() == 2);
- module_object = Handle<JSObject>::cast(handle(deopt_data->get(0), isolate));
- CHECK(!module_object->IsNull(isolate));
- }
-
- Address old_mem_start, new_mem_start;
- uint32_t old_size, new_size;
-
- // Get mem buffer associated with module object
- Handle<Object> obj(module_object->GetInternalField(kWasmMemArrayBuffer),
- isolate);
-
- if (obj->IsUndefined(isolate)) {
- // If module object does not have linear memory associated with it,
- // Allocate new array buffer of given size.
- old_mem_start = nullptr;
- old_size = 0;
- // TODO(gdeepti): Fix bounds check to take into account size of memtype.
- new_size = delta_pages * wasm::WasmModule::kPageSize;
- if (delta_pages > wasm::WasmModule::kMaxMemPages) {
- return *isolate->factory()->NewNumberFromInt(-1);
- }
- new_mem_start =
- static_cast<Address>(isolate->array_buffer_allocator()->Allocate(
- static_cast<uint32_t>(new_size)));
- if (new_mem_start == NULL) {
- return *isolate->factory()->NewNumberFromInt(-1);
- }
-#if DEBUG
- // Double check the API allocator actually zero-initialized the memory.
- for (size_t i = old_size; i < new_size; i++) {
- DCHECK_EQ(0, new_mem_start[i]);
- }
-#endif
- } else {
- Handle<JSArrayBuffer> old_buffer = Handle<JSArrayBuffer>::cast(obj);
- old_mem_start = static_cast<Address>(old_buffer->backing_store());
- old_size = old_buffer->byte_length()->Number();
- // If the old memory was zero-sized, we should have been in the
- // "undefined" case above.
- DCHECK_NOT_NULL(old_mem_start);
- DCHECK_NE(0, old_size);
-
- new_size = old_size + delta_pages * wasm::WasmModule::kPageSize;
- if (new_size >
- wasm::WasmModule::kMaxMemPages * wasm::WasmModule::kPageSize) {
- return *isolate->factory()->NewNumberFromInt(-1);
- }
- new_mem_start = static_cast<Address>(realloc(old_mem_start, new_size));
- if (new_mem_start == NULL) {
- return *isolate->factory()->NewNumberFromInt(-1);
- }
- old_buffer->set_is_external(true);
- isolate->heap()->UnregisterArrayBuffer(*old_buffer);
- // Zero initializing uninitialized memory from realloc
- memset(new_mem_start + old_size, 0, new_size - old_size);
+ Object* owning_instance = wasm::GetOwningWasmInstance(code);
+ CHECK_NOT_NULL(owning_instance);
+ module_instance = handle(JSObject::cast(owning_instance), isolate);
}
-
- Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
- JSArrayBuffer::Setup(buffer, isolate, false, new_mem_start, new_size);
- buffer->set_is_neuterable(false);
-
- // Set new buffer to be wasm memory
- module_object->SetInternalField(kWasmMemArrayBuffer, *buffer);
-
- CHECK(wasm::UpdateWasmModuleMemory(module_object, old_mem_start,
- new_mem_start, old_size, new_size));
-
- return *isolate->factory()->NewNumberFromInt(old_size /
- wasm::WasmModule::kPageSize);
+ return *isolate->factory()->NewNumberFromInt(
+ wasm::GrowInstanceMemory(isolate, module_instance, delta_pages));
}
RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
@@ -116,5 +66,28 @@ RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kWasmTrapTypeError));
}
+
+RUNTIME_FUNCTION(Runtime_WasmThrow) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_SMI_ARG_CHECKED(lower, 0);
+ CONVERT_SMI_ARG_CHECKED(upper, 1);
+
+ const int32_t thrown_value = (upper << 16) | lower;
+
+ return isolate->Throw(*isolate->factory()->NewNumberFromInt(thrown_value));
+}
+
+RUNTIME_FUNCTION(Runtime_WasmGetCaughtExceptionValue) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ Object* exception = args[0];
+ // The unwinder will only deliver exceptions to wasm if the exception is a
+ // Number or a Smi (which we have just converted to a Number.) This logic
+ // lives in Isolate::is_catchable_by_wasm(Object*).
+ CHECK(exception->IsNumber());
+ return exception;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 151e240f25..9d1cd39c5d 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -5,6 +5,7 @@
#include "src/runtime/runtime.h"
#include "src/assembler.h"
+#include "src/base/hashmap.h"
#include "src/contexts.h"
#include "src/handles-inl.h"
#include "src/heap/heap.h"
@@ -57,30 +58,61 @@ static const Runtime::Function kIntrinsicFunctions[] = {
#undef I
#undef F
+namespace {
-void Runtime::InitializeIntrinsicFunctionNames(Isolate* isolate,
- Handle<NameDictionary> dict) {
- DCHECK(dict->NumberOfElements() == 0);
- HandleScope scope(isolate);
- for (int i = 0; i < kNumFunctions; ++i) {
- const char* name = kIntrinsicFunctions[i].name;
- if (name == NULL) continue;
- Handle<NameDictionary> new_dict = NameDictionary::Add(
- dict, isolate->factory()->InternalizeUtf8String(name),
- Handle<Smi>(Smi::FromInt(i), isolate), PropertyDetails::Empty());
- // The dictionary does not need to grow.
- CHECK(new_dict.is_identical_to(dict));
+V8_DECLARE_ONCE(initialize_function_name_map_once);
+static const base::CustomMatcherHashMap* kRuntimeFunctionNameMap;
+
+struct IntrinsicFunctionIdentifier {
+ IntrinsicFunctionIdentifier(const unsigned char* data, const int length)
+ : data_(data), length_(length) {}
+
+ static bool Match(void* key1, void* key2) {
+ const IntrinsicFunctionIdentifier* lhs =
+ static_cast<IntrinsicFunctionIdentifier*>(key1);
+ const IntrinsicFunctionIdentifier* rhs =
+ static_cast<IntrinsicFunctionIdentifier*>(key2);
+ if (lhs->length_ != rhs->length_) return false;
+ return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs->data_),
+ reinterpret_cast<const uint8_t*>(rhs->data_),
+ rhs->length_) == 0;
+ }
+
+ uint32_t Hash() {
+ return StringHasher::HashSequentialString<uint8_t>(
+ data_, length_, v8::internal::kZeroHashSeed);
}
-}
+ const unsigned char* data_;
+ const int length_;
+};
+
+void InitializeIntrinsicFunctionNames() {
+ base::CustomMatcherHashMap* function_name_map =
+ new base::CustomMatcherHashMap(IntrinsicFunctionIdentifier::Match);
+ for (size_t i = 0; i < arraysize(kIntrinsicFunctions); ++i) {
+ const Runtime::Function* function = &kIntrinsicFunctions[i];
+ IntrinsicFunctionIdentifier* identifier = new IntrinsicFunctionIdentifier(
+ reinterpret_cast<const unsigned char*>(function->name),
+ static_cast<int>(strlen(function->name)));
+ base::HashMap::Entry* entry =
+ function_name_map->InsertNew(identifier, identifier->Hash());
+ entry->value = const_cast<Runtime::Function*>(function);
+ }
+ kRuntimeFunctionNameMap = function_name_map;
+}
-const Runtime::Function* Runtime::FunctionForName(Handle<String> name) {
- Heap* heap = name->GetHeap();
- int entry = heap->intrinsic_function_names()->FindEntry(name);
- if (entry != kNotFound) {
- Object* smi_index = heap->intrinsic_function_names()->ValueAt(entry);
- int function_index = Smi::cast(smi_index)->value();
- return &(kIntrinsicFunctions[function_index]);
+} // namespace
+
+const Runtime::Function* Runtime::FunctionForName(const unsigned char* name,
+ int length) {
+ base::CallOnce(&initialize_function_name_map_once,
+ &InitializeIntrinsicFunctionNames);
+ IntrinsicFunctionIdentifier identifier(name, length);
+ base::HashMap::Entry* entry =
+ kRuntimeFunctionNameMap->Lookup(&identifier, identifier.Hash());
+ if (entry) {
+ return reinterpret_cast<Function*>(entry->value);
}
return NULL;
}
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 38eb51d5a3..cbdaf0f033 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -11,7 +11,7 @@
#include "src/base/platform/time.h"
#include "src/objects.h"
#include "src/unicode.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -204,12 +204,10 @@ namespace internal {
#define FOR_EACH_INTRINSIC_ERROR(F) F(ErrorToString, 1, 1)
#define FOR_EACH_INTRINSIC_FORIN(F) \
- F(ForInDone, 2, 1) \
F(ForInEnumerate, 1, 1) \
F(ForInFilter, 2, 1) \
F(ForInHasProperty, 2, 1) \
- F(ForInNext, 4, 1) \
- F(ForInStep, 1, 1)
+ F(ForInNext, 4, 1)
#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
F(InterpreterNewClosure, 2, 1) \
@@ -262,6 +260,7 @@ namespace internal {
F(GetImplFromInitializedIntlObject, 1, 1) \
F(CreateDateTimeFormat, 3, 1) \
F(InternalDateFormat, 2, 1) \
+ F(InternalDateFormatToParts, 2, 1) \
F(InternalDateParse, 2, 1) \
F(CreateNumberFormat, 3, 1) \
F(InternalNumberFormat, 2, 1) \
@@ -291,6 +290,7 @@ namespace internal {
F(CheckIsBootstrapping, 0, 1) \
F(CreateListFromArrayLike, 1, 1) \
F(EnqueueMicrotask, 1, 1) \
+ F(EnqueuePromiseResolveThenableJob, 6, 1) \
F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
F(ExportExperimentalFromRuntime, 1, 1) \
F(ExportFromRuntime, 1, 1) \
@@ -304,6 +304,7 @@ namespace internal {
F(NewTypeError, 2, 1) \
F(OrdinaryHasInstance, 2, 1) \
F(PromiseRejectEvent, 3, 1) \
+ F(PromiseRejectEventFromStack, 2, 1) \
F(PromiseRevokeReject, 1, 1) \
F(PromoteScheduledException, 0, 1) \
F(ReThrow, 1, 1) \
@@ -394,6 +395,7 @@ namespace internal {
F(IsJSGlobalProxy, 1, 1) \
F(DefineAccessorPropertyUnchecked, 5, 1) \
F(DefineDataPropertyInLiteral, 5, 1) \
+ F(DefineDataProperty, 5, 1) \
F(GetDataProperty, 2, 1) \
F(GetConstructorName, 1, 1) \
F(HasFastPackedElements, 1, 1) \
@@ -416,7 +418,10 @@ namespace internal {
F(HasInPrototypeChain, 2, 1) \
F(CreateIterResultObject, 2, 1) \
F(IsAccessCheckNeeded, 1, 1) \
- F(CreateDataProperty, 3, 1)
+ F(CreateDataProperty, 3, 1) \
+ F(LoadModuleExport, 1, 1) \
+ F(LoadModuleImport, 2, 1) \
+ F(StoreModuleExport, 2, 1)
#define FOR_EACH_INTRINSIC_OPERATORS(F) \
F(Multiply, 2, 1) \
@@ -475,8 +480,9 @@ namespace internal {
F(NewClosure_Tenured, 1, 1) \
F(NewScriptContext, 2, 1) \
F(NewFunctionContext, 1, 1) \
- F(PushWithContext, 2, 1) \
- F(PushCatchContext, 3, 1) \
+ F(PushModuleContext, 3, 1) \
+ F(PushWithContext, 3, 1) \
+ F(PushCatchContext, 4, 1) \
F(PushBlockContext, 2, 1) \
F(DeleteLookupSlot, 1, 1) \
F(LoadLookupSlot, 1, 1) \
@@ -797,8 +803,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_STRINGS(F) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringIndexOf, 3, 1) \
- F(StringLastIndexOf, 3, 1) \
- F(StringLocaleCompare, 2, 1) \
+ F(StringLastIndexOf, 2, 1) \
F(SubString, 3, 1) \
F(StringAdd, 2, 1) \
F(InternalizeString, 1, 1) \
@@ -888,7 +893,10 @@ namespace internal {
F(SerializeWasmModule, 1, 1) \
F(DeserializeWasmModule, 1, 1) \
F(IsAsmWasmCode, 1, 1) \
- F(IsNotAsmWasmCode, 1, 1)
+ F(IsNotAsmWasmCode, 1, 1) \
+ F(ValidateWasmInstancesChain, 2, 1) \
+ F(ValidateWasmModuleState, 1, 1) \
+ F(ValidateWasmOrphanedInstance, 1, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
F(ArrayBufferGetByteLength, 1, 1) \
@@ -905,27 +913,14 @@ namespace internal {
F(IsTypedArray, 1, 1) \
F(IsSharedTypedArray, 1, 1) \
F(IsSharedIntegerTypedArray, 1, 1) \
- F(IsSharedInteger32TypedArray, 1, 1) \
- F(DataViewGetUint8, 3, 1) \
- F(DataViewGetInt8, 3, 1) \
- F(DataViewGetUint16, 3, 1) \
- F(DataViewGetInt16, 3, 1) \
- F(DataViewGetUint32, 3, 1) \
- F(DataViewGetInt32, 3, 1) \
- F(DataViewGetFloat32, 3, 1) \
- F(DataViewGetFloat64, 3, 1) \
- F(DataViewSetUint8, 4, 1) \
- F(DataViewSetInt8, 4, 1) \
- F(DataViewSetUint16, 4, 1) \
- F(DataViewSetInt16, 4, 1) \
- F(DataViewSetUint32, 4, 1) \
- F(DataViewSetInt32, 4, 1) \
- F(DataViewSetFloat32, 4, 1) \
- F(DataViewSetFloat64, 4, 1)
+ F(IsSharedInteger32TypedArray, 1, 1)
#define FOR_EACH_INTRINSIC_WASM(F) \
F(WasmGrowMemory, 1, 1) \
- F(WasmThrowTypeError, 0, 1)
+ F(WasmMemorySize, 0, 1) \
+ F(WasmThrowTypeError, 0, 1) \
+ F(WasmThrow, 2, 1) \
+ F(WasmGetCaughtExceptionValue, 1, 1)
#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
F(LoadLookupSlotForCall, 1, 2)
@@ -935,30 +930,26 @@ namespace internal {
// Most intrinsics are implemented in the runtime/ directory, but ICs are
// implemented in ic.cc for now.
-#define FOR_EACH_INTRINSIC_IC(F) \
- F(BinaryOpIC_Miss, 2, 1) \
- F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
- F(CallIC_Miss, 3, 1) \
- F(CompareIC_Miss, 3, 1) \
- F(ElementsTransitionAndStoreIC_Miss, 5, 1) \
- F(KeyedLoadIC_Miss, 4, 1) \
- F(KeyedLoadIC_MissFromStubFailure, 4, 1) \
- F(KeyedStoreIC_Miss, 5, 1) \
- F(KeyedStoreIC_MissFromStubFailure, 5, 1) \
- F(KeyedStoreIC_Slow, 5, 1) \
- F(LoadElementWithInterceptor, 2, 1) \
- F(LoadGlobalIC_Miss, 2, 1) \
- F(LoadGlobalIC_Slow, 2, 1) \
- F(LoadIC_Miss, 4, 1) \
- F(LoadIC_MissFromStubFailure, 4, 1) \
- F(LoadPropertyWithInterceptor, 3, 1) \
- F(LoadPropertyWithInterceptorOnly, 3, 1) \
- F(StoreCallbackProperty, 6, 1) \
- F(StoreIC_Miss, 5, 1) \
- F(StoreIC_MissFromStubFailure, 5, 1) \
- F(TransitionStoreIC_MissFromStubFailure, 6, 1) \
- F(StorePropertyWithInterceptor, 3, 1) \
- F(ToBooleanIC_Miss, 1, 1) \
+#define FOR_EACH_INTRINSIC_IC(F) \
+ F(BinaryOpIC_Miss, 2, 1) \
+ F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
+ F(CallIC_Miss, 3, 1) \
+ F(CompareIC_Miss, 3, 1) \
+ F(ElementsTransitionAndStoreIC_Miss, 6, 1) \
+ F(KeyedLoadIC_Miss, 4, 1) \
+ F(KeyedLoadIC_MissFromStubFailure, 4, 1) \
+ F(KeyedStoreIC_Miss, 5, 1) \
+ F(KeyedStoreIC_Slow, 5, 1) \
+ F(LoadElementWithInterceptor, 2, 1) \
+ F(LoadGlobalIC_Miss, 2, 1) \
+ F(LoadGlobalIC_Slow, 2, 1) \
+ F(LoadIC_Miss, 4, 1) \
+ F(LoadPropertyWithInterceptor, 3, 1) \
+ F(LoadPropertyWithInterceptorOnly, 3, 1) \
+ F(StoreCallbackProperty, 6, 1) \
+ F(StoreIC_Miss, 5, 1) \
+ F(StorePropertyWithInterceptor, 3, 1) \
+ F(ToBooleanIC_Miss, 1, 1) \
F(Unreachable, 0, 1)
#define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
@@ -1044,13 +1035,8 @@ class Runtime : public AllStatic {
static const int kNotFound = -1;
- // Add internalized strings for all the intrinsic function names to a
- // StringDictionary.
- static void InitializeIntrinsicFunctionNames(Isolate* isolate,
- Handle<NameDictionary> dict);
-
- // Get the intrinsic function with the given name, which must be internalized.
- static const Function* FunctionForName(Handle<String> name);
+ // Get the intrinsic function with the given name.
+ static const Function* FunctionForName(const unsigned char* name, int length);
// Get the intrinsic function with the given FunctionId.
static const Function* FunctionForId(FunctionId id);
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index ce8038418d..b1bf02d196 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -1726,7 +1726,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// r4 : feedback vector
// r5 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- Label done_initialize_count, done_increment_count;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
@@ -1749,7 +1748,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Register weak_value = r9;
__ LoadP(weak_value, FieldMemOperand(r7, WeakCell::kValueOffset));
__ CmpP(r3, weak_value);
- __ beq(&done_increment_count, Label::kNear);
+ __ beq(&done, Label::kNear);
__ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
__ beq(&done, Label::kNear);
__ LoadP(feedback_map, FieldMemOperand(r7, HeapObject::kMapOffset));
@@ -1772,7 +1771,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
__ CmpP(r3, r7);
__ bne(&megamorphic);
- __ b(&done_increment_count, Label::kNear);
+ __ b(&done, Label::kNear);
__ bind(&miss);
@@ -1802,32 +1801,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
- __ b(&done_initialize_count, Label::kNear);
+ __ b(&done, Label::kNear);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
- __ bind(&done_initialize_count);
- // Initialize the call counter.
- __ LoadSmiLiteral(r7, Smi::FromInt(1));
- __ SmiToPtrArrayOffset(r6, r5);
- __ AddP(r6, r4, r6);
- __ StoreP(r7, FieldMemOperand(r6, count_offset), r0);
- __ b(&done, Label::kNear);
-
- __ bind(&done_increment_count);
+ __ bind(&done);
- // Increment the call count for monomorphic function calls.
+ // Increment the call count for all function calls.
__ SmiToPtrArrayOffset(r7, r5);
__ AddP(r7, r4, r7);
__ LoadP(r6, FieldMemOperand(r7, count_offset));
__ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
__ StoreP(r6, FieldMemOperand(r7, count_offset), r0);
-
- __ bind(&done);
}
void CallConstructStub::Generate(MacroAssembler* masm) {
@@ -1873,6 +1862,17 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+// Note: feedback_vector and slot are clobbered after the call.
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+ Register slot, Register temp) {
+ const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+ __ SmiToPtrArrayOffset(temp, slot);
+ __ AddP(feedback_vector, feedback_vector, temp);
+ __ LoadP(slot, FieldMemOperand(feedback_vector, count_offset));
+ __ AddSmiLiteral(slot, slot, Smi::FromInt(1), temp);
+ __ StoreP(slot, FieldMemOperand(feedback_vector, count_offset), temp);
+}
+
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// r3 - function
// r5 - slot id
@@ -1885,12 +1885,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ mov(r2, Operand(arg_count()));
// Increment the call count for monomorphic function calls.
- const int count_offset = FixedArray::kHeaderSize + kPointerSize;
- __ SmiToPtrArrayOffset(r7, r5);
- __ AddP(r4, r4, r7);
- __ LoadP(r5, FieldMemOperand(r4, count_offset));
- __ AddSmiLiteral(r5, r5, Smi::FromInt(1), r0);
- __ StoreP(r5, FieldMemOperand(r4, count_offset), r0);
+ IncrementCallCount(masm, r4, r5, r1);
__ LoadRR(r4, r6);
__ LoadRR(r5, r3);
@@ -1902,7 +1897,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// r3 - function
// r5 - slot id (Smi)
// r4 - vector
- Label extra_checks_or_miss, call, call_function;
+ Label extra_checks_or_miss, call, call_function, call_count_incremented;
int argc = arg_count();
ParameterCount actual(argc);
@@ -1933,13 +1928,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(r3, &extra_checks_or_miss);
+ __ bind(&call_function);
+
// Increment the call count for monomorphic function calls.
- const int count_offset = FixedArray::kHeaderSize + kPointerSize;
- __ LoadP(r5, FieldMemOperand(r8, count_offset));
- __ AddSmiLiteral(r5, r5, Smi::FromInt(1), r0);
- __ StoreP(r5, FieldMemOperand(r8, count_offset), r0);
+ IncrementCallCount(masm, r4, r5, r1);
- __ bind(&call_function);
__ mov(r2, Operand(argc));
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
@@ -1979,6 +1972,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
__ bind(&call);
+
+ // Increment the call count for megamorphic function calls.
+ IncrementCallCount(masm, r4, r5, r1);
+
+ __ bind(&call_count_incremented);
__ mov(r2, Operand(argc));
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -2005,10 +2003,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ CmpP(r6, ip);
__ bne(&miss);
- // Initialize the call counter.
- __ LoadSmiLiteral(r7, Smi::FromInt(1));
- __ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
-
// Store the function. Use a stub since we need a frame for allocation.
// r4 - vector
// r5 - slot
@@ -2016,9 +2010,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(r4);
+ __ Push(r5);
__ Push(cp, r3);
__ CallStub(&create_stub);
__ Pop(cp, r3);
+ __ Pop(r5);
+ __ Pop(r4);
}
__ b(&call_function);
@@ -2028,7 +2026,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
GenerateMiss(masm);
- __ b(&call);
+ __ b(&call_count_incremented);
}
void CallICStub::GenerateMiss(MacroAssembler* masm) {
@@ -2204,297 +2202,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
__ bind(&done);
}
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // lr: return address
- // sp[0]: to
- // sp[4]: from
- // sp[8]: string
-
- // This stub is called from the native-call %_SubString(...), so
- // nothing can be assumed about the arguments. It is tested that:
- // "string" is a sequential string,
- // both "from" and "to" are smis, and
- // 0 <= from <= to <= string.length.
- // If any of these assumptions fail, we call the runtime system.
-
- const int kToOffset = 0 * kPointerSize;
- const int kFromOffset = 1 * kPointerSize;
- const int kStringOffset = 2 * kPointerSize;
-
- __ LoadP(r4, MemOperand(sp, kToOffset));
- __ LoadP(r5, MemOperand(sp, kFromOffset));
-
- // If either to or from had the smi tag bit set, then fail to generic runtime
- __ JumpIfNotSmi(r4, &runtime);
- __ JumpIfNotSmi(r5, &runtime);
- __ SmiUntag(r4);
- __ SmiUntag(r5);
- // Both r4 and r5 are untagged integers.
-
- // We want to bailout to runtime here if From is negative.
- __ blt(&runtime); // From < 0.
-
- __ CmpLogicalP(r5, r4);
- __ bgt(&runtime); // Fail if from > to.
- __ SubP(r4, r4, r5);
-
- // Make sure first argument is a string.
- __ LoadP(r2, MemOperand(sp, kStringOffset));
- __ JumpIfSmi(r2, &runtime);
- Condition is_string = masm->IsObjectStringType(r2, r3);
- __ b(NegateCondition(is_string), &runtime);
-
- Label single_char;
- __ CmpP(r4, Operand(1));
- __ b(eq, &single_char);
-
- // Short-cut for the case of trivial substring.
- Label return_r2;
- // r2: original string
- // r4: result string length
- __ LoadP(r6, FieldMemOperand(r2, String::kLengthOffset));
- __ SmiUntag(r0, r6);
- __ CmpLogicalP(r4, r0);
- // Return original string.
- __ beq(&return_r2);
- // Longer than original string's length or negative: unsafe arguments.
- __ bgt(&runtime);
- // Shorter than original string's length: an actual substring.
-
- // Deal with different string types: update the index if necessary
- // and put the underlying string into r7.
- // r2: original string
- // r3: instance type
- // r4: length
- // r5: from index (untagged)
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ mov(r0, Operand(kIsIndirectStringMask));
- __ AndP(r0, r3);
- __ beq(&seq_or_external_string);
-
- __ mov(r0, Operand(kSlicedNotConsMask));
- __ AndP(r0, r3);
- __ bne(&sliced_string);
- // Cons string. Check whether it is flat, then fetch first part.
- __ LoadP(r7, FieldMemOperand(r2, ConsString::kSecondOffset));
- __ CompareRoot(r7, Heap::kempty_stringRootIndex);
- __ bne(&runtime);
- __ LoadP(r7, FieldMemOperand(r2, ConsString::kFirstOffset));
- // Update instance type.
- __ LoadP(r3, FieldMemOperand(r7, HeapObject::kMapOffset));
- __ LoadlB(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
- __ b(&underlying_unpacked);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ LoadP(r7, FieldMemOperand(r2, SlicedString::kParentOffset));
- __ LoadP(r6, FieldMemOperand(r2, SlicedString::kOffsetOffset));
- __ SmiUntag(r3, r6);
- __ AddP(r5, r3); // Add offset to index.
- // Update instance type.
- __ LoadP(r3, FieldMemOperand(r7, HeapObject::kMapOffset));
- __ LoadlB(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
- __ b(&underlying_unpacked);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the expected register.
- __ LoadRR(r7, r2);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // r7: underlying subject string
- // r3: instance type of underlying subject string
- // r4: length
- // r5: adjusted start index (untagged)
- __ CmpP(r4, Operand(SlicedString::kMinLength));
- // Short slice. Copy instead of slicing.
- __ blt(&copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ mov(r0, Operand(kStringEncodingMask));
- __ AndP(r0, r3);
- __ beq(&two_byte_slice);
- __ AllocateOneByteSlicedString(r2, r4, r8, r9, &runtime);
- __ b(&set_slice_header);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(r2, r4, r8, r9, &runtime);
- __ bind(&set_slice_header);
- __ SmiTag(r5);
- __ StoreP(r7, FieldMemOperand(r2, SlicedString::kParentOffset));
- __ StoreP(r5, FieldMemOperand(r2, SlicedString::kOffsetOffset));
- __ b(&return_r2);
-
- __ bind(&copy_routine);
- }
-
- // r7: underlying subject string
- // r3: instance type of underlying subject string
- // r4: length
- // r5: adjusted start index (untagged)
- Label two_byte_sequential, sequential_string, allocate_result;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ mov(r0, Operand(kExternalStringTag));
- __ AndP(r0, r3);
- __ beq(&sequential_string);
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ mov(r0, Operand(kShortExternalStringTag));
- __ AndP(r0, r3);
- __ bne(&runtime);
- __ LoadP(r7, FieldMemOperand(r7, ExternalString::kResourceDataOffset));
- // r7 already points to the first character of underlying string.
- __ b(&allocate_result);
-
- __ bind(&sequential_string);
- // Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ AddP(r7, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&allocate_result);
- // Sequential acii string. Allocate the result.
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ mov(r0, Operand(kStringEncodingMask));
- __ AndP(r0, r3);
- __ beq(&two_byte_sequential);
-
- // Allocate and copy the resulting one-byte string.
- __ AllocateOneByteString(r2, r4, r6, r8, r9, &runtime);
-
- // Locate first character of substring to copy.
- __ AddP(r7, r5);
- // Locate first character of result.
- __ AddP(r3, r2, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // r2: result string
- // r3: first character of result string
- // r4: result string length
- // r7: first character of substring to copy
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharacters(masm, r3, r7, r4, r5,
- String::ONE_BYTE_ENCODING);
- __ b(&return_r2);
-
- // Allocate and copy the resulting two-byte string.
- __ bind(&two_byte_sequential);
- __ AllocateTwoByteString(r2, r4, r6, r8, r9, &runtime);
-
- // Locate first character of substring to copy.
- __ ShiftLeftP(r3, r5, Operand(1));
- __ AddP(r7, r3);
- // Locate first character of result.
- __ AddP(r3, r2, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // r2: result string.
- // r3: first character of result.
- // r4: result length.
- // r7: first character of substring to copy.
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharacters(masm, r3, r7, r4, r5,
- String::TWO_BYTE_ENCODING);
-
- __ bind(&return_r2);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1, r5, r6);
- __ Drop(3);
- __ Ret();
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString);
-
- __ bind(&single_char);
- // r2: original string
- // r3: instance type
- // r4: length
- // r5: from index (untagged)
- __ SmiTag(r5, r5);
- StringCharAtGenerator generator(r2, r5, r4, r2, &runtime, &runtime, &runtime,
- RECEIVER_IS_STRING);
- generator.GenerateFast(masm);
- __ Drop(3);
- __ Ret();
- generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
- // The ToString stub takes one argument in r2.
- Label done;
- Label is_number;
- __ JumpIfSmi(r2, &is_number);
-
- __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
- // r2: receiver
- // r3: receiver instance type
- __ blt(&done);
-
- Label not_heap_number;
- __ CmpP(r3, Operand(HEAP_NUMBER_TYPE));
- __ bne(&not_heap_number);
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ CmpP(r3, Operand(ODDBALL_TYPE));
- __ bne(&not_oddball);
- __ LoadP(r2, FieldMemOperand(r2, Oddball::kToStringOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ push(r2); // Push argument.
- __ TailCallRuntime(Runtime::kToString);
-
- __ bind(&done);
- __ Ret();
-}
-
-void ToNameStub::Generate(MacroAssembler* masm) {
- // The ToName stub takes one argument in r2.
- Label is_number;
- __ JumpIfSmi(r2, &is_number);
-
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ CompareObjectType(r2, r3, r3, LAST_NAME_TYPE);
- // r2: receiver
- // r3: receiver instance type
- __ Ret(le);
-
- Label not_heap_number;
- __ CmpP(r3, Operand(HEAP_NUMBER_TYPE));
- __ bne(&not_heap_number);
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ CmpP(r3, Operand(ODDBALL_TYPE));
- __ bne(&not_oddball);
- __ LoadP(r2, FieldMemOperand(r2, Oddball::kToStringOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ push(r2); // Push argument.
- __ TailCallRuntime(Runtime::kToName);
-}
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
@@ -3357,18 +3064,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_scratch;
- DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
- __ AndP(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
- __ LoadP(
- regs_.scratch1(),
- MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
- __ SubP(regs_.scratch1(), regs_.scratch1(), Operand(1));
- __ StoreP(
- regs_.scratch1(),
- MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
- __ CmpP(regs_.scratch1(), Operand::Zero()); // S390, we could do better here
- __ blt(&need_incremental);
-
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -3785,7 +3480,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
__ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
// Load the map into the correct register.
- DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
__ LoadRR(feedback, too_far);
__ AddP(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4521,7 +4216,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ CmpP(r9, Operand(Page::kMaxRegularHeapObjectSize));
+ __ CmpP(r9, Operand(kMaxRegularHeapObjectSize));
__ bgt(&too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -4896,7 +4591,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ CmpP(r9, Operand(Page::kMaxRegularHeapObjectSize));
+ __ CmpP(r9, Operand(kMaxRegularHeapObjectSize));
__ bgt(&too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index 4cdcd54521..ca40a0c9f1 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -38,11 +38,9 @@ const Register StoreDescriptor::SlotRegister() { return r6; }
const Register StoreWithVectorDescriptor::VectorRegister() { return r5; }
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return r6; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return r5; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return r7; }
-
-const Register StoreTransitionDescriptor::MapRegister() { return r5; }
+const Register StoreTransitionDescriptor::SlotRegister() { return r6; }
+const Register StoreTransitionDescriptor::VectorRegister() { return r5; }
+const Register StoreTransitionDescriptor::MapRegister() { return r7; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r4; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r2; }
@@ -324,7 +322,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // callee
@@ -359,7 +357,19 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
r2, // argument count (not including receiver)
r5, // new target
r3, // constructor to call
- r4 // address of the first argument
+ r4, // allocation site feedback if available, undefined otherwise
+ r6 // address of the first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r2, // argument count (not including receiver)
+ r3, // target to call checked to be Array function
+ r4, // allocation site feedback if available, undefined otherwise
+ r5 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 8b708de734..769d3dc1b0 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -251,10 +251,7 @@ void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
void MacroAssembler::InNewSpace(Register object, Register scratch,
Condition cond, Label* branch) {
DCHECK(cond == eq || cond == ne);
- // TODO(joransiu): check if we can merge mov Operand into AndP.
- const int mask =
- (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
- CheckPageFlag(object, scratch, mask, cond, branch);
+ CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
}
void MacroAssembler::RecordWriteField(
@@ -1709,7 +1706,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
@@ -1965,7 +1962,7 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
void MacroAssembler::FastAllocate(int object_size, Register result,
Register scratch1, Register scratch2,
AllocationFlags flags) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index b8ed3a057d..7f2d0421bf 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -194,6 +194,18 @@ class MacroAssembler : public Assembler {
void Ret() { b(r14); }
void Ret(Condition cond) { b(cond, r14); }
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+ // below the caller's sp.
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 0) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ UNIMPLEMENTED();
+ }
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count);
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index 91db78226b..78bc939842 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -660,8 +660,8 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
-void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
- size_t size) {
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+ void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
int intra_line = (start & CachePage::kLineMask);
start -= intra_line;
@@ -681,7 +681,8 @@ void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
}
}
-CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page) {
base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
@@ -691,7 +692,8 @@ CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
}
// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+ intptr_t start, int size) {
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
@@ -703,7 +705,8 @@ void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
-void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -1469,7 +1472,7 @@ void Simulator::EvalTableInit() {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
- i_cache_ = new base::HashMap(&ICacheMatch);
+ i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
@@ -1609,7 +1612,8 @@ class Redirection {
};
// static
-void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
+ Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
diff --git a/deps/v8/src/s390/simulator-s390.h b/deps/v8/src/s390/simulator-s390.h
index 7af00ee25f..1ce6bf776b 100644
--- a/deps/v8/src/s390/simulator-s390.h
+++ b/deps/v8/src/s390/simulator-s390.h
@@ -211,7 +211,7 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
- static void TearDown(base::HashMap* i_cache, Redirection* first);
+ static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
@@ -233,7 +233,8 @@ class Simulator {
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
- static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
+ static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+ size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_lr, end_sim_pc).
@@ -445,9 +446,12 @@ class Simulator {
void ExecuteInstruction(Instruction* instr, bool auto_incr_pc = true);
// ICache.
- static void CheckICache(base::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
- static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
+ static void CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr);
+ static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+ int size);
+ static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page);
// Runtime call support.
static void* RedirectExternalReference(
@@ -482,7 +486,7 @@ class Simulator {
char* last_debugger_input_;
// Icache simulation
- base::HashMap* i_cache_;
+ base::CustomMatcherHashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index fbb0152eb3..e0e9d95c48 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -8,7 +8,7 @@
#include "src/allocation.h"
#include "src/heap/heap.h"
#include "src/v8memory.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/signature.h b/deps/v8/src/signature.h
index 3fa5f8290f..97238b6749 100644
--- a/deps/v8/src/signature.h
+++ b/deps/v8/src/signature.h
@@ -5,7 +5,7 @@
#ifndef V8_SIGNATURE_H_
#define V8_SIGNATURE_H_
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/small-pointer-list.h b/deps/v8/src/small-pointer-list.h
index 9ece249064..ac5ecaae57 100644
--- a/deps/v8/src/small-pointer-list.h
+++ b/deps/v8/src/small-pointer-list.h
@@ -7,7 +7,7 @@
#include "src/base/logging.h"
#include "src/globals.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 8d2f5d9339..16044a5059 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -98,6 +98,10 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
UNREACHABLE();
}
+ if (ElideObject(obj)) {
+ return SerializeObject(*isolate()->factory()->undefined_value(),
+ how_to_code, where_to_point, skip);
+ }
// Past this point we should not see any (context-specific) maps anymore.
CHECK(!obj->IsMap());
// There should be no references to the global object embedded.
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index e82a7d5dd6..b3c54d1c84 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -36,6 +36,7 @@ class CodeSerializer : public Serializer {
UNREACHABLE();
}
+ virtual bool ElideObject(Object* obj) { return false; }
void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
WhereToPoint where_to_point);
@@ -73,6 +74,8 @@ class WasmCompiledModuleSerializer : public CodeSerializer {
}
}
+ bool ElideObject(Object* obj) override { return obj->IsWeakCell(); };
+
private:
WasmCompiledModuleSerializer(Isolate* isolate, uint32_t source_hash)
: CodeSerializer(isolate, source_hash) {}
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 7a2df28f62..b90a2c5b10 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -414,7 +414,7 @@ Address Deserializer::Allocate(int space_index, int size) {
LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
Executability exec = static_cast<Executability>(source_.Get());
AllocationResult result = lo_space->AllocateRaw(size, exec);
- HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
+ HeapObject* obj = result.ToObjectChecked();
deserialized_large_objects_.Add(obj);
return obj->address();
} else if (space_index == MAP_SPACE) {
diff --git a/deps/v8/src/snapshot/natives.h b/deps/v8/src/snapshot/natives.h
index e44751537f..a9dc306a3d 100644
--- a/deps/v8/src/snapshot/natives.h
+++ b/deps/v8/src/snapshot/natives.h
@@ -22,8 +22,15 @@ enum NativeType {
TEST
};
+// Extra handling for V8_EXPORT_PRIVATE in combination with USING_V8_SHARED
+// since definition of methods of classes marked as dllimport is not allowed.
template <NativeType type>
+#ifdef USING_V8_SHARED
class NativesCollection {
+#else
+class V8_EXPORT_PRIVATE NativesCollection {
+#endif // USING_V8_SHARED
+
public:
// The following methods are implemented in js2c-generated code:
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index bb3cc5c535..adfd6e4f64 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -14,7 +14,7 @@ namespace internal {
ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
map_ = isolate->external_reference_map();
if (map_ != NULL) return;
- map_ = new base::HashMap(base::HashMap::PointersMatch);
+ map_ = new base::HashMap();
ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
for (int i = 0; i < table->size(); ++i) {
Address addr = table->address(i);
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index d7a7f89278..f622a5b8d2 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -403,9 +403,8 @@ void Serializer::ObjectSerializer::SerializeExternalString() {
ExternalTwoByteString::cast(string)->resource()->data());
}
- AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize)
- ? LO_SPACE
- : OLD_SPACE;
+ AllocationSpace space =
+ (allocation_size > kMaxRegularHeapObjectSize) ? LO_SPACE : OLD_SPACE;
SerializePrologue(space, allocation_size, map);
// Output the rest of the imaginary string.
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index ff2c6a979d..0f87774548 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -38,7 +38,7 @@ class CodeAddressMap : public CodeEventLogger {
private:
class NameMap {
public:
- NameMap() : impl_(base::HashMap::PointersMatch) {}
+ NameMap() : impl_() {}
~NameMap() {
for (base::HashMap::Entry* p = impl_.Start(); p != NULL;
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index fed45d16b6..959ac56fa9 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -31,19 +31,6 @@ bool Snapshot::HasContextSnapshot(Isolate* isolate, size_t index) {
return index < num_contexts;
}
-
-uint32_t Snapshot::SizeOfFirstPage(Isolate* isolate, AllocationSpace space) {
- DCHECK(space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE);
- if (!isolate->snapshot_available()) {
- return static_cast<uint32_t>(MemoryAllocator::PageAreaSize(space));
- }
- uint32_t size;
- int offset = kFirstPageSizesOffset + (space - FIRST_PAGED_SPACE) * kInt32Size;
- memcpy(&size, isolate->snapshot_blob()->data + offset, kInt32Size);
- return size;
-}
-
-
bool Snapshot::Initialize(Isolate* isolate) {
if (!isolate->snapshot_available()) return false;
base::ElapsedTimer timer;
@@ -89,25 +76,8 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
return Handle<Context>::cast(result);
}
-void UpdateMaxRequirementPerPage(
- uint32_t* requirements,
- Vector<const SerializedData::Reservation> reservations) {
- int space = 0;
- uint32_t current_requirement = 0;
- for (const auto& reservation : reservations) {
- current_requirement += reservation.chunk_size();
- if (reservation.is_last()) {
- requirements[space] = std::max(requirements[space], current_requirement);
- current_requirement = 0;
- space++;
- }
- }
- DCHECK_EQ(i::Serializer::kNumberOfSpaces, space);
-}
-
-void CalculateFirstPageSizes(const SnapshotData* startup_snapshot,
- const List<SnapshotData*>* context_snapshots,
- uint32_t* sizes_out) {
+void ProfileDeserialization(const SnapshotData* startup_snapshot,
+ const List<SnapshotData*>* context_snapshots) {
if (FLAG_profile_deserialization) {
int startup_total = 0;
PrintF("Deserialization will reserve:\n");
@@ -123,36 +93,6 @@ void CalculateFirstPageSizes(const SnapshotData* startup_snapshot,
PrintF("%10d bytes per context #%d\n", context_total, i);
}
}
-
- uint32_t startup_requirements[i::Serializer::kNumberOfSpaces];
- uint32_t context_requirements[i::Serializer::kNumberOfSpaces];
- for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
- startup_requirements[space] = 0;
- context_requirements[space] = 0;
- }
-
- UpdateMaxRequirementPerPage(startup_requirements,
- startup_snapshot->Reservations());
- for (const auto& context_snapshot : *context_snapshots) {
- UpdateMaxRequirementPerPage(context_requirements,
- context_snapshot->Reservations());
- }
-
- for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
- // If the space requirement for a page is less than a page size, we consider
- // limiting the size of the first page in order to save memory on startup.
- uint32_t required = startup_requirements[space] +
- 2 * context_requirements[space] +
- Page::kObjectStartOffset;
- // Add a small allowance to the code space for small scripts.
- if (space == CODE_SPACE) required += 32 * KB;
-
- if (space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE) {
- uint32_t max_size =
- MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space));
- sizes_out[space - FIRST_PAGED_SPACE] = std::min(required, max_size);
- }
- }
}
v8::StartupData Snapshot::CreateSnapshotBlob(
@@ -166,13 +106,9 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
total_length += context_snapshot->RawData().length();
}
- uint32_t first_page_sizes[kNumPagedSpaces];
- CalculateFirstPageSizes(startup_snapshot, context_snapshots,
- first_page_sizes);
+ ProfileDeserialization(startup_snapshot, context_snapshots);
char* data = new char[total_length];
- memcpy(data + kFirstPageSizesOffset, first_page_sizes,
- kNumPagedSpaces * kInt32Size);
memcpy(data + kNumberOfContextsOffset, &num_contexts, kInt32Size);
int payload_offset = StartupSnapshotOffset(num_contexts);
int payload_length = startup_snapshot->RawData().length();
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index a541592fee..49a60926dc 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -67,9 +67,6 @@ class Snapshot : public AllStatic {
static bool EmbedsScript(Isolate* isolate);
- static uint32_t SizeOfFirstPage(Isolate* isolate, AllocationSpace space);
-
-
// To be implemented by the snapshot source.
static const v8::StartupData* DefaultSnapshotBlob();
@@ -88,21 +85,16 @@ class Snapshot : public AllStatic {
int index);
// Snapshot blob layout:
- // [0 - 5] pre-calculated first page sizes for paged spaces
- // [6] number of contexts N
- // [7] offset to context 0
- // [8] offset to context 1
+ // [0] number of contexts N
+ // [1] offset to context 0
+ // [2] offset to context 1
// ...
// ... offset to context N - 1
// ... startup snapshot data
// ... context 0 snapshot data
// ... context 1 snapshot data
- static const int kNumPagedSpaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
-
- static const int kFirstPageSizesOffset = 0;
- static const int kNumberOfContextsOffset =
- kFirstPageSizesOffset + kNumPagedSpaces * kInt32Size;
+ static const int kNumberOfContextsOffset = 0;
static const int kFirstContextOffsetOffset =
kNumberOfContextsOffset + kInt32Size;
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index cc66f71ae9..9c1c3b904c 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -32,8 +32,7 @@ class StartupSerializer : public Serializer {
private:
class PartialCacheIndexMap : public AddressMapBase {
public:
- PartialCacheIndexMap()
- : map_(base::HashMap::PointersMatch), next_index_(0) {}
+ PartialCacheIndexMap() : map_(), next_index_(0) {}
// Lookup object in the map. Return its index if found, or create
// a new entry with new_index as value, and return kInvalidIndex.
diff --git a/deps/v8/src/source-position-table.h b/deps/v8/src/source-position-table.h
index 76ae4a0759..74c3b9e45f 100644
--- a/deps/v8/src/source-position-table.h
+++ b/deps/v8/src/source-position-table.h
@@ -8,7 +8,7 @@
#include "src/assert-scope.h"
#include "src/checks.h"
#include "src/handles.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/tracing/trace-event.cc b/deps/v8/src/tracing/trace-event.cc
index 3e0a0fab21..440af193e9 100644
--- a/deps/v8/src/tracing/trace-event.cc
+++ b/deps/v8/src/tracing/trace-event.cc
@@ -6,6 +6,7 @@
#include <string.h>
+#include "src/counters.h"
#include "src/isolate.h"
#include "src/v8.h"
@@ -26,9 +27,11 @@ void CallStatsScopedTracer::AddEndTraceEvent() {
v8::internal::tracing::AddTraceEvent(
TRACE_EVENT_PHASE_END, p_data_->category_group_enabled, p_data_->name,
v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId,
- v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_COPY,
- "runtime-call-stat",
- TRACE_STR_COPY(p_data_->isolate->trace_event_stats_table()->Dump()));
+ v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_NONE,
+ "runtime-call-stats", TRACE_STR_COPY(p_data_->isolate->counters()
+ ->runtime_call_stats()
+ ->Dump()
+ .c_str()));
} else {
v8::internal::tracing::AddTraceEvent(
TRACE_EVENT_PHASE_END, p_data_->category_group_enabled, p_data_->name,
@@ -37,14 +40,14 @@ void CallStatsScopedTracer::AddEndTraceEvent() {
}
}
-void CallStatsScopedTracer::Initialize(Isolate* isolate,
+void CallStatsScopedTracer::Initialize(v8::internal::Isolate* isolate,
const uint8_t* category_group_enabled,
const char* name) {
data_.isolate = isolate;
data_.category_group_enabled = category_group_enabled;
data_.name = name;
p_data_ = &data_;
- TraceEventStatsTable* table = isolate->trace_event_stats_table();
+ RuntimeCallStats* table = isolate->counters()->runtime_call_stats();
has_parent_scope_ = table->InUse();
if (!has_parent_scope_) table->Reset();
v8::internal::tracing::AddTraceEvent(
@@ -53,88 +56,6 @@ void CallStatsScopedTracer::Initialize(Isolate* isolate,
TRACE_EVENT_FLAG_NONE, v8::internal::tracing::kNoId);
}
-void TraceEventStatsTable::Enter(Isolate* isolate,
- TraceEventCallStatsTimer* timer,
- CounterId counter_id) {
- TraceEventStatsTable* table = isolate->trace_event_stats_table();
- RuntimeCallCounter* counter = &(table->*counter_id);
- timer->Start(counter, table->current_timer_);
- table->current_timer_ = timer;
-}
-
-void TraceEventStatsTable::Leave(Isolate* isolate,
- TraceEventCallStatsTimer* timer) {
- TraceEventStatsTable* table = isolate->trace_event_stats_table();
- if (table->current_timer_ == timer) {
- table->current_timer_ = timer->Stop();
- }
-}
-
-void TraceEventStatsTable::Reset() {
- in_use_ = true;
- current_timer_ = nullptr;
-
-#define RESET_COUNTER(name) this->name.Reset();
- FOR_EACH_MANUAL_COUNTER(RESET_COUNTER)
-#undef RESET_COUNTER
-
-#define RESET_COUNTER(name, nargs, result_size) this->Runtime_##name.Reset();
- FOR_EACH_INTRINSIC(RESET_COUNTER)
-#undef RESET_COUNTER
-
-#define RESET_COUNTER(name) this->Builtin_##name.Reset();
- BUILTIN_LIST_C(RESET_COUNTER)
-#undef RESET_COUNTER
-
-#define RESET_COUNTER(name) this->API_##name.Reset();
- FOR_EACH_API_COUNTER(RESET_COUNTER)
-#undef RESET_COUNTER
-
-#define RESET_COUNTER(name) this->Handler_##name.Reset();
- FOR_EACH_HANDLER_COUNTER(RESET_COUNTER)
-#undef RESET_COUNTER
-}
-
-const char* TraceEventStatsTable::Dump() {
- buffer_.str(std::string());
- buffer_.clear();
- buffer_ << "{";
-#define DUMP_COUNTER(name) \
- if (this->name.count > 0) this->name.Dump(buffer_);
- FOR_EACH_MANUAL_COUNTER(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name, nargs, result_size) \
- if (this->Runtime_##name.count > 0) this->Runtime_##name.Dump(buffer_);
- FOR_EACH_INTRINSIC(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name) \
- if (this->Builtin_##name.count > 0) this->Builtin_##name.Dump(buffer_);
- BUILTIN_LIST_C(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name) \
- if (this->API_##name.count > 0) this->API_##name.Dump(buffer_);
- FOR_EACH_API_COUNTER(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name) \
- if (this->Handler_##name.count > 0) this->Handler_##name.Dump(buffer_);
- FOR_EACH_HANDLER_COUNTER(DUMP_COUNTER)
-#undef DUMP_COUNTER
- buffer_ << "\"END\":[]}";
- const std::string& buffer_str = buffer_.str();
- size_t length = buffer_str.size();
- if (length > len_) {
- buffer_c_str_.reset(new char[length + 1]);
- len_ = length;
- }
- strncpy(buffer_c_str_.get(), buffer_str.c_str(), length + 1);
- in_use_ = false;
- return buffer_c_str_.get();
-}
-
} // namespace tracing
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index 25ccd8045a..35d2e1507d 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -6,12 +6,12 @@
#define SRC_TRACING_TRACE_EVENT_H_
#include <stddef.h>
+#include <memory>
#include "base/trace_event/common/trace_event_common.h"
#include "include/v8-platform.h"
#include "src/base/atomicops.h"
#include "src/base/macros.h"
-#include "src/counters.h"
// This header file defines implementation details of how the trace macros in
// trace_event_common.h collect and store trace events. Anything not
@@ -121,8 +121,7 @@ enum CategoryGroupEnabledFlags {
// const uint8_t* arg_types,
// const uint64_t* arg_values,
// unsigned int flags)
-#define TRACE_EVENT_API_ADD_TRACE_EVENT \
- v8::internal::tracing::TraceEventHelper::GetCurrentPlatform()->AddTraceEvent
+#define TRACE_EVENT_API_ADD_TRACE_EVENT v8::internal::tracing::AddTraceEventImpl
// Set the duration field of a COMPLETE trace event.
// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
@@ -281,7 +280,7 @@ extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
uint64_t cid_; \
}; \
INTERNAL_TRACE_EVENT_UID(ScopedContext) \
- INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
+ INTERNAL_TRACE_EVENT_UID(scoped_context)(context);
#define TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() \
base::NoBarrier_Load(&v8::internal::tracing::kRuntimeCallStatsTracingEnabled)
@@ -289,9 +288,6 @@ extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
#define TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \
INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name)
-#define TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, counter_id) \
- INTERNAL_TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, counter_id)
-
#define INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \
{ \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO( \
@@ -309,13 +305,11 @@ extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
name); \
}
-#define INTERNAL_TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, \
- counter_id) \
- v8::internal::tracing::CounterScope INTERNAL_TRACE_EVENT_UID(scope)( \
- isolate, counter_id);
-
namespace v8 {
namespace internal {
+
+class Isolate;
+
namespace tracing {
// Specify these values when the corresponding argument of AddTraceEvent is not
@@ -460,6 +454,28 @@ class TraceStringWithCopy {
const char* str_;
};
+static V8_INLINE uint64_t AddTraceEventImpl(
+ char phase, const uint8_t* category_group_enabled, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values, unsigned int flags) {
+ std::unique_ptr<ConvertableToTraceFormat> arg_convertables[2];
+ if (num_args > 0 && arg_types[0] == TRACE_VALUE_TYPE_CONVERTABLE) {
+ arg_convertables[0].reset(reinterpret_cast<ConvertableToTraceFormat*>(
+ static_cast<intptr_t>(arg_values[0])));
+ }
+ if (num_args > 1 && arg_types[1] == TRACE_VALUE_TYPE_CONVERTABLE) {
+ arg_convertables[1].reset(reinterpret_cast<ConvertableToTraceFormat*>(
+ static_cast<intptr_t>(arg_values[1])));
+ }
+ DCHECK(num_args <= 2);
+ v8::Platform* platform =
+ v8::internal::tracing::TraceEventHelper::GetCurrentPlatform();
+ return platform->AddTraceEvent(phase, category_group_enabled, name, scope, id,
+ bind_id, num_args, arg_names, arg_types,
+ arg_values, arg_convertables, flags);
+}
+
// Define SetTraceValue for each allowed type. It stores the type and
// value in the return arguments. This allows this API to avoid declaring any
// structures so that it is portable to third_party libraries.
@@ -500,6 +516,19 @@ INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
#undef INTERNAL_DECLARE_SET_TRACE_VALUE
#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
+static V8_INLINE void SetTraceValue(ConvertableToTraceFormat* convertable_value,
+ unsigned char* type, uint64_t* value) {
+ *type = TRACE_VALUE_TYPE_CONVERTABLE;
+ *value = static_cast<uint64_t>(reinterpret_cast<intptr_t>(convertable_value));
+}
+
+template <typename T>
+static V8_INLINE typename std::enable_if<
+ std::is_convertible<T*, ConvertableToTraceFormat*>::value>::type
+SetTraceValue(std::unique_ptr<T> ptr, unsigned char* type, uint64_t* value) {
+ SetTraceValue(ptr.release(), type, value);
+}
+
// These AddTraceEvent template
// function is defined here instead of in the macro, because the arg_values
// could be temporary objects, such as std::string. In order to store
@@ -512,36 +541,38 @@ static V8_INLINE uint64_t AddTraceEvent(char phase,
uint64_t id, uint64_t bind_id,
unsigned int flags) {
return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
- scope, id, bind_id, kZeroNumArgs, NULL,
- NULL, NULL, flags);
+ scope, id, bind_id, kZeroNumArgs,
+ nullptr, nullptr, nullptr, flags);
}
template <class ARG1_TYPE>
static V8_INLINE uint64_t AddTraceEvent(
char phase, const uint8_t* category_group_enabled, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
- const char* arg1_name, const ARG1_TYPE& arg1_val) {
+ const char* arg1_name, ARG1_TYPE&& arg1_val) {
const int num_args = 1;
- uint8_t arg_types[1];
- uint64_t arg_values[1];
- SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ uint8_t arg_type;
+ uint64_t arg_value;
+ SetTraceValue(std::forward<ARG1_TYPE>(arg1_val), &arg_type, &arg_value);
return TRACE_EVENT_API_ADD_TRACE_EVENT(
phase, category_group_enabled, name, scope, id, bind_id, num_args,
- &arg1_name, arg_types, arg_values, flags);
+ &arg1_name, &arg_type, &arg_value, flags);
}
template <class ARG1_TYPE, class ARG2_TYPE>
static V8_INLINE uint64_t AddTraceEvent(
char phase, const uint8_t* category_group_enabled, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
- const char* arg1_name, const ARG1_TYPE& arg1_val, const char* arg2_name,
- const ARG2_TYPE& arg2_val) {
+ const char* arg1_name, ARG1_TYPE&& arg1_val, const char* arg2_name,
+ ARG2_TYPE&& arg2_val) {
const int num_args = 2;
const char* arg_names[2] = {arg1_name, arg2_name};
unsigned char arg_types[2];
uint64_t arg_values[2];
- SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
- SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+ SetTraceValue(std::forward<ARG1_TYPE>(arg1_val), &arg_types[0],
+ &arg_values[0]);
+ SetTraceValue(std::forward<ARG2_TYPE>(arg2_val), &arg_types[1],
+ &arg_values[1]);
return TRACE_EVENT_API_ADD_TRACE_EVENT(
phase, category_group_enabled, name, scope, id, bind_id, num_args,
arg_names, arg_types, arg_values, flags);
@@ -634,136 +665,21 @@ class CallStatsScopedTracer {
}
}
- void Initialize(Isolate* isolate, const uint8_t* category_group_enabled,
- const char* name);
+ void Initialize(v8::internal::Isolate* isolate,
+ const uint8_t* category_group_enabled, const char* name);
private:
void AddEndTraceEvent();
struct Data {
const uint8_t* category_group_enabled;
const char* name;
- Isolate* isolate;
+ v8::internal::Isolate* isolate;
};
bool has_parent_scope_;
Data* p_data_;
Data data_;
};
-// TraceEventCallStatsTimer is used to keep track of the stack of currently
-// active timers used for properly measuring the own time of a
-// RuntimeCallCounter.
-class TraceEventCallStatsTimer {
- public:
- TraceEventCallStatsTimer() : counter_(nullptr), parent_(nullptr) {}
- RuntimeCallCounter* counter() { return counter_; }
- base::ElapsedTimer timer() { return timer_; }
-
- private:
- friend class TraceEventStatsTable;
-
- V8_INLINE void Start(RuntimeCallCounter* counter,
- TraceEventCallStatsTimer* parent) {
- counter_ = counter;
- parent_ = parent;
- timer_.Start();
- }
-
- V8_INLINE TraceEventCallStatsTimer* Stop() {
- base::TimeDelta delta = timer_.Elapsed();
- timer_.Stop();
- counter_->count++;
- counter_->time += delta;
- if (parent_ != nullptr) {
- // Adjust parent timer so that it does not include sub timer's time.
- parent_->counter_->time -= delta;
- }
- return parent_;
- }
-
- RuntimeCallCounter* counter_;
- TraceEventCallStatsTimer* parent_;
- base::ElapsedTimer timer_;
-};
-
-class TraceEventStatsTable {
- public:
- typedef RuntimeCallCounter TraceEventStatsTable::*CounterId;
-
-#define CALL_RUNTIME_COUNTER(name) \
- RuntimeCallCounter name = RuntimeCallCounter(#name);
- FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER)
-#undef CALL_RUNTIME_COUNTER
-#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
- RuntimeCallCounter Runtime_##name = RuntimeCallCounter(#name);
- FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
-#undef CALL_RUNTIME_COUNTER
-#define CALL_BUILTIN_COUNTER(name) \
- RuntimeCallCounter Builtin_##name = RuntimeCallCounter(#name);
- BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
-#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) \
- RuntimeCallCounter API_##name = RuntimeCallCounter("API_" #name);
- FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)
-#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) \
- RuntimeCallCounter Handler_##name = RuntimeCallCounter(#name);
- FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
-#undef CALL_BUILTIN_COUNTER
-
- // Starting measuring the time for a function. This will establish the
- // connection to the parent counter for properly calculating the own times.
- static void Enter(Isolate* isolate, TraceEventCallStatsTimer* timer,
- CounterId counter_id);
-
- // Leave a scope for a measured runtime function. This will properly add
- // the time delta to the current_counter and subtract the delta from its
- // parent.
- static void Leave(Isolate* isolate, TraceEventCallStatsTimer* timer);
-
- void Reset();
- const char* Dump();
-
- TraceEventStatsTable() {
- Reset();
- in_use_ = false;
- }
-
- TraceEventCallStatsTimer* current_timer() { return current_timer_; }
- bool InUse() { return in_use_; }
-
- private:
- std::stringstream buffer_;
- std::unique_ptr<char[]> buffer_c_str_;
- size_t len_ = 0;
- // Counter to track recursive time events.
- TraceEventCallStatsTimer* current_timer_ = nullptr;
- bool in_use_;
-};
-
-class CounterScope {
- public:
- CounterScope(Isolate* isolate, TraceEventStatsTable::CounterId counter_id)
- : isolate_(nullptr) {
- if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED())) {
- isolate_ = isolate;
- TraceEventStatsTable::Enter(isolate_, &timer_, counter_id);
- }
- }
- ~CounterScope() {
- // A non-nullptr isolate_ means the stats table already entered the scope
- // and started the timer, we need to leave the scope and reset the timer
- // even when we stop tracing, otherwise we have the risk to have a dangling
- // pointer.
- if (V8_UNLIKELY(isolate_ != nullptr)) {
- TraceEventStatsTable::Leave(isolate_, &timer_);
- }
- }
-
- private:
- Isolate* isolate_;
- TraceEventCallStatsTimer timer_;
-};
-
} // namespace tracing
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/type-cache.h b/deps/v8/src/type-cache.h
deleted file mode 100644
index e7616ec3dc..0000000000
--- a/deps/v8/src/type-cache.h
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TYPE_CACHE_H_
-#define V8_TYPE_CACHE_H_
-
-#include "src/types.h"
-
-namespace v8 {
-namespace internal {
-
-class TypeCache final {
- private:
- // This has to be first for the initialization magic to work.
- base::AccountingAllocator allocator;
- Zone zone_;
-
- public:
- static TypeCache const& Get();
-
- TypeCache() : zone_(&allocator) {}
-
- Type* const kInt8 =
- CreateNative(CreateRange<int8_t>(), Type::UntaggedIntegral8());
- Type* const kUint8 =
- CreateNative(CreateRange<uint8_t>(), Type::UntaggedIntegral8());
- Type* const kUint8Clamped = kUint8;
- Type* const kInt16 =
- CreateNative(CreateRange<int16_t>(), Type::UntaggedIntegral16());
- Type* const kUint16 =
- CreateNative(CreateRange<uint16_t>(), Type::UntaggedIntegral16());
- Type* const kInt32 =
- CreateNative(Type::Signed32(), Type::UntaggedIntegral32());
- Type* const kUint32 =
- CreateNative(Type::Unsigned32(), Type::UntaggedIntegral32());
- Type* const kFloat32 = CreateNative(Type::Number(), Type::UntaggedFloat32());
- Type* const kFloat64 = CreateNative(Type::Number(), Type::UntaggedFloat64());
-
- Type* const kSmi = CreateNative(Type::SignedSmall(), Type::TaggedSigned());
- Type* const kHoleySmi = Type::Union(kSmi, Type::Hole(), zone());
- Type* const kHeapNumber = CreateNative(Type::Number(), Type::TaggedPointer());
-
- Type* const kSingletonZero = CreateRange(0.0, 0.0);
- Type* const kSingletonOne = CreateRange(1.0, 1.0);
- Type* const kSingletonTen = CreateRange(10.0, 10.0);
- Type* const kSingletonMinusOne = CreateRange(-1.0, -1.0);
- Type* const kZeroOrUndefined =
- Type::Union(kSingletonZero, Type::Undefined(), zone());
- Type* const kTenOrUndefined =
- Type::Union(kSingletonTen, Type::Undefined(), zone());
- Type* const kMinusOneOrZero = CreateRange(-1.0, 0.0);
- Type* const kMinusOneToOneOrMinusZeroOrNaN = Type::Union(
- Type::Union(CreateRange(-1.0, 1.0), Type::MinusZero(), zone()),
- Type::NaN(), zone());
- Type* const kZeroOrOne = CreateRange(0.0, 1.0);
- Type* const kZeroOrOneOrNaN = Type::Union(kZeroOrOne, Type::NaN(), zone());
- Type* const kZeroToThirtyOne = CreateRange(0.0, 31.0);
- Type* const kZeroToThirtyTwo = CreateRange(0.0, 32.0);
- Type* const kZeroish =
- Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
- Type* const kInteger = CreateRange(-V8_INFINITY, V8_INFINITY);
- Type* const kIntegerOrMinusZero =
- Type::Union(kInteger, Type::MinusZero(), zone());
- Type* const kIntegerOrMinusZeroOrNaN =
- Type::Union(kIntegerOrMinusZero, Type::NaN(), zone());
- Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
- Type* const kPositiveIntegerOrMinusZero =
- Type::Union(kPositiveInteger, Type::MinusZero(), zone());
- Type* const kPositiveIntegerOrMinusZeroOrNaN =
- Type::Union(kPositiveIntegerOrMinusZero, Type::NaN(), zone());
-
- Type* const kAdditiveSafeInteger =
- CreateRange(-4503599627370496.0, 4503599627370496.0);
- Type* const kSafeInteger = CreateRange(-kMaxSafeInteger, kMaxSafeInteger);
- Type* const kAdditiveSafeIntegerOrMinusZero =
- Type::Union(kAdditiveSafeInteger, Type::MinusZero(), zone());
- Type* const kSafeIntegerOrMinusZero =
- Type::Union(kSafeInteger, Type::MinusZero(), zone());
- Type* const kPositiveSafeInteger = CreateRange(0.0, kMaxSafeInteger);
-
- Type* const kUntaggedUndefined =
- Type::Intersect(Type::Undefined(), Type::Untagged(), zone());
-
- // Asm.js related types.
- Type* const kAsmSigned = kInt32;
- Type* const kAsmUnsigned = kUint32;
- Type* const kAsmInt = Type::Union(kAsmSigned, kAsmUnsigned, zone());
- Type* const kAsmFixnum = Type::Intersect(kAsmSigned, kAsmUnsigned, zone());
- Type* const kAsmFloat = kFloat32;
- Type* const kAsmDouble = kFloat64;
- Type* const kAsmFloatQ = Type::Union(kAsmFloat, kUntaggedUndefined, zone());
- Type* const kAsmDoubleQ = Type::Union(kAsmDouble, kUntaggedUndefined, zone());
- // Not part of the Asm.js type hierarchy, but represents a part of what
- // intish encompasses.
- Type* const kAsmIntQ = Type::Union(kAsmInt, kUntaggedUndefined, zone());
- Type* const kAsmFloatDoubleQ = Type::Union(kAsmFloatQ, kAsmDoubleQ, zone());
- // Asm.js size unions.
- Type* const kAsmSize8 = Type::Union(kInt8, kUint8, zone());
- Type* const kAsmSize16 = Type::Union(kInt16, kUint16, zone());
- Type* const kAsmSize32 =
- Type::Union(Type::Union(kInt32, kUint32, zone()), kAsmFloat, zone());
- Type* const kAsmSize64 = kFloat64;
- // Asm.js other types.
- Type* const kAsmComparable = Type::Union(
- kAsmSigned,
- Type::Union(kAsmUnsigned, Type::Union(kAsmDouble, kAsmFloat, zone()),
- zone()),
- zone());
- Type* const kAsmIntArrayElement =
- Type::Union(Type::Union(kInt8, kUint8, zone()),
- Type::Union(Type::Union(kInt16, kUint16, zone()),
- Type::Union(kInt32, kUint32, zone()), zone()),
- zone());
-
- // The FixedArray::length property always containts a smi in the range
- // [0, FixedArray::kMaxLength].
- Type* const kFixedArrayLengthType = CreateNative(
- CreateRange(0.0, FixedArray::kMaxLength), Type::TaggedSigned());
-
- // The FixedDoubleArray::length property always containts a smi in the range
- // [0, FixedDoubleArray::kMaxLength].
- Type* const kFixedDoubleArrayLengthType = CreateNative(
- CreateRange(0.0, FixedDoubleArray::kMaxLength), Type::TaggedSigned());
-
- // The JSArray::length property always contains a tagged number in the range
- // [0, kMaxUInt32].
- Type* const kJSArrayLengthType =
- CreateNative(Type::Unsigned32(), Type::Tagged());
-
- // The JSTyped::length property always contains a tagged number in the range
- // [0, kMaxSmiValue].
- Type* const kJSTypedArrayLengthType =
- CreateNative(Type::UnsignedSmall(), Type::TaggedSigned());
-
- // The String::length property always contains a smi in the range
- // [0, String::kMaxLength].
- Type* const kStringLengthType =
- CreateNative(CreateRange(0.0, String::kMaxLength), Type::TaggedSigned());
-
-#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
- Type* const k##TypeName##Array = CreateArray(k##TypeName);
- TYPED_ARRAYS(TYPED_ARRAY)
-#undef TYPED_ARRAY
-
- private:
- Type* CreateArray(Type* element) { return Type::Array(element, zone()); }
-
- Type* CreateArrayFunction(Type* array) {
- Type* arg1 = Type::Union(Type::Unsigned32(), Type::Object(), zone());
- Type* arg2 = Type::Union(Type::Unsigned32(), Type::Undefined(), zone());
- Type* arg3 = arg2;
- return Type::Function(array, arg1, arg2, arg3, zone());
- }
-
- Type* CreateNative(Type* semantic, Type* representation) {
- return Type::Intersect(semantic, representation, zone());
- }
-
- template <typename T>
- Type* CreateRange() {
- return CreateRange(std::numeric_limits<T>::min(),
- std::numeric_limits<T>::max());
- }
-
- Type* CreateRange(double min, double max) {
- return Type::Range(min, max, zone());
- }
-
- Zone* zone() { return &zone_; }
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TYPE_CACHE_H_
diff --git a/deps/v8/src/type-feedback-vector-inl.h b/deps/v8/src/type-feedback-vector-inl.h
index 771021fb99..f70f01888f 100644
--- a/deps/v8/src/type-feedback-vector-inl.h
+++ b/deps/v8/src/type-feedback-vector-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_TYPE_FEEDBACK_VECTOR_INL_H_
#define V8_TYPE_FEEDBACK_VECTOR_INL_H_
+#include "src/globals.h"
#include "src/type-feedback-vector.h"
namespace v8 {
@@ -52,7 +53,13 @@ TypeFeedbackVector* TypeFeedbackVector::cast(Object* obj) {
int TypeFeedbackMetadata::GetSlotSize(FeedbackVectorSlotKind kind) {
DCHECK_NE(FeedbackVectorSlotKind::INVALID, kind);
DCHECK_NE(FeedbackVectorSlotKind::KINDS_NUMBER, kind);
- return kind == FeedbackVectorSlotKind::GENERAL ? 1 : 2;
+ if (kind == FeedbackVectorSlotKind::GENERAL ||
+ kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC ||
+ kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC) {
+ return 1;
+ }
+
+ return 2;
}
bool TypeFeedbackMetadata::SlotRequiresName(FeedbackVectorSlotKind kind) {
@@ -65,6 +72,8 @@ bool TypeFeedbackMetadata::SlotRequiresName(FeedbackVectorSlotKind kind) {
case FeedbackVectorSlotKind::KEYED_LOAD_IC:
case FeedbackVectorSlotKind::STORE_IC:
case FeedbackVectorSlotKind::KEYED_STORE_IC:
+ case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
+ case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC:
case FeedbackVectorSlotKind::GENERAL:
case FeedbackVectorSlotKind::INVALID:
return false;
@@ -77,22 +86,20 @@ bool TypeFeedbackMetadata::SlotRequiresName(FeedbackVectorSlotKind kind) {
}
bool TypeFeedbackVector::is_empty() const {
- if (length() == 0) return true;
- DCHECK(length() > kReservedIndexCount);
- return false;
+ return length() == kReservedIndexCount;
}
-
int TypeFeedbackVector::slot_count() const {
- if (length() == 0) return 0;
- DCHECK(length() > kReservedIndexCount);
return length() - kReservedIndexCount;
}
TypeFeedbackMetadata* TypeFeedbackVector::metadata() const {
- return is_empty() ? TypeFeedbackMetadata::cast(GetHeap()->empty_fixed_array())
- : TypeFeedbackMetadata::cast(get(kMetadataIndex));
+ return TypeFeedbackMetadata::cast(get(kMetadataIndex));
+}
+
+int TypeFeedbackVector::invocation_count() const {
+ return Smi::cast(get(kInvocationCountIndex))->value();
}
// Conversion from an integer index to either a slot or an ic slot.
@@ -113,23 +120,93 @@ void TypeFeedbackVector::Set(FeedbackVectorSlot slot, Object* value,
set(GetIndex(slot), value, mode);
}
+// Helper function to transform the feedback to BinaryOperationHint.
+BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
+ switch (type_feedback) {
+ case BinaryOperationFeedback::kNone:
+ return BinaryOperationHint::kNone;
+ case BinaryOperationFeedback::kSignedSmall:
+ return BinaryOperationHint::kSignedSmall;
+ case BinaryOperationFeedback::kNumber:
+ return BinaryOperationHint::kNumberOrOddball;
+ case BinaryOperationFeedback::kString:
+ return BinaryOperationHint::kString;
+ case BinaryOperationFeedback::kAny:
+ default:
+ return BinaryOperationHint::kAny;
+ }
+ UNREACHABLE();
+ return BinaryOperationHint::kNone;
+}
-void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic) {
+// Helper function to transform the feedback to CompareOperationHint.
+CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
+ switch (type_feedback) {
+ case CompareOperationFeedback::kNone:
+ return CompareOperationHint::kNone;
+ case CompareOperationFeedback::kSignedSmall:
+ return CompareOperationHint::kSignedSmall;
+ case CompareOperationFeedback::kNumber:
+ return CompareOperationHint::kNumber;
+ default:
+ return CompareOperationHint::kAny;
+ }
+ UNREACHABLE();
+ return CompareOperationHint::kNone;
+}
+
+void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic,
+ int* vector_ic_count,
+ bool code_is_interpreted) {
Object* uninitialized_sentinel =
TypeFeedbackVector::RawUninitializedSentinel(GetIsolate());
Object* megamorphic_sentinel =
*TypeFeedbackVector::MegamorphicSentinel(GetIsolate());
int with = 0;
int gen = 0;
+ int total = 0;
TypeFeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
FeedbackVectorSlot slot = iter.Next();
FeedbackVectorSlotKind kind = iter.kind();
Object* obj = Get(slot);
- if (obj != uninitialized_sentinel &&
- kind != FeedbackVectorSlotKind::GENERAL) {
- if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
+ if (kind == FeedbackVectorSlotKind::GENERAL) {
+ continue;
+ }
+ total++;
+
+ if (obj != uninitialized_sentinel) {
+ if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC ||
+ kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC) {
+ // If we are not running interpreted code, we need to ignore
+ // the special ic slots for binaryop/compare used by the
+ // interpreter.
+ // TODO(mvstanton): Remove code_is_interpreted when full code
+ // is retired from service.
+ if (!code_is_interpreted) continue;
+
+ DCHECK(obj->IsSmi());
+ int op_feedback = static_cast<int>(Smi::cast(obj)->value());
+ if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC) {
+ CompareOperationHint hint =
+ CompareOperationHintFromFeedback(op_feedback);
+ if (hint == CompareOperationHint::kAny) {
+ gen++;
+ } else if (hint != CompareOperationHint::kNone) {
+ with++;
+ }
+ } else {
+ DCHECK(kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC);
+ BinaryOperationHint hint =
+ BinaryOperationHintFromFeedback(op_feedback);
+ if (hint == BinaryOperationHint::kAny) {
+ gen++;
+ } else if (hint != BinaryOperationHint::kNone) {
+ with++;
+ }
+ }
+ } else if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
with++;
} else if (obj == megamorphic_sentinel) {
gen++;
@@ -139,6 +216,7 @@ void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic) {
*with_type_info = with;
*generic = gen;
+ *vector_ic_count = total;
}
Handle<Symbol> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
diff --git a/deps/v8/src/type-feedback-vector.cc b/deps/v8/src/type-feedback-vector.cc
index 61f5e8b9c7..30bc2d4153 100644
--- a/deps/v8/src/type-feedback-vector.cc
+++ b/deps/v8/src/type-feedback-vector.cc
@@ -102,9 +102,7 @@ Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(Isolate* isolate,
Handle<UnseededNumberDictionary> names;
if (name_count) {
- names = UnseededNumberDictionary::New(
- isolate, base::bits::RoundUpToPowerOfTwo32(name_count), TENURED,
- USE_CUSTOM_MINIMUM_CAPACITY);
+ names = UnseededNumberDictionary::New(isolate, name_count, TENURED);
}
int name_index = 0;
@@ -114,7 +112,10 @@ Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(Isolate* isolate,
if (SlotRequiresName(kind)) {
Handle<String> name = spec->GetName(name_index);
DCHECK(!name.is_null());
- names = UnseededNumberDictionary::AtNumberPut(names, i, name);
+ Handle<UnseededNumberDictionary> new_names =
+ UnseededNumberDictionary::AtNumberPut(names, i, name);
+ DCHECK_EQ(*new_names, *names);
+ names = new_names;
name_index++;
}
}
@@ -202,6 +203,10 @@ const char* TypeFeedbackMetadata::Kind2String(FeedbackVectorSlotKind kind) {
return "STORE_IC";
case FeedbackVectorSlotKind::KEYED_STORE_IC:
return "KEYED_STORE_IC";
+ case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
+ return "INTERPRETER_BINARYOP_IC";
+ case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC:
+ return "INTERPRETER_COMPARE_IC";
case FeedbackVectorSlotKind::GENERAL:
return "STUB";
case FeedbackVectorSlotKind::KINDS_NUMBER:
@@ -230,11 +235,13 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::New(
const int slot_count = metadata->slot_count();
const int length = slot_count + kReservedIndexCount;
if (length == kReservedIndexCount) {
- return Handle<TypeFeedbackVector>::cast(factory->empty_fixed_array());
+ return Handle<TypeFeedbackVector>::cast(
+ factory->empty_type_feedback_vector());
}
Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
array->set(kMetadataIndex, *metadata);
+ array->set(kInvocationCountIndex, Smi::FromInt(0));
DisallowHeapAllocation no_gc;
@@ -250,12 +257,18 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::New(
Object* value;
if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
value = *factory->empty_weak_cell();
+ } else if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC ||
+ kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC) {
+ value = Smi::FromInt(0);
} else {
value = *uninitialized_sentinel;
}
array->set(index, value, SKIP_WRITE_BARRIER);
+
+ value = kind == FeedbackVectorSlotKind::CALL_IC ? Smi::FromInt(0)
+ : *uninitialized_sentinel;
for (int j = 1; j < entry_size; j++) {
- array->set(index + j, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+ array->set(index + j, value, SKIP_WRITE_BARRIER);
}
i += entry_size;
}
@@ -334,6 +347,13 @@ void TypeFeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
nexus.Clear(shared->code());
break;
}
+ case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
+ case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
+ DCHECK(Get(slot)->IsSmi());
+ // don't clear these smi slots.
+ // Set(slot, Smi::FromInt(0));
+ break;
+ }
case FeedbackVectorSlotKind::GENERAL: {
if (obj->IsHeapObject()) {
InstanceType instance_type =
@@ -620,16 +640,25 @@ InlineCacheState CallICNexus::StateFromFeedback() const {
int CallICNexus::ExtractCallCount() {
Object* call_count = GetFeedbackExtra();
- if (call_count->IsSmi()) {
- int value = Smi::cast(call_count)->value();
- return value;
- }
- return -1;
+ CHECK(call_count->IsSmi());
+ int value = Smi::cast(call_count)->value();
+ return value;
}
+float CallICNexus::ComputeCallFrequency() {
+ double const invocation_count = vector()->invocation_count();
+ double const call_count = ExtractCallCount();
+ return static_cast<float>(call_count / invocation_count);
+}
void CallICNexus::Clear(Code* host) { CallIC::Clear(GetIsolate(), host, this); }
+void CallICNexus::ConfigureUninitialized() {
+ Isolate* isolate = GetIsolate();
+ SetFeedback(*TypeFeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(Smi::FromInt(0), SKIP_WRITE_BARRIER);
+}
void CallICNexus::ConfigureMonomorphicArray() {
Object* feedback = GetFeedback();
@@ -650,10 +679,13 @@ void CallICNexus::ConfigureMonomorphic(Handle<JSFunction> function) {
void CallICNexus::ConfigureMegamorphic() {
- FeedbackNexus::ConfigureMegamorphic();
+ SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
+ Smi* count = Smi::cast(GetFeedbackExtra());
+ int new_count = count->value() + 1;
+ SetFeedbackExtra(Smi::FromInt(new_count), SKIP_WRITE_BARRIER);
}
-
void CallICNexus::ConfigureMegamorphic(int call_count) {
SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(GetIsolate()),
SKIP_WRITE_BARRIER);
@@ -1020,5 +1052,38 @@ IcCheckType KeyedStoreICNexus::GetKeyType() const {
}
return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
}
+
+InlineCacheState BinaryOpICNexus::StateFromFeedback() const {
+ BinaryOperationHint hint = GetBinaryOperationFeedback();
+ if (hint == BinaryOperationHint::kNone) {
+ return UNINITIALIZED;
+ } else if (hint == BinaryOperationHint::kAny) {
+ return GENERIC;
+ }
+
+ return MONOMORPHIC;
+}
+
+InlineCacheState CompareICNexus::StateFromFeedback() const {
+ CompareOperationHint hint = GetCompareOperationFeedback();
+ if (hint == CompareOperationHint::kNone) {
+ return UNINITIALIZED;
+ } else if (hint == CompareOperationHint::kAny) {
+ return GENERIC;
+ }
+
+ return MONOMORPHIC;
+}
+
+BinaryOperationHint BinaryOpICNexus::GetBinaryOperationFeedback() const {
+ int feedback = Smi::cast(GetFeedback())->value();
+ return BinaryOperationHintFromFeedback(feedback);
+}
+
+CompareOperationHint CompareICNexus::GetCompareOperationFeedback() const {
+ int feedback = Smi::cast(GetFeedback())->value();
+ return CompareOperationHintFromFeedback(feedback);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
index 5355ee7188..af69499b04 100644
--- a/deps/v8/src/type-feedback-vector.h
+++ b/deps/v8/src/type-feedback-vector.h
@@ -10,7 +10,8 @@
#include "src/base/logging.h"
#include "src/elements-kind.h"
#include "src/objects.h"
-#include "src/zone-containers.h"
+#include "src/type-hints.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -27,6 +28,8 @@ enum class FeedbackVectorSlotKind {
KEYED_LOAD_IC,
STORE_IC,
KEYED_STORE_IC,
+ INTERPRETER_BINARYOP_IC,
+ INTERPRETER_COMPARE_IC,
// This is a general purpose slot that occupies one feedback vector element.
GENERAL,
@@ -67,6 +70,14 @@ class FeedbackVectorSpecBase {
return AddSlot(FeedbackVectorSlotKind::KEYED_STORE_IC);
}
+ FeedbackVectorSlot AddInterpreterBinaryOpICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC);
+ }
+
+ FeedbackVectorSlot AddInterpreterCompareICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC);
+ }
+
FeedbackVectorSlot AddGeneralSlot() {
return AddSlot(FeedbackVectorSlotKind::GENERAL);
}
@@ -207,7 +218,7 @@ class TypeFeedbackMetadata : public FixedArray {
static const char* Kind2String(FeedbackVectorSlotKind kind);
private:
- static const int kFeedbackVectorSlotKindBits = 4;
+ static const int kFeedbackVectorSlotKindBits = 5;
STATIC_ASSERT(static_cast<int>(FeedbackVectorSlotKind::KINDS_NUMBER) <
(1 << kFeedbackVectorSlotKindBits));
@@ -222,11 +233,10 @@ class TypeFeedbackMetadata : public FixedArray {
// The shape of the TypeFeedbackVector is an array with:
// 0: feedback metadata
-// 1: ics_with_types
-// 2: ics_with_generic_info
-// 3: feedback slot #0
+// 1: invocation count
+// 2: feedback slot #0
// ...
-// 3 + slot_count - 1: feedback slot #(slot_count-1)
+// 2 + slot_count - 1: feedback slot #(slot_count-1)
//
class TypeFeedbackVector : public FixedArray {
public:
@@ -234,9 +244,11 @@ class TypeFeedbackVector : public FixedArray {
static inline TypeFeedbackVector* cast(Object* obj);
static const int kMetadataIndex = 0;
- static const int kReservedIndexCount = 1;
+ static const int kInvocationCountIndex = 1;
+ static const int kReservedIndexCount = 2;
- inline void ComputeCounts(int* with_type_info, int* generic);
+ inline void ComputeCounts(int* with_type_info, int* generic,
+ int* vector_ic_count, bool code_is_interpreted);
inline bool is_empty() const;
@@ -244,6 +256,7 @@ class TypeFeedbackVector : public FixedArray {
inline int slot_count() const;
inline TypeFeedbackMetadata* metadata() const;
+ inline int invocation_count() const;
// Conversion from a slot to an integer index to the underlying array.
static int GetIndex(FeedbackVectorSlot slot) {
@@ -461,6 +474,7 @@ class CallICNexus final : public FeedbackNexus {
void Clear(Code* host);
+ void ConfigureUninitialized() override;
void ConfigureMonomorphicArray();
void ConfigureMonomorphic(Handle<JSFunction> function);
void ConfigureMegamorphic() final;
@@ -481,6 +495,10 @@ class CallICNexus final : public FeedbackNexus {
}
int ExtractCallCount();
+
+ // Compute the call frequency based on the call count and the invocation
+ // count (taken from the type feedback vector).
+ float ComputeCallFrequency();
};
@@ -548,6 +566,10 @@ class KeyedLoadICNexus : public FeedbackNexus {
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
}
+ explicit KeyedLoadICNexus(Isolate* isolate)
+ : FeedbackNexus(
+ TypeFeedbackVector::DummyVector(isolate),
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot)) {}
KeyedLoadICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
@@ -630,6 +652,72 @@ class KeyedStoreICNexus : public FeedbackNexus {
InlineCacheState StateFromFeedback() const override;
Name* FindFirstName() const override;
};
+
+class BinaryOpICNexus final : public FeedbackNexus {
+ public:
+ BinaryOpICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC,
+ vector->GetKind(slot));
+ }
+ BinaryOpICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC,
+ vector->GetKind(slot));
+ }
+
+ void Clear(Code* host);
+
+ InlineCacheState StateFromFeedback() const final;
+ BinaryOperationHint GetBinaryOperationFeedback() const;
+
+ int ExtractMaps(MapHandleList* maps) const final {
+ // BinaryOpICs don't record map feedback.
+ return 0;
+ }
+ MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
+ return MaybeHandle<Code>();
+ }
+ bool FindHandlers(List<Handle<Object>>* code_list,
+ int length = -1) const final {
+ return length == 0;
+ }
+};
+
+class CompareICNexus final : public FeedbackNexus {
+ public:
+ CompareICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC,
+ vector->GetKind(slot));
+ }
+ CompareICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC,
+ vector->GetKind(slot));
+ }
+
+ void Clear(Code* host);
+
+ InlineCacheState StateFromFeedback() const final;
+ CompareOperationHint GetCompareOperationFeedback() const;
+
+ int ExtractMaps(MapHandleList* maps) const final {
+ // BinaryOpICs don't record map feedback.
+ return 0;
+ }
+ MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
+ return MaybeHandle<Code>();
+ }
+ bool FindHandlers(List<Handle<Object>>* code_list,
+ int length = -1) const final {
+ return length == 0;
+ }
+};
+
+inline BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
+inline CompareOperationHint CompareOperationHintFromFeedback(int type_feedback);
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/type-hints.cc b/deps/v8/src/type-hints.cc
index a07a8707b1..ff00eeff8b 100644
--- a/deps/v8/src/compiler/type-hints.cc
+++ b/deps/v8/src/type-hints.cc
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/type-hints.h"
+#include "src/type-hints.h"
namespace v8 {
namespace internal {
-namespace compiler {
std::ostream& operator<<(std::ostream& os, BinaryOperationHint hint) {
switch (hint) {
@@ -18,6 +17,8 @@ std::ostream& operator<<(std::ostream& os, BinaryOperationHint hint) {
return os << "Signed32";
case BinaryOperationHint::kNumberOrOddball:
return os << "NumberOrOddball";
+ case BinaryOperationHint::kString:
+ return os << "String";
case BinaryOperationHint::kAny:
return os << "Any";
}
@@ -86,6 +87,5 @@ std::ostream& operator<<(std::ostream& os, ToBooleanHints hints) {
return os;
}
-} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/type-hints.h b/deps/v8/src/type-hints.h
index ad94491511..cdf470956f 100644
--- a/deps/v8/src/compiler/type-hints.h
+++ b/deps/v8/src/type-hints.h
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_TYPE_HINTS_H_
-#define V8_COMPILER_TYPE_HINTS_H_
+#ifndef V8_TYPE_HINTS_H_
+#define V8_TYPE_HINTS_H_
#include "src/base/flags.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
-namespace compiler {
// Type hints for an binary operation.
enum class BinaryOperationHint : uint8_t {
@@ -18,6 +17,7 @@ enum class BinaryOperationHint : uint8_t {
kSignedSmall,
kSigned32,
kNumberOrOddball,
+ kString,
kAny
};
@@ -66,8 +66,7 @@ std::ostream& operator<<(std::ostream&, ToBooleanHints);
DEFINE_OPERATORS_FOR_FLAGS(ToBooleanHints)
-} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_TYPE_HINTS_H_
+#endif // V8_TYPE_HINTS_H_
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 8289d91125..ce0ab6ca6a 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -6,7 +6,6 @@
#include "src/ast/ast.h"
#include "src/code-stubs.h"
-#include "src/compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/objects-inl.h"
@@ -192,58 +191,129 @@ Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(
return Handle<AllocationSite>::null();
}
+namespace {
+
+AstType* CompareOpHintToType(CompareOperationHint hint) {
+ switch (hint) {
+ case CompareOperationHint::kNone:
+ return AstType::None();
+ case CompareOperationHint::kSignedSmall:
+ return AstType::SignedSmall();
+ case CompareOperationHint::kNumber:
+ return AstType::Number();
+ case CompareOperationHint::kNumberOrOddball:
+ return AstType::NumberOrOddball();
+ case CompareOperationHint::kAny:
+ return AstType::Any();
+ }
+ UNREACHABLE();
+ return AstType::None();
+}
+
+AstType* BinaryOpHintToType(BinaryOperationHint hint) {
+ switch (hint) {
+ case BinaryOperationHint::kNone:
+ return AstType::None();
+ case BinaryOperationHint::kSignedSmall:
+ return AstType::SignedSmall();
+ case BinaryOperationHint::kSigned32:
+ return AstType::Signed32();
+ case BinaryOperationHint::kNumberOrOddball:
+ return AstType::Number();
+ case BinaryOperationHint::kString:
+ return AstType::String();
+ case BinaryOperationHint::kAny:
+ return AstType::Any();
+ }
+ UNREACHABLE();
+ return AstType::None();
+}
-void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
- Type** left_type,
- Type** right_type,
- Type** combined_type) {
+} // end anonymous namespace
+
+void TypeFeedbackOracle::CompareType(TypeFeedbackId id, FeedbackVectorSlot slot,
+ AstType** left_type, AstType** right_type,
+ AstType** combined_type) {
Handle<Object> info = GetInfo(id);
+ // A check for a valid slot is not sufficient here. InstanceOf collects
+ // type feedback in a General slot.
if (!info->IsCode()) {
- // For some comparisons we don't have ICs, e.g. LiteralCompareTypeof.
- *left_type = *right_type = *combined_type = Type::None();
+ // For some comparisons we don't have type feedback, e.g.
+ // LiteralCompareTypeof.
+ *left_type = *right_type = *combined_type = AstType::None();
return;
}
- Handle<Code> code = Handle<Code>::cast(info);
+ // Feedback from Ignition. The feedback slot will be allocated and initialized
+ // to AstType::None() even when ignition is not enabled. So it is safe to get
+ // feedback from the type feedback vector.
+ DCHECK(!slot.IsInvalid());
+ CompareICNexus nexus(feedback_vector_, slot);
+ *left_type = *right_type = *combined_type =
+ CompareOpHintToType(nexus.GetCompareOperationFeedback());
+
+ // Merge the feedback from full-codegen if available.
+ Handle<Code> code = Handle<Code>::cast(info);
Handle<Map> map;
Map* raw_map = code->FindFirstMap();
if (raw_map != NULL) Map::TryUpdate(handle(raw_map)).ToHandle(&map);
if (code->is_compare_ic_stub()) {
CompareICStub stub(code->stub_key(), isolate());
- *left_type = CompareICState::StateToType(zone(), stub.left());
- *right_type = CompareICState::StateToType(zone(), stub.right());
- *combined_type = CompareICState::StateToType(zone(), stub.state(), map);
+ AstType* left_type_from_ic =
+ CompareICState::StateToType(zone(), stub.left());
+ *left_type = AstType::Union(*left_type, left_type_from_ic, zone());
+ AstType* right_type_from_ic =
+ CompareICState::StateToType(zone(), stub.right());
+ *right_type = AstType::Union(*right_type, right_type_from_ic, zone());
+ AstType* combined_type_from_ic =
+ CompareICState::StateToType(zone(), stub.state(), map);
+ *combined_type =
+ AstType::Union(*combined_type, combined_type_from_ic, zone());
}
}
-
-void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
- Type** left,
- Type** right,
- Type** result,
+void TypeFeedbackOracle::BinaryType(TypeFeedbackId id, FeedbackVectorSlot slot,
+ AstType** left, AstType** right,
+ AstType** result,
Maybe<int>* fixed_right_arg,
Handle<AllocationSite>* allocation_site,
Token::Value op) {
Handle<Object> object = GetInfo(id);
- if (!object->IsCode()) {
- // For some binary ops we don't have ICs, e.g. Token::COMMA, but for the
- // operations covered by the BinaryOpIC we should always have them.
+ if (slot.IsInvalid()) {
+ // For some binary ops we don't have ICs or feedback slots,
+ // e.g. Token::COMMA, but for the operations covered by the BinaryOpIC we
+ // should always have them.
+ DCHECK(!object->IsCode());
DCHECK(op < BinaryOpICState::FIRST_TOKEN ||
op > BinaryOpICState::LAST_TOKEN);
- *left = *right = *result = Type::None();
+ *left = *right = *result = AstType::None();
*fixed_right_arg = Nothing<int>();
*allocation_site = Handle<AllocationSite>::null();
return;
}
+
+ // Feedback from Ignition. The feedback slot will be allocated and initialized
+ // to AstType::None() even when ignition is not enabled. So it is safe to get
+ // feedback from the type feedback vector.
+ DCHECK(!slot.IsInvalid());
+ BinaryOpICNexus nexus(feedback_vector_, slot);
+ *left = *right = *result =
+ BinaryOpHintToType(nexus.GetBinaryOperationFeedback());
+ *fixed_right_arg = Nothing<int>();
+ *allocation_site = Handle<AllocationSite>::null();
+
+ if (!object->IsCode()) return;
+
+ // Merge the feedback from full-codegen if available.
Handle<Code> code = Handle<Code>::cast(object);
DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
BinaryOpICState state(isolate(), code->extra_ic_state());
DCHECK_EQ(op, state.op());
- *left = state.GetLeftType();
- *right = state.GetRightType();
- *result = state.GetResultType();
+ *left = AstType::Union(*left, state.GetLeftType(), zone());
+ *right = AstType::Union(*right, state.GetRightType(), zone());
+ *result = AstType::Union(*result, state.GetResultType(), zone());
*fixed_right_arg = state.fixed_right_arg();
AllocationSite* first_allocation_site = code->FindFirstAllocationSite();
@@ -254,14 +324,24 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
}
}
-
-Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
+AstType* TypeFeedbackOracle::CountType(TypeFeedbackId id,
+ FeedbackVectorSlot slot) {
Handle<Object> object = GetInfo(id);
- if (!object->IsCode()) return Type::None();
+ if (slot.IsInvalid()) {
+ DCHECK(!object->IsCode());
+ return AstType::None();
+ }
+
+ DCHECK(!slot.IsInvalid());
+ BinaryOpICNexus nexus(feedback_vector_, slot);
+ AstType* type = BinaryOpHintToType(nexus.GetBinaryOperationFeedback());
+
+ if (!object->IsCode()) return type;
+
Handle<Code> code = Handle<Code>::cast(object);
DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
BinaryOpICState state(isolate(), code->extra_ic_state());
- return state.GetLeftType();
+ return AstType::Union(type, state.GetLeftType(), zone());
}
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 4e8dc54d02..06a0c9ebd0 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -6,11 +6,11 @@
#define V8_TYPE_INFO_H_
#include "src/allocation.h"
+#include "src/ast/ast-types.h"
#include "src/contexts.h"
#include "src/globals.h"
#include "src/parsing/token.h"
-#include "src/types.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -77,20 +77,16 @@ class TypeFeedbackOracle: public ZoneObject {
uint16_t ToBooleanTypes(TypeFeedbackId id);
// Get type information for arithmetic operations and compares.
- void BinaryType(TypeFeedbackId id,
- Type** left,
- Type** right,
- Type** result,
+ void BinaryType(TypeFeedbackId id, FeedbackVectorSlot slot, AstType** left,
+ AstType** right, AstType** result,
Maybe<int>* fixed_right_arg,
Handle<AllocationSite>* allocation_site,
Token::Value operation);
- void CompareType(TypeFeedbackId id,
- Type** left,
- Type** right,
- Type** combined);
+ void CompareType(TypeFeedbackId id, FeedbackVectorSlot slot, AstType** left,
+ AstType** right, AstType** combined);
- Type* CountType(TypeFeedbackId id);
+ AstType* CountType(TypeFeedbackId id, FeedbackVectorSlot slot);
Zone* zone() const { return zone_; }
Isolate* isolate() const { return isolate_; }
diff --git a/deps/v8/src/unicode-decoder.h b/deps/v8/src/unicode-decoder.h
index c030841166..35d23a2ac7 100644
--- a/deps/v8/src/unicode-decoder.h
+++ b/deps/v8/src/unicode-decoder.h
@@ -7,10 +7,11 @@
#include <sys/types.h>
#include "src/globals.h"
+#include "src/utils.h"
namespace unibrow {
-class Utf8DecoderBase {
+class V8_EXPORT_PRIVATE Utf8DecoderBase {
public:
// Initialization done in subclass.
inline Utf8DecoderBase();
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index db98be8675..015f8a27f2 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -190,8 +190,7 @@ static int LookupMapping(const int32_t* table,
}
}
-
-static inline size_t NonASCIISequenceLength(byte first) {
+static inline uint8_t NonASCIISequenceLength(byte first) {
// clang-format off
static const uint8_t lengths[256] = {
// The first 128 entries correspond to ASCII characters.
@@ -229,80 +228,137 @@ static inline bool IsContinuationCharacter(byte chr) {
// This method decodes an UTF-8 value according to RFC 3629.
uchar Utf8::CalculateValue(const byte* str, size_t max_length, size_t* cursor) {
size_t length = NonASCIISequenceLength(str[0]);
- if (length == 0 || max_length < length) {
- *cursor += 1;
- return kBadChar;
- }
- if (length == 2) {
- if (!IsContinuationCharacter(str[1])) {
- *cursor += 1;
- return kBadChar;
- }
- *cursor += 2;
- return ((str[0] << 6) + str[1]) - 0x00003080;
+
+ // Check continuation characters.
+ size_t max_count = std::min(length, max_length);
+ size_t count = 1;
+ while (count < max_count && IsContinuationCharacter(str[count])) {
+ count++;
}
+ *cursor += count;
+
+ // There must be enough continuation characters.
+ if (count != length) return kBadChar;
+
+ // Check overly long sequences & other conditions.
if (length == 3) {
- switch (str[0]) {
- case 0xE0:
- // Overlong three-byte sequence.
- if (str[1] < 0xA0 || str[1] > 0xBF) {
- *cursor += 1;
- return kBadChar;
- }
- break;
- case 0xED:
- // High and low surrogate halves.
- if (str[1] < 0x80 || str[1] > 0x9F) {
- *cursor += 1;
- return kBadChar;
- }
- break;
- default:
- if (!IsContinuationCharacter(str[1])) {
- *cursor += 1;
- return kBadChar;
- }
- }
- if (!IsContinuationCharacter(str[2])) {
- *cursor += 1;
+ if (str[0] == 0xE0 && (str[1] < 0xA0 || str[1] > 0xBF)) {
+ // Overlong three-byte sequence?
+ return kBadChar;
+ } else if (str[0] == 0xED && (str[1] < 0x80 || str[1] > 0x9F)) {
+ // High and low surrogate halves?
return kBadChar;
}
- *cursor += 3;
- return ((str[0] << 12) + (str[1] << 6) + str[2]) - 0x000E2080;
- }
- DCHECK(length == 4);
- switch (str[0]) {
- case 0xF0:
+ } else if (length == 4) {
+ if (str[0] == 0xF0 && (str[1] < 0x90 || str[1] > 0xBF)) {
// Overlong four-byte sequence.
- if (str[1] < 0x90 || str[1] > 0xBF) {
- *cursor += 1;
- return kBadChar;
- }
- break;
- case 0xF4:
+ return kBadChar;
+ } else if (str[0] == 0xF4 && (str[1] < 0x80 || str[1] > 0x8F)) {
// Code points outside of the unicode range.
- if (str[1] < 0x80 || str[1] > 0x8F) {
- *cursor += 1;
- return kBadChar;
- }
- break;
- default:
- if (!IsContinuationCharacter(str[1])) {
- *cursor += 1;
- return kBadChar;
- }
+ return kBadChar;
+ }
}
- if (!IsContinuationCharacter(str[2])) {
- *cursor += 1;
- return kBadChar;
+
+ // All errors have been handled, so we only have to assemble the result.
+ switch (length) {
+ case 1:
+ return str[0];
+ case 2:
+ return ((str[0] << 6) + str[1]) - 0x00003080;
+ case 3:
+ return ((str[0] << 12) + (str[1] << 6) + str[2]) - 0x000E2080;
+ case 4:
+ return ((str[0] << 18) + (str[1] << 12) + (str[2] << 6) + str[3]) -
+ 0x03C82080;
}
- if (!IsContinuationCharacter(str[3])) {
- *cursor += 1;
+
+ UNREACHABLE();
+ return kBadChar;
+}
+
+uchar Utf8::ValueOfIncremental(byte next, Utf8IncrementalBuffer* buffer) {
+ DCHECK_NOT_NULL(buffer);
+
+ // The common case: 1-byte Utf8 (and no incomplete char in the buffer)
+ if (V8_LIKELY(next <= kMaxOneByteChar && *buffer == 0)) {
+ return static_cast<uchar>(next);
+ }
+
+ if (*buffer == 0) {
+ // We're at the start of a new character.
+ uint32_t kind = NonASCIISequenceLength(next);
+ if (kind >= 2 && kind <= 4) {
+ // Start of 2..4 byte character, and no buffer.
+
+ // The mask for the lower bits depends on the kind, and is
+ // 0x1F, 0x0F, 0x07 for kinds 2, 3, 4 respectively. We can get that
+ // with one shift.
+ uint8_t mask = 0x7f >> kind;
+
+ // Store the kind in the top nibble, and kind - 1 (i.e., remaining bytes)
+ // in 2nd nibble, and the value in the bottom three. The 2nd nibble is
+ // intended as a counter about how many bytes are still needed.
+ *buffer = kind << 28 | (kind - 1) << 24 | (next & mask);
+ return kIncomplete;
+ } else {
+ // No buffer, and not the start of a 1-byte char (handled at the
+ // beginning), and not the start of a 2..4 byte char? Bad char.
+ *buffer = 0;
+ return kBadChar;
+ }
+ } else if (*buffer <= 0xff) {
+ // We have one unprocessed byte left (from the last else case in this if
+ // statement).
+ uchar previous = *buffer;
+ *buffer = 0;
+ uchar t = ValueOfIncremental(previous, buffer);
+ if (t == kIncomplete) {
+ // If we have an incomplete character, process both the previous and the
+ // next byte at once.
+ return ValueOfIncremental(next, buffer);
+ } else {
+ // Otherwise, process the previous byte and save the next byte for next
+ // time.
+ DCHECK_EQ(0, *buffer);
+ *buffer = next;
+ return t;
+ }
+ } else if (IsContinuationCharacter(next)) {
+ // We're inside of a character, as described by buffer.
+
+ // How many bytes (excluding this one) do we still expect?
+ uint8_t bytes_expected = *buffer >> 28;
+ uint8_t bytes_left = (*buffer >> 24) & 0x0f;
+ bytes_left--;
+ // Update the value.
+ uint32_t value = ((*buffer & 0xffffff) << 6) | (next & 0x3F);
+ if (bytes_left) {
+ *buffer = (bytes_expected << 28 | bytes_left << 24 | value);
+ return kIncomplete;
+ } else {
+ *buffer = 0;
+ bool sequence_was_too_long = (bytes_expected == 2 && value < 0x80) ||
+ (bytes_expected == 3 && value < 0x800);
+ return sequence_was_too_long ? kBadChar : value;
+ }
+ } else {
+ // Within a character, but not a continuation character? Then the
+ // previous char was a bad char. But we need to save the current
+ // one.
+ *buffer = next;
return kBadChar;
}
- *cursor += 4;
- return ((str[0] << 18) + (str[1] << 12) + (str[2] << 6) + str[3]) -
- 0x03C82080;
+}
+
+uchar Utf8::ValueOfIncrementalFinish(Utf8IncrementalBuffer* buffer) {
+ DCHECK_NOT_NULL(buffer);
+ if (*buffer == 0) {
+ return kBufferEmpty;
+ } else {
+ // Process left-over chars. An incomplete char at the end maps to kBadChar.
+ uchar t = ValueOfIncremental(0, buffer);
+ return (t == kIncomplete) ? kBadChar : t;
+ }
}
bool Utf8::Validate(const byte* bytes, size_t length) {
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 35717bca86..1299a8ff9a 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -141,6 +141,8 @@ class Utf8 {
// The unicode replacement character, used to signal invalid unicode
// sequences (e.g. an orphan surrogate) when converting to a UTF-8 encoding.
static const uchar kBadChar = 0xFFFD;
+ static const uchar kBufferEmpty = 0x0;
+ static const uchar kIncomplete = 0xFFFFFFFC; // any non-valid code point.
static const unsigned kMaxEncodedSize = 4;
static const unsigned kMaxOneByteChar = 0x7f;
static const unsigned kMaxTwoByteChar = 0x7ff;
@@ -156,6 +158,11 @@ class Utf8 {
static const unsigned kMax16BitCodeUnitSize = 3;
static inline uchar ValueOf(const byte* str, size_t length, size_t* cursor);
+ typedef uint32_t Utf8IncrementalBuffer;
+ static uchar ValueOfIncremental(byte next_byte,
+ Utf8IncrementalBuffer* buffer);
+ static uchar ValueOfIncrementalFinish(Utf8IncrementalBuffer* buffer);
+
// Excludes non-characters from the set of valid code points.
static inline bool IsValidCharacter(uchar c);
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 16b5b7c61f..ef640c3b0e 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -387,8 +387,8 @@ void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
}
}
-
-MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper;
+V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
+ &MemCopyUint8Wrapper;
MemCopyUint16Uint8Function memcopy_uint16_uint8_function =
&MemCopyUint16Uint8Wrapper;
// Defined in codegen-arm.cc.
@@ -398,7 +398,8 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
Isolate* isolate, MemCopyUint16Uint8Function stub);
#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
-MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper;
+V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function =
+ &MemCopyUint8Wrapper;
// Defined in codegen-mips.cc.
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub);
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 8eca39207d..314ea9be9e 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -234,6 +234,10 @@ inline double Floor(double x) {
}
inline double Pow(double x, double y) {
+ if (y == 0.0) return 1.0;
+ if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
+ return std::numeric_limits<double>::quiet_NaN();
+ }
#if (defined(__MINGW64_VERSION_MAJOR) && \
(!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)) || \
defined(V8_OS_AIX)
@@ -433,7 +437,7 @@ void init_memcopy_functions(Isolate* isolate);
const int kMinComplexMemCopy = 64;
// Copy memory area. No restrictions.
-void MemMove(void* dest, const void* src, size_t size);
+V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size);
typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);
// Keep the distinction of "move" vs. "copy" for the benefit of other
@@ -444,7 +448,7 @@ V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
#elif defined(V8_HOST_ARCH_ARM)
typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
size_t size);
-extern MemCopyUint8Function memcopy_uint8_function;
+V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
size_t chars) {
memcpy(dest, src, chars);
@@ -455,7 +459,8 @@ V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
(*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
reinterpret_cast<const uint8_t*>(src), size);
}
-V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
+V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
+ size_t size) {
memmove(dest, src, size);
}
@@ -473,7 +478,7 @@ V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src,
#elif defined(V8_HOST_ARCH_MIPS)
typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
size_t size);
-extern MemCopyUint8Function memcopy_uint8_function;
+V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
size_t chars) {
memcpy(dest, src, chars);
@@ -484,7 +489,8 @@ V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
(*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
reinterpret_cast<const uint8_t*>(src), size);
}
-V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
+V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
+ size_t size) {
memmove(dest, src, size);
}
#else
@@ -492,7 +498,8 @@ V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
memcpy(dest, src, size);
}
-V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
+V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
+ size_t size) {
memmove(dest, src, size);
}
const int kMinComplexMemCopy = 16 * kPointerSize;
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
index 1adb2fe8a1..9a3824742f 100644
--- a/deps/v8/src/v8.gyp
+++ b/deps/v8/src/v8.gyp
@@ -34,10 +34,11 @@
'warmup_script%': "",
'v8_extra_library_files%': [],
'v8_experimental_extra_library_files%': [],
+ 'v8_enable_inspector%': 0,
'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
'mkpeephole_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mkpeephole<(EXECUTABLE_SUFFIX)',
},
- 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
+ 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi', 'inspector/inspector.gypi'],
'targets': [
{
'target_name': 'v8',
@@ -60,12 +61,10 @@
'..',
],
'defines': [
- 'V8_SHARED',
'BUILDING_V8_SHARED',
],
'direct_dependent_settings': {
'defines': [
- 'V8_SHARED',
'USING_V8_SHARED',
],
},
@@ -163,12 +162,10 @@
}],
['component=="shared_library"', {
'defines': [
- 'V8_SHARED',
'BUILDING_V8_SHARED',
],
'direct_dependent_settings': {
'defines': [
- 'V8_SHARED',
'USING_V8_SHARED',
],
},
@@ -258,7 +255,6 @@
['component=="shared_library"', {
'defines': [
'BUILDING_V8_SHARED',
- 'V8_SHARED',
],
}],
]
@@ -285,12 +281,10 @@
}],
['component=="shared_library"', {
'defines': [
- 'V8_SHARED',
'BUILDING_V8_SHARED',
],
'direct_dependent_settings': {
'defines': [
- 'V8_SHARED',
'USING_V8_SHARED',
],
},
@@ -451,10 +445,14 @@
'ast/ast-numbering.h',
'ast/ast-traversal-visitor.h',
'ast/ast-type-bounds.h',
+ 'ast/ast-types.cc',
+ 'ast/ast-types.h',
'ast/ast-value-factory.cc',
'ast/ast-value-factory.h',
'ast/ast.cc',
'ast/ast.h',
+ 'ast/compile-time-value.cc',
+ 'ast/compile-time-value.h',
'ast/context-slot-cache.cc',
'ast/context-slot-cache.h',
'ast/modules.cc',
@@ -462,7 +460,6 @@
'ast/prettyprinter.cc',
'ast/prettyprinter.h',
'ast/scopeinfo.cc',
- 'ast/scopeinfo.h',
'ast/scopes.cc',
'ast/scopes.h',
'ast/variables.cc',
@@ -498,12 +495,14 @@
'builtins/builtins-handler.cc',
'builtins/builtins-internal.cc',
'builtins/builtins-interpreter.cc',
+ 'builtins/builtins-iterator.cc',
'builtins/builtins-json.cc',
'builtins/builtins-math.cc',
'builtins/builtins-number.cc',
'builtins/builtins-object.cc',
'builtins/builtins-proxy.cc',
'builtins/builtins-reflect.cc',
+ 'builtins/builtins-regexp.cc',
'builtins/builtins-sharedarraybuffer.cc',
'builtins/builtins-string.cc',
'builtins/builtins-symbol.cc',
@@ -534,6 +533,8 @@
'compilation-cache.h',
'compilation-dependencies.cc',
'compilation-dependencies.h',
+ 'compilation-info.cc',
+ 'compilation-info.h',
'compilation-statistics.cc',
'compilation-statistics.h',
'compiler/access-builder.cc',
@@ -583,14 +584,14 @@
'compiler/effect-control-linearizer.h',
'compiler/escape-analysis.cc',
'compiler/escape-analysis.h',
- "compiler/escape-analysis-reducer.cc",
- "compiler/escape-analysis-reducer.h",
+ 'compiler/escape-analysis-reducer.cc',
+ 'compiler/escape-analysis-reducer.h',
'compiler/frame.cc',
'compiler/frame.h',
'compiler/frame-elider.cc',
'compiler/frame-elider.h',
- "compiler/frame-states.cc",
- "compiler/frame-states.h",
+ 'compiler/frame-states.cc',
+ 'compiler/frame-states.h',
'compiler/gap-resolver.cc',
'compiler/gap-resolver.h',
'compiler/graph-reducer.cc',
@@ -661,6 +662,8 @@
'compiler/machine-operator-reducer.h',
'compiler/machine-operator.cc',
'compiler/machine-operator.h',
+ 'compiler/machine-graph-verifier.cc',
+ 'compiler/machine-graph-verifier.h',
'compiler/memory-optimizer.cc',
'compiler/memory-optimizer.h',
'compiler/move-optimizer.cc',
@@ -720,10 +723,14 @@
'compiler/store-store-elimination.h',
'compiler/tail-call-optimization.cc',
'compiler/tail-call-optimization.h',
+ 'compiler/types.cc',
+ 'compiler/types.h',
+ 'compiler/type-cache.cc',
+ 'compiler/type-cache.h',
'compiler/type-hint-analyzer.cc',
'compiler/type-hint-analyzer.h',
- 'compiler/type-hints.cc',
- 'compiler/type-hints.h',
+ 'compiler/typed-optimization.cc',
+ 'compiler/typed-optimization.h',
'compiler/typer.cc',
'compiler/typer.h',
'compiler/unwinding-info-writer.h',
@@ -949,6 +956,7 @@
'ic/call-optimization.h',
'ic/handler-compiler.cc',
'ic/handler-compiler.h',
+ 'ic/handler-configuration.h',
'ic/ic-inl.h',
'ic/ic-state.cc',
'ic/ic-state.h',
@@ -978,6 +986,8 @@
'interpreter/bytecode-generator.h',
'interpreter/bytecode-label.cc',
'interpreter/bytecode-label.h',
+ 'interpreter/bytecode-operands.cc',
+ 'interpreter/bytecode-operands.h',
'interpreter/bytecode-peephole-optimizer.cc',
'interpreter/bytecode-peephole-optimizer.h',
'interpreter/bytecode-peephole-table.h',
@@ -985,7 +995,6 @@
'interpreter/bytecode-pipeline.h',
'interpreter/bytecode-register.cc',
'interpreter/bytecode-register.h',
- 'interpreter/bytecode-register-allocator.cc',
'interpreter/bytecode-register-allocator.h',
'interpreter/bytecode-register-optimizer.cc',
'interpreter/bytecode-register-optimizer.h',
@@ -1023,6 +1032,9 @@
'log-utils.h',
'log.cc',
'log.h',
+ 'lookup-cache-inl.h',
+ 'lookup-cache.cc',
+ 'lookup-cache.h',
'lookup.cc',
'lookup.h',
'macro-assembler.h',
@@ -1040,6 +1052,8 @@
'objects.h',
'ostreams.cc',
'ostreams.h',
+ 'parsing/duplicate-finder.cc',
+ 'parsing/duplicate-finder.h',
'parsing/expression-classifier.h',
'parsing/func-name-inferrer.cc',
'parsing/func-name-inferrer.h',
@@ -1091,6 +1105,8 @@
'profiler/strings-storage.h',
'profiler/tick-sample.cc',
'profiler/tick-sample.h',
+ 'profiler/tracing-cpu-profiler.cc',
+ 'profiler/tracing-cpu-profiler.h',
'profiler/unbound-queue-inl.h',
'profiler/unbound-queue.h',
'property-descriptor.cc',
@@ -1199,15 +1215,13 @@
'transitions-inl.h',
'transitions.cc',
'transitions.h',
- 'type-cache.cc',
- 'type-cache.h',
'type-feedback-vector-inl.h',
'type-feedback-vector.cc',
'type-feedback-vector.h',
+ 'type-hints.cc',
+ 'type-hints.h',
'type-info.cc',
'type-info.h',
- 'types.cc',
- 'types.h',
'unicode-inl.h',
'unicode.cc',
'unicode.h',
@@ -1235,8 +1249,6 @@
'wasm/ast-decoder.cc',
'wasm/ast-decoder.h',
'wasm/decoder.h',
- 'wasm/encoder.cc',
- 'wasm/encoder.h',
'wasm/leb-helper.h',
'wasm/module-decoder.cc',
'wasm/module-decoder.h',
@@ -1253,16 +1265,22 @@
'wasm/wasm-macro-gen.h',
'wasm/wasm-module.cc',
'wasm/wasm-module.h',
+ 'wasm/wasm-module-builder.cc',
+ 'wasm/wasm-module-builder.h',
'wasm/wasm-interpreter.cc',
'wasm/wasm-interpreter.h',
'wasm/wasm-opcodes.cc',
'wasm/wasm-opcodes.h',
'wasm/wasm-result.cc',
'wasm/wasm-result.h',
- 'zone.cc',
- 'zone.h',
- 'zone-allocator.h',
- 'zone-containers.h',
+ 'zone/accounting-allocator.cc',
+ 'zone/accounting-allocator.h',
+ 'zone/zone-segment.cc',
+ 'zone/zone-segment.h',
+ 'zone/zone.cc',
+ 'zone/zone.h',
+ 'zone/zone-allocator.h',
+ 'zone/zone-containers.h',
],
'conditions': [
['want_separate_host_toolset==1', {
@@ -1399,6 +1417,8 @@
'ia32/interface-descriptors-ia32.cc',
'ia32/macro-assembler-ia32.cc',
'ia32/macro-assembler-ia32.h',
+ 'ia32/simulator-ia32.cc',
+ 'ia32/simulator-ia32.h',
'builtins/ia32/builtins-ia32.cc',
'compiler/ia32/code-generator-ia32.cc',
'compiler/ia32/instruction-codes-ia32.h',
@@ -1438,6 +1458,8 @@
'x87/interface-descriptors-x87.cc',
'x87/macro-assembler-x87.cc',
'x87/macro-assembler-x87.h',
+ 'x87/simulator-x87.cc',
+ 'x87/simulator-x87.h',
'builtins/x87/builtins-x87.cc',
'compiler/x87/code-generator-x87.cc',
'compiler/x87/instruction-codes-x87.h',
@@ -1546,9 +1568,15 @@
'regexp/mips64/regexp-macro-assembler-mips64.h',
],
}],
- ['v8_target_arch=="x64" or v8_target_arch=="x32"', {
+ ['v8_target_arch=="x64"', {
'sources': [ ### gcmole(arch:x64) ###
'builtins/x64/builtins-x64.cc',
+ 'compiler/x64/code-generator-x64.cc',
+ 'compiler/x64/instruction-codes-x64.h',
+ 'compiler/x64/instruction-scheduler-x64.cc',
+ 'compiler/x64/instruction-selector-x64.cc',
+ 'compiler/x64/unwinding-info-writer-x64.h',
+ 'compiler/x64/unwinding-info-writer-x64.cc',
'crankshaft/x64/lithium-codegen-x64.cc',
'crankshaft/x64/lithium-codegen-x64.h',
'crankshaft/x64/lithium-gap-resolver-x64.cc',
@@ -1565,11 +1593,15 @@
'x64/cpu-x64.cc',
'x64/deoptimizer-x64.cc',
'x64/disasm-x64.cc',
+ 'x64/eh-frame-x64.cc',
'x64/frames-x64.cc',
'x64/frames-x64.h',
'x64/interface-descriptors-x64.cc',
'x64/macro-assembler-x64.cc',
'x64/macro-assembler-x64.h',
+ 'x64/simulator-x64.cc',
+ 'x64/simulator-x64.h',
+ 'x64/sse-instr.h',
'debug/x64/debug-x64.cc',
'full-codegen/x64/full-codegen-x64.cc',
'ic/x64/access-compiler-x64.cc',
@@ -1579,17 +1611,7 @@
'ic/x64/stub-cache-x64.cc',
'regexp/x64/regexp-macro-assembler-x64.cc',
'regexp/x64/regexp-macro-assembler-x64.h',
- ],
- }],
- ['v8_target_arch=="x64"', {
- 'sources': [
- 'compiler/x64/code-generator-x64.cc',
- 'compiler/x64/instruction-codes-x64.h',
- 'compiler/x64/instruction-scheduler-x64.cc',
- 'compiler/x64/instruction-selector-x64.cc',
- 'compiler/x64/unwinding-info-writer-x64.h',
- 'compiler/x64/unwinding-info-writer-x64.cc',
- 'x64/eh-frame-x64.cc',
+ 'third_party/valgrind/valgrind.h',
],
}],
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
@@ -1691,7 +1713,6 @@
['component=="shared_library"', {
'defines': [
'BUILDING_V8_SHARED',
- 'V8_SHARED',
],
}],
['v8_postmortem_support=="true"', {
@@ -1723,6 +1744,16 @@
'i18n.h',
],
}],
+ ['v8_enable_inspector==1', {
+ 'sources': [
+ '<@(inspector_all_sources)'
+ ],
+ 'dependencies': [
+ 'inspector/inspector.gyp:protocol_generated_sources',
+ 'inspector/inspector.gyp:inspector_injected_script',
+ 'inspector/inspector.gyp:inspector_debugger_script',
+ ],
+ }],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
'<(icu_gyp_path):icudata',
@@ -1740,8 +1771,6 @@
'..',
],
'sources': [
- 'base/accounting-allocator.cc',
- 'base/accounting-allocator.h',
'base/adapters.h',
'base/atomic-utils.h',
'base/atomicops.h',
@@ -1775,6 +1804,7 @@
'base/functional.cc',
'base/functional.h',
'base/hashmap.h',
+ 'base/hashmap-entry.h',
'base/ieee754.cc',
'base/ieee754.h',
'base/iterator.h',
@@ -2171,17 +2201,16 @@
'js/regexp.js',
'js/arraybuffer.js',
'js/typedarray.js',
- 'js/iterator-prototype.js',
'js/collection.js',
'js/weak-collection.js',
'js/collection-iterator.js',
'js/promise.js',
'js/messages.js',
'js/array-iterator.js',
- 'js/string-iterator.js',
'js/templates.js',
'js/spread.js',
'js/proxy.js',
+ 'js/async-await.js',
'debug/mirrors.js',
'debug/debug.js',
'debug/liveedit.js',
@@ -2192,7 +2221,6 @@
'js/harmony-atomics.js',
'js/harmony-simd.js',
'js/harmony-string-padding.js',
- 'js/harmony-async-await.js'
],
'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
@@ -2202,8 +2230,8 @@
['v8_enable_i18n_support==1', {
'library_files': ['js/i18n.js'],
'experimental_library_files': [
+ 'js/datetime-format-to-parts.js',
'js/icu-case-mapping.js',
- 'js/intl-extra.js',
],
}],
],
@@ -2402,7 +2430,10 @@
'..',
],
'sources': [
+ 'interpreter/bytecode-operands.h',
+ 'interpreter/bytecode-operands.cc',
'interpreter/bytecode-peephole-table.h',
+ 'interpreter/bytecode-traits.h',
'interpreter/bytecodes.h',
'interpreter/bytecodes.cc',
'interpreter/mkpeephole.cc'
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 0af4838abf..1d2e36dc04 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -7,16 +7,19 @@
#include <type_traits>
#include "src/base/logging.h"
+#include "src/conversions.h"
#include "src/factory.h"
#include "src/handles-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/transitions.h"
namespace v8 {
namespace internal {
static const uint32_t kLatestVersion = 9;
+static const int kPretenureThreshold = 100 * KB;
template <typename T>
static size_t BytesNeededForVarint(T value) {
@@ -82,12 +85,54 @@ enum class SerializationTag : uint8_t {
// Regular expression, UTF-8 encoding. byteLength:uint32_t, raw data,
// flags:uint32_t.
kRegExp = 'R',
+ // Beginning of a JS map.
+ kBeginJSMap = ';',
+ // End of a JS map. length:uint32_t.
+ kEndJSMap = ':',
+ // Beginning of a JS set.
+ kBeginJSSet = '\'',
+ // End of a JS set. length:uint32_t.
+ kEndJSSet = ',',
+ // Array buffer. byteLength:uint32_t, then raw data.
+ kArrayBuffer = 'B',
+ // Array buffer (transferred). transferID:uint32_t
+ kArrayBufferTransfer = 't',
+ // View into an array buffer.
+ // subtag:ArrayBufferViewTag, byteOffset:uint32_t, byteLength:uint32_t
+ // For typed arrays, byteOffset and byteLength must be divisible by the size
+ // of the element.
+ // Note: kArrayBufferView is special, and should have an ArrayBuffer (or an
+ // ObjectReference to one) serialized just before it. This is a quirk arising
+ // from the previous stack-based implementation.
+ kArrayBufferView = 'V',
+ // Shared array buffer (transferred). transferID:uint32_t
+ kSharedArrayBufferTransfer = 'u',
};
-ValueSerializer::ValueSerializer(Isolate* isolate)
+namespace {
+
+enum class ArrayBufferViewTag : uint8_t {
+ kInt8Array = 'b',
+ kUint8Array = 'B',
+ kUint8ClampedArray = 'C',
+ kInt16Array = 'w',
+ kUint16Array = 'W',
+ kInt32Array = 'd',
+ kUint32Array = 'D',
+ kFloat32Array = 'f',
+ kFloat64Array = 'F',
+ kDataView = '?',
+};
+
+} // namespace
+
+ValueSerializer::ValueSerializer(Isolate* isolate,
+ v8::ValueSerializer::Delegate* delegate)
: isolate_(isolate),
+ delegate_(delegate),
zone_(isolate->allocator()),
- id_map_(isolate->heap(), &zone_) {}
+ id_map_(isolate->heap(), &zone_),
+ array_buffer_transfer_map_(isolate->heap(), &zone_) {}
ValueSerializer::~ValueSerializer() {}
@@ -150,6 +195,11 @@ void ValueSerializer::WriteTwoByteString(Vector<const uc16> chars) {
reinterpret_cast<const uint8_t*>(chars.end()));
}
+void ValueSerializer::WriteRawBytes(const void* source, size_t length) {
+ const uint8_t* begin = reinterpret_cast<const uint8_t*>(source);
+ buffer_.insert(buffer_.end(), begin, begin + length);
+}
+
uint8_t* ValueSerializer::ReserveRawBytes(size_t bytes) {
if (!bytes) return nullptr;
auto old_size = buffer_.size();
@@ -157,6 +207,20 @@ uint8_t* ValueSerializer::ReserveRawBytes(size_t bytes) {
return &buffer_[old_size];
}
+void ValueSerializer::WriteUint32(uint32_t value) {
+ WriteVarint<uint32_t>(value);
+}
+
+void ValueSerializer::WriteUint64(uint64_t value) {
+ WriteVarint<uint64_t>(value);
+}
+
+void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
+ Handle<JSArrayBuffer> array_buffer) {
+ DCHECK(!array_buffer_transfer_map_.Find(array_buffer));
+ array_buffer_transfer_map_.Set(array_buffer, transfer_id);
+}
+
Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
if (object->IsSmi()) {
WriteSmi(Smi::cast(*object));
@@ -172,15 +236,33 @@ Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
case MUTABLE_HEAP_NUMBER_TYPE:
WriteHeapNumber(HeapNumber::cast(*object));
return Just(true);
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE: {
+ // Despite being JSReceivers, these have their wrapped buffer serialized
+ // first. That makes this logic a little quirky, because it needs to
+ // happen before we assign object IDs.
+ // TODO(jbroman): It may be possible to avoid materializing a typed
+ // array's buffer here.
+ Handle<JSArrayBufferView> view = Handle<JSArrayBufferView>::cast(object);
+ if (!id_map_.Find(view)) {
+ Handle<JSArrayBuffer> buffer(
+ view->IsJSTypedArray()
+ ? Handle<JSTypedArray>::cast(view)->GetBuffer()
+ : handle(JSArrayBuffer::cast(view->buffer()), isolate_));
+ if (!WriteJSReceiver(buffer).FromMaybe(false)) return Nothing<bool>();
+ }
+ return WriteJSReceiver(view);
+ }
default:
if (object->IsString()) {
WriteString(Handle<String>::cast(object));
return Just(true);
} else if (object->IsJSReceiver()) {
return WriteJSReceiver(Handle<JSReceiver>::cast(object));
+ } else {
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
+ return Nothing<bool>();
}
- UNIMPLEMENTED();
- return Nothing<bool>();
}
}
@@ -267,20 +349,27 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
// Eliminate callable and exotic objects, which should not be serialized.
InstanceType instance_type = receiver->map()->instance_type();
- if (receiver->IsCallable() || instance_type <= LAST_SPECIAL_RECEIVER_TYPE) {
+ if (receiver->IsCallable() || (instance_type <= LAST_SPECIAL_RECEIVER_TYPE &&
+ instance_type != JS_SPECIAL_API_OBJECT_TYPE)) {
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
return Nothing<bool>();
}
// If we are at the end of the stack, abort. This function may recurse.
- if (StackLimitCheck(isolate_).HasOverflowed()) return Nothing<bool>();
+ STACK_CHECK(isolate_, Nothing<bool>());
HandleScope scope(isolate_);
switch (instance_type) {
case JS_ARRAY_TYPE:
return WriteJSArray(Handle<JSArray>::cast(receiver));
case JS_OBJECT_TYPE:
- case JS_API_OBJECT_TYPE:
- return WriteJSObject(Handle<JSObject>::cast(receiver));
+ case JS_API_OBJECT_TYPE: {
+ Handle<JSObject> js_object = Handle<JSObject>::cast(receiver);
+ return js_object->GetInternalFieldCount() ? WriteHostObject(js_object)
+ : WriteJSObject(js_object);
+ }
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ return WriteHostObject(Handle<JSObject>::cast(receiver));
case JS_DATE_TYPE:
WriteJSDate(JSDate::cast(*receiver));
return Just(true);
@@ -289,21 +378,76 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_REGEXP_TYPE:
WriteJSRegExp(JSRegExp::cast(*receiver));
return Just(true);
+ case JS_MAP_TYPE:
+ return WriteJSMap(Handle<JSMap>::cast(receiver));
+ case JS_SET_TYPE:
+ return WriteJSSet(Handle<JSSet>::cast(receiver));
+ case JS_ARRAY_BUFFER_TYPE:
+ return WriteJSArrayBuffer(JSArrayBuffer::cast(*receiver));
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
default:
- UNIMPLEMENTED();
- break;
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
+ return Nothing<bool>();
}
return Nothing<bool>();
}
Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
+ DCHECK_GT(object->map()->instance_type(), LAST_CUSTOM_ELEMENTS_RECEIVER);
+ const bool can_serialize_fast =
+ object->HasFastProperties() && object->elements()->length() == 0;
+ if (!can_serialize_fast) return WriteJSObjectSlow(object);
+
+ Handle<Map> map(object->map(), isolate_);
+ WriteTag(SerializationTag::kBeginJSObject);
+
+ // Write out fast properties as long as they are only data properties and the
+ // map doesn't change.
+ uint32_t properties_written = 0;
+ bool map_changed = false;
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ Handle<Name> key(map->instance_descriptors()->GetKey(i), isolate_);
+ if (!key->IsString()) continue;
+ PropertyDetails details = map->instance_descriptors()->GetDetails(i);
+ if (details.IsDontEnum()) continue;
+
+ Handle<Object> value;
+ if (V8_LIKELY(!map_changed)) map_changed = *map == object->map();
+ if (V8_LIKELY(!map_changed && details.type() == DATA)) {
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ value = JSObject::FastPropertyAt(object, details.representation(),
+ field_index);
+ } else {
+ // This logic should essentially match WriteJSObjectPropertiesSlow.
+ // If the property is no longer found, do not serialize it.
+ // This could happen if a getter deleted the property.
+ LookupIterator it(isolate_, object, key, LookupIterator::OWN);
+ if (!it.IsFound()) continue;
+ if (!Object::GetProperty(&it).ToHandle(&value)) return Nothing<bool>();
+ }
+
+ if (!WriteObject(key).FromMaybe(false) ||
+ !WriteObject(value).FromMaybe(false)) {
+ return Nothing<bool>();
+ }
+ properties_written++;
+ }
+
+ WriteTag(SerializationTag::kEndJSObject);
+ WriteVarint<uint32_t>(properties_written);
+ return Just(true);
+}
+
+Maybe<bool> ValueSerializer::WriteJSObjectSlow(Handle<JSObject> object) {
WriteTag(SerializationTag::kBeginJSObject);
Handle<FixedArray> keys;
uint32_t properties_written;
if (!KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
ENUMERABLE_STRINGS)
.ToHandle(&keys) ||
- !WriteJSObjectProperties(object, keys).To(&properties_written)) {
+ !WriteJSObjectPropertiesSlow(object, keys).To(&properties_written)) {
return Nothing<bool>();
}
WriteTag(SerializationTag::kEndJSObject);
@@ -331,7 +475,46 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
// format changes.
WriteTag(SerializationTag::kBeginDenseJSArray);
WriteVarint<uint32_t>(length);
- for (uint32_t i = 0; i < length; i++) {
+ uint32_t i = 0;
+
+ // Fast paths. Note that FAST_ELEMENTS in particular can bail due to the
+ // structure of the elements changing.
+ switch (array->GetElementsKind()) {
+ case FAST_SMI_ELEMENTS: {
+ Handle<FixedArray> elements(FixedArray::cast(array->elements()),
+ isolate_);
+ for (; i < length; i++) WriteSmi(Smi::cast(elements->get(i)));
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS: {
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(array->elements()), isolate_);
+ for (; i < length; i++) {
+ WriteTag(SerializationTag::kDouble);
+ WriteDouble(elements->get_scalar(i));
+ }
+ break;
+ }
+ case FAST_ELEMENTS: {
+ Handle<Object> old_length(array->length(), isolate_);
+ for (; i < length; i++) {
+ if (array->length() != *old_length ||
+ array->GetElementsKind() != FAST_ELEMENTS) {
+ // Fall back to slow path.
+ break;
+ }
+ Handle<Object> element(FixedArray::cast(array->elements())->get(i),
+ isolate_);
+ if (!WriteObject(element).FromMaybe(false)) return Nothing<bool>();
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ // If there are elements remaining, serialize them slowly.
+ for (; i < length; i++) {
// Serializing the array's elements can have arbitrary side effects, so we
// cannot rely on still having fast elements, even if it did to begin
// with.
@@ -342,6 +525,7 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
return Nothing<bool>();
}
}
+
KeyAccumulator accumulator(isolate_, KeyCollectionMode::kOwnOnly,
ENUMERABLE_STRINGS);
if (!accumulator.CollectOwnPropertyNames(array, array).FromMaybe(false)) {
@@ -350,7 +534,7 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
Handle<FixedArray> keys =
accumulator.GetKeys(GetKeysConversion::kConvertToString);
uint32_t properties_written;
- if (!WriteJSObjectProperties(array, keys).To(&properties_written)) {
+ if (!WriteJSObjectPropertiesSlow(array, keys).To(&properties_written)) {
return Nothing<bool>();
}
WriteTag(SerializationTag::kEndDenseJSArray);
@@ -364,7 +548,7 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
if (!KeyAccumulator::GetKeys(array, KeyCollectionMode::kOwnOnly,
ENUMERABLE_STRINGS)
.ToHandle(&keys) ||
- !WriteJSObjectProperties(array, keys).To(&properties_written)) {
+ !WriteJSObjectPropertiesSlow(array, keys).To(&properties_written)) {
return Nothing<bool>();
}
WriteTag(SerializationTag::kEndSparseJSArray);
@@ -401,6 +585,7 @@ Maybe<bool> ValueSerializer::WriteJSValue(Handle<JSValue> value) {
v8::String::NO_NULL_TERMINATION);
} else {
DCHECK(inner_value->IsSymbol());
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, value);
return Nothing<bool>();
}
return Just(true);
@@ -417,7 +602,135 @@ void ValueSerializer::WriteJSRegExp(JSRegExp* regexp) {
WriteVarint(static_cast<uint32_t>(regexp->GetFlags()));
}
-Maybe<uint32_t> ValueSerializer::WriteJSObjectProperties(
+Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> map) {
+ // First copy the key-value pairs, since getters could mutate them.
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
+ int length = table->NumberOfElements() * 2;
+ Handle<FixedArray> entries = isolate_->factory()->NewFixedArray(length);
+ {
+ DisallowHeapAllocation no_gc;
+ Oddball* the_hole = isolate_->heap()->the_hole_value();
+ int capacity = table->UsedCapacity();
+ int result_index = 0;
+ for (int i = 0; i < capacity; i++) {
+ Object* key = table->KeyAt(i);
+ if (key == the_hole) continue;
+ entries->set(result_index++, key);
+ entries->set(result_index++, table->ValueAt(i));
+ }
+ DCHECK_EQ(result_index, length);
+ }
+
+ // Then write it out.
+ WriteTag(SerializationTag::kBeginJSMap);
+ for (int i = 0; i < length; i++) {
+ if (!WriteObject(handle(entries->get(i), isolate_)).FromMaybe(false)) {
+ return Nothing<bool>();
+ }
+ }
+ WriteTag(SerializationTag::kEndJSMap);
+ WriteVarint<uint32_t>(length);
+ return Just(true);
+}
+
+Maybe<bool> ValueSerializer::WriteJSSet(Handle<JSSet> set) {
+ // First copy the element pointers, since getters could mutate them.
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
+ int length = table->NumberOfElements();
+ Handle<FixedArray> entries = isolate_->factory()->NewFixedArray(length);
+ {
+ DisallowHeapAllocation no_gc;
+ Oddball* the_hole = isolate_->heap()->the_hole_value();
+ int capacity = table->UsedCapacity();
+ int result_index = 0;
+ for (int i = 0; i < capacity; i++) {
+ Object* key = table->KeyAt(i);
+ if (key == the_hole) continue;
+ entries->set(result_index++, key);
+ }
+ DCHECK_EQ(result_index, length);
+ }
+
+ // Then write it out.
+ WriteTag(SerializationTag::kBeginJSSet);
+ for (int i = 0; i < length; i++) {
+ if (!WriteObject(handle(entries->get(i), isolate_)).FromMaybe(false)) {
+ return Nothing<bool>();
+ }
+ }
+ WriteTag(SerializationTag::kEndJSSet);
+ WriteVarint<uint32_t>(length);
+ return Just(true);
+}
+
+Maybe<bool> ValueSerializer::WriteJSArrayBuffer(JSArrayBuffer* array_buffer) {
+ uint32_t* transfer_entry = array_buffer_transfer_map_.Find(array_buffer);
+ if (transfer_entry) {
+ DCHECK(array_buffer->was_neutered() || array_buffer->is_shared());
+ WriteTag(array_buffer->is_shared()
+ ? SerializationTag::kSharedArrayBufferTransfer
+ : SerializationTag::kArrayBufferTransfer);
+ WriteVarint(*transfer_entry);
+ return Just(true);
+ }
+
+ if (array_buffer->is_shared()) {
+ ThrowDataCloneError(
+ MessageTemplate::kDataCloneErrorSharedArrayBufferNotTransferred);
+ return Nothing<bool>();
+ }
+ if (array_buffer->was_neutered()) {
+ ThrowDataCloneError(MessageTemplate::kDataCloneErrorNeuteredArrayBuffer);
+ return Nothing<bool>();
+ }
+ double byte_length = array_buffer->byte_length()->Number();
+ if (byte_length > std::numeric_limits<uint32_t>::max()) {
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, handle(array_buffer));
+ return Nothing<bool>();
+ }
+ WriteTag(SerializationTag::kArrayBuffer);
+ WriteVarint<uint32_t>(byte_length);
+ WriteRawBytes(array_buffer->backing_store(), byte_length);
+ return Just(true);
+}
+
+Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView* view) {
+ WriteTag(SerializationTag::kArrayBufferView);
+ ArrayBufferViewTag tag = ArrayBufferViewTag::kInt8Array;
+ if (view->IsJSTypedArray()) {
+ switch (JSTypedArray::cast(view)->type()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ tag = ArrayBufferViewTag::k##Type##Array; \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ }
+ } else {
+ DCHECK(view->IsJSDataView());
+ tag = ArrayBufferViewTag::kDataView;
+ }
+ WriteVarint(static_cast<uint8_t>(tag));
+ WriteVarint(NumberToUint32(view->byte_offset()));
+ WriteVarint(NumberToUint32(view->byte_length()));
+ return Just(true);
+}
+
+Maybe<bool> ValueSerializer::WriteHostObject(Handle<JSObject> object) {
+ if (!delegate_) {
+ isolate_->Throw(*isolate_->factory()->NewError(
+ isolate_->error_function(), MessageTemplate::kDataCloneError, object));
+ return Nothing<bool>();
+ }
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ Maybe<bool> result =
+ delegate_->WriteHostObject(v8_isolate, Utils::ToLocal(object));
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>());
+ DCHECK(!result.IsNothing());
+ return result;
+}
+
+Maybe<uint32_t> ValueSerializer::WriteJSObjectPropertiesSlow(
Handle<JSObject> object, Handle<FixedArray> keys) {
uint32_t properties_written = 0;
int length = keys->length();
@@ -445,25 +758,56 @@ Maybe<uint32_t> ValueSerializer::WriteJSObjectProperties(
return Just(properties_written);
}
+void ValueSerializer::ThrowDataCloneError(
+ MessageTemplate::Template template_index) {
+ return ThrowDataCloneError(template_index,
+ isolate_->factory()->empty_string());
+}
+
+void ValueSerializer::ThrowDataCloneError(
+ MessageTemplate::Template template_index, Handle<Object> arg0) {
+ Handle<String> message =
+ MessageTemplate::FormatMessage(isolate_, template_index, arg0);
+ if (delegate_) {
+ delegate_->ThrowDataCloneError(Utils::ToLocal(message));
+ } else {
+ isolate_->Throw(
+ *isolate_->factory()->NewError(isolate_->error_function(), message));
+ }
+ if (isolate_->has_scheduled_exception()) {
+ isolate_->PromoteScheduledException();
+ }
+}
+
ValueDeserializer::ValueDeserializer(Isolate* isolate,
- Vector<const uint8_t> data)
+ Vector<const uint8_t> data,
+ v8::ValueDeserializer::Delegate* delegate)
: isolate_(isolate),
+ delegate_(delegate),
position_(data.start()),
end_(data.start() + data.length()),
- id_map_(Handle<SeededNumberDictionary>::cast(
- isolate->global_handles()->Create(
- *SeededNumberDictionary::New(isolate, 0)))) {}
+ pretenure_(data.length() > kPretenureThreshold ? TENURED : NOT_TENURED),
+ id_map_(Handle<FixedArray>::cast(isolate->global_handles()->Create(
+ isolate_->heap()->empty_fixed_array()))) {}
ValueDeserializer::~ValueDeserializer() {
GlobalHandles::Destroy(Handle<Object>::cast(id_map_).location());
+
+ Handle<Object> transfer_map_handle;
+ if (array_buffer_transfer_map_.ToHandle(&transfer_map_handle)) {
+ GlobalHandles::Destroy(transfer_map_handle.location());
+ }
}
Maybe<bool> ValueDeserializer::ReadHeader() {
if (position_ < end_ &&
*position_ == static_cast<uint8_t>(SerializationTag::kVersion)) {
ReadTag().ToChecked();
- if (!ReadVarint<uint32_t>().To(&version_)) return Nothing<bool>();
- if (version_ > kLatestVersion) return Nothing<bool>();
+ if (!ReadVarint<uint32_t>().To(&version_) || version_ > kLatestVersion) {
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationVersionError));
+ return Nothing<bool>();
+ }
}
return Just(true);
}
@@ -511,7 +855,7 @@ Maybe<T> ValueDeserializer::ReadVarint() {
if (position_ >= end_) return Nothing<T>();
uint8_t byte = *position_;
if (V8_LIKELY(shift < sizeof(T) * 8)) {
- value |= (byte & 0x7f) << shift;
+ value |= static_cast<T>(byte & 0x7f) << shift;
shift += 7;
}
has_another_byte = byte & 0x80;
@@ -551,7 +895,67 @@ Maybe<Vector<const uint8_t>> ValueDeserializer::ReadRawBytes(int size) {
return Just(Vector<const uint8_t>(start, size));
}
+bool ValueDeserializer::ReadUint32(uint32_t* value) {
+ return ReadVarint<uint32_t>().To(value);
+}
+
+bool ValueDeserializer::ReadUint64(uint64_t* value) {
+ return ReadVarint<uint64_t>().To(value);
+}
+
+bool ValueDeserializer::ReadDouble(double* value) {
+ return ReadDouble().To(value);
+}
+
+bool ValueDeserializer::ReadRawBytes(size_t length, const void** data) {
+ if (length > static_cast<size_t>(end_ - position_)) return false;
+ *data = position_;
+ position_ += length;
+ return true;
+}
+
+void ValueDeserializer::TransferArrayBuffer(
+ uint32_t transfer_id, Handle<JSArrayBuffer> array_buffer) {
+ if (array_buffer_transfer_map_.is_null()) {
+ array_buffer_transfer_map_ =
+ Handle<SeededNumberDictionary>::cast(isolate_->global_handles()->Create(
+ *SeededNumberDictionary::New(isolate_, 0)));
+ }
+ Handle<SeededNumberDictionary> dictionary =
+ array_buffer_transfer_map_.ToHandleChecked();
+ const bool used_as_prototype = false;
+ Handle<SeededNumberDictionary> new_dictionary =
+ SeededNumberDictionary::AtNumberPut(dictionary, transfer_id, array_buffer,
+ used_as_prototype);
+ if (!new_dictionary.is_identical_to(dictionary)) {
+ GlobalHandles::Destroy(Handle<Object>::cast(dictionary).location());
+ array_buffer_transfer_map_ = Handle<SeededNumberDictionary>::cast(
+ isolate_->global_handles()->Create(*new_dictionary));
+ }
+}
+
MaybeHandle<Object> ValueDeserializer::ReadObject() {
+ MaybeHandle<Object> result = ReadObjectInternal();
+
+ // ArrayBufferView is special in that it consumes the value before it, even
+ // after format version 0.
+ Handle<Object> object;
+ SerializationTag tag;
+ if (result.ToHandle(&object) && V8_UNLIKELY(object->IsJSArrayBuffer()) &&
+ PeekTag().To(&tag) && tag == SerializationTag::kArrayBufferView) {
+ ConsumeTag(SerializationTag::kArrayBufferView);
+ result = ReadJSArrayBufferView(Handle<JSArrayBuffer>::cast(object));
+ }
+
+ if (result.is_null() && !isolate_->has_pending_exception()) {
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationError));
+ }
+
+ return result;
+}
+
+MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
SerializationTag tag;
if (!ReadTag().To(&tag)) return MaybeHandle<Object>();
switch (tag) {
@@ -570,17 +974,19 @@ MaybeHandle<Object> ValueDeserializer::ReadObject() {
case SerializationTag::kInt32: {
Maybe<int32_t> number = ReadZigZag<int32_t>();
if (number.IsNothing()) return MaybeHandle<Object>();
- return isolate_->factory()->NewNumberFromInt(number.FromJust());
+ return isolate_->factory()->NewNumberFromInt(number.FromJust(),
+ pretenure_);
}
case SerializationTag::kUint32: {
Maybe<uint32_t> number = ReadVarint<uint32_t>();
if (number.IsNothing()) return MaybeHandle<Object>();
- return isolate_->factory()->NewNumberFromUint(number.FromJust());
+ return isolate_->factory()->NewNumberFromUint(number.FromJust(),
+ pretenure_);
}
case SerializationTag::kDouble: {
Maybe<double> number = ReadDouble();
if (number.IsNothing()) return MaybeHandle<Object>();
- return isolate_->factory()->NewNumber(number.FromJust());
+ return isolate_->factory()->NewNumber(number.FromJust(), pretenure_);
}
case SerializationTag::kUtf8String:
return ReadUtf8String();
@@ -606,8 +1012,25 @@ MaybeHandle<Object> ValueDeserializer::ReadObject() {
return ReadJSValue(tag);
case SerializationTag::kRegExp:
return ReadJSRegExp();
+ case SerializationTag::kBeginJSMap:
+ return ReadJSMap();
+ case SerializationTag::kBeginJSSet:
+ return ReadJSSet();
+ case SerializationTag::kArrayBuffer:
+ return ReadJSArrayBuffer();
+ case SerializationTag::kArrayBufferTransfer: {
+ const bool is_shared = false;
+ return ReadTransferredJSArrayBuffer(is_shared);
+ }
+ case SerializationTag::kSharedArrayBufferTransfer: {
+ const bool is_shared = true;
+ return ReadTransferredJSArrayBuffer(is_shared);
+ }
default:
- return MaybeHandle<Object>();
+ // TODO(jbroman): Introduce an explicit tag for host objects to avoid
+ // having to treat every unknown tag as a potential host object.
+ position_--;
+ return ReadHostObject();
}
}
@@ -620,7 +1043,7 @@ MaybeHandle<String> ValueDeserializer::ReadUtf8String() {
!ReadRawBytes(utf8_length).To(&utf8_bytes))
return MaybeHandle<String>();
return isolate_->factory()->NewStringFromUtf8(
- Vector<const char>::cast(utf8_bytes));
+ Vector<const char>::cast(utf8_bytes), pretenure_);
}
MaybeHandle<String> ValueDeserializer::ReadTwoByteString() {
@@ -636,7 +1059,7 @@ MaybeHandle<String> ValueDeserializer::ReadTwoByteString() {
// string on the heap (regardless of alignment).
Handle<SeqTwoByteString> string;
if (!isolate_->factory()
- ->NewRawTwoByteString(byte_length / sizeof(uc16))
+ ->NewRawTwoByteString(byte_length / sizeof(uc16), pretenure_)
.ToHandle(&string))
return MaybeHandle<String>();
@@ -646,19 +1069,59 @@ MaybeHandle<String> ValueDeserializer::ReadTwoByteString() {
return string;
}
+bool ValueDeserializer::ReadExpectedString(Handle<String> expected) {
+ // In the case of failure, the position in the stream is reset.
+ const uint8_t* original_position = position_;
+
+ SerializationTag tag;
+ uint32_t byte_length;
+ Vector<const uint8_t> bytes;
+ if (!ReadTag().To(&tag) || !ReadVarint<uint32_t>().To(&byte_length) ||
+ byte_length >
+ static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
+ !ReadRawBytes(byte_length).To(&bytes)) {
+ position_ = original_position;
+ return false;
+ }
+
+ expected = String::Flatten(expected);
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = expected->GetFlatContent();
+
+ // If the bytes are verbatim what is in the flattened string, then the string
+ // is successfully consumed.
+ if (tag == SerializationTag::kUtf8String && flat.IsOneByte()) {
+ Vector<const uint8_t> chars = flat.ToOneByteVector();
+ if (byte_length == chars.length() &&
+ String::IsAscii(chars.begin(), chars.length()) &&
+ memcmp(bytes.begin(), chars.begin(), byte_length) == 0) {
+ return true;
+ }
+ } else if (tag == SerializationTag::kTwoByteString && flat.IsTwoByte()) {
+ Vector<const uc16> chars = flat.ToUC16Vector();
+ if (byte_length == static_cast<unsigned>(chars.length()) * sizeof(uc16) &&
+ memcmp(bytes.begin(), chars.begin(), byte_length) == 0) {
+ return true;
+ }
+ }
+
+ position_ = original_position;
+ return false;
+}
+
MaybeHandle<JSObject> ValueDeserializer::ReadJSObject() {
// If we are at the end of the stack, abort. This function may recurse.
- if (StackLimitCheck(isolate_).HasOverflowed()) return MaybeHandle<JSObject>();
+ STACK_CHECK(isolate_, MaybeHandle<JSObject>());
uint32_t id = next_id_++;
HandleScope scope(isolate_);
Handle<JSObject> object =
- isolate_->factory()->NewJSObject(isolate_->object_function());
+ isolate_->factory()->NewJSObject(isolate_->object_function(), pretenure_);
AddObjectWithID(id, object);
uint32_t num_properties;
uint32_t expected_num_properties;
- if (!ReadJSObjectProperties(object, SerializationTag::kEndJSObject)
+ if (!ReadJSObjectProperties(object, SerializationTag::kEndJSObject, true)
.To(&num_properties) ||
!ReadVarint<uint32_t>().To(&expected_num_properties) ||
num_properties != expected_num_properties) {
@@ -671,21 +1134,22 @@ MaybeHandle<JSObject> ValueDeserializer::ReadJSObject() {
MaybeHandle<JSArray> ValueDeserializer::ReadSparseJSArray() {
// If we are at the end of the stack, abort. This function may recurse.
- if (StackLimitCheck(isolate_).HasOverflowed()) return MaybeHandle<JSArray>();
+ STACK_CHECK(isolate_, MaybeHandle<JSArray>());
uint32_t length;
if (!ReadVarint<uint32_t>().To(&length)) return MaybeHandle<JSArray>();
uint32_t id = next_id_++;
HandleScope scope(isolate_);
- Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
+ Handle<JSArray> array = isolate_->factory()->NewJSArray(
+ 0, TERMINAL_FAST_ELEMENTS_KIND, pretenure_);
JSArray::SetLength(array, length);
AddObjectWithID(id, array);
uint32_t num_properties;
uint32_t expected_num_properties;
uint32_t expected_length;
- if (!ReadJSObjectProperties(array, SerializationTag::kEndSparseJSArray)
+ if (!ReadJSObjectProperties(array, SerializationTag::kEndSparseJSArray, false)
.To(&num_properties) ||
!ReadVarint<uint32_t>().To(&expected_num_properties) ||
!ReadVarint<uint32_t>().To(&expected_length) ||
@@ -699,7 +1163,7 @@ MaybeHandle<JSArray> ValueDeserializer::ReadSparseJSArray() {
MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() {
// If we are at the end of the stack, abort. This function may recurse.
- if (StackLimitCheck(isolate_).HasOverflowed()) return MaybeHandle<JSArray>();
+ STACK_CHECK(isolate_, MaybeHandle<JSArray>());
uint32_t length;
if (!ReadVarint<uint32_t>().To(&length)) return MaybeHandle<JSArray>();
@@ -707,7 +1171,8 @@ MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() {
uint32_t id = next_id_++;
HandleScope scope(isolate_);
Handle<JSArray> array = isolate_->factory()->NewJSArray(
- FAST_HOLEY_ELEMENTS, length, length, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ FAST_HOLEY_ELEMENTS, length, length, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
+ pretenure_);
AddObjectWithID(id, array);
Handle<FixedArray> elements(FixedArray::cast(array->elements()), isolate_);
@@ -722,7 +1187,7 @@ MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() {
uint32_t num_properties;
uint32_t expected_num_properties;
uint32_t expected_length;
- if (!ReadJSObjectProperties(array, SerializationTag::kEndDenseJSArray)
+ if (!ReadJSObjectProperties(array, SerializationTag::kEndDenseJSArray, false)
.To(&num_properties) ||
!ReadVarint<uint32_t>().To(&expected_num_properties) ||
!ReadVarint<uint32_t>().To(&expected_length) ||
@@ -752,29 +1217,30 @@ MaybeHandle<JSValue> ValueDeserializer::ReadJSValue(SerializationTag tag) {
Handle<JSValue> value;
switch (tag) {
case SerializationTag::kTrueObject:
- value = Handle<JSValue>::cast(
- isolate_->factory()->NewJSObject(isolate_->boolean_function()));
+ value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ isolate_->boolean_function(), pretenure_));
value->set_value(isolate_->heap()->true_value());
break;
case SerializationTag::kFalseObject:
- value = Handle<JSValue>::cast(
- isolate_->factory()->NewJSObject(isolate_->boolean_function()));
+ value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ isolate_->boolean_function(), pretenure_));
value->set_value(isolate_->heap()->false_value());
break;
case SerializationTag::kNumberObject: {
double number;
if (!ReadDouble().To(&number)) return MaybeHandle<JSValue>();
- value = Handle<JSValue>::cast(
- isolate_->factory()->NewJSObject(isolate_->number_function()));
- Handle<Object> number_object = isolate_->factory()->NewNumber(number);
+ value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ isolate_->number_function(), pretenure_));
+ Handle<Object> number_object =
+ isolate_->factory()->NewNumber(number, pretenure_);
value->set_value(*number_object);
break;
}
case SerializationTag::kStringObject: {
Handle<String> string;
if (!ReadUtf8String().ToHandle(&string)) return MaybeHandle<JSValue>();
- value = Handle<JSValue>::cast(
- isolate_->factory()->NewJSObject(isolate_->string_function()));
+ value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ isolate_->string_function(), pretenure_));
value->set_value(*string);
break;
}
@@ -801,9 +1267,296 @@ MaybeHandle<JSRegExp> ValueDeserializer::ReadJSRegExp() {
return regexp;
}
+MaybeHandle<JSMap> ValueDeserializer::ReadJSMap() {
+ // If we are at the end of the stack, abort. This function may recurse.
+ STACK_CHECK(isolate_, MaybeHandle<JSMap>());
+
+ HandleScope scope(isolate_);
+ uint32_t id = next_id_++;
+ Handle<JSMap> map = isolate_->factory()->NewJSMap();
+ AddObjectWithID(id, map);
+
+ Handle<JSFunction> map_set = isolate_->map_set();
+ uint32_t length = 0;
+ while (true) {
+ SerializationTag tag;
+ if (!PeekTag().To(&tag)) return MaybeHandle<JSMap>();
+ if (tag == SerializationTag::kEndJSMap) {
+ ConsumeTag(SerializationTag::kEndJSMap);
+ break;
+ }
+
+ Handle<Object> argv[2];
+ if (!ReadObject().ToHandle(&argv[0]) || !ReadObject().ToHandle(&argv[1]) ||
+ Execution::Call(isolate_, map_set, map, arraysize(argv), argv)
+ .is_null()) {
+ return MaybeHandle<JSMap>();
+ }
+ length += 2;
+ }
+
+ uint32_t expected_length;
+ if (!ReadVarint<uint32_t>().To(&expected_length) ||
+ length != expected_length) {
+ return MaybeHandle<JSMap>();
+ }
+ DCHECK(HasObjectWithID(id));
+ return scope.CloseAndEscape(map);
+}
+
+MaybeHandle<JSSet> ValueDeserializer::ReadJSSet() {
+ // If we are at the end of the stack, abort. This function may recurse.
+ STACK_CHECK(isolate_, MaybeHandle<JSSet>());
+
+ HandleScope scope(isolate_);
+ uint32_t id = next_id_++;
+ Handle<JSSet> set = isolate_->factory()->NewJSSet();
+ AddObjectWithID(id, set);
+ Handle<JSFunction> set_add = isolate_->set_add();
+ uint32_t length = 0;
+ while (true) {
+ SerializationTag tag;
+ if (!PeekTag().To(&tag)) return MaybeHandle<JSSet>();
+ if (tag == SerializationTag::kEndJSSet) {
+ ConsumeTag(SerializationTag::kEndJSSet);
+ break;
+ }
+
+ Handle<Object> argv[1];
+ if (!ReadObject().ToHandle(&argv[0]) ||
+ Execution::Call(isolate_, set_add, set, arraysize(argv), argv)
+ .is_null()) {
+ return MaybeHandle<JSSet>();
+ }
+ length++;
+ }
+
+ uint32_t expected_length;
+ if (!ReadVarint<uint32_t>().To(&expected_length) ||
+ length != expected_length) {
+ return MaybeHandle<JSSet>();
+ }
+ DCHECK(HasObjectWithID(id));
+ return scope.CloseAndEscape(set);
+}
+
+MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer() {
+ uint32_t id = next_id_++;
+ uint32_t byte_length;
+ Vector<const uint8_t> bytes;
+ if (!ReadVarint<uint32_t>().To(&byte_length) ||
+ byte_length > static_cast<size_t>(end_ - position_)) {
+ return MaybeHandle<JSArrayBuffer>();
+ }
+ const bool should_initialize = false;
+ Handle<JSArrayBuffer> array_buffer =
+ isolate_->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, pretenure_);
+ JSArrayBuffer::SetupAllocatingData(array_buffer, isolate_, byte_length,
+ should_initialize);
+ memcpy(array_buffer->backing_store(), position_, byte_length);
+ position_ += byte_length;
+ AddObjectWithID(id, array_buffer);
+ return array_buffer;
+}
+
+MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadTransferredJSArrayBuffer(
+ bool is_shared) {
+ uint32_t id = next_id_++;
+ uint32_t transfer_id;
+ Handle<SeededNumberDictionary> transfer_map;
+ if (!ReadVarint<uint32_t>().To(&transfer_id) ||
+ !array_buffer_transfer_map_.ToHandle(&transfer_map)) {
+ return MaybeHandle<JSArrayBuffer>();
+ }
+ int index = transfer_map->FindEntry(isolate_, transfer_id);
+ if (index == SeededNumberDictionary::kNotFound) {
+ return MaybeHandle<JSArrayBuffer>();
+ }
+ Handle<JSArrayBuffer> array_buffer(
+ JSArrayBuffer::cast(transfer_map->ValueAt(index)), isolate_);
+ DCHECK_EQ(is_shared, array_buffer->is_shared());
+ AddObjectWithID(id, array_buffer);
+ return array_buffer;
+}
+
+MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
+ Handle<JSArrayBuffer> buffer) {
+ uint32_t buffer_byte_length = NumberToUint32(buffer->byte_length());
+ uint8_t tag = 0;
+ uint32_t byte_offset = 0;
+ uint32_t byte_length = 0;
+ if (!ReadVarint<uint8_t>().To(&tag) ||
+ !ReadVarint<uint32_t>().To(&byte_offset) ||
+ !ReadVarint<uint32_t>().To(&byte_length) ||
+ byte_offset > buffer_byte_length ||
+ byte_length > buffer_byte_length - byte_offset) {
+ return MaybeHandle<JSArrayBufferView>();
+ }
+ uint32_t id = next_id_++;
+ ExternalArrayType external_array_type = kExternalInt8Array;
+ unsigned element_size = 0;
+ switch (static_cast<ArrayBufferViewTag>(tag)) {
+ case ArrayBufferViewTag::kDataView: {
+ Handle<JSDataView> data_view =
+ isolate_->factory()->NewJSDataView(buffer, byte_offset, byte_length);
+ AddObjectWithID(id, data_view);
+ return data_view;
+ }
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case ArrayBufferViewTag::k##Type##Array: \
+ external_array_type = kExternal##Type##Array; \
+ element_size = size; \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ }
+ if (byte_offset % element_size != 0 || byte_length % element_size != 0) {
+ return MaybeHandle<JSArrayBufferView>();
+ }
+ Handle<JSTypedArray> typed_array = isolate_->factory()->NewJSTypedArray(
+ external_array_type, buffer, byte_offset, byte_length / element_size,
+ pretenure_);
+ AddObjectWithID(id, typed_array);
+ return typed_array;
+}
+
+MaybeHandle<JSObject> ValueDeserializer::ReadHostObject() {
+ if (!delegate_) return MaybeHandle<JSObject>();
+ STACK_CHECK(isolate_, MaybeHandle<JSObject>());
+ uint32_t id = next_id_++;
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ v8::Local<v8::Object> object;
+ if (!delegate_->ReadHostObject(v8_isolate).ToLocal(&object)) {
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate_, JSObject);
+ return MaybeHandle<JSObject>();
+ }
+ Handle<JSObject> js_object =
+ Handle<JSObject>::cast(Utils::OpenHandle(*object));
+ AddObjectWithID(id, js_object);
+ return js_object;
+}
+
+// Copies a vector of property values into an object, given the map that should
+// be used.
+static void CommitProperties(Handle<JSObject> object, Handle<Map> map,
+ const std::vector<Handle<Object>>& properties) {
+ JSObject::AllocateStorageForMap(object, map);
+ DCHECK(!object->map()->is_dictionary_map());
+
+ DisallowHeapAllocation no_gc;
+ DescriptorArray* descriptors = object->map()->instance_descriptors();
+ for (unsigned i = 0; i < properties.size(); i++) {
+ object->WriteToField(i, descriptors->GetDetails(i), *properties[i]);
+ }
+}
+
Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
- Handle<JSObject> object, SerializationTag end_tag) {
- for (uint32_t num_properties = 0;; num_properties++) {
+ Handle<JSObject> object, SerializationTag end_tag,
+ bool can_use_transitions) {
+ uint32_t num_properties = 0;
+
+ // Fast path (following map transitions).
+ if (can_use_transitions) {
+ bool transitioning = true;
+ Handle<Map> map(object->map(), isolate_);
+ DCHECK(!map->is_dictionary_map());
+ DCHECK(map->instance_descriptors()->IsEmpty());
+ std::vector<Handle<Object>> properties;
+ properties.reserve(8);
+
+ while (transitioning) {
+ // If there are no more properties, finish.
+ SerializationTag tag;
+ if (!PeekTag().To(&tag)) return Nothing<uint32_t>();
+ if (tag == end_tag) {
+ ConsumeTag(end_tag);
+ CommitProperties(object, map, properties);
+ CHECK_LT(properties.size(), std::numeric_limits<uint32_t>::max());
+ return Just(static_cast<uint32_t>(properties.size()));
+ }
+
+ // Determine the key to be used and the target map to transition to, if
+ // possible. Transitioning may abort if the key is not a string, or if no
+ // transition was found.
+ Handle<Object> key;
+ Handle<Map> target;
+ Handle<String> expected_key = TransitionArray::ExpectedTransitionKey(map);
+ if (!expected_key.is_null() && ReadExpectedString(expected_key)) {
+ key = expected_key;
+ target = TransitionArray::ExpectedTransitionTarget(map);
+ } else {
+ if (!ReadObject().ToHandle(&key)) return Nothing<uint32_t>();
+ if (key->IsString()) {
+ key =
+ isolate_->factory()->InternalizeString(Handle<String>::cast(key));
+ target = TransitionArray::FindTransitionToField(
+ map, Handle<String>::cast(key));
+ transitioning = !target.is_null();
+ } else {
+ transitioning = false;
+ }
+ }
+
+ // Read the value that corresponds to it.
+ Handle<Object> value;
+ if (!ReadObject().ToHandle(&value)) return Nothing<uint32_t>();
+
+ // If still transitioning and the value fits the field representation
+ // (though generalization may be required), store the property value so
+ // that we can copy them all at once. Otherwise, stop transitioning.
+ if (transitioning) {
+ int descriptor = static_cast<int>(properties.size());
+ PropertyDetails details =
+ target->instance_descriptors()->GetDetails(descriptor);
+ Representation expected_representation = details.representation();
+ if (value->FitsRepresentation(expected_representation)) {
+ if (expected_representation.IsHeapObject() &&
+ !target->instance_descriptors()
+ ->GetFieldType(descriptor)
+ ->NowContains(value)) {
+ Handle<FieldType> value_type =
+ value->OptimalType(isolate_, expected_representation);
+ Map::GeneralizeFieldType(target, descriptor,
+ expected_representation, value_type);
+ }
+ DCHECK(target->instance_descriptors()
+ ->GetFieldType(descriptor)
+ ->NowContains(value));
+ properties.push_back(value);
+ map = target;
+ continue;
+ } else {
+ transitioning = false;
+ }
+ }
+
+ // Fell out of transitioning fast path. Commit the properties gathered so
+ // far, and then start setting properties slowly instead.
+ DCHECK(!transitioning);
+ CHECK_LT(properties.size(), std::numeric_limits<uint32_t>::max());
+ CommitProperties(object, map, properties);
+ num_properties = static_cast<uint32_t>(properties.size());
+
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate_, object, key, &success, LookupIterator::OWN);
+ if (!success ||
+ JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE)
+ .is_null()) {
+ return Nothing<uint32_t>();
+ }
+ num_properties++;
+ }
+
+ // At this point, transitioning should be done, but at least one property
+ // should have been written (in the zero-property case, there is an early
+ // return).
+ DCHECK(!transitioning);
+ DCHECK_GE(num_properties, 1u);
+ }
+
+ // Slow path.
+ for (;; num_properties++) {
SerializationTag tag;
if (!PeekTag().To(&tag)) return Nothing<uint32_t>();
if (tag == end_tag) {
@@ -828,15 +1581,16 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
}
bool ValueDeserializer::HasObjectWithID(uint32_t id) {
- return id_map_->Has(isolate_, id);
+ return id < static_cast<unsigned>(id_map_->length()) &&
+ !id_map_->get(id)->IsTheHole(isolate_);
}
MaybeHandle<JSReceiver> ValueDeserializer::GetObjectWithID(uint32_t id) {
- int index = id_map_->FindEntry(isolate_, id);
- if (index == SeededNumberDictionary::kNotFound) {
+ if (id >= static_cast<unsigned>(id_map_->length())) {
return MaybeHandle<JSReceiver>();
}
- Object* value = id_map_->ValueAt(index);
+ Object* value = id_map_->get(id);
+ if (value->IsTheHole(isolate_)) return MaybeHandle<JSReceiver>();
DCHECK(value->IsJSReceiver());
return Handle<JSReceiver>(JSReceiver::cast(value), isolate_);
}
@@ -844,16 +1598,13 @@ MaybeHandle<JSReceiver> ValueDeserializer::GetObjectWithID(uint32_t id) {
void ValueDeserializer::AddObjectWithID(uint32_t id,
Handle<JSReceiver> object) {
DCHECK(!HasObjectWithID(id));
- const bool used_as_prototype = false;
- Handle<SeededNumberDictionary> new_dictionary =
- SeededNumberDictionary::AtNumberPut(id_map_, id, object,
- used_as_prototype);
+ Handle<FixedArray> new_array = FixedArray::SetAndGrow(id_map_, id, object);
// If the dictionary was reallocated, update the global handle.
- if (!new_dictionary.is_identical_to(id_map_)) {
+ if (!new_array.is_identical_to(id_map_)) {
GlobalHandles::Destroy(Handle<Object>::cast(id_map_).location());
- id_map_ = Handle<SeededNumberDictionary>::cast(
- isolate_->global_handles()->Create(*new_dictionary));
+ id_map_ = Handle<FixedArray>::cast(
+ isolate_->global_handles()->Create(*new_array));
}
}
@@ -878,8 +1629,7 @@ static Maybe<bool> SetPropertiesFromKeyValuePairs(Isolate* isolate,
MaybeHandle<Object>
ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
- if (version_ > 0) return MaybeHandle<Object>();
-
+ DCHECK_EQ(version_, 0);
HandleScope scope(isolate_);
std::vector<Handle<Object>> stack;
while (position_ < end_) {
@@ -901,8 +1651,8 @@ ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
size_t begin_properties =
stack.size() - 2 * static_cast<size_t>(num_properties);
- Handle<JSObject> js_object =
- isolate_->factory()->NewJSObject(isolate_->object_function());
+ Handle<JSObject> js_object = isolate_->factory()->NewJSObject(
+ isolate_->object_function(), pretenure_);
if (num_properties &&
!SetPropertiesFromKeyValuePairs(
isolate_, js_object, &stack[begin_properties], num_properties)
@@ -926,7 +1676,8 @@ ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
return MaybeHandle<Object>();
}
- Handle<JSArray> js_array = isolate_->factory()->NewJSArray(0);
+ Handle<JSArray> js_array = isolate_->factory()->NewJSArray(
+ 0, TERMINAL_FAST_ELEMENTS_KIND, pretenure_);
JSArray::SetLength(js_array, length);
size_t begin_properties =
stack.size() - 2 * static_cast<size_t>(num_properties);
@@ -941,9 +1692,12 @@ ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
new_object = js_array;
break;
}
- case SerializationTag::kEndDenseJSArray:
+ case SerializationTag::kEndDenseJSArray: {
// This was already broken in Chromium, and apparently wasn't missed.
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationError));
return MaybeHandle<Object>();
+ }
default:
if (!ReadObject().ToHandle(&new_object)) return MaybeHandle<Object>();
break;
@@ -959,7 +1713,11 @@ ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
#endif
position_ = end_;
- if (stack.size() != 1) return MaybeHandle<Object>();
+ if (stack.size() != 1) {
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationError));
+ return MaybeHandle<Object>();
+ }
return scope.CloseAndEscape(stack[0]);
}
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
index ab9c664899..27ce0c1207 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/value-serializer.h
@@ -12,16 +12,21 @@
#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
#include "src/identity-map.h"
+#include "src/messages.h"
#include "src/vector.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
class HeapNumber;
class Isolate;
+class JSArrayBuffer;
+class JSArrayBufferView;
class JSDate;
+class JSMap;
class JSRegExp;
+class JSSet;
class JSValue;
class Object;
class Oddball;
@@ -37,7 +42,7 @@ enum class SerializationTag : uint8_t;
*/
class ValueSerializer {
public:
- explicit ValueSerializer(Isolate* isolate);
+ ValueSerializer(Isolate* isolate, v8::ValueSerializer::Delegate* delegate);
~ValueSerializer();
/*
@@ -56,6 +61,23 @@ class ValueSerializer {
*/
std::vector<uint8_t> ReleaseBuffer() { return std::move(buffer_); }
+ /*
+ * Marks an ArrayBuffer as havings its contents transferred out of band.
+ * Pass the corresponding JSArrayBuffer in the deserializing context to
+ * ValueDeserializer::TransferArrayBuffer.
+ */
+ void TransferArrayBuffer(uint32_t transfer_id,
+ Handle<JSArrayBuffer> array_buffer);
+
+ /*
+ * Publicly exposed wire format writing methods.
+ * These are intended for use within the delegate's WriteHostObject method.
+ */
+ void WriteUint32(uint32_t value);
+ void WriteUint64(uint64_t value);
+ void WriteRawBytes(const void* source, size_t length);
+ void WriteDouble(double value);
+
private:
// Writing the wire format.
void WriteTag(SerializationTag tag);
@@ -63,7 +85,6 @@ class ValueSerializer {
void WriteVarint(T value);
template <typename T>
void WriteZigZag(T value);
- void WriteDouble(double value);
void WriteOneByteString(Vector<const uint8_t> chars);
void WriteTwoByteString(Vector<const uc16> chars);
uint8_t* ReserveRawBytes(size_t bytes);
@@ -75,20 +96,35 @@ class ValueSerializer {
void WriteString(Handle<String> string);
Maybe<bool> WriteJSReceiver(Handle<JSReceiver> receiver) WARN_UNUSED_RESULT;
Maybe<bool> WriteJSObject(Handle<JSObject> object) WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSObjectSlow(Handle<JSObject> object) WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArray(Handle<JSArray> array) WARN_UNUSED_RESULT;
void WriteJSDate(JSDate* date);
Maybe<bool> WriteJSValue(Handle<JSValue> value) WARN_UNUSED_RESULT;
void WriteJSRegExp(JSRegExp* regexp);
+ Maybe<bool> WriteJSMap(Handle<JSMap> map) WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSSet(Handle<JSSet> map) WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSArrayBuffer(JSArrayBuffer* array_buffer);
+ Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView* array_buffer);
+ Maybe<bool> WriteHostObject(Handle<JSObject> object) WARN_UNUSED_RESULT;
/*
* Reads the specified keys from the object and writes key-value pairs to the
* buffer. Returns the number of keys actually written, which may be smaller
* if some keys are not own properties when accessed.
*/
- Maybe<uint32_t> WriteJSObjectProperties(
+ Maybe<uint32_t> WriteJSObjectPropertiesSlow(
Handle<JSObject> object, Handle<FixedArray> keys) WARN_UNUSED_RESULT;
+ /*
+ * Asks the delegate to handle an error that occurred during data cloning, by
+ * throwing an exception appropriate for the host.
+ */
+ void ThrowDataCloneError(MessageTemplate::Template template_index);
+ V8_NOINLINE void ThrowDataCloneError(MessageTemplate::Template template_index,
+ Handle<Object> arg0);
+
Isolate* const isolate_;
+ v8::ValueSerializer::Delegate* const delegate_;
std::vector<uint8_t> buffer_;
Zone zone_;
@@ -98,6 +134,9 @@ class ValueSerializer {
IdentityMap<uint32_t> id_map_;
uint32_t next_id_ = 0;
+ // A similar map, for transferred array buffers.
+ IdentityMap<uint32_t> array_buffer_transfer_map_;
+
DISALLOW_COPY_AND_ASSIGN(ValueSerializer);
};
@@ -107,7 +146,8 @@ class ValueSerializer {
*/
class ValueDeserializer {
public:
- ValueDeserializer(Isolate* isolate, Vector<const uint8_t> data);
+ ValueDeserializer(Isolate* isolate, Vector<const uint8_t> data,
+ v8::ValueDeserializer::Delegate* delegate);
~ValueDeserializer();
/*
@@ -116,6 +156,13 @@ class ValueDeserializer {
Maybe<bool> ReadHeader() WARN_UNUSED_RESULT;
/*
+ * Reads the underlying wire format version. Likely mostly to be useful to
+ * legacy code reading old wire format versions. Must be called after
+ * ReadHeader.
+ */
+ uint32_t GetWireFormatVersion() const { return version_; }
+
+ /*
* Deserializes a V8 object from the buffer.
*/
MaybeHandle<Object> ReadObject() WARN_UNUSED_RESULT;
@@ -130,6 +177,22 @@ class ValueDeserializer {
MaybeHandle<Object> ReadObjectUsingEntireBufferForLegacyFormat()
WARN_UNUSED_RESULT;
+ /*
+ * Accepts the array buffer corresponding to the one passed previously to
+ * ValueSerializer::TransferArrayBuffer.
+ */
+ void TransferArrayBuffer(uint32_t transfer_id,
+ Handle<JSArrayBuffer> array_buffer);
+
+ /*
+ * Publicly exposed wire format writing methods.
+ * These are intended for use within the delegate's WriteHostObject method.
+ */
+ bool ReadUint32(uint32_t* value) WARN_UNUSED_RESULT;
+ bool ReadUint64(uint64_t* value) WARN_UNUSED_RESULT;
+ bool ReadDouble(double* value) WARN_UNUSED_RESULT;
+ bool ReadRawBytes(size_t length, const void** data) WARN_UNUSED_RESULT;
+
private:
// Reading the wire format.
Maybe<SerializationTag> PeekTag() const WARN_UNUSED_RESULT;
@@ -142,6 +205,14 @@ class ValueDeserializer {
Maybe<double> ReadDouble() WARN_UNUSED_RESULT;
Maybe<Vector<const uint8_t>> ReadRawBytes(int size) WARN_UNUSED_RESULT;
+ // Reads a string if it matches the one provided.
+ // Returns true if this was the case. Otherwise, nothing is consumed.
+ bool ReadExpectedString(Handle<String> expected) WARN_UNUSED_RESULT;
+
+ // Like ReadObject, but skips logic for special cases in simulating the
+ // "stack machine".
+ MaybeHandle<Object> ReadObjectInternal() WARN_UNUSED_RESULT;
+
// Reading V8 objects of specific kinds.
// The tag is assumed to have already been read.
MaybeHandle<String> ReadUtf8String() WARN_UNUSED_RESULT;
@@ -152,13 +223,22 @@ class ValueDeserializer {
MaybeHandle<JSDate> ReadJSDate() WARN_UNUSED_RESULT;
MaybeHandle<JSValue> ReadJSValue(SerializationTag tag) WARN_UNUSED_RESULT;
MaybeHandle<JSRegExp> ReadJSRegExp() WARN_UNUSED_RESULT;
+ MaybeHandle<JSMap> ReadJSMap() WARN_UNUSED_RESULT;
+ MaybeHandle<JSSet> ReadJSSet() WARN_UNUSED_RESULT;
+ MaybeHandle<JSArrayBuffer> ReadJSArrayBuffer() WARN_UNUSED_RESULT;
+ MaybeHandle<JSArrayBuffer> ReadTransferredJSArrayBuffer(bool is_shared)
+ WARN_UNUSED_RESULT;
+ MaybeHandle<JSArrayBufferView> ReadJSArrayBufferView(
+ Handle<JSArrayBuffer> buffer) WARN_UNUSED_RESULT;
+ MaybeHandle<JSObject> ReadHostObject() WARN_UNUSED_RESULT;
/*
* Reads key-value pairs into the object until the specified end tag is
* encountered. If successful, returns the number of properties read.
*/
Maybe<uint32_t> ReadJSObjectProperties(Handle<JSObject> object,
- SerializationTag end_tag);
+ SerializationTag end_tag,
+ bool can_use_transitions);
// Manipulating the map from IDs to reified objects.
bool HasObjectWithID(uint32_t id);
@@ -166,12 +246,17 @@ class ValueDeserializer {
void AddObjectWithID(uint32_t id, Handle<JSReceiver> object);
Isolate* const isolate_;
+ v8::ValueDeserializer::Delegate* const delegate_;
const uint8_t* position_;
const uint8_t* const end_;
+ PretenureFlag pretenure_;
uint32_t version_ = 0;
- Handle<SeededNumberDictionary> id_map_; // Always a global handle.
uint32_t next_id_ = 0;
+ // Always global handles.
+ Handle<FixedArray> id_map_;
+ MaybeHandle<SeededNumberDictionary> array_buffer_transfer_map_;
+
DISALLOW_COPY_AND_ASSIGN(ValueDeserializer);
};
diff --git a/deps/v8/src/wasm/ast-decoder.cc b/deps/v8/src/wasm/ast-decoder.cc
index 0f192508ba..02d1db5bda 100644
--- a/deps/v8/src/wasm/ast-decoder.cc
+++ b/deps/v8/src/wasm/ast-decoder.cc
@@ -7,7 +7,7 @@
#include "src/bit-vector.h"
#include "src/flags.h"
#include "src/handles.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
#include "src/wasm/ast-decoder.h"
#include "src/wasm/decoder.h"
@@ -36,6 +36,8 @@ namespace wasm {
error("Invalid opcode (enable with --" #flag ")"); \
break; \
}
+// TODO(titzer): this is only for intermediate migration.
+#define IMPLICIT_FUNCTION_END 1
// An SsaEnv environment carries the current local variable renaming
// as well as the current effect and control dependency in the TF graph.
@@ -68,62 +70,82 @@ struct Value {
LocalType type;
};
-// An entry on the control stack (i.e. if, block, loop).
-struct Control {
- const byte* pc;
- int stack_depth; // stack height at the beginning of the construct.
- SsaEnv* end_env; // end environment for the construct.
- SsaEnv* false_env; // false environment (only for if).
- SsaEnv* catch_env; // catch environment (only for try with catch).
- SsaEnv* finish_try_env; // the environment where a try with finally lives.
- TFNode* node; // result node for the construct.
- LocalType type; // result type for the construct.
- bool is_loop; // true if this is the inner label of a loop.
+struct TryInfo : public ZoneObject {
+ SsaEnv* catch_env;
+ TFNode* exception;
- bool is_if() const { return *pc == kExprIf; }
+ explicit TryInfo(SsaEnv* c) : catch_env(c), exception(nullptr) {}
+};
- bool is_try() const {
- return *pc == kExprTryCatch || *pc == kExprTryCatchFinally ||
- *pc == kExprTryFinally;
- }
+struct MergeValues {
+ uint32_t arity;
+ union {
+ Value* array;
+ Value first;
+ } vals; // Either multiple values or a single value.
- bool has_catch() const {
- return *pc == kExprTryCatch || *pc == kExprTryCatchFinally;
+ Value& first() {
+ DCHECK_GT(arity, 0u);
+ return arity == 1 ? vals.first : vals.array[0];
}
+};
- bool has_finally() const {
- return *pc == kExprTryCatchFinally || *pc == kExprTryFinally;
- }
+static Value* NO_VALUE = nullptr;
+
+enum ControlKind { kControlIf, kControlBlock, kControlLoop, kControlTry };
+
+// An entry on the control stack (i.e. if, block, loop).
+struct Control {
+ const byte* pc;
+ ControlKind kind;
+ int stack_depth; // stack height at the beginning of the construct.
+ SsaEnv* end_env; // end environment for the construct.
+ SsaEnv* false_env; // false environment (only for if).
+ TryInfo* try_info; // Information used for compiling try statements.
+ int32_t previous_catch; // The previous Control (on the stack) with a catch.
+
+ // Values merged into the end of this control construct.
+ MergeValues merge;
+
+ inline bool is_if() const { return kind == kControlIf; }
+ inline bool is_block() const { return kind == kControlBlock; }
+ inline bool is_loop() const { return kind == kControlLoop; }
+ inline bool is_try() const { return kind == kControlTry; }
// Named constructors.
- static Control Block(const byte* pc, int stack_depth, SsaEnv* end_env) {
- return {pc, stack_depth, end_env, nullptr, nullptr,
- nullptr, nullptr, kAstEnd, false};
+ static Control Block(const byte* pc, int stack_depth, SsaEnv* end_env,
+ int32_t previous_catch) {
+ return {pc, kControlBlock, stack_depth, end_env,
+ nullptr, nullptr, previous_catch, {0, {NO_VALUE}}};
}
static Control If(const byte* pc, int stack_depth, SsaEnv* end_env,
- SsaEnv* false_env) {
- return {pc, stack_depth, end_env, false_env, nullptr,
- nullptr, nullptr, kAstStmt, false};
+ SsaEnv* false_env, int32_t previous_catch) {
+ return {pc, kControlIf, stack_depth, end_env,
+ false_env, nullptr, previous_catch, {0, {NO_VALUE}}};
}
- static Control Loop(const byte* pc, int stack_depth, SsaEnv* end_env) {
- return {pc, stack_depth, end_env, nullptr, nullptr,
- nullptr, nullptr, kAstEnd, true};
+ static Control Loop(const byte* pc, int stack_depth, SsaEnv* end_env,
+ int32_t previous_catch) {
+ return {pc, kControlLoop, stack_depth, end_env,
+ nullptr, nullptr, previous_catch, {0, {NO_VALUE}}};
}
static Control Try(const byte* pc, int stack_depth, SsaEnv* end_env,
- SsaEnv* catch_env, SsaEnv* finish_try_env) {
- return {pc, stack_depth, end_env, nullptr, catch_env, finish_try_env,
- nullptr, kAstEnd, false};
+ Zone* zone, SsaEnv* catch_env, int32_t previous_catch) {
+ DCHECK_NOT_NULL(catch_env);
+ TryInfo* try_info = new (zone) TryInfo(catch_env);
+ return {pc, kControlTry, stack_depth, end_env,
+ nullptr, try_info, previous_catch, {0, {NO_VALUE}}};
}
};
// Macros that build nodes only if there is a graph and the current SSA
// environment is reachable from start. This avoids problems with malformed
// TF graphs when decoding inputs that have unreachable code.
-#define BUILD(func, ...) (build() ? builder_->func(__VA_ARGS__) : nullptr)
-#define BUILD0(func) (build() ? builder_->func() : nullptr)
+#define BUILD(func, ...) \
+ (build() ? CheckForException(builder_->func(__VA_ARGS__)) : nullptr)
+#define BUILD0(func) (build() ? CheckForException(builder_->func()) : nullptr)
// Generic Wasm bytecode decoder with utilities for decoding operands,
// lengths, etc.
@@ -150,17 +172,18 @@ class WasmDecoder : public Decoder {
}
return true;
}
- error(pc, pc + 1, "invalid local index");
+ error(pc, pc + 1, "invalid local index: %u", operand.index);
return false;
}
inline bool Validate(const byte* pc, GlobalIndexOperand& operand) {
ModuleEnv* m = module_;
if (m && m->module && operand.index < m->module->globals.size()) {
- operand.type = m->module->globals[operand.index].type;
+ operand.global = &m->module->globals[operand.index];
+ operand.type = operand.global->type;
return true;
}
- error(pc, pc + 1, "invalid global index");
+ error(pc, pc + 1, "invalid global index: %u", operand.index);
return false;
}
@@ -175,16 +198,9 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, CallFunctionOperand& operand) {
if (Complete(pc, operand)) {
- uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
- if (operand.arity != expected) {
- error(pc, pc + 1,
- "arity mismatch in direct function call (expected %u, got %u)",
- expected, operand.arity);
- return false;
- }
return true;
}
- error(pc, pc + 1, "invalid function index");
+ error(pc, pc + 1, "invalid function index: %u", operand.index);
return false;
}
@@ -199,161 +215,28 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, CallIndirectOperand& operand) {
if (Complete(pc, operand)) {
- uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
- if (operand.arity != expected) {
- error(pc, pc + 1,
- "arity mismatch in indirect function call (expected %u, got %u)",
- expected, operand.arity);
- return false;
- }
- return true;
- }
- error(pc, pc + 1, "invalid signature index");
- return false;
- }
-
- inline bool Complete(const byte* pc, CallImportOperand& operand) {
- ModuleEnv* m = module_;
- if (m && m->module && operand.index < m->module->import_table.size()) {
- operand.sig = m->module->import_table[operand.index].sig;
- return true;
- }
- return false;
- }
-
- inline bool Validate(const byte* pc, CallImportOperand& operand) {
- if (Complete(pc, operand)) {
- uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
- if (operand.arity != expected) {
- error(pc, pc + 1, "arity mismatch in import call (expected %u, got %u)",
- expected, operand.arity);
- return false;
- }
return true;
}
- error(pc, pc + 1, "invalid signature index");
+ error(pc, pc + 1, "invalid signature index: #%u", operand.index);
return false;
}
inline bool Validate(const byte* pc, BreakDepthOperand& operand,
ZoneVector<Control>& control) {
- if (operand.arity > 1) {
- error(pc, pc + 1, "invalid arity for br or br_if");
- return false;
- }
if (operand.depth < control.size()) {
operand.target = &control[control.size() - operand.depth - 1];
return true;
}
- error(pc, pc + 1, "invalid break depth");
+ error(pc, pc + 1, "invalid break depth: %u", operand.depth);
return false;
}
bool Validate(const byte* pc, BranchTableOperand& operand,
size_t block_depth) {
- if (operand.arity > 1) {
- error(pc, pc + 1, "invalid arity for break");
- return false;
- }
- // Verify table.
- for (uint32_t i = 0; i < operand.table_count + 1; ++i) {
- uint32_t target = operand.read_entry(this, i);
- if (target >= block_depth) {
- error(operand.table + i * 2, "improper branch in br_table");
- return false;
- }
- }
+ // TODO(titzer): add extra redundant validation for br_table here?
return true;
}
- unsigned OpcodeArity(const byte* pc) {
-#define DECLARE_ARITY(name, ...) \
- static const LocalType kTypes_##name[] = {__VA_ARGS__}; \
- static const int kArity_##name = \
- static_cast<int>(arraysize(kTypes_##name) - 1);
-
- FOREACH_SIGNATURE(DECLARE_ARITY);
-#undef DECLARE_ARITY
-
- switch (static_cast<WasmOpcode>(*pc)) {
- case kExprI8Const:
- case kExprI32Const:
- case kExprI64Const:
- case kExprF64Const:
- case kExprF32Const:
- case kExprGetLocal:
- case kExprGetGlobal:
- case kExprNop:
- case kExprUnreachable:
- case kExprEnd:
- case kExprBlock:
- case kExprThrow:
- case kExprTryCatch:
- case kExprTryCatchFinally:
- case kExprTryFinally:
- case kExprFinally:
- case kExprLoop:
- return 0;
-
- case kExprSetGlobal:
- case kExprSetLocal:
- case kExprElse:
- case kExprCatch:
- return 1;
-
- case kExprBr: {
- BreakDepthOperand operand(this, pc);
- return operand.arity;
- }
- case kExprBrIf: {
- BreakDepthOperand operand(this, pc);
- return 1 + operand.arity;
- }
- case kExprBrTable: {
- BranchTableOperand operand(this, pc);
- return 1 + operand.arity;
- }
-
- case kExprIf:
- return 1;
- case kExprSelect:
- return 3;
-
- case kExprCallFunction: {
- CallFunctionOperand operand(this, pc);
- return operand.arity;
- }
- case kExprCallIndirect: {
- CallIndirectOperand operand(this, pc);
- return 1 + operand.arity;
- }
- case kExprCallImport: {
- CallImportOperand operand(this, pc);
- return operand.arity;
- }
- case kExprReturn: {
- ReturnArityOperand operand(this, pc);
- return operand.arity;
- }
-
-#define DECLARE_OPCODE_CASE(name, opcode, sig) \
- case kExpr##name: \
- return kArity_##sig;
-
- FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_MISC_MEM_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_SIMPLE_MEM_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_ASMJS_COMPAT_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_SIMD_OPCODE(DECLARE_OPCODE_CASE)
-#undef DECLARE_OPCODE_CASE
- default:
- UNREACHABLE();
- return 0;
- }
- }
-
unsigned OpcodeLength(const byte* pc) {
switch (static_cast<WasmOpcode>(*pc)) {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
@@ -361,7 +244,7 @@ class WasmDecoder : public Decoder {
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
{
- MemoryAccessOperand operand(this, pc);
+ MemoryAccessOperand operand(this, pc, UINT32_MAX);
return 1 + operand.length;
}
case kExprBr:
@@ -383,12 +266,17 @@ class WasmDecoder : public Decoder {
CallIndirectOperand operand(this, pc);
return 1 + operand.length;
}
- case kExprCallImport: {
- CallImportOperand operand(this, pc);
+
+ case kExprTry:
+ case kExprIf: // fall thru
+ case kExprLoop:
+ case kExprBlock: {
+ BlockTypeOperand operand(this, pc);
return 1 + operand.length;
}
case kExprSetLocal:
+ case kExprTeeLocal:
case kExprGetLocal:
case kExprCatch: {
LocalIndexOperand operand(this, pc);
@@ -396,7 +284,8 @@ class WasmDecoder : public Decoder {
}
case kExprBrTable: {
BranchTableOperand operand(this, pc);
- return 1 + operand.length;
+ BranchTableIterator iterator(this, operand);
+ return 1 + iterator.length();
}
case kExprI32Const: {
ImmI32Operand operand(this, pc);
@@ -412,17 +301,14 @@ class WasmDecoder : public Decoder {
return 5;
case kExprF64Const:
return 9;
- case kExprReturn: {
- ReturnArityOperand operand(this, pc);
- return 1 + operand.length;
- }
-
default:
return 1;
}
}
};
+static const int32_t kNullCatch = -1;
+
// The full WASM decoder for bytecode. Both verifies bytecode and generates
// a TurboFan IR graph.
class WasmFullDecoder : public WasmDecoder {
@@ -434,7 +320,9 @@ class WasmFullDecoder : public WasmDecoder {
base_(body.base),
local_type_vec_(zone),
stack_(zone),
- control_(zone) {
+ control_(zone),
+ last_end_found_(false),
+ current_catch_(kNullCatch) {
local_types_ = &local_type_vec_;
}
@@ -447,7 +335,7 @@ class WasmFullDecoder : public WasmDecoder {
control_.clear();
if (end_ < pc_) {
- error(pc_, "function body end < start");
+ error("function body end < start");
return false;
}
@@ -457,23 +345,55 @@ class WasmFullDecoder : public WasmDecoder {
if (failed()) return TraceFailed();
+#if IMPLICIT_FUNCTION_END
+ // With implicit end support (old style), the function block
+ // remains on the stack. Other control blocks are an error.
+ if (control_.size() > 1) {
+ error(pc_, control_.back().pc, "unterminated control structure");
+ return TraceFailed();
+ }
+
+ // Assume an implicit end to the function body block.
+ if (control_.size() == 1) {
+ Control* c = &control_.back();
+ if (ssa_env_->go()) {
+ FallThruTo(c);
+ }
+
+ if (c->end_env->go()) {
+ // Push the end values onto the stack.
+ stack_.resize(c->stack_depth);
+ if (c->merge.arity == 1) {
+ stack_.push_back(c->merge.vals.first);
+ } else {
+ for (unsigned i = 0; i < c->merge.arity; i++) {
+ stack_.push_back(c->merge.vals.array[i]);
+ }
+ }
+
+ TRACE(" @%-8d #xx:%-20s|", startrel(pc_), "ImplicitReturn");
+ SetEnv("function:end", c->end_env);
+ DoReturn();
+ TRACE("\n");
+ }
+ }
+#else
if (!control_.empty()) {
error(pc_, control_.back().pc, "unterminated control structure");
return TraceFailed();
}
- if (ssa_env_->go()) {
- TRACE(" @%-6d #xx:%-20s|", startrel(pc_), "ImplicitReturn");
- DoReturn();
- if (failed()) return TraceFailed();
- TRACE("\n");
+ if (!last_end_found_) {
+ error("function body must end with \"end\" opcode.");
+ return false;
}
+#endif
if (FLAG_trace_wasm_decode_time) {
double ms = decode_timer.Elapsed().InMillisecondsF();
- PrintF("wasm-decode ok (%0.3f ms)\n\n", ms);
+ PrintF("wasm-decode %s (%0.3f ms)\n\n", ok() ? "ok" : "failed", ms);
} else {
- TRACE("wasm-decode ok\n\n");
+ TRACE("wasm-decode %s\n\n", ok() ? "ok" : "failed");
}
return true;
@@ -526,6 +446,11 @@ class WasmFullDecoder : public WasmDecoder {
ZoneVector<LocalType> local_type_vec_; // types of local variables.
ZoneVector<Value> stack_; // stack of values.
ZoneVector<Control> control_; // stack of blocks, loops, and ifs.
+ bool last_end_found_;
+
+ int32_t current_catch_;
+
+ TryInfo* current_try_info() { return control_[current_catch_].try_info; }
inline bool build() { return builder_ && ssa_env_->go(); }
@@ -574,6 +499,8 @@ class WasmFullDecoder : public WasmDecoder {
return builder_->Float32Constant(0);
case kAstF64:
return builder_->Float64Constant(0);
+ case kAstS128:
+ return builder_->DefaultS128Value();
default:
UNREACHABLE();
return nullptr;
@@ -603,8 +530,13 @@ class WasmFullDecoder : public WasmDecoder {
}
// Decode local declarations, if any.
uint32_t entries = consume_u32v("local decls count");
+ TRACE("local decls count: %u\n", entries);
while (entries-- > 0 && pc_ < limit_) {
uint32_t count = consume_u32v("local count");
+ if (count > kMaxNumWasmLocals) {
+ error(pc_ - 1, "local count too large");
+ return;
+ }
byte code = consume_u8("local type");
LocalType type;
switch (code) {
@@ -620,6 +552,9 @@ class WasmFullDecoder : public WasmDecoder {
case kLocalF64:
type = kAstF64;
break;
+ case kLocalS128:
+ type = kAstS128;
+ break;
default:
error(pc_ - 1, "invalid local type");
return;
@@ -636,82 +571,68 @@ class WasmFullDecoder : public WasmDecoder {
reinterpret_cast<const void*>(limit_), baserel(pc_),
static_cast<int>(limit_ - start_), builder_ ? "graph building" : "");
+ {
+ // Set up initial function block.
+ SsaEnv* break_env = ssa_env_;
+ SetEnv("initial env", Steal(break_env));
+ PushBlock(break_env);
+ Control* c = &control_.back();
+ c->merge.arity = static_cast<uint32_t>(sig_->return_count());
+
+ if (c->merge.arity == 1) {
+ c->merge.vals.first = {pc_, nullptr, sig_->GetReturn(0)};
+ } else if (c->merge.arity > 1) {
+ c->merge.vals.array = zone_->NewArray<Value>(c->merge.arity);
+ for (unsigned i = 0; i < c->merge.arity; i++) {
+ c->merge.vals.array[i] = {pc_, nullptr, sig_->GetReturn(i)};
+ }
+ }
+ }
+
if (pc_ >= limit_) return; // Nothing to do.
while (true) { // decoding loop.
unsigned len = 1;
WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
- TRACE(" @%-6d #%02x:%-20s|", startrel(pc_), opcode,
- WasmOpcodes::ShortOpcodeName(opcode));
+ if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
+ TRACE(" @%-8d #%02x:%-20s|", startrel(pc_), opcode,
+ WasmOpcodes::ShortOpcodeName(opcode));
+ }
FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (sig) {
- // Fast case of a simple operator.
- TFNode* node;
- switch (sig->parameter_count()) {
- case 1: {
- Value val = Pop(0, sig->GetParam(0));
- node = BUILD(Unop, opcode, val.node, position());
- break;
- }
- case 2: {
- Value rval = Pop(1, sig->GetParam(1));
- Value lval = Pop(0, sig->GetParam(0));
- node = BUILD(Binop, opcode, lval.node, rval.node, position());
- break;
- }
- default:
- UNREACHABLE();
- node = nullptr;
- break;
- }
- Push(GetReturnType(sig), node);
+ BuildSimpleOperator(opcode, sig);
} else {
// Complex bytecode.
switch (opcode) {
case kExprNop:
- Push(kAstStmt, nullptr);
break;
case kExprBlock: {
// The break environment is the outer environment.
+ BlockTypeOperand operand(this, pc_);
SsaEnv* break_env = ssa_env_;
PushBlock(break_env);
SetEnv("block:start", Steal(break_env));
+ SetBlockType(&control_.back(), operand);
+ len = 1 + operand.length;
break;
}
case kExprThrow: {
CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
- Pop(0, kAstI32);
-
- // TODO(jpp): start exception propagation.
+ Value value = Pop(0, kAstI32);
+ BUILD(Throw, value.node);
break;
}
- case kExprTryCatch: {
+ case kExprTry: {
CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
+ BlockTypeOperand operand(this, pc_);
SsaEnv* outer_env = ssa_env_;
SsaEnv* try_env = Steal(outer_env);
- SsaEnv* catch_env = Split(try_env);
- PushTry(outer_env, catch_env, nullptr);
+ SsaEnv* catch_env = UnreachableEnv();
+ PushTry(outer_env, catch_env);
SetEnv("try_catch:start", try_env);
- break;
- }
- case kExprTryCatchFinally: {
- CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
- SsaEnv* outer_env = ssa_env_;
- SsaEnv* try_env = Steal(outer_env);
- SsaEnv* catch_env = Split(try_env);
- SsaEnv* finally_env = Split(try_env);
- PushTry(finally_env, catch_env, outer_env);
- SetEnv("try_catch_finally:start", try_env);
- break;
- }
- case kExprTryFinally: {
- CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
- SsaEnv* outer_env = ssa_env_;
- SsaEnv* try_env = Steal(outer_env);
- SsaEnv* finally_env = Split(outer_env);
- PushTry(finally_env, nullptr, outer_env);
- SetEnv("try_finally:start", try_env);
+ SetBlockType(&control_.back(), operand);
+ len = 1 + operand.length;
break;
}
case kExprCatch: {
@@ -720,97 +641,57 @@ class WasmFullDecoder : public WasmDecoder {
len = 1 + operand.length;
if (control_.empty()) {
- error(pc_, "catch does not match a any try");
+ error("catch does not match any try");
break;
}
Control* c = &control_.back();
- if (!c->has_catch()) {
- error(pc_, "catch does not match a try with catch");
+ if (!c->is_try()) {
+ error("catch does not match any try");
break;
}
- if (c->catch_env == nullptr) {
+ if (c->try_info->catch_env == nullptr) {
error(pc_, "catch already present for try with catch");
break;
}
- Goto(ssa_env_, c->end_env);
+ if (ssa_env_->go()) {
+ MergeValuesInto(c);
+ }
+ stack_.resize(c->stack_depth);
- SsaEnv* catch_env = c->catch_env;
- c->catch_env = nullptr;
+ DCHECK_NOT_NULL(c->try_info);
+ SsaEnv* catch_env = c->try_info->catch_env;
+ c->try_info->catch_env = nullptr;
SetEnv("catch:begin", catch_env);
+ current_catch_ = c->previous_catch;
if (Validate(pc_, operand)) {
- // TODO(jpp): figure out how thrown value is propagated. It is
- // unlikely to be a value on the stack.
if (ssa_env_->locals) {
- ssa_env_->locals[operand.index] = nullptr;
+ TFNode* exception_as_i32 =
+ BUILD(Catch, c->try_info->exception, position());
+ ssa_env_->locals[operand.index] = exception_as_i32;
}
}
- PopUpTo(c->stack_depth);
-
- break;
- }
- case kExprFinally: {
- CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
- if (control_.empty()) {
- error(pc_, "finally does not match a any try");
- break;
- }
-
- Control* c = &control_.back();
- if (c->has_catch() && c->catch_env != nullptr) {
- error(pc_, "missing catch for try with catch and finally");
- break;
- }
-
- if (!c->has_finally()) {
- error(pc_, "finally does not match a try with finally");
- break;
- }
-
- if (c->finish_try_env == nullptr) {
- error(pc_, "finally already present for try with finally");
- break;
- }
-
- // ssa_env_ is either the env for either the try or the catch, but
- // it does not matter: either way we need to direct the control flow
- // to the end_env, which is the env for the finally.
- // c->finish_try_env is the the environment enclosing the try block.
- Goto(ssa_env_, c->end_env);
-
- PopUpTo(c->stack_depth);
-
- // The current environment becomes end_env, and finish_try_env
- // becomes the new end_env. This ensures that any control flow
- // leaving a try block up to now will do so by branching to the
- // finally block. Setting the end_env to be finish_try_env ensures
- // that kExprEnd below can handle the try block as it would any
- // other block construct.
- SsaEnv* finally_env = c->end_env;
- c->end_env = c->finish_try_env;
- SetEnv("finally:begin", finally_env);
- c->finish_try_env = nullptr;
-
break;
}
case kExprLoop: {
- // The break environment is the outer environment.
- SsaEnv* break_env = ssa_env_;
- PushBlock(break_env);
- SsaEnv* finish_try_env = Steal(break_env);
+ BlockTypeOperand operand(this, pc_);
+ SsaEnv* finish_try_env = Steal(ssa_env_);
// The continue environment is the inner environment.
PrepareForLoop(pc_, finish_try_env);
SetEnv("loop:start", Split(finish_try_env));
ssa_env_->SetNotMerged();
PushLoop(finish_try_env);
+ SetBlockType(&control_.back(), operand);
+ len = 1 + operand.length;
break;
}
case kExprIf: {
// Condition on top of stack. Split environments for branches.
+ BlockTypeOperand operand(this, pc_);
Value cond = Pop(0, kAstI32);
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
@@ -822,11 +703,13 @@ class WasmFullDecoder : public WasmDecoder {
true_env->control = if_true;
PushIf(end_env, false_env);
SetEnv("if:true", true_env);
+ SetBlockType(&control_.back(), operand);
+ len = 1 + operand.length;
break;
}
case kExprElse: {
if (control_.empty()) {
- error(pc_, "else does not match any if");
+ error("else does not match any if");
break;
}
Control* c = &control_.back();
@@ -838,31 +721,38 @@ class WasmFullDecoder : public WasmDecoder {
error(pc_, c->pc, "else already present for if");
break;
}
- Value val = PopUpTo(c->stack_depth);
- MergeInto(c->end_env, &c->node, &c->type, val);
+ FallThruTo(c);
// Switch to environment for false branch.
+ stack_.resize(c->stack_depth);
SetEnv("if_else:false", c->false_env);
c->false_env = nullptr; // record that an else is already seen
break;
}
case kExprEnd: {
if (control_.empty()) {
- error(pc_, "end does not match any if or block");
- break;
+ error("end does not match any if, try, or block");
+ return;
}
const char* name = "block:end";
Control* c = &control_.back();
- Value val = PopUpTo(c->stack_depth);
- if (c->is_loop) {
- // Loops always push control in pairs.
- control_.pop_back();
- c = &control_.back();
- name = "loop:end";
- } else if (c->is_if()) {
+ if (c->is_loop()) {
+ // A loop just leaves the values on the stack.
+ TypeCheckLoopFallThru(c);
+ PopControl();
+ SetEnv("loop:end", ssa_env_);
+ break;
+ }
+ if (c->is_if()) {
if (c->false_env != nullptr) {
// End the true branch of a one-armed if.
Goto(c->false_env, c->end_env);
- val = {val.pc, nullptr, kAstStmt};
+ if (ssa_env_->go() && stack_.size() != c->stack_depth) {
+ error("end of if expected empty stack");
+ stack_.resize(c->stack_depth);
+ }
+ if (c->merge.arity > 0) {
+ error("non-void one-armed if");
+ }
name = "if:merge";
} else {
// End the false branch of a two-armed if.
@@ -871,28 +761,41 @@ class WasmFullDecoder : public WasmDecoder {
} else if (c->is_try()) {
name = "try:end";
- // try blocks do not yield a value.
- val = {val.pc, nullptr, kAstStmt};
-
- // validate that catch/finally were seen.
- if (c->catch_env != nullptr) {
- error(pc_, "missing catch in try with catch");
+ // validate that catch was seen.
+ if (c->try_info->catch_env != nullptr) {
+ error(pc_, "missing catch in try");
break;
}
+ }
+ FallThruTo(c);
+ SetEnv(name, c->end_env);
- if (c->finish_try_env != nullptr) {
- error(pc_, "missing finally in try with finally");
- break;
+ // Push the end values onto the stack.
+ stack_.resize(c->stack_depth);
+ if (c->merge.arity == 1) {
+ stack_.push_back(c->merge.vals.first);
+ } else {
+ for (unsigned i = 0; i < c->merge.arity; i++) {
+ stack_.push_back(c->merge.vals.array[i]);
}
}
- if (ssa_env_->go()) {
- MergeInto(c->end_env, &c->node, &c->type, val);
+ PopControl();
+
+ if (control_.empty()) {
+ // If the last (implicit) control was popped, check we are at end.
+ if (pc_ + 1 != end_) {
+ error(pc_, pc_ + 1, "trailing code after function end");
+ }
+ last_end_found_ = true;
+ if (ssa_env_->go()) {
+ // The result of the block is the return value.
+ TRACE(" @%-8d #xx:%-20s|", startrel(pc_), "ImplicitReturn");
+ DoReturn();
+ TRACE("\n");
+ }
+ return;
}
- SetEnv(name, c->end_env);
- stack_.resize(c->stack_depth);
- Push(c->type, c->node);
- control_.pop_back();
break;
}
case kExprSelect: {
@@ -901,7 +804,7 @@ class WasmFullDecoder : public WasmDecoder {
Value tval = Pop();
if (tval.type == kAstStmt || tval.type != fval.type) {
if (tval.type != kAstEnd && fval.type != kAstEnd) {
- error(pc_, "type mismatch in select");
+ error("type mismatch in select");
break;
}
}
@@ -923,39 +826,33 @@ class WasmFullDecoder : public WasmDecoder {
}
case kExprBr: {
BreakDepthOperand operand(this, pc_);
- Value val = {pc_, nullptr, kAstStmt};
- if (operand.arity) val = Pop();
if (Validate(pc_, operand, control_)) {
- BreakTo(operand.target, val);
+ BreakTo(operand.depth);
}
len = 1 + operand.length;
- Push(kAstEnd, nullptr);
+ EndControl();
break;
}
case kExprBrIf: {
BreakDepthOperand operand(this, pc_);
- Value cond = Pop(operand.arity, kAstI32);
- Value val = {pc_, nullptr, kAstStmt};
- if (operand.arity == 1) val = Pop();
- if (Validate(pc_, operand, control_)) {
+ Value cond = Pop(0, kAstI32);
+ if (ok() && Validate(pc_, operand, control_)) {
SsaEnv* fenv = ssa_env_;
SsaEnv* tenv = Split(fenv);
fenv->SetNotMerged();
BUILD(Branch, cond.node, &tenv->control, &fenv->control);
ssa_env_ = tenv;
- BreakTo(operand.target, val);
+ BreakTo(operand.depth);
ssa_env_ = fenv;
}
len = 1 + operand.length;
- Push(kAstStmt, nullptr);
break;
}
case kExprBrTable: {
BranchTableOperand operand(this, pc_);
+ BranchTableIterator iterator(this, operand);
if (Validate(pc_, operand, control_.size())) {
- Value key = Pop(operand.arity, kAstI32);
- Value val = {pc_, nullptr, kAstStmt};
- if (operand.arity == 1) val = Pop();
+ Value key = Pop(0, kAstI32);
if (failed()) break;
SsaEnv* break_env = ssa_env_;
@@ -965,42 +862,43 @@ class WasmFullDecoder : public WasmDecoder {
SsaEnv* copy = Steal(break_env);
ssa_env_ = copy;
- for (uint32_t i = 0; i < operand.table_count + 1; ++i) {
- uint16_t target = operand.read_entry(this, i);
+ while (iterator.has_next()) {
+ uint32_t i = iterator.cur_index();
+ const byte* pos = iterator.pc();
+ uint32_t target = iterator.next();
+ if (target >= control_.size()) {
+ error(pos, "improper branch in br_table");
+ break;
+ }
ssa_env_ = Split(copy);
ssa_env_->control = (i == operand.table_count)
? BUILD(IfDefault, sw)
: BUILD(IfValue, i, sw);
- int depth = target;
- Control* c = &control_[control_.size() - depth - 1];
- MergeInto(c->end_env, &c->node, &c->type, val);
+ BreakTo(target);
}
} else {
// Only a default target. Do the equivalent of br.
- uint16_t target = operand.read_entry(this, 0);
- int depth = target;
- Control* c = &control_[control_.size() - depth - 1];
- MergeInto(c->end_env, &c->node, &c->type, val);
+ const byte* pos = iterator.pc();
+ uint32_t target = iterator.next();
+ if (target >= control_.size()) {
+ error(pos, "improper branch in br_table");
+ break;
+ }
+ BreakTo(target);
}
// br_table ends the control flow like br.
ssa_env_ = break_env;
- Push(kAstStmt, nullptr);
}
- len = 1 + operand.length;
+ len = 1 + iterator.length();
break;
}
case kExprReturn: {
- ReturnArityOperand operand(this, pc_);
- if (operand.arity != sig_->return_count()) {
- error(pc_, pc_ + 1, "arity mismatch in return");
- }
DoReturn();
- len = 1 + operand.length;
break;
}
case kExprUnreachable: {
- Push(kAstEnd, BUILD(Unreachable, position()));
- ssa_env_->Kill(SsaEnv::kControlEnd);
+ BUILD(Unreachable, position());
+ EndControl();
break;
}
case kExprI8Const: {
@@ -1050,11 +948,24 @@ class WasmFullDecoder : public WasmDecoder {
if (Validate(pc_, operand)) {
Value val = Pop(0, local_type_vec_[operand.index]);
if (ssa_env_->locals) ssa_env_->locals[operand.index] = val.node;
+ }
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprTeeLocal: {
+ LocalIndexOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ Value val = Pop(0, local_type_vec_[operand.index]);
+ if (ssa_env_->locals) ssa_env_->locals[operand.index] = val.node;
Push(val.type, val.node);
}
len = 1 + operand.length;
break;
}
+ case kExprDrop: {
+ Pop();
+ break;
+ }
case kExprGetGlobal: {
GlobalIndexOperand operand(this, pc_);
if (Validate(pc_, operand)) {
@@ -1066,9 +977,13 @@ class WasmFullDecoder : public WasmDecoder {
case kExprSetGlobal: {
GlobalIndexOperand operand(this, pc_);
if (Validate(pc_, operand)) {
- Value val = Pop(0, operand.type);
- BUILD(SetGlobal, operand.index, val.node);
- Push(val.type, val.node);
+ if (operand.global->mutability) {
+ Value val = Pop(0, operand.type);
+ BUILD(SetGlobal, operand.index, val.node);
+ } else {
+ error(pc_, pc_ + 1, "immutable global #%u cannot be assigned",
+ operand.index);
+ }
}
len = 1 + operand.length;
break;
@@ -1088,7 +1003,6 @@ class WasmFullDecoder : public WasmDecoder {
case kExprI32LoadMem:
len = DecodeLoadMem(kAstI32, MachineType::Int32());
break;
-
case kExprI64LoadMem8S:
len = DecodeLoadMem(kAstI64, MachineType::Int8());
break;
@@ -1143,17 +1057,24 @@ class WasmFullDecoder : public WasmDecoder {
case kExprF64StoreMem:
len = DecodeStoreMem(kAstF64, MachineType::Float64());
break;
-
+ case kExprGrowMemory:
+ if (module_->origin != kAsmJsOrigin) {
+ Value val = Pop(0, kAstI32);
+ Push(kAstI32, BUILD(GrowMemory, val.node));
+ } else {
+ error("grow_memory is not supported for asmjs modules");
+ }
+ break;
case kExprMemorySize:
- Push(kAstI32, BUILD(MemSize, 0));
+ Push(kAstI32, BUILD(CurrentMemoryPages));
break;
case kExprCallFunction: {
CallFunctionOperand operand(this, pc_);
if (Validate(pc_, operand)) {
TFNode** buffer = PopArgs(operand.sig);
- TFNode* call =
- BUILD(CallDirect, operand.index, buffer, position());
- Push(GetReturnType(operand.sig), call);
+ TFNode** rets = nullptr;
+ BUILD(CallDirect, operand.index, buffer, &rets, position());
+ PushReturns(operand.sig, rets);
}
len = 1 + operand.length;
break;
@@ -1161,23 +1082,12 @@ class WasmFullDecoder : public WasmDecoder {
case kExprCallIndirect: {
CallIndirectOperand operand(this, pc_);
if (Validate(pc_, operand)) {
- TFNode** buffer = PopArgs(operand.sig);
Value index = Pop(0, kAstI32);
- if (buffer) buffer[0] = index.node;
- TFNode* call =
- BUILD(CallIndirect, operand.index, buffer, position());
- Push(GetReturnType(operand.sig), call);
- }
- len = 1 + operand.length;
- break;
- }
- case kExprCallImport: {
- CallImportOperand operand(this, pc_);
- if (Validate(pc_, operand)) {
TFNode** buffer = PopArgs(operand.sig);
- TFNode* call =
- BUILD(CallImport, operand.index, buffer, position());
- Push(GetReturnType(operand.sig), call);
+ if (buffer) buffer[0] = index.node;
+ TFNode** rets = nullptr;
+ BUILD(CallIndirect, operand.index, buffer, &rets, position());
+ PushReturns(operand.sig, rets);
}
len = 1 + operand.length;
break;
@@ -1187,20 +1097,34 @@ class WasmFullDecoder : public WasmDecoder {
len++;
byte simd_index = *(pc_ + 1);
opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
- DecodeSimdOpcode(opcode);
+ TRACE(" @%-4d #%02x #%02x:%-20s|", startrel(pc_), kSimdPrefix,
+ simd_index, WasmOpcodes::ShortOpcodeName(opcode));
+ len += DecodeSimdOpcode(opcode);
break;
}
- default:
- error("Invalid opcode");
- return;
+ default: {
+ // Deal with special asmjs opcodes.
+ if (module_ && module_->origin == kAsmJsOrigin) {
+ sig = WasmOpcodes::AsmjsSignature(opcode);
+ if (sig) {
+ BuildSimpleOperator(opcode, sig);
+ }
+ } else {
+ error("Invalid opcode");
+ return;
+ }
+ }
}
- } // end complex bytecode
+ }
#if DEBUG
if (FLAG_trace_wasm_decoder) {
for (size_t i = 0; i < stack_.size(); ++i) {
Value& val = stack_[i];
WasmOpcode opcode = static_cast<WasmOpcode>(*val.pc);
+ if (WasmOpcodes::IsPrefixOpcode(opcode)) {
+ opcode = static_cast<WasmOpcode>(opcode << 8 | *(val.pc + 1));
+ }
PrintF(" %c@%d:%s", WasmOpcodes::ShortNameOf(val.type),
static_cast<int>(val.pc - start_),
WasmOpcodes::ShortOpcodeName(opcode));
@@ -1215,7 +1139,8 @@ class WasmFullDecoder : public WasmDecoder {
PrintF("[%u]", operand.index);
break;
}
- case kExprSetLocal: {
+ case kExprSetLocal: // fallthru
+ case kExprTeeLocal: {
LocalIndexOperand operand(this, val.pc);
PrintF("[%u]", operand.index);
break;
@@ -1234,7 +1159,21 @@ class WasmFullDecoder : public WasmDecoder {
return;
}
} // end decode loop
- } // end DecodeFunctionBody()
+ }
+
+ void EndControl() { ssa_env_->Kill(SsaEnv::kControlEnd); }
+
+ void SetBlockType(Control* c, BlockTypeOperand& operand) {
+ c->merge.arity = operand.arity;
+ if (c->merge.arity == 1) {
+ c->merge.vals.first = {pc_, nullptr, operand.read_entry(0)};
+ } else if (c->merge.arity > 1) {
+ c->merge.vals.array = zone_->NewArray<Value>(c->merge.arity);
+ for (unsigned i = 0; i < c->merge.arity; i++) {
+ c->merge.vals.array[i] = {pc_, nullptr, operand.read_entry(i)};
+ }
+ }
+ }
TFNode** PopArgs(FunctionSig* sig) {
if (build()) {
@@ -1260,27 +1199,35 @@ class WasmFullDecoder : public WasmDecoder {
void PushBlock(SsaEnv* end_env) {
const int stack_depth = static_cast<int>(stack_.size());
- control_.emplace_back(Control::Block(pc_, stack_depth, end_env));
+ control_.emplace_back(
+ Control::Block(pc_, stack_depth, end_env, current_catch_));
}
void PushLoop(SsaEnv* end_env) {
const int stack_depth = static_cast<int>(stack_.size());
- control_.emplace_back(Control::Loop(pc_, stack_depth, end_env));
+ control_.emplace_back(
+ Control::Loop(pc_, stack_depth, end_env, current_catch_));
}
void PushIf(SsaEnv* end_env, SsaEnv* false_env) {
const int stack_depth = static_cast<int>(stack_.size());
- control_.emplace_back(Control::If(pc_, stack_depth, end_env, false_env));
+ control_.emplace_back(
+ Control::If(pc_, stack_depth, end_env, false_env, current_catch_));
}
- void PushTry(SsaEnv* end_env, SsaEnv* catch_env, SsaEnv* finish_try_env) {
+ void PushTry(SsaEnv* end_env, SsaEnv* catch_env) {
const int stack_depth = static_cast<int>(stack_.size());
- control_.emplace_back(
- Control::Try(pc_, stack_depth, end_env, catch_env, finish_try_env));
+ control_.emplace_back(Control::Try(pc_, stack_depth, end_env, zone_,
+ catch_env, current_catch_));
+ current_catch_ = static_cast<int32_t>(control_.size() - 1);
}
+ void PopControl() { control_.pop_back(); }
+
int DecodeLoadMem(LocalType type, MachineType mem_type) {
- MemoryAccessOperand operand(this, pc_);
+ MemoryAccessOperand operand(this, pc_,
+ ElementSizeLog2Of(mem_type.representation()));
+
Value index = Pop(0, kAstI32);
TFNode* node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
operand.alignment, position());
@@ -1289,24 +1236,45 @@ class WasmFullDecoder : public WasmDecoder {
}
int DecodeStoreMem(LocalType type, MachineType mem_type) {
- MemoryAccessOperand operand(this, pc_);
+ MemoryAccessOperand operand(this, pc_,
+ ElementSizeLog2Of(mem_type.representation()));
Value val = Pop(1, type);
Value index = Pop(0, kAstI32);
BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
val.node, position());
- Push(type, val.node);
return 1 + operand.length;
}
- void DecodeSimdOpcode(WasmOpcode opcode) {
- FunctionSig* sig = WasmOpcodes::Signature(opcode);
- compiler::NodeVector inputs(sig->parameter_count(), zone_);
- for (size_t i = sig->parameter_count(); i > 0; i--) {
- Value val = Pop(static_cast<int>(i - 1), sig->GetParam(i - 1));
- inputs[i - 1] = val.node;
+ unsigned DecodeSimdOpcode(WasmOpcode opcode) {
+ unsigned len = 0;
+ switch (opcode) {
+ case kExprI32x4ExtractLane: {
+ uint8_t lane = this->checked_read_u8(pc_, 2, "lane number");
+ if (lane < 0 || lane > 3) {
+ error(pc_, pc_ + 2, "invalid extract lane value");
+ }
+ TFNode* input = Pop(0, LocalType::kSimd128).node;
+ TFNode* node = BUILD(SimdExtractLane, opcode, lane, input);
+ Push(LocalType::kWord32, node);
+ len++;
+ break;
+ }
+ default: {
+ FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ if (sig != nullptr) {
+ compiler::NodeVector inputs(sig->parameter_count(), zone_);
+ for (size_t i = sig->parameter_count(); i > 0; i--) {
+ Value val = Pop(static_cast<int>(i - 1), sig->GetParam(i - 1));
+ inputs[i - 1] = val.node;
+ }
+ TFNode* node = BUILD(SimdOp, opcode, inputs);
+ Push(GetReturnType(sig), node);
+ } else {
+ error("invalid simd opcode");
+ }
+ }
}
- TFNode* node = BUILD(SimdOp, opcode, inputs);
- Push(GetReturnType(sig), node);
+ return len;
}
void DoReturn() {
@@ -1320,12 +1288,21 @@ class WasmFullDecoder : public WasmDecoder {
if (buffer) buffer[i] = val.node;
}
- Push(kAstEnd, BUILD(Return, count, buffer));
- ssa_env_->Kill(SsaEnv::kControlEnd);
+ BUILD(Return, count, buffer);
+ EndControl();
}
void Push(LocalType type, TFNode* node) {
- stack_.push_back({pc_, node, type});
+ if (type != kAstStmt && type != kAstEnd) {
+ stack_.push_back({pc_, node, type});
+ }
+ }
+
+ void PushReturns(FunctionSig* sig, TFNode** rets) {
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ // When verifying only, then {rets} will be null, so push null.
+ Push(sig->GetReturn(i), rets ? rets[i] : nullptr);
+ }
}
const char* SafeOpcodeNameAt(const byte* pc) {
@@ -1334,6 +1311,10 @@ class WasmFullDecoder : public WasmDecoder {
}
Value Pop(int index, LocalType expected) {
+ if (!ssa_env_->go()) {
+ // Unreachable code is essentially not typechecked.
+ return {pc_, nullptr, expected};
+ }
Value val = Pop();
if (val.type != expected) {
if (val.type != kAstEnd) {
@@ -1346,6 +1327,10 @@ class WasmFullDecoder : public WasmDecoder {
}
Value Pop() {
+ if (!ssa_env_->go()) {
+ // Unreachable code is essentially not typechecked.
+ return {pc_, nullptr, kAstEnd};
+ }
size_t limit = control_.empty() ? 0 : control_.back().stack_depth;
if (stack_.size() <= limit) {
Value val = {pc_, nullptr, kAstStmt};
@@ -1358,6 +1343,10 @@ class WasmFullDecoder : public WasmDecoder {
}
Value PopUpTo(int stack_depth) {
+ if (!ssa_env_->go()) {
+ // Unreachable code is essentially not typechecked.
+ return {pc_, nullptr, kAstEnd};
+ }
if (stack_depth == stack_.size()) {
Value val = {pc_, nullptr, kAstStmt};
return val;
@@ -1375,34 +1364,82 @@ class WasmFullDecoder : public WasmDecoder {
int startrel(const byte* ptr) { return static_cast<int>(ptr - start_); }
- void BreakTo(Control* block, Value& val) {
- if (block->is_loop) {
+ void BreakTo(unsigned depth) {
+ if (!ssa_env_->go()) return;
+ Control* c = &control_[control_.size() - depth - 1];
+ if (c->is_loop()) {
// This is the inner loop block, which does not have a value.
- Goto(ssa_env_, block->end_env);
+ Goto(ssa_env_, c->end_env);
} else {
- // Merge the value into the production for the block.
- MergeInto(block->end_env, &block->node, &block->type, val);
+ // Merge the value(s) into the end of the block.
+ if (static_cast<size_t>(c->stack_depth + c->merge.arity) >
+ stack_.size()) {
+ error(
+ pc_, pc_,
+ "expected at least %d values on the stack for br to @%d, found %d",
+ c->merge.arity, startrel(c->pc),
+ static_cast<int>(stack_.size() - c->stack_depth));
+ return;
+ }
+ MergeValuesInto(c);
+ }
+ }
+
+ void FallThruTo(Control* c) {
+ if (!ssa_env_->go()) return;
+ // Merge the value(s) into the end of the block.
+ int arity = static_cast<int>(c->merge.arity);
+ if (c->stack_depth + arity != stack_.size()) {
+ error(pc_, pc_, "expected %d elements on the stack for fallthru to @%d",
+ arity, startrel(c->pc));
+ return;
}
+ MergeValuesInto(c);
}
- void MergeInto(SsaEnv* target, TFNode** node, LocalType* type, Value& val) {
+ inline Value& GetMergeValueFromStack(Control* c, int i) {
+ return stack_[stack_.size() - c->merge.arity + i];
+ }
+
+ void TypeCheckLoopFallThru(Control* c) {
if (!ssa_env_->go()) return;
- DCHECK_NE(kAstEnd, val.type);
+ // Fallthru must match arity exactly.
+ int arity = static_cast<int>(c->merge.arity);
+ if (c->stack_depth + arity != stack_.size()) {
+ error(pc_, pc_, "expected %d elements on the stack for fallthru to @%d",
+ arity, startrel(c->pc));
+ return;
+ }
+ // Typecheck the values left on the stack.
+ for (unsigned i = 0; i < c->merge.arity; i++) {
+ Value& val = GetMergeValueFromStack(c, i);
+ Value& old =
+ c->merge.arity == 1 ? c->merge.vals.first : c->merge.vals.array[i];
+ if (val.type != old.type) {
+ error(pc_, pc_, "type error in merge[%d] (expected %s, got %s)", i,
+ WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
+ return;
+ }
+ }
+ }
+ void MergeValuesInto(Control* c) {
+ SsaEnv* target = c->end_env;
bool first = target->state == SsaEnv::kUnreachable;
Goto(ssa_env_, target);
- if (first) {
- // first merge to this environment; set the type and the node.
- *type = val.type;
- *node = val.node;
- } else if (val.type == *type && val.type != kAstStmt) {
- // merge with the existing value for this block.
- *node = CreateOrMergeIntoPhi(*type, target->control, *node, val.node);
- } else {
- // types don't match, or block is already a stmt.
- *type = kAstStmt;
- *node = nullptr;
+ for (unsigned i = 0; i < c->merge.arity; i++) {
+ Value& val = GetMergeValueFromStack(c, i);
+ Value& old =
+ c->merge.arity == 1 ? c->merge.vals.first : c->merge.vals.array[i];
+ if (val.type != old.type) {
+ error(pc_, pc_, "type error in merge[%d] (expected %s, got %s)", i,
+ WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
+ return;
+ }
+ old.node =
+ first ? val.node : CreateOrMergeIntoPhi(old.type, target->control,
+ old.node, val.node);
}
}
@@ -1442,6 +1479,45 @@ class WasmFullDecoder : public WasmDecoder {
}
}
+ TFNode* CheckForException(TFNode* node) {
+ if (node == nullptr) {
+ return nullptr;
+ }
+
+ const bool inside_try_scope = current_catch_ != kNullCatch;
+
+ if (!inside_try_scope) {
+ return node;
+ }
+
+ TFNode* if_success = nullptr;
+ TFNode* if_exception = nullptr;
+ if (!builder_->ThrowsException(node, &if_success, &if_exception)) {
+ return node;
+ }
+
+ SsaEnv* success_env = Steal(ssa_env_);
+ success_env->control = if_success;
+
+ SsaEnv* exception_env = Split(success_env);
+ exception_env->control = if_exception;
+ TryInfo* try_info = current_try_info();
+ Goto(exception_env, try_info->catch_env);
+ TFNode* exception = try_info->exception;
+ if (exception == nullptr) {
+ DCHECK_EQ(SsaEnv::kReached, try_info->catch_env->state);
+ try_info->exception = if_exception;
+ } else {
+ DCHECK_EQ(SsaEnv::kMerged, try_info->catch_env->state);
+ try_info->exception =
+ CreateOrMergeIntoPhi(kAstI32, try_info->catch_env->control,
+ try_info->exception, if_exception);
+ }
+
+ SetEnv("if_success", success_env);
+ return node;
+ }
+
void Goto(SsaEnv* from, SsaEnv* to) {
DCHECK_NOT_NULL(to);
if (!from->go()) return;
@@ -1630,16 +1706,15 @@ class WasmFullDecoder : public WasmDecoder {
case kExprLoop:
case kExprIf:
case kExprBlock:
- case kExprTryCatch:
- case kExprTryCatchFinally:
- case kExprTryFinally:
+ case kExprTry:
+ length = OpcodeLength(pc);
depth++;
- DCHECK_EQ(1, OpcodeLength(pc));
break;
- case kExprSetLocal: {
+ case kExprSetLocal: // fallthru
+ case kExprTeeLocal: {
LocalIndexOperand operand(this, pc);
if (assigned->length() > 0 &&
- static_cast<int>(operand.index) < assigned->length()) {
+ operand.index < static_cast<uint32_t>(assigned->length())) {
// Unverified code might have an out-of-bounds index.
assigned->Add(operand.index);
}
@@ -1664,11 +1739,33 @@ class WasmFullDecoder : public WasmDecoder {
DCHECK_EQ(pc_ - start_, offset); // overflows cannot happen
return offset;
}
+
+ inline void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
+ TFNode* node;
+ switch (sig->parameter_count()) {
+ case 1: {
+ Value val = Pop(0, sig->GetParam(0));
+ node = BUILD(Unop, opcode, val.node, position());
+ break;
+ }
+ case 2: {
+ Value rval = Pop(1, sig->GetParam(1));
+ Value lval = Pop(0, sig->GetParam(0));
+ node = BUILD(Binop, opcode, lval.node, rval.node, position());
+ break;
+ }
+ default:
+ UNREACHABLE();
+ node = nullptr;
+ break;
+ }
+ Push(GetReturnType(sig), node);
+ }
};
bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
const byte* end) {
- base::AccountingAllocator allocator;
+ AccountingAllocator allocator;
Zone tmp(&allocator);
FunctionBody body = {nullptr, nullptr, nullptr, start, end};
WasmFullDecoder decoder(&tmp, nullptr, body);
@@ -1686,7 +1783,7 @@ BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
}
}
-DecodeResult VerifyWasmCode(base::AccountingAllocator* allocator,
+DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
FunctionBody& body) {
Zone zone(allocator);
WasmFullDecoder decoder(&zone, nullptr, body);
@@ -1694,8 +1791,8 @@ DecodeResult VerifyWasmCode(base::AccountingAllocator* allocator,
return decoder.toResult<DecodeStruct*>(nullptr);
}
-DecodeResult BuildTFGraph(base::AccountingAllocator* allocator,
- TFBuilder* builder, FunctionBody& body) {
+DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
+ FunctionBody& body) {
Zone zone(allocator);
WasmFullDecoder decoder(&zone, builder, body);
decoder.Decode();
@@ -1707,18 +1804,13 @@ unsigned OpcodeLength(const byte* pc, const byte* end) {
return decoder.OpcodeLength(pc);
}
-unsigned OpcodeArity(const byte* pc, const byte* end) {
- WasmDecoder decoder(nullptr, nullptr, pc, end);
- return decoder.OpcodeArity(pc);
-}
-
void PrintAstForDebugging(const byte* start, const byte* end) {
- base::AccountingAllocator allocator;
+ AccountingAllocator allocator;
OFStream os(stdout);
PrintAst(&allocator, FunctionBodyForTesting(start, end), os, nullptr);
}
-bool PrintAst(base::AccountingAllocator* allocator, const FunctionBody& body,
+bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
std::ostream& os,
std::vector<std::tuple<uint32_t, int, int>>* offset_table) {
Zone zone(allocator);
@@ -1777,68 +1869,57 @@ bool PrintAst(base::AccountingAllocator* allocator, const FunctionBody& body,
}
switch (opcode) {
- case kExprIf:
case kExprElse:
+ os << " // @" << i.pc_offset();
+ control_depth++;
+ break;
case kExprLoop:
+ case kExprIf:
case kExprBlock:
- case kExprTryCatch:
- case kExprTryCatchFinally:
- case kExprTryFinally:
+ case kExprTry: {
+ BlockTypeOperand operand(&i, i.pc());
os << " // @" << i.pc_offset();
+ for (unsigned i = 0; i < operand.arity; i++) {
+ os << " " << WasmOpcodes::TypeName(operand.read_entry(i));
+ }
control_depth++;
break;
+ }
case kExprEnd:
os << " // @" << i.pc_offset();
control_depth--;
break;
case kExprBr: {
BreakDepthOperand operand(&i, i.pc());
- os << " // arity=" << operand.arity << " depth=" << operand.depth;
+ os << " // depth=" << operand.depth;
break;
}
case kExprBrIf: {
BreakDepthOperand operand(&i, i.pc());
- os << " // arity=" << operand.arity << " depth" << operand.depth;
+ os << " // depth=" << operand.depth;
break;
}
case kExprBrTable: {
BranchTableOperand operand(&i, i.pc());
- os << " // arity=" << operand.arity
- << " entries=" << operand.table_count;
+ os << " // entries=" << operand.table_count;
break;
}
case kExprCallIndirect: {
CallIndirectOperand operand(&i, i.pc());
+ os << " // sig #" << operand.index;
if (decoder.Complete(i.pc(), operand)) {
- os << " // sig #" << operand.index << ": " << *operand.sig;
- } else {
- os << " // arity=" << operand.arity << " sig #" << operand.index;
- }
- break;
- }
- case kExprCallImport: {
- CallImportOperand operand(&i, i.pc());
- if (decoder.Complete(i.pc(), operand)) {
- os << " // import #" << operand.index << ": " << *operand.sig;
- } else {
- os << " // arity=" << operand.arity << " import #" << operand.index;
+ os << ": " << *operand.sig;
}
break;
}
case kExprCallFunction: {
CallFunctionOperand operand(&i, i.pc());
+ os << " // function #" << operand.index;
if (decoder.Complete(i.pc(), operand)) {
- os << " // function #" << operand.index << ": " << *operand.sig;
- } else {
- os << " // arity=" << operand.arity << " function #" << operand.index;
+ os << ": " << *operand.sig;
}
break;
}
- case kExprReturn: {
- ReturnArityOperand operand(&i, i.pc());
- os << " // arity=" << operand.arity;
- break;
- }
default:
break;
}
diff --git a/deps/v8/src/wasm/ast-decoder.h b/deps/v8/src/wasm/ast-decoder.h
index c4f6c1679a..8c2c2c4734 100644
--- a/deps/v8/src/wasm/ast-decoder.h
+++ b/deps/v8/src/wasm/ast-decoder.h
@@ -21,6 +21,9 @@ class WasmGraphBuilder;
namespace wasm {
+const uint32_t kMaxNumWasmLocals = 8000000;
+struct WasmGlobal;
+
// Helpers for decoding different kinds of operands which follow bytecodes.
struct LocalIndexOperand {
uint32_t index;
@@ -79,39 +82,111 @@ struct ImmF64Operand {
struct GlobalIndexOperand {
uint32_t index;
LocalType type;
+ const WasmGlobal* global;
unsigned length;
inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
index = decoder->checked_read_u32v(pc, 1, &length, "global index");
+ global = nullptr;
type = kAstStmt;
}
};
+struct BlockTypeOperand {
+ uint32_t arity;
+ const byte* types; // pointer to encoded types for the block.
+ unsigned length;
+
+ inline BlockTypeOperand(Decoder* decoder, const byte* pc) {
+ uint8_t val = decoder->checked_read_u8(pc, 1, "block type");
+ LocalType type = kAstStmt;
+ length = 1;
+ arity = 0;
+ types = nullptr;
+ if (decode_local_type(val, &type)) {
+ arity = type == kAstStmt ? 0 : 1;
+ types = pc + 1;
+ } else {
+ // Handle multi-value blocks.
+ if (!FLAG_wasm_mv_prototype) {
+ decoder->error(pc, pc + 1, "invalid block arity > 1");
+ return;
+ }
+ if (val != kMultivalBlock) {
+ decoder->error(pc, pc + 1, "invalid block type");
+ return;
+ }
+ // Decode and check the types vector of the block.
+ unsigned len = 0;
+ uint32_t count = decoder->checked_read_u32v(pc, 2, &len, "block arity");
+ // {count} is encoded as {arity-2}, so that a {0} count here corresponds
+ // to a block with 2 values. This makes invalid/redundant encodings
+ // impossible.
+ arity = count + 2;
+ length = 1 + len + arity;
+ types = pc + 1 + 1 + len;
+
+ for (uint32_t i = 0; i < arity; i++) {
+ uint32_t offset = 1 + 1 + len + i;
+ val = decoder->checked_read_u8(pc, offset, "block type");
+ decode_local_type(val, &type);
+ if (type == kAstStmt) {
+ decoder->error(pc, pc + offset, "invalid block type");
+ return;
+ }
+ }
+ }
+ }
+ // Decode a byte representing a local type. Return {false} if the encoded
+ // byte was invalid or {kMultivalBlock}.
+ bool decode_local_type(uint8_t val, LocalType* result) {
+ switch (static_cast<LocalTypeCode>(val)) {
+ case kLocalVoid:
+ *result = kAstStmt;
+ return true;
+ case kLocalI32:
+ *result = kAstI32;
+ return true;
+ case kLocalI64:
+ *result = kAstI64;
+ return true;
+ case kLocalF32:
+ *result = kAstF32;
+ return true;
+ case kLocalF64:
+ *result = kAstF64;
+ return true;
+ default:
+ *result = kAstStmt;
+ return false;
+ }
+ }
+ LocalType read_entry(unsigned index) {
+ DCHECK_LT(index, arity);
+ LocalType result;
+ CHECK(decode_local_type(types[index], &result));
+ return result;
+ }
+};
+
struct Control;
struct BreakDepthOperand {
- uint32_t arity;
uint32_t depth;
Control* target;
unsigned length;
inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
- unsigned len1 = 0;
- unsigned len2 = 0;
- arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
- depth = decoder->checked_read_u32v(pc, 1 + len1, &len2, "break depth");
- length = len1 + len2;
+ depth = decoder->checked_read_u32v(pc, 1, &length, "break depth");
target = nullptr;
}
};
struct CallIndirectOperand {
- uint32_t arity;
uint32_t index;
FunctionSig* sig;
unsigned length;
inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
unsigned len1 = 0;
unsigned len2 = 0;
- arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "signature index");
length = len1 + len2;
sig = nullptr;
@@ -119,59 +194,32 @@ struct CallIndirectOperand {
};
struct CallFunctionOperand {
- uint32_t arity;
uint32_t index;
FunctionSig* sig;
unsigned length;
inline CallFunctionOperand(Decoder* decoder, const byte* pc) {
unsigned len1 = 0;
unsigned len2 = 0;
- arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "function index");
length = len1 + len2;
sig = nullptr;
}
};
-struct CallImportOperand {
- uint32_t arity;
- uint32_t index;
- FunctionSig* sig;
- unsigned length;
- inline CallImportOperand(Decoder* decoder, const byte* pc) {
- unsigned len1 = 0;
- unsigned len2 = 0;
- arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
- index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "import index");
- length = len1 + len2;
- sig = nullptr;
- }
-};
-
struct BranchTableOperand {
- uint32_t arity;
uint32_t table_count;
+ const byte* start;
const byte* table;
- unsigned length;
inline BranchTableOperand(Decoder* decoder, const byte* pc) {
+ DCHECK_EQ(kExprBrTable, decoder->checked_read_u8(pc, 0, "opcode"));
+ start = pc + 1;
unsigned len1 = 0;
- unsigned len2 = 0;
- arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
- table_count =
- decoder->checked_read_u32v(pc, 1 + len1, &len2, "table count");
+ table_count = decoder->checked_read_u32v(pc, 1, &len1, "table count");
if (table_count > (UINT_MAX / sizeof(uint32_t)) - 1 ||
- len1 + len2 > UINT_MAX - (table_count + 1) * sizeof(uint32_t)) {
+ len1 > UINT_MAX - (table_count + 1) * sizeof(uint32_t)) {
decoder->error(pc, "branch table size overflow");
}
- length = len1 + len2 + (table_count + 1) * sizeof(uint32_t);
-
- uint32_t table_start = 1 + len1 + len2;
- if (decoder->check(pc, table_start, (table_count + 1) * sizeof(uint32_t),
- "expected <table entries>")) {
- table = pc + table_start;
- } else {
- table = nullptr;
- }
+ table = pc + 1 + len1;
}
inline uint32_t read_entry(Decoder* decoder, unsigned i) {
DCHECK(i <= table_count);
@@ -179,14 +227,58 @@ struct BranchTableOperand {
}
};
+// A helper to iterate over a branch table.
+class BranchTableIterator {
+ public:
+ unsigned cur_index() { return index_; }
+ bool has_next() { return index_ <= table_count_; }
+ uint32_t next() {
+ DCHECK(has_next());
+ index_++;
+ unsigned length = 0;
+ uint32_t result =
+ decoder_->checked_read_u32v(pc_, 0, &length, "branch table entry");
+ pc_ += length;
+ return result;
+ }
+ // length, including the length of the {BranchTableOperand}, but not the
+ // opcode.
+ unsigned length() {
+ while (has_next()) next();
+ return static_cast<unsigned>(pc_ - start_);
+ }
+ const byte* pc() { return pc_; }
+
+ BranchTableIterator(Decoder* decoder, BranchTableOperand& operand)
+ : decoder_(decoder),
+ start_(operand.start),
+ pc_(operand.table),
+ index_(0),
+ table_count_(operand.table_count) {}
+
+ private:
+ Decoder* decoder_;
+ const byte* start_;
+ const byte* pc_;
+ uint32_t index_; // the current index.
+ uint32_t table_count_; // the count of entries, not including default.
+};
+
struct MemoryAccessOperand {
uint32_t alignment;
uint32_t offset;
unsigned length;
- inline MemoryAccessOperand(Decoder* decoder, const byte* pc) {
+ inline MemoryAccessOperand(Decoder* decoder, const byte* pc,
+ uint32_t max_alignment) {
unsigned alignment_length;
alignment =
decoder->checked_read_u32v(pc, 1, &alignment_length, "alignment");
+ if (max_alignment < alignment) {
+ decoder->error(pc, pc + 1,
+ "invalid alignment; expected maximum alignment is %u, "
+ "actual alignment is %u",
+ max_alignment, alignment);
+ }
unsigned offset_length;
offset = decoder->checked_read_u32v(pc, 1 + alignment_length,
&offset_length, "offset");
@@ -194,15 +286,6 @@ struct MemoryAccessOperand {
}
};
-struct ReturnArityOperand {
- uint32_t arity;
- unsigned length;
-
- inline ReturnArityOperand(Decoder* decoder, const byte* pc) {
- arity = decoder->checked_read_u32v(pc, 1, &length, "return count");
- }
-};
-
typedef compiler::WasmGraphBuilder TFBuilder;
struct ModuleEnv; // forward declaration of module interface.
@@ -228,25 +311,25 @@ inline std::ostream& operator<<(std::ostream& os, const DecodeStruct& tree) {
return os;
}
-DecodeResult VerifyWasmCode(base::AccountingAllocator* allocator,
- FunctionBody& body);
-DecodeResult BuildTFGraph(base::AccountingAllocator* allocator,
- TFBuilder* builder, FunctionBody& body);
-bool PrintAst(base::AccountingAllocator* allocator, const FunctionBody& body,
+V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
+ FunctionBody& body);
+DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
+ FunctionBody& body);
+bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
std::ostream& os,
std::vector<std::tuple<uint32_t, int, int>>* offset_table);
// A simplified form of AST printing, e.g. from a debugger.
void PrintAstForDebugging(const byte* start, const byte* end);
-inline DecodeResult VerifyWasmCode(base::AccountingAllocator* allocator,
+inline DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
ModuleEnv* module, FunctionSig* sig,
const byte* start, const byte* end) {
FunctionBody body = {module, sig, nullptr, start, end};
return VerifyWasmCode(allocator, body);
}
-inline DecodeResult BuildTFGraph(base::AccountingAllocator* allocator,
+inline DecodeResult BuildTFGraph(AccountingAllocator* allocator,
TFBuilder* builder, ModuleEnv* module,
FunctionSig* sig, const byte* start,
const byte* end) {
@@ -276,9 +359,6 @@ BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
// Computes the length of the opcode at the given address.
unsigned OpcodeLength(const byte* pc, const byte* end);
-// Computes the arity (number of sub-nodes) of the opcode at the given address.
-unsigned OpcodeArity(const byte* pc, const byte* end);
-
// A simple forward iterator for bytecodes.
class BytecodeIterator : public Decoder {
public:
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index a6ede54bec..d5c9f43c57 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -12,7 +12,7 @@
#include "src/signature.h"
#include "src/utils.h"
#include "src/wasm/wasm-result.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -208,6 +208,19 @@ class Decoder {
// Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
void consume_bytes(int size) {
+ TRACE(" +%d %-20s: %d bytes\n", static_cast<int>(pc_ - start_), "skip",
+ size);
+ if (checkAvailable(size)) {
+ pc_ += size;
+ } else {
+ pc_ = limit_;
+ }
+ }
+
+ // Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
+ void consume_bytes(uint32_t size, const char* name = "skip") {
+ TRACE(" +%d %-20s: %d bytes\n", static_cast<int>(pc_ - start_), name,
+ size);
if (checkAvailable(size)) {
pc_ += size;
} else {
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 542c47ca15..90065616d9 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -27,6 +27,141 @@ namespace wasm {
namespace {
+const char* kNameString = "name";
+const size_t kNameStringLength = 4;
+
+LocalType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
+ switch (expr.kind) {
+ case WasmInitExpr::kNone:
+ return kAstStmt;
+ case WasmInitExpr::kGlobalIndex:
+ return expr.val.global_index < module->globals.size()
+ ? module->globals[expr.val.global_index].type
+ : kAstStmt;
+ case WasmInitExpr::kI32Const:
+ return kAstI32;
+ case WasmInitExpr::kI64Const:
+ return kAstI64;
+ case WasmInitExpr::kF32Const:
+ return kAstF32;
+ case WasmInitExpr::kF64Const:
+ return kAstF64;
+ default:
+ UNREACHABLE();
+ return kAstStmt;
+ }
+}
+
+// An iterator over the sections in a WASM binary module.
+// Automatically skips all unknown sections.
+class WasmSectionIterator {
+ public:
+ explicit WasmSectionIterator(Decoder& decoder)
+ : decoder_(decoder),
+ section_code_(kUnknownSectionCode),
+ section_start_(decoder.pc()),
+ section_end_(decoder.pc()) {
+ next();
+ }
+
+ inline bool more() const {
+ return section_code_ != kUnknownSectionCode && decoder_.more();
+ }
+
+ inline WasmSectionCode section_code() const { return section_code_; }
+
+ inline const byte* section_start() const { return section_start_; }
+
+ inline uint32_t section_length() const {
+ return static_cast<uint32_t>(section_end_ - section_start_);
+ }
+
+ inline const byte* section_end() const { return section_end_; }
+
+ // Advances to the next section, checking that decoding the current section
+ // stopped at {section_end_}.
+ void advance() {
+ if (decoder_.pc() != section_end_) {
+ const char* msg = decoder_.pc() < section_end_ ? "shorter" : "longer";
+ decoder_.error(decoder_.pc(), decoder_.pc(),
+ "section was %s than expected size "
+ "(%u bytes expected, %zu decoded)",
+ msg, section_length(),
+ static_cast<size_t>(decoder_.pc() - section_start_));
+ }
+ next();
+ }
+
+ private:
+ Decoder& decoder_;
+ WasmSectionCode section_code_;
+ const byte* section_start_;
+ const byte* section_end_;
+
+ // Reads the section code/name at the current position and sets up
+ // the internal fields.
+ void next() {
+ while (true) {
+ if (!decoder_.more()) {
+ section_code_ = kUnknownSectionCode;
+ return;
+ }
+ uint8_t section_code = decoder_.consume_u8("section code");
+ // Read and check the section size.
+ uint32_t section_length = decoder_.consume_u32v("section length");
+ section_start_ = decoder_.pc();
+ if (decoder_.checkAvailable(section_length)) {
+ // Get the limit of the section within the module.
+ section_end_ = section_start_ + section_length;
+ } else {
+ // The section would extend beyond the end of the module.
+ section_end_ = section_start_;
+ }
+
+ if (section_code == kUnknownSectionCode) {
+ // Check for the known "names" section.
+ uint32_t string_length = decoder_.consume_u32v("section name length");
+ const byte* section_name_start = decoder_.pc();
+ decoder_.consume_bytes(string_length, "section name");
+ if (decoder_.failed() || decoder_.pc() > section_end_) {
+ TRACE("Section name of length %u couldn't be read\n", string_length);
+ section_code_ = kUnknownSectionCode;
+ return;
+ }
+
+ TRACE(" +%d section name : \"%.*s\"\n",
+ static_cast<int>(section_name_start - decoder_.start()),
+ string_length < 20 ? string_length : 20, section_name_start);
+
+ if (string_length == kNameStringLength &&
+ strncmp(reinterpret_cast<const char*>(section_name_start),
+ kNameString, kNameStringLength) == 0) {
+ section_code = kNameSectionCode;
+ } else {
+ section_code = kUnknownSectionCode;
+ }
+ } else if (!IsValidSectionCode(section_code)) {
+ decoder_.error(decoder_.pc(), decoder_.pc(),
+ "unknown section code #0x%02x", section_code);
+ section_code = kUnknownSectionCode;
+ }
+ section_code_ = static_cast<WasmSectionCode>(section_code);
+
+ TRACE("Section: %s\n", SectionName(section_code_));
+ if (section_code_ == kUnknownSectionCode &&
+ section_end_ > decoder_.pc()) {
+ // skip to the end of the unknown section.
+ uint32_t remaining =
+ static_cast<uint32_t>(section_end_ - decoder_.pc());
+ decoder_.consume_bytes(remaining, "section payload");
+ // fall through and continue to the next section.
+ } else {
+ return;
+ }
+ }
+ }
+};
+
// The main logic for decoding the bytes of a module.
class ModuleDecoder : public Decoder {
public:
@@ -77,11 +212,9 @@ class ModuleDecoder : public Decoder {
module->min_mem_pages = 0;
module->max_mem_pages = 0;
module->mem_export = false;
- module->mem_external = false;
module->origin = origin_;
const byte* pos = pc_;
- int current_order = 0;
uint32_t magic_word = consume_u32("wasm magic");
#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
if (magic_word != kWasmMagic) {
@@ -89,7 +222,6 @@ class ModuleDecoder : public Decoder {
"expected magic word %02x %02x %02x %02x, "
"found %02x %02x %02x %02x",
BYTES(kWasmMagic), BYTES(magic_word));
- goto done;
}
pos = pc_;
@@ -100,302 +232,367 @@ class ModuleDecoder : public Decoder {
"expected version %02x %02x %02x %02x, "
"found %02x %02x %02x %02x",
BYTES(kWasmVersion), BYTES(magic_version));
- goto done;
}
}
- // Decode the module sections.
- while (pc_ < limit_) {
- TRACE("DecodeSection\n");
- pos = pc_;
-
- // Read the section name.
- uint32_t string_length = consume_u32v("section name length");
- const byte* section_name_start = pc_;
- consume_bytes(string_length);
- if (failed()) {
- TRACE("Section name of length %u couldn't be read\n", string_length);
- break;
+ WasmSectionIterator section_iter(*this);
+
+ // ===== Type section ====================================================
+ if (section_iter.section_code() == kTypeSectionCode) {
+ uint32_t signatures_count = consume_u32v("signatures count");
+ module->signatures.reserve(SafeReserve(signatures_count));
+ for (uint32_t i = 0; ok() && i < signatures_count; ++i) {
+ TRACE("DecodeSignature[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ FunctionSig* s = consume_sig();
+ module->signatures.push_back(s);
}
+ section_iter.advance();
+ }
- TRACE(" +%d section name : \"%.*s\"\n",
- static_cast<int>(section_name_start - start_),
- string_length < 20 ? string_length : 20, section_name_start);
-
- WasmSection::Code section =
- WasmSection::lookup(section_name_start, string_length);
-
- // Read and check the section size.
- uint32_t section_length = consume_u32v("section length");
- if (!checkAvailable(section_length)) {
- // The section would extend beyond the end of the module.
- break;
- }
- const byte* section_start = pc_;
- const byte* expected_section_end = pc_ + section_length;
-
- current_order = CheckSectionOrder(current_order, section);
-
- switch (section) {
- case WasmSection::Code::End:
- // Terminate section decoding.
- limit_ = pc_;
- break;
- case WasmSection::Code::Memory: {
- module->min_mem_pages = consume_u32v("min memory");
- module->max_mem_pages = consume_u32v("max memory");
- module->mem_export = consume_u8("export memory") != 0;
- break;
- }
- case WasmSection::Code::Signatures: {
- uint32_t signatures_count = consume_u32v("signatures count");
- module->signatures.reserve(SafeReserve(signatures_count));
- // Decode signatures.
- for (uint32_t i = 0; i < signatures_count; ++i) {
- if (failed()) break;
- TRACE("DecodeSignature[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
- FunctionSig* s = consume_sig();
- module->signatures.push_back(s);
- }
- break;
+ // ===== Import section ==================================================
+ if (section_iter.section_code() == kImportSectionCode) {
+ uint32_t import_table_count = consume_u32v("import table count");
+ module->import_table.reserve(SafeReserve(import_table_count));
+ for (uint32_t i = 0; ok() && i < import_table_count; ++i) {
+ TRACE("DecodeImportTable[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+
+ module->import_table.push_back({
+ 0, // module_name_length
+ 0, // module_name_offset
+ 0, // field_name_offset
+ 0, // field_name_length
+ kExternalFunction, // kind
+ 0 // index
+ });
+ WasmImport* import = &module->import_table.back();
+ const byte* pos = pc_;
+ import->module_name_offset =
+ consume_string(&import->module_name_length, true);
+ if (import->module_name_length == 0) {
+ error(pos, "import module name cannot be NULL");
}
- case WasmSection::Code::FunctionSignatures: {
- uint32_t functions_count = consume_u32v("functions count");
- module->functions.reserve(SafeReserve(functions_count));
- for (uint32_t i = 0; i < functions_count; ++i) {
- module->functions.push_back({nullptr, // sig
- i, // func_index
- 0, // sig_index
- 0, // name_offset
- 0, // name_length
- 0, // code_start_offset
- 0}); // code_end_offset
+ import->field_name_offset =
+ consume_string(&import->field_name_length, true);
+
+ import->kind = static_cast<WasmExternalKind>(consume_u8("import kind"));
+ switch (import->kind) {
+ case kExternalFunction: {
+ // ===== Imported function =======================================
+ import->index = static_cast<uint32_t>(module->functions.size());
+ module->num_imported_functions++;
+ module->functions.push_back({nullptr, // sig
+ import->index, // func_index
+ 0, // sig_index
+ 0, // name_offset
+ 0, // name_length
+ 0, // code_start_offset
+ 0, // code_end_offset
+ true, // imported
+ false}); // exported
WasmFunction* function = &module->functions.back();
function->sig_index = consume_sig_index(module, &function->sig);
- }
- break;
- }
- case WasmSection::Code::FunctionBodies: {
- const byte* pos = pc_;
- uint32_t functions_count = consume_u32v("functions count");
- if (functions_count != module->functions.size()) {
- error(pos, pos, "function body count %u mismatch (%u expected)",
- functions_count,
- static_cast<uint32_t>(module->functions.size()));
break;
}
- for (uint32_t i = 0; i < functions_count; ++i) {
- WasmFunction* function = &module->functions[i];
- uint32_t size = consume_u32v("body size");
- function->code_start_offset = pc_offset();
- function->code_end_offset = pc_offset() + size;
-
- TRACE(" +%d %-20s: (%d bytes)\n", pc_offset(), "function body",
- size);
- pc_ += size;
- if (pc_ > limit_) {
- error(pc_, "function body extends beyond end of file");
- }
- }
- break;
- }
- case WasmSection::Code::Names: {
- const byte* pos = pc_;
- uint32_t functions_count = consume_u32v("functions count");
- if (functions_count != module->functions.size()) {
- error(pos, pos, "function name count %u mismatch (%u expected)",
- functions_count,
- static_cast<uint32_t>(module->functions.size()));
+ case kExternalTable: {
+ // ===== Imported table ==========================================
+ import->index =
+ static_cast<uint32_t>(module->function_tables.size());
+ module->function_tables.push_back(
+ {0, 0, std::vector<int32_t>(), true, false});
+ expect_u8("element type", 0x20);
+ WasmIndirectFunctionTable* table = &module->function_tables.back();
+ consume_resizable_limits("element count", "elements", kMaxUInt32,
+ &table->size, &table->max_size);
break;
}
-
- for (uint32_t i = 0; i < functions_count; ++i) {
- WasmFunction* function = &module->functions[i];
- function->name_offset =
- consume_string(&function->name_length, false);
-
- uint32_t local_names_count = consume_u32v("local names count");
- for (uint32_t j = 0; j < local_names_count; j++) {
- uint32_t unused = 0;
- uint32_t offset = consume_string(&unused, false);
- USE(unused);
- USE(offset);
- }
+ case kExternalMemory: {
+ // ===== Imported memory =========================================
+ // import->index =
+ // static_cast<uint32_t>(module->memories.size());
+ // TODO(titzer): imported memories
+ break;
}
- break;
- }
- case WasmSection::Code::Globals: {
- uint32_t globals_count = consume_u32v("globals count");
- module->globals.reserve(SafeReserve(globals_count));
- // Decode globals.
- for (uint32_t i = 0; i < globals_count; ++i) {
- if (failed()) break;
- TRACE("DecodeGlobal[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
- // Add an uninitialized global and pass a pointer to it.
- module->globals.push_back({0, 0, kAstStmt, 0, false});
+ case kExternalGlobal: {
+ // ===== Imported global =========================================
+ import->index = static_cast<uint32_t>(module->globals.size());
+ module->globals.push_back(
+ {kAstStmt, false, NO_INIT, 0, true, false});
WasmGlobal* global = &module->globals.back();
- DecodeGlobalInModule(global);
- }
- break;
- }
- case WasmSection::Code::DataSegments: {
- uint32_t data_segments_count = consume_u32v("data segments count");
- module->data_segments.reserve(SafeReserve(data_segments_count));
- // Decode data segments.
- for (uint32_t i = 0; i < data_segments_count; ++i) {
- if (failed()) break;
- TRACE("DecodeDataSegment[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
- module->data_segments.push_back({0, // dest_addr
- 0, // source_offset
- 0, // source_size
- false}); // init
- WasmDataSegment* segment = &module->data_segments.back();
- DecodeDataSegmentInModule(module, segment);
+ global->type = consume_value_type();
+ global->mutability = consume_u8("mutability") != 0;
+ break;
}
- break;
+ default:
+ error(pos, pos, "unknown import kind 0x%02x", import->kind);
+ break;
}
- case WasmSection::Code::FunctionTable: {
- // An indirect function table requires functions first.
- CheckForFunctions(module, section);
- // Assume only one table for now.
- static const uint32_t kSupportedTableCount = 1;
- module->function_tables.reserve(SafeReserve(kSupportedTableCount));
- // Decode function table.
- for (uint32_t i = 0; i < kSupportedTableCount; ++i) {
- if (failed()) break;
- TRACE("DecodeFunctionTable[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
- module->function_tables.push_back({0, 0, std::vector<uint16_t>()});
- DecodeFunctionTableInModule(module, &module->function_tables[i]);
+ }
+ section_iter.advance();
+ }
+
+ // ===== Function section ================================================
+ if (section_iter.section_code() == kFunctionSectionCode) {
+ uint32_t functions_count = consume_u32v("functions count");
+ module->functions.reserve(SafeReserve(functions_count));
+ module->num_declared_functions = functions_count;
+ for (uint32_t i = 0; ok() && i < functions_count; ++i) {
+ uint32_t func_index = static_cast<uint32_t>(module->functions.size());
+ module->functions.push_back({nullptr, // sig
+ func_index, // func_index
+ 0, // sig_index
+ 0, // name_offset
+ 0, // name_length
+ 0, // code_start_offset
+ 0, // code_end_offset
+ false, // imported
+ false}); // exported
+ WasmFunction* function = &module->functions.back();
+ function->sig_index = consume_sig_index(module, &function->sig);
+ }
+ section_iter.advance();
+ }
+
+ // ===== Table section ===================================================
+ if (section_iter.section_code() == kTableSectionCode) {
+ const byte* pos = pc_;
+ uint32_t table_count = consume_u32v("table count");
+ // Require at most one table for now.
+ if (table_count > 1) {
+ error(pos, pos, "invalid table count %d, maximum 1", table_count);
+ }
+
+ for (uint32_t i = 0; ok() && i < table_count; i++) {
+ module->function_tables.push_back(
+ {0, 0, std::vector<int32_t>(), false, false});
+ WasmIndirectFunctionTable* table = &module->function_tables.back();
+ expect_u8("table type", kWasmAnyFunctionTypeForm);
+ consume_resizable_limits("table elements", "elements", kMaxUInt32,
+ &table->size, &table->max_size);
+ }
+ section_iter.advance();
+ }
+
+ // ===== Memory section ==================================================
+ if (section_iter.section_code() == kMemorySectionCode) {
+ const byte* pos = pc_;
+ uint32_t memory_count = consume_u32v("memory count");
+ // Require at most one memory for now.
+ if (memory_count > 1) {
+ error(pos, pos, "invalid memory count %d, maximum 1", memory_count);
+ }
+
+ for (uint32_t i = 0; ok() && i < memory_count; i++) {
+ consume_resizable_limits("memory", "pages", WasmModule::kMaxLegalPages,
+ &module->min_mem_pages,
+ &module->max_mem_pages);
+ }
+ section_iter.advance();
+ }
+
+ // ===== Global section ==================================================
+ if (section_iter.section_code() == kGlobalSectionCode) {
+ uint32_t globals_count = consume_u32v("globals count");
+ module->globals.reserve(SafeReserve(globals_count));
+ for (uint32_t i = 0; ok() && i < globals_count; ++i) {
+ TRACE("DecodeGlobal[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ // Add an uninitialized global and pass a pointer to it.
+ module->globals.push_back({kAstStmt, false, NO_INIT, 0, false, false});
+ WasmGlobal* global = &module->globals.back();
+ DecodeGlobalInModule(module, i, global);
+ }
+ section_iter.advance();
+ }
+
+ // ===== Export section ==================================================
+ if (section_iter.section_code() == kExportSectionCode) {
+ uint32_t export_table_count = consume_u32v("export table count");
+ module->export_table.reserve(SafeReserve(export_table_count));
+ for (uint32_t i = 0; ok() && i < export_table_count; ++i) {
+ TRACE("DecodeExportTable[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+
+ module->export_table.push_back({
+ 0, // name_length
+ 0, // name_offset
+ kExternalFunction, // kind
+ 0 // index
+ });
+ WasmExport* exp = &module->export_table.back();
+
+ exp->name_offset = consume_string(&exp->name_length, true);
+ const byte* pos = pc();
+ exp->kind = static_cast<WasmExternalKind>(consume_u8("export kind"));
+ switch (exp->kind) {
+ case kExternalFunction: {
+ WasmFunction* func = nullptr;
+ exp->index = consume_func_index(module, &func);
+ module->num_exported_functions++;
+ if (func) func->exported = true;
+ break;
}
- break;
- }
- case WasmSection::Code::StartFunction: {
- // Declares a start function for a module.
- CheckForFunctions(module, section);
- if (module->start_function_index >= 0) {
- error("start function already declared");
+ case kExternalTable: {
+ WasmIndirectFunctionTable* table = nullptr;
+ exp->index = consume_table_index(module, &table);
+ if (table) table->exported = true;
break;
}
- WasmFunction* func;
- const byte* pos = pc_;
- module->start_function_index = consume_func_index(module, &func);
- if (func && func->sig->parameter_count() > 0) {
- error(pos, "invalid start function: non-zero parameter count");
+ case kExternalMemory: {
+ uint32_t index = consume_u32v("memory index");
+ if (index != 0) error("invalid memory index != 0");
+ module->mem_export = true;
break;
}
- break;
- }
- case WasmSection::Code::ImportTable: {
- uint32_t import_table_count = consume_u32v("import table count");
- module->import_table.reserve(SafeReserve(import_table_count));
- // Decode import table.
- for (uint32_t i = 0; i < import_table_count; ++i) {
- if (failed()) break;
- TRACE("DecodeImportTable[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
-
- module->import_table.push_back({nullptr, // sig
- 0, // sig_index
- 0, // module_name_offset
- 0, // module_name_length
- 0, // function_name_offset
- 0}); // function_name_length
- WasmImport* import = &module->import_table.back();
-
- import->sig_index = consume_sig_index(module, &import->sig);
- const byte* pos = pc_;
- import->module_name_offset =
- consume_string(&import->module_name_length, true);
- if (import->module_name_length == 0) {
- error(pos, "import module name cannot be NULL");
- }
- import->function_name_offset =
- consume_string(&import->function_name_length, true);
+ case kExternalGlobal: {
+ WasmGlobal* global = nullptr;
+ exp->index = consume_global_index(module, &global);
+ if (global) global->exported = true;
+ break;
}
- break;
+ default:
+ error(pos, pos, "invalid export kind 0x%02x", exp->kind);
+ break;
}
- case WasmSection::Code::ExportTable: {
- // Declares an export table.
- CheckForFunctions(module, section);
- uint32_t export_table_count = consume_u32v("export table count");
- module->export_table.reserve(SafeReserve(export_table_count));
- // Decode export table.
- for (uint32_t i = 0; i < export_table_count; ++i) {
- if (failed()) break;
- TRACE("DecodeExportTable[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
-
- module->export_table.push_back({0, // func_index
- 0, // name_offset
- 0}); // name_length
- WasmExport* exp = &module->export_table.back();
-
- WasmFunction* func;
- exp->func_index = consume_func_index(module, &func);
- exp->name_offset = consume_string(&exp->name_length, true);
+ }
+ // Check for duplicate exports.
+ if (ok() && module->export_table.size() > 1) {
+ std::vector<WasmExport> sorted_exports(module->export_table);
+ const byte* base = start_;
+ auto cmp_less = [base](const WasmExport& a, const WasmExport& b) {
+ // Return true if a < b.
+ if (a.name_length != b.name_length) {
+ return a.name_length < b.name_length;
}
- // Check for duplicate exports.
- if (ok() && module->export_table.size() > 1) {
- std::vector<WasmExport> sorted_exports(module->export_table);
- const byte* base = start_;
- auto cmp_less = [base](const WasmExport& a, const WasmExport& b) {
- // Return true if a < b.
- uint32_t len = a.name_length;
- if (len != b.name_length) return len < b.name_length;
- return memcmp(base + a.name_offset, base + b.name_offset, len) <
- 0;
- };
- std::stable_sort(sorted_exports.begin(), sorted_exports.end(),
- cmp_less);
- auto it = sorted_exports.begin();
- WasmExport* last = &*it++;
- for (auto end = sorted_exports.end(); it != end; last = &*it++) {
- DCHECK(!cmp_less(*it, *last)); // Vector must be sorted.
- if (!cmp_less(*last, *it)) {
- const byte* pc = start_ + it->name_offset;
- error(pc, pc,
- "Duplicate export name '%.*s' for functions %d and %d",
- it->name_length, pc, last->func_index, it->func_index);
- break;
- }
- }
+ return memcmp(base + a.name_offset, base + b.name_offset,
+ a.name_length) < 0;
+ };
+ std::stable_sort(sorted_exports.begin(), sorted_exports.end(),
+ cmp_less);
+ auto it = sorted_exports.begin();
+ WasmExport* last = &*it++;
+ for (auto end = sorted_exports.end(); it != end; last = &*it++) {
+ DCHECK(!cmp_less(*it, *last)); // Vector must be sorted.
+ if (!cmp_less(*last, *it)) {
+ const byte* pc = start_ + it->name_offset;
+ error(pc, pc,
+ "Duplicate export name '%.*s' for functions %d and %d",
+ it->name_length, pc, last->index, it->index);
+ break;
}
- break;
}
- case WasmSection::Code::Max:
- // Skip unknown sections.
- TRACE("Unknown section: '");
- for (uint32_t i = 0; i != string_length; ++i) {
- TRACE("%c", *(section_name_start + i));
- }
- TRACE("'\n");
- consume_bytes(section_length);
- break;
- }
-
- if (pc_ != expected_section_end) {
- const char* diff = pc_ < expected_section_end ? "shorter" : "longer";
- size_t expected_length = static_cast<size_t>(section_length);
- size_t actual_length = static_cast<size_t>(pc_ - section_start);
- error(pc_, pc_,
- "section \"%s\" %s (%zu bytes) than specified (%zu bytes)",
- WasmSection::getName(section), diff, actual_length,
- expected_length);
- break;
}
+ section_iter.advance();
+ }
+
+ // ===== Start section ===================================================
+ if (section_iter.section_code() == kStartSectionCode) {
+ WasmFunction* func;
+ const byte* pos = pc_;
+ module->start_function_index = consume_func_index(module, &func);
+ if (func && func->sig->parameter_count() > 0) {
+ error(pos, "invalid start function: non-zero parameter count");
+ }
+ section_iter.advance();
+ }
+
+ // ===== Elements section ================================================
+ if (section_iter.section_code() == kElementSectionCode) {
+ uint32_t element_count = consume_u32v("element count");
+ for (uint32_t i = 0; ok() && i < element_count; ++i) {
+ uint32_t table_index = consume_u32v("table index");
+ if (table_index != 0) error("illegal table index != 0");
+ WasmInitExpr offset = consume_init_expr(module, kAstI32);
+ uint32_t num_elem = consume_u32v("number of elements");
+ std::vector<uint32_t> vector;
+ module->table_inits.push_back({table_index, offset, vector});
+ WasmTableInit* init = &module->table_inits.back();
+ init->entries.reserve(SafeReserve(num_elem));
+ for (uint32_t j = 0; ok() && j < num_elem; j++) {
+ WasmFunction* func = nullptr;
+ init->entries.push_back(consume_func_index(module, &func));
+ }
+ }
+
+ section_iter.advance();
+ }
+
+ // ===== Code section ====================================================
+ if (section_iter.section_code() == kCodeSectionCode) {
+ const byte* pos = pc_;
+ uint32_t functions_count = consume_u32v("functions count");
+ if (functions_count != module->num_declared_functions) {
+ error(pos, pos, "function body count %u mismatch (%u expected)",
+ functions_count, module->num_declared_functions);
+ }
+ for (uint32_t i = 0; ok() && i < functions_count; ++i) {
+ WasmFunction* function =
+ &module->functions[i + module->num_imported_functions];
+ uint32_t size = consume_u32v("body size");
+ function->code_start_offset = pc_offset();
+ function->code_end_offset = pc_offset() + size;
+ consume_bytes(size, "function body");
+ }
+ section_iter.advance();
+ }
+
+ // ===== Data section ====================================================
+ if (section_iter.section_code() == kDataSectionCode) {
+ uint32_t data_segments_count = consume_u32v("data segments count");
+ module->data_segments.reserve(SafeReserve(data_segments_count));
+ for (uint32_t i = 0; ok() && i < data_segments_count; ++i) {
+ TRACE("DecodeDataSegment[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ module->data_segments.push_back({
+ NO_INIT, // dest_addr
+ 0, // source_offset
+ 0 // source_size
+ });
+ WasmDataSegment* segment = &module->data_segments.back();
+ DecodeDataSegmentInModule(module, segment);
+ }
+ section_iter.advance();
}
- done:
- if (ok()) CalculateGlobalsOffsets(module);
+ // ===== Name section ====================================================
+ if (section_iter.section_code() == kNameSectionCode) {
+ const byte* pos = pc_;
+ uint32_t functions_count = consume_u32v("functions count");
+ if (functions_count != module->num_declared_functions) {
+ error(pos, pos, "function name count %u mismatch (%u expected)",
+ functions_count, module->num_declared_functions);
+ }
+
+ for (uint32_t i = 0; ok() && i < functions_count; ++i) {
+ WasmFunction* function =
+ &module->functions[i + module->num_imported_functions];
+ function->name_offset = consume_string(&function->name_length, false);
+
+ uint32_t local_names_count = consume_u32v("local names count");
+ for (uint32_t j = 0; ok() && j < local_names_count; j++) {
+ uint32_t unused = 0;
+ uint32_t offset = consume_string(&unused, false);
+ USE(unused);
+ USE(offset);
+ }
+ }
+ section_iter.advance();
+ }
+
+ // ===== Remaining sections ==============================================
+ if (section_iter.more() && ok()) {
+ error(pc(), pc(), "unexpected section: %s",
+ SectionName(section_iter.section_code()));
+ }
+
+ if (ok()) {
+ CalculateGlobalOffsets(module);
+ PreinitializeIndirectFunctionTables(module);
+ }
const WasmModule* finished_module = module;
ModuleResult result = toResult(finished_module);
- if (FLAG_dump_wasm_module) {
- DumpModule(module, result);
- }
+ if (FLAG_dump_wasm_module) DumpModule(module, result);
return result;
}
@@ -405,27 +602,6 @@ class ModuleDecoder : public Decoder {
return count < kMaxReserve ? count : kMaxReserve;
}
- void CheckForFunctions(WasmModule* module, WasmSection::Code section) {
- if (module->functions.size() == 0) {
- error(pc_ - 1, nullptr, "functions must appear before section %s",
- WasmSection::getName(section));
- }
- }
-
- int CheckSectionOrder(int current_order, WasmSection::Code section) {
- int next_order = WasmSection::getOrder(section);
- if (next_order == 0) return current_order;
- if (next_order == current_order) {
- error(pc_, pc_, "section \"%s\" already defined",
- WasmSection::getName(section));
- }
- if (next_order < current_order) {
- error(pc_, pc_, "section \"%s\" out of order",
- WasmSection::getName(section));
- }
- return next_order;
- }
-
// Decodes a single anonymous function starting at {start_}.
FunctionResult DecodeSingleFunction(ModuleEnv* module_env,
WasmFunction* function) {
@@ -451,6 +627,11 @@ class ModuleDecoder : public Decoder {
return ok() ? result : nullptr;
}
+ WasmInitExpr DecodeInitExpr(const byte* start) {
+ pc_ = start;
+ return consume_init_expr(nullptr, kAstStmt);
+ }
+
private:
Zone* module_zone;
ModuleResult result_;
@@ -459,15 +640,28 @@ class ModuleDecoder : public Decoder {
uint32_t off(const byte* ptr) { return static_cast<uint32_t>(ptr - start_); }
// Decodes a single global entry inside a module starting at {pc_}.
- void DecodeGlobalInModule(WasmGlobal* global) {
- global->name_offset = consume_string(&global->name_length, false);
- if (!unibrow::Utf8::Validate(start_ + global->name_offset,
- global->name_length)) {
- error("global name is not valid utf8");
+ void DecodeGlobalInModule(WasmModule* module, uint32_t index,
+ WasmGlobal* global) {
+ global->type = consume_value_type();
+ global->mutability = consume_u8("mutability") != 0;
+ const byte* pos = pc();
+ global->init = consume_init_expr(module, kAstStmt);
+ switch (global->init.kind) {
+ case WasmInitExpr::kGlobalIndex:
+ if (global->init.val.global_index >= index) {
+ error("invalid global index in init expression");
+ } else if (module->globals[index].type != global->type) {
+ error("type mismatch in global initialization");
+ }
+ break;
+ default:
+ if (global->type != TypeOf(module, global->init)) {
+ error(pos, pos,
+ "type error in global initialization, expected %s, got %s",
+ WasmOpcodes::TypeName(global->type),
+ WasmOpcodes::TypeName(TypeOf(module, global->init)));
+ }
}
- global->type = consume_local_type();
- global->offset = 0;
- global->exported = consume_u8("exported") != 0;
}
bool IsWithinLimit(uint32_t limit, uint32_t offset, uint32_t size) {
@@ -479,10 +673,10 @@ class ModuleDecoder : public Decoder {
// Decodes a single data segment entry inside a module starting at {pc_}.
void DecodeDataSegmentInModule(WasmModule* module, WasmDataSegment* segment) {
const byte* start = pc_;
- segment->dest_addr = consume_u32v("destination");
+ expect_u8("linear memory index", 0);
+ segment->dest_addr = consume_init_expr(module, kAstI32);
segment->source_size = consume_u32v("source size");
segment->source_offset = static_cast<uint32_t>(pc_ - start_);
- segment->init = true;
// Validate the data is in the module.
uint32_t module_limit = static_cast<uint32_t>(limit_ - start_);
@@ -491,40 +685,11 @@ class ModuleDecoder : public Decoder {
error(start, "segment out of bounds of module");
}
- // Validate that the segment will fit into the (minimum) memory.
- uint32_t memory_limit =
- WasmModule::kPageSize * (module ? module->min_mem_pages
- : WasmModule::kMaxMemPages);
- if (!IsWithinLimit(memory_limit, segment->dest_addr,
- segment->source_size)) {
- error(start, "segment out of bounds of memory");
- }
-
- consume_bytes(segment->source_size);
- }
-
- // Decodes a single function table inside a module starting at {pc_}.
- void DecodeFunctionTableInModule(WasmModule* module,
- WasmIndirectFunctionTable* table) {
- table->size = consume_u32v("function table entry count");
- table->max_size = table->size;
-
- if (table->max_size != table->size) {
- error("invalid table maximum size");
- }
-
- for (uint32_t i = 0; i < table->size; ++i) {
- uint16_t index = consume_u32v();
- if (index >= module->functions.size()) {
- error(pc_ - sizeof(index), "invalid function index");
- break;
- }
- table->values.push_back(index);
- }
+ consume_bytes(segment->source_size, "segment data");
}
// Calculate individual global offsets and total size of globals table.
- void CalculateGlobalsOffsets(WasmModule* module) {
+ void CalculateGlobalOffsets(WasmModule* module) {
uint32_t offset = 0;
if (module->globals.size() == 0) {
module->globals_size = 0;
@@ -540,6 +705,30 @@ class ModuleDecoder : public Decoder {
module->globals_size = offset;
}
+ // TODO(titzer): this only works without overlapping initializations from
+ // global bases for entries
+ void PreinitializeIndirectFunctionTables(WasmModule* module) {
+ // Fill all tables with invalid entries first.
+ for (WasmIndirectFunctionTable& table : module->function_tables) {
+ table.values.resize(table.size);
+ for (size_t i = 0; i < table.size; i++) {
+ table.values[i] = kInvalidFunctionIndex;
+ }
+ }
+ for (WasmTableInit& init : module->table_inits) {
+ if (init.offset.kind != WasmInitExpr::kI32Const) continue;
+ if (init.table_index >= module->function_tables.size()) continue;
+ WasmIndirectFunctionTable& table =
+ module->function_tables[init.table_index];
+ for (size_t i = 0; i < init.entries.size(); i++) {
+ size_t index = i + init.offset.val.i32_const;
+ if (index < table.values.size()) {
+ table.values[index] = init.entries[i];
+ }
+ }
+ }
+ }
+
// Verifies the body (code) of a given function.
void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
WasmFunction* function) {
@@ -570,26 +759,18 @@ class ModuleDecoder : public Decoder {
}
}
- // Reads a single 32-bit unsigned integer interpreted as an offset, checking
- // the offset is within bounds and advances.
- uint32_t consume_offset(const char* name = nullptr) {
- uint32_t offset = consume_u32(name ? name : "offset");
- if (offset > static_cast<uint32_t>(limit_ - start_)) {
- error(pc_ - sizeof(uint32_t), "offset out of bounds of module");
- }
- return offset;
- }
-
// Reads a length-prefixed string, checking that it is within bounds. Returns
// the offset of the string, and the length as an out parameter.
uint32_t consume_string(uint32_t* length, bool validate_utf8) {
*length = consume_u32v("string length");
uint32_t offset = pc_offset();
- TRACE(" +%u %-20s: (%u bytes)\n", offset, "string", *length);
- if (validate_utf8 && !unibrow::Utf8::Validate(pc_, *length)) {
- error(pc_, "no valid UTF-8 string");
+ const byte* string_start = pc_;
+ // Consume bytes before validation to guarantee that the string is not oob.
+ consume_bytes(*length, "string");
+ if (ok() && validate_utf8 &&
+ !unibrow::Utf8::Validate(string_start, *length)) {
+ error(string_start, "no valid UTF-8 string");
}
- consume_bytes(*length);
return offset;
}
@@ -607,25 +788,134 @@ class ModuleDecoder : public Decoder {
}
uint32_t consume_func_index(WasmModule* module, WasmFunction** func) {
+ return consume_index("function index", module->functions, func);
+ }
+
+ uint32_t consume_global_index(WasmModule* module, WasmGlobal** global) {
+ return consume_index("global index", module->globals, global);
+ }
+
+ uint32_t consume_table_index(WasmModule* module,
+ WasmIndirectFunctionTable** table) {
+ return consume_index("table index", module->function_tables, table);
+ }
+
+ template <typename T>
+ uint32_t consume_index(const char* name, std::vector<T>& vector, T** ptr) {
const byte* pos = pc_;
- uint32_t func_index = consume_u32v("function index");
- if (func_index >= module->functions.size()) {
- error(pos, pos, "function index %u out of bounds (%d functions)",
- func_index, static_cast<int>(module->functions.size()));
- *func = nullptr;
+ uint32_t index = consume_u32v(name);
+ if (index >= vector.size()) {
+ error(pos, pos, "%s %u out of bounds (%d entries)", name, index,
+ static_cast<int>(vector.size()));
+ *ptr = nullptr;
return 0;
}
- *func = &module->functions[func_index];
- return func_index;
+ *ptr = &vector[index];
+ return index;
+ }
+
+ void consume_resizable_limits(const char* name, const char* units,
+ uint32_t max_value, uint32_t* initial,
+ uint32_t* maximum) {
+ uint32_t flags = consume_u32v("resizable limits flags");
+ const byte* pos = pc();
+ *initial = consume_u32v("initial size");
+ if (*initial > max_value) {
+ error(pos, pos,
+ "initial %s size (%u %s) is larger than maximum allowable (%u)",
+ name, *initial, units, max_value);
+ }
+ if (flags & 1) {
+ pos = pc();
+ *maximum = consume_u32v("maximum size");
+ if (*maximum > max_value) {
+ error(pos, pos,
+ "maximum %s size (%u %s) is larger than maximum allowable (%u)",
+ name, *maximum, units, max_value);
+ }
+ if (*maximum < *initial) {
+ error(pos, pos, "maximum %s size (%u %s) is less than initial (%u %s)",
+ name, *maximum, units, *initial, units);
+ }
+ } else {
+ *maximum = 0;
+ }
+ }
+
+ bool expect_u8(const char* name, uint8_t expected) {
+ const byte* pos = pc();
+ uint8_t value = consume_u8(name);
+ if (value != expected) {
+ error(pos, pos, "expected %s 0x%02x, got 0x%02x", name, expected, value);
+ return false;
+ }
+ return true;
+ }
+
+ WasmInitExpr consume_init_expr(WasmModule* module, LocalType expected) {
+ const byte* pos = pc();
+ uint8_t opcode = consume_u8("opcode");
+ WasmInitExpr expr;
+ unsigned len = 0;
+ switch (opcode) {
+ case kExprGetGlobal: {
+ GlobalIndexOperand operand(this, pc() - 1);
+ expr.kind = WasmInitExpr::kGlobalIndex;
+ expr.val.global_index = operand.index;
+ len = operand.length;
+ break;
+ }
+ case kExprI32Const: {
+ ImmI32Operand operand(this, pc() - 1);
+ expr.kind = WasmInitExpr::kI32Const;
+ expr.val.i32_const = operand.value;
+ len = operand.length;
+ break;
+ }
+ case kExprF32Const: {
+ ImmF32Operand operand(this, pc() - 1);
+ expr.kind = WasmInitExpr::kF32Const;
+ expr.val.f32_const = operand.value;
+ len = operand.length;
+ break;
+ }
+ case kExprI64Const: {
+ ImmI64Operand operand(this, pc() - 1);
+ expr.kind = WasmInitExpr::kI64Const;
+ expr.val.i64_const = operand.value;
+ len = operand.length;
+ break;
+ }
+ case kExprF64Const: {
+ ImmF64Operand operand(this, pc() - 1);
+ expr.kind = WasmInitExpr::kF64Const;
+ expr.val.f64_const = operand.value;
+ len = operand.length;
+ break;
+ }
+ default: {
+ error("invalid opcode in initialization expression");
+ expr.kind = WasmInitExpr::kNone;
+ expr.val.i32_const = 0;
+ }
+ }
+ consume_bytes(len, "init code");
+ if (!expect_u8("end opcode", kExprEnd)) {
+ expr.kind = WasmInitExpr::kNone;
+ }
+ if (expected != kAstStmt && TypeOf(module, expr) != kAstI32) {
+ error(pos, pos, "type error in init expression, expected %s, got %s",
+ WasmOpcodes::TypeName(expected),
+ WasmOpcodes::TypeName(TypeOf(module, expr)));
+ }
+ return expr;
}
// Reads a single 8-bit integer, interpreting it as a local type.
- LocalType consume_local_type() {
- byte val = consume_u8("local type");
+ LocalType consume_value_type() {
+ byte val = consume_u8("value type");
LocalTypeCode t = static_cast<LocalTypeCode>(val);
switch (t) {
- case kLocalVoid:
- return kAstStmt;
case kLocalI32:
return kAstI32;
case kLocalI64:
@@ -634,6 +924,8 @@ class ModuleDecoder : public Decoder {
return kAstF32;
case kLocalF64:
return kAstF64;
+ case kLocalS128:
+ return kAstS128;
default:
error(pc_ - 1, "invalid local type");
return kAstStmt;
@@ -642,19 +934,12 @@ class ModuleDecoder : public Decoder {
// Parses a type entry, which is currently limited to functions only.
FunctionSig* consume_sig() {
- const byte* pos = pc_;
- byte form = consume_u8("type form");
- if (form != kWasmFunctionTypeForm) {
- error(pos, pos, "expected function type form (0x%02x), got: 0x%02x",
- kWasmFunctionTypeForm, form);
- return nullptr;
- }
+ if (!expect_u8("type form", kWasmFunctionTypeForm)) return nullptr;
// parse parameter types
uint32_t param_count = consume_u32v("param count");
std::vector<LocalType> params;
- for (uint32_t i = 0; i < param_count; ++i) {
- LocalType param = consume_local_type();
- if (param == kAstStmt) error(pc_ - 1, "invalid void parameter type");
+ for (uint32_t i = 0; ok() && i < param_count; ++i) {
+ LocalType param = consume_value_type();
params.push_back(param);
}
@@ -667,12 +952,16 @@ class ModuleDecoder : public Decoder {
return nullptr;
}
std::vector<LocalType> returns;
- for (uint32_t i = 0; i < return_count; ++i) {
- LocalType ret = consume_local_type();
- if (ret == kAstStmt) error(pc_ - 1, "invalid void return type");
+ for (uint32_t i = 0; ok() && i < return_count; ++i) {
+ LocalType ret = consume_value_type();
returns.push_back(ret);
}
+ if (failed()) {
+ // Decoding failed, return void -> void
+ return new (module_zone) FunctionSig(0, 0, nullptr);
+ }
+
// FunctionSig stores the return types first.
LocalType* buffer =
module_zone->NewArray<LocalType>(param_count + return_count);
@@ -711,7 +1000,7 @@ class FunctionError : public FunctionResult {
};
Vector<const byte> FindSection(const byte* module_start, const byte* module_end,
- WasmSection::Code code) {
+ WasmSectionCode code) {
Decoder decoder(module_start, module_end);
uint32_t magic_word = decoder.consume_u32("wasm magic");
@@ -720,24 +1009,14 @@ Vector<const byte> FindSection(const byte* module_start, const byte* module_end,
uint32_t magic_version = decoder.consume_u32("wasm version");
if (magic_version != kWasmVersion) decoder.error("wrong wasm version");
- while (decoder.more() && decoder.ok()) {
- // Read the section name.
- uint32_t string_length = decoder.consume_u32v("section name length");
- const byte* section_name_start = decoder.pc();
- decoder.consume_bytes(string_length);
- if (decoder.failed()) break;
-
- WasmSection::Code section =
- WasmSection::lookup(section_name_start, string_length);
-
- // Read and check the section size.
- uint32_t section_length = decoder.consume_u32v("section length");
-
- const byte* section_start = decoder.pc();
- decoder.consume_bytes(section_length);
- if (section == code && decoder.ok()) {
- return Vector<const uint8_t>(section_start, section_length);
+ WasmSectionIterator section_iter(decoder);
+ while (section_iter.more()) {
+ if (section_iter.section_code() == code) {
+ return Vector<const uint8_t>(section_iter.section_start(),
+ section_iter.section_length());
}
+ decoder.consume_bytes(section_iter.section_length(), "section payload");
+ section_iter.advance();
}
return Vector<const uint8_t>();
@@ -772,6 +1051,13 @@ FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
return decoder.DecodeFunctionSignature(start);
}
+WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator);
+ ModuleDecoder decoder(&zone, start, end, kWasmOrigin);
+ return decoder.DecodeInitExpr(start);
+}
+
FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
ModuleEnv* module_env,
const byte* function_start,
@@ -789,15 +1075,26 @@ FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
return decoder.DecodeSingleFunction(module_env, function);
}
-FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
- const byte* module_end) {
+FunctionOffsetsResult DecodeWasmFunctionOffsets(
+ const byte* module_start, const byte* module_end,
+ uint32_t num_imported_functions) {
+ // Find and decode the code section.
Vector<const byte> code_section =
- FindSection(module_start, module_end, WasmSection::Code::FunctionBodies);
+ FindSection(module_start, module_end, kCodeSectionCode);
Decoder decoder(code_section.start(), code_section.end());
- if (!code_section.start()) decoder.error("no code section");
+ FunctionOffsets table;
+ if (!code_section.start()) {
+ decoder.error("no code section");
+ return decoder.toResult(std::move(table));
+ }
+
+ // Reserve entries for the imported functions.
+ table.reserve(num_imported_functions);
+ for (uint32_t i = 0; i < num_imported_functions; i++) {
+ table.push_back(std::make_pair(0, 0));
+ }
uint32_t functions_count = decoder.consume_u32v("functions count");
- FunctionOffsets table;
// Take care of invalid input here.
if (functions_count < static_cast<unsigned>(code_section.length()) / 2)
table.reserve(functions_count);
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index dd6bd3bc86..22a313cec3 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -12,9 +12,11 @@ namespace v8 {
namespace internal {
namespace wasm {
// Decodes the bytes of a WASM module between {module_start} and {module_end}.
-ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
- const byte* module_start, const byte* module_end,
- bool verify_functions, ModuleOrigin origin);
+V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
+ const byte* module_start,
+ const byte* module_end,
+ bool verify_functions,
+ ModuleOrigin origin);
// Exposed for testing. Decodes a single function signature, allocating it
// in the given zone. Returns {nullptr} upon failure.
@@ -30,8 +32,11 @@ FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone, ModuleEnv* env,
// Extracts the function offset table from the wasm module bytes.
// Returns a vector with <offset, length> entries, or failure if the wasm bytes
// are detected as invalid. Note that this validation is not complete.
-FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
- const byte* module_end);
+FunctionOffsetsResult DecodeWasmFunctionOffsets(
+ const byte* module_start, const byte* module_end,
+ uint32_t num_imported_functions);
+
+WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/switch-logic.h b/deps/v8/src/wasm/switch-logic.h
index 8cef08b98b..160e0d69b2 100644
--- a/deps/v8/src/wasm/switch-logic.h
+++ b/deps/v8/src/wasm/switch-logic.h
@@ -5,8 +5,8 @@
#ifndef V8_WASM_SWITCH_LOGIC_H
#define V8_WASM_SWITCH_LOGIC_H
-#include "src/zone-containers.h"
-#include "src/zone.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 54e7100935..42a8e5f2ab 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -32,11 +32,15 @@ ByteArray *GetOrCreateFunctionOffsetTable(Handle<WasmDebugInfo> debug_info) {
FunctionOffsetsResult function_offsets;
{
DisallowHeapAllocation no_gc;
+ Handle<JSObject> wasm_object(debug_info->wasm_object(), isolate);
+ uint32_t num_imported_functions =
+ wasm::GetNumImportedFunctions(wasm_object);
SeqOneByteString *wasm_bytes =
wasm::GetWasmBytes(debug_info->wasm_object());
const byte *bytes_start = wasm_bytes->GetChars();
const byte *bytes_end = bytes_start + wasm_bytes->length();
- function_offsets = wasm::DecodeWasmFunctionOffsets(bytes_start, bytes_end);
+ function_offsets = wasm::DecodeWasmFunctionOffsets(bytes_start, bytes_end,
+ num_imported_functions);
}
DCHECK(function_offsets.ok());
size_t array_size = 2 * kIntSize * function_offsets.val.size();
@@ -179,7 +183,7 @@ Handle<String> WasmDebugInfo::DisassembleFunction(
Vector<const uint8_t> bytes_vec = GetFunctionBytes(debug_info, func_index);
DisallowHeapAllocation no_gc;
- base::AccountingAllocator allocator;
+ AccountingAllocator allocator;
bool ok = PrintAst(
&allocator, FunctionBodyForTesting(bytes_vec.start(), bytes_vec.end()),
disassembly_os, nullptr);
@@ -208,7 +212,7 @@ Handle<FixedArray> WasmDebugInfo::GetFunctionOffsetTable(
Vector<const uint8_t> bytes_vec = GetFunctionBytes(debug_info, func_index);
DisallowHeapAllocation no_gc;
- v8::base::AccountingAllocator allocator;
+ AccountingAllocator allocator;
bool ok = PrintAst(
&allocator, FunctionBodyForTesting(bytes_vec.start(), bytes_vec.end()),
null_stream, &offset_table_vec);
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 09294c2c28..4c4c91b29c 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -206,9 +206,6 @@ uint32_t word64_popcnt_wrapper(uint64_t* input) {
void float64_pow_wrapper(double* param0, double* param1) {
double x = ReadDoubleValue(param0);
double y = ReadDoubleValue(param1);
- if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
- WriteDoubleValue(param0, std::numeric_limits<double>::quiet_NaN());
- }
WriteDoubleValue(param0, Pow(x, y));
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 7e3127dd53..2ac681eff2 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -10,8 +10,8 @@
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-module.h"
-#include "src/base/accounting-allocator.h"
-#include "src/zone-containers.h"
+#include "src/zone/accounting-allocator.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -654,6 +654,48 @@ static inline int64_t ExecuteI64ReinterpretF64(double a, TrapReason* trap) {
return bit_cast<int64_t>(a);
}
+static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
+ WasmModuleInstance* instance) {
+ // TODO(ahaas): Move memory allocation to wasm-module.cc for better
+ // encapsulation.
+ if (delta_pages > wasm::WasmModule::kMaxMemPages) {
+ return -1;
+ }
+ uint32_t old_size = instance->mem_size;
+ uint32_t new_size;
+ byte* new_mem_start;
+ if (instance->mem_size == 0) {
+ if (delta_pages > wasm::WasmModule::kMaxMemPages) {
+ return -1;
+ }
+ // TODO(gdeepti): Fix bounds check to take into account size of memtype.
+ new_size = delta_pages * wasm::WasmModule::kPageSize;
+ new_mem_start = static_cast<byte*>(calloc(new_size, sizeof(byte)));
+ if (!new_mem_start) {
+ return -1;
+ }
+ } else {
+ DCHECK_NOT_NULL(instance->mem_start);
+ new_size = old_size + delta_pages * wasm::WasmModule::kPageSize;
+ if (new_size >
+ wasm::WasmModule::kMaxMemPages * wasm::WasmModule::kPageSize) {
+ return -1;
+ }
+ new_mem_start = static_cast<byte*>(realloc(instance->mem_start, new_size));
+ if (!new_mem_start) {
+ return -1;
+ }
+ // Zero initializing uninitialized memory from realloc
+ memset(new_mem_start + old_size, 0, new_size - old_size);
+ }
+ instance->mem_start = new_mem_start;
+ instance->mem_size = new_size;
+ // realloc
+ // update mem_start
+ // update mem_size
+ return static_cast<int32_t>(old_size / WasmModule::kPageSize);
+}
+
enum InternalOpcode {
#define DECL_INTERNAL_ENUM(name, value) kInternal##name = value,
FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM)
@@ -680,54 +722,38 @@ class ControlTransfers : public ZoneObject {
public:
ControlTransferMap map_;
- ControlTransfers(Zone* zone, size_t locals_encoded_size, const byte* start,
- const byte* end)
+ ControlTransfers(Zone* zone, ModuleEnv* env, AstLocalDecls* locals,
+ const byte* start, const byte* end)
: map_(zone) {
- // A control reference including from PC, from value depth, and whether
- // a value is explicitly passed (e.g. br/br_if/br_table with value).
- struct CRef {
- const byte* pc;
- sp_t value_depth;
- bool explicit_value;
- };
-
// Represents a control flow label.
struct CLabel : public ZoneObject {
const byte* target;
- size_t value_depth;
- ZoneVector<CRef> refs;
+ ZoneVector<const byte*> refs;
- CLabel(Zone* zone, size_t v)
- : target(nullptr), value_depth(v), refs(zone) {}
+ explicit CLabel(Zone* zone) : target(nullptr), refs(zone) {}
// Bind this label to the given PC.
- void Bind(ControlTransferMap* map, const byte* start, const byte* pc,
- bool expect_value) {
+ void Bind(ControlTransferMap* map, const byte* start, const byte* pc) {
DCHECK_NULL(target);
target = pc;
- for (auto from : refs) {
- auto pcdiff = static_cast<pcdiff_t>(target - from.pc);
- auto spdiff = static_cast<spdiff_t>(from.value_depth - value_depth);
- ControlTransfer::StackAction action = ControlTransfer::kNoAction;
- if (expect_value && !from.explicit_value) {
- action = spdiff == 0 ? ControlTransfer::kPushVoid
- : ControlTransfer::kPopAndRepush;
- }
- pc_t offset = static_cast<size_t>(from.pc - start);
- (*map)[offset] = {pcdiff, spdiff, action};
+ for (auto from_pc : refs) {
+ auto pcdiff = static_cast<pcdiff_t>(target - from_pc);
+ size_t offset = static_cast<size_t>(from_pc - start);
+ (*map)[offset] = pcdiff;
}
}
// Reference this label from the given location.
- void Ref(ControlTransferMap* map, const byte* start, CRef from) {
- DCHECK_GE(from.value_depth, value_depth);
+ void Ref(ControlTransferMap* map, const byte* start,
+ const byte* from_pc) {
if (target) {
- auto pcdiff = static_cast<pcdiff_t>(target - from.pc);
- auto spdiff = static_cast<spdiff_t>(from.value_depth - value_depth);
- pc_t offset = static_cast<size_t>(from.pc - start);
- (*map)[offset] = {pcdiff, spdiff, ControlTransfer::kNoAction};
+ // Target being bound before a reference means this is a loop.
+ DCHECK_EQ(kExprLoop, *target);
+ auto pcdiff = static_cast<pcdiff_t>(target - from_pc);
+ size_t offset = static_cast<size_t>(from_pc - start);
+ (*map)[offset] = pcdiff;
} else {
- refs.push_back(from);
+ refs.push_back(from_pc);
}
}
};
@@ -738,122 +764,104 @@ class ControlTransfers : public ZoneObject {
CLabel* end_label;
CLabel* else_label;
- void Ref(ControlTransferMap* map, const byte* start, const byte* from_pc,
- size_t from_value_depth, bool explicit_value) {
- end_label->Ref(map, start, {from_pc, from_value_depth, explicit_value});
+ void Ref(ControlTransferMap* map, const byte* start,
+ const byte* from_pc) {
+ end_label->Ref(map, start, from_pc);
}
};
// Compute the ControlTransfer map.
- // This works by maintaining a stack of control constructs similar to the
+ // This algorithm maintains a stack of control constructs similar to the
// AST decoder. The {control_stack} allows matching {br,br_if,br_table}
// bytecodes with their target, as well as determining whether the current
// bytecodes are within the true or false block of an else.
- // The value stack depth is tracked as {value_depth} and is needed to
- // determine how many values to pop off the stack for explicit and
- // implicit control flow.
-
std::vector<Control> control_stack;
- size_t value_depth = 0;
- for (BytecodeIterator i(start + locals_encoded_size, end); i.has_next();
- i.next()) {
+ CLabel* func_label = new (zone) CLabel(zone);
+ control_stack.push_back({start, func_label, nullptr});
+ for (BytecodeIterator i(start, end, locals); i.has_next(); i.next()) {
WasmOpcode opcode = i.current();
- TRACE("@%u: control %s (depth = %zu)\n", i.pc_offset(),
- WasmOpcodes::OpcodeName(opcode), value_depth);
+ TRACE("@%u: control %s\n", i.pc_offset(),
+ WasmOpcodes::OpcodeName(opcode));
switch (opcode) {
case kExprBlock: {
- TRACE("control @%u $%zu: Block\n", i.pc_offset(), value_depth);
- CLabel* label = new (zone) CLabel(zone, value_depth);
+ TRACE("control @%u: Block\n", i.pc_offset());
+ CLabel* label = new (zone) CLabel(zone);
control_stack.push_back({i.pc(), label, nullptr});
break;
}
case kExprLoop: {
- TRACE("control @%u $%zu: Loop\n", i.pc_offset(), value_depth);
- CLabel* label1 = new (zone) CLabel(zone, value_depth);
- CLabel* label2 = new (zone) CLabel(zone, value_depth);
- control_stack.push_back({i.pc(), label1, nullptr});
- control_stack.push_back({i.pc(), label2, nullptr});
- label2->Bind(&map_, start, i.pc(), false);
+ TRACE("control @%u: Loop\n", i.pc_offset());
+ CLabel* label = new (zone) CLabel(zone);
+ control_stack.push_back({i.pc(), label, nullptr});
+ label->Bind(&map_, start, i.pc());
break;
}
case kExprIf: {
- TRACE("control @%u $%zu: If\n", i.pc_offset(), value_depth);
- value_depth--;
- CLabel* end_label = new (zone) CLabel(zone, value_depth);
- CLabel* else_label = new (zone) CLabel(zone, value_depth);
+ TRACE("control @%u: If\n", i.pc_offset());
+ CLabel* end_label = new (zone) CLabel(zone);
+ CLabel* else_label = new (zone) CLabel(zone);
control_stack.push_back({i.pc(), end_label, else_label});
- else_label->Ref(&map_, start, {i.pc(), value_depth, false});
+ else_label->Ref(&map_, start, i.pc());
break;
}
case kExprElse: {
Control* c = &control_stack.back();
- TRACE("control @%u $%zu: Else\n", i.pc_offset(), value_depth);
- c->end_label->Ref(&map_, start, {i.pc(), value_depth, false});
- value_depth = c->end_label->value_depth;
+ TRACE("control @%u: Else\n", i.pc_offset());
+ c->end_label->Ref(&map_, start, i.pc());
DCHECK_NOT_NULL(c->else_label);
- c->else_label->Bind(&map_, start, i.pc() + 1, false);
+ c->else_label->Bind(&map_, start, i.pc() + 1);
c->else_label = nullptr;
break;
}
case kExprEnd: {
Control* c = &control_stack.back();
- TRACE("control @%u $%zu: End\n", i.pc_offset(), value_depth);
+ TRACE("control @%u: End\n", i.pc_offset());
if (c->end_label->target) {
// only loops have bound labels.
DCHECK_EQ(kExprLoop, *c->pc);
- control_stack.pop_back();
- c = &control_stack.back();
+ } else {
+ if (c->else_label) c->else_label->Bind(&map_, start, i.pc());
+ c->end_label->Bind(&map_, start, i.pc() + 1);
}
- if (c->else_label)
- c->else_label->Bind(&map_, start, i.pc() + 1, true);
- c->end_label->Ref(&map_, start, {i.pc(), value_depth, false});
- c->end_label->Bind(&map_, start, i.pc() + 1, true);
- value_depth = c->end_label->value_depth + 1;
control_stack.pop_back();
break;
}
case kExprBr: {
BreakDepthOperand operand(&i, i.pc());
- TRACE("control @%u $%zu: Br[arity=%u, depth=%u]\n", i.pc_offset(),
- value_depth, operand.arity, operand.depth);
- value_depth -= operand.arity;
- control_stack[control_stack.size() - operand.depth - 1].Ref(
- &map_, start, i.pc(), value_depth, operand.arity > 0);
- value_depth++;
+ TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), operand.depth);
+ Control* c = &control_stack[control_stack.size() - operand.depth - 1];
+ c->Ref(&map_, start, i.pc());
break;
}
case kExprBrIf: {
BreakDepthOperand operand(&i, i.pc());
- TRACE("control @%u $%zu: BrIf[arity=%u, depth=%u]\n", i.pc_offset(),
- value_depth, operand.arity, operand.depth);
- value_depth -= (operand.arity + 1);
- control_stack[control_stack.size() - operand.depth - 1].Ref(
- &map_, start, i.pc(), value_depth, operand.arity > 0);
- value_depth++;
+ TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), operand.depth);
+ Control* c = &control_stack[control_stack.size() - operand.depth - 1];
+ c->Ref(&map_, start, i.pc());
break;
}
case kExprBrTable: {
BranchTableOperand operand(&i, i.pc());
- TRACE("control @%u $%zu: BrTable[arity=%u count=%u]\n", i.pc_offset(),
- value_depth, operand.arity, operand.table_count);
- value_depth -= (operand.arity + 1);
- for (uint32_t j = 0; j < operand.table_count + 1; ++j) {
- uint32_t target = operand.read_entry(&i, j);
- control_stack[control_stack.size() - target - 1].Ref(
- &map_, start, i.pc() + j, value_depth, operand.arity > 0);
+ BranchTableIterator iterator(&i, operand);
+ TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
+ operand.table_count);
+ while (iterator.has_next()) {
+ uint32_t j = iterator.cur_index();
+ uint32_t target = iterator.next();
+ Control* c = &control_stack[control_stack.size() - target - 1];
+ c->Ref(&map_, start, i.pc() + j);
}
- value_depth++;
break;
}
default: {
- value_depth = value_depth - OpcodeArity(i.pc(), end) + 1;
break;
}
}
}
+ if (!func_label->target) func_label->Bind(&map_, start, end);
}
- ControlTransfer Lookup(pc_t from) {
+ pcdiff_t Lookup(pc_t from) {
auto result = map_.find(from);
if (result == map_.end()) {
V8_Fatal(__FILE__, __LINE__, "no control target for pc %zu", from);
@@ -899,7 +907,7 @@ class CodeMap {
if (function->func_index < interpreter_code_.size()) {
InterpreterCode* code = &interpreter_code_[function->func_index];
DCHECK_EQ(function, code->function);
- return code;
+ return Preprocess(code);
}
return nullptr;
}
@@ -923,9 +931,9 @@ class CodeMap {
if (code->targets == nullptr && code->start) {
// Compute the control targets map and the local declarations.
CHECK(DecodeLocalDecls(code->locals, code->start, code->end));
- code->targets =
- new (zone_) ControlTransfers(zone_, code->locals.decls_encoded_size,
- code->orig_start, code->orig_end);
+ ModuleEnv env = {module_, nullptr, kWasmOrigin};
+ code->targets = new (zone_) ControlTransfers(
+ zone_, &env, &code->locals, code->orig_start, code->orig_end);
}
return code;
}
@@ -964,6 +972,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
instance_(instance),
stack_(zone),
frames_(zone),
+ blocks_(zone),
state_(WasmInterpreter::STOPPED),
break_pc_(kInvalidPc),
trap_reason_(kTrapCount) {}
@@ -984,6 +993,9 @@ class ThreadImpl : public WasmInterpreter::Thread {
stack_.push_back(args[i]);
}
frames_.back().ret_pc = InitLocals(code);
+ blocks_.push_back(
+ {0, stack_.size(), frames_.size(),
+ static_cast<uint32_t>(code->function->sig->return_count())});
TRACE(" => PushFrame(#%u @%zu)\n", code->function->func_index,
frames_.back().ret_pc);
}
@@ -1032,11 +1044,11 @@ class ThreadImpl : public WasmInterpreter::Thread {
return nullptr;
}
- virtual WasmVal GetReturnValue() {
+ virtual WasmVal GetReturnValue(int index) {
if (state_ == WasmInterpreter::TRAPPED) return WasmVal(0xdeadbeef);
CHECK_EQ(WasmInterpreter::FINISHED, state_);
- CHECK_EQ(1, stack_.size());
- return stack_[0];
+ CHECK_LT(static_cast<size_t>(index), stack_.size());
+ return stack_[index];
}
virtual pc_t GetBreakpointPc() { return break_pc_; }
@@ -1060,10 +1072,18 @@ class ThreadImpl : public WasmInterpreter::Thread {
sp_t llimit() { return plimit() + code->locals.total_local_count; }
};
+ struct Block {
+ pc_t pc;
+ sp_t sp;
+ size_t fp;
+ unsigned arity;
+ };
+
CodeMap* codemap_;
WasmModuleInstance* instance_;
ZoneVector<WasmVal> stack_;
ZoneVector<Frame> frames_;
+ ZoneVector<Block> blocks_;
WasmInterpreter::State state_;
pc_t break_pc_;
TrapReason trap_reason_;
@@ -1088,6 +1108,9 @@ class ThreadImpl : public WasmInterpreter::Thread {
DCHECK_GE(stack_.size(), arity);
// The parameters will overlap the arguments already on the stack.
frames_.push_back({code, 0, 0, stack_.size() - arity});
+ blocks_.push_back(
+ {0, stack_.size(), frames_.size(),
+ static_cast<uint32_t>(code->function->sig->return_count())});
frames_.back().ret_pc = InitLocals(code);
TRACE(" => push func#%u @%zu\n", code->function->func_index,
frames_.back().ret_pc);
@@ -1126,21 +1149,38 @@ class ThreadImpl : public WasmInterpreter::Thread {
bool SkipBreakpoint(InterpreterCode* code, pc_t pc) {
if (pc == break_pc_) {
+ // Skip the previously hit breakpoint when resuming.
break_pc_ = kInvalidPc;
return true;
}
return false;
}
- bool DoReturn(InterpreterCode** code, pc_t* pc, pc_t* limit, WasmVal val) {
+ int LookupTarget(InterpreterCode* code, pc_t pc) {
+ return static_cast<int>(code->targets->Lookup(pc));
+ }
+
+ int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
+ size_t bp = blocks_.size() - depth - 1;
+ Block* target = &blocks_[bp];
+ DoStackTransfer(target->sp, target->arity);
+ blocks_.resize(bp);
+ return LookupTarget(code, pc);
+ }
+
+ bool DoReturn(InterpreterCode** code, pc_t* pc, pc_t* limit, size_t arity) {
DCHECK_GT(frames_.size(), 0u);
- stack_.resize(frames_.back().sp);
+ // Pop all blocks for this frame.
+ while (!blocks_.empty() && blocks_.back().fp == frames_.size()) {
+ blocks_.pop_back();
+ }
+
+ sp_t dest = frames_.back().sp;
frames_.pop_back();
if (frames_.size() == 0) {
- // A return from the top frame terminates the execution.
+ // A return from the last frame terminates the execution.
state_ = WasmInterpreter::FINISHED;
- stack_.clear();
- stack_.push_back(val);
+ DoStackTransfer(0, arity);
TRACE(" => finish\n");
return false;
} else {
@@ -1149,16 +1189,8 @@ class ThreadImpl : public WasmInterpreter::Thread {
*code = top->code;
*pc = top->ret_pc;
*limit = top->code->end - top->code->start;
- if (top->code->start[top->call_pc] == kExprCallIndirect ||
- (top->code->orig_start &&
- top->code->orig_start[top->call_pc] == kExprCallIndirect)) {
- // UGLY: An indirect call has the additional function index on the
- // stack.
- stack_.pop_back();
- }
TRACE(" => pop func#%u @%zu\n", (*code)->function->func_index, *pc);
-
- stack_.push_back(val);
+ DoStackTransfer(dest, arity);
return true;
}
}
@@ -1169,31 +1201,21 @@ class ThreadImpl : public WasmInterpreter::Thread {
*limit = target->end - target->start;
}
- // Adjust the program counter {pc} and the stack contents according to the
- // code's precomputed control transfer map. Returns the different between
- // the new pc and the old pc.
- int DoControlTransfer(InterpreterCode* code, pc_t pc) {
- auto target = code->targets->Lookup(pc);
- switch (target.action) {
- case ControlTransfer::kNoAction:
- TRACE(" action [sp-%u]\n", target.spdiff);
- PopN(target.spdiff);
- break;
- case ControlTransfer::kPopAndRepush: {
- WasmVal val = Pop();
- TRACE(" action [pop x, sp-%u, push x]\n", target.spdiff - 1);
- DCHECK_GE(target.spdiff, 1u);
- PopN(target.spdiff - 1);
- Push(pc, val);
- break;
- }
- case ControlTransfer::kPushVoid:
- TRACE(" action [sp-%u, push void]\n", target.spdiff);
- PopN(target.spdiff);
- Push(pc, WasmVal());
- break;
+ // Copies {arity} values on the top of the stack down the stack to {dest},
+ // dropping the values in-between.
+ void DoStackTransfer(sp_t dest, size_t arity) {
+ // before: |---------------| pop_count | arity |
+ // ^ 0 ^ dest ^ stack_.size()
+ //
+ // after: |---------------| arity |
+ // ^ 0 ^ stack_.size()
+ DCHECK_LE(dest, stack_.size());
+ DCHECK_LE(dest + arity, stack_.size());
+ size_t pop_count = stack_.size() - dest - arity;
+ for (size_t i = 0; i < arity; i++) {
+ stack_[dest + i] = stack_[dest + pop_count + i];
}
- return target.pcdiff;
+ stack_.resize(stack_.size() - pop_count);
}
void Execute(InterpreterCode* code, pc_t pc, int max) {
@@ -1209,8 +1231,8 @@ class ThreadImpl : public WasmInterpreter::Thread {
if (pc >= limit) {
// Fell off end of code; do an implicit return.
TRACE("@%-3zu: ImplicitReturn\n", pc);
- WasmVal val = PopArity(code->function->sig->return_count());
- if (!DoReturn(&code, &pc, &limit, val)) return;
+ if (!DoReturn(&code, &pc, &limit, code->function->sig->return_count()))
+ return;
decoder.Reset(code->start, code->end);
continue;
}
@@ -1243,27 +1265,37 @@ class ThreadImpl : public WasmInterpreter::Thread {
switch (orig) {
case kExprNop:
- Push(pc, WasmVal());
break;
- case kExprBlock:
+ case kExprBlock: {
+ BlockTypeOperand operand(&decoder, code->at(pc));
+ blocks_.push_back({pc, stack_.size(), frames_.size(), operand.arity});
+ len = 1 + operand.length;
+ break;
+ }
case kExprLoop: {
- // Do nothing.
+ BlockTypeOperand operand(&decoder, code->at(pc));
+ blocks_.push_back({pc, stack_.size(), frames_.size(), 0});
+ len = 1 + operand.length;
break;
}
case kExprIf: {
+ BlockTypeOperand operand(&decoder, code->at(pc));
WasmVal cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
+ blocks_.push_back({pc, stack_.size(), frames_.size(), operand.arity});
if (is_true) {
// fall through to the true block.
+ len = 1 + operand.length;
TRACE(" true => fallthrough\n");
} else {
- len = DoControlTransfer(code, pc);
+ len = LookupTarget(code, pc);
TRACE(" false => @%zu\n", pc + len);
}
break;
}
case kExprElse: {
- len = DoControlTransfer(code, pc);
+ blocks_.pop_back();
+ len = LookupTarget(code, pc);
TRACE(" end => @%zu\n", pc + len);
break;
}
@@ -1276,42 +1308,34 @@ class ThreadImpl : public WasmInterpreter::Thread {
}
case kExprBr: {
BreakDepthOperand operand(&decoder, code->at(pc));
- WasmVal val = PopArity(operand.arity);
- len = DoControlTransfer(code, pc);
+ len = DoBreak(code, pc, operand.depth);
TRACE(" br => @%zu\n", pc + len);
- if (operand.arity > 0) Push(pc, val);
break;
}
case kExprBrIf: {
BreakDepthOperand operand(&decoder, code->at(pc));
WasmVal cond = Pop();
- WasmVal val = PopArity(operand.arity);
bool is_true = cond.to<uint32_t>() != 0;
if (is_true) {
- len = DoControlTransfer(code, pc);
+ len = DoBreak(code, pc, operand.depth);
TRACE(" br_if => @%zu\n", pc + len);
- if (operand.arity > 0) Push(pc, val);
} else {
TRACE(" false => fallthrough\n");
len = 1 + operand.length;
- Push(pc, WasmVal());
}
break;
}
case kExprBrTable: {
BranchTableOperand operand(&decoder, code->at(pc));
uint32_t key = Pop().to<uint32_t>();
- WasmVal val = PopArity(operand.arity);
if (key >= operand.table_count) key = operand.table_count;
- len = DoControlTransfer(code, pc + key) + key;
- TRACE(" br[%u] => @%zu\n", key, pc + len);
- if (operand.arity > 0) Push(pc, val);
+ len = key + DoBreak(code, pc + key, operand.table[key]);
+ TRACE(" br[%u] => @%zu\n", key, pc + key + len);
break;
}
case kExprReturn: {
- ReturnArityOperand operand(&decoder, code->at(pc));
- WasmVal val = PopArity(operand.arity);
- if (!DoReturn(&code, &pc, &limit, val)) return;
+ size_t arity = code->function->sig->return_count();
+ if (!DoReturn(&code, &pc, &limit, arity)) return;
decoder.Reset(code->start, code->end);
continue;
}
@@ -1320,8 +1344,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
return CommitPc(pc);
}
case kExprEnd: {
- len = DoControlTransfer(code, pc);
- DCHECK_EQ(1, len);
+ blocks_.pop_back();
break;
}
case kExprI8Const: {
@@ -1364,10 +1387,21 @@ class ThreadImpl : public WasmInterpreter::Thread {
LocalIndexOperand operand(&decoder, code->at(pc));
WasmVal val = Pop();
stack_[frames_.back().sp + operand.index] = val;
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprTeeLocal: {
+ LocalIndexOperand operand(&decoder, code->at(pc));
+ WasmVal val = Pop();
+ stack_[frames_.back().sp + operand.index] = val;
Push(pc, val);
len = 1 + operand.length;
break;
}
+ case kExprDrop: {
+ Pop();
+ break;
+ }
case kExprCallFunction: {
CallFunctionOperand operand(&decoder, code->at(pc));
InterpreterCode* target = codemap()->GetCode(operand.index);
@@ -1378,9 +1412,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
}
case kExprCallIndirect: {
CallIndirectOperand operand(&decoder, code->at(pc));
- size_t index = stack_.size() - operand.arity - 1;
- DCHECK_LT(index, stack_.size());
- uint32_t entry_index = stack_[index].to<uint32_t>();
+ uint32_t entry_index = Pop().to<uint32_t>();
// Assume only one table for now.
DCHECK_LE(module()->function_tables.size(), 1u);
InterpreterCode* target = codemap()->GetIndirectCode(0, entry_index);
@@ -1395,10 +1427,6 @@ class ThreadImpl : public WasmInterpreter::Thread {
decoder.Reset(code->start, code->end);
continue;
}
- case kExprCallImport: {
- UNIMPLEMENTED();
- break;
- }
case kExprGetGlobal: {
GlobalIndexOperand operand(&decoder, code->at(pc));
const WasmGlobal* global = &module()->globals[operand.index];
@@ -1437,14 +1465,13 @@ class ThreadImpl : public WasmInterpreter::Thread {
} else {
UNREACHABLE();
}
- Push(pc, val);
len = 1 + operand.length;
break;
}
#define LOAD_CASE(name, ctype, mtype) \
case kExpr##name: { \
- MemoryAccessOperand operand(&decoder, code->at(pc)); \
+ MemoryAccessOperand operand(&decoder, code->at(pc), sizeof(ctype)); \
uint32_t index = Pop().to<uint32_t>(); \
size_t effective_mem_size = instance()->mem_size - sizeof(mtype); \
if (operand.offset > effective_mem_size || \
@@ -1476,7 +1503,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
#define STORE_CASE(name, ctype, mtype) \
case kExpr##name: { \
- MemoryAccessOperand operand(&decoder, code->at(pc)); \
+ MemoryAccessOperand operand(&decoder, code->at(pc), sizeof(ctype)); \
WasmVal val = Pop(); \
uint32_t index = Pop().to<uint32_t>(); \
size_t effective_mem_size = instance()->mem_size - sizeof(mtype); \
@@ -1486,7 +1513,6 @@ class ThreadImpl : public WasmInterpreter::Thread {
} \
byte* addr = instance()->mem_start + operand.offset + index; \
WriteLittleEndianValue<mtype>(addr, static_cast<mtype>(val.to<ctype>())); \
- Push(pc, val); \
len = 1 + operand.length; \
break; \
}
@@ -1546,9 +1572,14 @@ class ThreadImpl : public WasmInterpreter::Thread {
ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
#undef ASMJS_STORE_CASE
-
+ case kExprGrowMemory: {
+ uint32_t delta_pages = Pop().to<uint32_t>();
+ Push(pc, WasmVal(ExecuteGrowMemory(delta_pages, instance())));
+ break;
+ }
case kExprMemorySize: {
- Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size)));
+ Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size /
+ WasmModule::kPageSize)));
break;
}
#define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
@@ -1623,7 +1654,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
void Push(pc_t pc, WasmVal val) {
// TODO(titzer): store PC as well?
- stack_.push_back(val);
+ if (val.type != kAstStmt) stack_.push_back(val);
}
void TraceStack(const char* phase, pc_t pc) {
@@ -1700,7 +1731,7 @@ class WasmInterpreterInternals : public ZoneObject {
// Implementation of the public interface of the interpreter.
//============================================================================
WasmInterpreter::WasmInterpreter(WasmModuleInstance* instance,
- base::AccountingAllocator* allocator)
+ AccountingAllocator* allocator)
: zone_(allocator),
internals_(new (&zone_) WasmInterpreterInternals(&zone_, instance)) {}
@@ -1804,7 +1835,7 @@ bool WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
Zone* zone, const byte* start, const byte* end) {
- ControlTransfers targets(zone, 0, start, end);
+ ControlTransfers targets(zone, nullptr, nullptr, start, end);
return targets.map_;
}
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index b106a202d2..b61e092e23 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -6,7 +6,7 @@
#define V8_WASM_INTERPRETER_H_
#include "src/wasm/wasm-opcodes.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace base {
@@ -28,15 +28,7 @@ typedef uint32_t spdiff_t;
const pc_t kInvalidPc = 0x80000000;
-// Visible for testing. A {ControlTransfer} helps the interpreter figure out
-// the target program counter and stack manipulations for a branch.
-struct ControlTransfer {
- enum StackAction { kNoAction, kPopAndRepush, kPushVoid };
- pcdiff_t pcdiff; // adjustment to the program counter (positive or negative).
- spdiff_t spdiff; // number of elements to pop off the stack.
- StackAction action; // action to perform on the stack.
-};
-typedef ZoneMap<pc_t, ControlTransfer> ControlTransferMap;
+typedef ZoneMap<pc_t, pcdiff_t> ControlTransferMap;
// Macro for defining union members.
#define FOREACH_UNION_MEMBER(V) \
@@ -102,7 +94,7 @@ class WasmFrame {
};
// An interpreter capable of executing WASM.
-class WasmInterpreter {
+class V8_EXPORT_PRIVATE WasmInterpreter {
public:
// State machine for a Thread:
// +---------------Run()-----------+
@@ -132,15 +124,14 @@ class WasmInterpreter {
virtual int GetFrameCount() = 0;
virtual const WasmFrame* GetFrame(int index) = 0;
virtual WasmFrame* GetMutableFrame(int index) = 0;
- virtual WasmVal GetReturnValue() = 0;
+ virtual WasmVal GetReturnValue(int index = 0) = 0;
// Thread-specific breakpoints.
bool SetBreakpoint(const WasmFunction* function, int pc, bool enabled);
bool GetBreakpoint(const WasmFunction* function, int pc);
};
- WasmInterpreter(WasmModuleInstance* instance,
- base::AccountingAllocator* allocator);
+ WasmInterpreter(WasmModuleInstance* instance, AccountingAllocator* allocator);
~WasmInterpreter();
//==========================================================================
@@ -190,9 +181,8 @@ class WasmInterpreter {
bool SetFunctionCodeForTesting(const WasmFunction* function,
const byte* start, const byte* end);
- // Computes the control targets for the given bytecode as {pc offset, sp
- // offset}
- // pairs. Used internally in the interpreter, but exposed for testing.
+ // Computes the control transfers for the given bytecode. Used internally in
+ // the interpreter, but exposed for testing.
static ControlTransferMap ComputeControlTransfersForTesting(Zone* zone,
const byte* start,
const byte* end);
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 10ae43c78b..254fd7061a 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -9,8 +9,6 @@
#include "src/asmjs/asm-wasm-builder.h"
#include "src/assert-scope.h"
#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/compiler.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/handles.h"
@@ -18,7 +16,6 @@
#include "src/objects.h"
#include "src/parsing/parse-info.h"
-#include "src/wasm/encoder.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
@@ -31,6 +28,13 @@ using v8::internal::wasm::ErrorThrower;
namespace v8 {
namespace {
+i::Handle<i::String> v8_str(i::Isolate* isolate, const char* str) {
+ return isolate->factory()->NewStringFromAsciiChecked(str);
+}
+Local<String> v8_str(Isolate* isolate, const char* str) {
+ return Utils::ToLocal(v8_str(reinterpret_cast<i::Isolate*>(isolate), str));
+}
+
struct RawBuffer {
const byte* start;
const byte* end;
@@ -80,7 +84,7 @@ void VerifyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
ErrorThrower thrower(isolate, "Wasm.verifyModule()");
if (args.Length() < 1) {
- thrower.Error("Argument 0 must be a buffer source");
+ thrower.TypeError("Argument 0 must be a buffer source");
return;
}
RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
@@ -104,7 +108,7 @@ void VerifyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
ErrorThrower thrower(isolate, "Wasm.verifyFunction()");
if (args.Length() < 1) {
- thrower.Error("Argument 0 must be a buffer source");
+ thrower.TypeError("Argument 0 must be a buffer source");
return;
}
RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
@@ -135,13 +139,11 @@ i::MaybeHandle<i::JSObject> InstantiateModule(
// Decode but avoid a redundant pass over function bodies for verification.
// Verification will happen during compilation.
i::Zone zone(isolate->allocator());
- internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
- isolate, &zone, start, end, false, origin);
-
+ i::MaybeHandle<i::JSObject> module_object =
+ i::wasm::CreateModuleObjectFromBytes(isolate, start, end, thrower,
+ origin);
i::MaybeHandle<i::JSObject> object;
- if (result.failed()) {
- thrower->Failed("", result);
- } else {
+ if (!module_object.is_null()) {
// Success. Instantiate the module and return the object.
i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
if (args.Length() > 1 && args[1]->IsObject()) {
@@ -156,19 +158,12 @@ i::MaybeHandle<i::JSObject> InstantiateModule(
memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
}
- i::MaybeHandle<i::FixedArray> compiled_module =
- result.val->CompileFunctions(isolate, thrower);
- if (!thrower->error()) {
- DCHECK(!compiled_module.is_null());
- object = i::wasm::WasmModule::Instantiate(
- isolate, compiled_module.ToHandleChecked(), ffi, memory);
- if (!object.is_null()) {
- args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
- }
+ object = i::wasm::WasmModule::Instantiate(
+ isolate, thrower, module_object.ToHandleChecked(), ffi, memory);
+ if (!object.is_null()) {
+ args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
}
}
-
- if (result.val) delete result.val;
return object;
}
@@ -178,7 +173,7 @@ void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
ErrorThrower thrower(isolate, "Wasm.instantiateModule()");
if (args.Length() < 1) {
- thrower.Error("Argument 0 must be a buffer source");
+ thrower.TypeError("Argument 0 must be a buffer source");
return;
}
RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
@@ -197,20 +192,37 @@ static i::MaybeHandle<i::JSObject> CreateModuleObject(
if (buffer.start == nullptr) return i::MaybeHandle<i::JSObject>();
DCHECK(source->IsArrayBuffer() || source->IsTypedArray());
- i::Zone zone(i_isolate->allocator());
- i::wasm::ModuleResult result = i::wasm::DecodeWasmModule(
- i_isolate, &zone, buffer.start, buffer.end, false, i::wasm::kWasmOrigin);
- std::unique_ptr<const i::wasm::WasmModule> decoded_module(result.val);
- if (result.failed()) {
- thrower->Failed("", result);
- return nothing;
- }
- i::MaybeHandle<i::FixedArray> compiled_module =
- decoded_module->CompileFunctions(i_isolate, thrower);
- if (compiled_module.is_null()) return nothing;
+ return i::wasm::CreateModuleObjectFromBytes(
+ i_isolate, buffer.start, buffer.end, thrower,
+ i::wasm::ModuleOrigin::kWasmOrigin);
+}
+
+static bool ValidateModule(v8::Isolate* isolate,
+ const v8::Local<v8::Value> source,
+ ErrorThrower* thrower) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::MaybeHandle<i::JSObject> nothing;
+
+ RawBuffer buffer = GetRawBufferSource(source, thrower);
+ if (buffer.start == nullptr) return false;
- return i::wasm::CreateCompiledModuleObject(i_isolate,
- compiled_module.ToHandleChecked());
+ DCHECK(source->IsArrayBuffer() || source->IsTypedArray());
+ return i::wasm::ValidateModuleBytes(i_isolate, buffer.start, buffer.end,
+ thrower,
+ i::wasm::ModuleOrigin::kWasmOrigin);
+}
+
+bool BrandCheck(Isolate* isolate, i::Handle<i::Object> value,
+ i::Handle<i::Symbol> sym, const char* msg) {
+ if (value->IsJSObject()) {
+ i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
+ Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, sym);
+ if (has_brand.IsNothing()) return false;
+ if (has_brand.ToChecked()) return true;
+ }
+ v8::Local<v8::Value> e = v8::Exception::TypeError(v8_str(isolate, msg));
+ isolate->ThrowException(e);
+ return false;
}
void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -220,7 +232,7 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
"WebAssembly.compile()");
if (args.Length() < 1) {
- thrower.Error("Argument 0 must be a buffer source");
+ thrower.TypeError("Argument 0 must be a buffer source");
return;
}
i::MaybeHandle<i::JSObject> module_obj =
@@ -238,6 +250,25 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
return_value.Set(resolver->GetPromise());
}
+void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
+ "WebAssembly.validate()");
+
+ if (args.Length() < 1) {
+ thrower.TypeError("Argument 0 must be a buffer source");
+ return;
+ }
+
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ if (ValidateModule(isolate, args[0], &thrower)) {
+ return_value.Set(v8::True(isolate));
+ } else {
+ return_value.Set(v8::False(isolate));
+ }
+}
+
void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
@@ -245,7 +276,7 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
"WebAssembly.Module()");
if (args.Length() < 1) {
- thrower.Error("Argument 0 must be a buffer source");
+ thrower.TypeError("Argument 0 must be a buffer source");
return;
}
i::MaybeHandle<i::JSObject> module_obj =
@@ -264,18 +295,15 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
ErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
if (args.Length() < 1) {
- thrower.Error(
- "Argument 0 must be provided, and must be a WebAssembly.Module object");
+ thrower.TypeError("Argument 0 must be a WebAssembly.Module");
return;
}
Local<Context> context = isolate->GetCurrentContext();
i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- i::Handle<i::Symbol> module_sym(i_context->wasm_module_sym());
- i::MaybeHandle<i::Object> source =
- i::Object::GetProperty(Utils::OpenHandle(*args[0]), module_sym);
- if (source.is_null() || source.ToHandleChecked()->IsUndefined(i_isolate)) {
- thrower.Error("Argument 0 must be a WebAssembly.Module");
+ if (!BrandCheck(isolate, Utils::OpenHandle(*args[0]),
+ i::Handle<i::Symbol>(i_context->wasm_module_sym()),
+ "Argument 0 must be a WebAssembly.Module")) {
return;
}
@@ -285,13 +313,10 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
if (module_obj->GetInternalFieldCount() < 1 ||
!module_obj->GetInternalField(0)->IsFixedArray()) {
- thrower.Error("Argument 0 is an invalid WebAssembly.Module");
+ thrower.TypeError("Argument 0 is an invalid WebAssembly.Module");
return;
}
- i::Handle<i::FixedArray> compiled_code = i::Handle<i::FixedArray>(
- i::FixedArray::cast(module_obj->GetInternalField(0)));
-
i::Handle<i::JSReceiver> ffi = i::Handle<i::JSObject>::null();
if (args.Length() > 1 && args[1]->IsObject()) {
Local<Object> obj = Local<Object>::Cast(args[1]);
@@ -304,17 +329,211 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
}
- i::MaybeHandle<i::JSObject> instance =
- i::wasm::WasmModule::Instantiate(i_isolate, compiled_code, ffi, memory);
+ i::MaybeHandle<i::JSObject> instance = i::wasm::WasmModule::Instantiate(
+ i_isolate, &thrower, module_obj, ffi, memory);
if (instance.is_null()) {
- thrower.Error("Could not instantiate module");
+ if (!thrower.error()) thrower.Error("Could not instantiate module");
return;
}
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(Utils::ToLocal(instance.ToHandleChecked()));
}
+
+bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
+ Local<Context> context, Local<v8::Object> object,
+ Local<String> property, int* result, int lower_bound,
+ int upper_bound) {
+ v8::MaybeLocal<v8::Value> maybe = object->Get(context, property);
+ v8::Local<v8::Value> value;
+ if (maybe.ToLocal(&value)) {
+ int64_t number;
+ if (!value->IntegerValue(context).To(&number)) return false;
+ if (number < static_cast<int64_t>(lower_bound)) {
+ thrower->RangeError("Property value %" PRId64
+ " is below the lower bound %d",
+ number, lower_bound);
+ return false;
+ }
+ if (number > static_cast<int64_t>(upper_bound)) {
+ thrower->RangeError("Property value %" PRId64
+ " is above the upper bound %d",
+ number, upper_bound);
+ return false;
+ }
+ *result = static_cast<int>(number);
+ return true;
+ }
+ return false;
+}
+
+void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
+ "WebAssembly.Module()");
+ if (args.Length() < 1 || !args[0]->IsObject()) {
+ thrower.TypeError("Argument 0 must be a table descriptor");
+ return;
+ }
+ Local<Context> context = isolate->GetCurrentContext();
+ Local<v8::Object> descriptor = args[0]->ToObject(context).ToLocalChecked();
+ // The descriptor's 'element'.
+ {
+ v8::MaybeLocal<v8::Value> maybe =
+ descriptor->Get(context, v8_str(isolate, "element"));
+ v8::Local<v8::Value> value;
+ if (!maybe.ToLocal(&value)) return;
+ v8::Local<v8::String> string;
+ if (!value->ToString(context).ToLocal(&string)) return;
+ bool equal;
+ if (!string->Equals(context, v8_str(isolate, "anyfunc")).To(&equal)) return;
+ if (!equal) {
+ thrower.TypeError("Descriptor property 'element' must be 'anyfunc'");
+ return;
+ }
+ }
+ const int max_table_size = 1 << 26;
+ // The descriptor's 'initial'.
+ int initial;
+ if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
+ v8_str(isolate, "initial"), &initial, 0,
+ max_table_size)) {
+ return;
+ }
+ // The descriptor's 'maximum'.
+ int maximum = 0;
+ Local<String> maximum_key = v8_str(isolate, "maximum");
+ Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
+
+ if (has_maximum.IsNothing()) {
+ // There has been an exception, just return.
+ return;
+ }
+ if (has_maximum.FromJust()) {
+ if (!GetIntegerProperty(isolate, &thrower, context, descriptor, maximum_key,
+ &maximum, initial, max_table_size)) {
+ return;
+ }
+ }
+
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::JSFunction> table_ctor(
+ i_isolate->native_context()->wasm_table_constructor());
+ i::Handle<i::JSObject> table_obj =
+ i_isolate->factory()->NewJSObject(table_ctor);
+ i::Handle<i::FixedArray> fixed_array =
+ i_isolate->factory()->NewFixedArray(initial);
+ i::Object* null = i_isolate->heap()->null_value();
+ for (int i = 0; i < initial; ++i) fixed_array->set(i, null);
+ table_obj->SetInternalField(0, *fixed_array);
+ table_obj->SetInternalField(
+ 1, has_maximum.FromJust()
+ ? static_cast<i::Object*>(i::Smi::FromInt(maximum))
+ : static_cast<i::Object*>(i_isolate->heap()->undefined_value()));
+ i::Handle<i::Symbol> table_sym(i_isolate->native_context()->wasm_table_sym());
+ i::Object::SetProperty(table_obj, table_sym, table_obj, i::STRICT).Check();
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(Utils::ToLocal(table_obj));
+}
+
+void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
+ "WebAssembly.Module()");
+ if (args.Length() < 1 || !args[0]->IsObject()) {
+ thrower.TypeError("Argument 0 must be a table descriptor");
+ return;
+ }
+ Local<Context> context = isolate->GetCurrentContext();
+ Local<v8::Object> descriptor = args[0]->ToObject(context).ToLocalChecked();
+ // The descriptor's 'initial'.
+ int initial;
+ if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
+ v8_str(isolate, "initial"), &initial, 0, 65536)) {
+ return;
+ }
+ // The descriptor's 'maximum'.
+ int maximum = 0;
+ Local<String> maximum_key = v8_str(isolate, "maximum");
+ Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
+
+ if (has_maximum.IsNothing()) {
+ // There has been an exception, just return.
+ return;
+ }
+ if (has_maximum.FromJust()) {
+ if (!GetIntegerProperty(isolate, &thrower, context, descriptor, maximum_key,
+ &maximum, initial, 65536)) {
+ return;
+ }
+ }
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::JSArrayBuffer> buffer =
+ i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
+ size_t size = static_cast<size_t>(i::wasm::WasmModule::kPageSize) *
+ static_cast<size_t>(initial);
+ i::JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, size);
+
+ i::Handle<i::JSObject> memory_obj = i::WasmJs::CreateWasmMemoryObject(
+ i_isolate, buffer, has_maximum.FromJust(), maximum);
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(Utils::ToLocal(memory_obj));
+}
+void WebAssemblyTableGetLength(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ // TODO(rossberg)
+}
+void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ // TODO(rossberg)
+}
+void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ // TODO(rossberg)
+}
+void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ // TODO(rossberg)
+}
+void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ // TODO(rossberg)
+}
+void WebAssemblyMemoryGetBuffer(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ Local<Context> context = isolate->GetCurrentContext();
+ i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+ if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
+ i::Handle<i::Symbol>(i_context->wasm_memory_sym()),
+ "Receiver is not a WebAssembly.Memory")) {
+ return;
+ }
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::JSObject> receiver =
+ i::Handle<i::JSObject>::cast(Utils::OpenHandle(*args.This()));
+ i::Handle<i::Object> buffer(receiver->GetInternalField(0), i_isolate);
+ DCHECK(buffer->IsJSArrayBuffer());
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(Utils::ToLocal(buffer));
+}
} // namespace
+i::Handle<i::JSObject> i::WasmJs::CreateWasmMemoryObject(
+ i::Isolate* i_isolate, i::Handle<i::JSArrayBuffer> buffer, bool has_maximum,
+ int maximum) {
+ i::Handle<i::JSFunction> memory_ctor(
+ i_isolate->native_context()->wasm_memory_constructor());
+ i::Handle<i::JSObject> memory_obj =
+ i_isolate->factory()->NewJSObject(memory_ctor);
+ memory_obj->SetInternalField(0, *buffer);
+ memory_obj->SetInternalField(
+ 1, has_maximum
+ ? static_cast<i::Object*>(i::Smi::FromInt(maximum))
+ : static_cast<i::Object*>(i_isolate->heap()->undefined_value()));
+ i::Handle<i::Symbol> memory_sym(
+ i_isolate->native_context()->wasm_memory_sym());
+ i::Object::SetProperty(memory_obj, memory_sym, memory_obj, i::STRICT).Check();
+ return memory_obj;
+}
+
// TODO(titzer): we use the API to create the function template because the
// internal guts are too ugly to replicate here.
static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
@@ -325,12 +544,9 @@ static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
}
namespace internal {
-static Handle<String> v8_str(Isolate* isolate, const char* str) {
- return isolate->factory()->NewStringFromAsciiChecked(str);
-}
-static Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
- const char* str, FunctionCallback func) {
+Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
+ const char* str, FunctionCallback func) {
Handle<String> name = v8_str(isolate, str);
Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
Handle<JSFunction> function =
@@ -341,6 +557,112 @@ static Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
return function;
}
+Handle<JSFunction> InstallGetter(Isolate* isolate, Handle<JSObject> object,
+ const char* str, FunctionCallback func) {
+ Handle<String> name = v8_str(isolate, str);
+ Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
+ Handle<JSFunction> function =
+ ApiNatives::InstantiateFunction(temp).ToHandleChecked();
+ v8::PropertyAttribute attributes =
+ static_cast<v8::PropertyAttribute>(v8::DontDelete | v8::ReadOnly);
+ Utils::ToLocal(object)->SetAccessorProperty(Utils::ToLocal(name),
+ Utils::ToLocal(function),
+ Local<Function>(), attributes);
+ return function;
+}
+
+void WasmJs::InstallWasmModuleSymbolIfNeeded(Isolate* isolate,
+ Handle<JSGlobalObject> global,
+ Handle<Context> context) {
+ if (!context->get(Context::WASM_MODULE_SYM_INDEX)->IsSymbol() ||
+ !context->get(Context::WASM_INSTANCE_SYM_INDEX)->IsSymbol()) {
+ InstallWasmMapsIfNeeded(isolate, isolate->native_context());
+ InstallWasmConstructors(isolate, isolate->global_object(),
+ isolate->native_context());
+ }
+}
+
+void WasmJs::InstallWasmConstructors(Isolate* isolate,
+ Handle<JSGlobalObject> global,
+ Handle<Context> context) {
+ Factory* factory = isolate->factory();
+ // Create private symbols.
+ Handle<Symbol> module_sym = factory->NewPrivateSymbol();
+ context->set_wasm_module_sym(*module_sym);
+
+ Handle<Symbol> instance_sym = factory->NewPrivateSymbol();
+ context->set_wasm_instance_sym(*instance_sym);
+
+ Handle<Symbol> table_sym = factory->NewPrivateSymbol();
+ context->set_wasm_table_sym(*table_sym);
+
+ Handle<Symbol> memory_sym = factory->NewPrivateSymbol();
+ context->set_wasm_memory_sym(*memory_sym);
+
+ // Bind the WebAssembly object.
+ Handle<String> name = v8_str(isolate, "WebAssembly");
+ Handle<JSFunction> cons = factory->NewFunction(name);
+ JSFunction::SetInstancePrototype(
+ cons, Handle<Object>(context->initial_object_prototype(), isolate));
+ cons->shared()->set_instance_class_name(*name);
+ Handle<JSObject> wasm_object = factory->NewJSObject(cons, TENURED);
+ PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
+ JSObject::AddProperty(global, name, wasm_object, attributes);
+
+ // Setup compile
+ InstallFunc(isolate, wasm_object, "compile", WebAssemblyCompile);
+
+ // Setup compile
+ InstallFunc(isolate, wasm_object, "validate", WebAssemblyValidate);
+
+ // Setup Module
+ Handle<JSFunction> module_constructor =
+ InstallFunc(isolate, wasm_object, "Module", WebAssemblyModule);
+ context->set_wasm_module_constructor(*module_constructor);
+ Handle<JSObject> module_proto =
+ factory->NewJSObject(module_constructor, TENURED);
+ i::Handle<i::Map> map = isolate->factory()->NewMap(
+ i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + i::kPointerSize);
+ JSFunction::SetInitialMap(module_constructor, map, module_proto);
+ JSObject::AddProperty(module_proto, isolate->factory()->constructor_string(),
+ module_constructor, DONT_ENUM);
+
+ // Setup Instance
+ Handle<JSFunction> instance_constructor =
+ InstallFunc(isolate, wasm_object, "Instance", WebAssemblyInstance);
+ context->set_wasm_instance_constructor(*instance_constructor);
+
+ // Setup Table
+ Handle<JSFunction> table_constructor =
+ InstallFunc(isolate, wasm_object, "Table", WebAssemblyTable);
+ context->set_wasm_table_constructor(*table_constructor);
+ Handle<JSObject> table_proto =
+ factory->NewJSObject(table_constructor, TENURED);
+ map = isolate->factory()->NewMap(
+ i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + 2 * i::kPointerSize);
+ JSFunction::SetInitialMap(table_constructor, map, table_proto);
+ JSObject::AddProperty(table_proto, isolate->factory()->constructor_string(),
+ table_constructor, DONT_ENUM);
+ InstallGetter(isolate, table_proto, "length", WebAssemblyTableGetLength);
+ InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow);
+ InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet);
+ InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet);
+
+ // Setup Memory
+ Handle<JSFunction> memory_constructor =
+ InstallFunc(isolate, wasm_object, "Memory", WebAssemblyMemory);
+ context->set_wasm_memory_constructor(*memory_constructor);
+ Handle<JSObject> memory_proto =
+ factory->NewJSObject(memory_constructor, TENURED);
+ map = isolate->factory()->NewMap(
+ i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + 2 * i::kPointerSize);
+ JSFunction::SetInitialMap(memory_constructor, map, memory_proto);
+ JSObject::AddProperty(memory_proto, isolate->factory()->constructor_string(),
+ memory_constructor, DONT_ENUM);
+ InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow);
+ InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
+}
+
void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
if (!FLAG_expose_wasm && !FLAG_validate_asm) {
return;
@@ -350,7 +672,7 @@ void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
// Setup wasm function map.
Handle<Context> context(global->native_context(), isolate);
- InstallWasmFunctionMap(isolate, context);
+ InstallWasmMapsIfNeeded(isolate, context);
if (!FLAG_expose_wasm) {
return;
@@ -383,39 +705,11 @@ void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
JSObject::AddProperty(wasm_object, name, value, attributes);
}
}
-
- // Create private symbols.
- Handle<Symbol> module_sym = isolate->factory()->NewPrivateSymbol();
- Handle<Symbol> instance_sym = isolate->factory()->NewPrivateSymbol();
- context->set_wasm_module_sym(*module_sym);
- context->set_wasm_instance_sym(*instance_sym);
-
- // Bind the WebAssembly object.
- Handle<String> name = v8_str(isolate, "WebAssembly");
- Handle<JSFunction> cons = factory->NewFunction(name);
- JSFunction::SetInstancePrototype(
- cons, Handle<Object>(context->initial_object_prototype(), isolate));
- cons->shared()->set_instance_class_name(*name);
- Handle<JSObject> wasm_object = factory->NewJSObject(cons, TENURED);
- PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
- JSObject::AddProperty(global, name, wasm_object, attributes);
-
- // Install static methods on WebAssembly object.
- InstallFunc(isolate, wasm_object, "compile", WebAssemblyCompile);
- Handle<JSFunction> module_constructor =
- InstallFunc(isolate, wasm_object, "Module", WebAssemblyModule);
- Handle<JSFunction> instance_constructor =
- InstallFunc(isolate, wasm_object, "Instance", WebAssemblyInstance);
- i::Handle<i::Map> map = isolate->factory()->NewMap(
- i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + i::kPointerSize);
- module_constructor->set_prototype_or_initial_map(*map);
- map->SetConstructor(*module_constructor);
-
- context->set_wasm_module_constructor(*module_constructor);
- context->set_wasm_instance_constructor(*instance_constructor);
+ InstallWasmConstructors(isolate, global, context);
}
-void WasmJs::InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context) {
+void WasmJs::InstallWasmMapsIfNeeded(Isolate* isolate,
+ Handle<Context> context) {
if (!context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) {
// TODO(titzer): Move this to bootstrapper.cc??
// TODO(titzer): Also make one for strict mode functions?
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index ded9a1a90b..4f26494624 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -5,13 +5,8 @@
#ifndef V8_WASM_JS_H_
#define V8_WASM_JS_H_
-#ifndef V8_SHARED
#include "src/allocation.h"
#include "src/base/hashmap.h"
-#else
-#include "include/v8.h"
-#include "src/base/compiler-specific.h"
-#endif // !V8_SHARED
namespace v8 {
namespace internal {
@@ -19,7 +14,19 @@ namespace internal {
class WasmJs {
public:
static void Install(Isolate* isolate, Handle<JSGlobalObject> global_object);
- static void InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context);
+
+ V8_EXPORT_PRIVATE static void InstallWasmModuleSymbolIfNeeded(
+ Isolate* isolate, Handle<JSGlobalObject> global, Handle<Context> context);
+
+ V8_EXPORT_PRIVATE static void InstallWasmMapsIfNeeded(
+ Isolate* isolate, Handle<Context> context);
+ static void InstallWasmConstructors(Isolate* isolate,
+ Handle<JSGlobalObject> global,
+ Handle<Context> context);
+
+ static Handle<JSObject> CreateWasmMemoryObject(Isolate* isolate,
+ Handle<JSArrayBuffer> buffer,
+ bool has_maximum, int maximum);
};
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-macro-gen.h b/deps/v8/src/wasm/wasm-macro-gen.h
index abd57d505a..fd10a3929a 100644
--- a/deps/v8/src/wasm/wasm-macro-gen.h
+++ b/deps/v8/src/wasm/wasm-macro-gen.h
@@ -7,7 +7,7 @@
#include "src/wasm/wasm-opcodes.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
#define U32_LE(v) \
static_cast<byte>(v), static_cast<byte>((v) >> 8), \
@@ -17,17 +17,17 @@
#define WASM_MODULE_HEADER U32_LE(kWasmMagic), U32_LE(kWasmVersion)
-#define SIG_INDEX(v) U16_LE(v)
-// TODO(binji): make SIG_INDEX match this.
#define IMPORT_SIG_INDEX(v) U32V_1(v)
#define FUNC_INDEX(v) U32V_1(v)
+#define TABLE_INDEX(v) U32V_1(v)
#define NO_NAME U32V_1(0)
#define NAME_LENGTH(v) U32V_1(v)
+#define ENTRY_COUNT(v) U32V_1(v)
#define ZERO_ALIGNMENT 0
#define ZERO_OFFSET 0
-#define BR_TARGET(v) U32_LE(v)
+#define BR_TARGET(v) U32V_1(v)
#define MASK_7 ((1 << 7) - 1)
#define MASK_14 ((1 << 14) - 1)
@@ -62,36 +62,76 @@
#define ARITY_0 0
#define ARITY_1 1
+#define ARITY_2 2
#define DEPTH_0 0
#define DEPTH_1 1
+#define DEPTH_2 2
+#define ARITY_2 2
+
+#define WASM_BLOCK(...) kExprBlock, kLocalVoid, __VA_ARGS__, kExprEnd
+
+#define WASM_BLOCK_T(t, ...) \
+ kExprBlock, static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t)), \
+ __VA_ARGS__, kExprEnd
+
+#define WASM_BLOCK_TT(t1, t2, ...) \
+ kExprBlock, kMultivalBlock, 0, \
+ static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t1)), \
+ static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t2)), __VA_ARGS__, \
+ kExprEnd
+
+#define WASM_BLOCK_I(...) kExprBlock, kLocalI32, __VA_ARGS__, kExprEnd
+#define WASM_BLOCK_L(...) kExprBlock, kLocalI64, __VA_ARGS__, kExprEnd
+#define WASM_BLOCK_F(...) kExprBlock, kLocalF32, __VA_ARGS__, kExprEnd
+#define WASM_BLOCK_D(...) kExprBlock, kLocalF64, __VA_ARGS__, kExprEnd
+
+#define WASM_INFINITE_LOOP kExprLoop, kLocalVoid, kExprBr, DEPTH_0, kExprEnd
+
+#define WASM_LOOP(...) kExprLoop, kLocalVoid, __VA_ARGS__, kExprEnd
+#define WASM_LOOP_I(...) kExprLoop, kLocalI32, __VA_ARGS__, kExprEnd
+#define WASM_LOOP_L(...) kExprLoop, kLocalI64, __VA_ARGS__, kExprEnd
+#define WASM_LOOP_F(...) kExprLoop, kLocalF32, __VA_ARGS__, kExprEnd
+#define WASM_LOOP_D(...) kExprLoop, kLocalF64, __VA_ARGS__, kExprEnd
+
+#define WASM_IF(cond, tstmt) cond, kExprIf, kLocalVoid, tstmt, kExprEnd
-#define WASM_BLOCK(...) kExprBlock, __VA_ARGS__, kExprEnd
-#define WASM_INFINITE_LOOP kExprLoop, kExprBr, ARITY_0, DEPTH_0, kExprEnd
-#define WASM_LOOP(...) kExprLoop, __VA_ARGS__, kExprEnd
-#define WASM_IF(cond, tstmt) cond, kExprIf, tstmt, kExprEnd
#define WASM_IF_ELSE(cond, tstmt, fstmt) \
- cond, kExprIf, tstmt, kExprElse, fstmt, kExprEnd
+ cond, kExprIf, kLocalVoid, tstmt, kExprElse, fstmt, kExprEnd
+
+#define WASM_IF_ELSE_T(t, cond, tstmt, fstmt) \
+ cond, kExprIf, static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t)), tstmt, \
+ kExprElse, fstmt, kExprEnd
+
+#define WASM_IF_ELSE_TT(t1, t2, cond, tstmt, fstmt) \
+ cond, kExprIf, kMultivalBlock, 0, \
+ static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t1)), \
+ static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t2)), tstmt, kExprElse, \
+ fstmt, kExprEnd
+
+#define WASM_IF_ELSE_I(cond, tstmt, fstmt) \
+ cond, kExprIf, kLocalI32, tstmt, kExprElse, fstmt, kExprEnd
+#define WASM_IF_ELSE_L(cond, tstmt, fstmt) \
+ cond, kExprIf, kLocalI64, tstmt, kExprElse, fstmt, kExprEnd
+#define WASM_IF_ELSE_F(cond, tstmt, fstmt) \
+ cond, kExprIf, kLocalF32, tstmt, kExprElse, fstmt, kExprEnd
+#define WASM_IF_ELSE_D(cond, tstmt, fstmt) \
+ cond, kExprIf, kLocalF64, tstmt, kExprElse, fstmt, kExprEnd
+
#define WASM_SELECT(tval, fval, cond) tval, fval, cond, kExprSelect
-#define WASM_BR(depth) kExprBr, ARITY_0, static_cast<byte>(depth)
-#define WASM_BR_IF(depth, cond) \
- cond, kExprBrIf, ARITY_0, static_cast<byte>(depth)
-#define WASM_BRV(depth, val) val, kExprBr, ARITY_1, static_cast<byte>(depth)
-#define WASM_BRV_IF(depth, val, cond) \
- val, cond, kExprBrIf, ARITY_1, static_cast<byte>(depth)
-#define WASM_BREAK(depth) kExprBr, ARITY_0, static_cast<byte>(depth + 1)
-#define WASM_CONTINUE(depth) kExprBr, ARITY_0, static_cast<byte>(depth)
-#define WASM_BREAKV(depth, val) \
- val, kExprBr, ARITY_1, static_cast<byte>(depth + 1)
-#define WASM_RETURN0 kExprReturn, ARITY_0
-#define WASM_RETURN1(val) val, kExprReturn, ARITY_1
-#define WASM_RETURNN(count, ...) __VA_ARGS__, kExprReturn, count
+
+#define WASM_RETURN0 kExprReturn
+#define WASM_RETURN1(val) val, kExprReturn
+#define WASM_RETURNN(count, ...) __VA_ARGS__, kExprReturn
+
+#define WASM_BR(depth) kExprBr, static_cast<byte>(depth)
+#define WASM_BR_IF(depth, cond) cond, kExprBrIf, static_cast<byte>(depth)
+#define WASM_BR_IFD(depth, val, cond) \
+ val, cond, kExprBrIf, static_cast<byte>(depth), kExprDrop
+#define WASM_CONTINUE(depth) kExprBr, static_cast<byte>(depth)
#define WASM_UNREACHABLE kExprUnreachable
#define WASM_BR_TABLE(key, count, ...) \
- key, kExprBrTable, ARITY_0, U32V_1(count), __VA_ARGS__
-
-#define WASM_BR_TABLEV(val, key, count, ...) \
- val, key, kExprBrTable, ARITY_1, U32V_1(count), __VA_ARGS__
+ key, kExprBrTable, U32V_1(count), __VA_ARGS__
#define WASM_CASE(x) static_cast<byte>(x), static_cast<byte>(x >> 8)
#define WASM_CASE_BR(x) static_cast<byte>(x), static_cast<byte>(0x80 | (x) >> 8)
@@ -343,6 +383,8 @@ class LocalDeclEncoder {
static_cast<byte>(bit_cast<uint64_t>(val) >> 56)
#define WASM_GET_LOCAL(index) kExprGetLocal, static_cast<byte>(index)
#define WASM_SET_LOCAL(index, val) val, kExprSetLocal, static_cast<byte>(index)
+#define WASM_TEE_LOCAL(index, val) val, kExprTeeLocal, static_cast<byte>(index)
+#define WASM_DROP kExprDrop
#define WASM_GET_GLOBAL(index) kExprGetGlobal, static_cast<byte>(index)
#define WASM_SET_GLOBAL(index, val) \
val, kExprSetGlobal, static_cast<byte>(index)
@@ -374,49 +416,25 @@ class LocalDeclEncoder {
v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
alignment, ZERO_OFFSET
-#define WASM_CALL_FUNCTION0(index) \
- kExprCallFunction, 0, static_cast<byte>(index)
-#define WASM_CALL_FUNCTION1(index, a) \
- a, kExprCallFunction, 1, static_cast<byte>(index)
-#define WASM_CALL_FUNCTION2(index, a, b) \
- a, b, kExprCallFunction, 2, static_cast<byte>(index)
-#define WASM_CALL_FUNCTION3(index, a, b, c) \
- a, b, c, kExprCallFunction, 3, static_cast<byte>(index)
-#define WASM_CALL_FUNCTION4(index, a, b, c, d) \
- a, b, c, d, kExprCallFunction, 4, static_cast<byte>(index)
-#define WASM_CALL_FUNCTION5(index, a, b, c, d, e) \
- kExprCallFunction, 5, static_cast<byte>(index)
-#define WASM_CALL_FUNCTIONN(arity, index, ...) \
- __VA_ARGS__, kExprCallFunction, arity, static_cast<byte>(index)
-
-#define WASM_CALL_IMPORT0(index) kExprCallImport, 0, static_cast<byte>(index)
-#define WASM_CALL_IMPORT1(index, a) \
- a, kExprCallImport, 1, static_cast<byte>(index)
-#define WASM_CALL_IMPORT2(index, a, b) \
- a, b, kExprCallImport, 2, static_cast<byte>(index)
-#define WASM_CALL_IMPORT3(index, a, b, c) \
- a, b, c, kExprCallImport, 3, static_cast<byte>(index)
-#define WASM_CALL_IMPORT4(index, a, b, c, d) \
- a, b, c, d, kExprCallImport, 4, static_cast<byte>(index)
-#define WASM_CALL_IMPORT5(index, a, b, c, d, e) \
- a, b, c, d, e, kExprCallImport, 5, static_cast<byte>(index)
-#define WASM_CALL_IMPORTN(arity, index, ...) \
- __VA_ARGS__, kExprCallImport, U32V_1(arity), static_cast<byte>(index),
+#define WASM_CALL_FUNCTION0(index) kExprCallFunction, static_cast<byte>(index)
+#define WASM_CALL_FUNCTION(index, ...) \
+ __VA_ARGS__, kExprCallFunction, static_cast<byte>(index)
+// TODO(titzer): change usages of these macros to put func last.
#define WASM_CALL_INDIRECT0(index, func) \
- func, kExprCallIndirect, 0, static_cast<byte>(index)
+ func, kExprCallIndirect, static_cast<byte>(index)
#define WASM_CALL_INDIRECT1(index, func, a) \
- func, a, kExprCallIndirect, 1, static_cast<byte>(index)
+ a, func, kExprCallIndirect, static_cast<byte>(index)
#define WASM_CALL_INDIRECT2(index, func, a, b) \
- func, a, b, kExprCallIndirect, 2, static_cast<byte>(index)
+ a, b, func, kExprCallIndirect, static_cast<byte>(index)
#define WASM_CALL_INDIRECT3(index, func, a, b, c) \
- func, a, b, c, kExprCallIndirect, 3, static_cast<byte>(index)
+ a, b, c, func, kExprCallIndirect, static_cast<byte>(index)
#define WASM_CALL_INDIRECT4(index, func, a, b, c, d) \
- func, a, b, c, d, kExprCallIndirect, 4, static_cast<byte>(index)
+ a, b, c, d, func, kExprCallIndirect, static_cast<byte>(index)
#define WASM_CALL_INDIRECT5(index, func, a, b, c, d, e) \
- func, a, b, c, d, e, kExprCallIndirect, 5, static_cast<byte>(index)
+ a, b, c, d, e, func, kExprCallIndirect, static_cast<byte>(index)
#define WASM_CALL_INDIRECTN(arity, index, func, ...) \
- func, __VA_ARGS__, kExprCallIndirect, U32V_1(arity), static_cast<byte>(index)
+ __VA_ARGS__, func, kExprCallIndirect, static_cast<byte>(index)
#define WASM_NOT(x) x, kExprI32Eqz
#define WASM_SEQ(...) __VA_ARGS__
@@ -424,11 +442,16 @@ class LocalDeclEncoder {
//------------------------------------------------------------------------------
// Constructs that are composed of multiple bytecodes.
//------------------------------------------------------------------------------
-#define WASM_WHILE(x, y) \
- kExprLoop, x, kExprIf, y, kExprBr, ARITY_1, DEPTH_1, kExprEnd, kExprEnd
+#define WASM_WHILE(x, y) \
+ kExprLoop, kLocalVoid, x, kExprIf, kLocalVoid, y, kExprBr, DEPTH_1, \
+ kExprEnd, kExprEnd
#define WASM_INC_LOCAL(index) \
kExprGetLocal, static_cast<byte>(index), kExprI8Const, 1, kExprI32Add, \
- kExprSetLocal, static_cast<byte>(index)
+ kExprTeeLocal, static_cast<byte>(index)
+#define WASM_INC_LOCAL_BYV(index, count) \
+ kExprGetLocal, static_cast<byte>(index), kExprI8Const, \
+ static_cast<byte>(count), kExprI32Add, kExprTeeLocal, \
+ static_cast<byte>(index)
#define WASM_INC_LOCAL_BY(index, count) \
kExprGetLocal, static_cast<byte>(index), kExprI8Const, \
static_cast<byte>(count), kExprI32Add, kExprSetLocal, \
@@ -580,11 +603,17 @@ class LocalDeclEncoder {
#define WASM_I64_REINTERPRET_F64(x) x, kExprI64ReinterpretF64
//------------------------------------------------------------------------------
+// Memory Operations.
+//------------------------------------------------------------------------------
+#define WASM_GROW_MEMORY(x) x, kExprGrowMemory
+#define WASM_MEMORY_SIZE kExprMemorySize
+
+//------------------------------------------------------------------------------
// Simd Operations.
//------------------------------------------------------------------------------
#define WASM_SIMD_I32x4_SPLAT(x) x, kSimdPrefix, kExprI32x4Splat & 0xff
-#define WASM_SIMD_I32x4_EXTRACT_LANE(x, y) \
- x, y, kSimdPrefix, kExprI32x4ExtractLane & 0xff
+#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
+ x, kSimdPrefix, kExprI32x4ExtractLane & 0xff, static_cast<byte>(lane)
#define SIG_ENTRY_v_v kWasmFunctionTypeForm, 0, 0
#define SIZEOF_SIG_ENTRY_v_v 3
@@ -605,4 +634,13 @@ class LocalDeclEncoder {
#define SIZEOF_SIG_ENTRY_x_xx 6
#define SIZEOF_SIG_ENTRY_x_xxx 7
+#define WASM_BRV(depth, val) val, kExprBr, static_cast<byte>(depth)
+#define WASM_BRV_IF(depth, val, cond) \
+ val, cond, kExprBrIf, static_cast<byte>(depth)
+#define WASM_BRV_IFD(depth, val, cond) \
+ val, cond, kExprBrIf, static_cast<byte>(depth), kExprDrop
+#define WASM_IFB(cond, ...) cond, kExprIf, kLocalVoid, __VA_ARGS__, kExprEnd
+#define WASM_BR_TABLEV(val, key, count, ...) \
+ val, key, kExprBrTable, U32V_1(count), __VA_ARGS__
+
#endif // V8_WASM_MACRO_GEN_H_
diff --git a/deps/v8/src/wasm/encoder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index ef0bddc836..084f5a0c1a 100644
--- a/deps/v8/src/wasm/encoder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -6,12 +6,12 @@
#include "src/handles.h"
#include "src/v8.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
#include "src/wasm/ast-decoder.h"
-#include "src/wasm/encoder.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/wasm-macro-gen.h"
+#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
@@ -30,15 +30,11 @@ namespace v8 {
namespace internal {
namespace wasm {
-// Emit a section name and the size as a padded varint that can be patched
+// Emit a section code and the size as a padded varint that can be patched
// later.
-size_t EmitSection(WasmSection::Code code, ZoneBuffer& buffer) {
- // Emit the section name.
- const char* name = WasmSection::getName(code);
- TRACE("emit section: %s\n", name);
- size_t length = WasmSection::getNameLength(code);
- buffer.write_size(length); // Section name string size.
- buffer.write(reinterpret_cast<const byte*>(name), length);
+size_t EmitSection(WasmSectionCode code, ZoneBuffer& buffer) {
+ // Emit the section code.
+ buffer.write_u8(code);
// Emit a placeholder for the length.
return buffer.reserve_u32v();
@@ -55,8 +51,14 @@ WasmFunctionBuilder::WasmFunctionBuilder(WasmModuleBuilder* builder)
locals_(builder->zone()),
signature_index_(0),
exported_(0),
+ func_index_(static_cast<uint32_t>(builder->functions_.size())),
body_(builder->zone()),
- name_(builder->zone()) {}
+ name_(builder->zone()),
+ i32_temps_(builder->zone()),
+ i64_temps_(builder->zone()),
+ f32_temps_(builder->zone()),
+ f64_temps_(builder->zone()),
+ direct_calls_(builder->zone()) {}
void WasmFunctionBuilder::EmitVarInt(uint32_t val) {
byte buffer[8];
@@ -86,6 +88,10 @@ void WasmFunctionBuilder::EmitSetLocal(uint32_t local_index) {
EmitWithVarInt(kExprSetLocal, local_index);
}
+void WasmFunctionBuilder::EmitTeeLocal(uint32_t local_index) {
+ EmitWithVarInt(kExprTeeLocal, local_index);
+}
+
void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) {
for (size_t i = 0; i < code_size; ++i) {
body_.push_back(code[i]);
@@ -124,6 +130,15 @@ void WasmFunctionBuilder::EmitI32Const(int32_t value) {
}
}
+void WasmFunctionBuilder::EmitDirectCallIndex(uint32_t index) {
+ DirectCallIndex call;
+ call.offset = body_.size();
+ call.direct_index = index;
+ direct_calls_.push_back(call);
+ byte code[] = {U32V_5(0)};
+ EmitCode(code, sizeof(code));
+}
+
void WasmFunctionBuilder::SetExported() { exported_ = true; }
void WasmFunctionBuilder::SetName(const char* name, int name_length) {
@@ -139,14 +154,15 @@ void WasmFunctionBuilder::WriteSignature(ZoneBuffer& buffer) const {
buffer.write_u32v(signature_index_);
}
-void WasmFunctionBuilder::WriteExport(ZoneBuffer& buffer,
- uint32_t func_index) const {
+void WasmFunctionBuilder::WriteExport(ZoneBuffer& buffer) const {
if (exported_) {
- buffer.write_u32v(func_index);
buffer.write_size(name_.size());
if (name_.size() > 0) {
buffer.write(reinterpret_cast<const byte*>(&name_[0]), name_.size());
}
+ buffer.write_u8(kExternalFunction);
+ buffer.write_u32v(func_index_ +
+ static_cast<uint32_t>(builder_->imports_.size()));
}
}
@@ -158,24 +174,16 @@ void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
locals_.Emit(*ptr);
(*ptr) += locals_size; // UGLY: manual bump of position pointer
if (body_.size() > 0) {
+ size_t base = buffer.offset();
buffer.write(&body_[0], body_.size());
+ for (DirectCallIndex call : direct_calls_) {
+ buffer.patch_u32v(
+ base + call.offset,
+ call.direct_index + static_cast<uint32_t>(builder_->imports_.size()));
+ }
}
}
-WasmDataSegmentEncoder::WasmDataSegmentEncoder(Zone* zone, const byte* data,
- uint32_t size, uint32_t dest)
- : data_(zone), dest_(dest) {
- for (size_t i = 0; i < size; ++i) {
- data_.push_back(data[i]);
- }
-}
-
-void WasmDataSegmentEncoder::Write(ZoneBuffer& buffer) const {
- buffer.write_u32v(dest_);
- buffer.write_u32v(static_cast<uint32_t>(data_.size()));
- buffer.write(&data_[0], data_.size());
-}
-
WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
: zone_(zone),
signatures_(zone),
@@ -187,23 +195,22 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
signature_map_(zone),
start_function_index_(-1) {}
-uint32_t WasmModuleBuilder::AddFunction() {
+WasmFunctionBuilder* WasmModuleBuilder::AddFunction(FunctionSig* sig) {
functions_.push_back(new (zone_) WasmFunctionBuilder(this));
- return static_cast<uint32_t>(functions_.size() - 1);
+ // Add the signature if one was provided here.
+ if (sig) functions_.back()->SetSignature(sig);
+ return functions_.back();
}
-WasmFunctionBuilder* WasmModuleBuilder::FunctionAt(size_t index) {
- if (functions_.size() > index) {
- return functions_.at(index);
- } else {
- return nullptr;
+void WasmModuleBuilder::AddDataSegment(const byte* data, uint32_t size,
+ uint32_t dest) {
+ data_segments_.push_back({ZoneVector<byte>(zone()), dest});
+ ZoneVector<byte>& vec = data_segments_.back().data;
+ for (uint32_t i = 0; i < size; i++) {
+ vec.push_back(data[i]);
}
}
-void WasmModuleBuilder::AddDataSegment(WasmDataSegmentEncoder* data) {
- data_segments_.push_back(data);
-}
-
bool WasmModuleBuilder::CompareFunctionSigs::operator()(FunctionSig* a,
FunctionSig* b) const {
if (a->return_count() < b->return_count()) return true;
@@ -243,12 +250,13 @@ uint32_t WasmModuleBuilder::AddImport(const char* name, int name_length,
return static_cast<uint32_t>(imports_.size() - 1);
}
-void WasmModuleBuilder::MarkStartFunction(uint32_t index) {
- start_function_index_ = index;
+void WasmModuleBuilder::MarkStartFunction(WasmFunctionBuilder* function) {
+ start_function_index_ = function->func_index();
}
-uint32_t WasmModuleBuilder::AddGlobal(LocalType type, bool exported) {
- globals_.push_back(std::make_pair(type, exported));
+uint32_t WasmModuleBuilder::AddGlobal(LocalType type, bool exported,
+ bool mutability) {
+ globals_.push_back({type, exported, mutability});
return static_cast<uint32_t>(globals_.size() - 1);
}
@@ -262,7 +270,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == Emit signatures ========================================================
if (signatures_.size() > 0) {
- size_t start = EmitSection(WasmSection::Code::Signatures, buffer);
+ size_t start = EmitSection(kTypeSectionCode, buffer);
buffer.write_size(signatures_.size());
for (FunctionSig* sig : signatures_) {
@@ -279,86 +287,128 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
FixupSection(buffer, start);
}
- // == Emit globals ===========================================================
- if (globals_.size() > 0) {
- size_t start = EmitSection(WasmSection::Code::Globals, buffer);
- buffer.write_size(globals_.size());
-
- for (auto global : globals_) {
- buffer.write_u32v(0); // Length of the global name.
- buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(global.first));
- buffer.write_u8(global.second);
- }
- FixupSection(buffer, start);
- }
-
// == Emit imports ===========================================================
if (imports_.size() > 0) {
- size_t start = EmitSection(WasmSection::Code::ImportTable, buffer);
+ size_t start = EmitSection(kImportSectionCode, buffer);
buffer.write_size(imports_.size());
for (auto import : imports_) {
- buffer.write_u32v(import.sig_index);
- buffer.write_u32v(import.name_length);
- buffer.write(reinterpret_cast<const byte*>(import.name),
+ buffer.write_u32v(import.name_length); // module name length
+ buffer.write(reinterpret_cast<const byte*>(import.name), // module name
import.name_length);
- buffer.write_u32v(0);
+ buffer.write_u32v(0); // field name length
+ buffer.write_u8(kExternalFunction);
+ buffer.write_u32v(import.sig_index);
}
FixupSection(buffer, start);
}
// == Emit function signatures ===============================================
+ bool has_names = false;
if (functions_.size() > 0) {
- size_t start = EmitSection(WasmSection::Code::FunctionSignatures, buffer);
+ size_t start = EmitSection(kFunctionSectionCode, buffer);
buffer.write_size(functions_.size());
for (auto function : functions_) {
function->WriteSignature(buffer);
if (function->exported()) exports++;
+ if (function->name_.size() > 0) has_names = true;
}
FixupSection(buffer, start);
}
// == emit function table ====================================================
if (indirect_functions_.size() > 0) {
- size_t start = EmitSection(WasmSection::Code::FunctionTable, buffer);
+ size_t start = EmitSection(kTableSectionCode, buffer);
+ buffer.write_u8(1); // table count
+ buffer.write_u8(kWasmAnyFunctionTypeForm);
+ buffer.write_u8(kResizableMaximumFlag);
+ buffer.write_size(indirect_functions_.size());
buffer.write_size(indirect_functions_.size());
-
- for (auto index : indirect_functions_) {
- buffer.write_u32v(index);
- }
FixupSection(buffer, start);
}
// == emit memory declaration ================================================
{
- size_t start = EmitSection(WasmSection::Code::Memory, buffer);
+ size_t start = EmitSection(kMemorySectionCode, buffer);
+ buffer.write_u8(1); // memory count
+ buffer.write_u32v(kResizableMaximumFlag);
buffer.write_u32v(16); // min memory size
buffer.write_u32v(16); // max memory size
- buffer.write_u8(0); // memory export
- static_assert(kDeclMemorySize == 3, "memory size must match emit above");
+ FixupSection(buffer, start);
+ }
+
+ // == Emit globals ===========================================================
+ if (globals_.size() > 0) {
+ size_t start = EmitSection(kGlobalSectionCode, buffer);
+ buffer.write_size(globals_.size());
+
+ for (auto global : globals_) {
+ buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(global.type));
+ buffer.write_u8(global.mutability ? 1 : 0);
+ switch (global.type) {
+ case kAstI32: {
+ static const byte code[] = {WASM_I32V_1(0)};
+ buffer.write(code, sizeof(code));
+ break;
+ }
+ case kAstF32: {
+ static const byte code[] = {WASM_F32(0)};
+ buffer.write(code, sizeof(code));
+ break;
+ }
+ case kAstI64: {
+ static const byte code[] = {WASM_I64V_1(0)};
+ buffer.write(code, sizeof(code));
+ break;
+ }
+ case kAstF64: {
+ static const byte code[] = {WASM_F64(0.0)};
+ buffer.write(code, sizeof(code));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ buffer.write_u8(kExprEnd);
+ }
FixupSection(buffer, start);
}
// == emit exports ===========================================================
if (exports > 0) {
- size_t start = EmitSection(WasmSection::Code::ExportTable, buffer);
+ size_t start = EmitSection(kExportSectionCode, buffer);
buffer.write_u32v(exports);
- uint32_t index = 0;
- for (auto function : functions_) {
- function->WriteExport(buffer, index++);
- }
+ for (auto function : functions_) function->WriteExport(buffer);
FixupSection(buffer, start);
}
// == emit start function index ==============================================
if (start_function_index_ >= 0) {
- size_t start = EmitSection(WasmSection::Code::StartFunction, buffer);
- buffer.write_u32v(start_function_index_);
+ size_t start = EmitSection(kStartSectionCode, buffer);
+ buffer.write_u32v(start_function_index_ +
+ static_cast<uint32_t>(imports_.size()));
+ FixupSection(buffer, start);
+ }
+
+ // == emit function table elements ===========================================
+ if (indirect_functions_.size() > 0) {
+ size_t start = EmitSection(kElementSectionCode, buffer);
+ buffer.write_u8(1); // count of entries
+ buffer.write_u8(0); // table index
+ buffer.write_u8(kExprI32Const); // offset
+ buffer.write_u32v(0);
+ buffer.write_u8(kExprEnd);
+ buffer.write_size(indirect_functions_.size()); // element count
+
+ for (auto index : indirect_functions_) {
+ buffer.write_u32v(index + static_cast<uint32_t>(imports_.size()));
+ }
+
FixupSection(buffer, start);
}
// == emit code ==============================================================
if (functions_.size() > 0) {
- size_t start = EmitSection(WasmSection::Code::FunctionBodies, buffer);
+ size_t start = EmitSection(kCodeSectionCode, buffer);
buffer.write_size(functions_.size());
for (auto function : functions_) {
function->WriteBody(buffer);
@@ -368,11 +418,38 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit data segments =====================================================
if (data_segments_.size() > 0) {
- size_t start = EmitSection(WasmSection::Code::DataSegments, buffer);
+ size_t start = EmitSection(kDataSectionCode, buffer);
buffer.write_size(data_segments_.size());
for (auto segment : data_segments_) {
- segment->Write(buffer);
+ buffer.write_u8(0); // linear memory segment
+ buffer.write_u8(kExprI32Const); // initializer expression for dest
+ buffer.write_u32v(segment.dest);
+ buffer.write_u8(kExprEnd);
+ buffer.write_u32v(static_cast<uint32_t>(segment.data.size()));
+ buffer.write(&segment.data[0], segment.data.size());
+ }
+ FixupSection(buffer, start);
+ }
+
+ // == Emit names =============================================================
+ if (has_names) {
+ // Emit the section code.
+ buffer.write_u8(kUnknownSectionCode);
+ // Emit a placeholder for the length.
+ size_t start = buffer.reserve_u32v();
+ // Emit the section string.
+ buffer.write_size(4);
+ buffer.write(reinterpret_cast<const byte*>("name"), 4);
+ // Emit the names.
+ buffer.write_size(functions_.size());
+ for (auto function : functions_) {
+ buffer.write_size(function->name_.size());
+ if (function->name_.size() > 0) {
+ buffer.write(reinterpret_cast<const byte*>(&function->name_[0]),
+ function->name_.size());
+ }
+ buffer.write_u8(0);
}
FixupSection(buffer, start);
}
diff --git a/deps/v8/src/wasm/encoder.h b/deps/v8/src/wasm/wasm-module-builder.h
index eb8aa64abd..dcaf6c8e86 100644
--- a/deps/v8/src/wasm/encoder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_ENCODER_H_
-#define V8_WASM_ENCODER_H_
+#ifndef V8_WASM_WASM_MODULE_BUILDER_H_
+#define V8_WASM_WASM_MODULE_BUILDER_H_
#include "src/signature.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/wasm-macro-gen.h"
@@ -90,13 +90,14 @@ class ZoneBuffer : public ZoneObject {
void EnsureSpace(size_t size) {
if ((pos_ + size) > end_) {
- size_t new_size = 4096 + (end_ - buffer_) * 3;
+ size_t new_size = 4096 + size + (end_ - buffer_) * 3;
byte* new_buffer = reinterpret_cast<byte*>(zone_->New(new_size));
memcpy(new_buffer, buffer_, (pos_ - buffer_));
pos_ = new_buffer + (pos_ - buffer_);
buffer_ = new_buffer;
end_ = new_buffer + new_size;
}
+ DCHECK(pos_ + size <= end_);
}
byte** pos_ptr() { return &pos_; }
@@ -110,7 +111,7 @@ class ZoneBuffer : public ZoneObject {
class WasmModuleBuilder;
-class WasmFunctionBuilder : public ZoneObject {
+class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
public:
// Building methods.
void SetSignature(FunctionSig* sig);
@@ -120,61 +121,102 @@ class WasmFunctionBuilder : public ZoneObject {
void Emit(WasmOpcode opcode);
void EmitGetLocal(uint32_t index);
void EmitSetLocal(uint32_t index);
+ void EmitTeeLocal(uint32_t index);
void EmitI32Const(int32_t val);
void EmitWithU8(WasmOpcode opcode, const byte immediate);
void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
void EmitWithVarInt(WasmOpcode opcode, uint32_t immediate);
+ void EmitDirectCallIndex(uint32_t index);
void SetExported();
void SetName(const char* name, int name_length);
- bool exported() { return exported_; }
- // Writing methods.
void WriteSignature(ZoneBuffer& buffer) const;
- void WriteExport(ZoneBuffer& buffer, uint32_t func_index) const;
+ void WriteExport(ZoneBuffer& buffer) const;
void WriteBody(ZoneBuffer& buffer) const;
+ bool exported() { return exported_; }
+ uint32_t func_index() { return func_index_; }
+ FunctionSig* signature();
+
private:
explicit WasmFunctionBuilder(WasmModuleBuilder* builder);
friend class WasmModuleBuilder;
+ friend class WasmTemporary;
+
+ struct DirectCallIndex {
+ size_t offset;
+ uint32_t direct_index;
+ };
+
WasmModuleBuilder* builder_;
LocalDeclEncoder locals_;
uint32_t signature_index_;
bool exported_;
+ uint32_t func_index_;
ZoneVector<uint8_t> body_;
ZoneVector<char> name_;
+ ZoneVector<uint32_t> i32_temps_;
+ ZoneVector<uint32_t> i64_temps_;
+ ZoneVector<uint32_t> f32_temps_;
+ ZoneVector<uint32_t> f64_temps_;
+ ZoneVector<DirectCallIndex> direct_calls_;
};
-// TODO(titzer): kill!
-class WasmDataSegmentEncoder : public ZoneObject {
+class WasmTemporary {
public:
- WasmDataSegmentEncoder(Zone* zone, const byte* data, uint32_t size,
- uint32_t dest);
- void Write(ZoneBuffer& buffer) const;
+ WasmTemporary(WasmFunctionBuilder* builder, LocalType type) {
+ switch (type) {
+ case kAstI32:
+ temporary_ = &builder->i32_temps_;
+ break;
+ case kAstI64:
+ temporary_ = &builder->i64_temps_;
+ break;
+ case kAstF32:
+ temporary_ = &builder->f32_temps_;
+ break;
+ case kAstF64:
+ temporary_ = &builder->f64_temps_;
+ break;
+ default:
+ UNREACHABLE();
+ temporary_ = nullptr;
+ }
+ if (temporary_->size() == 0) {
+ // Allocate a new temporary.
+ index_ = builder->AddLocal(type);
+ } else {
+ // Reuse a previous temporary.
+ index_ = temporary_->back();
+ temporary_->pop_back();
+ }
+ }
+ ~WasmTemporary() {
+ temporary_->push_back(index_); // return the temporary to the list.
+ }
+ uint32_t index() { return index_; }
private:
- ZoneVector<byte> data_;
- uint32_t dest_;
-};
-
-struct WasmFunctionImport {
- uint32_t sig_index;
- const char* name;
- int name_length;
+ ZoneVector<uint32_t>* temporary_;
+ uint32_t index_;
};
-class WasmModuleBuilder : public ZoneObject {
+class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
public:
explicit WasmModuleBuilder(Zone* zone);
// Building methods.
- uint32_t AddFunction();
- uint32_t AddGlobal(LocalType type, bool exported);
- WasmFunctionBuilder* FunctionAt(size_t index);
- void AddDataSegment(WasmDataSegmentEncoder* data);
+ uint32_t AddImport(const char* name, int name_length, FunctionSig* sig);
+ void SetImportName(uint32_t index, const char* name, int name_length) {
+ imports_[index].name = name;
+ imports_[index].name_length = name_length;
+ }
+ WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
+ uint32_t AddGlobal(LocalType type, bool exported, bool mutability = true);
+ void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
uint32_t AddSignature(FunctionSig* sig);
void AddIndirectFunction(uint32_t index);
- void MarkStartFunction(uint32_t index);
- uint32_t AddImport(const char* name, int name_length, FunctionSig* sig);
+ void MarkStartFunction(WasmFunctionBuilder* builder);
// Writing methods.
void WriteTo(ZoneBuffer& buffer) const;
@@ -186,20 +228,44 @@ class WasmModuleBuilder : public ZoneObject {
Zone* zone() { return zone_; }
+ FunctionSig* GetSignature(uint32_t index) { return signatures_[index]; }
+
private:
+ struct WasmFunctionImport {
+ uint32_t sig_index;
+ const char* name;
+ int name_length;
+ };
+
+ struct WasmGlobal {
+ LocalType type;
+ bool exported;
+ bool mutability;
+ };
+
+ struct WasmDataSegment {
+ ZoneVector<byte> data;
+ uint32_t dest;
+ };
+
+ friend class WasmFunctionBuilder;
Zone* zone_;
ZoneVector<FunctionSig*> signatures_;
ZoneVector<WasmFunctionImport> imports_;
ZoneVector<WasmFunctionBuilder*> functions_;
- ZoneVector<WasmDataSegmentEncoder*> data_segments_;
+ ZoneVector<WasmDataSegment> data_segments_;
ZoneVector<uint32_t> indirect_functions_;
- ZoneVector<std::pair<LocalType, bool>> globals_;
+ ZoneVector<WasmGlobal> globals_;
SignatureMap signature_map_;
int start_function_index_;
};
+inline FunctionSig* WasmFunctionBuilder::signature() {
+ return builder_->signatures_[signature_index_];
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_ENCODER_H_
+#endif // V8_WASM_WASM_MODULE_BUILDER_H_
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 94bf998e53..f4cf505f5a 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -18,6 +18,7 @@
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-function-name-table.h"
+#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-result.h"
@@ -27,179 +28,56 @@ namespace v8 {
namespace internal {
namespace wasm {
-enum JSFunctionExportInternalField {
- kInternalModuleInstance,
- kInternalArity,
- kInternalSignature
-};
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
+ } while (false)
-static const int kPlaceholderMarker = 1000000000;
+#define TRACE_CHAIN(instance) \
+ do { \
+ instance->PrintInstancesChain(); \
+ } while (false)
-static const char* wasmSections[] = {
-#define F(enumerator, order, string) string,
- FOR_EACH_WASM_SECTION_TYPE(F)
-#undef F
- "<unknown>" // entry for "Max"
-};
+namespace {
-static uint8_t wasmSectionsLengths[]{
-#define F(enumerator, order, string) sizeof(string) - 1,
- FOR_EACH_WASM_SECTION_TYPE(F)
-#undef F
- 9 // entry for "Max"
-};
+static const int kPlaceholderMarker = 1000000000;
-static uint8_t wasmSectionsOrders[]{
-#define F(enumerator, order, string) order,
- FOR_EACH_WASM_SECTION_TYPE(F)
-#undef F
- 0 // entry for "Max"
+enum JSFunctionExportInternalField {
+ kInternalModuleInstance,
+ kInternalArity,
+ kInternalSignature
};
-static_assert(sizeof(wasmSections) / sizeof(wasmSections[0]) ==
- (size_t)WasmSection::Code::Max + 1,
- "expected enum WasmSection::Code to be monotonic from 0");
-
-WasmSection::Code WasmSection::begin() { return (WasmSection::Code)0; }
-WasmSection::Code WasmSection::end() { return WasmSection::Code::Max; }
-WasmSection::Code WasmSection::next(WasmSection::Code code) {
- return (WasmSection::Code)(1 + (uint32_t)code);
-}
-
-const char* WasmSection::getName(WasmSection::Code code) {
- return wasmSections[(size_t)code];
-}
-
-size_t WasmSection::getNameLength(WasmSection::Code code) {
- return wasmSectionsLengths[(size_t)code];
-}
-
-int WasmSection::getOrder(WasmSection::Code code) {
- return wasmSectionsOrders[(size_t)code];
-}
-
-WasmSection::Code WasmSection::lookup(const byte* string, uint32_t length) {
- // TODO(jfb) Linear search, it may be better to do a common-prefix search.
- for (Code i = begin(); i != end(); i = next(i)) {
- if (getNameLength(i) == length && 0 == memcmp(getName(i), string, length)) {
- return i;
- }
- }
- return Code::Max;
-}
-
-std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
- os << "WASM module with ";
- os << (module.min_mem_pages * module.kPageSize) << " min mem";
- os << (module.max_mem_pages * module.kPageSize) << " max mem";
- os << module.functions.size() << " functions";
- os << module.functions.size() << " globals";
- os << module.functions.size() << " data segments";
- return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
- os << "WASM function with signature " << *function.sig;
-
- os << " code bytes: "
- << (function.code_end_offset - function.code_start_offset);
- return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const WasmFunctionName& pair) {
- os << "#" << pair.function_->func_index << ":";
- if (pair.function_->name_offset > 0) {
- if (pair.module_) {
- WasmName name = pair.module_->GetName(pair.function_->name_offset,
- pair.function_->name_length);
- os.write(name.start(), name.length());
- } else {
- os << "+" << pair.function_->func_index;
- }
- } else {
- os << "?";
- }
- return os;
-}
-
-Handle<JSFunction> WrapExportCodeAsJSFunction(
- Isolate* isolate, Handle<Code> export_code, Handle<String> name, int arity,
- MaybeHandle<ByteArray> maybe_signature, Handle<JSObject> module_instance) {
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfo(name, export_code, false);
- shared->set_length(arity);
- shared->set_internal_formal_parameter_count(arity);
- Handle<JSFunction> function = isolate->factory()->NewFunction(
- isolate->wasm_function_map(), name, export_code);
- function->set_shared(*shared);
-
- function->SetInternalField(kInternalModuleInstance, *module_instance);
- // add another Internal Field as the function arity
- function->SetInternalField(kInternalArity, Smi::FromInt(arity));
- // add another Internal Field as the signature of the foreign function
- Handle<ByteArray> signature;
- if (maybe_signature.ToHandle(&signature)) {
- function->SetInternalField(kInternalSignature, *signature);
- }
- return function;
-}
-
-namespace {
// Internal constants for the layout of the module object.
-const int kWasmModuleFunctionTable = 0;
-const int kWasmModuleCodeTable = 1;
-const int kWasmMemArrayBuffer = 2;
-const int kWasmGlobalsArrayBuffer = 3;
-// TODO(clemensh): Remove function name array, extract names from module bytes.
-const int kWasmFunctionNamesArray = 4;
-const int kWasmModuleBytesString = 5;
-const int kWasmDebugInfo = 6;
-const int kWasmModuleInternalFieldCount = 7;
-
-// TODO(mtrofin): Unnecessary once we stop using JS Heap for wasm code.
-// For now, each field is expected to have the type commented by its side.
-// The elements typed as "maybe" are optional. The others are mandatory. Since
-// the compiled module is either obtained from the current v8 instance, or from
-// a snapshot produced by a compatible (==identical) v8 instance, we simply
-// fail at instantiation time, in the face of invalid data.
-enum CompiledWasmObjectFields {
- kFunctions, // FixedArray of Code
- kImportData, // maybe FixedArray of FixedArray respecting the
- // WasmImportMetadata structure.
- kExports, // maybe FixedArray of FixedArray of WasmExportMetadata
- // structure
- kStartupFunction, // maybe FixedArray of WasmExportMetadata structure
- kTableOfIndirectFunctionTables, // maybe FixedArray of FixedArray of
- // WasmIndirectFunctionTableMetadata
- kModuleBytes, // maybe String
- kFunctionNameTable, // maybe ByteArray
- kMinRequiredMemory, // Smi. an uint32_t
- // The following 2 are either together present or absent:
- kDataSegmentsInfo, // maybe FixedArray of FixedArray respecting the
- // WasmSegmentInfo structure
- kDataSegments, // maybe ByteArray.
-
- kGlobalsSize, // Smi. an uint32_t
- kExportMem, // Smi. bool
- kOrigin, // Smi. ModuleOrigin
- kCompiledWasmObjectTableSize // Sentinel value.
+enum WasmInstanceObjectFields {
+ kWasmCompiledModule = 0,
+ kWasmModuleFunctionTable,
+ kWasmModuleCodeTable,
+ kWasmMemArrayBuffer,
+ kWasmGlobalsArrayBuffer,
+ // TODO(clemensh): Remove function name array, extract names from module
+ // bytes.
+ kWasmFunctionNamesArray,
+ kWasmModuleBytesString,
+ kWasmDebugInfo,
+ kWasmNumImportedFunctions,
+ kWasmModuleInternalFieldCount
};
-enum WasmImportMetadata {
- kModuleName, // String
- kFunctionName, // maybe String
- kOutputCount, // Smi. an uint32_t
- kSignature, // ByteArray. A copy of the data in FunctionSig
- kWasmImportDataTableSize // Sentinel value.
+enum WasmImportData {
+ kModuleName, // String
+ kFunctionName, // maybe String
+ kOutputCount, // Smi. an uint32_t
+ kSignature, // ByteArray. A copy of the data in FunctionSig
+ kWasmImportDataSize // Sentinel value.
};
-enum WasmExportMetadata {
- kExportCode, // Code
- kExportName, // String
- kExportArity, // Smi, an int
- kExportedFunctionIndex, // Smi, an uint32_t
- kExportedSignature, // ByteArray. A copy of the data in FunctionSig
- kWasmExportMetadataTableSize // Sentinel value.
+enum WasmExportData {
+ kExportName, // String
+ kExportArity, // Smi, an int
+ kExportedFunctionIndex, // Smi, an uint32_t
+ kExportedSignature, // ByteArray. A copy of the data in FunctionSig
+ kWasmExportDataSize // Sentinel value.
};
enum WasmSegmentInfo {
@@ -208,31 +86,26 @@ enum WasmSegmentInfo {
kWasmSegmentInfoSize // Sentinel value.
};
-enum WasmIndirectFunctionTableMetadata {
- kSize, // Smi. an uint32_t
- kTable, // FixedArray of indirect function table
- kWasmIndirectFunctionTableMetadataSize // Sentinel value.
+enum WasmIndirectFunctionTableData {
+ kSize, // Smi. an uint32_t
+ kTable, // FixedArray of indirect function table
+ kWasmIndirectFunctionTableDataSize // Sentinel value.
};
uint32_t GetMinModuleMemSize(const WasmModule* module) {
return WasmModule::kPageSize * module->min_mem_pages;
}
-void LoadDataSegments(Handle<FixedArray> compiled_module, Address mem_addr,
- size_t mem_size) {
- Isolate* isolate = compiled_module->GetIsolate();
- MaybeHandle<ByteArray> maybe_data =
- compiled_module->GetValue<ByteArray>(isolate, kDataSegments);
- MaybeHandle<FixedArray> maybe_segments =
- compiled_module->GetValue<FixedArray>(isolate, kDataSegmentsInfo);
+void LoadDataSegments(Handle<WasmCompiledModule> compiled_module,
+ Address mem_addr, size_t mem_size) {
+ CHECK(compiled_module->has_data_segments() ==
+ compiled_module->has_data_segments_info());
- // We either have both or neither.
- CHECK(maybe_data.is_null() == maybe_segments.is_null());
// If we have neither, we're done.
- if (maybe_data.is_null()) return;
+ if (!compiled_module->has_data_segments()) return;
- Handle<ByteArray> data = maybe_data.ToHandleChecked();
- Handle<FixedArray> segments = maybe_segments.ToHandleChecked();
+ Handle<ByteArray> data = compiled_module->data_segments();
+ Handle<FixedArray> segments = compiled_module->data_segments_info();
uint32_t last_extraction_pos = 0;
for (int i = 0; i < segments->length(); ++i) {
@@ -250,12 +123,11 @@ void LoadDataSegments(Handle<FixedArray> compiled_module, Address mem_addr,
}
void SaveDataSegmentInfo(Factory* factory, const WasmModule* module,
- Handle<FixedArray> compiled_module) {
+ Handle<WasmCompiledModule> compiled_module) {
Handle<FixedArray> segments = factory->NewFixedArray(
static_cast<int>(module->data_segments.size()), TENURED);
uint32_t data_size = 0;
for (const WasmDataSegment& segment : module->data_segments) {
- if (!segment.init) continue;
if (segment.source_size == 0) continue;
data_size += segment.source_size;
}
@@ -264,11 +136,12 @@ void SaveDataSegmentInfo(Factory* factory, const WasmModule* module,
uint32_t last_insertion_pos = 0;
for (uint32_t i = 0; i < module->data_segments.size(); ++i) {
const WasmDataSegment& segment = module->data_segments[i];
- if (!segment.init) continue;
if (segment.source_size == 0) continue;
Handle<ByteArray> js_segment =
factory->NewByteArray(kWasmSegmentInfoSize * sizeof(uint32_t), TENURED);
- js_segment->set_int(kDestAddr, segment.dest_addr);
+ // TODO(titzer): add support for global offsets for dest_addr
+ CHECK_EQ(WasmInitExpr::kI32Const, segment.dest_addr.kind);
+ js_segment->set_int(kDestAddr, segment.dest_addr.val.i32_const);
js_segment->set_int(kSourceSize, segment.source_size);
segments->set(i, *js_segment);
data->copy_in(last_insertion_pos,
@@ -276,8 +149,8 @@ void SaveDataSegmentInfo(Factory* factory, const WasmModule* module,
segment.source_size);
last_insertion_pos += segment.source_size;
}
- compiled_module->set(kDataSegmentsInfo, *segments);
- compiled_module->set(kDataSegments, *data);
+ compiled_module->set_data_segments_info(segments);
+ compiled_module->set_data_segments(data);
}
void PatchFunctionTable(Handle<Code> code,
@@ -315,8 +188,9 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
return buffer;
}
-void RelocateInstanceCode(Handle<JSObject> instance, Address start,
- uint32_t prev_size, uint32_t new_size) {
+void RelocateInstanceCode(Handle<JSObject> instance, Address old_start,
+ Address start, uint32_t prev_size,
+ uint32_t new_size) {
Handle<FixedArray> functions = Handle<FixedArray>(
FixedArray::cast(instance->GetInternalField(kWasmModuleCodeTable)));
for (int i = 0; i < functions->length(); ++i) {
@@ -325,7 +199,7 @@ void RelocateInstanceCode(Handle<JSObject> instance, Address start,
int mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE) |
(1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
for (RelocIterator it(*function, mask); !it.done(); it.next()) {
- it.rinfo()->update_wasm_memory_reference(nullptr, start, prev_size,
+ it.rinfo()->update_wasm_memory_reference(old_start, start, prev_size,
new_size);
}
}
@@ -347,7 +221,8 @@ Handle<JSArrayBuffer> AllocateMemory(ErrorThrower* thrower, Isolate* isolate,
return mem_buffer;
}
-void RelocateGlobals(Handle<JSObject> instance, Address globals_start) {
+void RelocateGlobals(Handle<JSObject> instance, Address old_start,
+ Address globals_start) {
Handle<FixedArray> functions = Handle<FixedArray>(
FixedArray::cast(instance->GetInternalField(kWasmModuleCodeTable)));
uint32_t function_count = static_cast<uint32_t>(functions->length());
@@ -356,7 +231,7 @@ void RelocateGlobals(Handle<JSObject> instance, Address globals_start) {
AllowDeferredHandleDereference embedding_raw_address;
int mask = 1 << RelocInfo::WASM_GLOBAL_REFERENCE;
for (RelocIterator it(*function, mask); !it.done(); it.next()) {
- it.rinfo()->update_wasm_global_reference(nullptr, globals_start);
+ it.rinfo()->update_wasm_global_reference(old_start, globals_start);
}
}
}
@@ -375,64 +250,41 @@ Handle<Code> CreatePlaceholder(Factory* factory, uint32_t index,
return code;
}
-// TODO(mtrofin): remove when we stop relying on placeholders.
-void InitializePlaceholders(Factory* factory,
- std::vector<Handle<Code>>* placeholders,
- size_t size) {
- DCHECK(placeholders->empty());
- placeholders->reserve(size);
-
- for (uint32_t i = 0; i < size; ++i) {
- placeholders->push_back(CreatePlaceholder(factory, i, Code::WASM_FUNCTION));
- }
-}
-
bool LinkFunction(Handle<Code> unlinked,
- const std::vector<Handle<Code>>& code_targets,
- Code::Kind kind) {
+ std::vector<Handle<Code>>& code_table) {
bool modified = false;
- int mode_mask = RelocInfo::kCodeTargetMask;
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
AllowDeferredHandleDereference embedding_raw_address;
for (RelocIterator it(*unlinked, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (RelocInfo::IsCodeTarget(mode)) {
Code* target =
Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (target->kind() == kind &&
- target->constant_pool_offset() >= kPlaceholderMarker) {
- // Patch direct calls to placeholder code objects.
- uint32_t index = target->constant_pool_offset() - kPlaceholderMarker;
- CHECK(index < code_targets.size());
- Handle<Code> new_target = code_targets[index];
- if (target != *new_target) {
- it.rinfo()->set_target_address(new_target->instruction_start(),
- UPDATE_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
- modified = true;
+ if (target->constant_pool_offset() < kPlaceholderMarker) continue;
+ switch (target->kind()) {
+ case Code::WASM_FUNCTION: // fall through
+ case Code::WASM_TO_JS_FUNCTION: // fall through
+ case Code::JS_TO_WASM_FUNCTION: {
+ // Patch direct calls to placeholder code objects.
+ uint32_t index = target->constant_pool_offset() - kPlaceholderMarker;
+ Handle<Code> new_target = code_table[index];
+ if (target != *new_target) {
+ it.rinfo()->set_target_address(new_target->instruction_start(),
+ UPDATE_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
+ modified = true;
+ }
+ break;
}
+ default:
+ break;
}
}
}
return modified;
}
-void LinkModuleFunctions(Isolate* isolate,
- std::vector<Handle<Code>>& functions) {
- for (size_t i = 0; i < functions.size(); ++i) {
- Handle<Code> code = functions[i];
- LinkFunction(code, functions, Code::WASM_FUNCTION);
- }
-}
-
-void LinkImports(Isolate* isolate, std::vector<Handle<Code>>& functions,
- const std::vector<Handle<Code>>& imports) {
- for (uint32_t i = 0; i < functions.size(); ++i) {
- Handle<Code> code = functions[i];
- LinkFunction(code, imports, Code::WASM_TO_JS_FUNCTION);
- }
-}
-
-void FlushAssemblyCache(Isolate* isolate, Handle<FixedArray> functions) {
+void FlushICache(Isolate* isolate, Handle<FixedArray> functions) {
for (int i = 0; i < functions->length(); ++i) {
Handle<Code> code = functions->GetValueChecked<Code>(isolate, i);
Assembler::FlushICache(isolate, code->instruction_start(),
@@ -440,84 +292,6 @@ void FlushAssemblyCache(Isolate* isolate, Handle<FixedArray> functions) {
}
}
-} // namespace
-
-WasmModule::WasmModule(byte* module_start)
- : module_start(module_start),
- module_end(nullptr),
- min_mem_pages(0),
- max_mem_pages(0),
- mem_export(false),
- mem_external(false),
- start_function_index(-1),
- origin(kWasmOrigin),
- globals_size(0),
- pending_tasks(new base::Semaphore(0)) {}
-
-static MaybeHandle<JSFunction> ReportFFIError(
- ErrorThrower& thrower, const char* error, uint32_t index,
- Handle<String> module_name, MaybeHandle<String> function_name) {
- Handle<String> function_name_handle;
- if (function_name.ToHandle(&function_name_handle)) {
- thrower.Error("Import #%d module=\"%.*s\" function=\"%.*s\" error: %s",
- index, module_name->length(), module_name->ToCString().get(),
- function_name_handle->length(),
- function_name_handle->ToCString().get(), error);
- } else {
- thrower.Error("Import #%d module=\"%.*s\" error: %s", index,
- module_name->length(), module_name->ToCString().get(), error);
- }
- thrower.Error("Import ");
- return MaybeHandle<JSFunction>();
-}
-
-static MaybeHandle<JSReceiver> LookupFunction(
- ErrorThrower& thrower, Factory* factory, Handle<JSReceiver> ffi,
- uint32_t index, Handle<String> module_name,
- MaybeHandle<String> function_name) {
- if (ffi.is_null()) {
- return ReportFFIError(thrower, "FFI is not an object", index, module_name,
- function_name);
- }
-
- // Look up the module first.
- MaybeHandle<Object> result = Object::GetProperty(ffi, module_name);
- if (result.is_null()) {
- return ReportFFIError(thrower, "module not found", index, module_name,
- function_name);
- }
-
- Handle<Object> module = result.ToHandleChecked();
-
- if (!module->IsJSReceiver()) {
- return ReportFFIError(thrower, "module is not an object or function", index,
- module_name, function_name);
- }
-
- Handle<Object> function;
- if (!function_name.is_null()) {
- // Look up the function in the module.
- MaybeHandle<Object> result =
- Object::GetProperty(module, function_name.ToHandleChecked());
- if (result.is_null()) {
- return ReportFFIError(thrower, "function not found", index, module_name,
- function_name);
- }
- function = result.ToHandleChecked();
- } else {
- // No function specified. Use the "default export".
- function = module;
- }
-
- if (!function->IsCallable()) {
- return ReportFFIError(thrower, "not a callable", index, module_name,
- function_name);
- }
-
- return Handle<JSReceiver>::cast(function);
-}
-
-namespace {
// Fetches the compilation unit of a wasm function and executes its parallel
// phase.
bool FetchAndExecuteCompilationUnit(
@@ -530,7 +304,7 @@ bool FetchAndExecuteCompilationUnit(
DisallowHandleDereference no_deref;
DisallowCodeDependencyChange no_dependency_change;
- // - 1 because AtomicIntrement returns the value after the atomic increment.
+ // - 1 because AtomicIncrement returns the value after the atomic increment.
size_t index = next_unit->Increment(1) - 1;
if (index >= compilation_units->size()) {
return false;
@@ -539,10 +313,8 @@ bool FetchAndExecuteCompilationUnit(
compiler::WasmCompilationUnit* unit = compilation_units->at(index);
if (unit != nullptr) {
unit->ExecuteCompilation();
- {
- base::LockGuard<base::Mutex> guard(result_mutex);
- executed_units->push(unit);
- }
+ base::LockGuard<base::Mutex> guard(result_mutex);
+ executed_units->push(unit);
}
return true;
}
@@ -585,11 +357,6 @@ static void RecordStats(Isolate* isolate, Code* code) {
code->relocation_info()->length());
}
-static void RecordStats(Isolate* isolate,
- const std::vector<Handle<Code>>& functions) {
- for (Handle<Code> c : functions) RecordStats(isolate, *c);
-}
-
static void RecordStats(Isolate* isolate, Handle<FixedArray> functions) {
DisallowHeapAllocation no_gc;
for (int i = 0; i < functions->length(); ++i) {
@@ -597,16 +364,27 @@ static void RecordStats(Isolate* isolate, Handle<FixedArray> functions) {
}
}
-Handle<FixedArray> GetImportsMetadata(Factory* factory,
- const WasmModule* module) {
+Address GetGlobalStartAddressFromCodeTemplate(Object* undefined,
+ JSObject* owner) {
+ Address old_address = nullptr;
+ Object* stored_value = owner->GetInternalField(kWasmGlobalsArrayBuffer);
+ if (stored_value != undefined) {
+ old_address = static_cast<Address>(
+ JSArrayBuffer::cast(stored_value)->backing_store());
+ }
+ return old_address;
+}
+
+Handle<FixedArray> GetImportsData(Factory* factory, const WasmModule* module) {
Handle<FixedArray> ret = factory->NewFixedArray(
static_cast<int>(module->import_table.size()), TENURED);
for (size_t i = 0; i < module->import_table.size(); ++i) {
const WasmImport& import = module->import_table[i];
+ if (import.kind != kExternalFunction) continue;
WasmName module_name = module->GetNameOrNull(import.module_name_offset,
import.module_name_length);
- WasmName function_name = module->GetNameOrNull(import.function_name_offset,
- import.function_name_length);
+ WasmName function_name = module->GetNameOrNull(import.field_name_offset,
+ import.field_name_length);
Handle<String> module_name_string =
factory->InternalizeUtf8String(module_name);
@@ -614,116 +392,172 @@ Handle<FixedArray> GetImportsMetadata(Factory* factory,
function_name.is_empty()
? Handle<String>::null()
: factory->InternalizeUtf8String(function_name);
- Handle<ByteArray> sig =
- factory->NewByteArray(static_cast<int>(import.sig->parameter_count() +
- import.sig->return_count()),
- TENURED);
- sig->copy_in(0, reinterpret_cast<const byte*>(import.sig->raw_data()),
+ FunctionSig* fsig = module->functions[import.index].sig;
+ Handle<ByteArray> sig = factory->NewByteArray(
+ static_cast<int>(fsig->parameter_count() + fsig->return_count()),
+ TENURED);
+ sig->copy_in(0, reinterpret_cast<const byte*>(fsig->raw_data()),
sig->length());
Handle<FixedArray> encoded_import =
- factory->NewFixedArray(kWasmImportDataTableSize, TENURED);
+ factory->NewFixedArray(kWasmImportDataSize, TENURED);
encoded_import->set(kModuleName, *module_name_string);
if (!function_name_string.is_null()) {
encoded_import->set(kFunctionName, *function_name_string);
}
- encoded_import->set(
- kOutputCount,
- Smi::FromInt(static_cast<int>(import.sig->return_count())));
+ encoded_import->set(kOutputCount,
+ Smi::FromInt(static_cast<int>(fsig->return_count())));
encoded_import->set(kSignature, *sig);
ret->set(static_cast<int>(i), *encoded_import);
}
return ret;
}
-bool CompileWrappersToImportedFunctions(Isolate* isolate,
- const Handle<JSReceiver> ffi,
- std::vector<Handle<Code>>& imports,
- Handle<FixedArray> import_data,
- ErrorThrower* thrower) {
- uint32_t import_count = static_cast<uint32_t>(import_data->length());
- if (import_count > 0) {
- imports.reserve(import_count);
- for (uint32_t index = 0; index < import_count; ++index) {
- Handle<FixedArray> data =
- import_data->GetValueChecked<FixedArray>(isolate, index);
- Handle<String> module_name =
- data->GetValueChecked<String>(isolate, kModuleName);
- MaybeHandle<String> function_name =
- data->GetValue<String>(isolate, kFunctionName);
-
- // TODO(mtrofin): this is an uint32_t, actually. We should rationalize
- // it when we rationalize signed/unsigned stuff.
- int ret_count = Smi::cast(data->get(kOutputCount))->value();
- CHECK(ret_count >= 0);
- Handle<ByteArray> sig_data =
- data->GetValueChecked<ByteArray>(isolate, kSignature);
- int sig_data_size = sig_data->length();
- int param_count = sig_data_size - ret_count;
- CHECK(param_count >= 0);
-
- MaybeHandle<JSReceiver> function = LookupFunction(
- *thrower, isolate->factory(), ffi, index, module_name, function_name);
- if (function.is_null()) return false;
- Handle<Code> code;
- Handle<JSReceiver> target = function.ToHandleChecked();
- bool isMatch = false;
- Handle<Code> export_wrapper_code;
- if (target->IsJSFunction()) {
- Handle<JSFunction> func = Handle<JSFunction>::cast(target);
- export_wrapper_code = handle(func->code());
- if (export_wrapper_code->kind() == Code::JS_TO_WASM_FUNCTION) {
- int exported_param_count =
- Smi::cast(func->GetInternalField(kInternalArity))->value();
- Handle<ByteArray> exportedSig = Handle<ByteArray>(
- ByteArray::cast(func->GetInternalField(kInternalSignature)));
- if (exported_param_count == param_count &&
- exportedSig->length() == sig_data->length() &&
- memcmp(exportedSig->data(), sig_data->data(),
- exportedSig->length()) == 0) {
- isMatch = true;
- }
- }
+static MaybeHandle<JSFunction> ReportFFIError(
+ ErrorThrower* thrower, const char* error, uint32_t index,
+ Handle<String> module_name, MaybeHandle<String> function_name) {
+ Handle<String> function_name_handle;
+ if (function_name.ToHandle(&function_name_handle)) {
+ thrower->Error("Import #%d module=\"%.*s\" function=\"%.*s\" error: %s",
+ index, module_name->length(), module_name->ToCString().get(),
+ function_name_handle->length(),
+ function_name_handle->ToCString().get(), error);
+ } else {
+ thrower->Error("Import #%d module=\"%.*s\" error: %s", index,
+ module_name->length(), module_name->ToCString().get(),
+ error);
+ }
+ thrower->Error("Import ");
+ return MaybeHandle<JSFunction>();
+}
+
+static MaybeHandle<JSReceiver> LookupFunction(
+ ErrorThrower* thrower, Factory* factory, Handle<JSReceiver> ffi,
+ uint32_t index, Handle<String> module_name,
+ MaybeHandle<String> function_name) {
+ if (ffi.is_null()) {
+ return ReportFFIError(thrower, "FFI is not an object", index, module_name,
+ function_name);
+ }
+
+ // Look up the module first.
+ MaybeHandle<Object> result = Object::GetProperty(ffi, module_name);
+ if (result.is_null()) {
+ return ReportFFIError(thrower, "module not found", index, module_name,
+ function_name);
+ }
+
+ Handle<Object> module = result.ToHandleChecked();
+
+ if (!module->IsJSReceiver()) {
+ return ReportFFIError(thrower, "module is not an object or function", index,
+ module_name, function_name);
+ }
+
+ Handle<Object> function;
+ if (!function_name.is_null()) {
+ // Look up the function in the module.
+ MaybeHandle<Object> result =
+ Object::GetProperty(module, function_name.ToHandleChecked());
+ if (result.is_null()) {
+ return ReportFFIError(thrower, "function not found", index, module_name,
+ function_name);
+ }
+ function = result.ToHandleChecked();
+ } else {
+ // No function specified. Use the "default export".
+ function = module;
+ }
+
+ if (!function->IsCallable()) {
+ return ReportFFIError(thrower, "not a callable", index, module_name,
+ function_name);
+ }
+
+ return Handle<JSReceiver>::cast(function);
+}
+
+Handle<Code> CompileImportWrapper(Isolate* isolate,
+ const Handle<JSReceiver> ffi, int index,
+ Handle<FixedArray> import_data,
+ ErrorThrower* thrower) {
+ Handle<FixedArray> data =
+ import_data->GetValueChecked<FixedArray>(isolate, index);
+ Handle<String> module_name =
+ data->GetValueChecked<String>(isolate, kModuleName);
+ MaybeHandle<String> function_name =
+ data->GetValue<String>(isolate, kFunctionName);
+
+ // TODO(mtrofin): this is an uint32_t, actually. We should rationalize
+ // it when we rationalize signed/unsigned stuff.
+ int ret_count = Smi::cast(data->get(kOutputCount))->value();
+ CHECK_GE(ret_count, 0);
+ Handle<ByteArray> sig_data =
+ data->GetValueChecked<ByteArray>(isolate, kSignature);
+ int sig_data_size = sig_data->length();
+ int param_count = sig_data_size - ret_count;
+ CHECK(param_count >= 0);
+
+ MaybeHandle<JSReceiver> function = LookupFunction(
+ thrower, isolate->factory(), ffi, index, module_name, function_name);
+ if (function.is_null()) return Handle<Code>::null();
+ Handle<Code> code;
+ Handle<JSReceiver> target = function.ToHandleChecked();
+ bool isMatch = false;
+ Handle<Code> export_wrapper_code;
+ if (target->IsJSFunction()) {
+ Handle<JSFunction> func = Handle<JSFunction>::cast(target);
+ export_wrapper_code = handle(func->code());
+ if (export_wrapper_code->kind() == Code::JS_TO_WASM_FUNCTION) {
+ int exported_param_count =
+ Smi::cast(func->GetInternalField(kInternalArity))->value();
+ Handle<ByteArray> exportedSig = Handle<ByteArray>(
+ ByteArray::cast(func->GetInternalField(kInternalSignature)));
+ if (exported_param_count == param_count &&
+ exportedSig->length() == sig_data->length() &&
+ memcmp(exportedSig->data(), sig_data->data(),
+ exportedSig->length()) == 0) {
+ isMatch = true;
}
- if (isMatch) {
- int wasm_count = 0;
- int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
- for (RelocIterator it(*export_wrapper_code, mask); !it.done();
- it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- if (target->kind() == Code::WASM_FUNCTION) {
- ++wasm_count;
- code = handle(target);
- }
- }
- DCHECK(wasm_count == 1);
- } else {
- // Copy the signature to avoid a raw pointer into a heap object when
- // GC can happen.
- Zone zone(isolate->allocator());
- MachineRepresentation* reps =
- zone.NewArray<MachineRepresentation>(sig_data_size);
- memcpy(reps, sig_data->data(),
- sizeof(MachineRepresentation) * sig_data_size);
- FunctionSig sig(ret_count, param_count, reps);
-
- code = compiler::CompileWasmToJSWrapper(isolate, target, &sig, index,
- module_name, function_name);
+ }
+ }
+ if (isMatch) {
+ int wasm_count = 0;
+ int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ for (RelocIterator it(*export_wrapper_code, mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address target_address = rinfo->target_address();
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ if (target->kind() == Code::WASM_FUNCTION) {
+ ++wasm_count;
+ code = handle(target);
}
- imports.push_back(code);
}
+ DCHECK(wasm_count == 1);
+ return code;
+ } else {
+ // Copy the signature to avoid a raw pointer into a heap object when
+ // GC can happen.
+ Zone zone(isolate->allocator());
+ MachineRepresentation* reps =
+ zone.NewArray<MachineRepresentation>(sig_data_size);
+ memcpy(reps, sig_data->data(),
+ sizeof(MachineRepresentation) * sig_data_size);
+ FunctionSig sig(ret_count, param_count, reps);
+
+ return compiler::CompileWasmToJSWrapper(isolate, target, &sig, index,
+ module_name, function_name);
}
- return true;
}
void InitializeParallelCompilation(
Isolate* isolate, const std::vector<WasmFunction>& functions,
std::vector<compiler::WasmCompilationUnit*>& compilation_units,
- ModuleEnv& module_env, ErrorThrower& thrower) {
+ ModuleEnv& module_env, ErrorThrower* thrower) {
for (uint32_t i = FLAG_skip_compiling_wasm_funcs; i < functions.size(); ++i) {
- compilation_units[i] = new compiler::WasmCompilationUnit(
- &thrower, isolate, &module_env, &functions[i], i);
+ const WasmFunction* func = &functions[i];
+ compilation_units[i] =
+ func->imported ? nullptr : new compiler::WasmCompilationUnit(
+ thrower, isolate, &module_env, func, i);
}
}
@@ -812,7 +646,7 @@ void CompileInParallel(Isolate* isolate, const WasmModule* module,
// 1) The main thread allocates a compilation unit for each wasm function
// and stores them in the vector {compilation_units}.
InitializeParallelCompilation(isolate, module->functions, compilation_units,
- *module_env, *thrower);
+ *module_env, thrower);
// Objects for the synchronization with the background threads.
base::Mutex result_mutex;
@@ -853,8 +687,8 @@ void CompileSequentially(Isolate* isolate, const WasmModule* module,
for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
i < module->functions.size(); ++i) {
const WasmFunction& func = module->functions[i];
+ if (func.imported) continue; // Imports are compiled at instantiation time.
- DCHECK_EQ(i, func.func_index);
WasmName str = module->GetName(func.name_offset, func.name_length);
Handle<Code> code = Handle<Code>::null();
// Compile the function.
@@ -870,190 +704,321 @@ void CompileSequentially(Isolate* isolate, const WasmModule* module,
}
}
-void SetDebugSupport(Factory* factory, Handle<FixedArray> compiled_module,
- Handle<JSObject> js_object) {
- Isolate* isolate = compiled_module->GetIsolate();
- MaybeHandle<String> module_bytes_string =
- compiled_module->GetValue<String>(isolate, kModuleBytes);
- if (!module_bytes_string.is_null()) {
- js_object->SetInternalField(kWasmModuleBytesString,
- *module_bytes_string.ToHandleChecked());
- }
- Handle<FixedArray> functions = Handle<FixedArray>(
- FixedArray::cast(js_object->GetInternalField(kWasmModuleCodeTable)));
+void PatchDirectCalls(Handle<FixedArray> old_functions,
+ Handle<FixedArray> new_functions, int start) {
+ DCHECK_EQ(new_functions->length(), old_functions->length());
- for (int i = FLAG_skip_compiling_wasm_funcs; i < functions->length(); ++i) {
- Handle<Code> code = functions->GetValueChecked<Code>(isolate, i);
- DCHECK(code->deoptimization_data() == nullptr ||
- code->deoptimization_data()->length() == 0);
- Handle<FixedArray> deopt_data = factory->NewFixedArray(2, TENURED);
- if (!js_object.is_null()) {
- deopt_data->set(0, *js_object);
+ DisallowHeapAllocation no_gc;
+ std::map<Code*, Code*> old_to_new_code;
+ for (int i = 0; i < new_functions->length(); ++i) {
+ old_to_new_code.insert(std::make_pair(Code::cast(old_functions->get(i)),
+ Code::cast(new_functions->get(i))));
+ }
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ AllowDeferredHandleDereference embedding_raw_address;
+ for (int i = start; i < new_functions->length(); ++i) {
+ Code* wasm_function = Code::cast(new_functions->get(i));
+ for (RelocIterator it(wasm_function, mode_mask); !it.done(); it.next()) {
+ Code* old_code =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (old_code->kind() == Code::WASM_TO_JS_FUNCTION ||
+ old_code->kind() == Code::WASM_FUNCTION) {
+ auto found = old_to_new_code.find(old_code);
+ DCHECK(found != old_to_new_code.end());
+ Code* new_code = found->second;
+ if (new_code != old_code) {
+ it.rinfo()->set_target_address(new_code->instruction_start(),
+ UPDATE_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
+ }
+ }
}
- deopt_data->set(1, Smi::FromInt(static_cast<int>(i)));
- deopt_data->set_length(2);
- code->set_deoptimization_data(*deopt_data);
}
+}
- MaybeHandle<ByteArray> function_name_table =
- compiled_module->GetValue<ByteArray>(isolate, kFunctionNameTable);
- if (!function_name_table.is_null()) {
- js_object->SetInternalField(kWasmFunctionNamesArray,
- *function_name_table.ToHandleChecked());
+static void ResetCompiledModule(Isolate* isolate, JSObject* owner,
+ WasmCompiledModule* compiled_module) {
+ TRACE("Resetting %d\n", compiled_module->instance_id());
+ Object* undefined = *isolate->factory()->undefined_value();
+ uint32_t old_mem_size = compiled_module->has_heap()
+ ? compiled_module->mem_size()
+ : compiled_module->default_mem_size();
+ uint32_t default_mem_size = compiled_module->default_mem_size();
+ Object* mem_start = compiled_module->ptr_to_heap();
+ Address old_mem_address = nullptr;
+ Address globals_start =
+ GetGlobalStartAddressFromCodeTemplate(undefined, owner);
+
+ if (old_mem_size > 0) {
+ CHECK_NE(mem_start, undefined);
+ old_mem_address =
+ static_cast<Address>(JSArrayBuffer::cast(mem_start)->backing_store());
+ }
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_SIZE_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_REFERENCE);
+
+ Object* fct_obj = compiled_module->ptr_to_code_table();
+ if (fct_obj != nullptr && fct_obj != undefined &&
+ (old_mem_size > 0 || globals_start != nullptr)) {
+ FixedArray* functions = FixedArray::cast(fct_obj);
+ for (int i = 0; i < functions->length(); ++i) {
+ Code* code = Code::cast(functions->get(i));
+ bool changed = false;
+ for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemoryReference(mode) ||
+ RelocInfo::IsWasmMemorySizeReference(mode)) {
+ it.rinfo()->update_wasm_memory_reference(
+ old_mem_address, nullptr, old_mem_size, default_mem_size);
+ changed = true;
+ } else {
+ CHECK(RelocInfo::IsWasmGlobalReference(mode));
+ it.rinfo()->update_wasm_global_reference(globals_start, nullptr);
+ changed = true;
+ }
+ }
+ if (changed) {
+ Assembler::FlushICache(isolate, code->instruction_start(),
+ code->instruction_size());
+ }
+ }
}
+ compiled_module->reset_heap();
}
-bool SetupGlobals(Isolate* isolate, Handle<FixedArray> compiled_module,
- Handle<JSObject> instance, ErrorThrower* thrower) {
- uint32_t globals_size = static_cast<uint32_t>(
- Smi::cast(compiled_module->get(kGlobalsSize))->value());
- if (globals_size > 0) {
- Handle<JSArrayBuffer> globals_buffer =
- NewArrayBuffer(isolate, globals_size);
- if (globals_buffer.is_null()) {
- thrower->Error("Out of memory: wasm globals");
- return false;
+static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
+ JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
+ JSObject* owner = *p;
+ WasmCompiledModule* compiled_module =
+ WasmCompiledModule::cast(owner->GetInternalField(kWasmCompiledModule));
+ TRACE("Finalizing %d {\n", compiled_module->instance_id());
+ Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
+ DCHECK(compiled_module->has_weak_module_object());
+ WeakCell* weak_module_obj = compiled_module->ptr_to_weak_module_object();
+
+ // weak_module_obj may have been cleared, meaning the module object
+ // was GC-ed. In that case, there won't be any new instances created,
+ // and we don't need to maintain the links between instances.
+ if (!weak_module_obj->cleared()) {
+ JSObject* module_obj = JSObject::cast(weak_module_obj->value());
+ WasmCompiledModule* current_template =
+ WasmCompiledModule::cast(module_obj->GetInternalField(0));
+
+ TRACE("chain before {\n");
+ TRACE_CHAIN(current_template);
+ TRACE("}\n");
+
+ DCHECK(!current_template->has_weak_prev_instance());
+ WeakCell* next = compiled_module->ptr_to_weak_next_instance();
+ WeakCell* prev = compiled_module->ptr_to_weak_prev_instance();
+
+ if (current_template == compiled_module) {
+ if (next == nullptr) {
+ ResetCompiledModule(isolate, owner, compiled_module);
+ } else {
+ DCHECK(next->value()->IsFixedArray());
+ module_obj->SetInternalField(0, next->value());
+ DCHECK_NULL(prev);
+ WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
+ }
+ } else {
+ DCHECK(!(prev == nullptr && next == nullptr));
+ // the only reason prev or next would be cleared is if the
+ // respective objects got collected, but if that happened,
+ // we would have relinked the list.
+ if (prev != nullptr) {
+ DCHECK(!prev->cleared());
+ if (next == nullptr) {
+ WasmCompiledModule::cast(prev->value())->reset_weak_next_instance();
+ } else {
+ WasmCompiledModule::cast(prev->value())
+ ->set_ptr_to_weak_next_instance(next);
+ }
+ }
+ if (next != nullptr) {
+ DCHECK(!next->cleared());
+ if (prev == nullptr) {
+ WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
+ } else {
+ WasmCompiledModule::cast(next->value())
+ ->set_ptr_to_weak_prev_instance(prev);
+ }
+ }
}
- RelocateGlobals(instance,
- static_cast<Address>(globals_buffer->backing_store()));
- instance->SetInternalField(kWasmGlobalsArrayBuffer, *globals_buffer);
+ TRACE("chain after {\n");
+ TRACE_CHAIN(WasmCompiledModule::cast(module_obj->GetInternalField(0)));
+ TRACE("}\n");
}
- return true;
+ compiled_module->reset_weak_owning_instance();
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+ TRACE("}\n");
}
-bool SetupInstanceHeap(Isolate* isolate, Handle<FixedArray> compiled_module,
- Handle<JSObject> instance, Handle<JSArrayBuffer> memory,
- ErrorThrower* thrower) {
- uint32_t min_mem_pages = static_cast<uint32_t>(
- Smi::cast(compiled_module->get(kMinRequiredMemory))->value());
- isolate->counters()->wasm_min_mem_pages_count()->AddSample(min_mem_pages);
- // TODO(wasm): re-enable counter for max_mem_pages when we use that field.
+Handle<FixedArray> SetupIndirectFunctionTable(
+ Isolate* isolate, Handle<FixedArray> wasm_functions,
+ Handle<FixedArray> indirect_table_template,
+ Handle<FixedArray> tables_to_replace) {
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> cloned_indirect_tables =
+ factory->CopyFixedArray(indirect_table_template);
+ for (int i = 0; i < cloned_indirect_tables->length(); ++i) {
+ Handle<FixedArray> orig_metadata =
+ cloned_indirect_tables->GetValueChecked<FixedArray>(isolate, i);
+ Handle<FixedArray> cloned_metadata = factory->CopyFixedArray(orig_metadata);
+ cloned_indirect_tables->set(i, *cloned_metadata);
- if (memory.is_null() && min_mem_pages > 0) {
- memory = AllocateMemory(thrower, isolate, min_mem_pages);
- if (memory.is_null()) {
- return false;
+ Handle<FixedArray> orig_table =
+ cloned_metadata->GetValueChecked<FixedArray>(isolate, kTable);
+ Handle<FixedArray> cloned_table = factory->CopyFixedArray(orig_table);
+ cloned_metadata->set(kTable, *cloned_table);
+ // Patch the cloned code to refer to the cloned kTable.
+ Handle<FixedArray> table_to_replace =
+ tables_to_replace->GetValueChecked<FixedArray>(isolate, i)
+ ->GetValueChecked<FixedArray>(isolate, kTable);
+ for (int fct_index = 0; fct_index < wasm_functions->length(); ++fct_index) {
+ Handle<Code> wasm_function =
+ wasm_functions->GetValueChecked<Code>(isolate, fct_index);
+ PatchFunctionTable(wasm_function, table_to_replace, cloned_table);
}
}
+ return cloned_indirect_tables;
+}
- if (!memory.is_null()) {
- instance->SetInternalField(kWasmMemArrayBuffer, *memory);
- Address mem_start = static_cast<Address>(memory->backing_store());
- uint32_t mem_size = static_cast<uint32_t>(memory->byte_length()->Number());
- RelocateInstanceCode(instance, mem_start,
- WasmModule::kPageSize * min_mem_pages, mem_size);
- LoadDataSegments(compiled_module, mem_start, mem_size);
+} // namespace
+
+const char* SectionName(WasmSectionCode code) {
+ switch (code) {
+ case kUnknownSectionCode:
+ return "Unknown";
+ case kTypeSectionCode:
+ return "Type";
+ case kImportSectionCode:
+ return "Import";
+ case kFunctionSectionCode:
+ return "Function";
+ case kTableSectionCode:
+ return "Table";
+ case kMemorySectionCode:
+ return "Memory";
+ case kGlobalSectionCode:
+ return "Global";
+ case kExportSectionCode:
+ return "Export";
+ case kStartSectionCode:
+ return "Start";
+ case kCodeSectionCode:
+ return "Code";
+ case kElementSectionCode:
+ return "Element";
+ case kDataSectionCode:
+ return "Data";
+ case kNameSectionCode:
+ return "Name";
+ default:
+ return "<unknown>";
}
- return true;
}
-bool SetupImports(Isolate* isolate, Handle<FixedArray> compiled_module,
- Handle<JSObject> instance, ErrorThrower* thrower,
- Handle<JSReceiver> ffi) {
- //-------------------------------------------------------------------------
- // Compile wrappers to imported functions.
- //-------------------------------------------------------------------------
- std::vector<Handle<Code>> import_code;
- MaybeHandle<FixedArray> maybe_import_data =
- compiled_module->GetValue<FixedArray>(isolate, kImportData);
- Handle<FixedArray> import_data;
- if (maybe_import_data.ToHandle(&import_data)) {
- if (!CompileWrappersToImportedFunctions(isolate, ffi, import_code,
- import_data, thrower)) {
- return false;
+std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
+ os << "WASM module with ";
+ os << (module.min_mem_pages * module.kPageSize) << " min mem";
+ os << (module.max_mem_pages * module.kPageSize) << " max mem";
+ os << module.functions.size() << " functions";
+ os << module.functions.size() << " globals";
+ os << module.functions.size() << " data segments";
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
+ os << "WASM function with signature " << *function.sig;
+
+ os << " code bytes: "
+ << (function.code_end_offset - function.code_start_offset);
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const WasmFunctionName& pair) {
+ os << "#" << pair.function_->func_index << ":";
+ if (pair.function_->name_offset > 0) {
+ if (pair.module_) {
+ WasmName name = pair.module_->GetName(pair.function_->name_offset,
+ pair.function_->name_length);
+ os.write(name.start(), name.length());
+ } else {
+ os << "+" << pair.function_->func_index;
}
+ } else {
+ os << "?";
}
+ return os;
+}
- RecordStats(isolate, import_code);
+Handle<JSFunction> WrapExportCodeAsJSFunction(
+ Isolate* isolate, Handle<Code> export_code, Handle<String> name, int arity,
+ MaybeHandle<ByteArray> maybe_signature, Handle<JSObject> module_instance) {
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfo(name, export_code, false);
+ shared->set_length(arity);
+ shared->set_internal_formal_parameter_count(arity);
+ Handle<JSFunction> function = isolate->factory()->NewFunction(
+ isolate->wasm_function_map(), name, export_code);
+ function->set_shared(*shared);
- Handle<FixedArray> code_table = Handle<FixedArray>(
- FixedArray::cast(instance->GetInternalField(kWasmModuleCodeTable)));
- // TODO(mtrofin): get the code off std::vector and on FixedArray, for
- // consistency.
- std::vector<Handle<Code>> function_code(code_table->length());
- for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
- function_code[i] = code;
+ function->SetInternalField(kInternalModuleInstance, *module_instance);
+ // add another Internal Field as the function arity
+ function->SetInternalField(kInternalArity, Smi::FromInt(arity));
+ // add another Internal Field as the signature of the foreign function
+ Handle<ByteArray> signature;
+ if (maybe_signature.ToHandle(&signature)) {
+ function->SetInternalField(kInternalSignature, *signature);
}
-
- LinkImports(isolate, function_code, import_code);
- return true;
+ return function;
}
-bool SetupExportsObject(Handle<FixedArray> compiled_module, Isolate* isolate,
- Handle<JSObject> instance, ErrorThrower* thrower) {
- Factory* factory = isolate->factory();
- bool mem_export =
- static_cast<bool>(Smi::cast(compiled_module->get(kExportMem))->value());
- ModuleOrigin origin = static_cast<ModuleOrigin>(
- Smi::cast(compiled_module->get(kOrigin))->value());
-
- MaybeHandle<FixedArray> maybe_exports =
- compiled_module->GetValue<FixedArray>(isolate, kExports);
- if (!maybe_exports.is_null() || mem_export) {
- PropertyDescriptor desc;
- desc.set_writable(false);
+Object* GetOwningWasmInstance(Code* code) {
+ DCHECK(code->kind() == Code::WASM_FUNCTION);
+ DisallowHeapAllocation no_gc;
+ FixedArray* deopt_data = code->deoptimization_data();
+ DCHECK_NOT_NULL(deopt_data);
+ DCHECK(deopt_data->length() == 2);
+ Object* weak_link = deopt_data->get(0);
+ if (!weak_link->IsWeakCell()) return nullptr;
+ WeakCell* cell = WeakCell::cast(weak_link);
+ return cell->value();
+}
- Handle<JSObject> exports_object = instance;
- if (origin == kWasmOrigin) {
- // Create the "exports" object.
- Handle<JSFunction> object_function = Handle<JSFunction>(
- isolate->native_context()->object_function(), isolate);
- exports_object = factory->NewJSObject(object_function, TENURED);
- Handle<String> exports_name = factory->InternalizeUtf8String("exports");
- JSObject::AddProperty(instance, exports_name, exports_object, READ_ONLY);
- }
- Handle<FixedArray> exports;
- if (maybe_exports.ToHandle(&exports)) {
- int exports_size = exports->length();
- for (int i = 0; i < exports_size; ++i) {
- if (thrower->error()) return false;
- Handle<FixedArray> export_metadata =
- exports->GetValueChecked<FixedArray>(isolate, i);
- Handle<Code> export_code =
- export_metadata->GetValueChecked<Code>(isolate, kExportCode);
- RecordStats(isolate, *export_code);
- Handle<String> name =
- export_metadata->GetValueChecked<String>(isolate, kExportName);
- int arity = Smi::cast(export_metadata->get(kExportArity))->value();
- MaybeHandle<ByteArray> signature =
- export_metadata->GetValue<ByteArray>(isolate, kExportedSignature);
- Handle<JSFunction> function = WrapExportCodeAsJSFunction(
- isolate, export_code, name, arity, signature, instance);
- desc.set_value(function);
- Maybe<bool> status = JSReceiver::DefineOwnProperty(
- isolate, exports_object, name, &desc, Object::THROW_ON_ERROR);
- if (!status.IsJust()) {
- thrower->Error("export of %.*s failed.", name->length(),
- name->ToCString().get());
- return false;
- }
- }
- }
- if (mem_export) {
- // Export the memory as a named property.
- Handle<String> name = factory->InternalizeUtf8String("memory");
- Handle<JSArrayBuffer> memory = Handle<JSArrayBuffer>(
- JSArrayBuffer::cast(instance->GetInternalField(kWasmMemArrayBuffer)));
- JSObject::AddProperty(exports_object, name, memory, READ_ONLY);
- }
- }
- return true;
+uint32_t GetNumImportedFunctions(Handle<JSObject> wasm_object) {
+ return static_cast<uint32_t>(
+ Smi::cast(wasm_object->GetInternalField(kWasmNumImportedFunctions))
+ ->value());
}
-} // namespace
+WasmModule::WasmModule(byte* module_start)
+ : module_start(module_start),
+ module_end(nullptr),
+ min_mem_pages(0),
+ max_mem_pages(0),
+ mem_export(false),
+ start_function_index(-1),
+ origin(kWasmOrigin),
+ globals_size(0),
+ num_imported_functions(0),
+ num_declared_functions(0),
+ num_exported_functions(0),
+ pending_tasks(new base::Semaphore(0)) {}
-MaybeHandle<FixedArray> WasmModule::CompileFunctions(
+MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
Isolate* isolate, ErrorThrower* thrower) const {
Factory* factory = isolate->factory();
- MaybeHandle<FixedArray> nothing;
+ MaybeHandle<WasmCompiledModule> nothing;
- WasmModuleInstance temp_instance_for_compilation(this);
- temp_instance_for_compilation.context = isolate->native_context();
- temp_instance_for_compilation.mem_size = GetMinModuleMemSize(this);
- temp_instance_for_compilation.mem_start = nullptr;
- temp_instance_for_compilation.globals_start = nullptr;
+ WasmModuleInstance temp_instance(this);
+ temp_instance.context = isolate->native_context();
+ temp_instance.mem_size = GetMinModuleMemSize(this);
+ temp_instance.mem_start = nullptr;
+ temp_instance.globals_start = nullptr;
MaybeHandle<FixedArray> indirect_table =
function_tables.size()
@@ -1062,10 +1027,10 @@ MaybeHandle<FixedArray> WasmModule::CompileFunctions(
: MaybeHandle<FixedArray>();
for (uint32_t i = 0; i < function_tables.size(); ++i) {
Handle<FixedArray> values = wasm::BuildFunctionTable(isolate, i, this);
- temp_instance_for_compilation.function_tables[i] = values;
+ temp_instance.function_tables[i] = values;
Handle<FixedArray> metadata = isolate->factory()->NewFixedArray(
- kWasmIndirectFunctionTableMetadataSize, TENURED);
+ kWasmIndirectFunctionTableDataSize, TENURED);
metadata->set(kSize, Smi::FromInt(function_tables[i].size));
metadata->set(kTable, *values);
indirect_table.ToHandleChecked()->set(i, *metadata);
@@ -1076,61 +1041,90 @@ MaybeHandle<FixedArray> WasmModule::CompileFunctions(
ModuleEnv module_env;
module_env.module = this;
- module_env.instance = &temp_instance_for_compilation;
+ module_env.instance = &temp_instance;
module_env.origin = origin;
- InitializePlaceholders(factory, &module_env.placeholders, functions.size());
- Handle<FixedArray> compiled_functions =
- factory->NewFixedArray(static_cast<int>(functions.size()), TENURED);
+ // The {code_table} array contains import wrappers and functions (which
+ // are both included in {functions.size()}, and export wrappers.
+ int code_table_size =
+ static_cast<int>(functions.size() + num_exported_functions);
+ Handle<FixedArray> code_table =
+ factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
- temp_instance_for_compilation.import_code.resize(import_table.size());
- for (uint32_t i = 0; i < import_table.size(); ++i) {
- temp_instance_for_compilation.import_code[i] =
- CreatePlaceholder(factory, i, Code::WASM_TO_JS_FUNCTION);
+ // Initialize the code table with placeholders.
+ for (uint32_t i = 0; i < functions.size(); i++) {
+ Code::Kind kind = Code::WASM_FUNCTION;
+ if (i < num_imported_functions) kind = Code::WASM_TO_JS_FUNCTION;
+ Handle<Code> placeholder = CreatePlaceholder(factory, i, kind);
+ code_table->set(static_cast<int>(i), *placeholder);
+ temp_instance.function_code[i] = placeholder;
}
+
isolate->counters()->wasm_functions_per_module()->AddSample(
static_cast<int>(functions.size()));
- if (FLAG_wasm_num_compilation_tasks != 0) {
- CompileInParallel(isolate, this,
- temp_instance_for_compilation.function_code, thrower,
- &module_env);
+ if (!FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks != 0) {
+ // Avoid a race condition by collecting results into a second vector.
+ std::vector<Handle<Code>> results;
+ results.reserve(temp_instance.function_code.size());
+ for (size_t i = 0; i < temp_instance.function_code.size(); i++) {
+ results.push_back(temp_instance.function_code[i]);
+ }
+ CompileInParallel(isolate, this, results, thrower, &module_env);
+
+ for (size_t i = 0; i < results.size(); i++) {
+ temp_instance.function_code[i] = results[i];
+ }
} else {
- CompileSequentially(isolate, this,
- temp_instance_for_compilation.function_code, thrower,
+ CompileSequentially(isolate, this, temp_instance.function_code, thrower,
&module_env);
}
if (thrower->error()) return nothing;
// At this point, compilation has completed. Update the code table.
for (size_t i = FLAG_skip_compiling_wasm_funcs;
- i < temp_instance_for_compilation.function_code.size(); ++i) {
- Code* code = *temp_instance_for_compilation.function_code[i];
- compiled_functions->set(static_cast<int>(i), code);
+ i < temp_instance.function_code.size(); ++i) {
+ Code* code = *temp_instance.function_code[i];
+ code_table->set(static_cast<int>(i), code);
+ }
+
+ // Link the functions in the module.
+ for (size_t i = FLAG_skip_compiling_wasm_funcs;
+ i < temp_instance.function_code.size(); ++i) {
+ Handle<Code> code = temp_instance.function_code[i];
+ bool modified = LinkFunction(code, temp_instance.function_code);
+ if (modified) {
+ // TODO(mtrofin): do we need to flush the cache here?
+ Assembler::FlushICache(isolate, code->instruction_start(),
+ code->instruction_size());
+ }
}
// Create the compiled module object, and populate with compiled functions
// and information needed at instantiation time. This object needs to be
// serializable. Instantiation may occur off a deserialized version of this
// object.
- Handle<FixedArray> ret =
- factory->NewFixedArray(kCompiledWasmObjectTableSize, TENURED);
- ret->set(kFunctions, *compiled_functions);
+ Handle<WasmCompiledModule> ret = WasmCompiledModule::New(
+ isolate, min_mem_pages, globals_size, mem_export, origin);
+ ret->set_code_table(code_table);
if (!indirect_table.is_null()) {
- ret->set(kTableOfIndirectFunctionTables, *indirect_table.ToHandleChecked());
+ ret->set_indirect_function_tables(indirect_table.ToHandleChecked());
}
- Handle<FixedArray> import_data = GetImportsMetadata(factory, this);
- ret->set(kImportData, *import_data);
+ Handle<FixedArray> import_data = GetImportsData(factory, this);
+ ret->set_import_data(import_data);
- // Compile export functions.
- int export_size = static_cast<int>(export_table.size());
- Handle<Code> startup_fct;
+ // Compile exported function wrappers.
+ int export_size = static_cast<int>(num_exported_functions);
if (export_size > 0) {
Handle<FixedArray> exports = factory->NewFixedArray(export_size, TENURED);
- for (int i = 0; i < export_size; ++i) {
- Handle<FixedArray> export_metadata =
- factory->NewFixedArray(kWasmExportMetadataTableSize, TENURED);
- const WasmExport& exp = export_table[i];
- FunctionSig* funcSig = functions[exp.func_index].sig;
+ int index = -1;
+
+ for (const WasmExport& exp : export_table) {
+ if (exp.kind != kExternalFunction)
+ continue; // skip non-function exports.
+ index++;
+ Handle<FixedArray> export_data =
+ factory->NewFixedArray(kWasmExportDataSize, TENURED);
+ FunctionSig* funcSig = functions[exp.index].sig;
Handle<ByteArray> exportedSig =
factory->NewByteArray(static_cast<int>(funcSig->parameter_count() +
funcSig->return_count()),
@@ -1138,45 +1132,34 @@ MaybeHandle<FixedArray> WasmModule::CompileFunctions(
exportedSig->copy_in(0,
reinterpret_cast<const byte*>(funcSig->raw_data()),
exportedSig->length());
- export_metadata->set(kExportedSignature, *exportedSig);
+ export_data->set(kExportedSignature, *exportedSig);
WasmName str = GetName(exp.name_offset, exp.name_length);
Handle<String> name = factory->InternalizeUtf8String(str);
- Handle<Code> code =
- temp_instance_for_compilation.function_code[exp.func_index];
+ Handle<Code> code = code_table->GetValueChecked<Code>(isolate, exp.index);
Handle<Code> export_code = compiler::CompileJSToWasmWrapper(
- isolate, &module_env, code, exp.func_index);
+ isolate, &module_env, code, exp.index);
if (thrower->error()) return nothing;
- export_metadata->set(kExportCode, *export_code);
- export_metadata->set(kExportName, *name);
- export_metadata->set(
- kExportArity, Smi::FromInt(static_cast<int>(
- functions[exp.func_index].sig->parameter_count())));
- export_metadata->set(kExportedFunctionIndex,
- Smi::FromInt(static_cast<int>(exp.func_index)));
- exports->set(i, *export_metadata);
- if (exp.func_index == start_function_index) {
- startup_fct = export_code;
- }
+ export_data->set(kExportName, *name);
+ export_data->set(kExportArity,
+ Smi::FromInt(static_cast<int>(
+ functions[exp.index].sig->parameter_count())));
+ export_data->set(kExportedFunctionIndex,
+ Smi::FromInt(static_cast<int>(exp.index)));
+ exports->set(index, *export_data);
+ code_table->set(static_cast<int>(functions.size() + index), *export_code);
}
- ret->set(kExports, *exports);
+ ret->set_exports(exports);
}
- // Compile startup function, if we haven't already.
+ // Record data for startup function.
if (start_function_index >= 0) {
- uint32_t index = static_cast<uint32_t>(start_function_index);
HandleScope scope(isolate);
- if (startup_fct.is_null()) {
- Handle<Code> code = temp_instance_for_compilation.function_code[index];
- DCHECK_EQ(0, functions[index].sig->parameter_count());
- startup_fct =
- compiler::CompileJSToWasmWrapper(isolate, &module_env, code, index);
- }
- Handle<FixedArray> metadata =
- factory->NewFixedArray(kWasmExportMetadataTableSize, TENURED);
- metadata->set(kExportCode, *startup_fct);
- metadata->set(kExportArity, Smi::FromInt(0));
- metadata->set(kExportedFunctionIndex, Smi::FromInt(start_function_index));
- ret->set(kStartupFunction, *metadata);
+ Handle<FixedArray> startup_data =
+ factory->NewFixedArray(kWasmExportDataSize, TENURED);
+ startup_data->set(kExportArity, Smi::FromInt(0));
+ startup_data->set(kExportedFunctionIndex,
+ Smi::FromInt(start_function_index));
+ ret->set_startup_function(startup_data);
}
// TODO(wasm): saving the module bytes for debugging is wasteful. We should
@@ -1189,213 +1172,334 @@ MaybeHandle<FixedArray> WasmModule::CompileFunctions(
Handle<String> module_bytes_string =
factory->NewStringFromOneByte(module_bytes_vec, TENURED)
.ToHandleChecked();
- ret->set(kModuleBytes, *module_bytes_string);
+ ret->set_module_bytes(module_bytes_string);
}
Handle<ByteArray> function_name_table =
BuildFunctionNamesTable(isolate, module_env.module);
- ret->set(kFunctionNameTable, *function_name_table);
- ret->set(kMinRequiredMemory, Smi::FromInt(min_mem_pages));
+ ret->set_function_names(function_name_table);
if (data_segments.size() > 0) SaveDataSegmentInfo(factory, this, ret);
- ret->set(kGlobalsSize, Smi::FromInt(globals_size));
- ret->set(kExportMem, Smi::FromInt(mem_export));
- ret->set(kOrigin, Smi::FromInt(origin));
+ DCHECK_EQ(ret->default_mem_size(), temp_instance.mem_size);
return ret;
}
-void PatchJSWrapper(Isolate* isolate, Handle<Code> wrapper,
- Handle<Code> new_target) {
- AllowDeferredHandleDereference embedding_raw_address;
- bool seen = false;
- for (RelocIterator it(*wrapper, 1 << RelocInfo::CODE_TARGET); !it.done();
- it.next()) {
- Code* target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (target->kind() == Code::WASM_FUNCTION) {
- DCHECK(!seen);
- seen = true;
- it.rinfo()->set_target_address(new_target->instruction_start(),
- UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+// Instantiates a WASM module, creating a WebAssembly.Instance from a
+// WebAssembly.Module.
+MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
+ ErrorThrower* thrower,
+ Handle<JSObject> module_object,
+ Handle<JSReceiver> ffi,
+ Handle<JSArrayBuffer> memory) {
+ MaybeHandle<JSObject> nothing;
+ HistogramTimerScope wasm_instantiate_module_time_scope(
+ isolate->counters()->wasm_instantiate_module_time());
+ Factory* factory = isolate->factory();
+
+ //--------------------------------------------------------------------------
+ // Reuse the compiled module (if no owner), otherwise clone.
+ //--------------------------------------------------------------------------
+ Handle<WasmCompiledModule> compiled_module;
+ Handle<FixedArray> code_table;
+ Handle<FixedArray> old_code_table;
+ Handle<JSObject> owner;
+ // If we don't clone, this will be null(). Otherwise, this will
+ // be a weak link to the original. If we lose the original to GC,
+ // this will be a cleared. We'll link the instances chain last.
+ MaybeHandle<WeakCell> link_to_original;
+
+ TRACE("Starting new module instantiation\n");
+ {
+ Handle<WasmCompiledModule> original(
+ WasmCompiledModule::cast(module_object->GetInternalField(0)), isolate);
+ // Always make a new copy of the code_table, since the old_code_table
+ // may still have placeholders for imports.
+ old_code_table = original->code_table();
+ code_table = factory->CopyFixedArray(old_code_table);
+
+ if (original->has_weak_owning_instance()) {
+ WeakCell* tmp = original->ptr_to_weak_owning_instance();
+ DCHECK(!tmp->cleared());
+ // There is already an owner, clone everything.
+ owner = Handle<JSObject>(JSObject::cast(tmp->value()), isolate);
+ // Insert the latest clone in front.
+ TRACE("Cloning from %d\n", original->instance_id());
+ compiled_module = WasmCompiledModule::Clone(isolate, original);
+ // Replace the strong reference to point to the new instance here.
+ // This allows any of the other instances, including the original,
+ // to be collected.
+ module_object->SetInternalField(0, *compiled_module);
+ compiled_module->set_weak_module_object(original->weak_module_object());
+ link_to_original = factory->NewWeakCell(original);
+ // Don't link to original here. We remember the original
+ // as a weak link. If that link isn't clear by the time we finish
+ // instantiating this instance, then we link it at that time.
+ compiled_module->reset_weak_next_instance();
+
+ // Clone the code for WASM functions and exports.
+ for (int i = 0; i < code_table->length(); ++i) {
+ Handle<Code> orig_code = code_table->GetValueChecked<Code>(isolate, i);
+ switch (orig_code->kind()) {
+ case Code::WASM_TO_JS_FUNCTION:
+ // Imports will be overwritten with newly compiled wrappers.
+ break;
+ case Code::JS_TO_WASM_FUNCTION:
+ case Code::WASM_FUNCTION: {
+ Handle<Code> code = factory->CopyCode(orig_code);
+ code_table->set(i, *code);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+ RecordStats(isolate, code_table);
+ } else {
+ // There was no owner, so we can reuse the original.
+ compiled_module = original;
+ TRACE("Reusing existing instance %d\n", compiled_module->instance_id());
}
+ compiled_module->set_code_table(code_table);
}
- CHECK(seen);
- Assembler::FlushICache(isolate, wrapper->instruction_start(),
- wrapper->instruction_size());
-}
-Handle<FixedArray> SetupIndirectFunctionTable(
- Isolate* isolate, Handle<FixedArray> wasm_functions,
- Handle<FixedArray> indirect_table_template) {
- Factory* factory = isolate->factory();
- Handle<FixedArray> cloned_indirect_tables =
- factory->CopyFixedArray(indirect_table_template);
- for (int i = 0; i < cloned_indirect_tables->length(); ++i) {
- Handle<FixedArray> orig_metadata =
- cloned_indirect_tables->GetValueChecked<FixedArray>(isolate, i);
- Handle<FixedArray> cloned_metadata = factory->CopyFixedArray(orig_metadata);
- cloned_indirect_tables->set(i, *cloned_metadata);
+ //--------------------------------------------------------------------------
+ // Allocate the instance object.
+ //--------------------------------------------------------------------------
+ Handle<Map> map = factory->NewMap(
+ JS_OBJECT_TYPE,
+ JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
+ Handle<JSObject> instance = factory->NewJSObjectFromMap(map, TENURED);
+ instance->SetInternalField(kWasmModuleCodeTable, *code_table);
- Handle<FixedArray> orig_table =
- cloned_metadata->GetValueChecked<FixedArray>(isolate, kTable);
- Handle<FixedArray> cloned_table = factory->CopyFixedArray(orig_table);
- cloned_metadata->set(kTable, *cloned_table);
- // Patch the cloned code to refer to the cloned kTable.
- for (int i = 0; i < wasm_functions->length(); ++i) {
- Handle<Code> wasm_function =
- wasm_functions->GetValueChecked<Code>(isolate, i);
- PatchFunctionTable(wasm_function, orig_table, cloned_table);
- }
+ //--------------------------------------------------------------------------
+ // Set up the memory for the new instance.
+ //--------------------------------------------------------------------------
+ MaybeHandle<JSArrayBuffer> old_memory;
+ // TODO(titzer): handle imported memory properly.
+
+ uint32_t min_mem_pages = compiled_module->min_memory_pages();
+ isolate->counters()->wasm_min_mem_pages_count()->AddSample(min_mem_pages);
+ // TODO(wasm): re-enable counter for max_mem_pages when we use that field.
+
+ if (memory.is_null() && min_mem_pages > 0) {
+ memory = AllocateMemory(thrower, isolate, min_mem_pages);
+ if (memory.is_null()) return nothing; // failed to allocate memory
}
- return cloned_indirect_tables;
-}
-Handle<FixedArray> CloneModuleForInstance(Isolate* isolate,
- Handle<FixedArray> original) {
- Factory* factory = isolate->factory();
- Handle<FixedArray> clone = factory->CopyFixedArray(original);
-
- // Clone each wasm code object.
- Handle<FixedArray> orig_wasm_functions =
- original->GetValueChecked<FixedArray>(isolate, kFunctions);
- Handle<FixedArray> clone_wasm_functions =
- factory->CopyFixedArray(orig_wasm_functions);
- clone->set(kFunctions, *clone_wasm_functions);
- for (int i = 0; i < clone_wasm_functions->length(); ++i) {
- Handle<Code> orig_code =
- clone_wasm_functions->GetValueChecked<Code>(isolate, i);
- Handle<Code> cloned_code = factory->CopyCode(orig_code);
- clone_wasm_functions->set(i, *cloned_code);
- }
-
- MaybeHandle<FixedArray> maybe_orig_exports =
- original->GetValue<FixedArray>(isolate, kExports);
- Handle<FixedArray> orig_exports;
- if (maybe_orig_exports.ToHandle(&orig_exports)) {
- Handle<FixedArray> cloned_exports = factory->CopyFixedArray(orig_exports);
- clone->set(kExports, *cloned_exports);
- for (int i = 0; i < orig_exports->length(); ++i) {
- Handle<FixedArray> export_metadata =
- orig_exports->GetValueChecked<FixedArray>(isolate, i);
- Handle<FixedArray> clone_metadata =
- factory->CopyFixedArray(export_metadata);
- cloned_exports->set(i, *clone_metadata);
- Handle<Code> orig_code =
- export_metadata->GetValueChecked<Code>(isolate, kExportCode);
- Handle<Code> cloned_code = factory->CopyCode(orig_code);
- clone_metadata->set(kExportCode, *cloned_code);
- // TODO(wasm): This is actually a uint32_t, but since FixedArray indexes
- // in int, we are taking the risk of invalid values.
- int exported_fct_index =
- Smi::cast(export_metadata->get(kExportedFunctionIndex))->value();
- CHECK_GE(exported_fct_index, 0);
- CHECK_LT(exported_fct_index, clone_wasm_functions->length());
- Handle<Code> new_target = clone_wasm_functions->GetValueChecked<Code>(
- isolate, exported_fct_index);
- PatchJSWrapper(isolate, cloned_code, new_target);
+ if (!memory.is_null()) {
+ instance->SetInternalField(kWasmMemArrayBuffer, *memory);
+ Address mem_start = static_cast<Address>(memory->backing_store());
+ uint32_t mem_size = static_cast<uint32_t>(memory->byte_length()->Number());
+ LoadDataSegments(compiled_module, mem_start, mem_size);
+
+ uint32_t old_mem_size = compiled_module->has_heap()
+ ? compiled_module->mem_size()
+ : compiled_module->default_mem_size();
+ Address old_mem_start =
+ compiled_module->has_heap()
+ ? static_cast<Address>(compiled_module->heap()->backing_store())
+ : nullptr;
+ RelocateInstanceCode(instance, old_mem_start, mem_start, old_mem_size,
+ mem_size);
+ compiled_module->set_heap(memory);
+ }
+
+ //--------------------------------------------------------------------------
+ // Set up the globals for the new instance.
+ //--------------------------------------------------------------------------
+ MaybeHandle<JSArrayBuffer> old_globals;
+ MaybeHandle<JSArrayBuffer> globals;
+ uint32_t globals_size = compiled_module->globals_size();
+ if (globals_size > 0) {
+ Handle<JSArrayBuffer> global_buffer = NewArrayBuffer(isolate, globals_size);
+ globals = global_buffer;
+ if (globals.is_null()) {
+ thrower->Error("Out of memory: wasm globals");
+ return nothing;
+ }
+ Address old_address =
+ owner.is_null() ? nullptr : GetGlobalStartAddressFromCodeTemplate(
+ *isolate->factory()->undefined_value(),
+ JSObject::cast(*owner));
+ RelocateGlobals(instance, old_address,
+ static_cast<Address>(global_buffer->backing_store()));
+ instance->SetInternalField(kWasmGlobalsArrayBuffer, *global_buffer);
+ }
+
+ //--------------------------------------------------------------------------
+ // Compile the import wrappers for the new instance.
+ //--------------------------------------------------------------------------
+ // TODO(titzer): handle imported globals and function tables.
+ int num_imported_functions = 0;
+ if (compiled_module->has_import_data()) {
+ Handle<FixedArray> import_data = compiled_module->import_data();
+ num_imported_functions = import_data->length();
+ for (int index = 0; index < num_imported_functions; index++) {
+ Handle<Code> import_wrapper =
+ CompileImportWrapper(isolate, ffi, index, import_data, thrower);
+ if (thrower->error()) return nothing;
+ code_table->set(index, *import_wrapper);
+ RecordStats(isolate, *import_wrapper);
}
}
- MaybeHandle<FixedArray> maybe_startup =
- original->GetValue<FixedArray>(isolate, kStartupFunction);
- if (!maybe_startup.is_null()) {
- Handle<FixedArray> startup_metadata =
- factory->CopyFixedArray(maybe_startup.ToHandleChecked());
- Handle<Code> startup_fct_clone = factory->CopyCode(
- startup_metadata->GetValueChecked<Code>(isolate, kExportCode));
- startup_metadata->set(kExportCode, *startup_fct_clone);
- clone->set(kStartupFunction, *startup_metadata);
- // TODO(wasm): see todo above about int vs size_t indexing in FixedArray.
- int startup_fct_index =
- Smi::cast(startup_metadata->get(kExportedFunctionIndex))->value();
- CHECK_GE(startup_fct_index, 0);
- CHECK_LT(startup_fct_index, clone_wasm_functions->length());
- Handle<Code> new_target =
- clone_wasm_functions->GetValueChecked<Code>(isolate, startup_fct_index);
- PatchJSWrapper(isolate, startup_fct_clone, new_target);
- }
- return clone;
-}
+ //--------------------------------------------------------------------------
+ // Set up the debug support for the new instance.
+ //--------------------------------------------------------------------------
+ // TODO(wasm): avoid referencing this stuff from the instance, use it off
+ // the compiled module instead. See the following 3 assignments:
+ if (compiled_module->has_module_bytes()) {
+ instance->SetInternalField(kWasmModuleBytesString,
+ compiled_module->ptr_to_module_bytes());
+ }
-// Instantiates a wasm module as a JSObject.
-// * allocates a backing store of {mem_size} bytes.
-// * installs a named property "memory" for that buffer if exported
-// * installs named properties on the object for exported functions
-// * compiles wasm code to machine code
-MaybeHandle<JSObject> WasmModule::Instantiate(
- Isolate* isolate, Handle<FixedArray> compiled_module,
- Handle<JSReceiver> ffi, Handle<JSArrayBuffer> memory) {
- HistogramTimerScope wasm_instantiate_module_time_scope(
- isolate->counters()->wasm_instantiate_module_time());
- ErrorThrower thrower(isolate, "WasmModule::Instantiate()");
- Factory* factory = isolate->factory();
+ if (compiled_module->has_function_names()) {
+ instance->SetInternalField(kWasmFunctionNamesArray,
+ compiled_module->ptr_to_function_names());
+ }
- compiled_module = CloneModuleForInstance(isolate, compiled_module);
+ {
+ Handle<Object> handle = factory->NewNumber(num_imported_functions);
+ instance->SetInternalField(kWasmNumImportedFunctions, *handle);
+ }
+
+ //--------------------------------------------------------------------------
+ // Set up the runtime support for the new instance.
+ //--------------------------------------------------------------------------
+ Handle<WeakCell> weak_link = isolate->factory()->NewWeakCell(instance);
+
+ for (int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs;
+ i < code_table->length(); ++i) {
+ Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
+ if (code->kind() == Code::WASM_FUNCTION) {
+ Handle<FixedArray> deopt_data =
+ isolate->factory()->NewFixedArray(2, TENURED);
+ deopt_data->set(0, *weak_link);
+ deopt_data->set(1, Smi::FromInt(static_cast<int>(i)));
+ deopt_data->set_length(2);
+ code->set_deoptimization_data(*deopt_data);
+ }
+ }
- // These fields are compulsory.
- Handle<FixedArray> code_table =
- compiled_module->GetValueChecked<FixedArray>(isolate, kFunctions);
+ //--------------------------------------------------------------------------
+ // Set up the indirect function tables for the new instance.
+ //--------------------------------------------------------------------------
+ {
+ std::vector<Handle<Code>> functions(
+ static_cast<size_t>(code_table->length()));
+ for (int i = 0; i < code_table->length(); ++i) {
+ functions[i] = code_table->GetValueChecked<Code>(isolate, i);
+ }
- std::vector<Handle<Code>> functions(
- static_cast<size_t>(code_table->length()));
- for (int i = 0; i < code_table->length(); ++i) {
- functions[static_cast<size_t>(i)] =
- code_table->GetValueChecked<Code>(isolate, i);
+ if (compiled_module->has_indirect_function_tables()) {
+ Handle<FixedArray> indirect_tables_template =
+ compiled_module->indirect_function_tables();
+ Handle<FixedArray> to_replace =
+ owner.is_null() ? indirect_tables_template
+ : handle(FixedArray::cast(owner->GetInternalField(
+ kWasmModuleFunctionTable)));
+ Handle<FixedArray> indirect_tables = SetupIndirectFunctionTable(
+ isolate, code_table, indirect_tables_template, to_replace);
+ for (int i = 0; i < indirect_tables->length(); ++i) {
+ Handle<FixedArray> metadata =
+ indirect_tables->GetValueChecked<FixedArray>(isolate, i);
+ uint32_t size = Smi::cast(metadata->get(kSize))->value();
+ Handle<FixedArray> table =
+ metadata->GetValueChecked<FixedArray>(isolate, kTable);
+ PopulateFunctionTable(table, size, &functions);
+ }
+ instance->SetInternalField(kWasmModuleFunctionTable, *indirect_tables);
+ }
}
- LinkModuleFunctions(isolate, functions);
- RecordStats(isolate, code_table);
+ //--------------------------------------------------------------------------
+ // Set up the exports object for the new instance.
+ //--------------------------------------------------------------------------
+ bool mem_export = compiled_module->export_memory();
+ ModuleOrigin origin = compiled_module->origin();
- MaybeHandle<JSObject> nothing;
+ if (compiled_module->has_exports() || mem_export) {
+ PropertyDescriptor desc;
+ desc.set_writable(false);
- Handle<Map> map = factory->NewMap(
- JS_OBJECT_TYPE,
- JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
- Handle<JSObject> js_object = factory->NewJSObjectFromMap(map, TENURED);
- js_object->SetInternalField(kWasmModuleCodeTable, *code_table);
-
- if (!(SetupInstanceHeap(isolate, compiled_module, js_object, memory,
- &thrower) &&
- SetupGlobals(isolate, compiled_module, js_object, &thrower) &&
- SetupImports(isolate, compiled_module, js_object, &thrower, ffi) &&
- SetupExportsObject(compiled_module, isolate, js_object, &thrower))) {
- return nothing;
+ Handle<JSObject> exports_object = instance;
+ if (origin == kWasmOrigin) {
+ // Create the "exports" object.
+ Handle<JSFunction> object_function = Handle<JSFunction>(
+ isolate->native_context()->object_function(), isolate);
+ exports_object = factory->NewJSObject(object_function, TENURED);
+ Handle<String> exports_name = factory->InternalizeUtf8String("exports");
+ JSObject::AddProperty(instance, exports_name, exports_object, READ_ONLY);
+ }
+ int first_export = -1;
+ // TODO(wasm): another iteration over the code objects.
+ for (int i = 0; i < code_table->length(); i++) {
+ Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
+ if (code->kind() == Code::JS_TO_WASM_FUNCTION) {
+ first_export = i;
+ break;
+ }
+ }
+ if (compiled_module->has_exports()) {
+ Handle<FixedArray> exports = compiled_module->exports();
+ int export_size = exports->length();
+ for (int i = 0; i < export_size; ++i) {
+ Handle<FixedArray> export_data =
+ exports->GetValueChecked<FixedArray>(isolate, i);
+ Handle<String> name =
+ export_data->GetValueChecked<String>(isolate, kExportName);
+ int arity = Smi::cast(export_data->get(kExportArity))->value();
+ MaybeHandle<ByteArray> signature =
+ export_data->GetValue<ByteArray>(isolate, kExportedSignature);
+ Handle<Code> export_code =
+ code_table->GetValueChecked<Code>(isolate, first_export + i);
+ Handle<JSFunction> function = WrapExportCodeAsJSFunction(
+ isolate, export_code, name, arity, signature, instance);
+ desc.set_value(function);
+ Maybe<bool> status = JSReceiver::DefineOwnProperty(
+ isolate, exports_object, name, &desc, Object::THROW_ON_ERROR);
+ if (!status.IsJust()) {
+ thrower->Error("export of %.*s failed.", name->length(),
+ name->ToCString().get());
+ return nothing;
+ }
+ }
+ }
+ if (mem_export) {
+ // Export the memory as a named property.
+ Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>(
+ JSArrayBuffer::cast(instance->GetInternalField(kWasmMemArrayBuffer)));
+ Handle<Object> memory_object =
+ WasmJs::CreateWasmMemoryObject(isolate, buffer, false, 0);
+ // TODO(titzer): export the memory with the correct name.
+ Handle<String> name = factory->InternalizeUtf8String("memory");
+ JSObject::AddProperty(exports_object, name, memory_object, READ_ONLY);
+ }
}
- SetDebugSupport(factory, compiled_module, js_object);
-
- FlushAssemblyCache(isolate, code_table);
-
- MaybeHandle<FixedArray> maybe_indirect_tables =
- compiled_module->GetValue<FixedArray>(isolate,
- kTableOfIndirectFunctionTables);
- Handle<FixedArray> indirect_tables_template;
- if (maybe_indirect_tables.ToHandle(&indirect_tables_template)) {
- Handle<FixedArray> indirect_tables = SetupIndirectFunctionTable(
- isolate, code_table, indirect_tables_template);
- for (int i = 0; i < indirect_tables->length(); ++i) {
- Handle<FixedArray> metadata =
- indirect_tables->GetValueChecked<FixedArray>(isolate, i);
- uint32_t size = Smi::cast(metadata->get(kSize))->value();
- Handle<FixedArray> table =
- metadata->GetValueChecked<FixedArray>(isolate, kTable);
- wasm::PopulateFunctionTable(table, size, &functions);
- }
- js_object->SetInternalField(kWasmModuleFunctionTable, *indirect_tables);
+ if (num_imported_functions > 0 || !owner.is_null()) {
+ // If the code was cloned, or new imports were compiled, patch.
+ PatchDirectCalls(old_code_table, code_table, num_imported_functions);
}
+ FlushICache(isolate, code_table);
+
+ //--------------------------------------------------------------------------
// Run the start function if one was specified.
- MaybeHandle<FixedArray> maybe_startup_fct =
- compiled_module->GetValue<FixedArray>(isolate, kStartupFunction);
- Handle<FixedArray> metadata;
- if (maybe_startup_fct.ToHandle(&metadata)) {
+ //--------------------------------------------------------------------------
+ if (compiled_module->has_startup_function()) {
+ Handle<FixedArray> startup_data = compiled_module->startup_function();
HandleScope scope(isolate);
+ int32_t start_index =
+ startup_data->GetValueChecked<Smi>(isolate, kExportedFunctionIndex)
+ ->value();
Handle<Code> startup_code =
- metadata->GetValueChecked<Code>(isolate, kExportCode);
- int arity = Smi::cast(metadata->get(kExportArity))->value();
+ code_table->GetValueChecked<Code>(isolate, start_index);
+ int arity = Smi::cast(startup_data->get(kExportArity))->value();
MaybeHandle<ByteArray> startup_signature =
- metadata->GetValue<ByteArray>(isolate, kExportedSignature);
+ startup_data->GetValue<ByteArray>(isolate, kExportedSignature);
Handle<JSFunction> startup_fct = WrapExportCodeAsJSFunction(
isolate, startup_code, factory->InternalizeUtf8String("start"), arity,
- startup_signature, js_object);
+ startup_signature, instance);
RecordStats(isolate, *startup_code);
// Call the JS function.
Handle<Object> undefined = isolate->factory()->undefined_value();
@@ -1403,35 +1507,86 @@ MaybeHandle<JSObject> WasmModule::Instantiate(
Execution::Call(isolate, startup_fct, undefined, 0, nullptr);
if (retval.is_null()) {
- thrower.Error("WASM.instantiateModule(): start function failed");
+ thrower->Error("WASM.instantiateModule(): start function failed");
return nothing;
}
}
- DCHECK(wasm::IsWasmObject(*js_object));
- return js_object;
+ DCHECK(wasm::IsWasmObject(*instance));
+
+ {
+ Handle<WeakCell> link_to_owner = factory->NewWeakCell(instance);
+
+ Handle<Object> global_handle = isolate->global_handles()->Create(*instance);
+ Handle<WeakCell> link_to_clone = factory->NewWeakCell(compiled_module);
+ {
+ DisallowHeapAllocation no_gc;
+ compiled_module->set_weak_owning_instance(link_to_owner);
+ Handle<WeakCell> next;
+ if (link_to_original.ToHandle(&next) && !next->cleared()) {
+ WasmCompiledModule* original = WasmCompiledModule::cast(next->value());
+ DCHECK(original->has_weak_owning_instance());
+ DCHECK(!original->weak_owning_instance()->cleared());
+ compiled_module->set_weak_next_instance(next);
+ original->set_weak_prev_instance(link_to_clone);
+ }
+
+ compiled_module->set_weak_owning_instance(link_to_owner);
+ instance->SetInternalField(kWasmCompiledModule, *compiled_module);
+ GlobalHandles::MakeWeak(global_handle.location(),
+ global_handle.location(), &InstanceFinalizer,
+ v8::WeakCallbackType::kFinalizer);
+ }
+ }
+ TRACE("Finishing instance %d\n", compiled_module->instance_id());
+ TRACE_CHAIN(WasmCompiledModule::cast(module_object->GetInternalField(0)));
+ return instance;
}
-// TODO(mtrofin): remove this once we move to WASM_DIRECT_CALL
-Handle<Code> ModuleEnv::GetCodeOrPlaceholder(uint32_t index) const {
- DCHECK(IsValidFunction(index));
- if (!placeholders.empty()) return placeholders[index];
- DCHECK_NOT_NULL(instance);
- return instance->function_code[index];
+#if DEBUG
+uint32_t WasmCompiledModule::instance_id_counter_ = 0;
+#endif
+
+Handle<WasmCompiledModule> WasmCompiledModule::New(Isolate* isolate,
+ uint32_t min_memory_pages,
+ uint32_t globals_size,
+ bool export_memory,
+ ModuleOrigin origin) {
+ Handle<FixedArray> ret =
+ isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
+ // Globals size is expected to fit into an int without overflow. This is not
+ // supported by the spec at the moment, however, we don't support array
+ // buffer sizes over 1g, so, for now, we avoid alocating a HeapNumber for
+ // the globals size. The CHECK guards this assumption.
+ CHECK_GE(static_cast<int>(globals_size), 0);
+ ret->set(kID_min_memory_pages,
+ Smi::FromInt(static_cast<int>(min_memory_pages)));
+ ret->set(kID_globals_size, Smi::FromInt(static_cast<int>(globals_size)));
+ ret->set(kID_export_memory, Smi::FromInt(static_cast<int>(export_memory)));
+ ret->set(kID_origin, Smi::FromInt(static_cast<int>(origin)));
+ WasmCompiledModule::cast(*ret)->Init();
+ return handle(WasmCompiledModule::cast(*ret));
}
-Handle<Code> ModuleEnv::GetImportCode(uint32_t index) {
- DCHECK(IsValidImport(index));
- return instance ? instance->import_code[index] : Handle<Code>::null();
+void WasmCompiledModule::Init() {
+#if DEBUG
+ set(kID_instance_id, Smi::FromInt(instance_id_counter_++));
+ TRACE("New compiled module id: %d\n", instance_id());
+#endif
}
-compiler::CallDescriptor* ModuleEnv::GetCallDescriptor(Zone* zone,
- uint32_t index) {
- DCHECK(IsValidFunction(index));
- // Always make a direct call to whatever is in the table at that location.
- // A wrapper will be generated for FFI calls.
- const WasmFunction* function = &module->functions[index];
- return GetWasmCallDescriptor(zone, function->sig);
+void WasmCompiledModule::PrintInstancesChain() {
+#if DEBUG
+ if (!FLAG_trace_wasm_instances) return;
+ for (WasmCompiledModule* current = this; current != nullptr;) {
+ PrintF("->%d", current->instance_id());
+ if (current->ptr_to_weak_next_instance() == nullptr) break;
+ CHECK(!current->ptr_to_weak_next_instance()->cleared());
+ current =
+ WasmCompiledModule::cast(current->ptr_to_weak_next_instance()->value());
+ }
+ PrintF("\n");
+#endif
}
Handle<Object> GetWasmFunctionNameOrNull(Isolate* isolate, Handle<Object> wasm,
@@ -1577,93 +1732,188 @@ int GetNumberOfFunctions(JSObject* wasm) {
return ByteArray::cast(func_names_obj)->get_int(0);
}
-Handle<JSObject> CreateCompiledModuleObject(
- Isolate* isolate, Handle<FixedArray> compiled_module) {
- Handle<JSFunction> module_cons(
- isolate->native_context()->wasm_module_constructor());
- Handle<JSObject> module_obj = isolate->factory()->NewJSObject(module_cons);
+Handle<JSObject> CreateCompiledModuleObject(Isolate* isolate,
+ Handle<FixedArray> compiled_module,
+ ModuleOrigin origin) {
+ Handle<JSObject> module_obj;
+ if (origin == ModuleOrigin::kWasmOrigin) {
+ Handle<JSFunction> module_cons(
+ isolate->native_context()->wasm_module_constructor());
+ module_obj = isolate->factory()->NewJSObject(module_cons);
+ } else {
+ DCHECK(origin == ModuleOrigin::kAsmJsOrigin);
+ Handle<Map> map = isolate->factory()->NewMap(
+ JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
+ module_obj = isolate->factory()->NewJSObjectFromMap(map, TENURED);
+ }
module_obj->SetInternalField(0, *compiled_module);
- Handle<Symbol> module_sym(isolate->native_context()->wasm_module_sym());
- Object::SetProperty(module_obj, module_sym, module_obj, STRICT).Check();
+ if (origin == ModuleOrigin::kWasmOrigin) {
+ Handle<Symbol> module_sym(isolate->native_context()->wasm_module_sym());
+ Object::SetProperty(module_obj, module_sym, module_obj, STRICT).Check();
+ }
+ Handle<WeakCell> link_to_module = isolate->factory()->NewWeakCell(module_obj);
+ WasmCompiledModule::cast(*compiled_module)
+ ->set_weak_module_object(link_to_module);
return module_obj;
}
-namespace testing {
-
-int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
- const byte* module_end, bool asm_js) {
- HandleScope scope(isolate);
+MaybeHandle<JSObject> CreateModuleObjectFromBytes(Isolate* isolate,
+ const byte* start,
+ const byte* end,
+ ErrorThrower* thrower,
+ ModuleOrigin origin) {
+ MaybeHandle<JSObject> nothing;
Zone zone(isolate->allocator());
- ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
-
- // Decode the module, but don't verify function bodies, since we'll
- // be compiling them anyway.
- ModuleResult decoding_result =
- DecodeWasmModule(isolate, &zone, module_start, module_end, false,
- asm_js ? kAsmJsOrigin : kWasmOrigin);
-
- std::unique_ptr<const WasmModule> module(decoding_result.val);
- if (decoding_result.failed()) {
- // Module verification failed. throw.
- thrower.Error("WASM.compileRun() failed: %s",
- decoding_result.error_msg.get());
- return -1;
+ ModuleResult result =
+ DecodeWasmModule(isolate, &zone, start, end, false, origin);
+ std::unique_ptr<const WasmModule> decoded_module(result.val);
+ if (result.failed()) {
+ thrower->Failed("Wasm decoding failed", result);
+ return nothing;
}
+ MaybeHandle<FixedArray> compiled_module =
+ decoded_module->CompileFunctions(isolate, thrower);
+ if (compiled_module.is_null()) return nothing;
- if (module->import_table.size() > 0) {
- thrower.Error("Not supported: module has imports.");
- }
- if (module->export_table.size() == 0) {
- thrower.Error("Not supported: module has no exports.");
+ return CreateCompiledModuleObject(isolate, compiled_module.ToHandleChecked(),
+ origin);
+}
+
+bool ValidateModuleBytes(Isolate* isolate, const byte* start, const byte* end,
+ ErrorThrower* thrower, ModuleOrigin origin) {
+ Zone zone(isolate->allocator());
+ ModuleResult result =
+ DecodeWasmModule(isolate, &zone, start, end, false, origin);
+ if (result.ok()) {
+ DCHECK_NOT_NULL(result.val);
+ delete result.val;
+ return true;
}
+ return false;
+}
- if (thrower.error()) return -1;
- MaybeHandle<FixedArray> compiled_module =
- module->CompileFunctions(isolate, &thrower);
+MaybeHandle<JSArrayBuffer> GetInstanceMemory(Isolate* isolate,
+ Handle<JSObject> instance) {
+ Object* mem = instance->GetInternalField(kWasmMemArrayBuffer);
+ DCHECK(IsWasmObject(*instance));
+ if (mem->IsUndefined(isolate)) return MaybeHandle<JSArrayBuffer>();
+ return Handle<JSArrayBuffer>(JSArrayBuffer::cast(mem));
+}
- if (compiled_module.is_null()) return -1;
- Handle<JSObject> instance =
- WasmModule::Instantiate(isolate, compiled_module.ToHandleChecked(),
- Handle<JSReceiver>::null(),
- Handle<JSArrayBuffer>::null())
- .ToHandleChecked();
+void SetInstanceMemory(Handle<JSObject> instance, JSArrayBuffer* buffer) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(IsWasmObject(*instance));
+ instance->SetInternalField(kWasmMemArrayBuffer, buffer);
+ WasmCompiledModule* module =
+ WasmCompiledModule::cast(instance->GetInternalField(kWasmCompiledModule));
+ module->set_ptr_to_heap(buffer);
+}
- return CallFunction(isolate, instance, &thrower, "main", 0, nullptr);
+int32_t GetInstanceMemorySize(Isolate* isolate, Handle<JSObject> instance) {
+ MaybeHandle<JSArrayBuffer> maybe_mem_buffer =
+ GetInstanceMemory(isolate, instance);
+ Handle<JSArrayBuffer> buffer;
+ if (!maybe_mem_buffer.ToHandle(&buffer)) {
+ return 0;
+ } else {
+ return buffer->byte_length()->Number() / WasmModule::kPageSize;
+ }
}
-int32_t CallFunction(Isolate* isolate, Handle<JSObject> instance,
- ErrorThrower* thrower, const char* name, int argc,
- Handle<Object> argv[]) {
- Handle<Name> exports = isolate->factory()->InternalizeUtf8String("exports");
- Handle<JSObject> exports_object = Handle<JSObject>::cast(
- JSObject::GetProperty(instance, exports).ToHandleChecked());
- Handle<Name> main_name = isolate->factory()->NewStringFromAsciiChecked(name);
- PropertyDescriptor desc;
- Maybe<bool> property_found = JSReceiver::GetOwnPropertyDescriptor(
- isolate, exports_object, main_name, &desc);
- if (!property_found.FromMaybe(false)) return -1;
-
- Handle<JSFunction> main_export = Handle<JSFunction>::cast(desc.value());
-
- // Call the JS function.
- Handle<Object> undefined = isolate->factory()->undefined_value();
- MaybeHandle<Object> retval =
- Execution::Call(isolate, main_export, undefined, argc, argv);
-
- // The result should be a number.
- if (retval.is_null()) {
- thrower->Error("WASM.compileRun() failed: Invocation was null");
+int32_t GrowInstanceMemory(Isolate* isolate, Handle<JSObject> instance,
+ uint32_t pages) {
+ Address old_mem_start = nullptr;
+ uint32_t old_size = 0, new_size = 0;
+
+ MaybeHandle<JSArrayBuffer> maybe_mem_buffer =
+ GetInstanceMemory(isolate, instance);
+ Handle<JSArrayBuffer> old_buffer;
+ if (!maybe_mem_buffer.ToHandle(&old_buffer)) {
+ // If module object does not have linear memory associated with it,
+ // Allocate new array buffer of given size.
+ // TODO(gdeepti): Fix bounds check to take into account size of memtype.
+ new_size = pages * WasmModule::kPageSize;
+ // The code generated in the wasm compiler guarantees this precondition.
+ DCHECK(pages <= WasmModule::kMaxMemPages);
+ } else {
+ old_mem_start = static_cast<Address>(old_buffer->backing_store());
+ old_size = old_buffer->byte_length()->Number();
+ // If the old memory was zero-sized, we should have been in the
+ // "undefined" case above.
+ DCHECK_NOT_NULL(old_mem_start);
+ DCHECK_NE(0, old_size);
+ DCHECK(old_size + pages * WasmModule::kPageSize <=
+ std::numeric_limits<uint32_t>::max());
+ new_size = old_size + pages * WasmModule::kPageSize;
+ }
+
+ if (new_size <= old_size ||
+ WasmModule::kMaxMemPages * WasmModule::kPageSize <= new_size) {
return -1;
}
- Handle<Object> result = retval.ToHandleChecked();
- if (result->IsSmi()) {
- return Smi::cast(*result)->value();
+ Handle<JSArrayBuffer> buffer = NewArrayBuffer(isolate, new_size);
+ if (buffer.is_null()) return -1;
+ Address new_mem_start = static_cast<Address>(buffer->backing_store());
+ if (old_size != 0) {
+ memcpy(new_mem_start, old_mem_start, old_size);
}
- if (result->IsHeapNumber()) {
- return static_cast<int32_t>(HeapNumber::cast(*result)->value());
+ SetInstanceMemory(instance, *buffer);
+ if (!UpdateWasmModuleMemory(instance, old_mem_start, new_mem_start, old_size,
+ new_size)) {
+ return -1;
}
- thrower->Error("WASM.compileRun() failed: Return value should be number");
- return -1;
+ DCHECK(old_size % WasmModule::kPageSize == 0);
+ return (old_size / WasmModule::kPageSize);
+}
+
+namespace testing {
+
+void ValidateInstancesChain(Isolate* isolate, Handle<JSObject> module_obj,
+ int instance_count) {
+ CHECK_GE(instance_count, 0);
+ DisallowHeapAllocation no_gc;
+ WasmCompiledModule* compiled_module =
+ WasmCompiledModule::cast(module_obj->GetInternalField(0));
+ CHECK_EQ(
+ JSObject::cast(compiled_module->ptr_to_weak_module_object()->value()),
+ *module_obj);
+ Object* prev = nullptr;
+ int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
+ WasmCompiledModule* current_instance = compiled_module;
+ while (current_instance->has_weak_next_instance()) {
+ CHECK((prev == nullptr && !current_instance->has_weak_prev_instance()) ||
+ current_instance->ptr_to_weak_prev_instance()->value() == prev);
+ CHECK_EQ(current_instance->ptr_to_weak_module_object()->value(),
+ *module_obj);
+ CHECK(
+ IsWasmObject(current_instance->ptr_to_weak_owning_instance()->value()));
+ prev = current_instance;
+ current_instance = WasmCompiledModule::cast(
+ current_instance->ptr_to_weak_next_instance()->value());
+ ++found_instances;
+ CHECK_LE(found_instances, instance_count);
+ }
+ CHECK_EQ(found_instances, instance_count);
+}
+
+void ValidateModuleState(Isolate* isolate, Handle<JSObject> module_obj) {
+ DisallowHeapAllocation no_gc;
+ WasmCompiledModule* compiled_module =
+ WasmCompiledModule::cast(module_obj->GetInternalField(0));
+ CHECK(compiled_module->has_weak_module_object());
+ CHECK_EQ(compiled_module->ptr_to_weak_module_object()->value(), *module_obj);
+ CHECK(!compiled_module->has_weak_prev_instance());
+ CHECK(!compiled_module->has_weak_next_instance());
+ CHECK(!compiled_module->has_weak_owning_instance());
+}
+
+void ValidateOrphanedInstance(Isolate* isolate, Handle<JSObject> instance) {
+ DisallowHeapAllocation no_gc;
+ CHECK(IsWasmObject(*instance));
+ WasmCompiledModule* compiled_module =
+ WasmCompiledModule::cast(instance->GetInternalField(kWasmCompiledModule));
+ CHECK(compiled_module->has_weak_module_object());
+ CHECK(compiled_module->ptr_to_weak_module_object()->cleared());
}
} // namespace testing
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 0c3df51d76..ac75042392 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -27,84 +27,71 @@ const size_t kMaxModuleSize = 1024 * 1024 * 1024;
const size_t kMaxFunctionSize = 128 * 1024;
const size_t kMaxStringSize = 256;
const uint32_t kWasmMagic = 0x6d736100;
-const uint32_t kWasmVersion = 0x0b;
+const uint32_t kWasmVersion = 0x0c;
+
const uint8_t kWasmFunctionTypeForm = 0x40;
+const uint8_t kWasmAnyFunctionTypeForm = 0x20;
+
+enum WasmSectionCode {
+ kUnknownSectionCode = 0, // code for unknown sections
+ kTypeSectionCode = 1, // Function signature declarations
+ kImportSectionCode = 2, // Import declarations
+ kFunctionSectionCode = 3, // Function declarations
+ kTableSectionCode = 4, // Indirect function table and other tables
+ kMemorySectionCode = 5, // Memory attributes
+ kGlobalSectionCode = 6, // Global declarations
+ kExportSectionCode = 7, // Exports
+ kStartSectionCode = 8, // Start function declaration
+ kElementSectionCode = 9, // Elements section
+ kCodeSectionCode = 10, // Function code
+ kDataSectionCode = 11, // Data segments
+ kNameSectionCode = 12, // Name section (encoded as a string)
+};
+
+inline bool IsValidSectionCode(uint8_t byte) {
+ return kTypeSectionCode <= byte && byte <= kDataSectionCode;
+}
-// WebAssembly sections are named as strings in the binary format, but
-// internally V8 uses an enum to handle them.
-//
-// Entries have the form F(enumerator, string).
-#define FOR_EACH_WASM_SECTION_TYPE(F) \
- F(Signatures, 1, "type") \
- F(ImportTable, 2, "import") \
- F(FunctionSignatures, 3, "function") \
- F(FunctionTable, 4, "table") \
- F(Memory, 5, "memory") \
- F(ExportTable, 6, "export") \
- F(StartFunction, 7, "start") \
- F(FunctionBodies, 8, "code") \
- F(DataSegments, 9, "data") \
- F(Names, 10, "name") \
- F(Globals, 0, "global") \
- F(End, 0, "end")
-
-// Contants for the above section types: {LEB128 length, characters...}.
-#define WASM_SECTION_MEMORY 6, 'm', 'e', 'm', 'o', 'r', 'y'
-#define WASM_SECTION_SIGNATURES 4, 't', 'y', 'p', 'e'
-#define WASM_SECTION_GLOBALS 6, 'g', 'l', 'o', 'b', 'a', 'l'
-#define WASM_SECTION_DATA_SEGMENTS 4, 'd', 'a', 't', 'a'
-#define WASM_SECTION_FUNCTION_TABLE 5, 't', 'a', 'b', 'l', 'e'
-#define WASM_SECTION_END 3, 'e', 'n', 'd'
-#define WASM_SECTION_START_FUNCTION 5, 's', 't', 'a', 'r', 't'
-#define WASM_SECTION_IMPORT_TABLE 6, 'i', 'm', 'p', 'o', 'r', 't'
-#define WASM_SECTION_EXPORT_TABLE 6, 'e', 'x', 'p', 'o', 'r', 't'
-#define WASM_SECTION_FUNCTION_SIGNATURES \
- 8, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n'
-#define WASM_SECTION_FUNCTION_BODIES 4, 'c', 'o', 'd', 'e'
-#define WASM_SECTION_NAMES 4, 'n', 'a', 'm', 'e'
-
-// Constants for the above section headers' size (LEB128 + characters).
-#define WASM_SECTION_MEMORY_SIZE ((size_t)7)
-#define WASM_SECTION_SIGNATURES_SIZE ((size_t)5)
-#define WASM_SECTION_GLOBALS_SIZE ((size_t)7)
-#define WASM_SECTION_DATA_SEGMENTS_SIZE ((size_t)5)
-#define WASM_SECTION_FUNCTION_TABLE_SIZE ((size_t)6)
-#define WASM_SECTION_END_SIZE ((size_t)4)
-#define WASM_SECTION_START_FUNCTION_SIZE ((size_t)6)
-#define WASM_SECTION_IMPORT_TABLE_SIZE ((size_t)7)
-#define WASM_SECTION_EXPORT_TABLE_SIZE ((size_t)7)
-#define WASM_SECTION_FUNCTION_SIGNATURES_SIZE ((size_t)9)
-#define WASM_SECTION_FUNCTION_BODIES_SIZE ((size_t)5)
-#define WASM_SECTION_NAMES_SIZE ((size_t)5)
+const char* SectionName(WasmSectionCode code);
class WasmDebugInfo;
-struct WasmSection {
- enum class Code : uint32_t {
-#define F(enumerator, order, string) enumerator,
- FOR_EACH_WASM_SECTION_TYPE(F)
-#undef F
- Max
- };
- static WasmSection::Code begin();
- static WasmSection::Code end();
- static WasmSection::Code next(WasmSection::Code code);
- static const char* getName(Code code);
- static int getOrder(Code code);
- static size_t getNameLength(Code code);
- static WasmSection::Code lookup(const byte* string, uint32_t length);
+// Constants for fixed-size elements within a module.
+static const uint32_t kMaxReturnCount = 1;
+static const uint8_t kResizableMaximumFlag = 1;
+static const int32_t kInvalidFunctionIndex = -1;
+
+enum WasmExternalKind {
+ kExternalFunction = 0,
+ kExternalTable = 1,
+ kExternalMemory = 2,
+ kExternalGlobal = 3
};
-enum WasmFunctionDeclBit {
- kDeclFunctionName = 0x01,
- kDeclFunctionExport = 0x08
+// Representation of an initializer expression.
+struct WasmInitExpr {
+ enum WasmInitKind {
+ kNone,
+ kGlobalIndex,
+ kI32Const,
+ kI64Const,
+ kF32Const,
+ kF64Const
+ } kind;
+
+ union {
+ int32_t i32_const;
+ int64_t i64_const;
+ float f32_const;
+ double f64_const;
+ uint32_t global_index;
+ } val;
};
-// Constants for fixed-size elements within a module.
-static const size_t kDeclMemorySize = 3;
-static const size_t kDeclDataSegmentSize = 13;
-
-static const uint32_t kMaxReturnCount = 1;
+#define NO_INIT \
+ { \
+ WasmInitExpr::kNone, { 0u } \
+ }
// Static representation of a WASM function.
struct WasmFunction {
@@ -115,54 +102,69 @@ struct WasmFunction {
uint32_t name_length; // length in bytes of the name.
uint32_t code_start_offset; // offset in the module bytes of code start.
uint32_t code_end_offset; // offset in the module bytes of code end.
-};
-
-// Static representation of an imported WASM function.
-struct WasmImport {
- FunctionSig* sig; // signature of the function.
- uint32_t sig_index; // index into the signature table.
- uint32_t module_name_offset; // offset in module bytes of the module name.
- uint32_t module_name_length; // length in bytes of the module name.
- uint32_t function_name_offset; // offset in module bytes of the import name.
- uint32_t function_name_length; // length in bytes of the import name.
-};
-
-// Static representation of an exported WASM function.
-struct WasmExport {
- uint32_t func_index; // index into the function table.
- uint32_t name_offset; // offset in module bytes of the name to export.
- uint32_t name_length; // length in bytes of the exported name.
+ bool imported;
+ bool exported;
};
// Static representation of a wasm global variable.
struct WasmGlobal {
- uint32_t name_offset; // offset in the module bytes of the name, if any.
- uint32_t name_length; // length in bytes of the global name.
LocalType type; // type of the global.
- uint32_t offset; // offset from beginning of globals area.
- bool exported; // true if this global is exported.
+ bool mutability; // {true} if mutable.
+ WasmInitExpr init; // the initialization expression of the global.
+ uint32_t offset; // offset into global memory.
+ bool imported; // true if imported.
+ bool exported; // true if exported.
};
// Static representation of a wasm data segment.
struct WasmDataSegment {
- uint32_t dest_addr; // destination memory address of the data.
+ WasmInitExpr dest_addr; // destination memory address of the data.
uint32_t source_offset; // start offset in the module bytes.
uint32_t source_size; // end offset in the module bytes.
- bool init; // true if loaded upon instantiation.
};
// Static representation of a wasm indirect call table.
struct WasmIndirectFunctionTable {
- uint32_t size; // initial table size.
- uint32_t max_size; // maximum table size.
- std::vector<uint16_t> values; // function table.
+ uint32_t size; // initial table size.
+ uint32_t max_size; // maximum table size.
+ std::vector<int32_t> values; // function table, -1 indicating invalid.
+ bool imported; // true if imported.
+ bool exported; // true if exported.
+};
+
+// Static representation of how to initialize a table.
+struct WasmTableInit {
+ uint32_t table_index;
+ WasmInitExpr offset;
+ std::vector<uint32_t> entries;
+};
+
+// Static representation of a WASM import.
+struct WasmImport {
+ uint32_t module_name_length; // length in bytes of the module name.
+ uint32_t module_name_offset; // offset in module bytes of the module name.
+ uint32_t field_name_length; // length in bytes of the import name.
+ uint32_t field_name_offset; // offset in module bytes of the import name.
+ WasmExternalKind kind; // kind of the import.
+ uint32_t index; // index into the respective space.
+};
+
+// Static representation of a WASM export.
+struct WasmExport {
+ uint32_t name_length; // length in bytes of the exported name.
+ uint32_t name_offset; // offset in module bytes of the name to export.
+ WasmExternalKind kind; // kind of the export.
+ uint32_t index; // index into the respective space.
};
enum ModuleOrigin { kWasmOrigin, kAsmJsOrigin };
+class WasmCompiledModule;
+
// Static representation of a module.
struct WasmModule {
static const uint32_t kPageSize = 0x10000; // Page size, 64kb.
+ static const uint32_t kMaxLegalPages = 65536; // Maximum legal pages
static const uint32_t kMinMemPages = 1; // Minimum memory size = 64kb
static const uint32_t kMaxMemPages = 16384; // Maximum memory size = 1gb
@@ -171,7 +173,6 @@ struct WasmModule {
uint32_t min_mem_pages; // minimum size of the memory in 64k pages.
uint32_t max_mem_pages; // maximum size of the memory in 64k pages.
bool mem_export; // true if the memory is exported.
- bool mem_external; // true if the memory is external.
// TODO(wasm): reconcile start function index being an int with
// the fact that we index on uint32_t, so we may technically not be
// able to represent some start_function_index -es.
@@ -180,12 +181,16 @@ struct WasmModule {
std::vector<WasmGlobal> globals; // globals in this module.
uint32_t globals_size; // size of globals table.
+ uint32_t num_imported_functions; // number of imported functions.
+ uint32_t num_declared_functions; // number of declared functions.
+ uint32_t num_exported_functions; // number of exported functions.
std::vector<FunctionSig*> signatures; // signatures in this module.
std::vector<WasmFunction> functions; // functions in this module.
std::vector<WasmDataSegment> data_segments; // data segments in this module.
std::vector<WasmIndirectFunctionTable> function_tables; // function tables.
std::vector<WasmImport> import_table; // import table.
std::vector<WasmExport> export_table; // export table.
+ std::vector<WasmTableInit> table_inits; // initializations of tables
// We store the semaphore here to extend its lifetime. In <libc-2.21, which we
// use on the try bots, semaphore::Wait() can return while some compilation
// tasks are still executing semaphore::Signal(). If the semaphore is cleaned
@@ -233,13 +238,12 @@ struct WasmModule {
}
// Creates a new instantiation of the module in the given isolate.
- static MaybeHandle<JSObject> Instantiate(Isolate* isolate,
- Handle<FixedArray> compiled_module,
- Handle<JSReceiver> ffi,
- Handle<JSArrayBuffer> memory);
+ V8_EXPORT_PRIVATE static MaybeHandle<JSObject> Instantiate(
+ Isolate* isolate, ErrorThrower* thrower, Handle<JSObject> module_object,
+ Handle<JSReceiver> ffi, Handle<JSArrayBuffer> memory);
- MaybeHandle<FixedArray> CompileFunctions(Isolate* isolate,
- ErrorThrower* thrower) const;
+ MaybeHandle<WasmCompiledModule> CompileFunctions(Isolate* isolate,
+ ErrorThrower* thrower) const;
private:
DISALLOW_COPY_AND_ASSIGN(WasmModule);
@@ -255,7 +259,6 @@ struct WasmModuleInstance {
Handle<JSArrayBuffer> globals_buffer; // Handle to array buffer of globals.
std::vector<Handle<FixedArray>> function_tables; // indirect function tables.
std::vector<Handle<Code>> function_code; // code objects for each function.
- std::vector<Handle<Code>> import_code; // code objects for each import.
// -- raw memory ------------------------------------------------------------
byte* mem_start; // start of linear memory.
uint32_t mem_size; // size of the linear memory.
@@ -266,7 +269,6 @@ struct WasmModuleInstance {
: module(m),
function_tables(m->function_tables.size()),
function_code(m->functions.size()),
- import_code(m->import_table.size()),
mem_start(nullptr),
mem_size(0),
globals_start(nullptr) {}
@@ -278,9 +280,6 @@ struct ModuleEnv {
const WasmModule* module;
WasmModuleInstance* instance;
ModuleOrigin origin;
- // TODO(mtrofin): remove this once we introduce WASM_DIRECT_CALL
- // reloc infos.
- std::vector<Handle<Code>> placeholders;
bool IsValidGlobal(uint32_t index) const {
return module && index < module->globals.size();
@@ -291,9 +290,6 @@ struct ModuleEnv {
bool IsValidSignature(uint32_t index) const {
return module && index < module->signatures.size();
}
- bool IsValidImport(uint32_t index) const {
- return module && index < module->import_table.size();
- }
bool IsValidTable(uint32_t index) const {
return module && index < module->function_tables.size();
}
@@ -305,10 +301,6 @@ struct ModuleEnv {
DCHECK(IsValidFunction(index));
return module->functions[index].sig;
}
- FunctionSig* GetImportSignature(uint32_t index) {
- DCHECK(IsValidImport(index));
- return module->import_table[index].sig;
- }
FunctionSig* GetSignature(uint32_t index) {
DCHECK(IsValidSignature(index));
return module->signatures[index];
@@ -320,14 +312,15 @@ struct ModuleEnv {
bool asm_js() { return origin == kAsmJsOrigin; }
- Handle<Code> GetCodeOrPlaceholder(uint32_t index) const;
- Handle<Code> GetImportCode(uint32_t index);
+ Handle<Code> GetFunctionCode(uint32_t index) {
+ DCHECK_NOT_NULL(instance);
+ return instance->function_code[index];
+ }
static compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone,
FunctionSig* sig);
static compiler::CallDescriptor* GetI32WasmCallDescriptor(
Zone* zone, compiler::CallDescriptor* descriptor);
- compiler::CallDescriptor* GetCallDescriptor(Zone* zone, uint32_t index);
};
// A helper for printing out the names of functions.
@@ -347,6 +340,128 @@ typedef Result<WasmFunction*> FunctionResult;
typedef std::vector<std::pair<int, int>> FunctionOffsets;
typedef Result<FunctionOffsets> FunctionOffsetsResult;
+class WasmCompiledModule : public FixedArray {
+ public:
+ static WasmCompiledModule* cast(Object* fixed_array) {
+ return reinterpret_cast<WasmCompiledModule*>(fixed_array);
+ }
+
+#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID) \
+ Handle<TYPE> NAME() const { return handle(ptr_to_##NAME()); } \
+ \
+ MaybeHandle<TYPE> maybe_##NAME() const { \
+ if (has_##NAME()) return NAME(); \
+ return MaybeHandle<TYPE>(); \
+ } \
+ \
+ TYPE* ptr_to_##NAME() const { \
+ Object* obj = get(ID); \
+ if (!obj->Is##TYPE()) return nullptr; \
+ return TYPE::cast(obj); \
+ } \
+ \
+ void set_##NAME(Handle<TYPE> value) { set_ptr_to_##NAME(*value); } \
+ \
+ void set_ptr_to_##NAME(TYPE* value) { set(ID, value); } \
+ \
+ bool has_##NAME() const { return get(ID)->Is##TYPE(); } \
+ \
+ void reset_##NAME() { set_undefined(ID); }
+
+#define WCM_OBJECT(TYPE, NAME) WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME)
+
+#define WCM_SMALL_NUMBER(TYPE, NAME) \
+ TYPE NAME() const { \
+ return static_cast<TYPE>(Smi::cast(get(kID_##NAME))->value()); \
+ }
+
+#define WCM_WEAK_LINK(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME); \
+ \
+ Handle<TYPE> NAME() const { \
+ return handle(TYPE::cast(weak_##NAME()->value())); \
+ }
+
+#define CORE_WCM_PROPERTY_TABLE(MACRO) \
+ MACRO(OBJECT, FixedArray, code_table) \
+ MACRO(OBJECT, FixedArray, import_data) \
+ MACRO(OBJECT, FixedArray, exports) \
+ MACRO(OBJECT, FixedArray, startup_function) \
+ MACRO(OBJECT, FixedArray, indirect_function_tables) \
+ MACRO(OBJECT, String, module_bytes) \
+ MACRO(OBJECT, ByteArray, function_names) \
+ MACRO(SMALL_NUMBER, uint32_t, min_memory_pages) \
+ MACRO(OBJECT, FixedArray, data_segments_info) \
+ MACRO(OBJECT, ByteArray, data_segments) \
+ MACRO(SMALL_NUMBER, uint32_t, globals_size) \
+ MACRO(OBJECT, JSArrayBuffer, heap) \
+ MACRO(SMALL_NUMBER, bool, export_memory) \
+ MACRO(SMALL_NUMBER, ModuleOrigin, origin) \
+ MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
+ MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
+ MACRO(WEAK_LINK, JSObject, owning_instance) \
+ MACRO(WEAK_LINK, JSObject, module_object)
+
+#if DEBUG
+#define DEBUG_ONLY_TABLE(MACRO) MACRO(SMALL_NUMBER, uint32_t, instance_id)
+#else
+#define DEBUG_ONLY_TABLE(IGNORE)
+ uint32_t instance_id() const { return -1; }
+#endif
+
+#define WCM_PROPERTY_TABLE(MACRO) \
+ CORE_WCM_PROPERTY_TABLE(MACRO) \
+ DEBUG_ONLY_TABLE(MACRO)
+
+ private:
+ enum PropertyIndices {
+#define INDICES(IGNORE1, IGNORE2, NAME) kID_##NAME,
+ WCM_PROPERTY_TABLE(INDICES) Count
+#undef INDICES
+ };
+
+ public:
+ static Handle<WasmCompiledModule> New(Isolate* isolate,
+ uint32_t min_memory_pages,
+ uint32_t globals_size,
+ bool export_memory,
+ ModuleOrigin origin);
+
+ static Handle<WasmCompiledModule> Clone(Isolate* isolate,
+ Handle<WasmCompiledModule> module) {
+ Handle<WasmCompiledModule> ret = Handle<WasmCompiledModule>::cast(
+ isolate->factory()->CopyFixedArray(module));
+ ret->Init();
+ ret->reset_weak_owning_instance();
+ ret->reset_weak_next_instance();
+ ret->reset_weak_prev_instance();
+ return ret;
+ }
+
+ uint32_t mem_size() const {
+ DCHECK(has_heap());
+ return heap()->byte_length()->Number();
+ }
+
+ uint32_t default_mem_size() const {
+ return min_memory_pages() * WasmModule::kPageSize;
+ }
+
+#define DECLARATION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
+ WCM_PROPERTY_TABLE(DECLARATION)
+#undef DECLARATION
+
+ void PrintInstancesChain();
+
+ private:
+#if DEBUG
+ static uint32_t instance_id_counter_;
+#endif
+ void Init();
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(WasmCompiledModule);
+};
+
// Extract a function name from the given wasm object.
// Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
// valid UTF-8 string.
@@ -399,18 +514,38 @@ void PopulateFunctionTable(Handle<FixedArray> table, uint32_t table_size,
const std::vector<Handle<Code>>* code_table);
Handle<JSObject> CreateCompiledModuleObject(Isolate* isolate,
- Handle<FixedArray> compiled_module);
+ Handle<FixedArray> compiled_module,
+ ModuleOrigin origin);
+
+V8_EXPORT_PRIVATE MaybeHandle<JSObject> CreateModuleObjectFromBytes(
+ Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
+ ModuleOrigin origin);
+
+V8_EXPORT_PRIVATE bool ValidateModuleBytes(Isolate* isolate, const byte* start,
+ const byte* end,
+ ErrorThrower* thrower,
+ ModuleOrigin origin);
+
+// Get the number of imported functions for a WASM instance.
+uint32_t GetNumImportedFunctions(Handle<JSObject> wasm_object);
+
+// Assumed to be called with a code object associated to a wasm module instance.
+// Intended to be called from runtime functions.
+// Returns nullptr on failing to get owning instance.
+Object* GetOwningWasmInstance(Code* code);
+
+int32_t GetInstanceMemorySize(Isolate* isolate, Handle<JSObject> instance);
+
+int32_t GrowInstanceMemory(Isolate* isolate, Handle<JSObject> instance,
+ uint32_t pages);
namespace testing {
-// Decode, verify, and run the function labeled "main" in the
-// given encoded module. The module should have no imports.
-int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
- const byte* module_end, bool asm_js = false);
+void ValidateInstancesChain(Isolate* isolate, Handle<JSObject> module_obj,
+ int instance_count);
+void ValidateModuleState(Isolate* isolate, Handle<JSObject> module_obj);
+void ValidateOrphanedInstance(Isolate* isolate, Handle<JSObject> instance);
-int32_t CallFunction(Isolate* isolate, Handle<JSObject> instance,
- ErrorThrower* thrower, const char* name, int argc,
- Handle<Object> argv[]);
} // namespace testing
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 8f54207661..cd2dde4748 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -38,6 +38,18 @@ const char* WasmOpcodes::ShortOpcodeName(WasmOpcode opcode) {
return "Unknown";
}
+bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+#define CHECK_PREFIX(name, opcode) \
+ case k##name##Prefix: \
+ return true;
+ FOREACH_PREFIX(CHECK_PREFIX)
+#undef CHECK_PREFIX
+ default:
+ return false;
+ }
+}
+
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
for (size_t i = 0; i < sig.return_count(); ++i) {
@@ -74,6 +86,7 @@ static const FunctionSig* kSimdExprSigs[] = {
nullptr, FOREACH_SIMD_SIGNATURE(DECLARE_SIMD_SIG_ENTRY)};
static byte kSimpleExprSigTable[256];
+static byte kSimpleAsmjsExprSigTable[256];
static byte kSimdExprSigTable[256];
// Initialize the signature table.
@@ -81,14 +94,16 @@ static void InitSigTables() {
#define SET_SIG_TABLE(name, opcode, sig) \
kSimpleExprSigTable[opcode] = static_cast<int>(kSigEnum_##sig) + 1;
FOREACH_SIMPLE_OPCODE(SET_SIG_TABLE);
- FOREACH_SIMPLE_MEM_OPCODE(SET_SIG_TABLE);
- FOREACH_ASMJS_COMPAT_OPCODE(SET_SIG_TABLE);
#undef SET_SIG_TABLE
+#define SET_ASMJS_SIG_TABLE(name, opcode, sig) \
+ kSimpleAsmjsExprSigTable[opcode] = static_cast<int>(kSigEnum_##sig) + 1;
+ FOREACH_ASMJS_COMPAT_OPCODE(SET_ASMJS_SIG_TABLE);
+#undef SET_ASMJS_SIG_TABLE
byte simd_index;
#define SET_SIG_TABLE(name, opcode, sig) \
simd_index = opcode & 0xff; \
kSimdExprSigTable[simd_index] = static_cast<int>(kSigEnum_##sig) + 1;
- FOREACH_SIMD_OPCODE(SET_SIG_TABLE)
+ FOREACH_SIMD_0_OPERAND_OPCODE(SET_SIG_TABLE)
#undef SET_SIG_TABLE
}
@@ -102,6 +117,10 @@ class SigTable {
return const_cast<FunctionSig*>(
kSimpleExprSigs[kSimpleExprSigTable[static_cast<byte>(opcode)]]);
}
+ FunctionSig* AsmjsSignature(WasmOpcode opcode) const {
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kSimpleAsmjsExprSigTable[static_cast<byte>(opcode)]]);
+ }
FunctionSig* SimdSignature(WasmOpcode opcode) const {
return const_cast<FunctionSig*>(
kSimdExprSigs[kSimdExprSigTable[static_cast<byte>(opcode & 0xff)]]);
@@ -118,6 +137,10 @@ FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
}
}
+FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
+ return sig_table.Get().AsmjsSignature(opcode);
+}
+
// TODO(titzer): pull WASM_64 up to a common header.
#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
#define WASM_64 1
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 4d66e567ef..03827b2035 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -22,6 +22,9 @@ enum LocalTypeCode {
kLocalS128 = 5
};
+// Type code for multi-value block types.
+static const uint8_t kMultivalBlock = 0x41;
+
// We reuse the internal machine type to represent WebAssembly AST types.
// A typedef improves readability without adding a whole new type system.
typedef MachineRepresentation LocalType;
@@ -44,7 +47,7 @@ const WasmCodePosition kNoCodePosition = -1;
// Control expressions and blocks.
#define FOREACH_CONTROL_OPCODE(V) \
- V(Nop, 0x00, _) \
+ V(Unreachable, 0x00, _) \
V(Block, 0x01, _) \
V(Loop, 0x02, _) \
V(If, 0x03, _) \
@@ -54,13 +57,10 @@ const WasmCodePosition kNoCodePosition = -1;
V(BrIf, 0x07, _) \
V(BrTable, 0x08, _) \
V(Return, 0x09, _) \
- V(Unreachable, 0x0a, _) \
+ V(Nop, 0x0a, _) \
V(Throw, 0xfa, _) \
- V(TryCatch, 0xfb, _) \
- V(TryCatchFinally, 0xfc, _) \
- V(TryFinally, 0xfd, _) \
+ V(Try, 0xfb, _) \
V(Catch, 0xfe, _) \
- V(Finally, 0xff, _) \
V(End, 0x0F, _)
// Constants, locals, globals, and calls.
@@ -71,9 +71,10 @@ const WasmCodePosition kNoCodePosition = -1;
V(F32Const, 0x13, _) \
V(GetLocal, 0x14, _) \
V(SetLocal, 0x15, _) \
+ V(TeeLocal, 0x19, _) \
+ V(Drop, 0x0b, _) \
V(CallFunction, 0x16, _) \
V(CallIndirect, 0x17, _) \
- V(CallImport, 0x18, _) \
V(I8Const, 0xcb, _) \
V(GetGlobal, 0xbb, _) \
V(SetGlobal, 0xbc, _)
@@ -273,141 +274,144 @@ const WasmCodePosition kNoCodePosition = -1;
V(I32AsmjsSConvertF64, 0xe2, i_d) \
V(I32AsmjsUConvertF64, 0xe3, i_d)
-#define FOREACH_SIMD_OPCODE(V) \
- V(F32x4Splat, 0xe500, s_f) \
- V(F32x4ExtractLane, 0xe501, f_si) \
- V(F32x4ReplaceLane, 0xe502, s_sif) \
- V(F32x4Abs, 0xe503, s_s) \
- V(F32x4Neg, 0xe504, s_s) \
- V(F32x4Sqrt, 0xe505, s_s) \
- V(F32x4RecipApprox, 0xe506, s_s) \
- V(F32x4SqrtApprox, 0xe507, s_s) \
- V(F32x4Add, 0xe508, s_ss) \
- V(F32x4Sub, 0xe509, s_ss) \
- V(F32x4Mul, 0xe50a, s_ss) \
- V(F32x4Div, 0xe50b, s_ss) \
- V(F32x4Min, 0xe50c, s_ss) \
- V(F32x4Max, 0xe50d, s_ss) \
- V(F32x4MinNum, 0xe50e, s_ss) \
- V(F32x4MaxNum, 0xe50f, s_ss) \
- V(F32x4Eq, 0xe510, s_ss) \
- V(F32x4Ne, 0xe511, s_ss) \
- V(F32x4Lt, 0xe512, s_ss) \
- V(F32x4Le, 0xe513, s_ss) \
- V(F32x4Gt, 0xe514, s_ss) \
- V(F32x4Ge, 0xe515, s_ss) \
- V(F32x4Select, 0xe516, s_sss) \
- V(F32x4Swizzle, 0xe517, s_s) \
- V(F32x4Shuffle, 0xe518, s_ss) \
- V(F32x4FromInt32x4, 0xe519, s_s) \
- V(F32x4FromUint32x4, 0xe51a, s_s) \
- V(I32x4Splat, 0xe51b, s_i) \
- V(I32x4ExtractLane, 0xe51c, i_si) \
- V(I32x4ReplaceLane, 0xe51d, s_sii) \
- V(I32x4Neg, 0xe51e, s_s) \
- V(I32x4Add, 0xe51f, s_ss) \
- V(I32x4Sub, 0xe520, s_ss) \
- V(I32x4Mul, 0xe521, s_ss) \
- V(I32x4Min_s, 0xe522, s_ss) \
- V(I32x4Max_s, 0xe523, s_ss) \
- V(I32x4Shl, 0xe524, s_si) \
- V(I32x4Shr_s, 0xe525, s_si) \
- V(I32x4Eq, 0xe526, s_ss) \
- V(I32x4Ne, 0xe527, s_ss) \
- V(I32x4Lt_s, 0xe528, s_ss) \
- V(I32x4Le_s, 0xe529, s_ss) \
- V(I32x4Gt_s, 0xe52a, s_ss) \
- V(I32x4Ge_s, 0xe52b, s_ss) \
- V(I32x4Select, 0xe52c, s_sss) \
- V(I32x4Swizzle, 0xe52d, s_s) \
- V(I32x4Shuffle, 0xe52e, s_ss) \
- V(I32x4FromFloat32x4, 0xe52f, s_s) \
- V(I32x4Min_u, 0xe530, s_ss) \
- V(I32x4Max_u, 0xe531, s_ss) \
- V(I32x4Shr_u, 0xe532, s_ss) \
- V(I32x4Lt_u, 0xe533, s_ss) \
- V(I32x4Le_u, 0xe534, s_ss) \
- V(I32x4Gt_u, 0xe535, s_ss) \
- V(I32x4Ge_u, 0xe536, s_ss) \
- V(Ui32x4FromFloat32x4, 0xe537, s_s) \
- V(I16x8Splat, 0xe538, s_i) \
- V(I16x8ExtractLane, 0xe539, i_si) \
- V(I16x8ReplaceLane, 0xe53a, s_sii) \
- V(I16x8Neg, 0xe53b, s_s) \
- V(I16x8Add, 0xe53c, s_ss) \
- V(I16x8AddSaturate_s, 0xe53d, s_ss) \
- V(I16x8Sub, 0xe53e, s_ss) \
- V(I16x8SubSaturate_s, 0xe53f, s_ss) \
- V(I16x8Mul, 0xe540, s_ss) \
- V(I16x8Min_s, 0xe541, s_ss) \
- V(I16x8Max_s, 0xe542, s_ss) \
- V(I16x8Shl, 0xe543, s_si) \
- V(I16x8Shr_s, 0xe544, s_si) \
- V(I16x8Eq, 0xe545, s_ss) \
- V(I16x8Ne, 0xe546, s_ss) \
- V(I16x8Lt_s, 0xe547, s_ss) \
- V(I16x8Le_s, 0xe548, s_ss) \
- V(I16x8Gt_s, 0xe549, s_ss) \
- V(I16x8Ge_s, 0xe54a, s_ss) \
- V(I16x8Select, 0xe54b, s_sss) \
- V(I16x8Swizzle, 0xe54c, s_s) \
- V(I16x8Shuffle, 0xe54d, s_ss) \
- V(I16x8AddSaturate_u, 0xe54e, s_ss) \
- V(I16x8SubSaturate_u, 0xe54f, s_ss) \
- V(I16x8Min_u, 0xe550, s_ss) \
- V(I16x8Max_u, 0xe551, s_ss) \
- V(I16x8Shr_u, 0xe552, s_si) \
- V(I16x8Lt_u, 0xe553, s_ss) \
- V(I16x8Le_u, 0xe554, s_ss) \
- V(I16x8Gt_u, 0xe555, s_ss) \
- V(I16x8Ge_u, 0xe556, s_ss) \
- V(I8x16Splat, 0xe557, s_i) \
- V(I8x16ExtractLane, 0xe558, i_si) \
- V(I8x16ReplaceLane, 0xe559, s_sii) \
- V(I8x16Neg, 0xe55a, s_s) \
- V(I8x16Add, 0xe55b, s_ss) \
- V(I8x16AddSaturate_s, 0xe55c, s_ss) \
- V(I8x16Sub, 0xe55d, s_ss) \
- V(I8x16SubSaturate_s, 0xe55e, s_ss) \
- V(I8x16Mul, 0xe55f, s_ss) \
- V(I8x16Min_s, 0xe560, s_ss) \
- V(I8x16Max_s, 0xe561, s_ss) \
- V(I8x16Shl, 0xe562, s_si) \
- V(I8x16Shr_s, 0xe563, s_si) \
- V(I8x16Eq, 0xe564, s_ss) \
- V(I8x16Neq, 0xe565, s_ss) \
- V(I8x16Lt_s, 0xe566, s_ss) \
- V(I8x16Le_s, 0xe567, s_ss) \
- V(I8x16Gt_s, 0xe568, s_ss) \
- V(I8x16Ge_s, 0xe569, s_ss) \
- V(I8x16Select, 0xe56a, s_sss) \
- V(I8x16Swizzle, 0xe56b, s_s) \
- V(I8x16Shuffle, 0xe56c, s_ss) \
- V(I8x16AddSaturate_u, 0xe56d, s_ss) \
- V(I8x16Sub_saturate_u, 0xe56e, s_ss) \
- V(I8x16Min_u, 0xe56f, s_ss) \
- V(I8x16Max_u, 0xe570, s_ss) \
- V(I8x16Shr_u, 0xe571, s_ss) \
- V(I8x16Lt_u, 0xe572, s_ss) \
- V(I8x16Le_u, 0xe573, s_ss) \
- V(I8x16Gt_u, 0xe574, s_ss) \
- V(I8x16Ge_u, 0xe575, s_ss) \
- V(S128And, 0xe576, s_ss) \
- V(S128Ior, 0xe577, s_ss) \
- V(S128Xor, 0xe578, s_ss) \
+#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
+ V(F32x4Splat, 0xe500, s_f) \
+ V(F32x4ReplaceLane, 0xe502, s_sif) \
+ V(F32x4Abs, 0xe503, s_s) \
+ V(F32x4Neg, 0xe504, s_s) \
+ V(F32x4Sqrt, 0xe505, s_s) \
+ V(F32x4RecipApprox, 0xe506, s_s) \
+ V(F32x4SqrtApprox, 0xe507, s_s) \
+ V(F32x4Add, 0xe508, s_ss) \
+ V(F32x4Sub, 0xe509, s_ss) \
+ V(F32x4Mul, 0xe50a, s_ss) \
+ V(F32x4Div, 0xe50b, s_ss) \
+ V(F32x4Min, 0xe50c, s_ss) \
+ V(F32x4Max, 0xe50d, s_ss) \
+ V(F32x4MinNum, 0xe50e, s_ss) \
+ V(F32x4MaxNum, 0xe50f, s_ss) \
+ V(F32x4Eq, 0xe510, s_ss) \
+ V(F32x4Ne, 0xe511, s_ss) \
+ V(F32x4Lt, 0xe512, s_ss) \
+ V(F32x4Le, 0xe513, s_ss) \
+ V(F32x4Gt, 0xe514, s_ss) \
+ V(F32x4Ge, 0xe515, s_ss) \
+ V(F32x4Select, 0xe516, s_sss) \
+ V(F32x4Swizzle, 0xe517, s_s) \
+ V(F32x4Shuffle, 0xe518, s_ss) \
+ V(F32x4FromInt32x4, 0xe519, s_s) \
+ V(F32x4FromUint32x4, 0xe51a, s_s) \
+ V(I32x4Splat, 0xe51b, s_i) \
+ V(I32x4ReplaceLane, 0xe51d, s_sii) \
+ V(I32x4Neg, 0xe51e, s_s) \
+ V(I32x4Add, 0xe51f, s_ss) \
+ V(I32x4Sub, 0xe520, s_ss) \
+ V(I32x4Mul, 0xe521, s_ss) \
+ V(I32x4Min_s, 0xe522, s_ss) \
+ V(I32x4Max_s, 0xe523, s_ss) \
+ V(I32x4Shl, 0xe524, s_si) \
+ V(I32x4Shr_s, 0xe525, s_si) \
+ V(I32x4Eq, 0xe526, s_ss) \
+ V(I32x4Ne, 0xe527, s_ss) \
+ V(I32x4Lt_s, 0xe528, s_ss) \
+ V(I32x4Le_s, 0xe529, s_ss) \
+ V(I32x4Gt_s, 0xe52a, s_ss) \
+ V(I32x4Ge_s, 0xe52b, s_ss) \
+ V(I32x4Select, 0xe52c, s_sss) \
+ V(I32x4Swizzle, 0xe52d, s_s) \
+ V(I32x4Shuffle, 0xe52e, s_ss) \
+ V(I32x4FromFloat32x4, 0xe52f, s_s) \
+ V(I32x4Min_u, 0xe530, s_ss) \
+ V(I32x4Max_u, 0xe531, s_ss) \
+ V(I32x4Shr_u, 0xe532, s_ss) \
+ V(I32x4Lt_u, 0xe533, s_ss) \
+ V(I32x4Le_u, 0xe534, s_ss) \
+ V(I32x4Gt_u, 0xe535, s_ss) \
+ V(I32x4Ge_u, 0xe536, s_ss) \
+ V(Ui32x4FromFloat32x4, 0xe537, s_s) \
+ V(I16x8Splat, 0xe538, s_i) \
+ V(I16x8ReplaceLane, 0xe53a, s_sii) \
+ V(I16x8Neg, 0xe53b, s_s) \
+ V(I16x8Add, 0xe53c, s_ss) \
+ V(I16x8AddSaturate_s, 0xe53d, s_ss) \
+ V(I16x8Sub, 0xe53e, s_ss) \
+ V(I16x8SubSaturate_s, 0xe53f, s_ss) \
+ V(I16x8Mul, 0xe540, s_ss) \
+ V(I16x8Min_s, 0xe541, s_ss) \
+ V(I16x8Max_s, 0xe542, s_ss) \
+ V(I16x8Shl, 0xe543, s_si) \
+ V(I16x8Shr_s, 0xe544, s_si) \
+ V(I16x8Eq, 0xe545, s_ss) \
+ V(I16x8Ne, 0xe546, s_ss) \
+ V(I16x8Lt_s, 0xe547, s_ss) \
+ V(I16x8Le_s, 0xe548, s_ss) \
+ V(I16x8Gt_s, 0xe549, s_ss) \
+ V(I16x8Ge_s, 0xe54a, s_ss) \
+ V(I16x8Select, 0xe54b, s_sss) \
+ V(I16x8Swizzle, 0xe54c, s_s) \
+ V(I16x8Shuffle, 0xe54d, s_ss) \
+ V(I16x8AddSaturate_u, 0xe54e, s_ss) \
+ V(I16x8SubSaturate_u, 0xe54f, s_ss) \
+ V(I16x8Min_u, 0xe550, s_ss) \
+ V(I16x8Max_u, 0xe551, s_ss) \
+ V(I16x8Shr_u, 0xe552, s_si) \
+ V(I16x8Lt_u, 0xe553, s_ss) \
+ V(I16x8Le_u, 0xe554, s_ss) \
+ V(I16x8Gt_u, 0xe555, s_ss) \
+ V(I16x8Ge_u, 0xe556, s_ss) \
+ V(I8x16Splat, 0xe557, s_i) \
+ V(I8x16ReplaceLane, 0xe559, s_sii) \
+ V(I8x16Neg, 0xe55a, s_s) \
+ V(I8x16Add, 0xe55b, s_ss) \
+ V(I8x16AddSaturate_s, 0xe55c, s_ss) \
+ V(I8x16Sub, 0xe55d, s_ss) \
+ V(I8x16SubSaturate_s, 0xe55e, s_ss) \
+ V(I8x16Mul, 0xe55f, s_ss) \
+ V(I8x16Min_s, 0xe560, s_ss) \
+ V(I8x16Max_s, 0xe561, s_ss) \
+ V(I8x16Shl, 0xe562, s_si) \
+ V(I8x16Shr_s, 0xe563, s_si) \
+ V(I8x16Eq, 0xe564, s_ss) \
+ V(I8x16Neq, 0xe565, s_ss) \
+ V(I8x16Lt_s, 0xe566, s_ss) \
+ V(I8x16Le_s, 0xe567, s_ss) \
+ V(I8x16Gt_s, 0xe568, s_ss) \
+ V(I8x16Ge_s, 0xe569, s_ss) \
+ V(I8x16Select, 0xe56a, s_sss) \
+ V(I8x16Swizzle, 0xe56b, s_s) \
+ V(I8x16Shuffle, 0xe56c, s_ss) \
+ V(I8x16AddSaturate_u, 0xe56d, s_ss) \
+ V(I8x16Sub_saturate_u, 0xe56e, s_ss) \
+ V(I8x16Min_u, 0xe56f, s_ss) \
+ V(I8x16Max_u, 0xe570, s_ss) \
+ V(I8x16Shr_u, 0xe571, s_ss) \
+ V(I8x16Lt_u, 0xe572, s_ss) \
+ V(I8x16Le_u, 0xe573, s_ss) \
+ V(I8x16Gt_u, 0xe574, s_ss) \
+ V(I8x16Ge_u, 0xe575, s_ss) \
+ V(S128And, 0xe576, s_ss) \
+ V(S128Ior, 0xe577, s_ss) \
+ V(S128Xor, 0xe578, s_ss) \
V(S128Not, 0xe579, s_s)
+#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
+ V(F32x4ExtractLane, 0xe501, _) \
+ V(I32x4ExtractLane, 0xe51c, _) \
+ V(I16x8ExtractLane, 0xe539, _) \
+ V(I8x16ExtractLane, 0xe558, _)
+
// All opcodes.
-#define FOREACH_OPCODE(V) \
- FOREACH_CONTROL_OPCODE(V) \
- FOREACH_MISC_OPCODE(V) \
- FOREACH_SIMPLE_OPCODE(V) \
- FOREACH_SIMPLE_MEM_OPCODE(V) \
- FOREACH_STORE_MEM_OPCODE(V) \
- FOREACH_LOAD_MEM_OPCODE(V) \
- FOREACH_MISC_MEM_OPCODE(V) \
- FOREACH_ASMJS_COMPAT_OPCODE(V) \
- FOREACH_SIMD_OPCODE(V)
+#define FOREACH_OPCODE(V) \
+ FOREACH_CONTROL_OPCODE(V) \
+ FOREACH_MISC_OPCODE(V) \
+ FOREACH_SIMPLE_OPCODE(V) \
+ FOREACH_SIMPLE_MEM_OPCODE(V) \
+ FOREACH_STORE_MEM_OPCODE(V) \
+ FOREACH_LOAD_MEM_OPCODE(V) \
+ FOREACH_MISC_MEM_OPCODE(V) \
+ FOREACH_ASMJS_COMPAT_OPCODE(V) \
+ FOREACH_SIMD_0_OPERAND_OPCODE(V) \
+ FOREACH_SIMD_1_OPERAND_OPCODE(V)
// All signatures.
#define FOREACH_SIGNATURE(V) \
@@ -443,12 +447,10 @@ const WasmCodePosition kNoCodePosition = -1;
#define FOREACH_SIMD_SIGNATURE(V) \
V(s_s, kAstS128, kAstS128) \
V(s_f, kAstS128, kAstF32) \
- V(f_si, kAstF32, kAstS128, kAstI32) \
V(s_sif, kAstS128, kAstS128, kAstI32, kAstF32) \
V(s_ss, kAstS128, kAstS128, kAstS128) \
V(s_sss, kAstS128, kAstS128, kAstS128, kAstS128) \
V(s_i, kAstS128, kAstI32) \
- V(i_si, kAstI32, kAstS128, kAstI32) \
V(s_sii, kAstS128, kAstS128, kAstI32, kAstI32) \
V(s_si, kAstS128, kAstS128, kAstI32)
@@ -489,6 +491,8 @@ class WasmOpcodes {
static const char* OpcodeName(WasmOpcode opcode);
static const char* ShortOpcodeName(WasmOpcode opcode);
static FunctionSig* Signature(WasmOpcode opcode);
+ static FunctionSig* AsmjsSignature(WasmOpcode opcode);
+ static bool IsPrefixOpcode(WasmOpcode opcode);
static int TrapReasonToMessageId(TrapReason reason);
static const char* TrapReasonMessage(TrapReason reason);
@@ -497,6 +501,8 @@ class WasmOpcodes {
return 1 << ElementSizeLog2Of(type.representation());
}
+ static byte MemSize(LocalType type) { return 1 << ElementSizeLog2Of(type); }
+
static LocalTypeCode LocalTypeCodeFor(LocalType type) {
switch (type) {
case kAstI32:
@@ -507,10 +513,10 @@ class WasmOpcodes {
return kLocalF32;
case kAstF64:
return kLocalF64;
- case kAstStmt:
- return kLocalVoid;
case kAstS128:
return kLocalS128;
+ case kAstStmt:
+ return kLocalVoid;
default:
UNREACHABLE();
return kLocalVoid;
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 30268ac8ad..7d251f03df 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -27,15 +27,13 @@ std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code) {
return os;
}
-void ErrorThrower::Error(const char* format, ...) {
+void ErrorThrower::Format(i::Handle<i::JSFunction> constructor,
+ const char* format, va_list args) {
// Only report the first error.
if (error()) return;
char buffer[256];
- va_list arguments;
- va_start(arguments, format);
- base::OS::VSNPrintF(buffer, 255, format, arguments);
- va_end(arguments);
+ base::OS::VSNPrintF(buffer, 255, format, args);
std::ostringstream str;
if (context_ != nullptr) {
@@ -43,12 +41,39 @@ void ErrorThrower::Error(const char* format, ...) {
}
str << buffer;
- message_ = isolate_->factory()->NewStringFromAsciiChecked(str.str().c_str());
+ i::Handle<i::String> message =
+ isolate_->factory()->NewStringFromAsciiChecked(str.str().c_str());
+ exception_ = isolate_->factory()->NewError(constructor, message);
+}
+
+void ErrorThrower::Error(const char* format, ...) {
+ if (error()) return;
+ va_list arguments;
+ va_start(arguments, format);
+ Format(isolate_->error_function(), format, arguments);
+ va_end(arguments);
+}
+
+void ErrorThrower::TypeError(const char* format, ...) {
+ if (error()) return;
+ va_list arguments;
+ va_start(arguments, format);
+ Format(isolate_->type_error_function(), format, arguments);
+ va_end(arguments);
+}
+
+void ErrorThrower::RangeError(const char* format, ...) {
+ if (error()) return;
+ va_list arguments;
+ va_start(arguments, format);
+ CHECK(*isolate_->range_error_function() != *isolate_->type_error_function());
+ Format(isolate_->range_error_function(), format, arguments);
+ va_end(arguments);
}
ErrorThrower::~ErrorThrower() {
if (error() && !isolate_->has_pending_exception()) {
- isolate_->ScheduleThrow(*message_);
+ isolate_->ScheduleThrow(*exception_);
}
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index f16c15906d..ecc54e5b7a 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -22,19 +22,7 @@ namespace wasm {
// Error codes for programmatic checking of the decoder's verification.
enum ErrorCode {
kSuccess,
- kError, // TODO(titzer): remove me
- kOutOfMemory, // decoder ran out of memory
- kEndOfCode, // end of code reached prematurely
- kInvalidOpcode, // found invalid opcode
- kUnreachableCode, // found unreachable code
- kImproperContinue, // improperly nested continue
- kImproperBreak, // improperly nested break
- kReturnCount, // return count mismatch
- kTypeError, // type mismatch
- kInvalidLocalIndex, // invalid local
- kInvalidGlobalIndex, // invalid global
- kInvalidFunctionIndex, // invalid function
- kInvalidMemType // invalid memory type
+ kError, // TODO(titzer): introduce real error codes
};
// The overall result of decoding a function or a module.
@@ -97,33 +85,37 @@ std::ostream& operator<<(std::ostream& os, const Result<T>& result) {
std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code);
// A helper for generating error messages that bubble up to JS exceptions.
-class ErrorThrower {
+class V8_EXPORT_PRIVATE ErrorThrower {
public:
- ErrorThrower(Isolate* isolate, const char* context)
+ ErrorThrower(i::Isolate* isolate, const char* context)
: isolate_(isolate), context_(context) {}
~ErrorThrower();
PRINTF_FORMAT(2, 3) void Error(const char* fmt, ...);
+ PRINTF_FORMAT(2, 3) void TypeError(const char* fmt, ...);
+ PRINTF_FORMAT(2, 3) void RangeError(const char* fmt, ...);
template <typename T>
void Failed(const char* error, Result<T>& result) {
std::ostringstream str;
str << error << result;
- return Error("%s", str.str().c_str());
+ Error("%s", str.str().c_str());
}
- i::Handle<i::String> Reify() {
- auto result = message_;
- message_ = i::Handle<i::String>();
+ i::Handle<i::Object> Reify() {
+ i::Handle<i::Object> result = exception_;
+ exception_ = i::Handle<i::Object>::null();
return result;
}
- bool error() const { return !message_.is_null(); }
+ bool error() const { return !exception_.is_null(); }
private:
- Isolate* isolate_;
+ void Format(i::Handle<i::JSFunction> constructor, const char* fmt, va_list);
+
+ i::Isolate* isolate_;
const char* context_;
- i::Handle<i::String> message_;
+ i::Handle<i::Object> exception_;
};
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 9a0d18e9c2..d202aadf7a 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -79,6 +79,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cross_compile) return;
if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
+ if (cpu.has_ssse3() && FLAG_enable_ssse3) supported_ |= 1u << SSSE3;
if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
// SAHF is not generally available in long mode.
if (cpu.has_sahf() && FLAG_enable_sahf) supported_ |= 1u << SAHF;
@@ -105,13 +106,15 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() {
printf(
- "SSE3=%d SSE4_1=%d SAHF=%d AVX=%d FMA3=%d BMI1=%d BMI2=%d LZCNT=%d "
+ "SSE3=%d SSSE3=%d SSE4_1=%d SAHF=%d AVX=%d FMA3=%d BMI1=%d BMI2=%d "
+ "LZCNT=%d "
"POPCNT=%d ATOM=%d\n",
- CpuFeatures::IsSupported(SSE3), CpuFeatures::IsSupported(SSE4_1),
- CpuFeatures::IsSupported(SAHF), CpuFeatures::IsSupported(AVX),
- CpuFeatures::IsSupported(FMA3), CpuFeatures::IsSupported(BMI1),
- CpuFeatures::IsSupported(BMI2), CpuFeatures::IsSupported(LZCNT),
- CpuFeatures::IsSupported(POPCNT), CpuFeatures::IsSupported(ATOM));
+ CpuFeatures::IsSupported(SSE3), CpuFeatures::IsSupported(SSSE3),
+ CpuFeatures::IsSupported(SSE4_1), CpuFeatures::IsSupported(SAHF),
+ CpuFeatures::IsSupported(AVX), CpuFeatures::IsSupported(FMA3),
+ CpuFeatures::IsSupported(BMI1), CpuFeatures::IsSupported(BMI2),
+ CpuFeatures::IsSupported(LZCNT), CpuFeatures::IsSupported(POPCNT),
+ CpuFeatures::IsSupported(ATOM));
}
// -----------------------------------------------------------------------------
@@ -2834,6 +2837,77 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
emit(imm8);
}
+void Assembler::pextrb(Register dst, XMMRegister src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ DCHECK(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x14);
+ emit_sse_operand(src, dst);
+ emit(imm8);
+}
+
+void Assembler::pextrb(const Operand& dst, XMMRegister src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ DCHECK(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x14);
+ emit_sse_operand(src, dst);
+ emit(imm8);
+}
+
+void Assembler::pinsrw(XMMRegister dst, Register src, int8_t imm8) {
+ DCHECK(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC4);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+void Assembler::pinsrw(XMMRegister dst, const Operand& src, int8_t imm8) {
+ DCHECK(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC4);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+void Assembler::pextrw(Register dst, XMMRegister src, int8_t imm8) {
+ DCHECK(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0xC5);
+ emit_sse_operand(src, dst);
+ emit(imm8);
+}
+
+void Assembler::pextrw(const Operand& dst, XMMRegister src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ DCHECK(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x15);
+ emit_sse_operand(src, dst);
+ emit(imm8);
+}
void Assembler::pextrd(Register dst, XMMRegister src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
@@ -2847,6 +2921,17 @@ void Assembler::pextrd(Register dst, XMMRegister src, int8_t imm8) {
emit(imm8);
}
+void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x16);
+ emit_sse_operand(src, dst);
+ emit(imm8);
+}
void Assembler::pinsrd(XMMRegister dst, Register src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
@@ -2873,6 +2958,30 @@ void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
emit(imm8);
}
+void Assembler::pinsrb(XMMRegister dst, Register src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x20);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+void Assembler::pinsrb(XMMRegister dst, const Operand& src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x20);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
void Assembler::insertps(XMMRegister dst, XMMRegister src, byte imm8) {
DCHECK(CpuFeatures::IsSupported(SSE4_1));
DCHECK(is_uint8(imm8));
@@ -3202,6 +3311,15 @@ void Assembler::psrlq(XMMRegister reg, byte imm8) {
emit(imm8);
}
+void Assembler::psllw(XMMRegister reg, byte imm8) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(reg);
+ emit(0x0F);
+ emit(0x71);
+ emit_sse_operand(rsi, reg); // rsi == 6
+ emit(imm8);
+}
void Assembler::pslld(XMMRegister reg, byte imm8) {
EnsureSpace ensure_space(this);
@@ -3213,6 +3331,15 @@ void Assembler::pslld(XMMRegister reg, byte imm8) {
emit(imm8);
}
+void Assembler::psrlw(XMMRegister reg, byte imm8) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(reg);
+ emit(0x0F);
+ emit(0x71);
+ emit_sse_operand(rdx, reg); // rdx == 2
+ emit(imm8);
+}
void Assembler::psrld(XMMRegister reg, byte imm8) {
EnsureSpace ensure_space(this);
@@ -3224,6 +3351,26 @@ void Assembler::psrld(XMMRegister reg, byte imm8) {
emit(imm8);
}
+void Assembler::psraw(XMMRegister reg, byte imm8) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(reg);
+ emit(0x0F);
+ emit(0x71);
+ emit_sse_operand(rsp, reg); // rsp == 4
+ emit(imm8);
+}
+
+void Assembler::psrad(XMMRegister reg, byte imm8) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(reg);
+ emit(0x0F);
+ emit(0x72);
+ emit_sse_operand(rsp, reg); // rsp == 4
+ emit(imm8);
+}
+
void Assembler::cmpps(XMMRegister dst, XMMRegister src, int8_t cmp) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
@@ -3789,17 +3936,6 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
}
-void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
- DCHECK(!IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x76);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3926,9 +4062,9 @@ void Assembler::vmovq(Register dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
-
-void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
- XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w) {
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, SIMDPrefix pp, LeadingOpcode m,
+ VexW w) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kLIG, pp, m, w);
@@ -3936,10 +4072,9 @@ void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
-void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2, SIMDPrefix pp, LeadingOpcode m,
- VexW w) {
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
+ const Operand& src2, SIMDPrefix pp, LeadingOpcode m,
+ VexW w) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kLIG, pp, m, w);
@@ -4409,78 +4544,81 @@ void Assembler::movups(const Operand& dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
-void Assembler::paddd(XMMRegister dst, XMMRegister src) {
+void Assembler::sse2_instr(XMMRegister dst, XMMRegister src, byte prefix,
+ byte escape, byte opcode) {
EnsureSpace ensure_space(this);
- emit(0x66);
+ emit(prefix);
emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xFE);
+ emit(escape);
+ emit(opcode);
emit_sse_operand(dst, src);
}
-void Assembler::paddd(XMMRegister dst, const Operand& src) {
+void Assembler::sse2_instr(XMMRegister dst, const Operand& src, byte prefix,
+ byte escape, byte opcode) {
EnsureSpace ensure_space(this);
- emit(0x66);
+ emit(prefix);
emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xFE);
+ emit(escape);
+ emit(opcode);
emit_sse_operand(dst, src);
}
-void Assembler::psubd(XMMRegister dst, XMMRegister src) {
+void Assembler::ssse3_instr(XMMRegister dst, XMMRegister src, byte prefix,
+ byte escape1, byte escape2, byte opcode) {
+ DCHECK(IsEnabled(SSSE3));
EnsureSpace ensure_space(this);
- emit(0x66);
+ emit(prefix);
emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xFA);
+ emit(escape1);
+ emit(escape2);
+ emit(opcode);
emit_sse_operand(dst, src);
}
-void Assembler::psubd(XMMRegister dst, const Operand& src) {
+void Assembler::ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
+ byte escape1, byte escape2, byte opcode) {
+ DCHECK(IsEnabled(SSSE3));
EnsureSpace ensure_space(this);
- emit(0x66);
+ emit(prefix);
emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xFA);
+ emit(escape1);
+ emit(escape2);
+ emit(opcode);
emit_sse_operand(dst, src);
}
-void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
+void Assembler::sse4_instr(XMMRegister dst, XMMRegister src, byte prefix,
+ byte escape1, byte escape2, byte opcode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
- emit(0x66);
+ emit(prefix);
emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x38);
- emit(0x40);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::pmulld(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x38);
- emit(0x40);
+ emit(escape1);
+ emit(escape2);
+ emit(opcode);
emit_sse_operand(dst, src);
}
-void Assembler::pmuludq(XMMRegister dst, XMMRegister src) {
+void Assembler::sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
+ byte escape1, byte escape2, byte opcode) {
+ DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
- emit(0x66);
+ emit(prefix);
emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xF4);
+ emit(escape1);
+ emit(escape2);
+ emit(opcode);
emit_sse_operand(dst, src);
}
-void Assembler::pmuludq(XMMRegister dst, const Operand& src) {
+void Assembler::lddqu(XMMRegister dst, const Operand& src) {
+ DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
- emit(0x66);
+ emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
- emit(0xF4);
+ emit(0xF0);
emit_sse_operand(dst, src);
}
@@ -4494,25 +4632,17 @@ void Assembler::psrldq(XMMRegister dst, uint8_t shift) {
emit(shift);
}
-void Assembler::cvtps2dq(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5B);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::cvtps2dq(XMMRegister dst, const Operand& src) {
+void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
- emit(0x5B);
+ emit(0x70);
emit_sse_operand(dst, src);
+ emit(shuffle);
}
-void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+void Assembler::pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index b2154fbaf4..5de891cf36 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -40,6 +40,7 @@
#include <deque>
#include "src/assembler.h"
+#include "src/x64/sse-instr.h"
namespace v8 {
namespace internal {
@@ -1072,7 +1073,91 @@ class Assembler : public AssemblerBase {
void movmskps(Register dst, XMMRegister src);
+ void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w);
+ void vinstr(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w);
+
// SSE2 instructions
+ void sse2_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape,
+ byte opcode);
+ void sse2_instr(XMMRegister dst, const Operand& src, byte prefix, byte escape,
+ byte opcode);
+#define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
+ void instruction(XMMRegister dst, XMMRegister src) { \
+ sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
+ } \
+ void instruction(XMMRegister dst, const Operand& src) { \
+ sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
+ }
+
+ SSE2_INSTRUCTION_LIST(DECLARE_SSE2_INSTRUCTION)
+#undef DECLARE_SSE2_INSTRUCTION
+
+#define DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
+ void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
+ } \
+ void v##instruction(XMMRegister dst, XMMRegister src1, \
+ const Operand& src2) { \
+ vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
+ }
+
+ SSE2_INSTRUCTION_LIST(DECLARE_SSE2_AVX_INSTRUCTION)
+#undef DECLARE_SSE2_AVX_INSTRUCTION
+
+ // SSE3
+ void lddqu(XMMRegister dst, const Operand& src);
+
+ // SSSE3
+ void ssse3_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
+ void ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
+ byte escape1, byte escape2, byte opcode);
+
+#define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2, \
+ opcode) \
+ void instruction(XMMRegister dst, XMMRegister src) { \
+ ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
+ } \
+ void instruction(XMMRegister dst, const Operand& src) { \
+ ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
+ }
+
+ SSSE3_INSTRUCTION_LIST(DECLARE_SSSE3_INSTRUCTION)
+#undef DECLARE_SSSE3_INSTRUCTION
+
+ // SSE4
+ void sse4_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
+ void sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
+ byte escape1, byte escape2, byte opcode);
+#define DECLARE_SSE4_INSTRUCTION(instruction, prefix, escape1, escape2, \
+ opcode) \
+ void instruction(XMMRegister dst, XMMRegister src) { \
+ sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
+ } \
+ void instruction(XMMRegister dst, const Operand& src) { \
+ sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
+ }
+
+ SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
+#undef DECLARE_SSE4_INSTRUCTION
+
+#define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, \
+ opcode) \
+ void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
+ } \
+ void v##instruction(XMMRegister dst, XMMRegister src1, \
+ const Operand& src2) { \
+ vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
+ }
+
+ SSSE3_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
+ SSE4_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
+#undef DECLARE_SSE34_AVX_INSTRUCTION
+
void movd(XMMRegister dst, Register src);
void movd(XMMRegister dst, const Operand& src);
void movd(Register dst, XMMRegister src);
@@ -1101,8 +1186,12 @@ class Assembler : public AssemblerBase {
void psllq(XMMRegister reg, byte imm8);
void psrlq(XMMRegister reg, byte imm8);
+ void psllw(XMMRegister reg, byte imm8);
void pslld(XMMRegister reg, byte imm8);
+ void psrlw(XMMRegister reg, byte imm8);
void psrld(XMMRegister reg, byte imm8);
+ void psraw(XMMRegister reg, byte imm8);
+ void psrad(XMMRegister reg, byte imm8);
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
@@ -1155,7 +1244,6 @@ class Assembler : public AssemblerBase {
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
void cmpltsd(XMMRegister dst, XMMRegister src);
- void pcmpeqd(XMMRegister dst, XMMRegister src);
void movmskpd(Register dst, XMMRegister src);
@@ -1166,7 +1254,16 @@ class Assembler : public AssemblerBase {
// SSE 4.1 instruction
void insertps(XMMRegister dst, XMMRegister src, byte imm8);
void extractps(Register dst, XMMRegister src, byte imm8);
+ void pextrb(Register dst, XMMRegister src, int8_t imm8);
+ void pextrb(const Operand& dst, XMMRegister src, int8_t imm8);
+ void pextrw(Register dst, XMMRegister src, int8_t imm8);
+ void pextrw(const Operand& dst, XMMRegister src, int8_t imm8);
void pextrd(Register dst, XMMRegister src, int8_t imm8);
+ void pextrd(const Operand& dst, XMMRegister src, int8_t imm8);
+ void pinsrb(XMMRegister dst, Register src, int8_t imm8);
+ void pinsrb(XMMRegister dst, const Operand& src, int8_t imm8);
+ void pinsrw(XMMRegister dst, Register src, int8_t imm8);
+ void pinsrw(XMMRegister dst, const Operand& src, int8_t imm8);
void pinsrd(XMMRegister dst, Register src, int8_t imm8);
void pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
@@ -1208,18 +1305,9 @@ class Assembler : public AssemblerBase {
void movups(XMMRegister dst, XMMRegister src);
void movups(XMMRegister dst, const Operand& src);
void movups(const Operand& dst, XMMRegister src);
- void paddd(XMMRegister dst, XMMRegister src);
- void paddd(XMMRegister dst, const Operand& src);
- void psubd(XMMRegister dst, XMMRegister src);
- void psubd(XMMRegister dst, const Operand& src);
- void pmulld(XMMRegister dst, XMMRegister src);
- void pmulld(XMMRegister dst, const Operand& src);
- void pmuludq(XMMRegister dst, XMMRegister src);
- void pmuludq(XMMRegister dst, const Operand& src);
void psrldq(XMMRegister dst, uint8_t shift);
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
- void cvtps2dq(XMMRegister dst, XMMRegister src);
- void cvtps2dq(XMMRegister dst, const Operand& src);
+ void pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
void cvtdq2ps(XMMRegister dst, XMMRegister src);
void cvtdq2ps(XMMRegister dst, const Operand& src);
@@ -1421,7 +1509,6 @@ class Assembler : public AssemblerBase {
AVX_P_3(vand, 0x54);
AVX_P_3(vor, 0x56);
AVX_P_3(vxor, 0x57);
- AVX_3(vpcmpeqd, 0x76, vpd);
AVX_3(vcvtsd2ss, 0x5a, vsd);
#undef AVX_3
@@ -1440,102 +1527,98 @@ class Assembler : public AssemblerBase {
emit(imm8);
}
void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(0x5a, dst, src1, src2, kF3, k0F, kWIG);
+ vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
void vcvtss2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x5a, dst, src1, src2, kF3, k0F, kWIG);
+ vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = {src2.code()};
- vsd(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
+ vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
}
void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x2a, dst, src1, src2, kF2, k0F, kW0);
+ vinstr(0x2a, dst, src1, src2, kF2, k0F, kW0);
}
void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = {src2.code()};
- vsd(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
+ vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
}
void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x2a, dst, src1, src2, kF3, k0F, kW0);
+ vinstr(0x2a, dst, src1, src2, kF3, k0F, kW0);
}
void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = {src2.code()};
- vsd(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
+ vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
}
void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x2a, dst, src1, src2, kF3, k0F, kW1);
+ vinstr(0x2a, dst, src1, src2, kF3, k0F, kW1);
}
void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = {src2.code()};
- vsd(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
+ vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
}
void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x2a, dst, src1, src2, kF2, k0F, kW1);
+ vinstr(0x2a, dst, src1, src2, kF2, k0F, kW1);
}
void vcvttss2si(Register dst, XMMRegister src) {
XMMRegister idst = {dst.code()};
- vsd(0x2c, idst, xmm0, src, kF3, k0F, kW0);
+ vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
}
void vcvttss2si(Register dst, const Operand& src) {
XMMRegister idst = {dst.code()};
- vsd(0x2c, idst, xmm0, src, kF3, k0F, kW0);
+ vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
}
void vcvttsd2si(Register dst, XMMRegister src) {
XMMRegister idst = {dst.code()};
- vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0);
+ vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
}
void vcvttsd2si(Register dst, const Operand& src) {
XMMRegister idst = {dst.code()};
- vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0);
+ vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
}
void vcvttss2siq(Register dst, XMMRegister src) {
XMMRegister idst = {dst.code()};
- vsd(0x2c, idst, xmm0, src, kF3, k0F, kW1);
+ vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
}
void vcvttss2siq(Register dst, const Operand& src) {
XMMRegister idst = {dst.code()};
- vsd(0x2c, idst, xmm0, src, kF3, k0F, kW1);
+ vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
}
void vcvttsd2siq(Register dst, XMMRegister src) {
XMMRegister idst = {dst.code()};
- vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1);
+ vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
}
void vcvttsd2siq(Register dst, const Operand& src) {
XMMRegister idst = {dst.code()};
- vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1);
+ vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
}
void vcvtsd2si(Register dst, XMMRegister src) {
XMMRegister idst = {dst.code()};
- vsd(0x2d, idst, xmm0, src, kF2, k0F, kW0);
+ vinstr(0x2d, idst, xmm0, src, kF2, k0F, kW0);
}
void vucomisd(XMMRegister dst, XMMRegister src) {
- vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG);
+ vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
}
void vucomisd(XMMRegister dst, const Operand& src) {
- vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG);
+ vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
}
void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
RoundingMode mode) {
- vsd(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
+ vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
RoundingMode mode) {
- vsd(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
+ vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(op, dst, src1, src2, kF2, k0F, kWIG);
+ vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
}
void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(op, dst, src1, src2, kF2, k0F, kWIG);
+ vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
}
- void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
- SIMDPrefix pp, LeadingOpcode m, VexW w);
- void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2,
- SIMDPrefix pp, LeadingOpcode m, VexW w);
void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vss(0x10, dst, src1, src2);
@@ -1616,6 +1699,101 @@ class Assembler : public AssemblerBase {
#undef AVX_CMP_P
+ void vlddqu(XMMRegister dst, const Operand& src) {
+ vinstr(0xF0, dst, xmm0, src, kF2, k0F, kWIG);
+ }
+ void vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ XMMRegister iop = {6};
+ vinstr(0x71, iop, dst, src, k66, k0F, kWIG);
+ emit(imm8);
+ }
+ void vpsrlw(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ XMMRegister iop = {2};
+ vinstr(0x71, iop, dst, src, k66, k0F, kWIG);
+ emit(imm8);
+ }
+ void vpsraw(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ XMMRegister iop = {4};
+ vinstr(0x71, iop, dst, src, k66, k0F, kWIG);
+ emit(imm8);
+ }
+ void vpslld(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ XMMRegister iop = {6};
+ vinstr(0x72, iop, dst, src, k66, k0F, kWIG);
+ emit(imm8);
+ }
+ void vpsrld(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ XMMRegister iop = {2};
+ vinstr(0x72, iop, dst, src, k66, k0F, kWIG);
+ emit(imm8);
+ }
+ void vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ XMMRegister iop = {4};
+ vinstr(0x72, iop, dst, src, k66, k0F, kWIG);
+ emit(imm8);
+ }
+ void vpextrb(Register dst, XMMRegister src, int8_t imm8) {
+ XMMRegister idst = {dst.code()};
+ vinstr(0x14, src, xmm0, idst, k66, k0F3A, kW0);
+ emit(imm8);
+ }
+ void vpextrb(const Operand& dst, XMMRegister src, int8_t imm8) {
+ vinstr(0x14, src, xmm0, dst, k66, k0F3A, kW0);
+ emit(imm8);
+ }
+ void vpextrw(Register dst, XMMRegister src, int8_t imm8) {
+ XMMRegister idst = {dst.code()};
+ vinstr(0xc5, idst, xmm0, src, k66, k0F, kW0);
+ emit(imm8);
+ }
+ void vpextrw(const Operand& dst, XMMRegister src, int8_t imm8) {
+ vinstr(0x15, src, xmm0, dst, k66, k0F3A, kW0);
+ emit(imm8);
+ }
+ void vpextrd(Register dst, XMMRegister src, int8_t imm8) {
+ XMMRegister idst = {dst.code()};
+ vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW0);
+ emit(imm8);
+ }
+ void vpextrd(const Operand& dst, XMMRegister src, int8_t imm8) {
+ vinstr(0x16, src, xmm0, dst, k66, k0F3A, kW0);
+ emit(imm8);
+ }
+ void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
+ XMMRegister isrc = {src2.code()};
+ vinstr(0x20, dst, src1, isrc, k66, k0F3A, kW0);
+ emit(imm8);
+ }
+ void vpinsrb(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t imm8) {
+ vinstr(0x20, dst, src1, src2, k66, k0F3A, kW0);
+ emit(imm8);
+ }
+ void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
+ XMMRegister isrc = {src2.code()};
+ vinstr(0xc4, dst, src1, isrc, k66, k0F, kW0);
+ emit(imm8);
+ }
+ void vpinsrw(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t imm8) {
+ vinstr(0xc4, dst, src1, src2, k66, k0F, kW0);
+ emit(imm8);
+ }
+ void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
+ XMMRegister isrc = {src2.code()};
+ vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW0);
+ emit(imm8);
+ }
+ void vpinsrd(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t imm8) {
+ vinstr(0x22, dst, src1, src2, k66, k0F3A, kW0);
+ emit(imm8);
+ }
+ void vpshufd(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
+ emit(imm8);
+ }
+
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
@@ -1852,6 +2030,8 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+ Address pc() const { return pc_; }
+
protected:
// Call near indirect
void call(const Operand& operand);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 4b5165a0be..2a962b32f9 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -1175,6 +1175,7 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
__ Pop(rdx);
__ Pop(rdi);
__ Pop(rax);
+ __ SmiToInteger32(rdx, rdx);
__ SmiToInteger32(rax, rax);
}
@@ -1189,7 +1190,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// rdi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
- Label done_initialize_count, done_increment_count;
// Load the cache state into r11.
__ SmiToInteger32(rdx, rdx);
@@ -1203,7 +1203,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// type-feedback-vector.h).
Label check_allocation_site;
__ cmpp(rdi, FieldOperand(r11, WeakCell::kValueOffset));
- __ j(equal, &done_increment_count, Label::kFar);
+ __ j(equal, &done, Label::kFar);
__ CompareRoot(r11, Heap::kmegamorphic_symbolRootIndex);
__ j(equal, &done, Label::kFar);
__ CompareRoot(FieldOperand(r11, HeapObject::kMapOffset),
@@ -1227,7 +1227,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
__ cmpp(rdi, r11);
__ j(not_equal, &megamorphic);
- __ jmp(&done_increment_count);
+ __ jmp(&done);
__ bind(&miss);
@@ -1253,29 +1253,17 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
CreateAllocationSiteStub create_stub(isolate);
CallStubInRecordCallTarget(masm, &create_stub);
- __ jmp(&done_initialize_count);
+ __ jmp(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
CallStubInRecordCallTarget(masm, &weak_cell_stub);
- __ bind(&done_initialize_count);
- // Initialize the call counter.
- __ SmiToInteger32(rdx, rdx);
- __ Move(FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Smi::FromInt(1));
- __ jmp(&done);
-
- __ bind(&done_increment_count);
-
- // Increment the call count for monomorphic function calls.
+ __ bind(&done);
+ // Increment the call count for all function calls.
__ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
Smi::FromInt(1));
-
- __ bind(&done);
- __ Integer32ToSmi(rdx, rdx);
}
@@ -1294,7 +1282,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
GenerateRecordCallTarget(masm);
- __ SmiToInteger32(rdx, rdx);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into rbx, or undefined.
__ movp(rbx,
@@ -1321,6 +1308,12 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+ Register slot) {
+ __ SmiAddConstant(FieldOperand(feedback_vector, slot, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Smi::FromInt(1));
+}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// rdi - function
@@ -1334,9 +1327,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ movp(rax, Immediate(arg_count()));
// Increment the call count for monomorphic function calls.
- __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Smi::FromInt(1));
+ IncrementCallCount(masm, rbx, rdx);
__ movp(rbx, rcx);
__ movp(rdx, rdi);
@@ -1352,7 +1343,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// -- rbx - vector
// -----------------------------------
Isolate* isolate = masm->isolate();
- Label extra_checks_or_miss, call, call_function;
+ Label extra_checks_or_miss, call, call_function, call_count_incremented;
int argc = arg_count();
StackArgumentsAccessor args(rsp, argc);
ParameterCount actual(argc);
@@ -1383,12 +1374,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(rdi, &extra_checks_or_miss);
+ __ bind(&call_function);
// Increment the call count for monomorphic function calls.
- __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Smi::FromInt(1));
+ IncrementCallCount(masm, rbx, rdx);
- __ bind(&call_function);
__ Set(rax, argc);
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
@@ -1428,6 +1417,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
TypeFeedbackVector::MegamorphicSentinel(isolate));
__ bind(&call);
+
+ // Increment the call count for megamorphic function calls.
+ IncrementCallCount(masm, rbx, rdx);
+
+ __ bind(&call_count_incremented);
__ Set(rax, argc);
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1453,11 +1447,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ cmpp(rcx, NativeContextOperand());
__ j(not_equal, &miss);
- // Initialize the call counter.
- __ Move(FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Smi::FromInt(1));
-
// Store the function. Use a stub since we need a frame for allocation.
// rbx - vector
// rdx - slot (needs to be in smi form)
@@ -1467,11 +1456,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
CreateWeakCellStub create_stub(isolate);
__ Integer32ToSmi(rdx, rdx);
+ __ Push(rbx);
+ __ Push(rdx);
__ Push(rdi);
__ Push(rsi);
__ CallStub(&create_stub);
__ Pop(rsi);
__ Pop(rdi);
+ __ Pop(rdx);
+ __ Pop(rbx);
+ __ SmiToInteger32(rdx, rdx);
}
__ jmp(&call_function);
@@ -1481,20 +1475,19 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
GenerateMiss(masm);
- __ jmp(&call);
+ __ jmp(&call_count_incremented);
// Unreachable
__ int3();
}
-
void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the receiver and the function and feedback info.
+ __ Integer32ToSmi(rdx, rdx);
__ Push(rdi);
__ Push(rbx);
- __ Integer32ToSmi(rdx, rdx);
__ Push(rdx);
// Call the entry.
@@ -1504,7 +1497,6 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ movp(rdi, rax);
}
-
bool CEntryStub::NeedsImmovableCode() {
return false;
}
@@ -2020,296 +2012,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
}
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // rsp[0] : return address
- // rsp[8] : to
- // rsp[16] : from
- // rsp[24] : string
-
- enum SubStringStubArgumentIndices {
- STRING_ARGUMENT_INDEX,
- FROM_ARGUMENT_INDEX,
- TO_ARGUMENT_INDEX,
- SUB_STRING_ARGUMENT_COUNT
- };
-
- StackArgumentsAccessor args(rsp, SUB_STRING_ARGUMENT_COUNT,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
-
- // Make sure first argument is a string.
- __ movp(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
- STATIC_ASSERT(kSmiTag == 0);
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
-
- // rax: string
- // rbx: instance type
- // Calculate length of sub string using the smi values.
- __ movp(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
- __ movp(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
- __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
-
- __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
- __ cmpp(rcx, FieldOperand(rax, String::kLengthOffset));
- Label not_original_string;
- // Shorter than original string's length: an actual substring.
- __ j(below, &not_original_string, Label::kNear);
- // Longer than original string's length or negative: unsafe arguments.
- __ j(above, &runtime);
- // Return original string.
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
- __ bind(&not_original_string);
-
- Label single_char;
- __ SmiCompare(rcx, Smi::FromInt(1));
- __ j(equal, &single_char);
-
- __ SmiToInteger32(rcx, rcx);
-
- // rax: string
- // rbx: instance type
- // rcx: sub string length
- // rdx: from index (smi)
- // Deal with different string types: update the index if necessary
- // and put the underlying string into edi.
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ testb(rbx, Immediate(kIsIndirectStringMask));
- __ j(zero, &seq_or_external_string, Label::kNear);
-
- __ testb(rbx, Immediate(kSlicedNotConsMask));
- __ j(not_zero, &sliced_string, Label::kNear);
- // Cons string. Check whether it is flat, then fetch first part.
- // Flat cons strings have an empty second part.
- __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
- Heap::kempty_stringRootIndex);
- __ j(not_equal, &runtime);
- __ movp(rdi, FieldOperand(rax, ConsString::kFirstOffset));
- // Update instance type.
- __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ addp(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
- __ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
- // Update instance type.
- __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the correct register.
- __ movp(rdi, rax);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // rdi: underlying subject string
- // rbx: instance type of underlying subject string
- // rdx: adjusted start index (smi)
- // rcx: length
- // If coming from the make_two_character_string path, the string
- // is too short to be sliced anyways.
- __ cmpp(rcx, Immediate(SlicedString::kMinLength));
- // Short slice. Copy instead of slicing.
- __ j(less, &copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testb(rbx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_slice, Label::kNear);
- __ AllocateOneByteSlicedString(rax, rbx, r14, &runtime);
- __ jmp(&set_slice_header, Label::kNear);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
- __ bind(&set_slice_header);
- __ Integer32ToSmi(rcx, rcx);
- __ movp(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
- __ movp(FieldOperand(rax, SlicedString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ movp(FieldOperand(rax, SlicedString::kParentOffset), rdi);
- __ movp(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&copy_routine);
- }
-
- // rdi: underlying subject string
- // rbx: instance type of underlying subject string
- // rdx: adjusted start index (smi)
- // rcx: length
- // The subject string can only be external or sequential string of either
- // encoding at this point.
- Label two_byte_sequential, sequential_string;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(rbx, Immediate(kExternalStringTag));
- __ j(zero, &sequential_string);
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ testb(rbx, Immediate(kShortExternalStringMask));
- __ j(not_zero, &runtime);
- __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&sequential_string);
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ testb(rbx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_sequential);
-
- // Allocate the result.
- __ AllocateOneByteString(rax, rcx, r11, r14, r15, &runtime);
-
- // rax: result string
- // rcx: result string length
- { // Locate character of sub string start.
- SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
- __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqOneByteString::kHeaderSize - kHeapObjectTag));
- }
- // Locate first character of result.
- __ leap(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
-
- // rax: result string
- // rcx: result length
- // r14: first character of result
- // rsi: character of sub string start
- StringHelper::GenerateCopyCharacters(
- masm, rdi, r14, rcx, String::ONE_BYTE_ENCODING);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
-
- __ bind(&two_byte_sequential);
- // Allocate the result.
- __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
-
- // rax: result string
- // rcx: result string length
- { // Locate character of sub string start.
- SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
- __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqOneByteString::kHeaderSize - kHeapObjectTag));
- }
- // Locate first character of result.
- __ leap(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
-
- // rax: result string
- // rcx: result length
- // rdi: first character of result
- // r14: character of sub string start
- StringHelper::GenerateCopyCharacters(
- masm, rdi, r14, rcx, String::TWO_BYTE_ENCODING);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString);
-
- __ bind(&single_char);
- // rax: string
- // rbx: instance type
- // rcx: sub string length (smi)
- // rdx: from index (smi)
- StringCharAtGenerator generator(rax, rdx, rcx, rax, &runtime, &runtime,
- &runtime, RECEIVER_IS_STRING);
- generator.GenerateFast(masm);
- __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
- generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
- // The ToString stub takes one argument in rax.
- Label is_number;
- __ JumpIfSmi(rax, &is_number, Label::kNear);
-
- Label not_string;
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdi);
- // rax: receiver
- // rdi: receiver map
- __ j(above_equal, &not_string, Label::kNear);
- __ Ret();
- __ bind(&not_string);
-
- Label not_heap_number;
- __ CompareRoot(rdi, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ CmpInstanceType(rdi, ODDBALL_TYPE);
- __ j(not_equal, &not_oddball, Label::kNear);
- __ movp(rax, FieldOperand(rax, Oddball::kToStringOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ PopReturnAddressTo(rcx); // Pop return address.
- __ Push(rax); // Push argument.
- __ PushReturnAddressFrom(rcx); // Push return address.
- __ TailCallRuntime(Runtime::kToString);
-}
-
-void ToNameStub::Generate(MacroAssembler* masm) {
- // The ToName stub takes one argument in rax.
- Label is_number;
- __ JumpIfSmi(rax, &is_number, Label::kNear);
-
- Label not_name;
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ CmpObjectType(rax, LAST_NAME_TYPE, rdi);
- // rax: receiver
- // rdi: receiver map
- __ j(above, &not_name, Label::kNear);
- __ Ret();
- __ bind(&not_name);
-
- Label not_heap_number;
- __ CompareRoot(rdi, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ CmpInstanceType(rdi, ODDBALL_TYPE);
- __ j(not_equal, &not_oddball, Label::kNear);
- __ movp(rax, FieldOperand(rax, Oddball::kToStringOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ PopReturnAddressTo(rcx); // Pop return address.
- __ Push(rax); // Push argument.
- __ PushReturnAddressFrom(rcx); // Push return address.
- __ TailCallRuntime(Runtime::kToName);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -3172,17 +2874,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_object;
- __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
- __ andp(regs_.scratch0(), regs_.object());
- __ movp(regs_.scratch1(),
- Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ subp(regs_.scratch1(), Immediate(1));
- __ movp(Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset),
- regs_.scratch1());
- __ j(negative, &need_incremental);
-
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(),
@@ -3575,7 +3266,7 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
__ jmp(feedback);
__ bind(&transition_call);
- DCHECK(receiver_map.is(VectorStoreTransitionDescriptor::MapRegister()));
+ DCHECK(receiver_map.is(StoreTransitionDescriptor::MapRegister()));
__ movp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
// The weak cell may have been cleared.
__ JumpIfSmi(receiver_map, miss);
@@ -4308,7 +3999,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ cmpl(rcx, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ cmpl(rcx, Immediate(kMaxRegularHeapObjectSize));
__ j(greater, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -4671,7 +4362,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ cmpl(rcx, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ cmpl(rcx, Immediate(kMaxRegularHeapObjectSize));
__ j(greater, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 83f34d07a0..6adb820ef1 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -11,6 +11,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/lazy-instance.h"
#include "src/disasm.h"
+#include "src/x64/sse-instr.h"
namespace disasm {
@@ -875,6 +876,7 @@ int DisassemblerX64::SetCC(byte* data) {
return 3; // includes 0x0F
}
+const char* sf_str[4] = {"", "rl", "ra", "ll"};
int DisassemblerX64::AVXInstruction(byte* data) {
byte opcode = *data;
@@ -949,6 +951,18 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfCPURegister(vvvv));
break;
+#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, notUsed3, \
+ opcode) \
+ case 0x##opcode: { \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
+ NameOfXMMRegister(vvvv)); \
+ current += PrintRightXMMOperand(current); \
+ break; \
+ }
+
+ SSSE3_INSTRUCTION_LIST(DECLARE_SSE_AVX_DIS_CASE)
+ SSE4_INSTRUCTION_LIST(DECLARE_SSE_AVX_DIS_CASE)
+#undef DECLARE_SSE_AVX_DIS_CASE
default:
UnimplementedInstruction();
}
@@ -968,6 +982,33 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
+ case 0x14:
+ AppendToBuffer("vpextrb ");
+ current += PrintRightByteOperand(current);
+ AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ break;
+ case 0x15:
+ AppendToBuffer("vpextrw ");
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ break;
+ case 0x16:
+ AppendToBuffer("vpextrd ");
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ break;
+ case 0x20:
+ AppendToBuffer("vpinsrb %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightByteOperand(current);
+ AppendToBuffer(",0x%x", *current++);
+ break;
+ case 0x22:
+ AppendToBuffer("vpinsrd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightOperand(current);
+ AppendToBuffer(",0x%x", *current++);
+ break;
default:
UnimplementedInstruction();
}
@@ -1112,6 +1153,10 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0xf0:
+ AppendToBuffer("vlddqu %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
default:
UnimplementedInstruction();
}
@@ -1326,16 +1371,28 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(regop));
current += PrintRightOperand(current);
break;
- case 0x73:
- AppendToBuffer("%s %s,", regop == 6 ? "vpsllq" : "vpsrlq",
+ case 0x70:
+ AppendToBuffer("vpshufd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", *current++);
+ break;
+ case 0x71:
+ AppendToBuffer("vps%sw %s,", sf_str[regop / 2],
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%u", *current++);
+ break;
+ case 0x72:
+ AppendToBuffer("vps%sd %s,", sf_str[regop / 2],
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
AppendToBuffer(",%u", *current++);
break;
- case 0x76:
- AppendToBuffer("vpcmpeqd %s,%s,", NameOfXMMRegister(regop),
+ case 0x73:
+ AppendToBuffer("vps%sq %s,", sf_str[regop / 2],
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%u", *current++);
break;
case 0x7e:
AppendToBuffer("vmov%c ", vex_w() ? 'q' : 'd');
@@ -1352,6 +1409,27 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += 1;
break;
}
+ case 0xc4:
+ AppendToBuffer("vpinsrw %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightOperand(current);
+ AppendToBuffer(",0x%x", *current++);
+ break;
+ case 0xc5:
+ AppendToBuffer("vpextrw %s,", NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", *current++);
+ break;
+#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
+ case 0x##opcode: { \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
+ NameOfXMMRegister(vvvv)); \
+ current += PrintRightXMMOperand(current); \
+ break; \
+ }
+
+ SSE2_INSTRUCTION_LIST(DECLARE_SSE_AVX_DIS_CASE)
+#undef DECLARE_SSE_AVX_DIS_CASE
default:
UnimplementedInstruction();
}
@@ -1363,7 +1441,6 @@ int DisassemblerX64::AVXInstruction(byte* data) {
return static_cast<int>(current - data);
}
-
// Returns number of bytes used, including *data.
int DisassemblerX64::FPUInstruction(byte* data) {
byte escape_opcode = *data;
@@ -1558,11 +1635,20 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
if (opcode == 0x38) {
byte third_byte = *current;
current = data + 3;
- if (third_byte == 0x40) {
- // pmulld xmm, xmm/m128
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pmulld %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ get_modrm(*current, &mod, &regop, &rm);
+ switch (third_byte) {
+#define SSE34_DIS_CASE(instruction, notUsed1, notUsed2, notUsed3, opcode) \
+ case 0x##opcode: { \
+ AppendToBuffer(#instruction " %s,", NameOfXMMRegister(regop)); \
+ current += PrintRightXMMOperand(current); \
+ break; \
+ }
+
+ SSSE3_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSE4_INSTRUCTION_LIST(SSE34_DIS_CASE)
+#undef SSE34_DIS_CASE
+ default:
+ UnimplementedInstruction();
}
} else if (opcode == 0x3A) {
byte third_byte = *current;
@@ -1586,12 +1672,31 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",0x%x", (*current) & 3);
current += 1;
+ } else if (third_byte == 0x14) {
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("pextrb "); // reg/m32, xmm, imm8
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
+ current += 1;
+ } else if (third_byte == 0x15) {
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("pextrw "); // reg/m32, xmm, imm8
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
+ current += 1;
} else if (third_byte == 0x16) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("pextrd "); // reg/m32, xmm, imm8
current += PrintRightOperand(current);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
+ } else if (third_byte == 0x20) {
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("pinsrd "); // xmm, reg/m32, imm8
+ AppendToBuffer(" %s,", NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%d", (*current) & 3);
+ current += 1;
} else if (third_byte == 0x21) {
get_modrm(*current, &mod, &regop, &rm);
// insertps xmm, xmm/m32, imm8
@@ -1666,15 +1771,20 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",0x%x", *current);
current += 1;
+ } else if (opcode == 0x71) {
+ current += 1;
+ AppendToBuffer("ps%sw %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
+ *current & 0x7f);
+ current += 1;
} else if (opcode == 0x72) {
current += 1;
- AppendToBuffer("%s %s,%d", (regop == 6) ? "pslld" : "psrld",
- NameOfXMMRegister(rm), *current & 0x7f);
+ AppendToBuffer("ps%sd %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
+ *current & 0x7f);
current += 1;
} else if (opcode == 0x73) {
current += 1;
- AppendToBuffer("%s %s,%d", (regop == 6) ? "psllq" : "psrlq",
- NameOfXMMRegister(rm), *current & 0x7f);
+ AppendToBuffer("ps%sq %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
+ *current & 0x7f);
current += 1;
} else if (opcode == 0xB1) {
current += PrintOperands("cmpxchg", OPER_REG_OP_ORDER, current);
@@ -1692,16 +1802,86 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "ucomisd";
} else if (opcode == 0x2F) {
mnemonic = "comisd";
+ } else if (opcode == 0x64) {
+ mnemonic = "pcmpgtb";
+ } else if (opcode == 0x65) {
+ mnemonic = "pcmpgtw";
+ } else if (opcode == 0x66) {
+ mnemonic = "pcmpgtd";
+ } else if (opcode == 0x74) {
+ mnemonic = "pcmpeqb";
+ } else if (opcode == 0x75) {
+ mnemonic = "pcmpeqw";
} else if (opcode == 0x76) {
mnemonic = "pcmpeqd";
} else if (opcode == 0x62) {
mnemonic = "punpckldq";
+ } else if (opcode == 0x63) {
+ mnemonic = "packsswb";
+ } else if (opcode == 0x67) {
+ mnemonic = "packuswb";
} else if (opcode == 0x6A) {
mnemonic = "punpckhdq";
+ } else if (opcode == 0x6B) {
+ mnemonic = "packssdw";
+ } else if (opcode == 0xC4) {
+ mnemonic = "pinsrw";
+ } else if (opcode == 0xC5) {
+ mnemonic = "pextrw";
+ } else if (opcode == 0xD1) {
+ mnemonic = "psrlw";
+ } else if (opcode == 0xD2) {
+ mnemonic = "psrld";
+ } else if (opcode == 0xD5) {
+ mnemonic = "pmullw";
+ } else if (opcode == 0xD7) {
+ mnemonic = "pmovmskb";
+ } else if (opcode == 0xD8) {
+ mnemonic = "psubusb";
+ } else if (opcode == 0xD9) {
+ mnemonic = "psubusw";
+ } else if (opcode == 0xDA) {
+ mnemonic = "pminub";
+ } else if (opcode == 0xDC) {
+ mnemonic = "paddusb";
+ } else if (opcode == 0xDD) {
+ mnemonic = "paddusw";
+ } else if (opcode == 0xDE) {
+ mnemonic = "pmaxub";
+ } else if (opcode == 0xE1) {
+ mnemonic = "psraw";
+ } else if (opcode == 0xE2) {
+ mnemonic = "psrad";
+ } else if (opcode == 0xE8) {
+ mnemonic = "psubsb";
+ } else if (opcode == 0xE9) {
+ mnemonic = "psubsw";
+ } else if (opcode == 0xEA) {
+ mnemonic = "pminsw";
+ } else if (opcode == 0xEC) {
+ mnemonic = "paddsb";
+ } else if (opcode == 0xED) {
+ mnemonic = "paddsw";
+ } else if (opcode == 0xEE) {
+ mnemonic = "pmaxsw";
+ } else if (opcode == 0xEF) {
+ mnemonic = "pxor";
+ } else if (opcode == 0xF1) {
+ mnemonic = "psllw";
+ } else if (opcode == 0xF2) {
+ mnemonic = "pslld";
} else if (opcode == 0xF4) {
mnemonic = "pmuludq";
+ } else if (opcode == 0xF8) {
+ mnemonic = "psubb";
+ } else if (opcode == 0xF9) {
+ mnemonic = "psubw";
} else if (opcode == 0xFA) {
mnemonic = "psubd";
+ } else if (opcode == 0xFC) {
+ mnemonic = "paddb";
+ } else if (opcode == 0xFD) {
+ mnemonic = "paddw";
} else if (opcode == 0xFE) {
mnemonic = "paddd";
} else if (opcode == 0xC2) {
@@ -1780,6 +1960,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
current += 2;
+ } else if (opcode == 0xF0) {
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("lddqu %s,", NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
} else {
UnimplementedInstruction();
}
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 7d39b42ac3..9e486446bc 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -40,13 +40,9 @@ const Register StoreDescriptor::SlotRegister() { return rdi; }
const Register StoreWithVectorDescriptor::VectorRegister() { return rbx; }
-const Register VectorStoreTransitionDescriptor::SlotRegister() { return rdi; }
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return rbx; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return r11; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return rbx; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return rdi; }
+const Register StoreTransitionDescriptor::VectorRegister() { return rbx; }
+const Register StoreTransitionDescriptor::MapRegister() { return r11; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return rbx; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return rax; }
@@ -356,7 +352,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
rdi, // callee
@@ -391,7 +387,19 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
rax, // argument count (not including receiver)
rdx, // new target
rdi, // constructor
- rbx, // address of first argument
+ rbx, // allocation site feedback if available, undefined otherwise
+ rcx, // address of first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rax, // argument count (not including receiver)
+ rdx, // target to the call. It is checked to be Array function.
+ rbx, // allocation site feedback
+ rcx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 6dacc011df..0fd6333996 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -253,9 +253,8 @@ void MacroAssembler::InNewSpace(Register object,
Condition cc,
Label* branch,
Label::Distance distance) {
- const int mask =
- (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
- CheckPageFlag(object, scratch, mask, cc, branch, distance);
+ CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch,
+ distance);
}
@@ -3325,12 +3324,12 @@ void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
Movd(dst, src);
return;
}
- DCHECK_EQ(1, imm8);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pextrd(dst, src, imm8);
return;
}
+ DCHECK_EQ(1, imm8);
movq(dst, src);
shrq(dst, Immediate(32));
}
@@ -4974,7 +4973,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index d5e411f36f..a8d0c60aa1 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -891,6 +891,18 @@ class MacroAssembler: public Assembler {
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+ // below the caller's sp (on x64 it's at least return address).
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 1) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ UNIMPLEMENTED();
+ }
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
void Drop(int stack_elements);
diff --git a/deps/v8/src/x64/sse-instr.h b/deps/v8/src/x64/sse-instr.h
new file mode 100644
index 0000000000..00957278a7
--- /dev/null
+++ b/deps/v8/src/x64/sse-instr.h
@@ -0,0 +1,69 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SSE_INSTR_H_
+#define V8_SSE_INSTR_H_
+
+#define SSE2_INSTRUCTION_LIST(V) \
+ V(packsswb, 66, 0F, 63) \
+ V(packssdw, 66, 0F, 6B) \
+ V(packuswb, 66, 0F, 67) \
+ V(paddb, 66, 0F, FC) \
+ V(paddw, 66, 0F, FD) \
+ V(paddd, 66, 0F, FE) \
+ V(paddsb, 66, 0F, EC) \
+ V(paddsw, 66, 0F, ED) \
+ V(paddusb, 66, 0F, DC) \
+ V(paddusw, 66, 0F, DD) \
+ V(pcmpeqb, 66, 0F, 74) \
+ V(pcmpeqw, 66, 0F, 75) \
+ V(pcmpeqd, 66, 0F, 76) \
+ V(pcmpgtb, 66, 0F, 64) \
+ V(pcmpgtw, 66, 0F, 65) \
+ V(pcmpgtd, 66, 0F, 66) \
+ V(pmaxsw, 66, 0F, EE) \
+ V(pmaxub, 66, 0F, DE) \
+ V(pminsw, 66, 0F, EA) \
+ V(pminub, 66, 0F, DA) \
+ V(pmullw, 66, 0F, D5) \
+ V(pmuludq, 66, 0F, F4) \
+ V(psllw, 66, 0F, F1) \
+ V(pslld, 66, 0F, F2) \
+ V(psraw, 66, 0F, E1) \
+ V(psrad, 66, 0F, E2) \
+ V(psrlw, 66, 0F, D1) \
+ V(psrld, 66, 0F, D2) \
+ V(psubb, 66, 0F, F8) \
+ V(psubw, 66, 0F, F9) \
+ V(psubd, 66, 0F, FA) \
+ V(psubsb, 66, 0F, E8) \
+ V(psubsw, 66, 0F, E9) \
+ V(psubusb, 66, 0F, D8) \
+ V(psubusw, 66, 0F, D9) \
+ V(pxor, 66, 0F, EF) \
+ V(cvtps2dq, 66, 0F, 5B)
+
+#define SSSE3_INSTRUCTION_LIST(V) \
+ V(pabsb, 66, 0F, 38, 1C) \
+ V(pabsw, 66, 0F, 38, 1D) \
+ V(pabsd, 66, 0F, 38, 1E) \
+ V(pshufb, 66, 0F, 38, 00) \
+ V(psignb, 66, 0F, 38, 08) \
+ V(psignw, 66, 0F, 38, 09) \
+ V(psignd, 66, 0F, 38, 0A)
+
+#define SSE4_INSTRUCTION_LIST(V) \
+ V(packusdw, 66, 0F, 38, 2B) \
+ V(pminsb, 66, 0F, 38, 38) \
+ V(pminsd, 66, 0F, 38, 39) \
+ V(pminuw, 66, 0F, 38, 3A) \
+ V(pminud, 66, 0F, 38, 3B) \
+ V(pmaxsb, 66, 0F, 38, 3C) \
+ V(pmaxsd, 66, 0F, 38, 3D) \
+ V(pmaxuw, 66, 0F, 38, 3E) \
+ V(pmaxud, 66, 0F, 38, 3F) \
+ V(pmulld, 66, 0F, 38, 40) \
+ V(ptest, 66, 0F, 38, 17)
+
+#endif // V8_SSE_INSTR_H_
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index 02de67afbc..e70cbad7ee 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -1130,7 +1130,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// edi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
- Label done_increment_count, done_initialize_count;
// Load the cache state into ecx.
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
@@ -1143,7 +1142,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// type-feedback-vector.h).
Label check_allocation_site;
__ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
- __ j(equal, &done_increment_count, Label::kFar);
+ __ j(equal, &done, Label::kFar);
__ CompareRoot(ecx, Heap::kmegamorphic_symbolRootIndex);
__ j(equal, &done, Label::kFar);
__ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
@@ -1166,7 +1165,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
__ j(not_equal, &megamorphic);
- __ jmp(&done_increment_count, Label::kFar);
+ __ jmp(&done, Label::kFar);
__ bind(&miss);
@@ -1195,26 +1194,17 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(isolate);
CallStubInRecordCallTarget(masm, &create_stub);
- __ jmp(&done_initialize_count);
+ __ jmp(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
CallStubInRecordCallTarget(masm, &weak_cell_stub);
- __ bind(&done_initialize_count);
-
- // Initialize the call counter.
- __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(1)));
- __ jmp(&done);
- __ bind(&done_increment_count);
- // Increment the call count for monomorphic function calls.
+ __ bind(&done);
+ // Increment the call count for all function calls.
__ add(FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
Immediate(Smi::FromInt(1)));
-
- __ bind(&done);
}
@@ -1260,6 +1250,12 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
+ Register slot) {
+ __ add(FieldOperand(feedback_vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(1)));
+}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// edi - function
@@ -1275,9 +1271,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
FixedArray::kHeaderSize));
// Increment the call count for monomorphic function calls.
- __ add(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(1)));
+ IncrementCallCount(masm, ebx, edx);
__ mov(ebx, ecx);
__ mov(edx, edi);
@@ -1293,7 +1287,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// edx - slot id
// ebx - vector
Isolate* isolate = masm->isolate();
- Label extra_checks_or_miss, call, call_function;
+ Label extra_checks_or_miss, call, call_function, call_count_incremented;
int argc = arg_count();
ParameterCount actual(argc);
@@ -1322,12 +1316,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(edi, &extra_checks_or_miss);
+ __ bind(&call_function);
+
// Increment the call count for monomorphic function calls.
- __ add(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(1)));
+ IncrementCallCount(masm, ebx, edx);
- __ bind(&call_function);
__ Set(eax, argc);
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
@@ -1368,6 +1361,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ bind(&call);
+
+ // Increment the call count for megamorphic function calls.
+ IncrementCallCount(masm, ebx, edx);
+
+ __ bind(&call_count_incremented);
+
__ Set(eax, argc);
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1393,11 +1392,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ cmp(ecx, NativeContextOperand());
__ j(not_equal, &miss);
- // Initialize the call counter.
- __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(1)));
-
// Store the function. Use a stub since we need a frame for allocation.
// ebx - vector
// edx - slot
@@ -1405,11 +1399,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(isolate);
+ __ push(ebx);
+ __ push(edx);
__ push(edi);
__ push(esi);
__ CallStub(&create_stub);
__ pop(esi);
__ pop(edi);
+ __ pop(edx);
+ __ pop(ebx);
}
__ jmp(&call_function);
@@ -1419,7 +1417,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
GenerateMiss(masm);
- __ jmp(&call);
+ __ jmp(&call_count_incremented);
// Unreachable
__ int3();
@@ -1910,297 +1908,6 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
}
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: to
- // esp[8]: from
- // esp[12]: string
-
- // Make sure first argument is a string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
-
- // eax: string
- // ebx: instance type
-
- // Calculate length of sub string using the smi values.
- __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
- __ JumpIfNotSmi(ecx, &runtime);
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
- __ JumpIfNotSmi(edx, &runtime);
- __ sub(ecx, edx);
- __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
- Label not_original_string;
- // Shorter than original string's length: an actual substring.
- __ j(below, &not_original_string, Label::kNear);
- // Longer than original string's length or negative: unsafe arguments.
- __ j(above, &runtime);
- // Return original string.
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
- __ bind(&not_original_string);
-
- Label single_char;
- __ cmp(ecx, Immediate(Smi::FromInt(1)));
- __ j(equal, &single_char);
-
- // eax: string
- // ebx: instance type
- // ecx: sub string length (smi)
- // edx: from index (smi)
- // Deal with different string types: update the index if necessary
- // and put the underlying string into edi.
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ test(ebx, Immediate(kIsIndirectStringMask));
- __ j(zero, &seq_or_external_string, Label::kNear);
-
- Factory* factory = isolate()->factory();
- __ test(ebx, Immediate(kSlicedNotConsMask));
- __ j(not_zero, &sliced_string, Label::kNear);
- // Cons string. Check whether it is flat, then fetch first part.
- // Flat cons strings have an empty second part.
- __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
- factory->empty_string());
- __ j(not_equal, &runtime);
- __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
- // Update instance type.
- __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and adjust start index by offset.
- __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
- __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
- // Update instance type.
- __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the expected register.
- __ mov(edi, eax);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // edi: underlying subject string
- // ebx: instance type of underlying subject string
- // edx: adjusted start index (smi)
- // ecx: length (smi)
- __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
- // Short slice. Copy instead of slicing.
- __ j(less, &copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(ebx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_slice, Label::kNear);
- __ AllocateOneByteSlicedString(eax, ebx, no_reg, &runtime);
- __ jmp(&set_slice_header, Label::kNear);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
- __ bind(&set_slice_header);
- __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
- __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
- __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&copy_routine);
- }
-
- // edi: underlying subject string
- // ebx: instance type of underlying subject string
- // edx: adjusted start index (smi)
- // ecx: length (smi)
- // The subject string can only be external or sequential string of either
- // encoding at this point.
- Label two_byte_sequential, runtime_drop_two, sequential_string;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(ebx, Immediate(kExternalStringTag));
- __ j(zero, &sequential_string);
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(ebx, Immediate(kShortExternalStringMask));
- __ j(not_zero, &runtime);
- __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&sequential_string);
- // Stash away (adjusted) index and (underlying) string.
- __ push(edx);
- __ push(edi);
- __ SmiUntag(ecx);
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ test_b(ebx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_sequential);
-
- // Sequential one byte string. Allocate the result.
- __ AllocateOneByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
-
- // eax: result string
- // ecx: result string length
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ pop(edx);
- __ pop(ebx);
- __ SmiUntag(ebx);
- __ lea(edx, FieldOperand(edx, ebx, times_1, SeqOneByteString::kHeaderSize));
-
- // eax: result string
- // ecx: result length
- // edi: first character of result
- // edx: character of sub string start
- StringHelper::GenerateCopyCharacters(
- masm, edi, edx, ecx, ebx, String::ONE_BYTE_ENCODING);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&two_byte_sequential);
- // Sequential two-byte string. Allocate the result.
- __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
-
- // eax: result string
- // ecx: result string length
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(edi,
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ pop(edx);
- __ pop(ebx);
- // As from is a smi it is 2 times the value which matches the size of a two
- // byte character.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ lea(edx, FieldOperand(edx, ebx, times_1, SeqTwoByteString::kHeaderSize));
-
- // eax: result string
- // ecx: result length
- // edi: first character of result
- // edx: character of sub string start
- StringHelper::GenerateCopyCharacters(
- masm, edi, edx, ecx, ebx, String::TWO_BYTE_ENCODING);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- // Drop pushed values on the stack before tail call.
- __ bind(&runtime_drop_two);
- __ Drop(2);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString);
-
- __ bind(&single_char);
- // eax: string
- // ebx: instance type
- // ecx: sub string length (smi)
- // edx: from index (smi)
- StringCharAtGenerator generator(eax, edx, ecx, eax, &runtime, &runtime,
- &runtime, RECEIVER_IS_STRING);
- generator.GenerateFast(masm);
- __ ret(3 * kPointerSize);
- generator.SkipSlow(masm, &runtime);
-}
-
-void ToStringStub::Generate(MacroAssembler* masm) {
- // The ToString stub takes one argument in eax.
- Label is_number;
- __ JumpIfSmi(eax, &is_number, Label::kNear);
-
- Label not_string;
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
- // eax: receiver
- // edi: receiver map
- __ j(above_equal, &not_string, Label::kNear);
- __ Ret();
- __ bind(&not_string);
-
- Label not_heap_number;
- __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ CmpInstanceType(edi, ODDBALL_TYPE);
- __ j(not_equal, &not_oddball, Label::kNear);
- __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ pop(ecx); // Pop return address.
- __ push(eax); // Push argument.
- __ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToString);
-}
-
-
-void ToNameStub::Generate(MacroAssembler* masm) {
- // The ToName stub takes one argument in eax.
- Label is_number;
- __ JumpIfSmi(eax, &is_number, Label::kNear);
-
- Label not_name;
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ CmpObjectType(eax, LAST_NAME_TYPE, edi);
- // eax: receiver
- // edi: receiver map
- __ j(above, &not_name, Label::kNear);
- __ Ret();
- __ bind(&not_name);
-
- Label not_heap_number;
- __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ bind(&is_number);
- NumberToStringStub stub(isolate());
- __ TailCallStub(&stub);
- __ bind(&not_heap_number);
-
- Label not_oddball;
- __ CmpInstanceType(edi, ODDBALL_TYPE);
- __ j(not_equal, &not_oddball, Label::kNear);
- __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ pop(ecx); // Pop return address.
- __ push(eax); // Push argument.
- __ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToName);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -3040,17 +2747,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Mode mode) {
Label object_is_black, need_incremental, need_incremental_pop_object;
- __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
- __ and_(regs_.scratch0(), regs_.object());
- __ mov(regs_.scratch1(),
- Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ sub(regs_.scratch1(), Immediate(1));
- __ mov(Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset),
- regs_.scratch1());
- __ j(negative, &need_incremental);
-
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(),
@@ -3392,11 +3088,10 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
Label load_smi_map, compare_map;
Label start_polymorphic;
Label pop_and_miss;
- ExternalReference virtual_register =
- ExternalReference::virtual_handler_register(masm->isolate());
__ push(receiver);
- __ push(vector);
+ // Value, vector and slot are passed on the stack, so no need to save/restore
+ // them.
Register receiver_map = receiver;
Register cached_map = vector;
@@ -3417,12 +3112,9 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
Register handler = feedback;
DCHECK(handler.is(StoreWithVectorDescriptor::ValueRegister()));
__ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ pop(vector);
__ pop(receiver);
__ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ mov(Operand::StaticVariable(virtual_register), handler);
- __ pop(handler); // Pop "value".
- __ jmp(Operand::StaticVariable(virtual_register));
+ __ jmp(handler);
// Polymorphic, we have to loop from 2 to N
__ bind(&start_polymorphic);
@@ -3446,11 +3138,8 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
FixedArray::kHeaderSize + kPointerSize));
__ lea(handler, FieldOperand(handler, Code::kHeaderSize));
__ pop(key);
- __ pop(vector);
__ pop(receiver);
- __ mov(Operand::StaticVariable(virtual_register), handler);
- __ pop(handler); // Pop "value".
- __ jmp(Operand::StaticVariable(virtual_register));
+ __ jmp(handler);
__ bind(&prepare_next);
__ add(counter, Immediate(Smi::FromInt(2)));
@@ -3460,7 +3149,6 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
// We exhausted our array of map handler pairs.
__ bind(&pop_and_miss);
__ pop(key);
- __ pop(vector);
__ pop(receiver);
__ jmp(miss);
@@ -3476,8 +3164,6 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
Label* miss) {
// The store ic value is on the stack.
DCHECK(weak_cell.is(StoreWithVectorDescriptor::ValueRegister()));
- ExternalReference virtual_register =
- ExternalReference::virtual_handler_register(masm->isolate());
// feedback initially contains the feedback array
Label compare_smi_map;
@@ -3493,11 +3179,8 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
__ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
__ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
- // Put the store ic value back in it's register.
- __ mov(Operand::StaticVariable(virtual_register), weak_cell);
- __ pop(weak_cell); // Pop "value".
// jump to the handler.
- __ jmp(Operand::StaticVariable(virtual_register));
+ __ jmp(weak_cell);
// In microbenchmarks, it made sense to unroll this code so that the call to
// the handler is duplicated for a HeapObject receiver and a Smi receiver.
@@ -3507,10 +3190,8 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
__ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
__ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
- __ mov(Operand::StaticVariable(virtual_register), weak_cell);
- __ pop(weak_cell); // Pop "value".
// jump to the handler.
- __ jmp(Operand::StaticVariable(virtual_register));
+ __ jmp(weak_cell);
}
void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
@@ -3521,7 +3202,26 @@ void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register slot = StoreWithVectorDescriptor::SlotRegister(); // edi
Label miss;
- __ push(value);
+ if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
+ // Current stack layout:
+ // - esp[8] -- value
+ // - esp[4] -- slot
+ // - esp[0] -- return address
+ STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+ if (in_frame) {
+ __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
+ // If the vector is not on the stack, then insert the vector beneath
+ // return address in order to prepare for calling handler with
+ // StoreWithVector calling convention.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
+ __ RecordComment("]");
+ } else {
+ __ mov(vector, Operand(esp, 1 * kPointerSize));
+ }
+ __ mov(slot, Operand(esp, 2 * kPointerSize));
+ }
Register scratch = value;
__ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
@@ -3545,19 +3245,9 @@ void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &miss);
- __ pop(value);
- __ push(slot);
- __ push(vector);
masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, slot,
no_reg);
- __ pop(vector);
- __ pop(slot);
- Label no_pop_miss;
- __ jmp(&no_pop_miss);
-
__ bind(&miss);
- __ pop(value);
- __ bind(&no_pop_miss);
StoreIC::GenerateMiss(masm);
}
@@ -3579,17 +3269,13 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
Label load_smi_map, compare_map;
Label transition_call;
Label pop_and_miss;
- ExternalReference virtual_register =
- ExternalReference::virtual_handler_register(masm->isolate());
- ExternalReference virtual_slot =
- ExternalReference::virtual_slot_register(masm->isolate());
__ push(receiver);
- __ push(vector);
+ // Value, vector and slot are passed on the stack, so no need to save/restore
+ // them.
Register receiver_map = receiver;
Register cached_map = vector;
- Register value = StoreDescriptor::ValueRegister();
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &load_smi_map);
@@ -3600,15 +3286,18 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
__ push(key);
// Current stack layout:
// - esp[0] -- key
- // - esp[4] -- vector
- // - esp[8] -- receiver
- // - esp[12] -- value
- // - esp[16] -- return address
+ // - esp[4] -- receiver
+ // - esp[8] -- return address
+ // - esp[12] -- vector
+ // - esp[16] -- slot
+ // - esp[20] -- value
//
- // Required stack layout for handler call:
+ // Required stack layout for handler call (see StoreWithVectorDescriptor):
// - esp[0] -- return address
- // - receiver, key, value, vector, slot in registers.
- // - handler in virtual register.
+ // - esp[4] -- vector
+ // - esp[8] -- slot
+ // - esp[12] -- value
+ // - receiver, key, handler in registers.
Register counter = key;
__ mov(counter, Immediate(Smi::FromInt(0)));
__ bind(&next_loop);
@@ -3623,43 +3312,57 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
__ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize + 2 * kPointerSize));
__ pop(key);
- __ pop(vector);
__ pop(receiver);
__ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
- __ mov(Operand::StaticVariable(virtual_register), feedback);
- __ pop(value);
- __ jmp(Operand::StaticVariable(virtual_register));
+ __ jmp(feedback);
__ bind(&transition_call);
// Current stack layout:
// - esp[0] -- key
- // - esp[4] -- vector
- // - esp[8] -- receiver
- // - esp[12] -- value
- // - esp[16] -- return address
+ // - esp[4] -- receiver
+ // - esp[8] -- return address
+ // - esp[12] -- vector
+ // - esp[16] -- slot
+ // - esp[20] -- value
//
- // Required stack layout for handler call:
+ // Required stack layout for handler call (see StoreTransitionDescriptor):
// - esp[0] -- return address
- // - receiver, key, value, map, vector in registers.
- // - handler and slot in virtual registers.
- __ mov(Operand::StaticVariable(virtual_slot), slot);
+ // - esp[4] -- vector
+ // - esp[8] -- slot
+ // - esp[12] -- value
+ // - receiver, key, map, handler in registers.
__ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize + 2 * kPointerSize));
__ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
- __ mov(Operand::StaticVariable(virtual_register), feedback);
__ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
// The weak cell may have been cleared.
__ JumpIfSmi(cached_map, &pop_and_miss);
- DCHECK(!cached_map.is(VectorStoreTransitionDescriptor::MapRegister()));
- __ mov(VectorStoreTransitionDescriptor::MapRegister(), cached_map);
+ DCHECK(!cached_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ mov(StoreTransitionDescriptor::MapRegister(), cached_map);
- // Pop key into place.
+ // Call store transition handler using StoreTransitionDescriptor calling
+ // convention.
__ pop(key);
- __ pop(vector);
__ pop(receiver);
- __ pop(value);
- __ jmp(Operand::StaticVariable(virtual_register));
+ // Ensure that the transition handler we are going to call has the same
+ // number of stack arguments which means that we don't have to adapt them
+ // before the call.
+ STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+ STATIC_ASSERT(StoreTransitionDescriptor::kStackArgumentsCount == 3);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
+ StoreWithVectorDescriptor::kValue ==
+ StoreTransitionDescriptor::kParameterCount -
+ StoreTransitionDescriptor::kValue);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
+ StoreWithVectorDescriptor::kSlot ==
+ StoreTransitionDescriptor::kParameterCount -
+ StoreTransitionDescriptor::kSlot);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
+ StoreWithVectorDescriptor::kVector ==
+ StoreTransitionDescriptor::kParameterCount -
+ StoreTransitionDescriptor::kVector);
+ __ jmp(feedback);
__ bind(&prepare_next);
__ add(counter, Immediate(Smi::FromInt(3)));
@@ -3669,7 +3372,6 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
// We exhausted our array of map handler pairs.
__ bind(&pop_and_miss);
__ pop(key);
- __ pop(vector);
__ pop(receiver);
__ jmp(miss);
@@ -3686,7 +3388,26 @@ void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register slot = StoreWithVectorDescriptor::SlotRegister(); // edi
Label miss;
- __ push(value);
+ if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
+ // Current stack layout:
+ // - esp[8] -- value
+ // - esp[4] -- slot
+ // - esp[0] -- return address
+ STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
+ STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
+ if (in_frame) {
+ __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
+ // If the vector is not on the stack, then insert the vector beneath
+ // return address in order to prepare for calling handler with
+ // StoreWithVector calling convention.
+ __ push(Operand(esp, 0));
+ __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
+ __ RecordComment("]");
+ } else {
+ __ mov(vector, Operand(esp, 1 * kPointerSize));
+ }
+ __ mov(slot, Operand(esp, 2 * kPointerSize));
+ }
Register scratch = value;
__ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
@@ -3711,8 +3432,6 @@ void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &try_poly_name);
- __ pop(value);
-
Handle<Code> megamorphic_stub =
KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
@@ -3729,7 +3448,6 @@ void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
&miss);
__ bind(&miss);
- __ pop(value);
KeyedStoreIC::GenerateMiss(masm);
}
@@ -4358,7 +4076,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
__ j(greater, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -4745,7 +4463,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
- __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
__ j(greater, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index 4ef88e87dc..85b26ca1b0 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -39,19 +39,11 @@ const Register StoreDescriptor::SlotRegister() { return edi; }
const Register StoreWithVectorDescriptor::VectorRegister() { return ebx; }
-const Register VectorStoreTransitionDescriptor::SlotRegister() {
- return no_reg;
-}
-
-
-const Register VectorStoreTransitionDescriptor::VectorRegister() { return ebx; }
-
+const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return edi; }
-
-
-const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
+const Register StoreTransitionDescriptor::VectorRegister() { return ebx; }
+const Register StoreTransitionDescriptor::MapRegister() { return edi; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
@@ -363,7 +355,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
edi, // callee
@@ -398,7 +390,19 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
eax, // argument count (not including receiver)
edx, // new target
edi, // constructor
- ebx, // address of first argument
+ ebx, // allocation site feedback
+ ecx, // address of first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ eax, // argument count (not including receiver)
+ edx, // target to the call. It is checked to be Array function.
+ ebx, // allocation site feedback
+ ecx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index 9ffbf9f34b..dafe985ff8 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -167,9 +167,8 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
Label* condition_met,
Label::Distance distance) {
- const int mask =
- (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
- CheckPageFlag(object, scratch, mask, cc, condition_met, distance);
+ CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc,
+ condition_met, distance);
}
@@ -1487,7 +1486,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index 13988aee67..bdd3c037e5 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -787,6 +787,24 @@ class MacroAssembler: public Assembler {
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+ // below the caller's sp (on x87 it's at least return address).
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 1) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ DCHECK_LT(parameter_index, Descriptor::kParameterCount);
+ DCHECK_LE(Descriptor::kParameterCount - Descriptor::kStackArgumentsCount,
+ parameter_index);
+ int offset = (Descriptor::kParameterCount - parameter_index - 1 +
+ sp_to_ra_offset_in_words) *
+ kPointerSize;
+ mov(reg, Operand(esp, offset));
+ }
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the esp register.
void Drop(int element_count);
diff --git a/deps/v8/src/zone/accounting-allocator.cc b/deps/v8/src/zone/accounting-allocator.cc
new file mode 100644
index 0000000000..663ea321a4
--- /dev/null
+++ b/deps/v8/src/zone/accounting-allocator.cc
@@ -0,0 +1,45 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/zone/accounting-allocator.h"
+
+#include <cstdlib>
+
+#if V8_LIBC_BIONIC
+#include <malloc.h> // NOLINT
+#endif
+
+namespace v8 {
+namespace internal {
+
+Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
+ void* memory = malloc(bytes);
+ if (memory) {
+ base::AtomicWord current =
+ base::NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
+ base::AtomicWord max = base::NoBarrier_Load(&max_memory_usage_);
+ while (current > max) {
+ max = base::NoBarrier_CompareAndSwap(&max_memory_usage_, max, current);
+ }
+ }
+ return reinterpret_cast<Segment*>(memory);
+}
+
+void AccountingAllocator::FreeSegment(Segment* memory) {
+ base::NoBarrier_AtomicIncrement(
+ &current_memory_usage_, -static_cast<base::AtomicWord>(memory->size()));
+ memory->ZapHeader();
+ free(memory);
+}
+
+size_t AccountingAllocator::GetCurrentMemoryUsage() const {
+ return base::NoBarrier_Load(&current_memory_usage_);
+}
+
+size_t AccountingAllocator::GetMaxMemoryUsage() const {
+ return base::NoBarrier_Load(&max_memory_usage_);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/zone/accounting-allocator.h b/deps/v8/src/zone/accounting-allocator.h
new file mode 100644
index 0000000000..31016a5018
--- /dev/null
+++ b/deps/v8/src/zone/accounting-allocator.h
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ZONE_ACCOUNTING_ALLOCATOR_H_
+#define V8_ZONE_ACCOUNTING_ALLOCATOR_H_
+
+#include "include/v8-platform.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/atomicops.h"
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
+#include "src/base/platform/time.h"
+#include "src/zone/zone-segment.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT_PRIVATE AccountingAllocator {
+ public:
+ AccountingAllocator() = default;
+ virtual ~AccountingAllocator() = default;
+
+ virtual Segment* AllocateSegment(size_t bytes);
+ virtual void FreeSegment(Segment* memory);
+
+ size_t GetCurrentMemoryUsage() const;
+ size_t GetMaxMemoryUsage() const;
+
+ private:
+ base::AtomicWord current_memory_usage_ = 0;
+ base::AtomicWord max_memory_usage_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ZONE_ACCOUNTING_ALLOCATOR_H_
diff --git a/deps/v8/src/zone-allocator.h b/deps/v8/src/zone/zone-allocator.h
index f46151ebc3..8370d73e49 100644
--- a/deps/v8/src/zone-allocator.h
+++ b/deps/v8/src/zone/zone-allocator.h
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ZONE_ALLOCATOR_H_
-#define V8_ZONE_ALLOCATOR_H_
-
+#ifndef V8_ZONE_ZONE_ALLOCATOR_H_
+#define V8_ZONE_ZONE_ALLOCATOR_H_
#include <limits>
-#include "src/zone.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
-template<typename T>
+template <typename T>
class zone_allocator {
public:
typedef T* pointer;
@@ -22,31 +21,34 @@ class zone_allocator {
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
- template<class O> struct rebind {
+ template <class O>
+ struct rebind {
typedef zone_allocator<O> other;
};
explicit zone_allocator(Zone* zone) throw() : zone_(zone) {}
explicit zone_allocator(const zone_allocator& other) throw()
: zone_(other.zone_) {}
- template<typename U> zone_allocator(const zone_allocator<U>& other) throw()
- : zone_(other.zone_) {}
- template<typename U> friend class zone_allocator;
+ template <typename U>
+ zone_allocator(const zone_allocator<U>& other) throw() : zone_(other.zone_) {}
+ template <typename U>
+ friend class zone_allocator;
- pointer address(reference x) const {return &x;}
- const_pointer address(const_reference x) const {return &x;}
+ pointer address(reference x) const { return &x; }
+ const_pointer address(const_reference x) const { return &x; }
pointer allocate(size_type n, const void* hint = 0) {
- return static_cast<pointer>(zone_->NewArray<value_type>(
- static_cast<int>(n)));
+ return static_cast<pointer>(
+ zone_->NewArray<value_type>(static_cast<int>(n)));
+ }
+ void deallocate(pointer p, size_type) { /* noop for Zones */
}
- void deallocate(pointer p, size_type) { /* noop for Zones */ }
size_type max_size() const throw() {
return std::numeric_limits<int>::max() / sizeof(value_type);
}
void construct(pointer p, const T& val) {
- new(static_cast<void*>(p)) T(val);
+ new (static_cast<void*>(p)) T(val);
}
void destroy(pointer p) { p->~T(); }
@@ -69,4 +71,4 @@ typedef zone_allocator<int> ZoneIntAllocator;
} // namespace internal
} // namespace v8
-#endif // V8_ZONE_ALLOCATOR_H_
+#endif // V8_ZONE_ZONE_ALLOCATOR_H_
diff --git a/deps/v8/src/zone-containers.h b/deps/v8/src/zone/zone-containers.h
index 79b168c37e..0aecd98e50 100644
--- a/deps/v8/src/zone-containers.h
+++ b/deps/v8/src/zone/zone-containers.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ZONE_CONTAINERS_H_
-#define V8_ZONE_CONTAINERS_H_
+#ifndef V8_SRC_ZONE_ZONE_CONTAINERS_H_
+#define V8_SRC_ZONE_ZONE_CONTAINERS_H_
#include <deque>
#include <list>
@@ -13,7 +13,7 @@
#include <stack>
#include <vector>
-#include "src/zone-allocator.h"
+#include "src/zone/zone-allocator.h"
namespace v8 {
namespace internal {
@@ -38,7 +38,6 @@ class ZoneVector : public std::vector<T, zone_allocator<T>> {
: std::vector<T, zone_allocator<T>>(size, def, zone_allocator<T>(zone)) {}
};
-
// A wrapper subclass std::deque to make it easy to construct one
// that uses a zone allocator.
template <typename T>
@@ -49,7 +48,6 @@ class ZoneDeque : public std::deque<T, zone_allocator<T>> {
: std::deque<T, zone_allocator<T>>(zone_allocator<T>(zone)) {}
};
-
// A wrapper subclass std::list to make it easy to construct one
// that uses a zone allocator.
// TODO(mstarzinger): This should be renamed to ZoneList once we got rid of our
@@ -62,7 +60,6 @@ class ZoneLinkedList : public std::list<T, zone_allocator<T>> {
: std::list<T, zone_allocator<T>>(zone_allocator<T>(zone)) {}
};
-
// A wrapper subclass std::priority_queue to make it easy to construct one
// that uses a zone allocator.
template <typename T, typename Compare = std::less<T>>
@@ -75,7 +72,6 @@ class ZonePriorityQueue
ZoneVector<T>(zone)) {}
};
-
// A wrapper subclass for std::queue to make it easy to construct one
// that uses a zone allocator.
template <typename T>
@@ -86,7 +82,6 @@ class ZoneQueue : public std::queue<T, ZoneDeque<T>> {
: std::queue<T, ZoneDeque<T>>(ZoneDeque<T>(zone)) {}
};
-
// A wrapper subclass for std::stack to make it easy to construct one that uses
// a zone allocator.
template <typename T>
@@ -97,7 +92,6 @@ class ZoneStack : public std::stack<T, ZoneDeque<T>> {
: std::stack<T, ZoneDeque<T>>(ZoneDeque<T>(zone)) {}
};
-
// A wrapper subclass for std::set to make it easy to construct one that uses
// a zone allocator.
template <typename K, typename Compare = std::less<K>>
@@ -109,7 +103,6 @@ class ZoneSet : public std::set<K, Compare, zone_allocator<K>> {
zone_allocator<K>(zone)) {}
};
-
// A wrapper subclass for std::map to make it easy to construct one that uses
// a zone allocator.
template <typename K, typename V, typename Compare = std::less<K>>
@@ -122,6 +115,18 @@ class ZoneMap
Compare(), zone_allocator<std::pair<const K, V>>(zone)) {}
};
+// A wrapper subclass for std::multimap to make it easy to construct one that
+// uses a zone allocator.
+template <typename K, typename V, typename Compare = std::less<K>>
+class ZoneMultimap
+ : public std::multimap<K, V, Compare,
+ zone_allocator<std::pair<const K, V>>> {
+ public:
+ // Constructs an empty multimap.
+ explicit ZoneMultimap(Zone* zone)
+ : std::multimap<K, V, Compare, zone_allocator<std::pair<const K, V>>>(
+ Compare(), zone_allocator<std::pair<const K, V>>(zone)) {}
+};
// Typedefs to shorten commonly used vectors.
typedef ZoneVector<bool> BoolVector;
@@ -130,4 +135,4 @@ typedef ZoneVector<int> IntVector;
} // namespace internal
} // namespace v8
-#endif // V8_ZONE_CONTAINERS_H_
+#endif // V8_SRC_ZONE_ZONE_CONTAINERS_H_
diff --git a/deps/v8/src/zone/zone-segment.cc b/deps/v8/src/zone/zone-segment.cc
new file mode 100644
index 0000000000..f63b530667
--- /dev/null
+++ b/deps/v8/src/zone/zone-segment.cc
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/zone/zone-segment.h"
+
+namespace v8 {
+namespace internal {
+
+void Segment::ZapContents() {
+#ifdef DEBUG
+ memset(start(), kZapDeadByte, capacity());
+#endif
+}
+
+void Segment::ZapHeader() {
+#ifdef DEBUG
+ memset(this, kZapDeadByte, sizeof(Segment));
+#endif
+}
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/zone/zone-segment.h b/deps/v8/src/zone/zone-segment.h
new file mode 100644
index 0000000000..d37cf5648d
--- /dev/null
+++ b/deps/v8/src/zone/zone-segment.h
@@ -0,0 +1,61 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ZONE_ZONE_SEGMENT_H_
+#define V8_ZONE_ZONE_SEGMENT_H_
+
+#include "src/v8.h"
+
+// Segments represent chunks of memory: They have starting address
+// (encoded in the this pointer) and a size in bytes. Segments are
+// chained together forming a LIFO structure with the newest segment
+// available as segment_head_. Segments are allocated using malloc()
+// and de-allocated using free().
+namespace v8 {
+namespace internal {
+
+// Forward declaration
+class Zone;
+
+class Segment {
+ public:
+ void Initialize(Segment* next, size_t size, Zone* zone) {
+ next_ = next;
+ size_ = size;
+ zone_ = zone;
+ }
+
+ Zone* zone() const { return zone_; }
+ void set_zone(Zone* const zone) { zone_ = zone; }
+
+ Segment* next() const { return next_; }
+ void set_next(Segment* const next) { next_ = next; }
+
+ size_t size() const { return size_; }
+ size_t capacity() const { return size_ - sizeof(Segment); }
+
+ Address start() const { return address(sizeof(Segment)); }
+ Address end() const { return address(size_); }
+
+ // Zap the contents of the segment (but not the header).
+ void ZapContents();
+ // Zaps the header and makes the segment unusable this way.
+ void ZapHeader();
+
+ private:
+#ifdef DEBUG
+ // Constant byte value used for zapping dead memory in debug mode.
+ static const unsigned char kZapDeadByte = 0xcd;
+#endif
+ // Computes the address of the nth byte in this segment.
+ Address address(size_t n) const { return Address(this) + n; }
+
+ Zone* zone_;
+ Segment* next_;
+ size_t size_;
+};
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ZONE_ZONE_SEGMENT_H_
diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone/zone.cc
index a10b63612e..4272e17fd2 100644
--- a/deps/v8/src/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/zone.h"
+#include "src/zone/zone.h"
#include <cstring>
@@ -41,38 +41,7 @@ const size_t kASanRedzoneBytes = 0;
} // namespace
-
-// Segments represent chunks of memory: They have starting address
-// (encoded in the this pointer) and a size in bytes. Segments are
-// chained together forming a LIFO structure with the newest segment
-// available as segment_head_. Segments are allocated using malloc()
-// and de-allocated using free().
-
-class Segment {
- public:
- void Initialize(Segment* next, size_t size) {
- next_ = next;
- size_ = size;
- }
-
- Segment* next() const { return next_; }
- void clear_next() { next_ = nullptr; }
-
- size_t size() const { return size_; }
- size_t capacity() const { return size_ - sizeof(Segment); }
-
- Address start() const { return address(sizeof(Segment)); }
- Address end() const { return address(size_); }
-
- private:
- // Computes the address of the nth byte in this segment.
- Address address(size_t n) const { return Address(this) + n; }
-
- Segment* next_;
- size_t size_;
-};
-
-Zone::Zone(base::AccountingAllocator* allocator)
+Zone::Zone(AccountingAllocator* allocator)
: allocation_size_(0),
segment_bytes_allocated_(0),
position_(0),
@@ -87,7 +56,6 @@ Zone::~Zone() {
DCHECK(segment_bytes_allocated_ == 0);
}
-
void* Zone::New(size_t size) {
// Round up the requested size to fit the alignment.
size = RoundUp(size, kAlignment);
@@ -123,13 +91,7 @@ void* Zone::New(size_t size) {
return reinterpret_cast<void*>(result);
}
-
void Zone::DeleteAll() {
-#ifdef DEBUG
- // Constant byte value used for zapping dead memory in debug mode.
- static const unsigned char kZapDeadByte = 0xcd;
-#endif
-
// Find a segment with a suitable size to keep around.
Segment* keep = nullptr;
// Traverse the chained list of segments, zapping (in debug mode)
@@ -139,16 +101,16 @@ void Zone::DeleteAll() {
if (!keep && current->size() <= kMaximumKeptSegmentSize) {
// Unlink the segment we wish to keep from the list.
keep = current;
- keep->clear_next();
+ keep->set_next(nullptr);
} else {
size_t size = current->size();
#ifdef DEBUG
// Un-poison first so the zapping doesn't trigger ASan complaints.
ASAN_UNPOISON_MEMORY_REGION(current, size);
- // Zap the entire current segment (including the header).
- memset(current, kZapDeadByte, size);
#endif
- DeleteSegment(current, size);
+ current->ZapContents();
+ segment_bytes_allocated_ -= size;
+ allocator_->FreeSegment(current);
}
current = next;
}
@@ -163,10 +125,7 @@ void Zone::DeleteAll() {
limit_ = keep->end();
// Un-poison so we can re-use the segment later.
ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
-#ifdef DEBUG
- // Zap the contents of the kept segment (but not the header).
- memset(start, kZapDeadByte, keep->capacity());
-#endif
+ keep->ZapContents();
} else {
position_ = limit_ = 0;
}
@@ -176,50 +135,35 @@ void Zone::DeleteAll() {
segment_head_ = keep;
}
-
void Zone::DeleteKeptSegment() {
-#ifdef DEBUG
- // Constant byte value used for zapping dead memory in debug mode.
- static const unsigned char kZapDeadByte = 0xcd;
-#endif
-
DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr);
if (segment_head_ != nullptr) {
size_t size = segment_head_->size();
#ifdef DEBUG
// Un-poison first so the zapping doesn't trigger ASan complaints.
ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
- // Zap the entire kept segment (including the header).
- memset(segment_head_, kZapDeadByte, size);
#endif
- DeleteSegment(segment_head_, size);
+ segment_head_->ZapContents();
+ segment_bytes_allocated_ -= size;
+ allocator_->FreeSegment(segment_head_);
segment_head_ = nullptr;
}
DCHECK(segment_bytes_allocated_ == 0);
}
-
// Creates a new segment, sets it size, and pushes it to the front
// of the segment chain. Returns the new segment.
Segment* Zone::NewSegment(size_t size) {
- Segment* result = reinterpret_cast<Segment*>(allocator_->Allocate(size));
+ Segment* result = allocator_->AllocateSegment(size);
segment_bytes_allocated_ += size;
if (result != nullptr) {
- result->Initialize(segment_head_, size);
+ result->Initialize(segment_head_, size, this);
segment_head_ = result;
}
return result;
}
-
-// Deletes the given segment. Does not touch the segment chain.
-void Zone::DeleteSegment(Segment* segment, size_t size) {
- segment_bytes_allocated_ -= size;
- allocator_->Free(segment, size);
-}
-
-
Address Zone::NewExpand(size_t size) {
// Make sure the requested size is already properly aligned and that
// there isn't enough room in the Zone to satisfy the request.
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone/zone.h
index 29055cb70d..9ff259e790 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -2,25 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ZONE_H_
-#define V8_ZONE_H_
+#ifndef V8_ZONE_ZONE_H_
+#define V8_ZONE_ZONE_H_
#include <limits>
-#include "src/base/accounting-allocator.h"
#include "src/base/hashmap.h"
#include "src/base/logging.h"
#include "src/globals.h"
#include "src/list.h"
#include "src/splay-tree.h"
+#include "src/zone/accounting-allocator.h"
namespace v8 {
namespace internal {
-// Forward declarations.
-class Segment;
-
-
// The Zone supports very fast allocation of small chunks of
// memory. The chunks cannot be deallocated individually, but instead
// the Zone supports deallocating all chunks in one fast
@@ -33,9 +29,9 @@ class Segment;
//
// Note: The implementation is inherently not thread safe. Do not use
// from multi-threaded code.
-class Zone final {
+class V8_EXPORT_PRIVATE Zone final {
public:
- explicit Zone(base::AccountingAllocator* allocator);
+ explicit Zone(AccountingAllocator* allocator);
~Zone();
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
@@ -64,12 +60,12 @@ class Zone final {
size_t allocation_size() const { return allocation_size_; }
- base::AccountingAllocator* allocator() const { return allocator_; }
+ AccountingAllocator* allocator() const { return allocator_; }
private:
- // All pointers returned from New() have this alignment. In addition, if the
- // object being allocated has a size that is divisible by 8 then its alignment
- // will be 8. ASan requires 8-byte alignment.
+// All pointers returned from New() have this alignment. In addition, if the
+// object being allocated has a size that is divisible by 8 then its alignment
+// will be 8. ASan requires 8-byte alignment.
#ifdef V8_USE_ADDRESS_SANITIZER
static const size_t kAlignment = 8;
STATIC_ASSERT(kPointerSize <= 8);
@@ -107,21 +103,17 @@ class Zone final {
// of the segment chain. Returns the new segment.
inline Segment* NewSegment(size_t size);
- // Deletes the given segment. Does not touch the segment chain.
- inline void DeleteSegment(Segment* segment, size_t size);
-
// The free region in the current (front) segment is represented as
// the half-open interval [position, limit). The 'position' variable
// is guaranteed to be aligned as dictated by kAlignment.
Address position_;
Address limit_;
- base::AccountingAllocator* allocator_;
+ AccountingAllocator* allocator_;
Segment* segment_head_;
};
-
// ZoneObject is an abstraction that helps define classes of objects
// allocated in the Zone. Use it as a base class; see ast.h.
class ZoneObject {
@@ -141,12 +133,11 @@ class ZoneObject {
void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
};
-
// The ZoneScope is used to automatically call DeleteAll() on a
// Zone when the ZoneScope is destroyed (i.e. goes out of scope)
class ZoneScope final {
public:
- explicit ZoneScope(Zone* zone) : zone_(zone) { }
+ explicit ZoneScope(Zone* zone) : zone_(zone) {}
~ZoneScope() { zone_->DeleteAll(); }
Zone* zone() const { return zone_; }
@@ -155,12 +146,11 @@ class ZoneScope final {
Zone* zone_;
};
-
// The ZoneAllocationPolicy is used to specialize generic data
// structures to allocate themselves and their elements in the Zone.
class ZoneAllocationPolicy final {
public:
- explicit ZoneAllocationPolicy(Zone* zone) : zone_(zone) { }
+ explicit ZoneAllocationPolicy(Zone* zone) : zone_(zone) {}
void* New(size_t size) { return zone()->New(size); }
static void Delete(void* pointer) {}
Zone* zone() const { return zone_; }
@@ -169,7 +159,6 @@ class ZoneAllocationPolicy final {
Zone* zone_;
};
-
// ZoneLists are growable lists with constant-time access to the
// elements. The list itself and all its elements are allocated in the
// Zone. ZoneLists cannot be deleted individually; you can delete all
@@ -180,7 +169,7 @@ class ZoneList final : public List<T, ZoneAllocationPolicy> {
// Construct a new ZoneList with the given capacity; the length is
// always zero. The capacity must be non-negative.
ZoneList(int capacity, Zone* zone)
- : List<T, ZoneAllocationPolicy>(capacity, ZoneAllocationPolicy(zone)) { }
+ : List<T, ZoneAllocationPolicy>(capacity, ZoneAllocationPolicy(zone)) {}
void* operator new(size_t size, Zone* zone) { return zone->New(size); }
@@ -222,7 +211,6 @@ class ZoneList final : public List<T, ZoneAllocationPolicy> {
void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
};
-
// A zone splay tree. The config type parameter encapsulates the
// different configurations of a concrete splay tree (see splay-tree.h).
// The tree itself and all its elements are allocated in the Zone.
@@ -244,9 +232,12 @@ class ZoneSplayTree final : public SplayTree<Config, ZoneAllocationPolicy> {
void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
};
-typedef base::TemplateHashMapImpl<ZoneAllocationPolicy> ZoneHashMap;
+typedef base::PointerTemplateHashMapImpl<ZoneAllocationPolicy> ZoneHashMap;
+
+typedef base::CustomMatcherTemplateHashMapImpl<ZoneAllocationPolicy>
+ CustomMatcherZoneHashMap;
} // namespace internal
} // namespace v8
-#endif // V8_ZONE_H_
+#endif // V8_ZONE_ZONE_H_
diff --git a/deps/v8/test/BUILD.gn b/deps/v8/test/BUILD.gn
index 36ca7a2049..e24615a443 100644
--- a/deps/v8/test/BUILD.gn
+++ b/deps/v8/test/BUILD.gn
@@ -3,6 +3,7 @@
# found in the LICENSE file.
import("../gni/isolate.gni")
+import("//build_overrides/v8.gni")
group("gn_all") {
testonly = true
@@ -20,6 +21,10 @@ group("gn_all") {
]
}
+ if (v8_enable_inspector_override) {
+ deps += [ "inspector:inspector-test" ]
+ }
+
if (v8_test_isolation_mode != "noop") {
deps += [
":benchmarks_run",
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index db94e0c7ff..d2918d90ae 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -2,42 +2,307 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# The sources are kept automatically in sync with cctest.gyp.
-
import("../../gni/v8.gni")
-gypi_values = exec_script("//build/gypi_to_gn.py",
- [ rebase_path("cctest.gyp") ],
- "scope",
- [ "cctest.gyp" ])
-
v8_executable("cctest") {
testonly = true
- sources = [ "$target_gen_dir/resources.cc" ] + gypi_values.cctest_sources
+ sources = [
+ "$target_gen_dir/resources.cc",
+
+ ### gcmole(all) ###
+ "asmjs/test-asm-typer.cc",
+ "ast-types-fuzz.h",
+ "cctest.cc",
+ "cctest.h",
+ "compiler/c-signature.h",
+ "compiler/call-tester.h",
+ "compiler/code-assembler-tester.h",
+ "compiler/codegen-tester.cc",
+ "compiler/codegen-tester.h",
+ "compiler/function-tester.cc",
+ "compiler/function-tester.h",
+ "compiler/graph-builder-tester.h",
+ "compiler/test-basic-block-profiler.cc",
+ "compiler/test-branch-combine.cc",
+ "compiler/test-code-assembler.cc",
+ "compiler/test-gap-resolver.cc",
+ "compiler/test-graph-visualizer.cc",
+ "compiler/test-instruction.cc",
+ "compiler/test-js-constant-cache.cc",
+ "compiler/test-js-context-specialization.cc",
+ "compiler/test-js-typed-lowering.cc",
+ "compiler/test-jump-threading.cc",
+ "compiler/test-linkage.cc",
+ "compiler/test-loop-analysis.cc",
+ "compiler/test-loop-assignment-analysis.cc",
+ "compiler/test-machine-operator-reducer.cc",
+ "compiler/test-multiple-return.cc",
+ "compiler/test-node.cc",
+ "compiler/test-operator.cc",
+ "compiler/test-osr.cc",
+ "compiler/test-representation-change.cc",
+ "compiler/test-run-bytecode-graph-builder.cc",
+ "compiler/test-run-calls-to-external-references.cc",
+ "compiler/test-run-deopt.cc",
+ "compiler/test-run-inlining.cc",
+ "compiler/test-run-intrinsics.cc",
+ "compiler/test-run-jsbranches.cc",
+ "compiler/test-run-jscalls.cc",
+ "compiler/test-run-jsexceptions.cc",
+ "compiler/test-run-jsobjects.cc",
+ "compiler/test-run-jsops.cc",
+ "compiler/test-run-load-store.cc",
+ "compiler/test-run-machops.cc",
+ "compiler/test-run-native-calls.cc",
+ "compiler/test-run-stackcheck.cc",
+ "compiler/test-run-stubs.cc",
+ "compiler/test-run-unwinding-info.cc",
+ "compiler/test-run-variables.cc",
+ "compiler/test-run-wasm-machops.cc",
+ "compiler/value-helper.h",
+ "expression-type-collector-macros.h",
+ "gay-fixed.cc",
+ "gay-fixed.h",
+ "gay-precision.cc",
+ "gay-precision.h",
+ "gay-shortest.cc",
+ "gay-shortest.h",
+ "heap/heap-tester.h",
+ "heap/heap-utils.cc",
+ "heap/heap-utils.h",
+ "heap/test-alloc.cc",
+ "heap/test-array-buffer-tracker.cc",
+ "heap/test-compaction.cc",
+ "heap/test-heap.cc",
+ "heap/test-incremental-marking.cc",
+ "heap/test-lab.cc",
+ "heap/test-mark-compact.cc",
+ "heap/test-page-promotion.cc",
+ "heap/test-spaces.cc",
+ "interpreter/bytecode-expectations-printer.cc",
+ "interpreter/bytecode-expectations-printer.h",
+ "interpreter/interpreter-tester.cc",
+ "interpreter/interpreter-tester.h",
+ "interpreter/source-position-matcher.cc",
+ "interpreter/source-position-matcher.h",
+ "interpreter/test-bytecode-generator.cc",
+ "interpreter/test-interpreter-intrinsics.cc",
+ "interpreter/test-interpreter.cc",
+ "interpreter/test-source-positions.cc",
+ "libplatform/test-tracing.cc",
+ "libsampler/test-sampler.cc",
+ "parsing/test-scanner-streams.cc",
+ "parsing/test-scanner.cc",
+ "print-extension.cc",
+ "print-extension.h",
+ "profiler-extension.cc",
+ "profiler-extension.h",
+ "test-access-checks.cc",
+ "test-accessors.cc",
+ "test-api-accessors.cc",
+ "test-api-fast-accessor-builder.cc",
+ "test-api-interceptors.cc",
+ "test-api.cc",
+ "test-api.h",
+ "test-array-list.cc",
+ "test-ast-types.cc",
+ "test-ast.cc",
+ "test-atomicops.cc",
+ "test-bignum-dtoa.cc",
+ "test-bignum.cc",
+ "test-bit-vector.cc",
+ "test-circular-queue.cc",
+ "test-code-cache.cc",
+ "test-code-layout.cc",
+ "test-code-stub-assembler.cc",
+ "test-compiler.cc",
+ "test-constantpool.cc",
+ "test-conversions.cc",
+ "test-cpu-profiler.cc",
+ "test-date.cc",
+ "test-debug.cc",
+ "test-decls.cc",
+ "test-deoptimization.cc",
+ "test-dictionary.cc",
+ "test-diy-fp.cc",
+ "test-double.cc",
+ "test-dtoa.cc",
+ "test-elements-kind.cc",
+ "test-fast-dtoa.cc",
+ "test-feedback-vector.cc",
+ "test-feedback-vector.h",
+ "test-field-type-tracking.cc",
+ "test-fixed-dtoa.cc",
+ "test-flags.cc",
+ "test-func-name-inference.cc",
+ "test-global-handles.cc",
+ "test-global-object.cc",
+ "test-hashing.cc",
+ "test-hashmap.cc",
+ "test-heap-profiler.cc",
+ "test-hydrogen-types.cc",
+ "test-identity-map.cc",
+ "test-inobject-slack-tracking.cc",
+ "test-list.cc",
+ "test-liveedit.cc",
+ "test-lockers.cc",
+ "test-log.cc",
+ "test-mementos.cc",
+ "test-modules.cc",
+ "test-object.cc",
+ "test-parsing.cc",
+ "test-platform.cc",
+ "test-profile-generator.cc",
+ "test-random-number-generator.cc",
+ "test-receiver-check-hidden-prototype.cc",
+ "test-regexp.cc",
+ "test-representation.cc",
+ "test-sampler-api.cc",
+ "test-serialize.cc",
+ "test-simd.cc",
+ "test-strings.cc",
+ "test-strtod.cc",
+ "test-symbols.cc",
+ "test-thread-termination.cc",
+ "test-threads.cc",
+ "test-trace-event.cc",
+ "test-transitions.cc",
+ "test-typedarrays.cc",
+ "test-types.cc",
+ "test-unbound-queue.cc",
+ "test-unboxed-doubles.cc",
+ "test-unique.cc",
+ "test-unscopables-hidden-prototype.cc",
+ "test-usecounters.cc",
+ "test-utils.cc",
+ "test-version.cc",
+ "test-weakmaps.cc",
+ "test-weaksets.cc",
+ "trace-extension.cc",
+ "trace-extension.h",
+ "types-fuzz.h",
+ "wasm/test-run-wasm-64.cc",
+ "wasm/test-run-wasm-asmjs.cc",
+ "wasm/test-run-wasm-interpreter.cc",
+ "wasm/test-run-wasm-js.cc",
+ "wasm/test-run-wasm-module.cc",
+ "wasm/test-run-wasm-relocation.cc",
+ "wasm/test-run-wasm.cc",
+ "wasm/test-wasm-function-name-table.cc",
+ "wasm/test-wasm-stack.cc",
+ "wasm/test-wasm-trap-position.cc",
+ "wasm/wasm-run-utils.h",
+ ]
if (v8_current_cpu == "arm") {
- sources += gypi_values.cctest_sources_arm
+ sources += [ ### gcmole(arch:arm) ###
+ "test-assembler-arm.cc",
+ "test-code-stubs-arm.cc",
+ "test-code-stubs.cc",
+ "test-code-stubs.h",
+ "test-disasm-arm.cc",
+ "test-macro-assembler-arm.cc",
+ "test-run-wasm-relocation-arm.cc",
+ ]
} else if (v8_current_cpu == "arm64") {
- sources += gypi_values.cctest_sources_arm64
+ sources += [ ### gcmole(arch:arm64) ###
+ "test-assembler-arm64.cc",
+ "test-code-stubs-arm64.cc",
+ "test-code-stubs.cc",
+ "test-code-stubs.h",
+ "test-disasm-arm64.cc",
+ "test-fuzz-arm64.cc",
+ "test-javascript-arm64.cc",
+ "test-js-arm64-variables.cc",
+ "test-run-wasm-relocation-arm64.cc",
+ "test-utils-arm64.cc",
+ "test-utils-arm64.h",
+ ]
} else if (v8_current_cpu == "x86") {
- sources += gypi_values.cctest_sources_ia32
+ sources += [ ### gcmole(arch:ia32) ###
+ "test-assembler-ia32.cc",
+ "test-code-stubs-ia32.cc",
+ "test-code-stubs.cc",
+ "test-code-stubs.h",
+ "test-disasm-ia32.cc",
+ "test-log-stack-tracer.cc",
+ "test-macro-assembler-ia32.cc",
+ "test-run-wasm-relocation-ia32.cc",
+ ]
} else if (v8_current_cpu == "mips") {
- sources += gypi_values.cctest_sources_mips
+ sources += [ ### gcmole(arch:mips) ###
+ "test-assembler-mips.cc",
+ "test-code-stubs-mips.cc",
+ "test-code-stubs.cc",
+ "test-code-stubs.h",
+ "test-disasm-mips.cc",
+ "test-macro-assembler-mips.cc",
+ ]
} else if (v8_current_cpu == "mipsel") {
- sources += gypi_values.cctest_sources_mipsel
+ sources += [ ### gcmole(arch:mipsel) ###
+ "test-assembler-mips.cc",
+ "test-code-stubs-mips.cc",
+ "test-code-stubs.cc",
+ "test-code-stubs.h",
+ "test-disasm-mips.cc",
+ "test-macro-assembler-mips.cc",
+ ]
} else if (v8_current_cpu == "mips64") {
- sources += gypi_values.cctest_sources_mips64
+ sources += [ ### gcmole(arch:mips64) ###
+ "test-assembler-mips64.cc",
+ "test-code-stubs-mips64.cc",
+ "test-code-stubs.cc",
+ "test-code-stubs.h",
+ "test-disasm-mips64.cc",
+ "test-macro-assembler-mips64.cc",
+ ]
} else if (v8_current_cpu == "mips64el") {
- sources += gypi_values.cctest_sources_mips64el
+ sources += [ ### gcmole(arch:mips64el) ###
+ "test-assembler-mips64.cc",
+ "test-code-stubs-mips64.cc",
+ "test-code-stubs.cc",
+ "test-code-stubs.h",
+ "test-disasm-mips64.cc",
+ "test-macro-assembler-mips64.cc",
+ ]
} else if (v8_current_cpu == "x64") {
- sources += gypi_values.cctest_sources_x64
+ sources += [ ### gcmole(arch:x64) ###
+ "test-assembler-x64.cc",
+ "test-code-stubs-x64.cc",
+ "test-code-stubs.cc",
+ "test-code-stubs.h",
+ "test-disasm-x64.cc",
+ "test-log-stack-tracer.cc",
+ "test-macro-assembler-x64.cc",
+ "test-run-wasm-relocation-x64.cc",
+ "wasm/test-run-wasm-simd.cc",
+ ]
} else if (v8_current_cpu == "x87") {
- sources += gypi_values.cctest_sources_x87
+ sources += [ ### gcmole(arch:x87) ###
+ "test-assembler-x87.cc",
+ "test-code-stubs-x87.cc",
+ "test-code-stubs.cc",
+ "test-code-stubs.h",
+ "test-disasm-x87.cc",
+ "test-log-stack-tracer.cc",
+ "test-macro-assembler-x87.cc",
+ "test-run-wasm-relocation-x87.cc",
+ ]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
- sources += gypi_values.cctest_sources_ppc
+ sources += [ ### gcmole(arch:ppc) ###
+ "test-assembler-ppc.cc",
+ "test-code-stubs.cc",
+ "test-code-stubs.h",
+ "test-disasm-ppc.cc",
+ ]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
- sources += gypi_values.cctest_sources_s390
+ sources += [ ### gcmole(arch:s390) ###
+ "test-assembler-s390.cc",
+ "test-code-stubs.cc",
+ "test-code-stubs.h",
+ "test-disasm-s390.cc",
+ ]
}
if (is_linux) {
@@ -60,6 +325,8 @@ v8_executable("cctest") {
deps = [
":resources",
"../..:v8_libplatform",
+ "../..:wasm_module_runner",
+ "../..:wasm_test_signatures",
"//build/config/sanitizers:deps",
"//build/win:default_exe_manifest",
]
diff --git a/deps/v8/test/cctest/asmjs/test-asm-typer.cc b/deps/v8/test/cctest/asmjs/test-asm-typer.cc
index dcb778533d..a44ecf9283 100644
--- a/deps/v8/test/cctest/asmjs/test-asm-typer.cc
+++ b/deps/v8/test/cctest/asmjs/test-asm-typer.cc
@@ -276,12 +276,13 @@ class AsmTyperHarnessBuilder {
private:
Variable* DeclareVariable(VariableName var_name) {
auto* name_ast_string = ast_value_factory_.GetOneByteString(var_name.name_);
+ ast_value_factory_.Internalize(isolate_);
return var_name.mode_ == DYNAMIC_GLOBAL
? outer_scope_->DeclareDynamicGlobal(name_ast_string,
- Variable::NORMAL)
+ NORMAL_VARIABLE)
: module_->scope()->DeclareLocal(name_ast_string, VAR,
kCreatedInitialized,
- Variable::NORMAL);
+ NORMAL_VARIABLE);
}
bool ValidateAllStatements(FunctionDeclaration* fun_decl) {
@@ -514,7 +515,6 @@ TEST(ErrorsInGlobalVariableDefinition) {
{"var v = __fround__(1.0);", "expected call fround(literal)"},
{"var v = fround(1.0, 1.0);", "expected call fround(literal)"},
{"var v = fround(not_fround);", "literal argument for call to fround"},
- {"var v = fround(1);", "literal argument to be a floating point"},
{"var v = stdlib.nan", "Invalid import"},
{"var v = stdlib.Math.nan", "Invalid import"},
{"var v = stdlib.Mathh.E", "Invalid import"},
diff --git a/deps/v8/test/cctest/ast-types-fuzz.h b/deps/v8/test/cctest/ast-types-fuzz.h
new file mode 100644
index 0000000000..ba6286d54a
--- /dev/null
+++ b/deps/v8/test/cctest/ast-types-fuzz.h
@@ -0,0 +1,327 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TEST_CCTEST_TYPES_H_
+#define V8_TEST_CCTEST_TYPES_H_
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class AstTypes {
+ public:
+ AstTypes(Zone* zone, Isolate* isolate, v8::base::RandomNumberGenerator* rng)
+ : zone_(zone), isolate_(isolate), rng_(rng) {
+#define DECLARE_TYPE(name, value) \
+ name = AstType::name(); \
+ types.push_back(name);
+ AST_PROPER_BITSET_TYPE_LIST(DECLARE_TYPE)
+#undef DECLARE_TYPE
+
+ SignedSmall = AstType::SignedSmall();
+ UnsignedSmall = AstType::UnsignedSmall();
+
+ object_map =
+ isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ array_map = isolate->factory()->NewMap(JS_ARRAY_TYPE, JSArray::kSize);
+ number_map =
+ isolate->factory()->NewMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
+ uninitialized_map = isolate->factory()->uninitialized_map();
+ ObjectClass = AstType::Class(object_map, zone);
+ ArrayClass = AstType::Class(array_map, zone);
+ NumberClass = AstType::Class(number_map, zone);
+ UninitializedClass = AstType::Class(uninitialized_map, zone);
+
+ maps.push_back(object_map);
+ maps.push_back(array_map);
+ maps.push_back(uninitialized_map);
+ for (MapVector::iterator it = maps.begin(); it != maps.end(); ++it) {
+ types.push_back(AstType::Class(*it, zone));
+ }
+
+ smi = handle(Smi::FromInt(666), isolate);
+ signed32 = isolate->factory()->NewHeapNumber(0x40000000);
+ object1 = isolate->factory()->NewJSObjectFromMap(object_map);
+ object2 = isolate->factory()->NewJSObjectFromMap(object_map);
+ array = isolate->factory()->NewJSArray(20);
+ uninitialized = isolate->factory()->uninitialized_value();
+ SmiConstant = AstType::Constant(smi, zone);
+ Signed32Constant = AstType::Constant(signed32, zone);
+
+ ObjectConstant1 = AstType::Constant(object1, zone);
+ ObjectConstant2 = AstType::Constant(object2, zone);
+ ArrayConstant = AstType::Constant(array, zone);
+ UninitializedConstant = AstType::Constant(uninitialized, zone);
+
+ values.push_back(smi);
+ values.push_back(signed32);
+ values.push_back(object1);
+ values.push_back(object2);
+ values.push_back(array);
+ values.push_back(uninitialized);
+ for (ValueVector::iterator it = values.begin(); it != values.end(); ++it) {
+ types.push_back(AstType::Constant(*it, zone));
+ }
+
+ integers.push_back(isolate->factory()->NewNumber(-V8_INFINITY));
+ integers.push_back(isolate->factory()->NewNumber(+V8_INFINITY));
+ integers.push_back(isolate->factory()->NewNumber(-rng_->NextInt(10)));
+ integers.push_back(isolate->factory()->NewNumber(+rng_->NextInt(10)));
+ for (int i = 0; i < 10; ++i) {
+ double x = rng_->NextInt();
+ integers.push_back(isolate->factory()->NewNumber(x));
+ x *= rng_->NextInt();
+ if (!IsMinusZero(x)) integers.push_back(isolate->factory()->NewNumber(x));
+ }
+
+ Integer = AstType::Range(-V8_INFINITY, +V8_INFINITY, zone);
+
+ NumberArray = AstType::Array(Number, zone);
+ StringArray = AstType::Array(String, zone);
+ AnyArray = AstType::Array(Any, zone);
+
+ SignedFunction1 = AstType::Function(SignedSmall, SignedSmall, zone);
+ NumberFunction1 = AstType::Function(Number, Number, zone);
+ NumberFunction2 = AstType::Function(Number, Number, Number, zone);
+ MethodFunction = AstType::Function(String, Object, 0, zone);
+
+ for (int i = 0; i < 30; ++i) {
+ types.push_back(Fuzz());
+ }
+ }
+
+ Handle<i::Map> object_map;
+ Handle<i::Map> array_map;
+ Handle<i::Map> number_map;
+ Handle<i::Map> uninitialized_map;
+
+ Handle<i::Smi> smi;
+ Handle<i::HeapNumber> signed32;
+ Handle<i::JSObject> object1;
+ Handle<i::JSObject> object2;
+ Handle<i::JSArray> array;
+ Handle<i::Oddball> uninitialized;
+
+#define DECLARE_TYPE(name, value) AstType* name;
+ AST_PROPER_BITSET_TYPE_LIST(DECLARE_TYPE)
+#undef DECLARE_TYPE
+
+#define DECLARE_TYPE(name, value) AstType* Mask##name##ForTesting;
+ AST_MASK_BITSET_TYPE_LIST(DECLARE_TYPE)
+#undef DECLARE_TYPE
+ AstType* SignedSmall;
+ AstType* UnsignedSmall;
+
+ AstType* ObjectClass;
+ AstType* ArrayClass;
+ AstType* NumberClass;
+ AstType* UninitializedClass;
+
+ AstType* SmiConstant;
+ AstType* Signed32Constant;
+ AstType* ObjectConstant1;
+ AstType* ObjectConstant2;
+ AstType* ArrayConstant;
+ AstType* UninitializedConstant;
+
+ AstType* Integer;
+
+ AstType* NumberArray;
+ AstType* StringArray;
+ AstType* AnyArray;
+
+ AstType* SignedFunction1;
+ AstType* NumberFunction1;
+ AstType* NumberFunction2;
+ AstType* MethodFunction;
+
+ typedef std::vector<AstType*> TypeVector;
+ typedef std::vector<Handle<i::Map> > MapVector;
+ typedef std::vector<Handle<i::Object> > ValueVector;
+
+ TypeVector types;
+ MapVector maps;
+ ValueVector values;
+ ValueVector integers; // "Integer" values used for range limits.
+
+ AstType* Of(Handle<i::Object> value) { return AstType::Of(value, zone_); }
+
+ AstType* NowOf(Handle<i::Object> value) {
+ return AstType::NowOf(value, zone_);
+ }
+
+ AstType* Class(Handle<i::Map> map) { return AstType::Class(map, zone_); }
+
+ AstType* Constant(Handle<i::Object> value) {
+ return AstType::Constant(value, zone_);
+ }
+
+ AstType* Range(double min, double max) {
+ return AstType::Range(min, max, zone_);
+ }
+
+ AstType* Context(AstType* outer) { return AstType::Context(outer, zone_); }
+
+ AstType* Array1(AstType* element) { return AstType::Array(element, zone_); }
+
+ AstType* Function0(AstType* result, AstType* receiver) {
+ return AstType::Function(result, receiver, 0, zone_);
+ }
+
+ AstType* Function1(AstType* result, AstType* receiver, AstType* arg) {
+ AstType* type = AstType::Function(result, receiver, 1, zone_);
+ type->AsFunction()->InitParameter(0, arg);
+ return type;
+ }
+
+ AstType* Function2(AstType* result, AstType* arg1, AstType* arg2) {
+ return AstType::Function(result, arg1, arg2, zone_);
+ }
+
+ AstType* Union(AstType* t1, AstType* t2) {
+ return AstType::Union(t1, t2, zone_);
+ }
+
+ AstType* Intersect(AstType* t1, AstType* t2) {
+ return AstType::Intersect(t1, t2, zone_);
+ }
+
+ AstType* Representation(AstType* t) {
+ return AstType::Representation(t, zone_);
+ }
+
+ AstType* Semantic(AstType* t) { return AstType::Semantic(t, zone_); }
+
+ AstType* Random() {
+ return types[rng_->NextInt(static_cast<int>(types.size()))];
+ }
+
+ AstType* Fuzz(int depth = 4) {
+ switch (rng_->NextInt(depth == 0 ? 3 : 20)) {
+ case 0: { // bitset
+#define COUNT_BITSET_TYPES(type, value) +1
+ int n = 0 AST_PROPER_BITSET_TYPE_LIST(COUNT_BITSET_TYPES);
+#undef COUNT_BITSET_TYPES
+ // Pick a bunch of named bitsets and return their intersection.
+ AstType* result = AstType::Any();
+ for (int i = 0, m = 1 + rng_->NextInt(3); i < m; ++i) {
+ int j = rng_->NextInt(n);
+#define PICK_BITSET_TYPE(type, value) \
+ if (j-- == 0) { \
+ AstType* tmp = AstType::Intersect(result, AstType::type(), zone_); \
+ if (tmp->Is(AstType::None()) && i != 0) { \
+ break; \
+ } else { \
+ result = tmp; \
+ continue; \
+ } \
+ }
+ AST_PROPER_BITSET_TYPE_LIST(PICK_BITSET_TYPE)
+#undef PICK_BITSET_TYPE
+ }
+ return result;
+ }
+ case 1: { // class
+ int i = rng_->NextInt(static_cast<int>(maps.size()));
+ return AstType::Class(maps[i], zone_);
+ }
+ case 2: { // constant
+ int i = rng_->NextInt(static_cast<int>(values.size()));
+ return AstType::Constant(values[i], zone_);
+ }
+ case 3: { // range
+ int i = rng_->NextInt(static_cast<int>(integers.size()));
+ int j = rng_->NextInt(static_cast<int>(integers.size()));
+ double min = integers[i]->Number();
+ double max = integers[j]->Number();
+ if (min > max) std::swap(min, max);
+ return AstType::Range(min, max, zone_);
+ }
+ case 4: { // context
+ int depth = rng_->NextInt(3);
+ AstType* type = AstType::Internal();
+ for (int i = 0; i < depth; ++i) type = AstType::Context(type, zone_);
+ return type;
+ }
+ case 5: { // array
+ AstType* element = Fuzz(depth / 2);
+ return AstType::Array(element, zone_);
+ }
+ case 6:
+ case 7: { // function
+ AstType* result = Fuzz(depth / 2);
+ AstType* receiver = Fuzz(depth / 2);
+ int arity = rng_->NextInt(3);
+ AstType* type = AstType::Function(result, receiver, arity, zone_);
+ for (int i = 0; i < type->AsFunction()->Arity(); ++i) {
+ AstType* parameter = Fuzz(depth / 2);
+ type->AsFunction()->InitParameter(i, parameter);
+ }
+ return type;
+ }
+ case 8: { // simd
+ static const int num_simd_types =
+#define COUNT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) +1
+ SIMD128_TYPES(COUNT_SIMD_TYPE);
+#undef COUNT_SIMD_TYPE
+ AstType* (*simd_constructors[num_simd_types])(Isolate*, Zone*) = {
+#define COUNT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) &AstType::Name,
+ SIMD128_TYPES(COUNT_SIMD_TYPE)
+#undef COUNT_SIMD_TYPE
+ };
+ return simd_constructors[rng_->NextInt(num_simd_types)](isolate_,
+ zone_);
+ }
+ default: { // union
+ int n = rng_->NextInt(10);
+ AstType* type = None;
+ for (int i = 0; i < n; ++i) {
+ AstType* operand = Fuzz(depth - 1);
+ type = AstType::Union(type, operand, zone_);
+ }
+ return type;
+ }
+ }
+ UNREACHABLE();
+ }
+
+ Zone* zone() { return zone_; }
+
+ private:
+ Zone* zone_;
+ Isolate* isolate_;
+ v8::base::RandomNumberGenerator* rng_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 312001a35b..17127ed9ec 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -105,6 +105,39 @@ void CcTest::Run() {
}
}
+i::Heap* CcTest::heap() { return i_isolate()->heap(); }
+
+void CcTest::CollectGarbage(i::AllocationSpace space) {
+ heap()->CollectGarbage(space, i::GarbageCollectionReason::kTesting);
+}
+
+void CcTest::CollectAllGarbage(int flags) {
+ heap()->CollectAllGarbage(flags, i::GarbageCollectionReason::kTesting);
+}
+
+void CcTest::CollectAllAvailableGarbage() {
+ heap()->CollectAllAvailableGarbage(i::GarbageCollectionReason::kTesting);
+}
+
+v8::base::RandomNumberGenerator* CcTest::random_number_generator() {
+ return InitIsolateOnce()->random_number_generator();
+}
+
+v8::Local<v8::Object> CcTest::global() {
+ return isolate()->GetCurrentContext()->Global();
+}
+
+void CcTest::InitializeVM() {
+ CHECK(!v8::base::NoBarrier_Load(&isolate_used_));
+ CHECK(!initialize_called_);
+ initialize_called_ = true;
+ v8::HandleScope handle_scope(CcTest::isolate());
+ v8::Context::New(CcTest::isolate())->Enter();
+}
+
+void CcTest::TearDown() {
+ if (isolate_ != NULL) isolate_->Dispose();
+}
v8::Local<v8::Context> CcTest::NewContext(CcTestExtensionFlags extensions,
v8::Isolate* isolate) {
@@ -126,6 +159,47 @@ void CcTest::DisableAutomaticDispose() {
disable_automatic_dispose_ = true;
}
+LocalContext::~LocalContext() {
+ v8::HandleScope scope(isolate_);
+ v8::Local<v8::Context>::New(isolate_, context_)->Exit();
+ context_.Reset();
+}
+
+void LocalContext::Initialize(v8::Isolate* isolate,
+ v8::ExtensionConfiguration* extensions,
+ v8::Local<v8::ObjectTemplate> global_template,
+ v8::Local<v8::Value> global_object) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Context::New(isolate, extensions, global_template, global_object);
+ context_.Reset(isolate, context);
+ context->Enter();
+ // We can't do this later perhaps because of a fatal error.
+ isolate_ = isolate;
+}
+
+// This indirection is needed because HandleScopes cannot be heap-allocated, and
+// we don't want any unnecessary #includes in cctest.h.
+class InitializedHandleScopeImpl {
+ public:
+ explicit InitializedHandleScopeImpl(i::Isolate* isolate)
+ : handle_scope_(isolate) {}
+
+ private:
+ i::HandleScope handle_scope_;
+};
+
+InitializedHandleScope::InitializedHandleScope()
+ : main_isolate_(CcTest::InitIsolateOnce()),
+ initialized_handle_scope_impl_(
+ new InitializedHandleScopeImpl(main_isolate_)) {}
+
+InitializedHandleScope::~InitializedHandleScope() {}
+
+HandleAndZoneScope::HandleAndZoneScope()
+ : main_zone_(new i::Zone(&allocator_)) {}
+
+HandleAndZoneScope::~HandleAndZoneScope() {}
static void PrintTestList(CcTest* current) {
if (current == NULL) return;
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 217d74b6dc..c95a0b1749 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -33,10 +33,13 @@
'generated_file': '<(SHARED_INTERMEDIATE_DIR)/resources.cc',
'cctest_sources': [ ### gcmole(all) ###
'asmjs/test-asm-typer.cc',
+ 'ast-types-fuzz.h',
'compiler/c-signature.h',
+ 'compiler/call-tester.h',
'compiler/codegen-tester.cc',
'compiler/codegen-tester.h',
'compiler/code-assembler-tester.h',
+ 'compiler/function-tester.cc',
'compiler/function-tester.h',
'compiler/graph-builder-tester.h',
'compiler/test-basic-block-profiler.cc',
@@ -76,9 +79,12 @@
'compiler/test-run-stubs.cc',
'compiler/test-run-variables.cc',
'compiler/test-run-wasm-machops.cc',
- 'compiler/test-simplified-lowering.cc',
+ 'compiler/value-helper.h',
'cctest.cc',
+ 'cctest.h',
+ 'expression-type-collector-macros.h',
'interpreter/interpreter-tester.cc',
+ 'interpreter/interpreter-tester.h',
'interpreter/source-position-matcher.cc',
'interpreter/source-position-matcher.h',
'interpreter/test-bytecode-generator.cc',
@@ -88,8 +94,11 @@
'interpreter/bytecode-expectations-printer.cc',
'interpreter/bytecode-expectations-printer.h',
'gay-fixed.cc',
+ 'gay-fixed.h',
'gay-precision.cc',
+ 'gay-precision.h',
'gay-shortest.cc',
+ 'gay-shortest.h',
'heap/heap-tester.h',
'heap/heap-utils.cc',
'heap/heap-utils.h',
@@ -104,8 +113,12 @@
'heap/test-spaces.cc',
'libplatform/test-tracing.cc',
'libsampler/test-sampler.cc',
+ 'parsing/test-scanner-streams.cc',
+ 'parsing/test-scanner.cc',
'print-extension.cc',
+ 'print-extension.h',
'profiler-extension.cc',
+ 'profiler-extension.h',
'test-access-checks.cc',
'test-accessors.cc',
'test-api.cc',
@@ -138,6 +151,7 @@
'test-elements-kind.cc',
'test-fast-dtoa.cc',
'test-feedback-vector.cc',
+ 'test-feedback-vector.h',
'test-field-type-tracking.cc',
'test-fixed-dtoa.cc',
'test-flags.cc',
@@ -155,6 +169,7 @@
'test-lockers.cc',
'test-log.cc',
'test-mementos.cc',
+ 'test-modules.cc',
'test-object.cc',
'test-parsing.cc',
'test-platform.cc',
@@ -174,6 +189,7 @@
'test-trace-event.cc',
'test-transitions.cc',
'test-typedarrays.cc',
+ 'test-ast-types.cc',
'test-types.cc',
'test-unbound-queue.cc',
'test-unboxed-doubles.cc',
@@ -185,6 +201,8 @@
'test-weakmaps.cc',
'test-weaksets.cc',
'trace-extension.cc',
+ 'trace-extension.h',
+ 'types-fuzz.h',
'wasm/test-run-wasm.cc',
'wasm/test-run-wasm-64.cc',
'wasm/test-run-wasm-asmjs.cc',
@@ -192,7 +210,6 @@
'wasm/test-run-wasm-js.cc',
'wasm/test-run-wasm-module.cc',
'wasm/test-run-wasm-relocation.cc',
- 'wasm/test-signatures.h',
'wasm/test-wasm-function-name-table.cc',
'wasm/test-wasm-stack.cc',
'wasm/test-wasm-trap-position.cc',
@@ -201,6 +218,7 @@
'cctest_sources_ia32': [ ### gcmole(arch:ia32) ###
'test-assembler-ia32.cc',
'test-code-stubs.cc',
+ 'test-code-stubs.h',
'test-code-stubs-ia32.cc',
'test-disasm-ia32.cc',
'test-macro-assembler-ia32.cc',
@@ -210,15 +228,18 @@
'cctest_sources_x64': [ ### gcmole(arch:x64) ###
'test-assembler-x64.cc',
'test-code-stubs.cc',
+ 'test-code-stubs.h',
'test-code-stubs-x64.cc',
'test-disasm-x64.cc',
'test-macro-assembler-x64.cc',
'test-log-stack-tracer.cc',
- 'test-run-wasm-relocation-x64.cc'
+ 'test-run-wasm-relocation-x64.cc',
+ 'wasm/test-run-wasm-simd.cc'
],
'cctest_sources_arm': [ ### gcmole(arch:arm) ###
'test-assembler-arm.cc',
'test-code-stubs.cc',
+ 'test-code-stubs.h',
'test-code-stubs-arm.cc',
'test-disasm-arm.cc',
'test-macro-assembler-arm.cc',
@@ -226,8 +247,10 @@
],
'cctest_sources_arm64': [ ### gcmole(arch:arm64) ###
'test-utils-arm64.cc',
+ 'test-utils-arm64.h',
'test-assembler-arm64.cc',
'test-code-stubs.cc',
+ 'test-code-stubs.h',
'test-code-stubs-arm64.cc',
'test-disasm-arm64.cc',
'test-fuzz-arm64.cc',
@@ -238,16 +261,19 @@
'cctest_sources_s390': [ ### gcmole(arch:s390) ###
'test-assembler-s390.cc',
'test-code-stubs.cc',
+ 'test-code-stubs.h',
'test-disasm-s390.cc'
],
'cctest_sources_ppc': [ ### gcmole(arch:ppc) ###
'test-assembler-ppc.cc',
'test-code-stubs.cc',
+ 'test-code-stubs.h',
'test-disasm-ppc.cc'
],
'cctest_sources_mips': [ ### gcmole(arch:mips) ###
'test-assembler-mips.cc',
'test-code-stubs.cc',
+ 'test-code-stubs.h',
'test-code-stubs-mips.cc',
'test-disasm-mips.cc',
'test-macro-assembler-mips.cc'
@@ -255,6 +281,7 @@
'cctest_sources_mipsel': [ ### gcmole(arch:mipsel) ###
'test-assembler-mips.cc',
'test-code-stubs.cc',
+ 'test-code-stubs.h',
'test-code-stubs-mips.cc',
'test-disasm-mips.cc',
'test-macro-assembler-mips.cc'
@@ -262,6 +289,7 @@
'cctest_sources_mips64': [ ### gcmole(arch:mips64) ###
'test-assembler-mips64.cc',
'test-code-stubs.cc',
+ 'test-code-stubs.h',
'test-code-stubs-mips64.cc',
'test-disasm-mips64.cc',
'test-macro-assembler-mips64.cc'
@@ -269,6 +297,7 @@
'cctest_sources_mips64el': [ ### gcmole(arch:mips64el) ###
'test-assembler-mips64.cc',
'test-code-stubs.cc',
+ 'test-code-stubs.h',
'test-code-stubs-mips64.cc',
'test-disasm-mips64.cc',
'test-macro-assembler-mips64.cc'
@@ -276,6 +305,7 @@
'cctest_sources_x87': [ ### gcmole(arch:x87) ###
'test-assembler-x87.cc',
'test-code-stubs.cc',
+ 'test-code-stubs.h',
'test-code-stubs-x87.cc',
'test-disasm-x87.cc',
'test-macro-assembler-x87.cc',
@@ -296,6 +326,9 @@
'../..',
],
'sources': [
+ '../common/wasm/test-signatures.h',
+ '../common/wasm/wasm-module-runner.cc',
+ '../common/wasm/wasm-module-runner.h',
'<@(cctest_sources)',
'<(generated_file)',
],
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index dac3a5b94f..d8fa871484 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -28,10 +28,29 @@
#ifndef CCTEST_H_
#define CCTEST_H_
+#include <memory>
+
#include "include/libplatform/libplatform.h"
-#include "src/isolate-inl.h" // TODO(everyone): Make cctest IWYU.
-#include "src/objects-inl.h" // TODO(everyone): Make cctest IWYU.
+#include "include/v8-debug.h"
+#include "src/utils.h"
#include "src/v8.h"
+#include "src/zone/accounting-allocator.h"
+
+namespace v8 {
+namespace base {
+
+class RandomNumberGenerator;
+
+} // namespace base
+
+namespace internal {
+
+class HandleScope;
+class Zone;
+
+} // namespace internal
+
+} // namespace v8
#ifndef TEST
#define TEST(Name) \
@@ -104,17 +123,15 @@ class CcTest {
return reinterpret_cast<i::Isolate*>(isolate());
}
- static i::Heap* heap() {
- return i_isolate()->heap();
- }
+ static i::Heap* heap();
- static v8::base::RandomNumberGenerator* random_number_generator() {
- return InitIsolateOnce()->random_number_generator();
- }
+ static void CollectGarbage(i::AllocationSpace space);
+ static void CollectAllGarbage(int flags);
+ static void CollectAllAvailableGarbage();
- static v8::Local<v8::Object> global() {
- return isolate()->GetCurrentContext()->Global();
- }
+ static v8::base::RandomNumberGenerator* random_number_generator();
+
+ static v8::Local<v8::Object> global();
static v8::ArrayBuffer::Allocator* array_buffer_allocator() {
return allocator_;
@@ -127,13 +144,7 @@ class CcTest {
// TODO(dcarney): Remove.
// This must be called first in a test.
- static void InitializeVM() {
- CHECK(!v8::base::NoBarrier_Load(&isolate_used_));
- CHECK(!initialize_called_);
- initialize_called_ = true;
- v8::HandleScope handle_scope(CcTest::isolate());
- v8::Context::New(CcTest::isolate())->Enter();
- }
+ static void InitializeVM();
// Only for UNINITIALIZED_TESTs
static void DisableAutomaticDispose();
@@ -144,9 +155,7 @@ class CcTest {
CcTestExtensionFlags extensions,
v8::Isolate* isolate = CcTest::isolate());
- static void TearDown() {
- if (isolate_ != NULL) isolate_->Dispose();
- }
+ static void TearDown();
private:
friend int main(int argc, char** argv);
@@ -269,11 +278,7 @@ class LocalContext {
Initialize(CcTest::isolate(), extensions, global_template, global_object);
}
- virtual ~LocalContext() {
- v8::HandleScope scope(isolate_);
- v8::Local<v8::Context>::New(isolate_, context_)->Exit();
- context_.Reset();
- }
+ virtual ~LocalContext();
v8::Context* operator->() {
return *reinterpret_cast<v8::Context**>(&context_);
@@ -288,17 +293,7 @@ class LocalContext {
private:
void Initialize(v8::Isolate* isolate, v8::ExtensionConfiguration* extensions,
v8::Local<v8::ObjectTemplate> global_template,
- v8::Local<v8::Value> global_object) {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate,
- extensions,
- global_template,
- global_object);
- context_.Reset(isolate, context);
- context->Enter();
- // We can't do this later perhaps because of a fatal error.
- isolate_ = isolate;
- }
+ v8::Local<v8::Value> global_object);
v8::Persistent<v8::Context> context_;
v8::Isolate* isolate_;
@@ -567,32 +562,33 @@ static inline void EmptyMessageQueues(v8::Isolate* isolate) {
}
}
+class InitializedHandleScopeImpl;
class InitializedHandleScope {
public:
- InitializedHandleScope()
- : main_isolate_(CcTest::InitIsolateOnce()),
- handle_scope_(main_isolate_) {}
+ InitializedHandleScope();
+ ~InitializedHandleScope();
// Prefixing the below with main_ reduces a lot of naming clashes.
i::Isolate* main_isolate() { return main_isolate_; }
private:
i::Isolate* main_isolate_;
- i::HandleScope handle_scope_;
+ std::unique_ptr<InitializedHandleScopeImpl> initialized_handle_scope_impl_;
};
class HandleAndZoneScope : public InitializedHandleScope {
public:
- HandleAndZoneScope() : main_zone_(&allocator_) {}
+ HandleAndZoneScope();
+ ~HandleAndZoneScope();
// Prefixing the below with main_ reduces a lot of naming clashes.
- i::Zone* main_zone() { return &main_zone_; }
+ i::Zone* main_zone() { return main_zone_.get(); }
private:
- v8::base::AccountingAllocator allocator_;
- i::Zone main_zone_;
+ v8::internal::AccountingAllocator allocator_;
+ std::unique_ptr<i::Zone> main_zone_;
};
#endif // ifndef CCTEST_H_
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 5a88f0f701..b7bcb6b5e3 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -66,6 +66,7 @@
# This tests only the type system, no point in running several variants.
'test-hydrogen-types/*': [PASS, NO_VARIANTS],
'test-types/*': [PASS, NO_VARIANTS],
+ 'test-ast-types/*': [PASS, NO_VARIANTS],
# This tests API threading, no point in running several variants.
'test-api/Threading*': [PASS, NO_VARIANTS],
@@ -150,6 +151,12 @@
['asan == True', {
# Skip tests not suitable for ASAN.
'test-assembler-x64/AssemblerX64XchglOperations': [SKIP],
+
+ # BUG(v8:5243).
+ 'test-cpu-profiler/JsNative1JsNative2JsSample': [SKIP],
+ 'test-cpu-profiler/JsNativeJsRuntimeJsSample': [SKIP],
+ 'test-cpu-profiler/JsNativeJsRuntimeJsSampleMultiple': [SKIP],
+ 'test-cpu-profiler/JsNativeJsSample': [SKIP],
}], # 'asan == True'
##############################################################################
@@ -284,6 +291,8 @@
'test-run-machops/RunFloat64Tan': [SKIP],
'test-cpu-profiler/Inlining': [SKIP],
'test-gap-resolver/FuzzResolver': [SKIP],
+ 'test-run-wasm/RunWasmCompiled_MultiReturnSelect_f32': [SKIP],
+ 'test-run-wasm/RunWasmCompiled_MultiReturnSelect_f64': [SKIP],
}], # 'arch == x87'
##############################################################################
@@ -327,11 +336,6 @@
##############################################################################
['variant == turbofan', {
- # TODO(bmeurer): TurboFan embeds strong references to all kinds of objects
- # via deoptimization data (Crankshaft also does this, but lack proper test
- # coverage).
- 'test-heap/ObjectsInOptimizedCodeAreWeak': [FAIL],
-
# TurboFan cpu profiler result is different.
'test-cpu-profiler/CollectDeoptEvents': [FAIL],
'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [FAIL],
@@ -354,14 +358,6 @@
'test-heap/TestCodeFlushingIncremental': [FAIL],
'test-heap/TestCodeFlushingIncrementalAbort': [PASS, ['mode == debug or dcheck_always_on == True', FAIL]],
- # TODO(mythria,4780): Related to type feedback support for Array function.
- 'test-feedback-vector/VectorCallFeedbackForArray': [FAIL],
-
- # TODO(mythria,4780): Related to type feedback support for constructor.
- 'test-feedback-vector/VectorConstructCounts': [FAIL],
- 'test-heap/WeakFunctionInConstructor': [FAIL],
- 'test-heap/IncrementalMarkingPreservesMonomorphicConstructor': [FAIL],
-
# TODO(mythria,4680): Lack of code-ageing in interpreter.
'test-heap/Regress169209': [FAIL],
@@ -369,23 +365,9 @@
# in interpreter.
'test-heap/CompilationCacheCachingBehavior': [FAIL],
- # BUG(rmcilroy,4680): Function is optimized without type feedback and so immediately deopts again, causing check failure in the test.
- 'test-heap/ResetSharedFunctionInfoCountersDuringIncrementalMarking': [FAIL],
- 'test-heap/ResetSharedFunctionInfoCountersDuringMarkSweep': [FAIL],
-
# BUG(4680): Missing type feedback makes optimistic optimizations fail.
- 'test-cpu-profiler/CollectDeoptEvents': [FAIL],
'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
- # BUG(4680): Ignition doesn't support allocation sites currently.
- 'test-heap/EnsureAllocationSiteDependentCodesProcessed': [FAIL],
- 'test-heap/OptimizedPretenuringAllocationFolding': [FAIL],
- 'test-heap/OptimizedPretenuringdoubleArrayLiterals': [FAIL],
- 'test-heap/OptimizedPretenuringNestedDoubleLiterals': [FAIL],
- 'test-heap/OptimizedPretenuringNestedMixedArrayLiterals': [FAIL],
- 'test-heap/OptimizedPretenuringNestedObjectLiterals': [FAIL],
- 'test-heap/OptimizedPretenuringObjectArrayLiterals': [FAIL],
-
# BUG(4751). Flaky with ignition.
'test-cpu-profiler/JsNativeJsSample': [PASS, FAIL],
@@ -393,10 +375,6 @@
# with crankshaft.
'test-cpu-profiler/TickLinesOptimized': [SKIP],
- # TurboFan cpu profiler result is different.
- 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [FAIL],
- 'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [FAIL],
-
# BUG(5193): Flaky.
'test-cpu-profiler/FunctionApplySample': [PASS, ['system == windows', SKIP]],
}], # variant == ignition
@@ -405,51 +383,12 @@
['variant == ignition_staging', {
'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
'test-cpu-profiler/TickLinesOptimized': [SKIP],
- 'test-cpu-profiler/CollectDeoptEvents': [FAIL],
- 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [FAIL],
- 'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [FAIL],
- 'test-feedback-vector/VectorCallFeedbackForArray': [FAIL],
- 'test-feedback-vector/VectorConstructCounts': [FAIL],
'test-heap/CompilationCacheCachingBehavior': [FAIL],
- 'test-heap/EnsureAllocationSiteDependentCodesProcessed': [FAIL],
- 'test-heap/IncrementalMarkingPreservesMonomorphicConstructor': [FAIL],
- 'test-heap/OptimizedPretenuringAllocationFolding': [FAIL],
- 'test-heap/OptimizedPretenuringdoubleArrayLiterals': [FAIL],
- 'test-heap/OptimizedPretenuringNestedDoubleLiterals': [FAIL],
- 'test-heap/OptimizedPretenuringNestedMixedArrayLiterals': [FAIL],
- 'test-heap/OptimizedPretenuringNestedObjectLiterals': [FAIL],
- 'test-heap/OptimizedPretenuringObjectArrayLiterals': [FAIL],
'test-heap/Regress169209': [FAIL],
- 'test-heap/ResetSharedFunctionInfoCountersDuringIncrementalMarking': [FAIL],
- 'test-heap/ResetSharedFunctionInfoCountersDuringMarkSweep': [FAIL],
'test-heap/TestCodeFlushing': [FAIL],
'test-heap/TestCodeFlushingIncremental': [FAIL],
'test-heap/TestCodeFlushingIncrementalScavenge': [FAIL],
'test-heap/TestCodeFlushingPreAged': [FAIL],
- 'test-heap/WeakFunctionInConstructor': [FAIL],
- 'test-run-inlining/InlineBuiltin': [FAIL],
- 'test-run-inlining/InlineLoopGuardedEmpty': [FAIL],
- 'test-run-inlining/InlineLoopGuardedOnce': [FAIL],
- 'test-run-inlining/InlineLoopGuardedTwice': [FAIL],
- 'test-run-inlining/InlineLoopUnguardedEmpty': [FAIL],
- 'test-run-inlining/InlineLoopUnguardedOnce': [FAIL],
- 'test-run-inlining/InlineLoopUnguardedTwice': [FAIL],
- 'test-run-inlining/InlineMutuallyRecursive': [FAIL],
- 'test-run-inlining/InlineNestedBuiltin': [FAIL],
- 'test-run-inlining/InlineOmitArgumentsDeopt': [FAIL],
- 'test-run-inlining/InlineOmitArguments': [FAIL],
- 'test-run-inlining/InlineOmitArgumentsObject': [FAIL],
- 'test-run-inlining/InlineSurplusArgumentsDeopt': [FAIL],
- 'test-run-inlining/InlineSurplusArguments': [FAIL],
- 'test-run-inlining/InlineSurplusArgumentsObject': [FAIL],
- 'test-run-inlining/InlineTwiceDependentDiamondDifferent': [FAIL],
- 'test-run-inlining/InlineTwiceDependentDiamond': [FAIL],
- 'test-run-inlining/InlineTwiceDependent': [FAIL],
- 'test-run-inlining/InlineTwice': [FAIL],
- 'test-run-inlining/InlineWithArguments': [FAIL],
- 'test-run-inlining/SimpleInliningContextDeopt': [FAIL],
- 'test-run-inlining/SimpleInliningContext': [FAIL],
- 'test-run-inlining/SimpleInlining': [FAIL],
# BUG(5193): Flaky.
'test-cpu-profiler/FunctionApplySample': [PASS, ['system == windows', SKIP]],
@@ -457,31 +396,6 @@
##############################################################################
['variant == ignition_turbofan', {
- # TODO(5251): Inlining is currently disabled for the BytecodeGraphBuilder.
- 'test-run-inlining/InlineLoopGuardedTwice': [FAIL],
- 'test-run-inlining/InlineSurplusArgumentsDeopt': [FAIL],
- 'test-run-inlining/InlineTwice': [FAIL],
- 'test-run-inlining/InlineSurplusArgumentsObject': [FAIL],
- 'test-run-inlining/InlineTwiceDependentDiamond': [FAIL],
- 'test-run-inlining/InlineWithArguments': [FAIL],
- 'test-run-inlining/InlineLoopUnguardedTwice': [FAIL],
- 'test-run-inlining/InlineOmitArgumentsObject': [FAIL],
- 'test-run-inlining/InlineLoopUnguardedOnce': [FAIL],
- 'test-run-inlining/InlineOmitArgumentsDeopt': [FAIL],
- 'test-run-inlining/InlineTwiceDependentDiamondDifferent': [FAIL],
- 'test-run-inlining/SimpleInliningContext': [FAIL],
- 'test-run-inlining/InlineMutuallyRecursive': [FAIL],
- 'test-run-inlining/InlineLoopGuardedEmpty': [FAIL],
- 'test-run-inlining/InlineLoopGuardedOnce': [FAIL],
- 'test-run-inlining/InlineOmitArguments': [FAIL],
- 'test-run-inlining/SimpleInlining': [FAIL],
- 'test-run-inlining/InlineLoopUnguardedEmpty': [FAIL],
- 'test-run-inlining/InlineNestedBuiltin': [FAIL],
- 'test-run-inlining/InlineSurplusArguments': [FAIL],
- 'test-run-inlining/InlineBuiltin': [FAIL],
- 'test-run-inlining/InlineTwiceDependent': [FAIL],
- 'test-run-inlining/SimpleInliningContextDeopt': [FAIL],
-
# TODO(rmcilroy,4766): Requires BytecodeGraphBuilder to track source position
# on nodes (behind --turbo_source_positions flag).
'test-cpu-profiler/TickLinesOptimized': [FAIL],
@@ -493,14 +407,6 @@
'test-heap/TestCodeFlushingIncremental': [FAIL],
'test-heap/TestCodeFlushingIncrementalAbort': [PASS, ['mode == debug or dcheck_always_on == True', FAIL]],
- # TODO(mythria,4780): Related to type feedback support for Array function.
- 'test-feedback-vector/VectorCallFeedbackForArray': [FAIL],
-
- # TODO(mythria,4780): Related to type feedback support for constructor.
- 'test-feedback-vector/VectorConstructCounts': [FAIL],
- 'test-heap/WeakFunctionInConstructor': [FAIL],
- 'test-heap/IncrementalMarkingPreservesMonomorphicConstructor': [FAIL],
-
# TODO(mythria,4680): Lack of code-ageing in interpreter.
'test-heap/Regress169209': [FAIL],
@@ -512,23 +418,9 @@
'test-cpu-profiler/CollectDeoptEvents': [FAIL],
'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
- # BUG(4680): Ignition doesn't support allocation sites currently.
- 'test-heap/EnsureAllocationSiteDependentCodesProcessed': [FAIL],
- 'test-heap/OptimizedPretenuringAllocationFolding': [FAIL],
- 'test-heap/OptimizedPretenuringdoubleArrayLiterals': [FAIL],
- 'test-heap/OptimizedPretenuringNestedDoubleLiterals': [FAIL],
- 'test-heap/OptimizedPretenuringNestedMixedArrayLiterals': [FAIL],
- 'test-heap/OptimizedPretenuringNestedObjectLiterals': [FAIL],
- 'test-heap/OptimizedPretenuringObjectArrayLiterals': [FAIL],
-
# BUG(4751). Flaky with Ignition.
'test-cpu-profiler/JsNativeJsSample': [SKIP],
- # TODO(bmeurer): TurboFan embeds strong references to all kinds of objects
- # via deoptimization data (Crankshaft also does this, but lack proper test
- # coverage).
- 'test-heap/ObjectsInOptimizedCodeAreWeak': [FAIL],
-
# TurboFan cpu profiler result is different.
'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [FAIL],
'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [FAIL],
@@ -537,4 +429,16 @@
'test-cpu-profiler/FunctionApplySample': [PASS, ['system == windows', SKIP]],
}], # variant == ignition_turbofan
+##############################################################################
+['variant != ignition and variant != ignition_staging and variant != ignition_turbofan', {
+ # Ongoing implementation of modules.
+ # https://bugs.chromium.org/p/v8/issues/detail?id=1569
+ 'test-modules/*': [SKIP],
+}], # variant != ignition and variant != ignition_staging and variant != ignition_turbofan
+
+##############################################################################
+['variant == asm_wasm', {
+ '*': [SKIP],
+}], # variant == asm_wasm
+
]
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index 3d115454b9..90c32ce99e 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -5,7 +5,7 @@
#ifndef V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
#define V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
-#include "src/compiler.h"
+#include "src/compilation-info.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
@@ -67,7 +67,8 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
Schedule* schedule = this->Export();
CallDescriptor* call_descriptor = this->call_descriptor();
Graph* graph = this->graph();
- CompilationInfo info(ArrayVector("testing"), main_isolate(), main_zone());
+ CompilationInfo info(ArrayVector("testing"), main_isolate(), main_zone(),
+ Code::ComputeFlags(Code::STUB));
code_ = Pipeline::GenerateCodeForTesting(&info, call_descriptor, graph,
schedule);
}
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
new file mode 100644
index 0000000000..2da2dc14aa
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -0,0 +1,211 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/compiler/function-tester.h"
+
+#include "src/ast/ast-numbering.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/pipeline.h"
+#include "src/execution.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/handles.h"
+#include "src/objects-inl.h"
+#include "src/parsing/parse-info.h"
+#include "src/parsing/parser.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+FunctionTester::FunctionTester(const char* source, uint32_t flags)
+ : isolate(main_isolate()),
+ function((FLAG_allow_natives_syntax = true, NewFunction(source))),
+ flags_(flags) {
+ Compile(function);
+ const uint32_t supported_flags = CompilationInfo::kNativeContextSpecializing |
+ CompilationInfo::kInliningEnabled;
+ CHECK_EQ(0u, flags_ & ~supported_flags);
+}
+
+FunctionTester::FunctionTester(Graph* graph, int param_count)
+ : isolate(main_isolate()),
+ function(NewFunction(BuildFunction(param_count).c_str())),
+ flags_(0) {
+ CompileGraph(graph);
+}
+
+FunctionTester::FunctionTester(Handle<Code> code, int param_count)
+ : isolate(main_isolate()),
+ function((FLAG_allow_natives_syntax = true,
+ NewFunction(BuildFunction(param_count).c_str()))),
+ flags_(0) {
+ Compile(function);
+ function->ReplaceCode(*code);
+}
+
+FunctionTester::FunctionTester(const CallInterfaceDescriptor& descriptor,
+ Handle<Code> code)
+ : FunctionTester(code, descriptor.GetParameterCount()) {}
+
+MaybeHandle<Object> FunctionTester::Call() {
+ return Execution::Call(isolate, function, undefined(), 0, nullptr);
+}
+
+MaybeHandle<Object> FunctionTester::Call(Handle<Object> a) {
+ Handle<Object> args[] = {a};
+ return Execution::Call(isolate, function, undefined(), 1, args);
+}
+
+MaybeHandle<Object> FunctionTester::Call(Handle<Object> a, Handle<Object> b) {
+ Handle<Object> args[] = {a, b};
+ return Execution::Call(isolate, function, undefined(), 2, args);
+}
+
+MaybeHandle<Object> FunctionTester::Call(Handle<Object> a, Handle<Object> b,
+ Handle<Object> c) {
+ Handle<Object> args[] = {a, b, c};
+ return Execution::Call(isolate, function, undefined(), 3, args);
+}
+
+MaybeHandle<Object> FunctionTester::Call(Handle<Object> a, Handle<Object> b,
+ Handle<Object> c, Handle<Object> d) {
+ Handle<Object> args[] = {a, b, c, d};
+ return Execution::Call(isolate, function, undefined(), 4, args);
+}
+
+void FunctionTester::CheckThrows(Handle<Object> a, Handle<Object> b) {
+ TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ MaybeHandle<Object> no_result = Call(a, b);
+ CHECK(isolate->has_pending_exception());
+ CHECK(try_catch.HasCaught());
+ CHECK(no_result.is_null());
+ isolate->OptionalRescheduleException(true);
+}
+
+v8::Local<v8::Message> FunctionTester::CheckThrowsReturnMessage(
+ Handle<Object> a, Handle<Object> b) {
+ TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ MaybeHandle<Object> no_result = Call(a, b);
+ CHECK(isolate->has_pending_exception());
+ CHECK(try_catch.HasCaught());
+ CHECK(no_result.is_null());
+ isolate->OptionalRescheduleException(true);
+ CHECK(!try_catch.Message().IsEmpty());
+ return try_catch.Message();
+}
+
+void FunctionTester::CheckCall(Handle<Object> expected, Handle<Object> a,
+ Handle<Object> b, Handle<Object> c,
+ Handle<Object> d) {
+ Handle<Object> result = Call(a, b, c, d).ToHandleChecked();
+ CHECK(expected->SameValue(*result));
+}
+
+Handle<JSFunction> FunctionTester::NewFunction(const char* source) {
+ return Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CompileRun(source))));
+}
+
+Handle<JSObject> FunctionTester::NewObject(const char* source) {
+ return Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(CompileRun(source))));
+}
+
+Handle<String> FunctionTester::Val(const char* string) {
+ return isolate->factory()->InternalizeUtf8String(string);
+}
+
+Handle<Object> FunctionTester::Val(double value) {
+ return isolate->factory()->NewNumber(value);
+}
+
+Handle<Object> FunctionTester::infinity() {
+ return isolate->factory()->infinity_value();
+}
+
+Handle<Object> FunctionTester::minus_infinity() { return Val(-V8_INFINITY); }
+
+Handle<Object> FunctionTester::nan() { return isolate->factory()->nan_value(); }
+
+Handle<Object> FunctionTester::undefined() {
+ return isolate->factory()->undefined_value();
+}
+
+Handle<Object> FunctionTester::null() {
+ return isolate->factory()->null_value();
+}
+
+Handle<Object> FunctionTester::true_value() {
+ return isolate->factory()->true_value();
+}
+
+Handle<Object> FunctionTester::false_value() {
+ return isolate->factory()->false_value();
+}
+
+Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph,
+ int param_count) {
+ JSFunction* p = NULL;
+ { // because of the implicit handle scope of FunctionTester.
+ FunctionTester f(graph, param_count);
+ p = *f.function;
+ }
+ return Handle<JSFunction>(p); // allocated in outer handle scope.
+}
+
+Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
+ Zone zone(function->GetIsolate()->allocator());
+ ParseInfo parse_info(&zone, function);
+ CompilationInfo info(&parse_info, function);
+ info.MarkAsDeoptimizationEnabled();
+
+ if (!FLAG_turbo_from_bytecode) {
+ CHECK(Parser::ParseStatic(info.parse_info()));
+ }
+ info.SetOptimizing();
+ if (flags_ & CompilationInfo::kNativeContextSpecializing) {
+ info.MarkAsNativeContextSpecializing();
+ }
+ if (flags_ & CompilationInfo::kInliningEnabled) {
+ info.MarkAsInliningEnabled();
+ }
+ if (FLAG_turbo_from_bytecode) {
+ CHECK(Compiler::EnsureBytecode(&info));
+ info.MarkAsOptimizeFromBytecode();
+ } else {
+ CHECK(Compiler::Analyze(info.parse_info()));
+ CHECK(Compiler::EnsureDeoptimizationSupport(&info));
+ }
+ JSFunction::EnsureLiterals(function);
+
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(&info);
+ CHECK(!code.is_null());
+ info.dependencies()->Commit(code);
+ info.context()->native_context()->AddOptimizedCode(*code);
+ function->ReplaceCode(*code);
+ return function;
+}
+
+// Compile the given machine graph instead of the source of the function
+// and replace the JSFunction's code with the result.
+Handle<JSFunction> FunctionTester::CompileGraph(Graph* graph) {
+ Zone zone(function->GetIsolate()->allocator());
+ ParseInfo parse_info(&zone, function);
+ CompilationInfo info(&parse_info, function);
+
+ CHECK(Parser::ParseStatic(info.parse_info()));
+ info.SetOptimizing();
+
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, graph);
+ CHECK(!code.is_null());
+ function->ReplaceCode(*code);
+ return function;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/function-tester.h b/deps/v8/test/cctest/compiler/function-tester.h
index c1473ac960..e65fa78c13 100644
--- a/deps/v8/test/cctest/compiler/function-tester.h
+++ b/deps/v8/test/cctest/compiler/function-tester.h
@@ -5,110 +5,45 @@
#ifndef V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
#define V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
-#include "src/ast/ast-numbering.h"
-#include "src/compiler.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/pipeline.h"
-#include "src/execution.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/handles.h"
-#include "src/objects-inl.h"
-#include "src/parsing/parse-info.h"
-#include "src/parsing/parser.h"
-#include "src/parsing/rewriter.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
+
+class CallInterfaceDescriptor;
+class Isolate;
+
namespace compiler {
+class Graph;
+
class FunctionTester : public InitializedHandleScope {
public:
- explicit FunctionTester(const char* source, uint32_t flags = 0)
- : isolate(main_isolate()),
- function((FLAG_allow_natives_syntax = true, NewFunction(source))),
- flags_(flags) {
- Compile(function);
- const uint32_t supported_flags =
- CompilationInfo::kNativeContextSpecializing |
- CompilationInfo::kInliningEnabled;
- CHECK_EQ(0u, flags_ & ~supported_flags);
- }
+ explicit FunctionTester(const char* source, uint32_t flags = 0);
- FunctionTester(Graph* graph, int param_count)
- : isolate(main_isolate()),
- function(NewFunction(BuildFunction(param_count).c_str())),
- flags_(0) {
- CompileGraph(graph);
- }
+ FunctionTester(Graph* graph, int param_count);
- FunctionTester(Handle<Code> code, int param_count)
- : isolate(main_isolate()),
- function((FLAG_allow_natives_syntax = true,
- NewFunction(BuildFunction(param_count).c_str()))),
- flags_(0) {
- Compile(function);
- function->ReplaceCode(*code);
- }
+ FunctionTester(Handle<Code> code, int param_count);
- FunctionTester(const CallInterfaceDescriptor& descriptor, Handle<Code> code)
- : FunctionTester(code, descriptor.GetParameterCount()) {}
+ FunctionTester(const CallInterfaceDescriptor& descriptor, Handle<Code> code);
Isolate* isolate;
Handle<JSFunction> function;
- MaybeHandle<Object> Call() {
- return Execution::Call(isolate, function, undefined(), 0, nullptr);
- }
-
- MaybeHandle<Object> Call(Handle<Object> a) {
- Handle<Object> args[] = {a};
- return Execution::Call(isolate, function, undefined(), 1, args);
- }
-
- MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b) {
- Handle<Object> args[] = {a, b};
- return Execution::Call(isolate, function, undefined(), 2, args);
- }
-
+ MaybeHandle<Object> Call();
+ MaybeHandle<Object> Call(Handle<Object> a);
+ MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b);
MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b,
- Handle<Object> c) {
- Handle<Object> args[] = {a, b, c};
- return Execution::Call(isolate, function, undefined(), 3, args);
- }
-
+ Handle<Object> c);
MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b, Handle<Object> c,
- Handle<Object> d) {
- Handle<Object> args[] = {a, b, c, d};
- return Execution::Call(isolate, function, undefined(), 4, args);
- }
-
- void CheckThrows(Handle<Object> a, Handle<Object> b) {
- TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- MaybeHandle<Object> no_result = Call(a, b);
- CHECK(isolate->has_pending_exception());
- CHECK(try_catch.HasCaught());
- CHECK(no_result.is_null());
- isolate->OptionalRescheduleException(true);
- }
+ Handle<Object> d);
+ void CheckThrows(Handle<Object> a, Handle<Object> b);
v8::Local<v8::Message> CheckThrowsReturnMessage(Handle<Object> a,
- Handle<Object> b) {
- TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- MaybeHandle<Object> no_result = Call(a, b);
- CHECK(isolate->has_pending_exception());
- CHECK(try_catch.HasCaught());
- CHECK(no_result.is_null());
- isolate->OptionalRescheduleException(true);
- CHECK(!try_catch.Message().IsEmpty());
- return try_catch.Message();
- }
-
+ Handle<Object> b);
void CheckCall(Handle<Object> expected, Handle<Object> a, Handle<Object> b,
- Handle<Object> c, Handle<Object> d) {
- Handle<Object> result = Call(a, b, c, d).ToHandleChecked();
- CHECK(expected->SameValue(*result));
- }
+ Handle<Object> c, Handle<Object> d);
void CheckCall(Handle<Object> expected, Handle<Object> a, Handle<Object> b,
Handle<Object> c) {
@@ -158,83 +93,25 @@ class FunctionTester : public InitializedHandleScope {
CheckCall(false_value(), Val(a), Val(b));
}
- Handle<JSFunction> NewFunction(const char* source) {
- return Handle<JSFunction>::cast(v8::Utils::OpenHandle(
- *v8::Local<v8::Function>::Cast(CompileRun(source))));
- }
+ Handle<JSFunction> NewFunction(const char* source);
+ Handle<JSObject> NewObject(const char* source);
- Handle<JSObject> NewObject(const char* source) {
- return Handle<JSObject>::cast(v8::Utils::OpenHandle(
- *v8::Local<v8::Object>::Cast(CompileRun(source))));
- }
+ Handle<String> Val(const char* string);
+ Handle<Object> Val(double value);
+ Handle<Object> infinity();
+ Handle<Object> minus_infinity();
+ Handle<Object> nan();
+ Handle<Object> undefined();
+ Handle<Object> null();
+ Handle<Object> true_value();
+ Handle<Object> false_value();
- Handle<String> Val(const char* string) {
- return isolate->factory()->InternalizeUtf8String(string);
- }
-
- Handle<Object> Val(double value) {
- return isolate->factory()->NewNumber(value);
- }
-
- Handle<Object> infinity() { return isolate->factory()->infinity_value(); }
-
- Handle<Object> minus_infinity() { return Val(-V8_INFINITY); }
-
- Handle<Object> nan() { return isolate->factory()->nan_value(); }
-
- Handle<Object> undefined() { return isolate->factory()->undefined_value(); }
-
- Handle<Object> null() { return isolate->factory()->null_value(); }
-
- Handle<Object> true_value() { return isolate->factory()->true_value(); }
-
- Handle<Object> false_value() { return isolate->factory()->false_value(); }
-
- static Handle<JSFunction> ForMachineGraph(Graph* graph, int param_count) {
- JSFunction* p = NULL;
- { // because of the implicit handle scope of FunctionTester.
- FunctionTester f(graph, param_count);
- p = *f.function;
- }
- return Handle<JSFunction>(p); // allocated in outer handle scope.
- }
+ static Handle<JSFunction> ForMachineGraph(Graph* graph, int param_count);
private:
uint32_t flags_;
- Handle<JSFunction> Compile(Handle<JSFunction> function) {
- Zone zone(function->GetIsolate()->allocator());
- ParseInfo parse_info(&zone, function);
- CompilationInfo info(&parse_info, function);
- info.MarkAsDeoptimizationEnabled();
-
- if (!FLAG_turbo_from_bytecode) {
- CHECK(Parser::ParseStatic(info.parse_info()));
- }
- info.SetOptimizing();
- if (flags_ & CompilationInfo::kNativeContextSpecializing) {
- info.MarkAsNativeContextSpecializing();
- }
- if (flags_ & CompilationInfo::kInliningEnabled) {
- info.MarkAsInliningEnabled();
- }
- if (FLAG_turbo_from_bytecode) {
- CHECK(Compiler::EnsureBytecode(&info));
- info.MarkAsOptimizeFromBytecode();
- } else {
- CHECK(Compiler::Analyze(info.parse_info()));
- CHECK(Compiler::EnsureDeoptimizationSupport(&info));
- }
- JSFunction::EnsureLiterals(function);
-
- Handle<Code> code = Pipeline::GenerateCodeForTesting(&info);
- CHECK(!code.is_null());
- info.dependencies()->Commit(code);
- info.context()->native_context()->AddOptimizedCode(*code);
- function->ReplaceCode(*code);
- return function;
- }
-
+ Handle<JSFunction> Compile(Handle<JSFunction> function);
std::string BuildFunction(int param_count) {
std::string function_string = "(function(";
if (param_count > 0) {
@@ -250,19 +127,7 @@ class FunctionTester : public InitializedHandleScope {
// Compile the given machine graph instead of the source of the function
// and replace the JSFunction's code with the result.
- Handle<JSFunction> CompileGraph(Graph* graph) {
- Zone zone(function->GetIsolate()->allocator());
- ParseInfo parse_info(&zone, function);
- CompilationInfo info(&parse_info, function);
-
- CHECK(Parser::ParseStatic(info.parse_info()));
- info.SetOptimizing();
-
- Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, graph);
- CHECK(!code.is_null());
- function->ReplaceCode(*code);
- return function;
- }
+ Handle<JSFunction> CompileGraph(Graph* graph);
};
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index c870a3e84e..c257448b8a 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -5,7 +5,7 @@
#ifndef V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
#define V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
-#include "src/compiler.h"
+#include "src/compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
@@ -279,7 +279,8 @@ class GraphBuilderTester : public HandleAndZoneScope,
Zone* zone = graph()->zone();
CallDescriptor* desc =
Linkage::GetSimplifiedCDescriptor(zone, this->csig_);
- CompilationInfo info(ArrayVector("testing"), main_isolate(), main_zone());
+ CompilationInfo info(ArrayVector("testing"), main_isolate(), main_zone(),
+ Code::ComputeFlags(Code::STUB));
code_ = Pipeline::GenerateCodeForTesting(&info, desc, graph());
#ifdef ENABLE_DISASSEMBLER
if (!code_.is_null() && FLAG_print_opt_code) {
diff --git a/deps/v8/test/cctest/compiler/test-code-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc
index d9bb9346f5..6fe733af8d 100644
--- a/deps/v8/test/cctest/compiler/test-code-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/code-factory.h"
#include "src/compiler/code-assembler.h"
#include "src/isolate.h"
#include "test/cctest/compiler/code-assembler-tester.h"
diff --git a/deps/v8/test/cctest/compiler/test-gap-resolver.cc b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
index b8b1251724..3b1cdb6d81 100644
--- a/deps/v8/test/cctest/compiler/test-gap-resolver.cc
+++ b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
@@ -11,6 +11,19 @@ namespace v8 {
namespace internal {
namespace compiler {
+const auto GetRegConfig = RegisterConfiguration::Turbofan;
+
+// Fragments the given operand into an equivalent set of operands to simplify
+// ParallelMove equivalence testing.
+void GetCanonicalOperands(const InstructionOperand& op,
+ std::vector<InstructionOperand>* fragments) {
+ CHECK(!kSimpleFPAliasing);
+ CHECK(op.IsFPLocationOperand());
+ // TODO(bbudge) Split into float operands on platforms with non-simple FP
+ // register aliasing.
+ fragments->push_back(op);
+}
+
// The state of our move interpreter is the mapping of operands to values. Note
// that the actual values don't really matter, all we care about is equality.
class InterpreterState {
@@ -18,7 +31,26 @@ class InterpreterState {
void ExecuteInParallel(const ParallelMove* moves) {
InterpreterState copy(*this);
for (const auto m : *moves) {
- if (!m->IsRedundant()) write(m->destination(), copy.read(m->source()));
+ CHECK(!m->IsRedundant());
+ const InstructionOperand& src = m->source();
+ const InstructionOperand& dst = m->destination();
+ if (!kSimpleFPAliasing && src.IsFPLocationOperand() &&
+ dst.IsFPLocationOperand()) {
+ // Canonicalize FP location-location moves.
+ std::vector<InstructionOperand> src_fragments;
+ GetCanonicalOperands(src, &src_fragments);
+ CHECK(!src_fragments.empty());
+ std::vector<InstructionOperand> dst_fragments;
+ GetCanonicalOperands(dst, &dst_fragments);
+ CHECK_EQ(src_fragments.size(), dst_fragments.size());
+
+ for (size_t i = 0; i < src_fragments.size(); ++i) {
+ write(dst_fragments[i], copy.read(src_fragments[i]));
+ }
+ continue;
+ }
+ // All other moves.
+ write(dst, copy.read(src));
}
}
@@ -26,11 +58,13 @@ class InterpreterState {
return values_ == other.values_;
}
- bool operator!=(const InterpreterState& other) const {
- return values_ != other.values_;
- }
-
private:
+ // struct for mapping operands to a unique value, that makes it easier to
+ // detect illegal parallel moves, and to evaluate moves for equivalence. This
+ // is a one way transformation. All general register and slot operands are
+ // mapped to the default representation. FP registers and slots are mapped to
+ // float64 except on architectures with non-simple FP register aliasing, where
+ // the actual representation is used.
struct Key {
bool is_constant;
MachineRepresentation rep;
@@ -42,7 +76,7 @@ class InterpreterState {
return this->is_constant;
}
if (this->rep != other.rep) {
- return static_cast<int>(this->rep) < static_cast<int>(other.rep);
+ return this->rep < other.rep;
}
if (this->kind != other.kind) {
return this->kind < other.kind;
@@ -56,7 +90,7 @@ class InterpreterState {
}
};
- // Internally, the state is a normalized permutation of (kind,index) pairs.
+ // Internally, the state is a normalized permutation of Value pairs.
typedef Key Value;
typedef std::map<Key, Value> OperandMap;
@@ -65,11 +99,11 @@ class InterpreterState {
return (it == values_.end()) ? ValueFor(op) : it->second;
}
- void write(const InstructionOperand& op, Value v) {
- if (v == ValueFor(op)) {
- values_.erase(KeyFor(op));
+ void write(const InstructionOperand& dst, Value v) {
+ if (v == ValueFor(dst)) {
+ values_.erase(KeyFor(dst));
} else {
- values_[KeyFor(op)] = v;
+ values_[KeyFor(dst)] = v;
}
}
@@ -81,10 +115,11 @@ class InterpreterState {
int index;
if (!is_constant) {
const LocationOperand& loc_op = LocationOperand::cast(op);
+ // Canonicalize FP location operand representations to kFloat64.
+ if (IsFloatingPoint(loc_op.representation())) {
+ rep = MachineRepresentation::kFloat64;
+ }
if (loc_op.IsAnyRegister()) {
- if (loc_op.IsFPRegister()) {
- rep = MachineRepresentation::kFloat64;
- }
index = loc_op.register_code();
} else {
index = loc_op.index();
@@ -115,7 +150,7 @@ class InterpreterState {
InstructionOperand source = FromKey(it->second);
InstructionOperand destination = FromKey(it->first);
MoveOperands mo(source, destination);
- PrintableMoveOperands pmo = {RegisterConfiguration::Turbofan(), &mo};
+ PrintableMoveOperands pmo = {GetRegConfig(), &mo};
os << pmo;
}
return os;
@@ -124,7 +159,6 @@ class InterpreterState {
OperandMap values_;
};
-
// An abstract interpreter for moves, swaps and parallel moves.
class MoveInterpreter : public GapResolver::Assembler {
public:
@@ -161,24 +195,87 @@ class ParallelMoveCreator : public HandleAndZoneScope {
public:
ParallelMoveCreator() : rng_(CcTest::random_number_generator()) {}
+ // Creates a ParallelMove with 'size' random MoveOperands. Note that illegal
+ // moves will be rejected, so the actual number of MoveOperands may be less.
ParallelMove* Create(int size) {
ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
- std::set<InstructionOperand, CompareOperandModuloType> seen;
+ // Valid ParallelMoves can't have interfering destination ops.
+ std::set<InstructionOperand, CompareOperandModuloType> destinations;
+ // Valid ParallelMoves can't have interfering source ops of different reps.
+ std::map<InstructionOperand, MachineRepresentation,
+ CompareOperandModuloType>
+ sources;
for (int i = 0; i < size; ++i) {
MachineRepresentation rep = RandomRepresentation();
MoveOperands mo(CreateRandomOperand(true, rep),
CreateRandomOperand(false, rep));
- if (!mo.IsRedundant() && seen.find(mo.destination()) == seen.end()) {
+ if (mo.IsRedundant()) continue;
+
+ const InstructionOperand& dst = mo.destination();
+ bool reject = false;
+ // On architectures where FP register aliasing is non-simple, update the
+ // destinations set with the float equivalents of the operand and check
+ // that all destinations are unique and do not alias each other.
+ if (!kSimpleFPAliasing && mo.destination().IsFPLocationOperand()) {
+ std::vector<InstructionOperand> fragments;
+ GetCanonicalOperands(dst, &fragments);
+ CHECK(!fragments.empty());
+ for (size_t i = 0; i < fragments.size(); ++i) {
+ if (destinations.find(fragments[i]) == destinations.end()) {
+ destinations.insert(fragments[i]);
+ } else {
+ reject = true;
+ break;
+ }
+ }
+ // Update the sources map, and check that no FP source has multiple
+ // representations.
+ const InstructionOperand& src = mo.source();
+ if (src.IsFPRegister()) {
+ std::vector<InstructionOperand> fragments;
+ MachineRepresentation src_rep =
+ LocationOperand::cast(src).representation();
+ GetCanonicalOperands(src, &fragments);
+ CHECK(!fragments.empty());
+ for (size_t i = 0; i < fragments.size(); ++i) {
+ auto find_it = sources.find(fragments[i]);
+ if (find_it != sources.end() && find_it->second != src_rep) {
+ reject = true;
+ break;
+ }
+ sources.insert(std::make_pair(fragments[i], src_rep));
+ }
+ }
+ } else {
+ if (destinations.find(dst) == destinations.end()) {
+ destinations.insert(dst);
+ } else {
+ reject = true;
+ }
+ }
+
+ if (!reject) {
parallel_move->AddMove(mo.source(), mo.destination());
- seen.insert(mo.destination());
}
}
return parallel_move;
}
+ // Creates a ParallelMove from a list of operand pairs. Even operands are
+ // destinations, odd ones are sources.
+ ParallelMove* Create(const std::vector<InstructionOperand>& operand_pairs) {
+ ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
+ for (size_t i = 0; i < operand_pairs.size(); i += 2) {
+ const InstructionOperand& dst = operand_pairs[i];
+ const InstructionOperand& src = operand_pairs[i + 1];
+ parallel_move->AddMove(src, dst);
+ }
+ return parallel_move;
+ }
+
private:
MachineRepresentation RandomRepresentation() {
- int index = rng_->NextInt(5);
+ int index = rng_->NextInt(6);
switch (index) {
case 0:
return MachineRepresentation::kWord32;
@@ -189,47 +286,65 @@ class ParallelMoveCreator : public HandleAndZoneScope {
case 3:
return MachineRepresentation::kFloat64;
case 4:
+ return MachineRepresentation::kSimd128;
+ case 5:
return MachineRepresentation::kTagged;
}
UNREACHABLE();
return MachineRepresentation::kNone;
}
+ const int kMaxIndex = 7;
+ const int kMaxIndices = kMaxIndex + 1;
+
+ // Non-FP slots shouldn't overlap FP slots.
+ // FP slots with different representations shouldn't overlap.
+ int GetValidSlotIndex(MachineRepresentation rep, int index) {
+ DCHECK_GE(kMaxIndex, index);
+ // The first group of slots are for non-FP values.
+ if (!IsFloatingPoint(rep)) return index;
+ // The next group are for float values.
+ int base = kMaxIndices;
+ if (rep == MachineRepresentation::kFloat32) return base + index;
+ // Double values.
+ base += kMaxIndices;
+ if (rep == MachineRepresentation::kFloat64) return base + index * 2;
+ // SIMD values
+ base += kMaxIndices * 2;
+ CHECK_EQ(MachineRepresentation::kSimd128, rep);
+ return base + index * 4;
+ }
+
InstructionOperand CreateRandomOperand(bool is_source,
MachineRepresentation rep) {
auto conf = RegisterConfiguration::Turbofan();
- auto GetRegisterCode = [&conf](MachineRepresentation rep, int index) {
+ auto GetValidRegisterCode = [&conf](MachineRepresentation rep, int index) {
switch (rep) {
case MachineRepresentation::kFloat32:
-#if V8_TARGET_ARCH_ARM
- // Only even number float registers are used on Arm.
- // TODO(bbudge) Eliminate this when FP register aliasing works.
- return conf->RegisterConfiguration::GetAllocatableDoubleCode(index) *
- 2;
-#endif
- // Fall through on non-Arm targets.
case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kSimd128:
return conf->RegisterConfiguration::GetAllocatableDoubleCode(index);
-
default:
return conf->RegisterConfiguration::GetAllocatableGeneralCode(index);
}
UNREACHABLE();
return static_cast<int>(Register::kCode_no_reg);
};
- int index = rng_->NextInt(7);
+ int index = rng_->NextInt(kMaxIndex);
// destination can't be Constant.
switch (rng_->NextInt(is_source ? 5 : 4)) {
case 0:
- return AllocatedOperand(LocationOperand::STACK_SLOT, rep, index);
+ return AllocatedOperand(LocationOperand::STACK_SLOT, rep,
+ GetValidSlotIndex(rep, index));
case 1:
- return AllocatedOperand(LocationOperand::REGISTER, rep, index);
+ return AllocatedOperand(LocationOperand::REGISTER, rep,
+ GetValidRegisterCode(rep, index));
case 2:
return ExplicitOperand(LocationOperand::REGISTER, rep,
- GetRegisterCode(rep, 1));
+ GetValidRegisterCode(rep, 1));
case 3:
return ExplicitOperand(LocationOperand::STACK_SLOT, rep,
- GetRegisterCode(rep, index));
+ GetValidSlotIndex(rep, index));
case 4:
return ConstantOperand(index);
}
@@ -241,22 +356,23 @@ class ParallelMoveCreator : public HandleAndZoneScope {
v8::base::RandomNumberGenerator* rng_;
};
+void RunTest(ParallelMove* pm, Zone* zone) {
+ // Note: The gap resolver modifies the ParallelMove, so interpret first.
+ MoveInterpreter mi1(zone);
+ mi1.AssembleParallelMove(pm);
+
+ MoveInterpreter mi2(zone);
+ GapResolver resolver(&mi2);
+ resolver.Resolve(pm);
+
+ CHECK_EQ(mi1.state(), mi2.state());
+}
TEST(FuzzResolver) {
ParallelMoveCreator pmc;
- for (int size = 0; size < 20; ++size) {
+ for (int size = 0; size < 80; ++size) {
for (int repeat = 0; repeat < 50; ++repeat) {
- ParallelMove* pm = pmc.Create(size);
-
- // Note: The gap resolver modifies the ParallelMove, so interpret first.
- MoveInterpreter mi1(pmc.main_zone());
- mi1.AssembleParallelMove(pm);
-
- MoveInterpreter mi2(pmc.main_zone());
- GapResolver resolver(&mi2);
- resolver.Resolve(pm);
-
- CHECK_EQ(mi1.state(), mi2.state());
+ RunTest(pmc.Create(size), pmc.main_zone());
}
}
}
diff --git a/deps/v8/test/cctest/compiler/test-instruction.cc b/deps/v8/test/cctest/compiler/test-instruction.cc
index 4cf72a55ce..5265e476aa 100644
--- a/deps/v8/test/cctest/compiler/test-instruction.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction.cc
@@ -268,7 +268,7 @@ TEST(InstructionAddGapMove) {
TEST(InstructionOperands) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
{
diff --git a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
index 06169f3ba6..24107b88ea 100644
--- a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
+++ b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
@@ -5,8 +5,14 @@
#include "src/assembler.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
-#include "src/compiler/typer.h"
-#include "src/types.h"
+#include "src/factory.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -20,12 +26,10 @@ class JSCacheTesterHelper {
: main_graph_(zone),
main_common_(zone),
main_javascript_(zone),
- main_typer_(isolate, &main_graph_),
main_machine_(zone) {}
Graph main_graph_;
CommonOperatorBuilder main_common_;
JSOperatorBuilder main_javascript_;
- Typer main_typer_;
MachineOperatorBuilder main_machine_;
};
@@ -42,11 +46,8 @@ class JSConstantCacheTester : public HandleAndZoneScope,
main_graph_.SetStart(main_graph_.NewNode(common()->Start(0)));
main_graph_.SetEnd(
main_graph_.NewNode(common()->End(1), main_graph_.start()));
- main_typer_.Run();
}
- Type* TypeOf(Node* node) { return NodeProperties::GetType(node); }
-
Handle<HeapObject> handle(Node* node) {
CHECK_EQ(IrOpcode::kHeapConstant, node->opcode());
return OpParameter<Handle<HeapObject>>(node);
@@ -68,15 +69,6 @@ TEST(ZeroConstant1) {
CHECK_NE(zero, T.Constant(std::numeric_limits<double>::quiet_NaN()));
CHECK_NE(zero, T.Float64Constant(0));
CHECK_NE(zero, T.Int32Constant(0));
-
- Type* t = T.TypeOf(zero);
-
- CHECK(t->Is(Type::Number()));
- CHECK(t->Is(Type::Integral32()));
- CHECK(t->Is(Type::Signed32()));
- CHECK(t->Is(Type::Unsigned32()));
- CHECK(t->Is(Type::SignedSmall()));
- CHECK(t->Is(Type::UnsignedSmall()));
}
@@ -90,16 +82,6 @@ TEST(MinusZeroConstant) {
CHECK_EQ(minus_zero, T.Constant(-0.0));
CHECK_NE(zero, minus_zero);
- Type* t = T.TypeOf(minus_zero);
-
- CHECK(t->Is(Type::Number()));
- CHECK(t->Is(Type::MinusZero()));
- CHECK(!t->Is(Type::Integral32()));
- CHECK(!t->Is(Type::Signed32()));
- CHECK(!t->Is(Type::Unsigned32()));
- CHECK(!t->Is(Type::SignedSmall()));
- CHECK(!t->Is(Type::UnsignedSmall()));
-
double zero_value = OpParameter<double>(zero);
double minus_zero_value = OpParameter<double>(minus_zero);
@@ -122,15 +104,6 @@ TEST(ZeroConstant2) {
CHECK_NE(zero, T.Constant(std::numeric_limits<double>::quiet_NaN()));
CHECK_NE(zero, T.Float64Constant(0));
CHECK_NE(zero, T.Int32Constant(0));
-
- Type* t = T.TypeOf(zero);
-
- CHECK(t->Is(Type::Number()));
- CHECK(t->Is(Type::Integral32()));
- CHECK(t->Is(Type::Signed32()));
- CHECK(t->Is(Type::Unsigned32()));
- CHECK(t->Is(Type::SignedSmall()));
- CHECK(t->Is(Type::UnsignedSmall()));
}
@@ -147,15 +120,6 @@ TEST(OneConstant1) {
CHECK_NE(one, T.Constant(std::numeric_limits<double>::quiet_NaN()));
CHECK_NE(one, T.Float64Constant(1.0));
CHECK_NE(one, T.Int32Constant(1));
-
- Type* t = T.TypeOf(one);
-
- CHECK(t->Is(Type::Number()));
- CHECK(t->Is(Type::Integral32()));
- CHECK(t->Is(Type::Signed32()));
- CHECK(t->Is(Type::Unsigned32()));
- CHECK(t->Is(Type::SignedSmall()));
- CHECK(t->Is(Type::UnsignedSmall()));
}
@@ -172,15 +136,6 @@ TEST(OneConstant2) {
CHECK_NE(one, T.Constant(std::numeric_limits<double>::quiet_NaN()));
CHECK_NE(one, T.Float64Constant(1.0));
CHECK_NE(one, T.Int32Constant(1));
-
- Type* t = T.TypeOf(one);
-
- CHECK(t->Is(Type::Number()));
- CHECK(t->Is(Type::Integral32()));
- CHECK(t->Is(Type::Signed32()));
- CHECK(t->Is(Type::Unsigned32()));
- CHECK(t->Is(Type::SignedSmall()));
- CHECK(t->Is(Type::UnsignedSmall()));
}
@@ -227,17 +182,6 @@ TEST(CanonicalizingNumbers) {
}
-TEST(NumberTypes) {
- JSConstantCacheTester T;
-
- FOR_FLOAT64_INPUTS(i) {
- double value = *i;
- Node* node = T.Constant(value);
- CHECK(T.TypeOf(node)->Is(Type::Of(value, T.main_zone())));
- }
-}
-
-
TEST(HeapNumbers) {
JSConstantCacheTester T;
@@ -277,21 +221,6 @@ TEST(OddballValues) {
}
-TEST(OddballTypes) {
- JSConstantCacheTester T;
-
- CHECK(T.TypeOf(T.UndefinedConstant())->Is(Type::Undefined()));
- // TODO(dcarney): figure this out.
- // CHECK(T.TypeOf(T.TheHoleConstant())->Is(Type::Internal()));
- CHECK(T.TypeOf(T.TrueConstant())->Is(Type::Boolean()));
- CHECK(T.TypeOf(T.FalseConstant())->Is(Type::Boolean()));
- CHECK(T.TypeOf(T.NullConstant())->Is(Type::Null()));
- CHECK(T.TypeOf(T.ZeroConstant())->Is(Type::Number()));
- CHECK(T.TypeOf(T.OneConstant())->Is(Type::Number()));
- CHECK(T.TypeOf(T.NaNConstant())->Is(Type::NaN()));
-}
-
-
TEST(ExternalReferences) {
// TODO(titzer): test canonicalization of external references.
}
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 88cd6c663c..604e696ab5 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -11,6 +11,13 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/typer.h"
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -463,10 +470,9 @@ TEST(JSToNumber_replacement) {
TEST(JSToNumberOfConstant) {
JSTypedLoweringTester R;
- const Operator* ops[] = {
- R.common.NumberConstant(0), R.common.NumberConstant(-1),
- R.common.NumberConstant(0.1), R.common.Int32Constant(1177),
- R.common.Float64Constant(0.99)};
+ const Operator* ops[] = {R.common.NumberConstant(0),
+ R.common.NumberConstant(-1),
+ R.common.NumberConstant(0.1)};
for (size_t i = 0; i < arraysize(ops); i++) {
Node* n = R.graph.NewNode(ops[i]);
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index ed3d79e4ba..e58de67afc 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -106,7 +106,7 @@ class TestCode : public HandleAndZoneScope {
void VerifyForwarding(TestCode& code, int count, int* expected) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone local_zone(&allocator);
ZoneVector<RpoNumber> result(&local_zone);
JumpThreading::ComputeForwarding(&local_zone, result, &code.sequence_, true);
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 6661e916db..59ef5fdd25 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -2,12 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/code-factory.h"
#include "src/code-stubs.h"
+#include "src/compilation-info.h"
#include "src/compiler.h"
-#include "src/parsing/parse-info.h"
-#include "src/zone.h"
-
-#include "src/code-factory.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
@@ -16,6 +14,8 @@
#include "src/compiler/operator.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/schedule.h"
+#include "src/parsing/parse-info.h"
+#include "src/zone/zone.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
index 8ee79ddb60..d97e038883 100644
--- a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/ast/scopes.h"
-#include "src/compiler.h"
+#include "src/compilation-info.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
@@ -37,7 +37,7 @@ struct TestHelper : public HandleAndZoneScope {
CHECK(Parser::ParseStatic(&parse_info));
CHECK(Rewriter::Rewrite(&parse_info));
- Scope::Analyze(&parse_info);
+ DeclarationScope::Analyze(&parse_info, AnalyzeMode::kRegular);
DeclarationScope* scope = info.literal()->scope();
AstValueFactory* factory = parse_info.ast_value_factory();
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index d4ea47368a..cf3da887ba 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -83,7 +83,6 @@ class ReducerTester : public HandleAndZoneScope {
common(main_zone()),
graph(main_zone()),
javascript(main_zone()),
- typer(isolate, &graph),
jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine),
maxuint32(Constant<int32_t>(kMaxUInt32)) {
Node* s = graph.NewNode(common.Start(num_parameters));
@@ -97,7 +96,6 @@ class ReducerTester : public HandleAndZoneScope {
CommonOperatorBuilder common;
Graph graph;
JSOperatorBuilder javascript;
- Typer typer;
JSGraph jsgraph;
Node* maxuint32;
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index 2221ffbc86..6cda32c792 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -64,7 +64,7 @@ CallDescriptor* GetCallDescriptor(Zone* zone, int return_count,
TEST(ReturnThreeValues) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
CallDescriptor* desc = GetCallDescriptor(&zone, 3, 2);
HandleAndZoneScope handles;
@@ -81,7 +81,7 @@ TEST(ReturnThreeValues) {
m.Return(add, sub, mul);
CompilationInfo info(ArrayVector("testing"), handles.main_isolate(),
- handles.main_zone());
+ handles.main_zone(), Code::ComputeFlags(Code::STUB));
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, desc, m.graph(), m.Export());
#ifdef ENABLE_DISASSEMBLER
diff --git a/deps/v8/test/cctest/compiler/test-node.cc b/deps/v8/test/cctest/compiler/test-node.cc
index e2aacf3100..c5fc5b3c50 100644
--- a/deps/v8/test/cctest/compiler/test-node.cc
+++ b/deps/v8/test/cctest/compiler/test-node.cc
@@ -141,7 +141,7 @@ void CheckInputs(Node* node, Node** inputs, int input_count) {
TEST(NodeUseIteratorReplaceUses) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -167,7 +167,7 @@ TEST(NodeUseIteratorReplaceUses) {
TEST(NodeUseIteratorReplaceUsesSelf) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -192,7 +192,7 @@ TEST(NodeUseIteratorReplaceUsesSelf) {
TEST(ReplaceInput) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -219,7 +219,7 @@ TEST(ReplaceInput) {
TEST(OwnedBy) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -270,7 +270,7 @@ TEST(OwnedBy) {
TEST(Uses) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -293,7 +293,7 @@ TEST(Uses) {
TEST(Inputs) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -321,7 +321,7 @@ TEST(Inputs) {
}
TEST(InsertInputs) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -396,7 +396,7 @@ TEST(InsertInputs) {
}
TEST(RemoveInput) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -427,7 +427,7 @@ TEST(RemoveInput) {
TEST(AppendInputsAndIterator) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -450,7 +450,7 @@ TEST(AppendInputsAndIterator) {
TEST(NullInputsSimple) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -478,7 +478,7 @@ TEST(NullInputsSimple) {
TEST(NullInputsAppended) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -502,7 +502,7 @@ TEST(NullInputsAppended) {
TEST(ReplaceUsesFromAppendedInputs) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -531,7 +531,7 @@ TEST(ReplaceUsesFromAppendedInputs) {
TEST(ReplaceInputMultipleUses) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -550,7 +550,7 @@ TEST(ReplaceInputMultipleUses) {
TEST(TrimInputCountInline) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -619,7 +619,7 @@ TEST(TrimInputCountInline) {
TEST(TrimInputCountOutOfLine1) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -714,7 +714,7 @@ TEST(TrimInputCountOutOfLine1) {
TEST(TrimInputCountOutOfLine2) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -784,7 +784,7 @@ TEST(TrimInputCountOutOfLine2) {
TEST(NullAllInputs) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
@@ -837,7 +837,7 @@ TEST(NullAllInputs) {
TEST(AppendAndTrim) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
Graph graph(&zone);
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index b475e9a5b9..242793340e 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -493,9 +493,9 @@ static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
TEST(SingleChanges) {
CheckChange(IrOpcode::kChangeTaggedToBit, MachineRepresentation::kTagged,
- Type::None(), MachineRepresentation::kBit);
+ Type::Boolean(), MachineRepresentation::kBit);
CheckChange(IrOpcode::kChangeBitToTagged, MachineRepresentation::kBit,
- Type::None(), MachineRepresentation::kTagged);
+ Type::Boolean(), MachineRepresentation::kTagged);
CheckChange(IrOpcode::kChangeInt31ToTaggedSigned,
MachineRepresentation::kWord32, Type::Signed31(),
@@ -525,15 +525,11 @@ TEST(SingleChanges) {
Type::Unsigned32(), MachineRepresentation::kWord32);
CheckChange(IrOpcode::kChangeTaggedToFloat64, MachineRepresentation::kTagged,
Type::Number(), MachineRepresentation::kFloat64);
- CheckChange(IrOpcode::kChangeTaggedToFloat64, MachineRepresentation::kTagged,
- Type::Number(), MachineRepresentation::kFloat64);
CheckChange(IrOpcode::kTruncateTaggedToFloat64,
MachineRepresentation::kTagged, Type::NumberOrUndefined(),
MachineRepresentation::kFloat64);
- CheckTwoChanges(IrOpcode::kChangeTaggedSignedToInt32,
- IrOpcode::kChangeInt32ToFloat64,
- MachineRepresentation::kTagged, Type::TaggedSigned(),
- MachineRepresentation::kFloat64);
+ CheckChange(IrOpcode::kChangeTaggedToFloat64, MachineRepresentation::kTagged,
+ Type::Signed31(), MachineRepresentation::kFloat64);
// Int32,Uint32 <-> Float64 are actually machine conversions.
CheckChange(IrOpcode::kChangeInt32ToFloat64, MachineRepresentation::kWord32,
@@ -546,7 +542,7 @@ TEST(SingleChanges) {
Type::Unsigned32(), MachineRepresentation::kWord32);
CheckChange(IrOpcode::kTruncateFloat64ToFloat32,
- MachineRepresentation::kFloat64, Type::None(),
+ MachineRepresentation::kFloat64, Type::Number(),
MachineRepresentation::kFloat32);
// Int32,Uint32 <-> Float32 require two changes.
@@ -570,11 +566,11 @@ TEST(SingleChanges) {
// Float32 <-> Tagged require two changes.
CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
IrOpcode::kChangeFloat64ToTagged,
- MachineRepresentation::kFloat32, Type::None(),
+ MachineRepresentation::kFloat32, Type::Number(),
MachineRepresentation::kTagged);
CheckTwoChanges(IrOpcode::kChangeTaggedToFloat64,
IrOpcode::kTruncateFloat64ToFloat32,
- MachineRepresentation::kTagged, Type::None(),
+ MachineRepresentation::kTagged, Type::Number(),
MachineRepresentation::kFloat32);
}
@@ -587,7 +583,7 @@ TEST(SignednessInWord32) {
CheckChange(IrOpcode::kChangeTaggedToUint32, MachineRepresentation::kTagged,
Type::Unsigned32(), MachineRepresentation::kWord32);
CheckChange(IrOpcode::kChangeInt32ToFloat64, MachineRepresentation::kWord32,
- Type::None(), MachineRepresentation::kFloat64);
+ Type::Signed32(), MachineRepresentation::kFloat64);
CheckChange(IrOpcode::kChangeFloat64ToInt32, MachineRepresentation::kFloat64,
Type::Signed32(), MachineRepresentation::kWord32);
CheckChange(IrOpcode::kTruncateFloat64ToWord32,
@@ -600,7 +596,7 @@ TEST(SignednessInWord32) {
CheckTwoChanges(IrOpcode::kChangeInt32ToFloat64,
IrOpcode::kTruncateFloat64ToFloat32,
- MachineRepresentation::kWord32, Type::None(),
+ MachineRepresentation::kWord32, Type::Signed32(),
MachineRepresentation::kFloat32);
CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
IrOpcode::kTruncateFloat64ToWord32,
@@ -614,13 +610,11 @@ TEST(Nops) {
// X -> X is always a nop for any single representation X.
for (size_t i = 0; i < arraysize(kMachineTypes); i++) {
- r.CheckNop(kMachineTypes[i].representation(), Type::None(),
+ r.CheckNop(kMachineTypes[i].representation(), Type::Number(),
kMachineTypes[i].representation());
}
// 32-bit floats.
- r.CheckNop(MachineRepresentation::kFloat32, Type::None(),
- MachineRepresentation::kFloat32);
r.CheckNop(MachineRepresentation::kFloat32, Type::Number(),
MachineRepresentation::kFloat32);
@@ -639,14 +633,6 @@ TEST(Nops) {
MachineRepresentation::kWord32);
// kRepBit (result of comparison) is implicitly a wordish thing.
- r.CheckNop(MachineRepresentation::kBit, Type::None(),
- MachineRepresentation::kWord8);
- r.CheckNop(MachineRepresentation::kBit, Type::None(),
- MachineRepresentation::kWord16);
- r.CheckNop(MachineRepresentation::kBit, Type::None(),
- MachineRepresentation::kWord32);
- r.CheckNop(MachineRepresentation::kBit, Type::None(),
- MachineRepresentation::kWord64);
r.CheckNop(MachineRepresentation::kBit, Type::Boolean(),
MachineRepresentation::kWord8);
r.CheckNop(MachineRepresentation::kBit, Type::Boolean(),
@@ -661,40 +647,24 @@ TEST(Nops) {
TEST(TypeErrors) {
RepresentationChangerTester r;
- // Wordish cannot be implicitly converted to/from comparison conditions.
- r.CheckTypeError(MachineRepresentation::kWord8, Type::None(),
- MachineRepresentation::kBit);
- r.CheckTypeError(MachineRepresentation::kWord16, Type::None(),
- MachineRepresentation::kBit);
- r.CheckTypeError(MachineRepresentation::kWord32, Type::None(),
- MachineRepresentation::kBit);
- r.CheckTypeError(MachineRepresentation::kWord64, Type::None(),
- MachineRepresentation::kBit);
-
- // Floats cannot be implicitly converted to/from comparison conditions.
- r.CheckTypeError(MachineRepresentation::kFloat64, Type::None(),
- MachineRepresentation::kBit);
-
// Floats cannot be implicitly converted to/from comparison conditions.
- r.CheckTypeError(MachineRepresentation::kFloat32, Type::None(),
- MachineRepresentation::kBit);
- r.CheckTypeError(MachineRepresentation::kBit, Type::None(),
+ r.CheckTypeError(MachineRepresentation::kBit, Type::Number(),
MachineRepresentation::kFloat32);
r.CheckTypeError(MachineRepresentation::kBit, Type::Boolean(),
MachineRepresentation::kFloat32);
// Word64 is internal and shouldn't be implicitly converted.
- r.CheckTypeError(MachineRepresentation::kWord64, Type::None(),
+ r.CheckTypeError(MachineRepresentation::kWord64, Type::Internal(),
MachineRepresentation::kTagged);
- r.CheckTypeError(MachineRepresentation::kTagged, Type::None(),
+ r.CheckTypeError(MachineRepresentation::kTagged, Type::Number(),
MachineRepresentation::kWord64);
r.CheckTypeError(MachineRepresentation::kTagged, Type::Boolean(),
MachineRepresentation::kWord64);
// Word64 / Word32 shouldn't be implicitly converted.
- r.CheckTypeError(MachineRepresentation::kWord64, Type::None(),
+ r.CheckTypeError(MachineRepresentation::kWord64, Type::Internal(),
MachineRepresentation::kWord32);
- r.CheckTypeError(MachineRepresentation::kWord32, Type::None(),
+ r.CheckTypeError(MachineRepresentation::kWord32, Type::Number(),
MachineRepresentation::kWord64);
r.CheckTypeError(MachineRepresentation::kWord32, Type::Signed32(),
MachineRepresentation::kWord64);
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index 446b5e7d5f..9c2b05dd4b 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -4,7 +4,7 @@
#include <utility>
-#include "src/compiler.h"
+#include "src/compilation-info.h"
#include "src/compiler/pipeline.h"
#include "src/execution.h"
#include "src/handles.h"
@@ -1067,6 +1067,105 @@ TEST(BytecodeGraphBuilderLookupSlot) {
}
}
+TEST(BytecodeGraphBuilderLookupContextSlot) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // Testing with eval called in the current context.
+ const char* inner_eval_prologue = "var x = 0; function inner() {";
+ const char* inner_eval_epilogue = "}; return inner();";
+
+ ExpectedSnippet<0> inner_eval_snippets[] = {
+ {"eval(''); return x;", {factory->NewNumber(0)}},
+ {"eval('var x = 1'); return x;", {factory->NewNumber(1)}},
+ {"'use strict'; eval('var x = 1'); return x;", {factory->NewNumber(0)}}};
+
+ for (size_t i = 0; i < arraysize(inner_eval_snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s(p1) { %s %s %s } ; %s() ;", kFunctionName,
+ inner_eval_prologue, inner_eval_snippets[i].code_snippet,
+ inner_eval_epilogue, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*inner_eval_snippets[i].return_value()));
+ }
+
+ // Testing with eval called in a parent context.
+ const char* outer_eval_prologue = "";
+ const char* outer_eval_epilogue =
+ "function inner() { return x; }; return inner();";
+
+ ExpectedSnippet<0> outer_eval_snippets[] = {
+ {"var x = 0; eval('');", {factory->NewNumber(0)}},
+ {"var x = 0; eval('var x = 1');", {factory->NewNumber(1)}},
+ {"'use strict'; var x = 0; eval('var x = 1');", {factory->NewNumber(0)}}};
+
+ for (size_t i = 0; i < arraysize(outer_eval_snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s %s %s } ; %s() ;", kFunctionName,
+ outer_eval_prologue, outer_eval_snippets[i].code_snippet,
+ outer_eval_epilogue, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*outer_eval_snippets[i].return_value()));
+ }
+}
+
+TEST(BytecodeGraphBuilderLookupGlobalSlot) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // Testing with eval called in the current context.
+ const char* inner_eval_prologue = "x = 0; function inner() {";
+ const char* inner_eval_epilogue = "}; return inner();";
+
+ ExpectedSnippet<0> inner_eval_snippets[] = {
+ {"eval(''); return x;", {factory->NewNumber(0)}},
+ {"eval('var x = 1'); return x;", {factory->NewNumber(1)}},
+ {"'use strict'; eval('var x = 1'); return x;", {factory->NewNumber(0)}}};
+
+ for (size_t i = 0; i < arraysize(inner_eval_snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s(p1) { %s %s %s } ; %s() ;", kFunctionName,
+ inner_eval_prologue, inner_eval_snippets[i].code_snippet,
+ inner_eval_epilogue, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*inner_eval_snippets[i].return_value()));
+ }
+
+ // Testing with eval called in a parent context.
+ const char* outer_eval_prologue = "";
+ const char* outer_eval_epilogue =
+ "function inner() { return x; }; return inner();";
+
+ ExpectedSnippet<0> outer_eval_snippets[] = {
+ {"x = 0; eval('');", {factory->NewNumber(0)}},
+ {"x = 0; eval('var x = 1');", {factory->NewNumber(1)}},
+ {"'use strict'; x = 0; eval('var x = 1');", {factory->NewNumber(0)}}};
+
+ for (size_t i = 0; i < arraysize(outer_eval_snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s %s %s } ; %s() ;", kFunctionName,
+ outer_eval_prologue, outer_eval_snippets[i].code_snippet,
+ outer_eval_epilogue, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*outer_eval_snippets[i].return_value()));
+ }
+}
TEST(BytecodeGraphBuilderLookupSlotWide) {
HandleAndZoneScope scope;
diff --git a/deps/v8/test/cctest/compiler/test-run-inlining.cc b/deps/v8/test/cctest/compiler/test-run-inlining.cc
index b715214c0d..aab8b4e86b 100644
--- a/deps/v8/test/cctest/compiler/test-run-inlining.cc
+++ b/deps/v8/test/cctest/compiler/test-run-inlining.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compilation-info.h"
#include "src/frames-inl.h"
#include "test/cctest/compiler/function-tester.h"
diff --git a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
index 681891c91f..47116ad674 100644
--- a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
+++ b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compilation-info.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
@@ -130,15 +131,6 @@ TEST(StringCharCodeAt) {
}
-TEST(StringCharFromCode) {
- FunctionTester T("(function(a) { return %_StringCharFromCode(a); })", flags);
-
- T.CheckCall(T.Val("a"), T.Val(97));
- T.CheckCall(T.Val("\xE2\x9D\x8A"), T.Val(0x274A));
- T.CheckCall(T.Val(""), T.undefined());
-}
-
-
TEST(StringCompare) {
FunctionTester T("(function(a,b) { return %_StringCompare(a,b); })", flags);
diff --git a/deps/v8/test/cctest/compiler/test-run-jsbranches.cc b/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
index 613528d7a0..502295e471 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/objects-inl.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-jscalls.cc b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
index f69e508f90..84d7f714ae 100644
--- a/deps/v8/test/cctest/compiler/test-run-jscalls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
@@ -2,6 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/api.h"
+#include "src/contexts.h"
+#include "src/flags.h"
+#include "src/objects.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
index ab8c42a979..8da2b53fe6 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/objects-inl.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-jsobjects.cc b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
index 80a918134f..338e0e27d3 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
@@ -2,6 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-jsops.cc b/deps/v8/test/cctest/compiler/test-run-jsops.cc
index 78e12576f1..49033f7995 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsops.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/objects-inl.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 5c2672f8d4..0f76b897c3 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -242,7 +242,8 @@ class Int32Signature : public MachineSignature {
Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph,
Schedule* schedule = nullptr) {
Isolate* isolate = CcTest::InitIsolateOnce();
- CompilationInfo info(ArrayVector("testing"), isolate, graph->zone());
+ CompilationInfo info(ArrayVector("testing"), isolate, graph->zone(),
+ Code::ComputeFlags(Code::STUB));
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, desc, graph, schedule);
CHECK(!code.is_null());
@@ -604,7 +605,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
static void Test_RunInt32SubWithRet(int retreg) {
Int32Signature sig(2);
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
RegisterPairs pairs;
while (pairs.More()) {
@@ -655,7 +656,7 @@ TEST(Run_Int32Sub_all_allocatable_single) {
Int32Signature sig(2);
RegisterPairs pairs;
while (pairs.More()) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
int parray[1];
int rarray[1];
@@ -673,7 +674,7 @@ TEST(Run_CopyTwentyInt32_all_allocatable_pairs) {
Int32Signature sig(20);
RegisterPairs pairs;
while (pairs.More()) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
int parray[2];
int rarray[] = {GetRegConfig()->GetAllocatableGeneralCode(0)};
@@ -724,7 +725,7 @@ static void Test_Int32_WeightedSum_of_size(int count) {
Int32Signature sig(count);
for (int p0 = 0; p0 < Register::kNumRegisters; p0++) {
if (GetRegConfig()->IsAllocatableGeneralCode(p0)) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
int parray[] = {p0};
@@ -787,7 +788,7 @@ void Test_Int32_Select() {
Allocator rets(rarray, 1, nullptr, 0);
RegisterConfig config(params, rets);
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
for (int i = which + 1; i <= 64; i++) {
@@ -826,7 +827,7 @@ TEST(Int64Select_registers) {
ArgsBuffer<int64_t>::Sig sig(2);
RegisterPairs pairs;
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
while (pairs.More()) {
int parray[2];
@@ -851,7 +852,7 @@ TEST(Float32Select_registers) {
ArgsBuffer<float32>::Sig sig(2);
Float32RegisterPairs pairs;
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
while (pairs.More()) {
int parray[2];
@@ -874,7 +875,7 @@ TEST(Float64Select_registers) {
ArgsBuffer<float64>::Sig sig(2);
Float64RegisterPairs pairs;
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
while (pairs.More()) {
int parray[2];
@@ -896,7 +897,7 @@ TEST(Float32Select_stack_params_return_reg) {
Allocator rets(nullptr, 0, rarray, 1);
RegisterConfig config(params, rets);
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
for (int count = 1; count < 6; count++) {
ArgsBuffer<float32>::Sig sig(count);
@@ -917,7 +918,7 @@ TEST(Float64Select_stack_params_return_reg) {
Allocator rets(nullptr, 0, rarray, 1);
RegisterConfig config(params, rets);
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
for (int count = 1; count < 6; count++) {
ArgsBuffer<float64>::Sig sig(count);
@@ -969,7 +970,7 @@ TEST(Float64StackParamsToStackParams) {
Allocator params(nullptr, 0, nullptr, 0);
Allocator rets(nullptr, 0, rarray, 1);
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
ArgsBuffer<float64>::Sig sig(2);
RegisterConfig config(params, rets);
@@ -1024,7 +1025,7 @@ void MixedParamTest(int start) {
RegisterConfig config(palloc, ralloc);
for (int which = 0; which < num_params; which++) {
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
HandleScope scope(isolate);
MachineSignature::Builder builder(&zone, 1, num_params);
diff --git a/deps/v8/test/cctest/compiler/test-run-stackcheck.cc b/deps/v8/test/cctest/compiler/test-run-stackcheck.cc
index 52556ac87f..0dd28a7419 100644
--- a/deps/v8/test/cctest/compiler/test-run-stackcheck.cc
+++ b/deps/v8/test/cctest/compiler/test-run-stackcheck.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/isolate.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc
index feb25c992a..b34e5d4a51 100644
--- a/deps/v8/test/cctest/compiler/test-run-stubs.cc
+++ b/deps/v8/test/cctest/compiler/test-run-stubs.cc
@@ -4,6 +4,7 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-graph.h"
@@ -11,7 +12,6 @@
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/pipeline.h"
-#include "src/parsing/parser.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc b/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc
index 4536725d4f..a7e63822b5 100644
--- a/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc
+++ b/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc
@@ -6,6 +6,10 @@
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM) || \
defined(V8_TARGET_ARCH_ARM64)
+#include "src/flags.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "src/unicode-cache.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-variables.cc b/deps/v8/test/cctest/compiler/test-run-variables.cc
index 6997967b42..9d6291a5aa 100644
--- a/deps/v8/test/cctest/compiler/test-run-variables.cc
+++ b/deps/v8/test/cctest/compiler/test-run-variables.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/api.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
deleted file mode 100644
index 2e3dcd148a..0000000000
--- a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
+++ /dev/null
@@ -1,1756 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <limits>
-
-#include "src/ast/scopes.h"
-#include "src/compiler/access-builder.h"
-#include "src/compiler/control-builders.h"
-#include "src/compiler/effect-control-linearizer.h"
-#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/memory-optimizer.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/pipeline.h"
-#include "src/compiler/representation-change.h"
-#include "src/compiler/scheduler.h"
-#include "src/compiler/simplified-lowering.h"
-#include "src/compiler/source-position.h"
-#include "src/compiler/typer.h"
-#include "src/compiler/verifier.h"
-#include "src/execution.h"
-#include "src/parsing/parser.h"
-#include "src/parsing/rewriter.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/compiler/codegen-tester.h"
-#include "test/cctest/compiler/function-tester.h"
-#include "test/cctest/compiler/graph-builder-tester.h"
-#include "test/cctest/compiler/value-helper.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-template <typename ReturnType>
-class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
- public:
- SimplifiedLoweringTester(MachineType p0 = MachineType::None(),
- MachineType p1 = MachineType::None())
- : GraphBuilderTester<ReturnType>(p0, p1),
- typer(new Typer(this->isolate(), this->graph())),
- javascript(this->zone()),
- jsgraph(this->isolate(), this->graph(), this->common(), &javascript,
- this->simplified(), this->machine()),
- source_positions(jsgraph.graph()),
- lowering(&jsgraph, this->zone(), &source_positions) {}
- ~SimplifiedLoweringTester() final { delete typer; }
-
- Typer* typer = nullptr;
- JSOperatorBuilder javascript;
- JSGraph jsgraph;
- SourcePositionTable source_positions;
- SimplifiedLowering lowering;
-
- void LowerAllNodes() {
- this->End();
- typer->Run();
- delete typer, typer = nullptr;
- lowering.LowerAllNodes();
- }
-
- void LowerAllNodesAndLowerChanges() {
- this->End();
- typer->Run();
- delete typer, typer = nullptr;
- lowering.LowerAllNodes();
-
- Schedule* schedule = Scheduler::ComputeSchedule(this->zone(), this->graph(),
- Scheduler::kNoFlags);
- EffectControlLinearizer linearizer(&jsgraph, schedule, this->zone());
- linearizer.Run();
-
- MemoryOptimizer memory_optimizer(&jsgraph, this->zone());
- memory_optimizer.Optimize();
- }
-
- void CheckNumberCall(double expected, double input) {
- // TODO(titzer): make calls to NewNumber work in cctests.
- if (expected <= Smi::kMinValue) return;
- if (expected >= Smi::kMaxValue) return;
- Handle<Object> num = factory()->NewNumber(input);
- Object* result = this->Call(*num);
- CHECK(factory()->NewNumber(expected)->SameValue(result));
- }
-
- template <typename T>
- T* CallWithPotentialGC() {
- // TODO(titzer): we wrap the code in a JSFunction here to reuse the
- // JSEntryStub; that could be done with a special prologue or other stub.
- Handle<JSFunction> fun = FunctionTester::ForMachineGraph(this->graph(), 0);
- Handle<Object>* args = NULL;
- MaybeHandle<Object> result = Execution::Call(
- this->isolate(), fun, factory()->undefined_value(), 0, args);
- return T::cast(*result.ToHandleChecked());
- }
-
- Factory* factory() { return this->isolate()->factory(); }
- Heap* heap() { return this->isolate()->heap(); }
-};
-
-
-// TODO(titzer): factor these tests out to test-run-simplifiedops.cc.
-// TODO(titzer): test tagged representation for input to NumberToInt32.
-TEST(RunNumberToInt32_float64) {
- // TODO(titzer): explicit load/stores here are only because of representations
- double input;
- int32_t result;
- SimplifiedLoweringTester<Object*> t;
- FieldAccess load = {kUntaggedBase, 0,
- Handle<Name>(), Type::Number(),
- MachineType::Float64(), kNoWriteBarrier};
- Node* loaded = t.LoadField(load, t.PointerConstant(&input));
- NodeProperties::SetType(loaded, Type::Number());
- Node* convert = t.NumberToInt32(loaded);
- FieldAccess store = {kUntaggedBase, 0,
- Handle<Name>(), Type::Signed32(),
- MachineType::Int32(), kNoWriteBarrier};
- t.StoreField(store, t.PointerConstant(&result), convert);
- t.Return(t.jsgraph.TrueConstant());
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- FOR_FLOAT64_INPUTS(i) {
- input = *i;
- int32_t expected = DoubleToInt32(*i);
- t.Call();
- CHECK_EQ(expected, result);
- }
-}
-
-
-// TODO(titzer): test tagged representation for input to NumberToUint32.
-TEST(RunNumberToUint32_float64) {
- // TODO(titzer): explicit load/stores here are only because of representations
- double input;
- uint32_t result;
- SimplifiedLoweringTester<Object*> t;
- FieldAccess load = {kUntaggedBase, 0,
- Handle<Name>(), Type::Number(),
- MachineType::Float64(), kNoWriteBarrier};
- Node* loaded = t.LoadField(load, t.PointerConstant(&input));
- NodeProperties::SetType(loaded, Type::Number());
- Node* convert = t.NumberToUint32(loaded);
- FieldAccess store = {kUntaggedBase, 0,
- Handle<Name>(), Type::Unsigned32(),
- MachineType::Uint32(), kNoWriteBarrier};
- t.StoreField(store, t.PointerConstant(&result), convert);
- t.Return(t.jsgraph.TrueConstant());
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- FOR_FLOAT64_INPUTS(i) {
- input = *i;
- uint32_t expected = DoubleToUint32(*i);
- t.Call();
- CHECK_EQ(static_cast<int32_t>(expected), static_cast<int32_t>(result));
- }
- }
-
-
-// Create a simple JSObject with a unique map.
-static Handle<JSObject> TestObject() {
- static int index = 0;
- char buffer[50];
- v8::base::OS::SNPrintF(buffer, 50, "({'a_%d':1})", index++);
- return Handle<JSObject>::cast(v8::Utils::OpenHandle(*CompileRun(buffer)));
-}
-
-
-TEST(RunLoadMap) {
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- FieldAccess access = AccessBuilder::ForMap();
- Node* load = t.LoadField(access, t.Parameter(0));
- t.Return(load);
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- Handle<JSObject> src = TestObject();
- Handle<Map> src_map(src->map());
- Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
- CHECK_EQ(*src_map, result);
-}
-
-
-TEST(RunStoreMap) {
- SimplifiedLoweringTester<int32_t> t(MachineType::AnyTagged(),
- MachineType::AnyTagged());
- FieldAccess access = AccessBuilder::ForMap();
- t.StoreField(access, t.Parameter(1), t.Parameter(0));
- t.Return(t.jsgraph.TrueConstant());
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- Handle<JSObject> src = TestObject();
- Handle<Map> src_map(src->map());
- Handle<JSObject> dst = TestObject();
- CHECK(src->map() != dst->map());
- t.Call(*src_map, *dst); // TODO(titzer): raw pointers in call
- CHECK(*src_map == dst->map());
- }
-
-
-TEST(RunLoadProperties) {
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- FieldAccess access = AccessBuilder::ForJSObjectProperties();
- Node* load = t.LoadField(access, t.Parameter(0));
- t.Return(load);
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- Handle<JSObject> src = TestObject();
- Handle<FixedArray> src_props(src->properties());
- Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
- CHECK_EQ(*src_props, result);
-}
-
-
-TEST(RunLoadStoreMap) {
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged(),
- MachineType::AnyTagged());
- FieldAccess access = AccessBuilder::ForMap();
- Node* load = t.LoadField(access, t.Parameter(0));
- t.StoreField(access, t.Parameter(1), load);
- t.Return(load);
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- Handle<JSObject> src = TestObject();
- Handle<Map> src_map(src->map());
- Handle<JSObject> dst = TestObject();
- CHECK(src->map() != dst->map());
- Object* result = t.Call(*src, *dst); // TODO(titzer): raw pointers in call
- CHECK(result->IsMap());
- CHECK_EQ(*src_map, result);
- CHECK(*src_map == dst->map());
-}
-
-
-TEST(RunLoadStoreFixedArrayIndex) {
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- ElementAccess access = AccessBuilder::ForFixedArrayElement();
- Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0));
- t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), load);
- t.Return(load);
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- Handle<FixedArray> array = t.factory()->NewFixedArray(2);
- Handle<JSObject> src = TestObject();
- Handle<JSObject> dst = TestObject();
- array->set(0, *src);
- array->set(1, *dst);
- Object* result = t.Call(*array);
- CHECK_EQ(*src, result);
- CHECK_EQ(*src, array->get(0));
- CHECK_EQ(*src, array->get(1));
-}
-
-
-TEST(RunLoadStoreArrayBuffer) {
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- const int index = 12;
- const int array_length = 2 * index;
- ElementAccess buffer_access =
- AccessBuilder::ForTypedArrayElement(kExternalInt8Array, true);
- Node* backing_store = t.LoadField(
- AccessBuilder::ForJSArrayBufferBackingStore(), t.Parameter(0));
- Node* load =
- t.LoadElement(buffer_access, backing_store, t.Int32Constant(index));
- t.StoreElement(buffer_access, backing_store, t.Int32Constant(index + 1),
- load);
- t.Return(t.jsgraph.TrueConstant());
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
- JSArrayBuffer::SetupAllocatingData(array, t.isolate(), array_length);
- uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
- for (int i = 0; i < array_length; i++) {
- data[i] = i;
- }
-
- // TODO(titzer): raw pointers in call
- Object* result = t.Call(*array);
- CHECK_EQ(t.isolate()->heap()->true_value(), result);
- for (int i = 0; i < array_length; i++) {
- uint8_t expected = i;
- if (i == (index + 1)) expected = index;
- CHECK_EQ(data[i], expected);
- }
- }
-
-
-TEST(RunLoadFieldFromUntaggedBase) {
- Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
-
- for (size_t i = 0; i < arraysize(smis); i++) {
- int offset = static_cast<int>(i * sizeof(Smi*));
- FieldAccess access = {kUntaggedBase,
- offset,
- Handle<Name>(),
- Type::Integral32(),
- MachineType::AnyTagged(),
- kNoWriteBarrier};
-
- SimplifiedLoweringTester<Object*> t;
- Node* load = t.LoadField(access, t.PointerConstant(smis));
- t.Return(load);
- t.LowerAllNodesAndLowerChanges();
-
- for (int j = -5; j <= 5; j++) {
- Smi* expected = Smi::FromInt(j);
- smis[i] = expected;
- CHECK_EQ(expected, t.Call());
- }
- }
-}
-
-
-TEST(RunStoreFieldToUntaggedBase) {
- Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
-
- for (size_t i = 0; i < arraysize(smis); i++) {
- int offset = static_cast<int>(i * sizeof(Smi*));
- FieldAccess access = {kUntaggedBase,
- offset,
- Handle<Name>(),
- Type::Integral32(),
- MachineType::AnyTagged(),
- kNoWriteBarrier};
-
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- Node* p0 = t.Parameter(0);
- t.StoreField(access, t.PointerConstant(smis), p0);
- t.Return(p0);
- t.LowerAllNodesAndLowerChanges();
-
- for (int j = -5; j <= 5; j++) {
- Smi* expected = Smi::FromInt(j);
- smis[i] = Smi::FromInt(-100);
- CHECK_EQ(expected, t.Call(expected));
- CHECK_EQ(expected, smis[i]);
- }
- }
-}
-
-
-TEST(RunLoadElementFromUntaggedBase) {
- Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
- Smi::FromInt(4), Smi::FromInt(5)};
-
- for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
- for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
- int offset = static_cast<int>(i * sizeof(Smi*));
- ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
- MachineType::AnyTagged(), kNoWriteBarrier};
-
- SimplifiedLoweringTester<Object*> t;
- Node* load = t.LoadElement(access, t.PointerConstant(smis),
- t.Int32Constant(static_cast<int>(j)));
- t.Return(load);
- t.LowerAllNodesAndLowerChanges();
-
- for (int k = -5; k <= 5; k++) {
- Smi* expected = Smi::FromInt(k);
- smis[i + j] = expected;
- CHECK_EQ(expected, t.Call());
- }
- }
- }
-}
-
-
-TEST(RunStoreElementFromUntaggedBase) {
- Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
- Smi::FromInt(4), Smi::FromInt(5)};
-
- for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
- for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
- int offset = static_cast<int>(i * sizeof(Smi*));
- ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
- MachineType::AnyTagged(), kNoWriteBarrier};
-
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- Node* p0 = t.Parameter(0);
- t.StoreElement(access, t.PointerConstant(smis),
- t.Int32Constant(static_cast<int>(j)), p0);
- t.Return(p0);
- t.LowerAllNodesAndLowerChanges();
-
- for (int k = -5; k <= 5; k++) {
- Smi* expected = Smi::FromInt(k);
- smis[i + j] = Smi::FromInt(-100);
- CHECK_EQ(expected, t.Call(expected));
- CHECK_EQ(expected, smis[i + j]);
- }
-
- // TODO(titzer): assert the contents of the array.
- }
- }
-}
-
-
-// A helper class for accessing fields and elements of various types, on both
-// tagged and untagged base pointers. Contains both tagged and untagged buffers
-// for testing direct memory access from generated code.
-template <typename E>
-class AccessTester : public HandleAndZoneScope {
- public:
- bool tagged;
- MachineType rep;
- E* original_elements;
- size_t num_elements;
- E* untagged_array;
- Handle<ByteArray> tagged_array; // TODO(titzer): use FixedArray for tagged.
-
- AccessTester(bool t, MachineType r, E* orig, size_t num)
- : tagged(t),
- rep(r),
- original_elements(orig),
- num_elements(num),
- untagged_array(static_cast<E*>(malloc(ByteSize()))),
- tagged_array(main_isolate()->factory()->NewByteArray(
- static_cast<int>(ByteSize()))) {
- Reinitialize();
- }
-
- ~AccessTester() { free(untagged_array); }
-
- size_t ByteSize() { return num_elements * sizeof(E); }
-
- // Nuke both {untagged_array} and {tagged_array} with {original_elements}.
- void Reinitialize() {
- memcpy(untagged_array, original_elements, ByteSize());
- CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
- E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
- memcpy(raw, original_elements, ByteSize());
- }
-
- // Create and run code that copies the element in either {untagged_array}
- // or {tagged_array} at index {from_index} to index {to_index}.
- void RunCopyElement(int from_index, int to_index) {
- // TODO(titzer): test element and field accesses where the base is not
- // a constant in the code.
- BoundsCheck(from_index);
- BoundsCheck(to_index);
- ElementAccess access = GetElementAccess();
-
- SimplifiedLoweringTester<Object*> t;
- Node* ptr = GetBaseNode(&t);
- Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index));
- t.StoreElement(access, ptr, t.Int32Constant(to_index), load);
- t.Return(t.jsgraph.TrueConstant());
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- Object* result = t.Call();
- CHECK_EQ(t.isolate()->heap()->true_value(), result);
- }
-
- // Create and run code that copies the field in either {untagged_array}
- // or {tagged_array} at index {from_index} to index {to_index}.
- void RunCopyField(int from_index, int to_index) {
- BoundsCheck(from_index);
- BoundsCheck(to_index);
- FieldAccess from_access = GetFieldAccess(from_index);
- FieldAccess to_access = GetFieldAccess(to_index);
-
- SimplifiedLoweringTester<Object*> t;
- Node* ptr = GetBaseNode(&t);
- Node* load = t.LoadField(from_access, ptr);
- t.StoreField(to_access, ptr, load);
- t.Return(t.jsgraph.TrueConstant());
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- Object* result = t.Call();
- CHECK_EQ(t.isolate()->heap()->true_value(), result);
- }
-
- // Create and run code that copies the elements from {this} to {that}.
- void RunCopyElements(AccessTester<E>* that) {
-// TODO(titzer): Rewrite this test without StructuredGraphBuilder support.
-#if 0
- SimplifiedLoweringTester<Object*> t;
-
- Node* one = t.Int32Constant(1);
- Node* index = t.Int32Constant(0);
- Node* limit = t.Int32Constant(static_cast<int>(num_elements));
- t.environment()->Push(index);
- Node* src = this->GetBaseNode(&t);
- Node* dst = that->GetBaseNode(&t);
- {
- LoopBuilder loop(&t);
- loop.BeginLoop();
- // Loop exit condition
- index = t.environment()->Top();
- Node* condition = t.Int32LessThan(index, limit);
- loop.BreakUnless(condition);
- // dst[index] = src[index]
- index = t.environment()->Pop();
- Node* load = t.LoadElement(this->GetElementAccess(), src, index);
- t.StoreElement(that->GetElementAccess(), dst, index, load);
- // index++
- index = t.Int32Add(index, one);
- t.environment()->Push(index);
- // continue
- loop.EndBody();
- loop.EndLoop();
- }
- index = t.environment()->Pop();
- t.Return(t.jsgraph.TrueConstant());
- t.LowerAllNodes();
- t.GenerateCode();
-
- Object* result = t.Call();
- CHECK_EQ(t.isolate()->heap()->true_value(), result);
-#endif
- }
-
- E GetElement(int index) {
- BoundsCheck(index);
- if (tagged) {
- return GetTaggedElement(index);
- } else {
- return untagged_array[index];
- }
- }
-
- private:
- ElementAccess GetElementAccess() {
- ElementAccess access = {tagged ? kTaggedBase : kUntaggedBase,
- tagged ? FixedArrayBase::kHeaderSize : 0,
- Type::Any(), rep, kFullWriteBarrier};
- return access;
- }
-
- FieldAccess GetFieldAccess(int field) {
- int offset = field * sizeof(E);
- FieldAccess access = {tagged ? kTaggedBase : kUntaggedBase,
- offset + (tagged ? FixedArrayBase::kHeaderSize : 0),
- Handle<Name>(),
- Type::Any(),
- rep,
- kFullWriteBarrier};
- return access;
- }
-
- template <typename T>
- Node* GetBaseNode(SimplifiedLoweringTester<T>* t) {
- return tagged ? t->HeapConstant(tagged_array)
- : t->PointerConstant(untagged_array);
- }
-
- void BoundsCheck(int index) {
- CHECK_GE(index, 0);
- CHECK_LT(index, static_cast<int>(num_elements));
- CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
- }
-
- E GetTaggedElement(int index) {
- E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
- return raw[index];
- }
-};
-
-template <>
-double AccessTester<double>::GetTaggedElement(int index) {
- return ReadDoubleValue(tagged_array->GetDataStartAddress() +
- index * sizeof(double));
-}
-
-
-template <typename E>
-static void RunAccessTest(MachineType rep, E* original_elements, size_t num) {
- int num_elements = static_cast<int>(num);
-
- for (int taggedness = 0; taggedness < 2; taggedness++) {
- AccessTester<E> a(taggedness == 1, rep, original_elements, num);
- for (int field = 0; field < 2; field++) {
- for (int i = 0; i < num_elements - 1; i++) {
- a.Reinitialize();
- if (field == 0) {
- a.RunCopyField(i, i + 1); // Test field read/write.
- } else {
- a.RunCopyElement(i, i + 1); // Test element read/write.
- }
- for (int j = 0; j < num_elements; j++) {
- E expect =
- j == (i + 1) ? original_elements[i] : original_elements[j];
- CHECK_EQ(expect, a.GetElement(j));
- }
- }
- }
- }
- // Test array copy.
- for (int tf = 0; tf < 2; tf++) {
- for (int tt = 0; tt < 2; tt++) {
- AccessTester<E> a(tf == 1, rep, original_elements, num);
- AccessTester<E> b(tt == 1, rep, original_elements, num);
- a.RunCopyElements(&b);
- for (int i = 0; i < num_elements; i++) {
- CHECK_EQ(a.GetElement(i), b.GetElement(i));
- }
- }
- }
-}
-
-
-TEST(RunAccessTests_uint8) {
- uint8_t data[] = {0x07, 0x16, 0x25, 0x34, 0x43, 0x99,
- 0xab, 0x78, 0x89, 0x19, 0x2b, 0x38};
- RunAccessTest<uint8_t>(MachineType::Int8(), data, arraysize(data));
-}
-
-
-TEST(RunAccessTests_uint16) {
- uint16_t data[] = {0x071a, 0x162b, 0x253c, 0x344d, 0x435e, 0x7777};
- RunAccessTest<uint16_t>(MachineType::Int16(), data, arraysize(data));
-}
-
-
-TEST(RunAccessTests_int32) {
- int32_t data[] = {-211, 211, 628347, 2000000000, -2000000000, -1, -100000034};
- RunAccessTest<int32_t>(MachineType::Int32(), data, arraysize(data));
-}
-
-
-#define V8_2PART_INT64(a, b) (((static_cast<int64_t>(a) << 32) + 0x##b##u))
-
-
-TEST(RunAccessTests_int64) {
- if (kPointerSize != 8) return;
- int64_t data[] = {V8_2PART_INT64(0x10111213, 14151617),
- V8_2PART_INT64(0x20212223, 24252627),
- V8_2PART_INT64(0x30313233, 34353637),
- V8_2PART_INT64(0xa0a1a2a3, a4a5a6a7),
- V8_2PART_INT64(0xf0f1f2f3, f4f5f6f7)};
- RunAccessTest<int64_t>(MachineType::Int64(), data, arraysize(data));
-}
-
-
-TEST(RunAccessTests_float64) {
- double data[] = {1.25, -1.25, 2.75, 11.0, 11100.8};
- RunAccessTest<double>(MachineType::Float64(), data, arraysize(data));
-}
-
-
-TEST(RunAccessTests_Smi) {
- Smi* data[] = {Smi::FromInt(-1), Smi::FromInt(-9),
- Smi::FromInt(0), Smi::FromInt(666),
- Smi::FromInt(77777), Smi::FromInt(Smi::kMaxValue)};
- RunAccessTest<Smi*>(MachineType::AnyTagged(), data, arraysize(data));
-}
-
-
-TEST(RunAllocate) {
- PretenureFlag flag[] = {NOT_TENURED, TENURED};
-
- for (size_t i = 0; i < arraysize(flag); i++) {
- SimplifiedLoweringTester<HeapObject*> t;
- FieldAccess access = AccessBuilder::ForMap();
- Node* size = t.jsgraph.Constant(HeapNumber::kSize);
- Node* alloc = t.NewNode(t.simplified()->Allocate(flag[i]), size);
- Node* map = t.jsgraph.Constant(t.factory()->heap_number_map());
- t.StoreField(access, alloc, map);
- t.Return(alloc);
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- HeapObject* result = t.CallWithPotentialGC<HeapObject>();
- CHECK(t.heap()->new_space()->Contains(result) || flag[i] == TENURED);
- CHECK(t.heap()->old_space()->Contains(result) || flag[i] == NOT_TENURED);
- CHECK(result->IsHeapNumber());
- }
-}
-
-
-// Fills in most of the nodes of the graph in order to make tests shorter.
-class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
- public:
- Typer* typer = nullptr;
- JSOperatorBuilder javascript;
- JSGraph jsgraph;
- Node* p0;
- Node* p1;
- Node* p2;
- Node* start;
- Node* end;
- Node* ret;
-
- explicit TestingGraph(Type* p0_type, Type* p1_type = Type::None(),
- Type* p2_type = Type::None())
- : GraphAndBuilders(main_zone()),
- typer(new Typer(main_isolate(), graph())),
- javascript(main_zone()),
- jsgraph(main_isolate(), graph(), common(), &javascript, simplified(),
- machine()) {
- start = graph()->NewNode(common()->Start(4));
- graph()->SetStart(start);
- ret =
- graph()->NewNode(common()->Return(), jsgraph.Constant(0), start, start);
- end = graph()->NewNode(common()->End(1), ret);
- graph()->SetEnd(end);
- p0 = graph()->NewNode(common()->Parameter(0), start);
- p1 = graph()->NewNode(common()->Parameter(1), start);
- p2 = graph()->NewNode(common()->Parameter(2), start);
- typer->Run();
- NodeProperties::SetType(p0, p0_type);
- NodeProperties::SetType(p1, p1_type);
- NodeProperties::SetType(p2, p2_type);
- }
- ~TestingGraph() { delete typer; }
-
- void CheckLoweringBinop(IrOpcode::Value expected, const Operator* op) {
- Node* node = Return(graph()->NewNode(op, p0, p1));
- Lower();
- CHECK_EQ(expected, node->opcode());
- }
-
- void CheckLoweringStringBinop(IrOpcode::Value expected, const Operator* op) {
- Node* node = Return(
- graph()->NewNode(op, p0, p1, graph()->start(), graph()->start()));
- Lower();
- CHECK_EQ(expected, node->opcode());
- }
-
- void CheckLoweringTruncatedBinop(IrOpcode::Value expected, const Operator* op,
- const Operator* trunc) {
- Node* node = graph()->NewNode(op, p0, p1);
- Return(graph()->NewNode(trunc, node));
- Lower();
- CHECK_EQ(expected, node->opcode());
- }
-
- void Lower() {
- delete typer;
- SourcePositionTable table(jsgraph.graph());
- SimplifiedLowering(&jsgraph, jsgraph.zone(), &table).LowerAllNodes();
- typer = new Typer(main_isolate(), graph());
- }
-
- void LowerAllNodesAndLowerChanges() {
- delete typer;
- SourcePositionTable table(jsgraph.graph());
- SimplifiedLowering(&jsgraph, jsgraph.zone(), &table).LowerAllNodes();
-
- Schedule* schedule = Scheduler::ComputeSchedule(this->zone(), this->graph(),
- Scheduler::kNoFlags);
- EffectControlLinearizer linearizer(&jsgraph, schedule, this->zone());
- linearizer.Run();
-
- MemoryOptimizer memory_optimizer(&jsgraph, this->zone());
- memory_optimizer.Optimize();
- typer = new Typer(main_isolate(), graph());
- }
-
- // Inserts the node as the return value of the graph.
- Node* Return(Node* node) {
- ret->ReplaceInput(0, node);
- return node;
- }
-
- // Inserts the node as the effect input to the return of the graph.
- void Effect(Node* node) { ret->ReplaceInput(1, node); }
-
- Node* ExampleWithOutput(MachineType type) {
- if (type.semantic() == MachineSemantic::kInt32) {
- return graph()->NewNode(machine()->Int32Add(), jsgraph.Int32Constant(1),
- jsgraph.Int32Constant(1));
- } else if (type.semantic() == MachineSemantic::kUint32) {
- return graph()->NewNode(machine()->Word32Shr(), jsgraph.Int32Constant(1),
- jsgraph.Int32Constant(1));
- } else if (type.representation() == MachineRepresentation::kFloat64) {
- return graph()->NewNode(machine()->Float64Add(),
- jsgraph.Float64Constant(1),
- jsgraph.Float64Constant(1));
- } else if (type.representation() == MachineRepresentation::kBit) {
- return graph()->NewNode(machine()->Word32Equal(),
- jsgraph.Int32Constant(1),
- jsgraph.Int32Constant(1));
- } else if (type.representation() == MachineRepresentation::kWord64) {
- return graph()->NewNode(machine()->Int64Add(), Int64Constant(1),
- Int64Constant(1));
- } else {
- CHECK(type.representation() == MachineRepresentation::kTagged);
- return p0;
- }
- }
-
- Node* Use(Node* node, MachineType type) {
- if (type.semantic() == MachineSemantic::kInt32) {
- return graph()->NewNode(machine()->Int32LessThan(), node,
- jsgraph.Int32Constant(1));
- } else if (type.semantic() == MachineSemantic::kUint32) {
- return graph()->NewNode(machine()->Uint32LessThan(), node,
- jsgraph.Int32Constant(1));
- } else if (type.representation() == MachineRepresentation::kFloat64) {
- return graph()->NewNode(machine()->Float64Add(), node,
- jsgraph.Float64Constant(1));
- } else if (type.representation() == MachineRepresentation::kWord64) {
- return graph()->NewNode(machine()->Int64LessThan(), node,
- Int64Constant(1));
- } else if (type.representation() == MachineRepresentation::kWord32) {
- return graph()->NewNode(machine()->Word32Equal(), node,
- jsgraph.Int32Constant(1));
- } else {
- return graph()->NewNode(simplified()->ReferenceEqual(), node,
- jsgraph.TrueConstant());
- }
- }
-
- Node* Branch(Node* cond) {
- Node* br = graph()->NewNode(common()->Branch(), cond, start);
- Node* tb = graph()->NewNode(common()->IfTrue(), br);
- Node* fb = graph()->NewNode(common()->IfFalse(), br);
- Node* m = graph()->NewNode(common()->Merge(2), tb, fb);
- NodeProperties::ReplaceControlInput(ret, m);
- return br;
- }
-
- Node* Int64Constant(int64_t v) {
- return graph()->NewNode(common()->Int64Constant(v));
- }
-
- SimplifiedOperatorBuilder* simplified() { return &main_simplified_; }
- MachineOperatorBuilder* machine() { return &main_machine_; }
- CommonOperatorBuilder* common() { return &main_common_; }
- Graph* graph() { return main_graph_; }
-};
-
-
-TEST(LowerBooleanNot_bit_bit) {
- // BooleanNot(x: kRepBit) used as kRepBit
- TestingGraph t(Type::Boolean());
- Node* b = t.ExampleWithOutput(MachineType::Bool());
- Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
- Node* use = t.Branch(inv);
- t.Lower();
- Node* cmp = use->InputAt(0);
- CHECK_EQ(t.machine()->Word32Equal()->opcode(), cmp->opcode());
- CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
- Node* f = t.jsgraph.Int32Constant(0);
- CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
-}
-
-
-TEST(LowerBooleanNot_bit_tagged) {
- // BooleanNot(x: kRepBit) used as kRepTagged
- TestingGraph t(Type::Boolean());
- Node* b = t.ExampleWithOutput(MachineType::Bool());
- Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
- Node* use = t.Use(inv, MachineType::AnyTagged());
- t.Return(use);
- t.Lower();
- CHECK_EQ(IrOpcode::kChangeBitToTagged, use->InputAt(0)->opcode());
- Node* cmp = use->InputAt(0)->InputAt(0);
- CHECK_EQ(t.machine()->Word32Equal()->opcode(), cmp->opcode());
- CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
- Node* f = t.jsgraph.Int32Constant(0);
- CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
-}
-
-
-TEST(LowerBooleanNot_tagged_bit) {
- // BooleanNot(x: kRepTagged) used as kRepBit
- TestingGraph t(Type::Boolean());
- Node* b = t.p0;
- Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
- Node* use = t.Branch(inv);
- t.Lower();
- Node* cmp = use->InputAt(0);
- CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
- CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
- Node* f = t.jsgraph.FalseConstant();
- CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
-}
-
-
-TEST(LowerBooleanNot_tagged_tagged) {
- // BooleanNot(x: kRepTagged) used as kRepTagged
- TestingGraph t(Type::Boolean());
- Node* b = t.p0;
- Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
- Node* use = t.Use(inv, MachineType::AnyTagged());
- t.Return(use);
- t.Lower();
- CHECK_EQ(IrOpcode::kChangeBitToTagged, use->InputAt(0)->opcode());
- Node* cmp = use->InputAt(0)->InputAt(0);
- CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
- CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
- Node* f = t.jsgraph.FalseConstant();
- CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
-}
-
-static Type* test_types[] = {Type::Signed32(), Type::Unsigned32(),
- Type::Number()};
-
-TEST(LowerNumberCmp_to_int32) {
- TestingGraph t(Type::Signed32(), Type::Signed32());
-
- t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
- t.CheckLoweringBinop(IrOpcode::kInt32LessThan,
- t.simplified()->NumberLessThan());
- t.CheckLoweringBinop(IrOpcode::kInt32LessThanOrEqual,
- t.simplified()->NumberLessThanOrEqual());
-}
-
-
-TEST(LowerNumberCmp_to_uint32) {
- TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
-
- t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
- t.CheckLoweringBinop(IrOpcode::kUint32LessThan,
- t.simplified()->NumberLessThan());
- t.CheckLoweringBinop(IrOpcode::kUint32LessThanOrEqual,
- t.simplified()->NumberLessThanOrEqual());
-}
-
-
-TEST(LowerNumberCmp_to_float64) {
- TestingGraph t(Type::Number(), Type::Number());
-
- t.CheckLoweringBinop(IrOpcode::kFloat64Equal, t.simplified()->NumberEqual());
- t.CheckLoweringBinop(IrOpcode::kFloat64LessThan,
- t.simplified()->NumberLessThan());
- t.CheckLoweringBinop(IrOpcode::kFloat64LessThanOrEqual,
- t.simplified()->NumberLessThanOrEqual());
-}
-
-
-TEST(LowerNumberAddSub_to_int32) {
- HandleAndZoneScope scope;
- Type* small_range = Type::Range(1, 10, scope.main_zone());
- Type* large_range = Type::Range(-1e+13, 1e+14, scope.main_zone());
- static Type* types[] = {Type::Signed32(), Type::Integral32(), small_range,
- large_range};
-
- for (size_t i = 0; i < arraysize(types); i++) {
- for (size_t j = 0; j < arraysize(types); j++) {
- TestingGraph t(types[i], types[j]);
- t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
- t.simplified()->NumberAdd(),
- t.simplified()->NumberToInt32());
- t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
- t.simplified()->NumberSubtract(),
- t.simplified()->NumberToInt32());
- }
- }
-}
-
-
-TEST(LowerNumberAddSub_to_uint32) {
- HandleAndZoneScope scope;
- Type* small_range = Type::Range(1, 10, scope.main_zone());
- Type* large_range = Type::Range(-1e+13, 1e+14, scope.main_zone());
- static Type* types[] = {Type::Signed32(), Type::Integral32(), small_range,
- large_range};
-
- for (size_t i = 0; i < arraysize(types); i++) {
- for (size_t j = 0; j < arraysize(types); j++) {
- TestingGraph t(types[i], types[j]);
- t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
- t.simplified()->NumberAdd(),
- t.simplified()->NumberToUint32());
- t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
- t.simplified()->NumberSubtract(),
- t.simplified()->NumberToUint32());
- }
- }
-}
-
-
-TEST(LowerNumberAddSub_to_float64) {
- for (size_t i = 0; i < arraysize(test_types); i++) {
- TestingGraph t(test_types[i], test_types[i]);
-
- t.CheckLoweringBinop(IrOpcode::kFloat64Add, t.simplified()->NumberAdd());
- t.CheckLoweringBinop(IrOpcode::kFloat64Sub,
- t.simplified()->NumberSubtract());
- }
-}
-
-
-TEST(LowerNumberDivMod_to_float64) {
- for (size_t i = 0; i < arraysize(test_types); i++) {
- TestingGraph t(test_types[i], test_types[i]);
-
- t.CheckLoweringBinop(IrOpcode::kFloat64Div, t.simplified()->NumberDivide());
- if (!test_types[i]->Is(Type::Unsigned32())) {
- t.CheckLoweringBinop(IrOpcode::kFloat64Mod,
- t.simplified()->NumberModulus());
- }
- }
-}
-
-
-static void CheckChangeOf(IrOpcode::Value change, Node* of, Node* node) {
- CHECK_EQ(change, node->opcode());
- CHECK_EQ(of, node->InputAt(0));
-}
-
-
-TEST(LowerNumberToInt32_to_ChangeTaggedToInt32) {
- // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepWord32
- TestingGraph t(Type::Signed32());
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
- Node* use = t.Use(trunc, MachineType::Int32());
- t.Return(use);
- t.Lower();
- CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p0, use->InputAt(0));
-}
-
-TEST(LowerNumberToInt32_to_TruncateFloat64ToWord32) {
- // NumberToInt32(x: kRepFloat64) used as MachineType::Int32()
- TestingGraph t(Type::Number());
- Node* p0 = t.ExampleWithOutput(MachineType::Float64());
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), p0);
- Node* use = t.Use(trunc, MachineType::Int32());
- t.Return(use);
- t.Lower();
- CheckChangeOf(IrOpcode::kTruncateFloat64ToWord32, p0, use->InputAt(0));
-}
-
-TEST(LowerNumberToInt32_to_TruncateTaggedToWord32) {
- // NumberToInt32(x: kTypeNumber | kRepTagged) used as MachineType::Int32()
- TestingGraph t(Type::Number());
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
- Node* use = t.Use(trunc, MachineType::Int32());
- t.Return(use);
- t.Lower();
- CheckChangeOf(IrOpcode::kTruncateTaggedToWord32, t.p0, use->InputAt(0));
-}
-
-
-TEST(LowerNumberToUint32_to_ChangeTaggedToUint32) {
- // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepWord32
- TestingGraph t(Type::Unsigned32());
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
- Node* use = t.Use(trunc, MachineType::Uint32());
- t.Return(use);
- t.Lower();
- CheckChangeOf(IrOpcode::kChangeTaggedToUint32, t.p0, use->InputAt(0));
-}
-
-TEST(LowerNumberToUint32_to_TruncateFloat64ToWord32) {
- // NumberToUint32(x: kRepFloat64) used as MachineType::Uint32()
- TestingGraph t(Type::Number());
- Node* p0 = t.ExampleWithOutput(MachineType::Float64());
- // TODO(titzer): run the typer here, or attach machine type to param.
- NodeProperties::SetType(p0, Type::Number());
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), p0);
- Node* use = t.Use(trunc, MachineType::Uint32());
- t.Return(use);
- t.Lower();
- CheckChangeOf(IrOpcode::kTruncateFloat64ToWord32, p0, use->InputAt(0));
-}
-
-TEST(LowerNumberToUint32_to_TruncateTaggedToWord32) {
- // NumberToInt32(x: kTypeNumber | kRepTagged) used as MachineType::Uint32()
- TestingGraph t(Type::Number());
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
- Node* use = t.Use(trunc, MachineType::Uint32());
- t.Return(use);
- t.Lower();
- CheckChangeOf(IrOpcode::kTruncateTaggedToWord32, t.p0, use->InputAt(0));
-}
-
-TEST(LowerNumberToUint32_to_TruncateFloat64ToWord32_uint32) {
- // NumberToUint32(x: kRepFloat64) used as kRepWord32
- TestingGraph t(Type::Unsigned32());
- Node* input = t.ExampleWithOutput(MachineType::Float64());
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), input);
- Node* use = t.Use(trunc, MachineType::RepWord32());
- t.Return(use);
- t.Lower();
- CheckChangeOf(IrOpcode::kTruncateFloat64ToWord32, input, use->InputAt(0));
-}
-
-
-TEST(LowerReferenceEqual_to_wordeq) {
- TestingGraph t(Type::Any(), Type::Any());
- IrOpcode::Value opcode =
- static_cast<IrOpcode::Value>(t.machine()->WordEqual()->opcode());
- t.CheckLoweringBinop(opcode, t.simplified()->ReferenceEqual());
-}
-
-void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
- MachineType to, Type* type = Type::Any()) {
- TestingGraph t(Type::Any());
- Node* in = t.ExampleWithOutput(from);
- NodeProperties::SetType(in, type);
- Node* use = t.Use(in, to);
- t.Return(use);
- t.Lower();
- CHECK_EQ(expected, use->InputAt(0)->opcode());
- CHECK_EQ(in, use->InputAt(0)->InputAt(0));
-}
-
-TEST(InsertBasicChanges) {
- CheckChangeInsertion(IrOpcode::kChangeFloat64ToInt32, MachineType::Float64(),
- MachineType::Int32(), Type::Signed32());
- CheckChangeInsertion(IrOpcode::kChangeFloat64ToUint32, MachineType::Float64(),
- MachineType::Uint32(), Type::Unsigned32());
- CheckChangeInsertion(IrOpcode::kTruncateFloat64ToWord32,
- MachineType::Float64(), MachineType::Uint32(),
- Type::Integral32());
- CheckChangeInsertion(IrOpcode::kChangeTaggedToInt32, MachineType::AnyTagged(),
- MachineType::Int32(), Type::Signed32());
- CheckChangeInsertion(IrOpcode::kChangeTaggedToUint32,
- MachineType::AnyTagged(), MachineType::Uint32(),
- Type::Unsigned32());
-
- CheckChangeInsertion(IrOpcode::kChangeFloat64ToTagged, MachineType::Float64(),
- MachineType::AnyTagged(), Type::Number());
- CheckChangeInsertion(IrOpcode::kChangeTaggedToFloat64,
- MachineType::AnyTagged(), MachineType::Float64(),
- Type::Number());
-
- CheckChangeInsertion(IrOpcode::kChangeInt32ToFloat64, MachineType::Int32(),
- MachineType::Float64(), Type::Signed32());
- CheckChangeInsertion(IrOpcode::kChangeInt32ToTagged, MachineType::Int32(),
- MachineType::AnyTagged(), Type::Signed32());
-
- CheckChangeInsertion(IrOpcode::kChangeUint32ToFloat64, MachineType::Uint32(),
- MachineType::Float64(), Type::Unsigned32());
- CheckChangeInsertion(IrOpcode::kChangeUint32ToTagged, MachineType::Uint32(),
- MachineType::AnyTagged(), Type::Unsigned32());
-}
-
-static void CheckChangesAroundBinop(TestingGraph* t, const Operator* op,
- IrOpcode::Value input_change,
- IrOpcode::Value output_change, Type* type) {
- Node* binop =
- op->ControlInputCount() == 0
- ? t->graph()->NewNode(op, t->p0, t->p1)
- : t->graph()->NewNode(op, t->p0, t->p1, t->graph()->start());
- NodeProperties::SetType(binop, type);
- t->Return(binop);
- t->Lower();
- CHECK_EQ(input_change, binop->InputAt(0)->opcode());
- CHECK_EQ(input_change, binop->InputAt(1)->opcode());
- CHECK_EQ(t->p0, binop->InputAt(0)->InputAt(0));
- CHECK_EQ(t->p1, binop->InputAt(1)->InputAt(0));
- CHECK_EQ(output_change, t->ret->InputAt(0)->opcode());
- CHECK_EQ(binop, t->ret->InputAt(0)->InputAt(0));
-}
-
-
-TEST(InsertChangesAroundInt32Binops) {
- TestingGraph t(Type::Signed32(), Type::Signed32());
-
- const Operator* ops[] = {t.machine()->Int32Add(), t.machine()->Int32Sub(),
- t.machine()->Int32Mul(), t.machine()->Int32Div(),
- t.machine()->Int32Mod(), t.machine()->Word32And(),
- t.machine()->Word32Or(), t.machine()->Word32Xor(),
- t.machine()->Word32Shl(), t.machine()->Word32Sar()};
-
- for (size_t i = 0; i < arraysize(ops); i++) {
- CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
- IrOpcode::kChangeInt32ToTagged, Type::Signed32());
- CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
- IrOpcode::kChangeInt32ToTagged, Type::Signed32());
- }
-}
-
-
-TEST(InsertChangesAroundInt32Cmp) {
- TestingGraph t(Type::Signed32(), Type::Signed32());
-
- const Operator* ops[] = {t.machine()->Int32LessThan(),
- t.machine()->Int32LessThanOrEqual()};
-
- for (size_t i = 0; i < arraysize(ops); i++) {
- CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
- IrOpcode::kChangeBitToTagged, Type::Boolean());
- }
-}
-
-
-TEST(InsertChangesAroundUint32Cmp) {
- TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
-
- const Operator* ops[] = {t.machine()->Uint32LessThan(),
- t.machine()->Uint32LessThanOrEqual()};
-
- for (size_t i = 0; i < arraysize(ops); i++) {
- CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToUint32,
- IrOpcode::kChangeBitToTagged, Type::Boolean());
- }
-}
-
-
-TEST(InsertChangesAroundFloat64Binops) {
- TestingGraph t(Type::Number(), Type::Number());
-
- const Operator* ops[] = {
- t.machine()->Float64Add(), t.machine()->Float64Sub(),
- t.machine()->Float64Mul(), t.machine()->Float64Div(),
- t.machine()->Float64Mod(),
- };
-
- for (size_t i = 0; i < arraysize(ops); i++) {
- CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
- IrOpcode::kChangeFloat64ToTagged, Type::Number());
- }
-}
-
-
-TEST(InsertChangesAroundFloat64Cmp) {
- TestingGraph t(Type::Number(), Type::Number());
-
- const Operator* ops[] = {t.machine()->Float64Equal(),
- t.machine()->Float64LessThan(),
- t.machine()->Float64LessThanOrEqual()};
-
- for (size_t i = 0; i < arraysize(ops); i++) {
- CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
- IrOpcode::kChangeBitToTagged, Type::Boolean());
- }
-}
-
-
-namespace {
-
-void CheckFieldAccessArithmetic(FieldAccess access, Node* load_or_store) {
- IntPtrMatcher mindex(load_or_store->InputAt(1));
- CHECK(mindex.Is(access.offset - access.tag()));
-}
-
-
-Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
- Node* index = load_or_store->InputAt(1);
- if (kPointerSize == 8) {
- Int64BinopMatcher mindex(index);
- CHECK_EQ(IrOpcode::kInt64Add, mindex.node()->opcode());
- CHECK(mindex.right().Is(access.header_size - access.tag()));
-
- const int element_size_shift =
- ElementSizeLog2Of(access.machine_type.representation());
- Node* index;
- if (element_size_shift) {
- Int64BinopMatcher shl(mindex.left().node());
- CHECK_EQ(IrOpcode::kWord64Shl, shl.node()->opcode());
- CHECK(shl.right().Is(element_size_shift));
- index = shl.left().node();
- } else {
- index = mindex.left().node();
- }
- CHECK_EQ(IrOpcode::kChangeUint32ToUint64, index->opcode());
- return index->InputAt(0);
- } else {
- Int32BinopMatcher mindex(index);
- CHECK_EQ(IrOpcode::kInt32Add, mindex.node()->opcode());
- CHECK(mindex.right().Is(access.header_size - access.tag()));
-
- const int element_size_shift =
- ElementSizeLog2Of(access.machine_type.representation());
- if (element_size_shift) {
- Int32BinopMatcher shl(mindex.left().node());
- CHECK_EQ(IrOpcode::kWord32Shl, shl.node()->opcode());
- CHECK(shl.right().Is(element_size_shift));
- return shl.left().node();
- } else {
- return mindex.left().node();
- }
- }
-}
-
-
-const MachineType kMachineReps[] = {
- MachineType::Int8(), MachineType::Int16(), MachineType::Int32(),
- MachineType::Uint32(), MachineType::Int64(), MachineType::Float64(),
- MachineType::AnyTagged()};
-
-} // namespace
-
-
-TEST(LowerLoadField_to_load) {
- for (size_t i = 0; i < arraysize(kMachineReps); i++) {
- TestingGraph t(Type::Any(), Type::Signed32());
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- kMachineReps[i], kNoWriteBarrier};
-
- Node* load = t.graph()->NewNode(t.simplified()->LoadField(access), t.p0,
- t.start, t.start);
- Node* use = t.Use(load, kMachineReps[i]);
- t.Return(use);
- t.LowerAllNodesAndLowerChanges();
- CHECK_EQ(IrOpcode::kLoad, load->opcode());
- CHECK_EQ(t.p0, load->InputAt(0));
- CheckFieldAccessArithmetic(access, load);
-
- MachineType rep = LoadRepresentationOf(load->op());
- CHECK_EQ(kMachineReps[i], rep);
- }
-}
-
-
-TEST(LowerStoreField_to_store) {
- {
- TestingGraph t(Type::Any(), Type::Signed32());
-
- for (size_t i = 0; i < arraysize(kMachineReps); i++) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- kMachineReps[i], kNoWriteBarrier};
-
- Node* val = t.ExampleWithOutput(kMachineReps[i]);
- Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
- val, t.start, t.start);
- t.Effect(store);
- t.LowerAllNodesAndLowerChanges();
- CHECK_EQ(IrOpcode::kStore, store->opcode());
- CHECK_EQ(val, store->InputAt(2));
- CheckFieldAccessArithmetic(access, store);
-
- StoreRepresentation rep = StoreRepresentationOf(store->op());
- if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
- CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
- }
- CHECK_EQ(kMachineReps[i].representation(), rep.representation());
- }
- }
- {
- HandleAndZoneScope scope;
- Zone* z = scope.main_zone();
- TestingGraph t(Type::Any(), Type::Intersect(Type::SignedSmall(),
- Type::TaggedSigned(), z));
- FieldAccess access = {
- kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
- Type::Any(), MachineType::AnyTagged(), kNoWriteBarrier};
- Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
- t.p1, t.start, t.start);
- t.Effect(store);
- t.LowerAllNodesAndLowerChanges();
- CHECK_EQ(IrOpcode::kStore, store->opcode());
- CHECK_EQ(t.p1, store->InputAt(2));
- StoreRepresentation rep = StoreRepresentationOf(store->op());
- CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
- }
-}
-
-
-TEST(LowerLoadElement_to_load) {
- for (size_t i = 0; i < arraysize(kMachineReps); i++) {
- TestingGraph t(Type::Any(), Type::Signed32());
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Any(), kMachineReps[i], kNoWriteBarrier};
-
- Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
- t.p1, t.start, t.start);
- Node* use = t.Use(load, kMachineReps[i]);
- t.Return(use);
- t.LowerAllNodesAndLowerChanges();
- CHECK_EQ(IrOpcode::kLoad, load->opcode());
- CHECK_EQ(t.p0, load->InputAt(0));
- CheckElementAccessArithmetic(access, load);
-
- MachineType rep = LoadRepresentationOf(load->op());
- CHECK_EQ(kMachineReps[i], rep);
- }
-}
-
-
-TEST(LowerStoreElement_to_store) {
- {
- for (size_t i = 0; i < arraysize(kMachineReps); i++) {
- TestingGraph t(Type::Any(), Type::Signed32());
-
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Any(), kMachineReps[i], kNoWriteBarrier};
-
- Node* val = t.ExampleWithOutput(kMachineReps[i]);
- Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access),
- t.p0, t.p1, val, t.start, t.start);
- t.Effect(store);
- t.LowerAllNodesAndLowerChanges();
- CHECK_EQ(IrOpcode::kStore, store->opcode());
- CHECK_EQ(val, store->InputAt(2));
- CheckElementAccessArithmetic(access, store);
-
- StoreRepresentation rep = StoreRepresentationOf(store->op());
- if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
- CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
- }
- CHECK_EQ(kMachineReps[i].representation(), rep.representation());
- }
- }
- {
- HandleAndZoneScope scope;
- Zone* z = scope.main_zone();
- TestingGraph t(
- Type::Any(), Type::Signed32(),
- Type::Intersect(Type::SignedSmall(), Type::TaggedSigned(), z));
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier};
- Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
- t.p1, t.p2, t.start, t.start);
- t.Effect(store);
- t.LowerAllNodesAndLowerChanges();
- CHECK_EQ(IrOpcode::kStore, store->opcode());
- CHECK_EQ(t.p2, store->InputAt(2));
- StoreRepresentation rep = StoreRepresentationOf(store->op());
- CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
- }
-}
-
-
-TEST(InsertChangeForLoadElementIndex) {
- // LoadElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length) =>
- // Load(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k))
- TestingGraph t(Type::Any(), Type::Signed32());
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::AnyTagged(), kNoWriteBarrier};
-
- Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
- t.p1, t.start, t.start);
- t.Return(load);
- t.Lower();
- CHECK_EQ(IrOpcode::kLoadElement, load->opcode());
- CHECK_EQ(t.p0, load->InputAt(0));
- CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, load->InputAt(1));
-}
-
-
-TEST(InsertChangeForStoreElementIndex) {
- // StoreElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length, val) =>
- // Store(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k), val)
- TestingGraph t(Type::Any(), Type::Signed32());
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::AnyTagged(), kFullWriteBarrier};
-
- Node* store =
- t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1,
- t.jsgraph.TrueConstant(), t.start, t.start);
- t.Effect(store);
- t.Lower();
- CHECK_EQ(IrOpcode::kStoreElement, store->opcode());
- CHECK_EQ(t.p0, store->InputAt(0));
- CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, store->InputAt(1));
-}
-
-
-TEST(InsertChangeForLoadElement) {
- // TODO(titzer): test all load/store representation change insertions.
- TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Number(), MachineType::Float64(),
- kNoWriteBarrier};
-
- Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
- t.p1, t.start, t.start);
- t.Return(load);
- t.Lower();
- CHECK_EQ(IrOpcode::kLoadElement, load->opcode());
- CHECK_EQ(t.p0, load->InputAt(0));
- CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
-}
-
-
-TEST(InsertChangeForLoadField) {
- // TODO(titzer): test all load/store representation change insertions.
- TestingGraph t(Type::Any(), Type::Signed32());
- FieldAccess access = {
- kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
- Type::Number(), MachineType::Float64(), kNoWriteBarrier};
-
- Node* load = t.graph()->NewNode(t.simplified()->LoadField(access), t.p0,
- t.start, t.start);
- t.Return(load);
- t.Lower();
- CHECK_EQ(IrOpcode::kLoadField, load->opcode());
- CHECK_EQ(t.p0, load->InputAt(0));
- CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
-}
-
-
-TEST(InsertChangeForStoreElement) {
- // TODO(titzer): test all load/store representation change insertions.
- TestingGraph t(Type::Any(), Type::Signed32());
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::Float64(), kFullWriteBarrier};
-
- Node* store =
- t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
- t.jsgraph.Int32Constant(0), t.p1, t.start, t.start);
- t.Effect(store);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kStoreElement, store->opcode());
- CHECK_EQ(t.p0, store->InputAt(0));
- CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
-}
-
-
-TEST(InsertChangeForStoreField) {
- // TODO(titzer): test all load/store representation change insertions.
- TestingGraph t(Type::Any(), Type::Signed32());
- FieldAccess access = {
- kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
- Type::Any(), MachineType::Float64(), kNoWriteBarrier};
-
- Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
- t.p1, t.start, t.start);
- t.Effect(store);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kStoreField, store->opcode());
- CHECK_EQ(t.p0, store->InputAt(0));
- CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(1));
-}
-
-
-TEST(UpdatePhi) {
- TestingGraph t(Type::Any(), Type::Signed32());
- static const MachineType kMachineTypes[] = {
- MachineType::Int32(), MachineType::Uint32(), MachineType::Float64()};
- Type* kTypes[] = {Type::Signed32(), Type::Unsigned32(), Type::Number()};
-
- for (size_t i = 0; i < arraysize(kMachineTypes); i++) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), kTypes[i],
- kMachineTypes[i], kFullWriteBarrier};
-
- Node* load0 = t.graph()->NewNode(t.simplified()->LoadField(access), t.p0,
- t.start, t.start);
- Node* load1 = t.graph()->NewNode(t.simplified()->LoadField(access), t.p1,
- t.start, t.start);
- Node* phi =
- t.graph()->NewNode(t.common()->Phi(MachineRepresentation::kTagged, 2),
- load0, load1, t.start);
- t.Return(t.Use(phi, kMachineTypes[i]));
- t.Lower();
-
- CHECK_EQ(IrOpcode::kPhi, phi->opcode());
- CHECK_EQ(kMachineTypes[i].representation(), PhiRepresentationOf(phi->op()));
- }
-}
-
-
-TEST(NumberMultiply_ConstantOutOfRange) {
- TestingGraph t(Type::Signed32());
- Node* k = t.jsgraph.Constant(1000000023);
- Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), mul);
- t.Return(trunc);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kFloat64Mul, mul->opcode());
-}
-
-
-TEST(NumberMultiply_NonTruncating) {
- TestingGraph t(Type::Signed32());
- Node* k = t.jsgraph.Constant(111);
- Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
- t.Return(mul);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kFloat64Mul, mul->opcode());
-}
-
-
-TEST(NumberDivide_TruncatingToInt32) {
- int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- TestingGraph t(Type::Signed32());
- Node* k = t.jsgraph.Constant(constants[i]);
- Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* use = t.Use(div, MachineType::Int32());
- t.Return(use);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kInt32Div, use->InputAt(0)->opcode());
- }
-}
-
-
-TEST(NumberDivide_TruncatingToUint32) {
- double constants[] = {1, 3, 100, 1000, 100998348};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- TestingGraph t(Type::Unsigned32());
- Node* k = t.jsgraph.Constant(constants[i]);
- Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* use = t.Use(div, MachineType::Uint32());
- t.Return(use);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kUint32Div, use->InputAt(0)->opcode());
- }
-}
-
-
-TEST(NumberDivide_BadConstants) {
- {
- TestingGraph t(Type::Signed32());
- Node* k = t.jsgraph.Constant(-1);
- Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* use = t.Use(div, MachineType::Int32());
- t.Return(use);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kInt32Sub, use->InputAt(0)->opcode());
- }
-
- {
- TestingGraph t(Type::Signed32());
- Node* k = t.jsgraph.Constant(0);
- Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* use = t.Use(div, MachineType::Int32());
- t.Return(use);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kInt32Constant, use->InputAt(0)->opcode());
- CHECK_EQ(0, OpParameter<int32_t>(use->InputAt(0)));
- }
-
- {
- TestingGraph t(Type::Unsigned32());
- Node* k = t.jsgraph.Constant(0);
- Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* use = t.Use(div, MachineType::Uint32());
- t.Return(use);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kInt32Constant, use->InputAt(0)->opcode());
- CHECK_EQ(0, OpParameter<int32_t>(use->InputAt(0)));
- }
-}
-
-
-TEST(NumberModulus_TruncatingToInt32) {
- int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- TestingGraph t(Type::Signed32());
- Node* k = t.jsgraph.Constant(constants[i]);
- Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
- Node* use = t.Use(mod, MachineType::Int32());
- t.Return(use);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kInt32Mod, use->InputAt(0)->opcode());
- }
-}
-
-
-TEST(NumberModulus_TruncatingToUint32) {
- double constants[] = {1, 3, 100, 1000, 100998348};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- TestingGraph t(Type::Unsigned32());
- Node* k = t.jsgraph.Constant(constants[i]);
- Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), mod);
- t.Return(trunc);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kUint32Mod, t.ret->InputAt(0)->InputAt(0)->opcode());
- }
-}
-
-
-TEST(NumberModulus_Int32) {
- int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- TestingGraph t(Type::Signed32());
- Node* k = t.jsgraph.Constant(constants[i]);
- Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
- t.Return(mod);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kFloat64Mod, mod->opcode()); // Pesky -0 behavior.
- }
-}
-
-
-TEST(NumberModulus_Uint32) {
- const double kConstants[] = {2, 100, 1000, 1024, 2048};
- const MachineType kTypes[] = {MachineType::Int32(), MachineType::Uint32()};
-
- for (auto const type : kTypes) {
- for (auto const c : kConstants) {
- TestingGraph t(Type::Unsigned32());
- Node* k = t.jsgraph.Constant(c);
- Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
- Node* use = t.Use(mod, type);
- t.Return(use);
- t.Lower();
-
- CHECK_EQ(IrOpcode::kUint32Mod, use->InputAt(0)->opcode());
- }
- }
-}
-
-
-TEST(PhiRepresentation) {
- HandleAndZoneScope scope;
- Zone* z = scope.main_zone();
-
- struct TestData {
- Type* arg1;
- Type* arg2;
- MachineType use;
- MachineRepresentation expected;
- };
-
- TestData test_data[] = {
- {Type::Signed32(), Type::Unsigned32(), MachineType::Int32(),
- MachineRepresentation::kWord32},
- {Type::Signed32(), Type::Unsigned32(), MachineType::Uint32(),
- MachineRepresentation::kWord32},
- {Type::Signed32(), Type::Signed32(), MachineType::Int32(),
- MachineRepresentation::kWord32},
- {Type::Unsigned32(), Type::Unsigned32(), MachineType::Int32(),
- MachineRepresentation::kWord32},
- {Type::Number(), Type::Signed32(), MachineType::Int32(),
- MachineRepresentation::kWord32}};
-
- for (auto const d : test_data) {
- TestingGraph t(d.arg1, d.arg2, Type::Boolean());
-
- Node* br = t.graph()->NewNode(t.common()->Branch(), t.p2, t.start);
- Node* tb = t.graph()->NewNode(t.common()->IfTrue(), br);
- Node* fb = t.graph()->NewNode(t.common()->IfFalse(), br);
- Node* m = t.graph()->NewNode(t.common()->Merge(2), tb, fb);
-
- Node* phi = t.graph()->NewNode(
- t.common()->Phi(MachineRepresentation::kTagged, 2), t.p0, t.p1, m);
-
- Type* phi_type = Type::Union(d.arg1, d.arg2, z);
- NodeProperties::SetType(phi, phi_type);
-
- Node* use = t.Use(phi, d.use);
- t.Return(use);
- t.Lower();
-
- CHECK_EQ(d.expected, PhiRepresentationOf(phi->op()));
- }
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/heap/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index 674bdcb1cc..a01de69291 100644
--- a/deps/v8/test/cctest/heap/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -30,6 +30,7 @@
V(TestMemoryReducerSampleJsCalls) \
V(TestSizeOfObjects) \
V(Regress587004) \
+ V(Regress538257) \
V(Regress589413) \
V(WriteBarriersInCopyJSObject)
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 7d4d4bf40d..4f7d088a94 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -15,8 +15,10 @@ namespace internal {
namespace heap {
void SealCurrentObjects(Heap* heap) {
- heap->CollectAllGarbage();
- heap->CollectAllGarbage();
+ heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
+ GarbageCollectionReason::kTesting);
+ heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
+ GarbageCollectionReason::kTesting);
heap->mark_compact_collector()->EnsureSweepingCompleted();
heap->old_space()->EmptyAllocationInfo();
for (Page* page : *heap->old_space()) {
@@ -28,6 +30,35 @@ int FixedArrayLenFromSize(int size) {
return (size - FixedArray::kHeaderSize) / kPointerSize;
}
+std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
+ int remainder) {
+ std::vector<Handle<FixedArray>> handles;
+ Isolate* isolate = heap->isolate();
+ const int kArraySize = 128;
+ const int kArrayLen = heap::FixedArrayLenFromSize(kArraySize);
+ CHECK_EQ(Page::kAllocatableMemory % kArraySize, 0);
+ Handle<FixedArray> array;
+ for (size_t allocated = 0;
+ allocated != (Page::kAllocatableMemory - remainder);
+ allocated += array->Size()) {
+ if (allocated == (Page::kAllocatableMemory - kArraySize)) {
+ array = isolate->factory()->NewFixedArray(
+ heap::FixedArrayLenFromSize(kArraySize - remainder), TENURED);
+ CHECK_EQ(kArraySize - remainder, array->Size());
+ } else {
+ array = isolate->factory()->NewFixedArray(kArrayLen, TENURED);
+ CHECK_EQ(kArraySize, array->Size());
+ }
+ if (handles.empty()) {
+ // Check that allocations started on a new page.
+ CHECK_EQ(array->address(),
+ Page::FromAddress(array->address())->area_start());
+ }
+ handles.push_back(array);
+ }
+ return handles;
+}
+
std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
PretenureFlag tenure,
int object_size) {
@@ -112,20 +143,25 @@ void SimulateFullSpace(v8::internal::NewSpace* space,
}
void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
- i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
+ i::MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
}
- CHECK(marking->IsMarking() || marking->IsStopped());
+ if (marking->IsSweeping()) {
+ marking->FinalizeSweeping();
+ }
+ CHECK(marking->IsMarking() || marking->IsStopped() || marking->IsComplete());
if (marking->IsStopped()) {
- heap->StartIncrementalMarking();
+ heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
}
- CHECK(marking->IsMarking());
+ CHECK(marking->IsMarking() || marking->IsComplete());
if (!force_completion) return;
while (!marking->IsComplete()) {
- marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ i::IncrementalMarking::FORCE_COMPLETION, i::StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -147,7 +183,7 @@ void AbandonCurrentlyFreeMemory(PagedSpace* space) {
}
void GcAndSweep(Heap* heap, AllocationSpace space) {
- heap->CollectGarbage(space);
+ heap->CollectGarbage(space, GarbageCollectionReason::kTesting);
if (heap->mark_compact_collector()->sweeping_in_progress()) {
heap->mark_compact_collector()->EnsureSweepingCompleted();
}
diff --git a/deps/v8/test/cctest/heap/heap-utils.h b/deps/v8/test/cctest/heap/heap-utils.h
index e03e6fa6e0..2f704cb422 100644
--- a/deps/v8/test/cctest/heap/heap-utils.h
+++ b/deps/v8/test/cctest/heap/heap-utils.h
@@ -15,9 +15,15 @@ void SealCurrentObjects(Heap* heap);
int FixedArrayLenFromSize(int size);
+// Fill a page with fixed arrays leaving remainder behind. The function does
+// not create additional fillers and assumes that the space has just been
+// sealed.
+std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
+ int remainder);
+
std::vector<Handle<FixedArray>> CreatePadding(
Heap* heap, int padding_size, PretenureFlag tenure,
- int object_size = Page::kMaxRegularHeapObjectSize);
+ int object_size = kMaxRegularHeapObjectSize);
void AllocateAllButNBytes(
v8::internal::NewSpace* space, int extra_bytes,
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index 348ba1979d..c01827eee1 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -91,7 +91,7 @@ Handle<Object> v8::internal::HeapTester::TestAllocateAfterFailures() {
// Similar to what the CALL_AND_RETRY macro does in the last-resort case, we
// are wrapping the allocator function in an AlwaysAllocateScope. Test that
// all allocations succeed immediately without any retry.
- CcTest::heap()->CollectAllAvailableGarbage("panic");
+ CcTest::CollectAllAvailableGarbage();
AlwaysAllocateScope scope(CcTest::i_isolate());
return handle(AllocateAfterFailures().ToObjectChecked(), CcTest::i_isolate());
}
@@ -220,12 +220,11 @@ TEST(CodeRange) {
if (current_allocated < code_range_size / 10) {
// Allocate a block.
// Geometrically distributed sizes, greater than
- // Page::kMaxRegularHeapObjectSize (which is greater than code page area).
+ // kMaxRegularHeapObjectSize (which is greater than code page area).
// TODO(gc): instead of using 3 use some contant based on code_range_size
// kMaxRegularHeapObjectSize.
- size_t requested =
- (Page::kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
- Pseudorandom() % 5000 + 1;
+ size_t requested = (kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
+ Pseudorandom() % 5000 + 1;
size_t allocated = 0;
// The request size has to be at least 2 code guard pages larger than the
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index b331f6bf3a..173d1fa85f 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/api.h"
#include "src/heap/array-buffer-tracker.h"
+#include "src/isolate.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
@@ -128,7 +130,7 @@ TEST(ArrayBuffer_Compaction) {
page_before_gc->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
CHECK(IsTracked(*buf1));
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
Page* page_after_gc = Page::FromAddress(buf1->address());
CHECK(IsTracked(*buf1));
@@ -175,7 +177,7 @@ TEST(ArrayBuffer_UnregisterDuringSweep) {
CHECK(IsTracked(*buf2));
}
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
// |Externalize| will cause the buffer to be |Unregister|ed. Without
// barriers and proper synchronization this will trigger a data race on
// TSAN.
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index f61f7e1c41..339aef3ea5 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -2,6 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/factory.h"
+#include "src/heap/mark-compact.h"
+#include "src/isolate.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
@@ -58,7 +68,7 @@ HEAP_TEST(CompactionFullAbortedPage) {
CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
heap->set_force_oom(true);
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
heap->mark_compact_collector()->EnsureSweepingCompleted();
// Check that all handles still point to the same page, i.e., compaction
@@ -118,7 +128,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
Page::FromAddress(page_to_fill_handles.front()->address());
heap->set_force_oom(true);
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
heap->mark_compact_collector()->EnsureSweepingCompleted();
bool migration_aborted = false;
@@ -200,7 +210,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
Page::FromAddress(page_to_fill_handles.front()->address());
heap->set_force_oom(true);
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
heap->mark_compact_collector()->EnsureSweepingCompleted();
// The following check makes sure that we compacted "some" objects, while
@@ -293,7 +303,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
Page::FromAddress(page_to_fill_handles.front()->address());
heap->set_force_oom(true);
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
heap->mark_compact_collector()->EnsureSweepingCompleted();
// The following check makes sure that we compacted "some" objects, while
@@ -343,7 +353,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
// If store buffer entries are not properly filtered/reset for aborted
// pages we have now a broken address at an object slot in old space and
// the following scavenge will crash.
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
}
}
}
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 80c4cc7c34..c69d391f90 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -138,7 +138,7 @@ HEAP_TEST(TestNewSpaceRefsInCopiedCode) {
Handle<Code> copy(tmp);
CheckEmbeddedObjectsAreEqual(code, copy);
- heap->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CheckEmbeddedObjectsAreEqual(code, copy);
}
@@ -478,12 +478,11 @@ TEST(Tagging) {
TEST(GarbageCollection) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
HandleScope sc(isolate);
// Check GC.
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
Handle<JSGlobalObject> global(
CcTest::i_isolate()->context()->global_object());
@@ -510,7 +509,7 @@ TEST(GarbageCollection) {
*Object::GetProperty(obj, prop_namex).ToHandleChecked());
}
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
// Function should be alive.
CHECK(Just(true) == JSReceiver::HasOwnProperty(global, name));
@@ -529,7 +528,7 @@ TEST(GarbageCollection) {
}
// After gc, it should survive.
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK(Just(true) == JSReceiver::HasOwnProperty(global, obj_name));
Handle<Object> obj =
@@ -578,7 +577,6 @@ TEST(LocalHandles) {
TEST(GlobalHandles) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
GlobalHandles* global_handles = isolate->global_handles();
@@ -600,7 +598,7 @@ TEST(GlobalHandles) {
}
// after gc, it should survive
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK((*h1)->IsString());
CHECK((*h2)->IsHeapNumber());
@@ -633,7 +631,6 @@ TEST(WeakGlobalHandlesScavenge) {
i::FLAG_stress_compaction = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
GlobalHandles* global_handles = isolate->global_handles();
@@ -658,7 +655,7 @@ TEST(WeakGlobalHandlesScavenge) {
&TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
// Scavenge treats weak pointers as normal roots.
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK((*h1)->IsString());
CHECK((*h2)->IsHeapNumber());
@@ -695,8 +692,8 @@ TEST(WeakGlobalHandlesMark) {
}
// Make sure the objects are promoted.
- heap->CollectGarbage(OLD_SPACE);
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK(!heap->InNewSpace(*h1) && !heap->InNewSpace(*h2));
std::pair<Handle<Object>*, int> handle_and_id(&h2, 1234);
@@ -707,7 +704,7 @@ TEST(WeakGlobalHandlesMark) {
CHECK(!GlobalHandles::IsNearDeath(h2.location()));
// Incremental marking potentially marked handles before they turned weak.
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK((*h1)->IsString());
@@ -722,7 +719,6 @@ TEST(DeleteWeakGlobalHandle) {
i::FLAG_stress_compaction = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
GlobalHandles* global_handles = isolate->global_handles();
@@ -743,12 +739,12 @@ TEST(DeleteWeakGlobalHandle) {
v8::WeakCallbackType::kParameter);
// Scanvenge does not recognize weak reference.
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK(!WeakPointerCleared);
// Mark-compact treats weak reference properly.
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
CHECK(WeakPointerCleared);
}
@@ -764,7 +760,7 @@ TEST(DoNotPromoteWhiteObjectsOnScavenge) {
CHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(HeapObject::cast(*white))));
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK(heap->InNewSpace(*white));
}
@@ -780,15 +776,15 @@ TEST(PromoteGreyOrBlackObjectsOnScavenge) {
IncrementalMarking* marking = heap->incremental_marking();
marking->Stop();
- heap->StartIncrementalMarking();
+ heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
while (
Marking::IsWhite(ObjectMarking::MarkBitFrom(HeapObject::cast(*marked)))) {
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_MARKING,
- IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+ IncrementalMarking::DO_NOT_FORCE_COMPLETION, StepOrigin::kV8);
}
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK(!heap->InNewSpace(*marked));
}
@@ -836,7 +832,7 @@ TEST(BytecodeArray) {
// evacuation candidate.
Page* evac_page = Page::FromAddress(constant_pool->address());
evac_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// BytecodeArray should survive.
CHECK_EQ(array->length(), kRawBytesSize);
@@ -1254,7 +1250,7 @@ TEST(Iteration) {
factory->NewStringFromStaticChars("abcdefghij", TENURED);
// Allocate a large string (for large object space).
- int large_size = Page::kMaxRegularHeapObjectSize + 1;
+ int large_size = kMaxRegularHeapObjectSize + 1;
char* str = new char[large_size];
for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
str[large_size - 1] = '\0';
@@ -1306,14 +1302,20 @@ UNINITIALIZED_TEST(TestCodeFlushing) {
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
- i_isolate->heap()->CollectAllGarbage();
- i_isolate->heap()->CollectAllGarbage();
+ i_isolate->heap()->CollectAllGarbage(
+ i::Heap::kFinalizeIncrementalMarkingMask,
+ i::GarbageCollectionReason::kTesting);
+ i_isolate->heap()->CollectAllGarbage(
+ i::Heap::kFinalizeIncrementalMarkingMask,
+ i::GarbageCollectionReason::kTesting);
CHECK(function->shared()->is_compiled());
// Simulate several GCs that use full marking.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
- i_isolate->heap()->CollectAllGarbage();
+ i_isolate->heap()->CollectAllGarbage(
+ i::Heap::kFinalizeIncrementalMarkingMask,
+ i::GarbageCollectionReason::kTesting);
}
// foo should no longer be in the compilation cache
@@ -1359,12 +1361,12 @@ TEST(TestCodeFlushingPreAged) {
CHECK(function->shared()->is_compiled());
// The code has been run so will survive at least one GC.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(function->shared()->is_compiled());
// The code was only run once, so it should be pre-aged and collected on the
// next GC.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(!function->shared()->is_compiled() || function->IsOptimized());
// Execute the function again twice, and ensure it is reset to the young age.
@@ -1374,14 +1376,14 @@ TEST(TestCodeFlushingPreAged) {
}
// The code will survive at least two GC now that it is young again.
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(function->shared()->is_compiled());
// Simulate several GCs that use full marking.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
// foo should no longer be in the compilation cache
@@ -1424,15 +1426,15 @@ TEST(TestCodeFlushingIncremental) {
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(function->shared()->is_compiled());
// Simulate several GCs that use incremental marking.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
CHECK(!function->shared()->is_compiled() || function->IsOptimized());
CHECK(!function->is_compiled() || function->IsOptimized());
@@ -1448,7 +1450,7 @@ TEST(TestCodeFlushingIncremental) {
heap::SimulateIncrementalMarking(CcTest::heap());
if (!function->next_function_link()->IsUndefined(CcTest::i_isolate()))
break;
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
// Force optimization while incremental marking is active and while
@@ -1458,7 +1460,7 @@ TEST(TestCodeFlushingIncremental) {
}
// Simulate one final GC to make sure the candidate queue is sane.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(function->shared()->is_compiled() || !function->IsOptimized());
CHECK(function->is_compiled() || !function->IsOptimized());
}
@@ -1487,7 +1489,7 @@ TEST(TestCodeFlushingIncrementalScavenge) {
Handle<String> bar_name = factory->InternalizeUtf8String("bar");
// Perfrom one initial GC to enable code flushing.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// This compile will add the code to the compilation cache.
{ v8::HandleScope scope(CcTest::isolate());
@@ -1524,10 +1526,10 @@ TEST(TestCodeFlushingIncrementalScavenge) {
// perform a scavenge while incremental marking is still running.
heap::SimulateIncrementalMarking(CcTest::heap(), false);
*function2.location() = NULL;
- CcTest::heap()->CollectGarbage(NEW_SPACE, "test scavenge while marking");
+ CcTest::CollectGarbage(NEW_SPACE);
// Simulate one final GC to make sure the candidate queue is sane.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(!function->shared()->is_compiled() || function->IsOptimized());
CHECK(!function->is_compiled() || function->IsOptimized());
}
@@ -1564,8 +1566,8 @@ TEST(TestCodeFlushingIncrementalAbort) {
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
- heap->CollectAllGarbage();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(function->shared()->is_compiled());
// Bump the code age so that flushing is triggered.
@@ -1594,7 +1596,7 @@ TEST(TestCodeFlushingIncrementalAbort) {
}
// Simulate one final GC to make sure the candidate queue is sane.
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(function->shared()->is_compiled() || !function->IsOptimized());
CHECK(function->is_compiled() || !function->IsOptimized());
}
@@ -1650,7 +1652,6 @@ TEST(CompilationCacheCachingBehavior) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
CompilationCache* compilation_cache = isolate->compilation_cache();
LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
@@ -1681,7 +1682,7 @@ TEST(CompilationCacheCachingBehavior) {
// (Unless --optimize-for-size, in which case it might get collected
// immediately.)
if (!FLAG_optimize_for_size) {
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
info = compilation_cache->LookupScript(
source, Handle<Object>(), 0, 0,
v8::ScriptOriginOptions(false, true, false), native_context,
@@ -1699,7 +1700,7 @@ TEST(CompilationCacheCachingBehavior) {
info.ToHandleChecked()->code()->MakeOlder(EVEN_MARKING_PARITY);
}
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Ensure code aging cleared the entry from the cache.
info = compilation_cache->LookupScript(
source, Handle<Object>(), 0, 0,
@@ -1762,7 +1763,6 @@ TEST(TestInternalWeakLists) {
static const int kNumTestContexts = 10;
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
HandleScope scope(isolate);
v8::Local<v8::Context> ctx[kNumTestContexts];
if (!isolate->use_crankshaft()) return;
@@ -1776,7 +1776,7 @@ TEST(TestInternalWeakLists) {
// Collect garbage that might have been created by one of the
// installed extensions.
isolate->compilation_cache()->Clear();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(i + 1, CountNativeContexts());
@@ -1802,29 +1802,29 @@ TEST(TestInternalWeakLists) {
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
- CcTest::heap()->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK_EQ(5, CountOptimizedUserFunctions(ctx[i]));
}
// Mark compact handles the weak references.
isolate->compilation_cache()->Clear();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i]));
// Get rid of f3 and f5 in the same way.
CompileRun("f3=null");
for (int j = 0; j < 10; j++) {
- CcTest::heap()->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i]));
}
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i]));
CompileRun("f5=null");
for (int j = 0; j < 10; j++) {
- CcTest::heap()->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i]));
}
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(2, CountOptimizedUserFunctions(ctx[i]));
ctx[i]->Exit();
@@ -1832,7 +1832,7 @@ TEST(TestInternalWeakLists) {
// Force compilation cache cleanup.
CcTest::heap()->NotifyContextDisposed(true);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Dispose the native contexts one by one.
for (int i = 0; i < kNumTestContexts; i++) {
@@ -1843,12 +1843,12 @@ TEST(TestInternalWeakLists) {
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
CHECK_EQ(kNumTestContexts - i, CountNativeContexts());
}
// Mark compact handles the weak references.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(kNumTestContexts - i - 1, CountNativeContexts());
}
@@ -1864,7 +1864,8 @@ static int CountNativeContextsWithGC(Isolate* isolate, int n) {
Handle<Object> object(heap->native_contexts_list(), isolate);
while (!object->IsUndefined(isolate)) {
count++;
- if (count == n) heap->CollectAllGarbage();
+ if (count == n)
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
object =
Handle<Object>(Context::cast(*object)->next_context_link(), isolate);
}
@@ -1885,7 +1886,10 @@ static int CountOptimizedUserFunctionsWithGC(v8::Local<v8::Context> context,
while (object->IsJSFunction() &&
!Handle<JSFunction>::cast(object)->shared()->IsBuiltin()) {
count++;
- if (count == n) isolate->heap()->CollectAllGarbage();
+ if (count == n)
+ isolate->heap()->CollectAllGarbage(
+ i::Heap::kFinalizeIncrementalMarkingMask,
+ i::GarbageCollectionReason::kTesting);
object = Handle<Object>(
Object::cast(JSFunction::cast(*object)->next_function_link()),
isolate);
@@ -1968,7 +1972,7 @@ TEST(TestSizeOfRegExpCode) {
// Get initial heap size after several full GCs, which will stabilize
// the heap size and return with sweeping finished completely.
- CcTest::heap()->CollectAllAvailableGarbage("initial cleanup");
+ CcTest::CollectAllAvailableGarbage();
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
@@ -1976,11 +1980,11 @@ TEST(TestSizeOfRegExpCode) {
int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
CompileRun("'foo'.match(reg_exp_source);");
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
int size_with_regexp = static_cast<int>(CcTest::heap()->SizeOfObjects());
CompileRun("'foo'.match(half_size_reg_exp);");
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
int size_with_optimized_regexp =
static_cast<int>(CcTest::heap()->SizeOfObjects());
@@ -2006,7 +2010,7 @@ HEAP_TEST(TestSizeOfObjects) {
// Get initial heap size after several full GCs, which will stabilize
// the heap size and return with sweeping finished completely.
- heap->CollectAllAvailableGarbage("initial cleanup");
+ CcTest::CollectAllAvailableGarbage();
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
}
@@ -2027,7 +2031,7 @@ HEAP_TEST(TestSizeOfObjects) {
// The heap size should go back to initial size after a full GC, even
// though sweeping didn't finish yet.
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Normally sweeping would not be complete here, but no guarantees.
CHECK_EQ(initial_size, static_cast<int>(heap->SizeOfObjects()));
// Waiting for sweeper threads should not change heap size.
@@ -2218,6 +2222,17 @@ static Address AlignOldSpace(AllocationAlignment alignment, int offset) {
// Test the case where allocation must be done from the free list, so filler
// may precede or follow the object.
TEST(TestAlignedOverAllocation) {
+ Heap* heap = CcTest::heap();
+ // Test checks for fillers before and behind objects and requires a fresh
+ // page and empty free list.
+ heap::AbandonCurrentlyFreeMemory(heap->old_space());
+ // Allocate a dummy object to properly set up the linear allocation info.
+ AllocationResult dummy =
+ heap->old_space()->AllocateRawUnaligned(kPointerSize);
+ CHECK(!dummy.IsRetry());
+ heap->CreateFillerObjectAt(dummy.ToObjectChecked()->address(), kPointerSize,
+ ClearRecordedSlots::kNo);
+
// Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
const intptr_t double_misalignment = kDoubleSize - kPointerSize;
Address start;
@@ -2340,24 +2355,11 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
}
}
-
-static void FillUpNewSpace(NewSpace* new_space) {
- // Fill up new space to the point that it is completely full. Make sure
- // that the scavenger does not undo the filling.
- Heap* heap = new_space->heap();
- Isolate* isolate = heap->isolate();
- Factory* factory = isolate->factory();
- HandleScope scope(isolate);
- AlwaysAllocateScope always_allocate(isolate);
- intptr_t available = new_space->Capacity() - new_space->Size();
- intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1;
- for (intptr_t i = 0; i < number_of_fillers; i++) {
- CHECK(heap->InNewSpace(*factory->NewFixedArray(32, NOT_TENURED)));
- }
-}
-
-
TEST(GrowAndShrinkNewSpace) {
+ // Avoid shrinking new space in GC epilogue. This can happen if allocation
+ // throughput samples have been taken while executing the benchmark.
+ FLAG_predictable = true;
+
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
NewSpace* new_space = heap->new_space();
@@ -2371,28 +2373,31 @@ TEST(GrowAndShrinkNewSpace) {
old_capacity = new_space->TotalCapacity();
new_space->Grow();
new_capacity = new_space->TotalCapacity();
- CHECK(2 * old_capacity == new_capacity);
+ CHECK_EQ(2 * old_capacity, new_capacity);
old_capacity = new_space->TotalCapacity();
- FillUpNewSpace(new_space);
+ {
+ v8::HandleScope temporary_scope(CcTest::isolate());
+ heap::SimulateFullSpace(new_space);
+ }
new_capacity = new_space->TotalCapacity();
- CHECK(old_capacity == new_capacity);
+ CHECK_EQ(old_capacity, new_capacity);
// Explicitly shrinking should not affect space capacity.
old_capacity = new_space->TotalCapacity();
new_space->Shrink();
new_capacity = new_space->TotalCapacity();
- CHECK(old_capacity == new_capacity);
+ CHECK_EQ(old_capacity, new_capacity);
// Let the scavenger empty the new space.
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK_LE(new_space->Size(), old_capacity);
// Explicitly shrinking should halve the space capacity.
old_capacity = new_space->TotalCapacity();
new_space->Shrink();
new_capacity = new_space->TotalCapacity();
- CHECK(old_capacity == 2 * new_capacity);
+ CHECK_EQ(old_capacity, 2 * new_capacity);
// Consecutive shrinking should not affect space capacity.
old_capacity = new_space->TotalCapacity();
@@ -2400,10 +2405,9 @@ TEST(GrowAndShrinkNewSpace) {
new_space->Shrink();
new_space->Shrink();
new_capacity = new_space->TotalCapacity();
- CHECK(old_capacity == new_capacity);
+ CHECK_EQ(old_capacity, new_capacity);
}
-
TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
@@ -2417,14 +2421,16 @@ TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
old_capacity = new_space->TotalCapacity();
new_space->Grow();
new_capacity = new_space->TotalCapacity();
- CHECK(2 * old_capacity == new_capacity);
- FillUpNewSpace(new_space);
- heap->CollectAllAvailableGarbage();
+ CHECK_EQ(2 * old_capacity, new_capacity);
+ {
+ v8::HandleScope temporary_scope(CcTest::isolate());
+ heap::SimulateFullSpace(new_space);
+ }
+ CcTest::CollectAllAvailableGarbage();
new_capacity = new_space->TotalCapacity();
- CHECK(old_capacity == new_capacity);
+ CHECK_EQ(old_capacity, new_capacity);
}
-
static int NumberOfGlobalObjects() {
int count = 0;
HeapIterator iterator(CcTest::heap());
@@ -2450,7 +2456,7 @@ TEST(LeakNativeContextViaMap) {
v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
}
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
{
@@ -2476,10 +2482,10 @@ TEST(LeakNativeContextViaMap) {
ctx1p.Reset();
isolate->ContextDisposedNotification();
}
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(1, NumberOfGlobalObjects());
ctx2p.Reset();
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -2499,7 +2505,7 @@ TEST(LeakNativeContextViaFunction) {
v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
}
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
{
@@ -2525,10 +2531,10 @@ TEST(LeakNativeContextViaFunction) {
ctx1p.Reset();
isolate->ContextDisposedNotification();
}
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(1, NumberOfGlobalObjects());
ctx2p.Reset();
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -2546,7 +2552,7 @@ TEST(LeakNativeContextViaMapKeyed) {
v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
}
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
{
@@ -2572,10 +2578,10 @@ TEST(LeakNativeContextViaMapKeyed) {
ctx1p.Reset();
isolate->ContextDisposedNotification();
}
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(1, NumberOfGlobalObjects());
ctx2p.Reset();
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -2593,7 +2599,7 @@ TEST(LeakNativeContextViaMapProto) {
v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
}
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
{
@@ -2623,10 +2629,10 @@ TEST(LeakNativeContextViaMapProto) {
ctx1p.Reset();
isolate->ContextDisposedNotification();
}
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(1, NumberOfGlobalObjects());
ctx2p.Reset();
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -2657,7 +2663,8 @@ TEST(InstanceOfStubWriteBarrier) {
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Stop();
- CcTest::heap()->StartIncrementalMarking();
+ CcTest::heap()->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
i::Handle<JSFunction> f = i::Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
@@ -2669,7 +2676,8 @@ TEST(InstanceOfStubWriteBarrier) {
!marking->IsStopped()) {
// Discard any pending GC requests otherwise we will get GC when we enter
// code below.
- marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
}
CHECK(marking->IsMarking());
@@ -2683,7 +2691,7 @@ TEST(InstanceOfStubWriteBarrier) {
}
CcTest::heap()->incremental_marking()->set_should_hurry(true);
- CcTest::heap()->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
}
namespace {
@@ -2716,6 +2724,8 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
" return s;"
"}"
"f(); f();"
+ "%BaselineFunctionOnNextCall(f);"
+ "f(); f();"
"%OptimizeFunctionOnNextCall(f);"
"f();");
}
@@ -2727,11 +2737,12 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
// Make sure incremental marking it not running.
CcTest::heap()->incremental_marking()->Stop();
- CcTest::heap()->StartIncrementalMarking();
+ CcTest::heap()->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
// The following calls will increment CcTest::heap()->global_ic_age().
CcTest::isolate()->ContextDisposedNotification();
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
CHECK_EQ(0, f->shared()->opt_count());
@@ -2760,6 +2771,8 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
" return s;"
"}"
"f(); f();"
+ "%BaselineFunctionOnNextCall(f);"
+ "f(); f();"
"%OptimizeFunctionOnNextCall(f);"
"f();");
}
@@ -2773,7 +2786,7 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
// The following two calls will increment CcTest::heap()->global_ic_age().
CcTest::isolate()->ContextDisposedNotification();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
CHECK_EQ(0, f->shared()->opt_count());
@@ -2790,7 +2803,7 @@ HEAP_TEST(GCFlags) {
// Set the flags to check whether we appropriately resets them after the GC.
heap->set_current_gc_flags(Heap::kAbortIncrementalMarkingMask);
- heap->CollectAllGarbage(Heap::kReduceMemoryFootprintMask);
+ CcTest::CollectAllGarbage(Heap::kReduceMemoryFootprintMask);
CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags_);
MarkCompactCollector* collector = heap->mark_compact_collector();
@@ -2800,14 +2813,15 @@ HEAP_TEST(GCFlags) {
IncrementalMarking* marking = heap->incremental_marking();
marking->Stop();
- heap->StartIncrementalMarking(Heap::kReduceMemoryFootprintMask);
+ heap->StartIncrementalMarking(Heap::kReduceMemoryFootprintMask,
+ i::GarbageCollectionReason::kTesting);
CHECK_NE(0, heap->current_gc_flags_ & Heap::kReduceMemoryFootprintMask);
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
// NewSpace scavenges should not overwrite the flags.
CHECK_NE(0, heap->current_gc_flags_ & Heap::kReduceMemoryFootprintMask);
- heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags_);
}
@@ -2819,7 +2833,8 @@ TEST(IdleNotificationFinishMarking) {
heap::SimulateFullSpace(CcTest::heap()->old_space());
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Stop();
- CcTest::heap()->StartIncrementalMarking();
+ CcTest::heap()->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
CHECK_EQ(CcTest::heap()->gc_count(), initial_gc_count);
@@ -2828,14 +2843,12 @@ TEST(IdleNotificationFinishMarking) {
// marking delay counter.
// Perform a huge incremental marking step but don't complete marking.
- intptr_t bytes_processed = 0;
do {
- bytes_processed =
- marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_MARKING,
- IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+ marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::DO_NOT_FORCE_COMPLETION, StepOrigin::kV8);
CHECK(!marking->IsIdleMarkingDelayCounterLimitReached());
- } while (bytes_processed);
+ } while (
+ !CcTest::heap()->mark_compact_collector()->marking_deque()->IsEmpty());
// The next invocations of incremental marking are not going to complete
// marking
@@ -2843,8 +2856,7 @@ TEST(IdleNotificationFinishMarking) {
for (size_t i = 0; i < IncrementalMarking::kMaxIdleMarkingDelayCounter - 2;
i++) {
marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_MARKING,
- IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+ IncrementalMarking::DO_NOT_FORCE_COMPLETION, StepOrigin::kV8);
CHECK(!marking->IsIdleMarkingDelayCounterLimitReached());
}
@@ -3344,7 +3356,7 @@ TEST(Regress1465) {
CHECK_EQ(transitions_count, transitions_before);
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
@@ -3408,7 +3420,7 @@ TEST(TransitionArrayShrinksDuringAllocToZero) {
"root = new F");
root = GetByName("root");
AddPropertyTo(2, root, "funny");
- CcTest::heap()->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
@@ -3435,7 +3447,7 @@ TEST(TransitionArrayShrinksDuringAllocToOne) {
root = GetByName("root");
AddPropertyTo(2, root, "funny");
- CcTest::heap()->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
@@ -3462,7 +3474,7 @@ TEST(TransitionArrayShrinksDuringAllocToOnePropertyFound) {
root = GetByName("root");
AddPropertyTo(0, root, "prop9");
- CcTest::i_isolate()->heap()->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
@@ -3528,7 +3540,7 @@ TEST(Regress2143a) {
CcTest::heap()->AgeInlineCaches();
// Explicitly request GC to perform final marking step and sweeping.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
Handle<JSReceiver> root = v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
CcTest::global()
@@ -3571,7 +3583,7 @@ TEST(Regress2143b) {
CcTest::heap()->AgeInlineCaches();
// Explicitly request GC to perform final marking step and sweeping.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
Handle<JSReceiver> root = v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
CcTest::global()
@@ -3601,6 +3613,9 @@ TEST(ReleaseOverReservedPages) {
i::FLAG_page_promotion = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
+ // If there's snapshot available, we don't know whether 20 small arrays will
+ // fit on the initial pages.
+ if (!isolate->snapshot_available()) return;
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
v8::HandleScope scope(CcTest::isolate());
@@ -3619,17 +3634,14 @@ TEST(ReleaseOverReservedPages) {
// Triggering one GC will cause a lot of garbage to be discovered but
// even spread across all allocated pages.
- heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
- "triggered for preparation");
+ CcTest::CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask);
CHECK_GE(overall_page_count, old_space->CountTotalPages());
// Triggering subsequent GCs should cause at least half of the pages
// to be released to the OS after at most two cycles.
- heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
- "triggered by test 1");
+ CcTest::CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask);
CHECK_GE(overall_page_count, old_space->CountTotalPages());
- heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
- "triggered by test 2");
+ CcTest::CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask);
CHECK_GE(overall_page_count, old_space->CountTotalPages() * 2);
// Triggering a last-resort GC should cause all pages to be released to the
@@ -3639,7 +3651,7 @@ TEST(ReleaseOverReservedPages) {
// first page should be small in order to reduce memory used when the VM
// boots, but if the 20 small arrays don't fit on the first page then that's
// an indication that it is too small.
- heap->CollectAllAvailableGarbage("triggered really hard");
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(initial_page_count, old_space->CountTotalPages());
}
@@ -3724,7 +3736,7 @@ TEST(IncrementalMarkingPreservesMonomorphicCallIC) {
CHECK(feedback_vector->Get(feedback_helper.slot(slot2))->IsWeakCell());
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(!WeakCell::cast(feedback_vector->Get(feedback_helper.slot(slot1)))
->cleared());
@@ -3781,7 +3793,7 @@ TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
CHECK(vector->Get(FeedbackVectorSlot(0))->IsWeakCell());
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(vector->Get(FeedbackVectorSlot(0))->IsWeakCell());
}
@@ -3802,7 +3814,7 @@ TEST(IncrementalMarkingPreservesMonomorphicIC) {
CheckVectorIC(f, 0, MONOMORPHIC);
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CheckVectorIC(f, 0, MONOMORPHIC);
}
@@ -3839,7 +3851,7 @@ TEST(IncrementalMarkingPreservesPolymorphicIC) {
// Fire context dispose notification.
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CheckVectorIC(f, 0, POLYMORPHIC);
}
@@ -3877,7 +3889,7 @@ TEST(ContextDisposeDoesntClearPolymorphicIC) {
// Fire context dispose notification.
CcTest::isolate()->ContextDisposedNotification();
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CheckVectorIC(f, 0, POLYMORPHIC);
}
@@ -3919,7 +3931,8 @@ void ReleaseStackTraceDataTest(v8::Isolate* isolate, const char* source,
v8::Local<v8::Context> ctx = isolate->GetCurrentContext();
v8::Local<v8::String> source_string =
v8::String::NewExternalOneByte(isolate, resource).ToLocalChecked();
- i_isolate->heap()->CollectAllAvailableGarbage();
+ i_isolate->heap()->CollectAllAvailableGarbage(
+ i::GarbageCollectionReason::kTesting);
v8::Script::Compile(ctx, source_string)
.ToLocalChecked()
->Run(ctx)
@@ -3930,7 +3943,8 @@ void ReleaseStackTraceDataTest(v8::Isolate* isolate, const char* source,
CHECK(!resource->IsDisposed());
CompileRun(accessor);
- i_isolate->heap()->CollectAllAvailableGarbage();
+ i_isolate->heap()->CollectAllAvailableGarbage(
+ i::GarbageCollectionReason::kTesting);
// External source has been released.
CHECK(resource->IsDisposed());
@@ -4006,7 +4020,7 @@ TEST(Regress159140) {
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Prepare several closures that are all eligible for code flushing
// because all reachable ones are not optimized. Make sure that the
@@ -4050,7 +4064,7 @@ TEST(Regress159140) {
// finish the GC to complete code flushing.
heap::SimulateIncrementalMarking(heap);
CompileRun("%OptimizeFunctionOnNextCall(g); g(3);");
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Unoptimized code is missing and the deoptimizer will go ballistic.
CompileRun("g('bozo');");
@@ -4065,7 +4079,7 @@ TEST(Regress165495) {
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Prepare an optimized closure that the optimized code map will get
// populated. Then age the unoptimized code to trigger code flushing
@@ -4095,7 +4109,7 @@ TEST(Regress165495) {
// Simulate incremental marking so that unoptimized code is flushed
// even though it still is cached in the optimized code map.
heap::SimulateIncrementalMarking(heap);
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Make a new closure that will get code installed from the code map.
// Unoptimized code is missing and the deoptimizer will go ballistic.
@@ -4113,7 +4127,7 @@ TEST(Regress169209) {
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Prepare a shared function info eligible for code flushing for which
// the unoptimized code will be replaced during optimization.
@@ -4170,7 +4184,7 @@ TEST(Regress169209) {
"g(false);");
// Finish garbage collection cycle.
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(shared1->code()->gc_metadata() == NULL);
}
@@ -4211,7 +4225,7 @@ TEST(Regress169928) {
.FromJust());
// First make sure we flip spaces
- CcTest::heap()->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
// Allocate the object.
Handle<FixedArray> array_data = factory->NewFixedArray(2, NOT_TENURED);
@@ -4321,7 +4335,7 @@ TEST(Regress514122) {
HandleScope scope(isolate);
// Perfrom one initial GC to enable code flushing.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Prepare function whose optimized code map we can use.
Handle<SharedFunctionInfo> shared;
@@ -4399,7 +4413,7 @@ TEST(Regress514122) {
}
// Trigger a GC to flush out the bug.
- heap->CollectGarbage(i::OLD_SPACE, "fire in the hole");
+ CcTest::CollectGarbage(i::OLD_SPACE);
boomer->Print();
}
@@ -4413,7 +4427,6 @@ TEST(OptimizedCodeMapReuseEntries) {
CcTest::InitializeVM();
v8::Isolate* v8_isolate = CcTest::isolate();
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
HandleScope scope(isolate);
// Create 3 contexts, allow the 2nd one to be disposed, and verify that
@@ -4429,7 +4442,7 @@ TEST(OptimizedCodeMapReuseEntries) {
.ToLocalChecked();
const char* toplevel = "foo(3); %OptimizeFunctionOnNextCall(foo); foo(3);";
// Perfrom one initial GC to enable code flushing.
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
c1->Enter();
indep->BindToCurrentContext()->Run(c1).ToLocalChecked();
@@ -4463,7 +4476,7 @@ TEST(OptimizedCodeMapReuseEntries) {
// Now, collect garbage. Context c2 should have no roots to it, and it's
// entry in the optimized code map should be free for a new context.
for (int i = 0; i < 4; i++) {
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
Handle<FixedArray> optimized_code_map =
@@ -4518,11 +4531,10 @@ TEST(Regress513496) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
HandleScope scope(isolate);
// Perfrom one initial GC to enable code flushing.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Prepare an optimized closure with containing an inlined function. Then age
// the inlined unoptimized code to trigger code flushing but make sure the
@@ -4566,7 +4578,7 @@ TEST(Regress513496) {
// Finish a full GC cycle so that the unoptimized code of 'g' is flushed even
// though the optimized code for 'f' is reachable via the optimized code map.
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Make a new closure that will get code installed from the code map.
// Unoptimized code is missing and the deoptimizer will go ballistic.
@@ -4589,15 +4601,16 @@ TEST(LargeObjectSlotRecording) {
FixedArray* old_location = *lit;
// Allocate a large object.
- int size = Max(1000000, Page::kMaxRegularHeapObjectSize + KB);
- CHECK(size > Page::kMaxRegularHeapObjectSize);
+ int size = Max(1000000, kMaxRegularHeapObjectSize + KB);
+ CHECK(size > kMaxRegularHeapObjectSize);
Handle<FixedArray> lo = isolate->factory()->NewFixedArray(size, TENURED);
CHECK(heap->lo_space()->Contains(*lo));
// Start incremental marking to active write barrier.
heap::SimulateIncrementalMarking(heap, false);
heap->incremental_marking()->AdvanceIncrementalMarking(
- 10000000, IncrementalMarking::IdleStepActions());
+ 10000000, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
// Create references from the large object to the object on the evacuation
// candidate.
@@ -4608,7 +4621,7 @@ TEST(LargeObjectSlotRecording) {
}
// Move the evaucation candidate object.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Verify that the pointers in the large object got updated.
for (int i = 0; i < size; i += kStep) {
@@ -4655,10 +4668,12 @@ TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
"f(10 * 1024 * 1024);");
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
if (marking->IsStopped()) {
- CcTest::heap()->StartIncrementalMarking();
+ CcTest::heap()->StartIncrementalMarking(
+ i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting);
}
// This big step should be sufficient to mark the whole array.
- marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
CHECK(marking->IsComplete() ||
marking->IsReadyToOverApproximateWeakClosure());
}
@@ -4765,7 +4780,7 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
// Now make sure that a gc should get rid of the function, even though we
// still have the allocation site alive.
for (int i = 0; i < 4; i++) {
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
// The site still exists because of our global handle, but the code is no
@@ -4813,7 +4828,7 @@ TEST(CellsInOptimizedCodeAreWeak) {
// Now make sure that a gc should get rid of the function
for (int i = 0; i < 4; i++) {
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
CHECK(code->marked_for_deoptimization());
@@ -4856,7 +4871,7 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
// Now make sure that a gc should get rid of the function
for (int i = 0; i < 4; i++) {
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
CHECK(code->marked_for_deoptimization());
@@ -4906,8 +4921,8 @@ TEST(NewSpaceObjectsInOptimizedCode) {
.ToLocalChecked())));
CHECK(heap->InNewSpace(*foo));
- heap->CollectGarbage(NEW_SPACE);
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK(!heap->InNewSpace(*foo));
#ifdef VERIFY_HEAP
heap->Verify();
@@ -4918,7 +4933,7 @@ TEST(NewSpaceObjectsInOptimizedCode) {
// Now make sure that a gc should get rid of the function
for (int i = 0; i < 4; i++) {
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
CHECK(code->marked_for_deoptimization());
@@ -4941,7 +4956,7 @@ TEST(NoWeakHashTableLeakWithIncrementalMarking) {
// Get a clean slate regarding optimized functions on the heap.
i::Deoptimizer::DeoptimizeAll(isolate);
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
if (!isolate->use_crankshaft()) return;
HandleScope outer_scope(heap->isolate());
@@ -4965,7 +4980,7 @@ TEST(NoWeakHashTableLeakWithIncrementalMarking) {
CompileRun(source.start());
}
// We have to abort incremental marking here to abandon black pages.
- heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
}
int elements = 0;
if (heap->weak_object_to_code_table()->IsHashTable()) {
@@ -5014,7 +5029,7 @@ TEST(NextCodeLinkIsWeak) {
if (!isolate->use_crankshaft()) return;
HandleScope outer_scope(heap->isolate());
Handle<Code> code;
- heap->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
int code_chain_length_before, code_chain_length_after;
{
HandleScope scope(heap->isolate());
@@ -5028,7 +5043,7 @@ TEST(NextCodeLinkIsWeak) {
code = scope.CloseAndEscape(Handle<Code>(immortal->code()));
CompileRun("mortal = null; immortal = null;");
}
- heap->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
// Now mortal code should be dead.
code_chain_length_after = GetCodeChainLength(*code);
CHECK_EQ(code_chain_length_before - 1, code_chain_length_after);
@@ -5059,7 +5074,7 @@ TEST(NextCodeLinkIsWeak2) {
if (!isolate->use_crankshaft()) return;
HandleScope outer_scope(heap->isolate());
- heap->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
Handle<Context> context(Context::cast(heap->native_contexts_list()), isolate);
Handle<Code> new_head;
Handle<Object> old_head(context->get(Context::OPTIMIZED_CODE_LIST), isolate);
@@ -5072,7 +5087,7 @@ TEST(NextCodeLinkIsWeak2) {
context->set(Context::OPTIMIZED_CODE_LIST, *immortal);
new_head = scope.CloseAndEscape(immortal);
}
- heap->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
// Now mortal code should be dead.
CHECK_EQ(*old_head, new_head->next_code_link());
}
@@ -5122,8 +5137,7 @@ TEST(WeakFunctionInConstructor) {
}
weak_ic_cleared = false;
garbage.SetWeak(&garbage, &ClearWeakIC, v8::WeakCallbackType::kParameter);
- Heap* heap = CcTest::i_isolate()->heap();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(weak_ic_cleared);
// We've determined the constructor in createObj has had it's weak cell
@@ -5135,7 +5149,7 @@ TEST(WeakFunctionInConstructor) {
Object* slot_value = feedback_vector->Get(FeedbackVectorSlot(0));
CHECK(slot_value->IsWeakCell());
if (WeakCell::cast(slot_value)->cleared()) break;
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
Object* slot_value = feedback_vector->Get(FeedbackVectorSlot(0));
@@ -5165,8 +5179,7 @@ void CheckWeakness(const char* source) {
}
weak_ic_cleared = false;
garbage.SetWeak(&garbage, &ClearWeakIC, v8::WeakCallbackType::kParameter);
- Heap* heap = CcTest::i_isolate()->heap();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(weak_ic_cleared);
}
@@ -5360,7 +5373,6 @@ TEST(MonomorphicStaysMonomorphicAfterGC) {
if (FLAG_always_opt) return;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
v8::HandleScope scope(CcTest::isolate());
CompileRun(
"function loadIC(obj) {"
@@ -5379,7 +5391,7 @@ TEST(MonomorphicStaysMonomorphicAfterGC) {
v8::HandleScope scope(CcTest::isolate());
CompileRun("(testIC())");
}
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CheckIC(loadIC, Code::LOAD_IC, 0, MONOMORPHIC);
{
v8::HandleScope scope(CcTest::isolate());
@@ -5393,7 +5405,6 @@ TEST(PolymorphicStaysPolymorphicAfterGC) {
if (FLAG_always_opt) return;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
v8::HandleScope scope(CcTest::isolate());
CompileRun(
"function loadIC(obj) {"
@@ -5415,7 +5426,7 @@ TEST(PolymorphicStaysPolymorphicAfterGC) {
v8::HandleScope scope(CcTest::isolate());
CompileRun("(testIC())");
}
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CheckIC(loadIC, Code::LOAD_IC, 0, POLYMORPHIC);
{
v8::HandleScope scope(CcTest::isolate());
@@ -5428,7 +5439,6 @@ TEST(PolymorphicStaysPolymorphicAfterGC) {
TEST(WeakCell) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- v8::internal::Heap* heap = CcTest::heap();
v8::internal::Factory* factory = isolate->factory();
HandleScope outer_scope(isolate);
@@ -5447,13 +5457,13 @@ TEST(WeakCell) {
}
CHECK(weak_cell1->value()->IsFixedArray());
CHECK_EQ(*survivor, weak_cell2->value());
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK(weak_cell1->value()->IsFixedArray());
CHECK_EQ(*survivor, weak_cell2->value());
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK(weak_cell1->value()->IsFixedArray());
CHECK_EQ(*survivor, weak_cell2->value());
- heap->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK(weak_cell1->cleared());
CHECK_EQ(*survivor, weak_cell2->value());
}
@@ -5478,17 +5488,19 @@ TEST(WeakCellsWithIncrementalMarking) {
CHECK(weak_cell->value()->IsFixedArray());
IncrementalMarking* marking = heap->incremental_marking();
if (marking->IsStopped()) {
- heap->StartIncrementalMarking();
+ heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
}
- marking->Step(128, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
- heap->CollectGarbage(NEW_SPACE);
+ marking->Step(128, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK(weak_cell->value()->IsFixedArray());
weak_cells[i] = inner_scope.CloseAndEscape(weak_cell);
}
// Call collect all twice to make sure that we also cleared
// weak cells that were allocated on black pages.
- heap->CollectAllGarbage();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(*survivor, weak_cells[0]->value());
for (int i = 1; i < N; i++) {
CHECK(weak_cells[i]->cleared());
@@ -5535,7 +5547,7 @@ TEST(AddInstructionChangesNewSpacePromotion) {
heap->DisableInlineAllocation();
heap->set_allocation_timeout(1);
g->Call(env.local(), global, 1, args1).ToLocalChecked();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
@@ -5570,13 +5582,12 @@ static void RequestInterrupt(const v8::FunctionCallbackInfo<v8::Value>& args) {
CcTest::isolate()->RequestInterrupt(&InterruptCallback357137, NULL);
}
-
-UNINITIALIZED_TEST(Regress538257) {
+HEAP_TEST(Regress538257) {
i::FLAG_manual_evacuation_candidates_selection = true;
v8::Isolate::CreateParams create_params;
// Set heap limits.
- create_params.constraints.set_max_semi_space_size(1 * Page::kPageSize / MB);
- create_params.constraints.set_max_old_space_size(6 * Page::kPageSize / MB);
+ create_params.constraints.set_max_semi_space_size(1);
+ create_params.constraints.set_max_old_space_size(6);
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
isolate->Enter();
@@ -5596,7 +5607,8 @@ UNINITIALIZED_TEST(Regress538257) {
->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
}
heap::SimulateFullSpace(old_space);
- heap->CollectGarbage(OLD_SPACE);
+ heap->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
+ i::GarbageCollectionReason::kTesting);
// If we get this far, we've successfully aborted compaction. Any further
// allocations might trigger OOM.
}
@@ -5692,8 +5704,8 @@ UNINITIALIZED_TEST(PromotionQueue) {
CHECK(i::FLAG_min_semi_space_size * MB == new_space->TotalCapacity());
// Call the scavenger two times to get an empty new space
- heap->CollectGarbage(NEW_SPACE);
- heap->CollectGarbage(NEW_SPACE);
+ heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
+ heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
// First create a few objects which will survive a scavenge, and will get
// promoted to the old generation later on. These objects will create
@@ -5704,7 +5716,7 @@ UNINITIALIZED_TEST(PromotionQueue) {
handles[i] = i_isolate->factory()->NewFixedArray(1, NOT_TENURED);
}
- heap->CollectGarbage(NEW_SPACE);
+ heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
CHECK(i::FLAG_min_semi_space_size * MB == new_space->TotalCapacity());
// Fill-up the first semi-space page.
@@ -5721,7 +5733,7 @@ UNINITIALIZED_TEST(PromotionQueue) {
// This scavenge will corrupt memory if the promotion queue is not
// evacuated.
- heap->CollectGarbage(NEW_SPACE);
+ heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
}
isolate->Dispose();
}
@@ -5764,7 +5776,8 @@ TEST(Regress388880) {
// that would cause crash.
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Stop();
- CcTest::heap()->StartIncrementalMarking();
+ CcTest::heap()->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
CHECK(marking->IsMarking());
// Now everything is set up for crashing in JSObject::MigrateFastToFast()
@@ -5790,7 +5803,8 @@ TEST(Regress3631) {
"}"
"weak_map");
if (marking->IsStopped()) {
- CcTest::heap()->StartIncrementalMarking();
+ CcTest::heap()->StartIncrementalMarking(
+ i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting);
}
// Incrementally mark the backing store.
Handle<JSReceiver> obj =
@@ -5799,7 +5813,8 @@ TEST(Regress3631) {
while (!Marking::IsBlack(
ObjectMarking::MarkBitFrom(HeapObject::cast(weak_map->table()))) &&
!marking->IsStopped()) {
- marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
}
// Stash the backing store in a handle.
Handle<Object> save(weak_map->table(), isolate);
@@ -5809,14 +5824,13 @@ TEST(Regress3631) {
" weak_map.set(future_keys[i], i);"
"}");
heap->incremental_marking()->set_should_hurry(true);
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
}
TEST(Regress442710) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
HandleScope sc(isolate);
@@ -5827,7 +5841,7 @@ TEST(Regress442710) {
Handle<String> name = factory->InternalizeUtf8String("testArray");
JSReceiver::SetProperty(global, name, array, SLOPPY).Check();
CompileRun("testArray[0] = 1; testArray[1] = 2; testArray.shift();");
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
}
@@ -5845,7 +5859,6 @@ HEAP_TEST(NumberStringCacheSize) {
TEST(Regress3877) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
HandleScope scope(isolate);
CompileRun("function cls() { this.x = 10; }");
@@ -5863,14 +5876,14 @@ TEST(Regress3877) {
"a.x = new cls();"
"cls.prototype = null;");
for (int i = 0; i < 4; i++) {
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
// The map of a.x keeps prototype alive
CHECK(!weak_prototype->cleared());
// Change the map of a.x and make the previous map garbage collectable.
CompileRun("a.x.__proto__ = {};");
for (int i = 0; i < 4; i++) {
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
CHECK(weak_prototype->cleared());
}
@@ -5897,11 +5910,11 @@ void CheckMapRetainingFor(int n) {
CHECK(!weak_cell->cleared());
for (int i = 0; i < n; i++) {
heap::SimulateIncrementalMarking(heap);
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
}
CHECK(!weak_cell->cleared());
heap::SimulateIncrementalMarking(heap);
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
CHECK(weak_cell->cleared());
}
@@ -5926,14 +5939,14 @@ TEST(RegressArrayListGC) {
Heap* heap = isolate->heap();
AddRetainedMap(isolate, heap);
Handle<Map> map = Map::Create(isolate, 1);
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
// Force GC in old space on next addition of retained map.
Map::WeakCellForMap(map);
heap::SimulateFullSpace(CcTest::heap()->new_space());
for (int i = 0; i < 10; i++) {
heap->AddRetainedMap(map);
}
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
}
@@ -5984,7 +5997,7 @@ static void TestRightTrimFixedTypedArray(i::ExternalArrayType type,
Address next_obj_address = array->address() + array->size();
CHECK(HeapObject::FromAddress(next_obj_address)->IsFiller());
}
- heap->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
}
@@ -6041,7 +6054,7 @@ TEST(PreprocessStackTrace) {
Object::GetElement(isolate, stack_trace, 3).ToHandleChecked();
CHECK(code->IsCode());
- isolate->heap()->CollectAllAvailableGarbage("stack trace preprocessing");
+ CcTest::CollectAllAvailableGarbage();
Handle<Object> pos =
Object::GetElement(isolate, stack_trace, 3).ToHandleChecked();
@@ -6094,7 +6107,7 @@ TEST(BootstrappingExports) {
utils.SetWeak(&utils, UtilsHasBeenCollected,
v8::WeakCallbackType::kParameter);
- CcTest::heap()->CollectAllAvailableGarbage("fire weak callbacks");
+ CcTest::CollectAllAvailableGarbage();
CHECK(utils_has_been_collected);
}
@@ -6152,12 +6165,12 @@ TEST(NewSpaceAllocationCounter) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
size_t counter1 = heap->NewSpaceAllocationCounter();
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
const size_t kSize = 1024;
AllocateInSpace(isolate, kSize, NEW_SPACE);
size_t counter2 = heap->NewSpaceAllocationCounter();
CHECK_EQ(kSize, counter2 - counter1);
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
size_t counter3 = heap->NewSpaceAllocationCounter();
CHECK_EQ(0U, counter3 - counter2);
// Test counter overflow.
@@ -6179,23 +6192,24 @@ TEST(OldSpaceAllocationCounter) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
size_t counter1 = heap->OldGenerationAllocationCounter();
- heap->CollectGarbage(NEW_SPACE);
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
const size_t kSize = 1024;
AllocateInSpace(isolate, kSize, OLD_SPACE);
size_t counter2 = heap->OldGenerationAllocationCounter();
// TODO(ulan): replace all CHECK_LE with CHECK_EQ after v8:4148 is fixed.
CHECK_LE(kSize, counter2 - counter1);
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
size_t counter3 = heap->OldGenerationAllocationCounter();
CHECK_EQ(0u, counter3 - counter2);
AllocateInSpace(isolate, kSize, OLD_SPACE);
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
size_t counter4 = heap->OldGenerationAllocationCounter();
CHECK_LE(kSize, counter4 - counter3);
// Test counter overflow.
size_t max_counter = -1;
- heap->set_old_generation_allocation_counter(max_counter - 10 * kSize);
+ heap->set_old_generation_allocation_counter_at_last_gc(max_counter -
+ 10 * kSize);
size_t start = heap->OldGenerationAllocationCounter();
for (int i = 0; i < 20; i++) {
AllocateInSpace(isolate, kSize, OLD_SPACE);
@@ -6265,7 +6279,7 @@ static void RemoveCodeAndGC(const v8::FunctionCallbackInfo<v8::Value>& args) {
fun->ReplaceCode(*isolate->builtins()->CompileLazy());
fun->shared()->ReplaceCode(*isolate->builtins()->CompileLazy());
fun->shared()->ClearBytecodeArray(); // Bytecode is code too.
- isolate->heap()->CollectAllAvailableGarbage("remove code and gc");
+ CcTest::CollectAllAvailableGarbage();
}
@@ -6352,7 +6366,7 @@ TEST(ScriptIterator) {
Heap* heap = CcTest::heap();
LocalContext context;
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
int script_count = 0;
{
@@ -6378,8 +6392,8 @@ TEST(SharedFunctionInfoIterator) {
Heap* heap = CcTest::heap();
LocalContext context;
- heap->CollectAllGarbage();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
int sfi_count = 0;
{
@@ -6419,7 +6433,7 @@ TEST(Regress519319) {
child.Reset(isolate, v8::Object::New(isolate));
heap::SimulateFullSpace(heap->old_space());
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
{
UniqueId id = MakeUniqueId(parent);
isolate->SetObjectGroupId(parent, id);
@@ -6428,8 +6442,10 @@ TEST(Regress519319) {
// The CollectGarbage call above starts sweeper threads.
// The crash will happen if the following two functions
// are called before sweeping finishes.
- heap->StartIncrementalMarking();
- heap->FinalizeIncrementalMarkingIfComplete("test");
+ heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
+ heap->FinalizeIncrementalMarkingIfComplete(
+ i::GarbageCollectionReason::kTesting);
}
@@ -6467,8 +6483,8 @@ HEAP_TEST(Regress587004) {
Heap* heap = CcTest::heap();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- const int N = (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) /
- kPointerSize;
+ const int N =
+ (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) / kPointerSize;
Handle<FixedArray> array = factory->NewFixedArray(N, TENURED);
CHECK(heap->old_space()->Contains(*array));
Handle<Object> number = factory->NewHeapNumber(1.0);
@@ -6476,7 +6492,7 @@ HEAP_TEST(Regress587004) {
for (int i = 0; i < N; i++) {
array->set(i, *number);
}
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
heap::SimulateFullSpace(heap->old_space());
heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(*array, N - 1);
heap->mark_compact_collector()->EnsureSweepingCompleted();
@@ -6492,7 +6508,7 @@ HEAP_TEST(Regress587004) {
}
// Re-enable old space expansion to avoid OOM crash.
heap->set_force_oom(false);
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
}
HEAP_TEST(Regress589413) {
@@ -6504,8 +6520,8 @@ HEAP_TEST(Regress589413) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
// Get the heap in clean state.
- heap->CollectGarbage(OLD_SPACE);
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
// Fill the new space with byte arrays with elements looking like pointers.
@@ -6519,7 +6535,7 @@ HEAP_TEST(Regress589413) {
handle(byte_array);
}
// Make sure the byte arrays will be promoted on the next GC.
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
// This number is close to large free list category threshold.
const int N = 0x3eee;
{
@@ -6567,7 +6583,7 @@ HEAP_TEST(Regress589413) {
}
// Force allocation from the free list.
heap->set_force_oom(true);
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
}
TEST(Regress598319) {
@@ -6579,7 +6595,7 @@ TEST(Regress598319) {
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- const int kNumberOfObjects = Page::kMaxRegularHeapObjectSize / kPointerSize;
+ const int kNumberOfObjects = kMaxRegularHeapObjectSize / kPointerSize;
struct Arr {
Arr(Isolate* isolate, int number_of_objects) {
@@ -6608,7 +6624,7 @@ TEST(Regress598319) {
CHECK_NOT_NULL(page);
// GC to cleanup state
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
@@ -6625,7 +6641,8 @@ TEST(Regress598319) {
IncrementalMarking* marking = heap->incremental_marking();
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
- heap->StartIncrementalMarking();
+ heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
}
CHECK(marking->IsMarking());
@@ -6638,7 +6655,8 @@ TEST(Regress598319) {
// Now we search for a state where we are in incremental marking and have
// only partially marked the large object.
while (!marking->IsComplete()) {
- marking->Step(i::KB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ marking->Step(i::KB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
if (page->IsFlagSet(Page::HAS_PROGRESS_BAR) && page->progress_bar() > 0) {
CHECK_NE(page->progress_bar(), arr.get()->Size());
{
@@ -6655,7 +6673,8 @@ TEST(Regress598319) {
// Finish marking with bigger steps to speed up test.
while (!marking->IsComplete()) {
- marking->Step(10 * i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ marking->Step(10 * i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -6689,7 +6708,7 @@ TEST(Regress615489) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
@@ -6698,7 +6717,8 @@ TEST(Regress615489) {
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
- heap->StartIncrementalMarking();
+ heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
}
CHECK(marking->IsMarking());
marking->StartBlackAllocationForTesting();
@@ -6708,14 +6728,15 @@ TEST(Regress615489) {
isolate->factory()->NewFixedArray(500, TENURED)->Size();
}
while (!marking->IsComplete()) {
- marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
}
CHECK(marking->IsComplete());
intptr_t size_before = heap->SizeOfObjects();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
intptr_t size_after = heap->SizeOfObjects();
// Live size does not increase after garbage collection.
CHECK_LE(size_after, size_before);
@@ -6743,8 +6764,8 @@ TEST(Regress631969) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
// Get the heap in clean state.
- heap->CollectGarbage(OLD_SPACE);
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
// Allocate two strings in a fresh page and mark the page as evacuation
@@ -6761,13 +6782,14 @@ TEST(Regress631969) {
heap::SimulateFullSpace(heap->old_space());
Handle<String> s3;
factory->NewConsString(s1, s2).ToHandle(&s3);
- heap->CollectGarbage(NEW_SPACE);
- heap->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
// Finish incremental marking.
IncrementalMarking* marking = heap->incremental_marking();
while (!marking->IsComplete()) {
- marking->Step(MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ marking->Step(MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -6776,7 +6798,7 @@ TEST(Regress631969) {
{
StaticOneByteResource external_string("12345678901234");
s3->MakeExternal(&external_string);
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
}
}
@@ -6786,7 +6808,7 @@ TEST(LeftTrimFixedArrayInBlackArea) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
@@ -6795,7 +6817,8 @@ TEST(LeftTrimFixedArrayInBlackArea) {
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
- heap->StartIncrementalMarking();
+ heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
}
CHECK(marking->IsMarking());
marking->StartBlackAllocationForTesting();
@@ -6823,7 +6846,7 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
@@ -6832,7 +6855,8 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
- heap->StartIncrementalMarking();
+ heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
}
CHECK(marking->IsMarking());
marking->StartBlackAllocationForTesting();
@@ -6887,7 +6911,7 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
@@ -6896,7 +6920,8 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
- heap->StartIncrementalMarking();
+ heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
}
CHECK(marking->IsMarking());
marking->StartBlackAllocationForTesting();
@@ -6945,7 +6970,7 @@ TEST(SlotFilteringAfterBlackAreas) {
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
MarkCompactCollector* mark_compact_collector = heap->mark_compact_collector();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
@@ -6954,7 +6979,8 @@ TEST(SlotFilteringAfterBlackAreas) {
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
- heap->StartIncrementalMarking();
+ heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
}
CHECK(marking->IsMarking());
marking->StartBlackAllocationForTesting();
@@ -7014,7 +7040,7 @@ TEST(UncommitUnusedLargeObjectMemory) {
array->Shrink(1);
CHECK(array->Size() < size_before);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(chunk->CommittedPhysicalMemory() < committed_memory_before);
size_t shrinked_size =
RoundUp((array->address() - chunk->address()) + array->Size(),
diff --git a/deps/v8/test/cctest/heap/test-incremental-marking.cc b/deps/v8/test/cctest/heap/test-incremental-marking.cc
index 59697a94a1..c659cecea7 100644
--- a/deps/v8/test/cctest/heap/test-incremental-marking.cc
+++ b/deps/v8/test/cctest/heap/test-incremental-marking.cc
@@ -31,11 +31,8 @@ namespace internal {
class MockPlatform : public v8::Platform {
public:
explicit MockPlatform(v8::Platform* platform)
- : platform_(platform), idle_task_(nullptr), delayed_task_(nullptr) {}
- virtual ~MockPlatform() {
- delete idle_task_;
- delete delayed_task_;
- }
+ : platform_(platform), task_(nullptr) {}
+ virtual ~MockPlatform() { delete task_; }
void CallOnBackgroundThread(Task* task,
ExpectedRuntime expected_runtime) override {
@@ -43,15 +40,12 @@ class MockPlatform : public v8::Platform {
}
void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
- platform_->CallOnForegroundThread(isolate, task);
+ task_ = task;
}
void CallDelayedOnForegroundThread(v8::Isolate* isolate, Task* task,
double delay_in_seconds) override {
- if (delayed_task_ != nullptr) {
- delete delayed_task_;
- }
- delayed_task_ = task;
+ platform_->CallDelayedOnForegroundThread(isolate, task, delay_in_seconds);
}
double MonotonicallyIncreasingTime() override {
@@ -60,30 +54,21 @@ class MockPlatform : public v8::Platform {
void CallIdleOnForegroundThread(v8::Isolate* isolate,
IdleTask* task) override {
- CHECK(nullptr == idle_task_);
- idle_task_ = task;
+ platform_->CallIdleOnForegroundThread(isolate, task);
}
bool IdleTasksEnabled(v8::Isolate* isolate) override { return true; }
- bool PendingIdleTask() { return idle_task_ != nullptr; }
-
- void PerformIdleTask(double idle_time_in_seconds) {
- IdleTask* task = idle_task_;
- idle_task_ = nullptr;
- task->Run(MonotonicallyIncreasingTime() + idle_time_in_seconds);
- delete task;
- }
-
- bool PendingDelayedTask() { return delayed_task_ != nullptr; }
+ bool PendingTask() { return task_ != nullptr; }
- void PerformDelayedTask() {
- Task* task = delayed_task_;
- delayed_task_ = nullptr;
+ void PerformTask() {
+ Task* task = task_;
+ task_ = nullptr;
task->Run();
delete task;
}
+ using Platform::AddTraceEvent;
uint64_t AddTraceEvent(char phase, const uint8_t* categoryEnabledFlag,
const char* name, const char* scope, uint64_t id,
uint64_t bind_id, int numArgs, const char** argNames,
@@ -108,12 +93,10 @@ class MockPlatform : public v8::Platform {
private:
v8::Platform* platform_;
- IdleTask* idle_task_;
- Task* delayed_task_;
+ Task* task_;
};
-
-TEST(IncrementalMarkingUsingIdleTasks) {
+TEST(IncrementalMarkingUsingTasks) {
if (!i::FLAG_incremental_marking) return;
CcTest::InitializeVM();
v8::Platform* old_platform = i::V8::GetCurrentPlatform();
@@ -122,16 +105,10 @@ TEST(IncrementalMarkingUsingIdleTasks) {
i::heap::SimulateFullSpace(CcTest::heap()->old_space());
i::IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Stop();
- marking->Start();
- CHECK(platform.PendingIdleTask());
- const double kLongIdleTimeInSeconds = 1;
- const double kShortIdleTimeInSeconds = 0.010;
- const int kShortStepCount = 10;
- for (int i = 0; i < kShortStepCount && platform.PendingIdleTask(); i++) {
- platform.PerformIdleTask(kShortIdleTimeInSeconds);
- }
- while (platform.PendingIdleTask()) {
- platform.PerformIdleTask(kLongIdleTimeInSeconds);
+ marking->Start(i::GarbageCollectionReason::kTesting);
+ CHECK(platform.PendingTask());
+ while (platform.PendingTask()) {
+ platform.PerformTask();
}
CHECK(marking->IsStopped());
i::V8::SetPlatformForTesting(old_platform);
@@ -140,55 +117,25 @@ TEST(IncrementalMarkingUsingIdleTasks) {
TEST(IncrementalMarkingUsingIdleTasksAfterGC) {
if (!i::FLAG_incremental_marking) return;
- CcTest::InitializeVM();
- v8::Platform* old_platform = i::V8::GetCurrentPlatform();
- MockPlatform platform(old_platform);
- i::V8::SetPlatformForTesting(&platform);
- i::heap::SimulateFullSpace(CcTest::heap()->old_space());
- CcTest::heap()->CollectAllGarbage();
- i::IncrementalMarking* marking = CcTest::heap()->incremental_marking();
- marking->Stop();
- marking->Start();
- CHECK(platform.PendingIdleTask());
- const double kLongIdleTimeInSeconds = 1;
- const double kShortIdleTimeInSeconds = 0.010;
- const int kShortStepCount = 10;
- for (int i = 0; i < kShortStepCount && platform.PendingIdleTask(); i++) {
- platform.PerformIdleTask(kShortIdleTimeInSeconds);
- }
- while (platform.PendingIdleTask()) {
- platform.PerformIdleTask(kLongIdleTimeInSeconds);
- }
- CHECK(marking->IsStopped());
- i::V8::SetPlatformForTesting(old_platform);
-}
-
-TEST(IncrementalMarkingUsingDelayedTasks) {
- if (!i::FLAG_incremental_marking) return;
CcTest::InitializeVM();
v8::Platform* old_platform = i::V8::GetCurrentPlatform();
MockPlatform platform(old_platform);
i::V8::SetPlatformForTesting(&platform);
i::heap::SimulateFullSpace(CcTest::heap()->old_space());
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ // Perform any pending idle tasks.
+ while (platform.PendingTask()) {
+ platform.PerformTask();
+ }
+ CHECK(!platform.PendingTask());
i::IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Stop();
- marking->Start();
- CHECK(platform.PendingIdleTask());
- // The delayed task should be a no-op if the idle task makes progress.
- const int kIgnoredDelayedTaskStepCount = 1000;
- for (int i = 0; i < kIgnoredDelayedTaskStepCount; i++) {
- // Dummy idle task progress.
- marking->incremental_marking_job()->NotifyIdleTaskProgress();
- CHECK(platform.PendingDelayedTask());
- platform.PerformDelayedTask();
- }
- // Once we stop notifying idle task progress, the delayed tasks
- // should finish marking.
- while (!marking->IsStopped() && platform.PendingDelayedTask()) {
- platform.PerformDelayedTask();
+ marking->Start(i::GarbageCollectionReason::kTesting);
+ CHECK(platform.PendingTask());
+ while (platform.PendingTask()) {
+ platform.PerformTask();
}
- // There could be pending delayed task from memory reducer after GC finishes.
CHECK(marking->IsStopped());
i::V8::SetPlatformForTesting(old_platform);
}
diff --git a/deps/v8/test/cctest/heap/test-lab.cc b/deps/v8/test/cctest/heap/test-lab.cc
index 5a0ff2fbc4..b625206f48 100644
--- a/deps/v8/test/cctest/heap/test-lab.cc
+++ b/deps/v8/test/cctest/heap/test-lab.cc
@@ -8,6 +8,9 @@
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/heap/spaces-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/heap/incremental-marking.h -> src/objects-inl.h
+#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -16,8 +19,7 @@ namespace internal {
static Address AllocateLabBackingStore(Heap* heap, intptr_t size_in_bytes) {
AllocationResult result = heap->old_space()->AllocateRaw(
static_cast<int>(size_in_bytes), kDoubleAligned);
- Object* obj = result.ToObjectChecked();
- Address adr = HeapObject::cast(obj)->address();
+ Address adr = result.ToObjectChecked()->address();
return adr;
}
diff --git a/deps/v8/test/cctest/heap/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index 1e5d30d0e7..d0f7f82741 100644
--- a/deps/v8/test/cctest/heap/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -84,19 +84,22 @@ TEST(Promotion) {
heap::SealCurrentObjects(heap);
- int array_length =
- heap::FixedArrayLenFromSize(Page::kMaxRegularHeapObjectSize);
+ int array_length = heap::FixedArrayLenFromSize(kMaxRegularHeapObjectSize);
Handle<FixedArray> array = isolate->factory()->NewFixedArray(array_length);
// Array should be in the new space.
CHECK(heap->InSpace(*array, NEW_SPACE));
- heap->CollectAllGarbage();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(heap->InSpace(*array, OLD_SPACE));
}
}
HEAP_TEST(NoPromotion) {
+ // Page promotion allows pages to be moved to old space even in the case of
+ // OOM scenarios.
+ FLAG_page_promotion = false;
+
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
{
@@ -105,15 +108,14 @@ HEAP_TEST(NoPromotion) {
heap::SealCurrentObjects(heap);
- int array_length =
- heap::FixedArrayLenFromSize(Page::kMaxRegularHeapObjectSize);
+ int array_length = heap::FixedArrayLenFromSize(kMaxRegularHeapObjectSize);
Handle<FixedArray> array = isolate->factory()->NewFixedArray(array_length);
heap->set_force_oom(true);
// Array should be in the new space.
CHECK(heap->InSpace(*array, NEW_SPACE));
- heap->CollectAllGarbage();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(heap->InSpace(*array, NEW_SPACE));
}
}
@@ -130,7 +132,7 @@ HEAP_TEST(MarkCompactCollector) {
Handle<JSGlobalObject> global(isolate->context()->global_object());
// call mark-compact when heap is empty
- heap->CollectGarbage(OLD_SPACE, "trigger 1");
+ CcTest::CollectGarbage(OLD_SPACE);
// keep allocating garbage in new space until it fails
const int arraysize = 100;
@@ -138,14 +140,14 @@ HEAP_TEST(MarkCompactCollector) {
do {
allocation = heap->AllocateFixedArray(arraysize);
} while (!allocation.IsRetry());
- heap->CollectGarbage(NEW_SPACE, "trigger 2");
+ CcTest::CollectGarbage(NEW_SPACE);
heap->AllocateFixedArray(arraysize).ToObjectChecked();
// keep allocating maps until it fails
do {
allocation = heap->AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
} while (!allocation.IsRetry());
- heap->CollectGarbage(MAP_SPACE, "trigger 3");
+ CcTest::CollectGarbage(MAP_SPACE);
heap->AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize).ToObjectChecked();
{ HandleScope scope(isolate);
@@ -157,7 +159,7 @@ HEAP_TEST(MarkCompactCollector) {
factory->NewJSObject(function);
}
- heap->CollectGarbage(OLD_SPACE, "trigger 4");
+ CcTest::CollectGarbage(OLD_SPACE);
{ HandleScope scope(isolate);
Handle<String> func_name = factory->InternalizeUtf8String("theFunction");
@@ -175,7 +177,7 @@ HEAP_TEST(MarkCompactCollector) {
JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check();
}
- heap->CollectGarbage(OLD_SPACE, "trigger 5");
+ CcTest::CollectGarbage(OLD_SPACE);
{ HandleScope scope(isolate);
Handle<String> obj_name = factory->InternalizeUtf8String("theObject");
@@ -218,7 +220,7 @@ TEST(MapCompact) {
// be able to trigger map compaction.
// To give an additional chance to fail, try to force compaction which
// should be impossible right now.
- CcTest::heap()->CollectAllGarbage(Heap::kForceCompactionMask);
+ CcTest::CollectAllGarbage(Heap::kForceCompactionMask);
// And now map pointers should be encodable again.
CHECK(CcTest::heap()->map_space()->MapPointersEncodable());
}
@@ -299,7 +301,7 @@ HEAP_TEST(ObjectGroups) {
g2c1.location());
}
// Do a full GC
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
// All object should be alive.
CHECK_EQ(0, NumberOfWeakCalls);
@@ -326,7 +328,7 @@ HEAP_TEST(ObjectGroups) {
g2c1.location());
}
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
// All objects should be gone. 5 global handles in total.
CHECK_EQ(5, NumberOfWeakCalls);
@@ -339,7 +341,7 @@ HEAP_TEST(ObjectGroups) {
g2c1.location(), reinterpret_cast<void*>(&g2c1_and_id),
&WeakPointerCallback, v8::WeakCallbackType::kParameter);
- heap->CollectGarbage(OLD_SPACE);
+ CcTest::CollectGarbage(OLD_SPACE);
CHECK_EQ(7, NumberOfWeakCalls);
}
diff --git a/deps/v8/test/cctest/heap/test-page-promotion.cc b/deps/v8/test/cctest/heap/test-page-promotion.cc
index 4ec2e2a416..b3ac4960a5 100644
--- a/deps/v8/test/cctest/heap/test-page-promotion.cc
+++ b/deps/v8/test/cctest/heap/test-page-promotion.cc
@@ -2,7 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/factory.h"
#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/spaces-inl.h"
+#include "src/isolate.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
@@ -11,7 +21,7 @@ namespace {
v8::Isolate* NewIsolateForPagePromotion() {
i::FLAG_page_promotion = true;
i::FLAG_page_promotion_threshold = 0; // %
- i::FLAG_min_semi_space_size = 8 * (i::Page::kPageSize / i::MB);
+ i::FLAG_min_semi_space_size = 8;
// We cannot optimize for size as we require a new space with more than one
// page.
i::FLAG_optimize_for_size = false;
@@ -40,7 +50,7 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
std::vector<Handle<FixedArray>> handles;
heap::SimulateFullSpace(heap->new_space(), &handles);
- heap->CollectGarbage(NEW_SPACE);
+ heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
CHECK_GT(handles.size(), 0u);
// First object in handle should be on the first page.
Handle<FixedArray> first_object = handles.front();
@@ -99,7 +109,6 @@ UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) {
// Fill the current page which potentially contains the age mark.
heap::FillCurrentPage(heap->new_space());
-
// Allocate a buffer we would like to check against.
Handle<JSArrayBuffer> buffer =
i_isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 2328518f2a..262d0c5d58 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -28,10 +28,15 @@
#include <stdlib.h>
#include "src/base/platform/platform.h"
+#include "src/heap/spaces-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/heap/incremental-marking.h -> src/objects-inl.h
+#include "src/objects-inl.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
+#include "test/cctest/heap/heap-utils.h"
namespace v8 {
namespace internal {
@@ -362,11 +367,10 @@ TEST(NewSpace) {
CcTest::heap()->InitialSemiSpaceSize()));
CHECK(new_space.HasBeenSetUp());
- while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
- Object* obj =
- new_space.AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
- .ToObjectChecked();
- CHECK(new_space.Contains(HeapObject::cast(obj)));
+ while (new_space.Available() >= kMaxRegularHeapObjectSize) {
+ CHECK(new_space.Contains(
+ new_space.AllocateRawUnaligned(kMaxRegularHeapObjectSize)
+ .ToObjectChecked()));
}
new_space.TearDown();
@@ -389,7 +393,7 @@ TEST(OldSpace) {
CHECK(s->SetUp());
while (s->Available() > 0) {
- s->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
+ s->AllocateRawUnaligned(kMaxRegularHeapObjectSize).ToObjectChecked();
}
delete s;
@@ -420,11 +424,11 @@ TEST(CompactionSpace) {
// and would thus neither grow, nor be able to allocate an object.
const int kNumObjects = 100;
const int kNumObjectsPerPage =
- compaction_space->AreaSize() / Page::kMaxRegularHeapObjectSize;
+ compaction_space->AreaSize() / kMaxRegularHeapObjectSize;
const int kExpectedPages =
(kNumObjects + kNumObjectsPerPage - 1) / kNumObjectsPerPage;
for (int i = 0; i < kNumObjects; i++) {
- compaction_space->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
+ compaction_space->AllocateRawUnaligned(kMaxRegularHeapObjectSize)
.ToObjectChecked();
}
int pages_in_old_space = old_space->CountTotalPages();
@@ -445,6 +449,9 @@ TEST(CompactionSpace) {
TEST(LargeObjectSpace) {
+ // This test does not initialize allocated objects, which confuses the
+ // incremental marker.
+ FLAG_incremental_marking = false;
v8::V8::Initialize();
LargeObjectSpace* lo = CcTest::heap()->lo_space();
@@ -478,8 +485,7 @@ TEST(LargeObjectSpace) {
CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
}
-
-TEST(SizeOfFirstPageIsLargeEnough) {
+TEST(SizeOfInitialHeap) {
if (i::FLAG_always_opt) return;
// Bootstrapping without a snapshot causes more allocations.
CcTest::InitializeVM();
@@ -494,27 +500,37 @@ TEST(SizeOfFirstPageIsLargeEnough) {
->IsUndefined()) {
return;
}
+ // Initial size of LO_SPACE
+ size_t initial_lo_space = isolate->heap()->lo_space()->Size();
- // If this test fails due to enabling experimental natives that are not part
- // of the snapshot, we may need to adjust CalculateFirstPageSizes.
+ // The limit for each space for an empty isolate containing just the
+ // snapshot.
+ const size_t kMaxInitialSizePerSpace = 2 * MB;
- // Freshly initialized VM gets by with one page per space.
+ // Freshly initialized VM gets by with the snapshot size (which is below
+ // kMaxInitialSizePerSpace per space).
+ Heap* heap = isolate->heap();
+ int page_count[LAST_PAGED_SPACE + 1] = {0, 0, 0, 0};
for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
// Debug code can be very large, so skip CODE_SPACE if we are generating it.
if (i == CODE_SPACE && i::FLAG_debug_code) continue;
- CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
+
+ page_count[i] = heap->paged_space(i)->CountTotalPages();
+ // Check that the initial heap is also below the limit.
+ CHECK_LT(heap->paged_space(i)->CommittedMemory(), kMaxInitialSizePerSpace);
}
- // Executing the empty script gets by with one page per space.
+ // Executing the empty script gets by with the same number of pages, i.e.,
+ // requires no extra space.
CompileRun("/*empty*/");
for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
// Debug code can be very large, so skip CODE_SPACE if we are generating it.
if (i == CODE_SPACE && i::FLAG_debug_code) continue;
- CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
+ CHECK_EQ(page_count[i], isolate->heap()->paged_space(i)->CountTotalPages());
}
// No large objects required to perform the above steps.
- CHECK(isolate->heap()->lo_space()->IsEmpty());
+ CHECK_EQ(initial_lo_space, isolate->heap()->lo_space()->Size());
}
static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
@@ -681,5 +697,104 @@ UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
isolate->Dispose();
}
+TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ heap::SealCurrentObjects(CcTest::heap());
+
+ // Prepare page that only contains a single object and a trailing FreeSpace
+ // filler.
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(128, TENURED);
+ Page* page = Page::FromAddress(array->address());
+
+ // Reset space so high water mark is consistent.
+ CcTest::heap()->old_space()->ResetFreeList();
+ CcTest::heap()->old_space()->EmptyAllocationInfo();
+
+ HeapObject* filler =
+ HeapObject::FromAddress(array->address() + array->Size());
+ CHECK(filler->IsFreeSpace());
+ size_t shrinked = page->ShrinkToHighWaterMark();
+ size_t should_have_shrinked =
+ RoundDown(static_cast<size_t>(Page::kAllocatableMemory - array->Size()),
+ base::OS::CommitPageSize());
+ CHECK_EQ(should_have_shrinked, shrinked);
+}
+
+TEST(ShrinkPageToHighWaterMarkNoFiller) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ heap::SealCurrentObjects(CcTest::heap());
+
+ const int kFillerSize = 0;
+ std::vector<Handle<FixedArray>> arrays =
+ heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
+ Handle<FixedArray> array = arrays.back();
+ Page* page = Page::FromAddress(array->address());
+ CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
+
+ // Reset space so high water mark and fillers are consistent.
+ CcTest::heap()->old_space()->ResetFreeList();
+ CcTest::heap()->old_space()->EmptyAllocationInfo();
+
+ const size_t shrinked = page->ShrinkToHighWaterMark();
+ CHECK_EQ(0, shrinked);
+}
+
+TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ heap::SealCurrentObjects(CcTest::heap());
+
+ const int kFillerSize = kPointerSize;
+ std::vector<Handle<FixedArray>> arrays =
+ heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
+ Handle<FixedArray> array = arrays.back();
+ Page* page = Page::FromAddress(array->address());
+ CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
+
+ // Reset space so high water mark and fillers are consistent.
+ CcTest::heap()->old_space()->ResetFreeList();
+ CcTest::heap()->old_space()->EmptyAllocationInfo();
+
+ HeapObject* filler =
+ HeapObject::FromAddress(array->address() + array->Size());
+ CHECK_EQ(filler->map(), CcTest::heap()->one_pointer_filler_map());
+
+ const size_t shrinked = page->ShrinkToHighWaterMark();
+ CHECK_EQ(0, shrinked);
+}
+
+TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ heap::SealCurrentObjects(CcTest::heap());
+
+ const int kFillerSize = 2 * kPointerSize;
+ std::vector<Handle<FixedArray>> arrays =
+ heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
+ Handle<FixedArray> array = arrays.back();
+ Page* page = Page::FromAddress(array->address());
+ CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
+
+ // Reset space so high water mark and fillers are consistent.
+ CcTest::heap()->old_space()->ResetFreeList();
+ CcTest::heap()->old_space()->EmptyAllocationInfo();
+
+ HeapObject* filler =
+ HeapObject::FromAddress(array->address() + array->Size());
+ CHECK_EQ(filler->map(), CcTest::heap()->two_pointer_filler_map());
+
+ const size_t shrinked = page->ShrinkToHighWaterMark();
+ CHECK_EQ(0, shrinked);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index 507875742d..81be1c0028 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -12,7 +12,6 @@
#include "include/v8.h"
#include "src/base/logging.h"
-#include "src/compiler.h"
#include "src/runtime/runtime.h"
#include "src/interpreter/bytecode-array-iterator.h"
@@ -47,13 +46,19 @@ std::string BytecodeExpectationsPrinter::WrapCodeInFunction(
return program_stream.str();
}
-v8::Local<v8::Script> BytecodeExpectationsPrinter::Compile(
+v8::Local<v8::Script> BytecodeExpectationsPrinter::CompileScript(
const char* program) const {
v8::Local<v8::String> source = V8StringFromUTF8(program);
return v8::Script::Compile(isolate_->GetCurrentContext(), source)
.ToLocalChecked();
}
+v8::Local<v8::Module> BytecodeExpectationsPrinter::CompileModule(
+ const char* program) const {
+ v8::ScriptCompiler::Source source(V8StringFromUTF8(program));
+ return v8::ScriptCompiler::CompileModule(isolate_, &source).ToLocalChecked();
+}
+
void BytecodeExpectationsPrinter::Run(v8::Local<v8::Script> script) const {
(void)script->Run(isolate_->GetCurrentContext());
}
@@ -75,6 +80,13 @@ BytecodeExpectationsPrinter::GetBytecodeArrayForGlobal(
}
i::Handle<i::BytecodeArray>
+BytecodeExpectationsPrinter::GetBytecodeArrayForModule(
+ v8::Local<v8::Module> module) const {
+ i::Handle<i::Module> i_module = v8::Utils::OpenHandle(*module);
+ return i::handle(i_module->shared()->bytecode_array(), i_isolate());
+}
+
+i::Handle<i::BytecodeArray>
BytecodeExpectationsPrinter::GetBytecodeArrayForScript(
v8::Local<v8::Script> script) const {
i::Handle<i::JSFunction> js_function = v8::Utils::OpenHandle(*script);
@@ -151,6 +163,9 @@ void BytecodeExpectationsPrinter::PrintBytecodeOperand(
case OperandType::kIdx:
stream << bytecode_iterator.GetIndexOperand(op_index);
break;
+ case OperandType::kUImm:
+ stream << bytecode_iterator.GetUnsignedImmediateOperand(op_index);
+ break;
case OperandType::kImm:
stream << bytecode_iterator.GetImmediateOperand(op_index);
break;
@@ -225,32 +240,21 @@ void BytecodeExpectationsPrinter::PrintV8String(std::ostream& stream,
void BytecodeExpectationsPrinter::PrintConstant(
std::ostream& stream, i::Handle<i::Object> constant) const {
- switch (const_pool_type_) {
- case ConstantPoolType::kString:
- CHECK(constant->IsString());
+ if (constant->IsSmi()) {
+ stream << "Smi [";
+ i::Smi::cast(*constant)->SmiPrint(stream);
+ stream << "]";
+ } else {
+ stream << i::HeapObject::cast(*constant)->map()->instance_type();
+ if (constant->IsHeapNumber()) {
+ stream << " [";
+ i::HeapNumber::cast(*constant)->HeapNumberPrint(stream);
+ stream << "]";
+ } else if (constant->IsString()) {
+ stream << " [";
PrintV8String(stream, i::String::cast(*constant));
- break;
- case ConstantPoolType::kNumber:
- if (constant->IsSmi()) {
- i::Smi::cast(*constant)->SmiPrint(stream);
- } else if (constant->IsHeapNumber()) {
- i::HeapNumber::cast(*constant)->HeapNumberPrint(stream);
- } else {
- UNREACHABLE();
- }
- break;
- case ConstantPoolType::kMixed:
- if (constant->IsSmi()) {
- stream << "kInstanceTypeDontCare";
- } else {
- stream << "InstanceType::"
- << i::HeapObject::cast(*constant)->map()->instance_type();
- }
- break;
- case ConstantPoolType::kUnknown:
- default:
- UNREACHABLE();
- return;
+ stream << "]";
+ }
}
}
@@ -335,13 +339,20 @@ void BytecodeExpectationsPrinter::PrintExpectation(
wrap_ ? WrapCodeInFunction(test_function_name_.c_str(), snippet)
: snippet;
- v8::Local<v8::Script> script = Compile(source_code.c_str());
-
- if (execute_) Run(script);
-
- i::Handle<i::BytecodeArray> bytecode_array =
- top_level_ ? GetBytecodeArrayForScript(script)
- : GetBytecodeArrayForGlobal(test_function_name_.c_str());
+ i::Handle<i::BytecodeArray> bytecode_array;
+ if (module_) {
+ CHECK(top_level_ && !wrap_);
+ v8::Local<v8::Module> module = CompileModule(source_code.c_str());
+ bytecode_array = GetBytecodeArrayForModule(module);
+ } else {
+ v8::Local<v8::Script> script = CompileScript(source_code.c_str());
+ if (top_level_) {
+ bytecode_array = GetBytecodeArrayForScript(script);
+ } else {
+ Run(script);
+ bytecode_array = GetBytecodeArrayForGlobal(test_function_name_.c_str());
+ }
+ }
stream << "---\n";
PrintCodeSnippet(stream, snippet);
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
index c64ca90c81..89f79d3b40 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
@@ -26,18 +26,9 @@ class BytecodeArrayIterator;
class BytecodeExpectationsPrinter final {
public:
- enum class ConstantPoolType {
- kUnknown,
- kString,
- kNumber,
- kMixed,
- };
-
- BytecodeExpectationsPrinter(v8::Isolate* i,
- ConstantPoolType t = ConstantPoolType::kMixed)
+ explicit BytecodeExpectationsPrinter(v8::Isolate* i)
: isolate_(i),
- const_pool_type_(t),
- execute_(true),
+ module_(false),
wrap_(true),
top_level_(false),
test_function_name_(kDefaultTopFunctionName) {}
@@ -45,13 +36,8 @@ class BytecodeExpectationsPrinter final {
void PrintExpectation(std::ostream& stream, // NOLINT
const std::string& snippet) const;
- void set_constant_pool_type(ConstantPoolType const_pool_type) {
- const_pool_type_ = const_pool_type;
- }
- ConstantPoolType const_pool_type() const { return const_pool_type_; }
-
- void set_execute(bool execute) { execute_ = execute; }
- bool execute() const { return execute_; }
+ void set_module(bool module) { module_ = module; }
+ bool module() const { return module_; }
void set_wrap(bool wrap) { wrap_ = wrap; }
bool wrap() const { return wrap_; }
@@ -98,10 +84,13 @@ class BytecodeExpectationsPrinter final {
std::string WrapCodeInFunction(const char* function_name,
const std::string& function_body) const;
- v8::Local<v8::Script> Compile(const char* program) const;
+ v8::Local<v8::Script> CompileScript(const char* program) const;
+ v8::Local<v8::Module> CompileModule(const char* program) const;
void Run(v8::Local<v8::Script> script) const;
i::Handle<i::BytecodeArray> GetBytecodeArrayForGlobal(
const char* global_name) const;
+ i::Handle<v8::internal::BytecodeArray> GetBytecodeArrayForModule(
+ v8::Local<v8::Module> module) const;
i::Handle<v8::internal::BytecodeArray> GetBytecodeArrayForScript(
v8::Local<v8::Script> script) const;
@@ -110,8 +99,7 @@ class BytecodeExpectationsPrinter final {
}
v8::Isolate* isolate_;
- ConstantPoolType const_pool_type_;
- bool execute_;
+ bool module_;
bool wrap_;
bool top_level_;
std::string test_function_name_;
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
index 4997d1a004..92cdcac1bf 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -16,11 +14,11 @@ parameter count: 1
bytecode array length: 6
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ /* 34 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(9),
/* 51 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -36,21 +34,21 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
- /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(9),
B(Star), R(2),
B(LdaZero),
B(Star), R(1),
B(Ldar), R(0),
- /* 54 E> */ B(StaKeyedPropertySloppy), R(2), R(1), U8(2),
+ /* 54 E> */ B(StaKeyedPropertySloppy), R(2), R(1), U8(3),
B(LdaSmi), U8(1),
B(Star), R(1),
- /* 57 E> */ B(AddSmi), U8(1), R(0), U8(1),
- B(StaKeyedPropertySloppy), R(2), R(1), U8(2),
+ /* 57 E> */ B(AddSmi), U8(1), R(0), U8(2),
+ B(StaKeyedPropertySloppy), R(2), R(1), U8(3),
B(Ldar), R(2),
/* 66 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -64,11 +62,11 @@ parameter count: 1
bytecode array length: 6
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateArrayLiteral), U8(0), U8(2), U8(2),
+ /* 34 S> */ B(CreateArrayLiteral), U8(0), U8(2), U8(0),
/* 62 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -84,35 +82,35 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
- /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(2), U8(2),
+ /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(2), U8(0),
B(Star), R(2),
B(LdaZero),
B(Star), R(1),
- B(CreateArrayLiteral), U8(1), U8(0), U8(3),
+ B(CreateArrayLiteral), U8(1), U8(0), U8(9),
B(Star), R(4),
B(LdaZero),
B(Star), R(3),
B(Ldar), R(0),
- /* 56 E> */ B(StaKeyedPropertySloppy), R(4), R(3), U8(1),
+ /* 56 E> */ B(StaKeyedPropertySloppy), R(4), R(3), U8(2),
B(Ldar), R(4),
- B(StaKeyedPropertySloppy), R(2), R(1), U8(6),
+ B(StaKeyedPropertySloppy), R(2), R(1), U8(7),
B(LdaSmi), U8(1),
B(Star), R(1),
- B(CreateArrayLiteral), U8(2), U8(1), U8(3),
+ B(CreateArrayLiteral), U8(2), U8(1), U8(9),
B(Star), R(4),
B(LdaZero),
B(Star), R(3),
- /* 66 E> */ B(AddSmi), U8(2), R(0), U8(3),
- B(StaKeyedPropertySloppy), R(4), R(3), U8(4),
+ /* 66 E> */ B(AddSmi), U8(2), R(0), U8(4),
+ B(StaKeyedPropertySloppy), R(4), R(3), U8(5),
B(Ldar), R(4),
- B(StaKeyedPropertySloppy), R(2), R(1), U8(6),
+ B(StaKeyedPropertySloppy), R(2), R(1), U8(7),
B(Ldar), R(2),
/* 77 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden
index 9f9a25120b..6431d8adbe 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -785,267 +783,267 @@ bytecodes: [
B(Star), R(0),
/* 2591 S> */ B(LdaConstant), U8(255),
B(Star), R(0),
- /* 2601 S> */ B(Wide), B(CreateArrayLiteral), U16(256), U16(0), U8(3),
+ /* 2601 S> */ B(Wide), B(CreateArrayLiteral), U16(256), U16(0), U8(9),
/* 2619 S> */ B(Return),
]
constant pool: [
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
index 5a1efc2889..4d78aa6bfc 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: yes
---
@@ -66,7 +64,7 @@ snippet: "
x = x + (x = 100) + (x = 101);
return x;
"
-frame size: 3
+frame size: 2
parameter count: 1
bytecode array length: 28
bytecodes: [
@@ -76,11 +74,11 @@ bytecodes: [
/* 46 S> */ B(LdaSmi), U8(100),
B(Mov), R(0), R(1),
B(Star), R(0),
- /* 57 E> */ B(Add), R(1), U8(1),
- B(Star), R(2),
+ /* 57 E> */ B(Add), R(1), U8(2),
+ B(Star), R(1),
B(LdaSmi), U8(101),
B(Star), R(0),
- /* 69 E> */ B(Add), R(2), U8(2),
+ /* 69 E> */ B(Add), R(1), U8(3),
B(Star), R(0),
/* 77 S> */ B(Nop),
/* 87 S> */ B(Return),
@@ -97,7 +95,7 @@ snippet: "
x++;
return x;
"
-frame size: 3
+frame size: 2
parameter count: 1
bytecode array length: 29
bytecodes: [
@@ -106,13 +104,13 @@ bytecodes: [
B(Star), R(0),
/* 46 S> */ B(LdaSmi), U8(56),
B(Star), R(0),
- /* 61 E> */ B(Sub), R(0), U8(1),
- B(Star), R(2),
+ /* 61 E> */ B(Sub), R(0), U8(2),
+ B(Star), R(1),
B(LdaSmi), U8(57),
B(Star), R(0),
- /* 68 E> */ B(Add), R(2), U8(2),
+ /* 68 E> */ B(Add), R(1), U8(3),
B(Star), R(0),
- /* 75 S> */ B(Inc), U8(3),
+ /* 75 S> */ B(Inc), U8(4),
B(Star), R(0),
/* 80 S> */ B(Nop),
/* 90 S> */ B(Return),
@@ -128,7 +126,7 @@ snippet: "
var y = x + (x = 1) + (x = 2) + (x = 3);
return y;
"
-frame size: 4
+frame size: 3
parameter count: 1
bytecode array length: 37
bytecodes: [
@@ -138,15 +136,15 @@ bytecodes: [
/* 76 S> */ B(LdaSmi), U8(1),
B(Mov), R(0), R(2),
B(Star), R(0),
- /* 61 E> */ B(Add), R(2), U8(1),
- B(Star), R(3),
+ /* 61 E> */ B(Add), R(2), U8(2),
+ B(Star), R(2),
B(LdaSmi), U8(2),
B(Star), R(0),
- /* 71 E> */ B(Add), R(3), U8(2),
+ /* 71 E> */ B(Add), R(2), U8(3),
B(Star), R(2),
B(LdaSmi), U8(3),
B(Star), R(0),
- /* 81 E> */ B(Add), R(2), U8(3),
+ /* 81 E> */ B(Add), R(2), U8(4),
B(Star), R(1),
/* 87 S> */ B(Nop),
/* 97 S> */ B(Return),
@@ -162,7 +160,7 @@ snippet: "
var x = x + (x = 1) + (x = 2) + (x = 3);
return x;
"
-frame size: 3
+frame size: 2
parameter count: 1
bytecode array length: 37
bytecodes: [
@@ -172,15 +170,15 @@ bytecodes: [
/* 76 S> */ B(LdaSmi), U8(1),
B(Mov), R(0), R(1),
B(Star), R(0),
- /* 61 E> */ B(Add), R(1), U8(1),
- B(Star), R(2),
+ /* 61 E> */ B(Add), R(1), U8(2),
+ B(Star), R(1),
B(LdaSmi), U8(2),
B(Star), R(0),
- /* 71 E> */ B(Add), R(2), U8(2),
+ /* 71 E> */ B(Add), R(1), U8(3),
B(Star), R(1),
B(LdaSmi), U8(3),
B(Star), R(0),
- /* 81 E> */ B(Add), R(1), U8(3),
+ /* 81 E> */ B(Add), R(1), U8(4),
B(Star), R(0),
/* 87 S> */ B(Nop),
/* 97 S> */ B(Return),
@@ -195,7 +193,7 @@ snippet: "
var x = 10, y = 20;
return x + (x = 1) + (x + 1) * (y = 2) + (y = 3) + (x = 4) + (y = 5) + y;
"
-frame size: 5
+frame size: 4
parameter count: 1
bytecode array length: 72
bytecodes: [
@@ -207,29 +205,29 @@ bytecodes: [
/* 54 S> */ B(LdaSmi), U8(1),
B(Mov), R(0), R(2),
B(Star), R(0),
- /* 68 E> */ B(Add), R(2), U8(1),
+ /* 68 E> */ B(Add), R(2), U8(2),
+ B(Star), R(2),
+ /* 76 E> */ B(AddSmi), U8(1), R(0), U8(3),
B(Star), R(3),
- /* 76 E> */ B(AddSmi), U8(1), R(0), U8(2),
- B(Star), R(4),
B(LdaSmi), U8(2),
B(Star), R(1),
- /* 88 E> */ B(Mul), R(4), U8(3),
- B(Add), R(3), U8(4),
+ /* 88 E> */ B(Mul), R(3), U8(4),
+ B(Add), R(2), U8(5),
B(Star), R(2),
B(LdaSmi), U8(3),
B(Star), R(1),
- /* 98 E> */ B(Add), R(2), U8(5),
- B(Star), R(3),
+ /* 98 E> */ B(Add), R(2), U8(6),
+ B(Star), R(2),
B(LdaSmi), U8(4),
B(Star), R(0),
- /* 108 E> */ B(Add), R(3), U8(6),
+ /* 108 E> */ B(Add), R(2), U8(7),
B(Star), R(2),
B(LdaSmi), U8(5),
B(Star), R(1),
- /* 118 E> */ B(Add), R(2), U8(7),
- B(Star), R(3),
+ /* 118 E> */ B(Add), R(2), U8(8),
+ B(Star), R(2),
B(Ldar), R(1),
- /* 125 E> */ B(Add), R(3), U8(8),
+ /* 125 E> */ B(Add), R(2), U8(9),
/* 128 S> */ B(Return),
]
constant pool: [
@@ -242,7 +240,7 @@ snippet: "
var x = 17;
return 1 + x + (x++) + (++x);
"
-frame size: 4
+frame size: 3
parameter count: 1
bytecode array length: 41
bytecodes: [
@@ -252,19 +250,19 @@ bytecodes: [
/* 46 S> */ B(LdaSmi), U8(1),
B(Star), R(1),
B(Ldar), R(0),
- /* 57 E> */ B(Add), R(1), U8(1),
- B(Star), R(2),
+ /* 57 E> */ B(Add), R(1), U8(2),
+ B(Star), R(1),
B(Ldar), R(0),
- B(ToNumber), R(1),
- B(Inc), U8(2),
+ B(ToNumber), R(2),
+ B(Inc), U8(3),
B(Star), R(0),
- B(Ldar), R(1),
- /* 63 E> */ B(Add), R(2), U8(3),
- B(Star), R(3),
+ B(Ldar), R(2),
+ /* 63 E> */ B(Add), R(1), U8(4),
+ B(Star), R(1),
B(Ldar), R(0),
- B(Inc), U8(4),
+ B(Inc), U8(5),
B(Star), R(0),
- /* 72 E> */ B(Add), R(3), U8(5),
+ /* 72 E> */ B(Add), R(1), U8(6),
/* 76 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
index 422fad3283..547e83590e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: yes
---
@@ -13,14 +11,14 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 18
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
- /* 45 S> */ B(JumpIfToBooleanTrue), U8(7),
+ /* 45 S> */ B(JumpIfToBooleanTrue), U8(8),
B(LdaZero),
- /* 56 E> */ B(TestLessThan), R(0),
+ /* 56 E> */ B(TestLessThan), R(0), U8(2),
B(JumpIfFalse), U8(5),
/* 63 S> */ B(LdaSmi), U8(1),
/* 75 S> */ B(Return),
@@ -38,14 +36,14 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 18
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
- /* 45 S> */ B(JumpIfToBooleanFalse), U8(10),
+ /* 45 S> */ B(JumpIfToBooleanFalse), U8(11),
B(LdaZero),
- /* 56 E> */ B(TestLessThan), R(0),
+ /* 56 E> */ B(TestLessThan), R(0), U8(2),
B(JumpIfFalse), U8(5),
/* 63 S> */ B(LdaSmi), U8(1),
/* 75 S> */ B(Return),
@@ -63,14 +61,14 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 22
+bytecode array length: 23
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
- /* 45 S> */ B(JumpIfToBooleanTrue), U8(7),
+ /* 45 S> */ B(JumpIfToBooleanTrue), U8(8),
B(LdaZero),
- /* 57 E> */ B(TestLessThan), R(0),
+ /* 57 E> */ B(TestLessThan), R(0), U8(2),
B(JumpIfFalse), U8(6),
B(LdaSmi), U8(2),
B(Jump), U8(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
index 6dcd2692af..19d83661f0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -65,7 +63,7 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 49
+bytecode array length: 53
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
@@ -73,23 +71,23 @@ bytecodes: [
/* 53 S> */ B(LdaSmi), U8(1),
B(Star), R(1),
/* 65 S> */ B(LdaSmi), U8(10),
- /* 65 E> */ B(TestLessThan), R(0),
- B(JumpIfFalse), U8(34),
+ /* 65 E> */ B(TestLessThan), R(0), U8(2),
+ B(JumpIfFalse), U8(37),
/* 56 E> */ B(StackCheck),
/* 75 S> */ B(LdaSmi), U8(12),
- B(Mul), R(1), U8(1),
+ B(Mul), R(1), U8(3),
B(Star), R(1),
- /* 89 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ /* 89 S> */ B(AddSmi), U8(1), R(0), U8(4),
B(Star), R(0),
/* 102 S> */ B(LdaSmi), U8(3),
- /* 108 E> */ B(TestEqual), R(0),
+ /* 108 E> */ B(TestEqual), R(0), U8(5),
B(JumpIfFalse), U8(4),
- /* 114 S> */ B(Jump), U8(10),
+ /* 114 S> */ B(Jump), U8(11),
/* 126 S> */ B(LdaSmi), U8(4),
- /* 132 E> */ B(TestEqual), R(0),
+ /* 132 E> */ B(TestEqual), R(0), U8(6),
B(JumpIfFalse), U8(4),
- /* 138 S> */ B(Jump), U8(4),
- B(Jump), U8(-36),
+ /* 138 S> */ B(Jump), U8(5),
+ B(JumpLoop), U8(-39), U8(0),
/* 147 S> */ B(Ldar), R(1),
/* 157 S> */ B(Return),
]
@@ -113,35 +111,35 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 55
+bytecode array length: 61
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 E> */ B(StackCheck),
/* 62 S> */ B(LdaZero),
- /* 68 E> */ B(TestLessThan), R(0),
+ /* 68 E> */ B(TestLessThan), R(0), U8(2),
B(JumpIfFalse), U8(4),
- /* 73 S> */ B(Jump), U8(40),
+ /* 73 S> */ B(Jump), U8(44),
/* 85 S> */ B(LdaSmi), U8(3),
- /* 91 E> */ B(TestEqual), R(0),
+ /* 91 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
- /* 97 S> */ B(Jump), U8(34),
+ /* 97 S> */ B(Jump), U8(38),
/* 106 S> */ B(LdaSmi), U8(4),
- /* 112 E> */ B(TestEqual), R(0),
+ /* 112 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
- /* 118 S> */ B(Jump), U8(26),
+ /* 118 S> */ B(Jump), U8(29),
/* 127 S> */ B(LdaSmi), U8(10),
- /* 133 E> */ B(TestEqual), R(0),
+ /* 133 E> */ B(TestEqual), R(0), U8(5),
B(JumpIfFalse), U8(4),
- /* 140 S> */ B(Jump), U8(16),
+ /* 140 S> */ B(Jump), U8(17),
/* 152 S> */ B(LdaSmi), U8(5),
- /* 158 E> */ B(TestEqual), R(0),
+ /* 158 E> */ B(TestEqual), R(0), U8(6),
B(JumpIfFalse), U8(4),
- /* 164 S> */ B(Jump), U8(10),
- /* 173 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ /* 164 S> */ B(Jump), U8(11),
+ /* 173 S> */ B(AddSmi), U8(1), R(0), U8(7),
B(Star), R(0),
- B(Jump), U8(-46),
+ B(JumpLoop), U8(-51), U8(0),
/* 186 S> */ B(Ldar), R(0),
/* 196 S> */ B(Return),
]
@@ -165,24 +163,24 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 39
+bytecode array length: 42
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 E> */ B(StackCheck),
/* 71 S> */ B(LdaSmi), U8(3),
- /* 71 E> */ B(TestLessThan), R(0),
- B(JumpIfFalse), U8(19),
+ /* 71 E> */ B(TestLessThan), R(0), U8(2),
+ B(JumpIfFalse), U8(21),
/* 62 E> */ B(StackCheck),
/* 82 S> */ B(LdaSmi), U8(2),
- /* 88 E> */ B(TestEqual), R(0),
+ /* 88 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
- /* 94 S> */ B(Jump), U8(10),
- /* 105 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ /* 94 S> */ B(Jump), U8(11),
+ /* 105 S> */ B(AddSmi), U8(1), R(0), U8(4),
B(Star), R(0),
- B(Jump), U8(-21),
- /* 122 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ B(JumpLoop), U8(-23), U8(1),
+ /* 122 S> */ B(AddSmi), U8(1), R(0), U8(5),
B(Star), R(0),
/* 135 S> */ B(Jump), U8(2),
/* 144 S> */ B(Ldar), R(0),
@@ -205,7 +203,7 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 32
+bytecode array length: 33
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
@@ -213,14 +211,14 @@ bytecodes: [
/* 54 S> */ B(LdaSmi), U8(1),
B(Star), R(1),
/* 64 S> */ B(Ldar), R(0),
- B(JumpIfToBooleanFalse), U8(18),
+ B(JumpIfToBooleanFalse), U8(19),
/* 57 E> */ B(StackCheck),
/* 71 S> */ B(LdaSmi), U8(12),
- B(Mul), R(1), U8(1),
+ B(Mul), R(1), U8(2),
B(Star), R(1),
- /* 85 S> */ B(SubSmi), U8(1), R(0), U8(2),
+ /* 85 S> */ B(SubSmi), U8(1), R(0), U8(3),
B(Star), R(0),
- B(Jump), U8(-18),
+ B(JumpLoop), U8(-18), U8(0),
/* 98 S> */ B(Ldar), R(1),
/* 108 S> */ B(Return),
]
@@ -242,7 +240,7 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 47
+bytecode array length: 53
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
@@ -251,21 +249,22 @@ bytecodes: [
B(Star), R(1),
/* 56 E> */ B(StackCheck),
/* 63 S> */ B(LdaSmi), U8(10),
- B(Mul), R(1), U8(1),
+ B(Mul), R(1), U8(2),
B(Star), R(1),
/* 77 S> */ B(LdaSmi), U8(5),
- /* 83 E> */ B(TestEqual), R(0),
+ /* 83 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
- /* 89 S> */ B(Jump), U8(22),
+ /* 89 S> */ B(Jump), U8(27),
/* 98 S> */ B(LdaSmi), U8(6),
- /* 104 E> */ B(TestEqual), R(0),
+ /* 104 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 110 S> */ B(Jump), U8(8),
- /* 122 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ /* 122 S> */ B(AddSmi), U8(1), R(0), U8(5),
B(Star), R(0),
/* 144 S> */ B(LdaSmi), U8(10),
- /* 144 E> */ B(TestLessThan), R(0),
- B(JumpIfTrue), U8(-34),
+ /* 144 E> */ B(TestLessThan), R(0), U8(6),
+ B(JumpIfFalse), U8(5),
+ B(JumpLoop), U8(-39), U8(0),
/* 151 S> */ B(Ldar), R(1),
/* 161 S> */ B(Return),
]
@@ -286,7 +285,7 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 28
+bytecode array length: 31
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
@@ -295,11 +294,12 @@ bytecodes: [
B(Star), R(1),
/* 57 E> */ B(StackCheck),
/* 64 S> */ B(LdaSmi), U8(12),
- B(Mul), R(1), U8(1),
+ B(Mul), R(1), U8(2),
B(Star), R(1),
- /* 78 S> */ B(SubSmi), U8(1), R(0), U8(2),
+ /* 78 S> */ B(SubSmi), U8(1), R(0), U8(3),
B(Star), R(0),
- /* 98 S> */ B(JumpIfToBooleanTrue), U8(-14),
+ /* 98 S> */ B(JumpIfToBooleanFalse), U8(5),
+ B(JumpLoop), U8(-16), U8(0),
/* 102 S> */ B(Ldar), R(1),
/* 112 S> */ B(Return),
]
@@ -321,7 +321,7 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 41
+bytecode array length: 43
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
@@ -330,16 +330,16 @@ bytecodes: [
B(Star), R(1),
/* 56 E> */ B(StackCheck),
/* 63 S> */ B(LdaSmi), U8(10),
- B(Mul), R(1), U8(1),
+ B(Mul), R(1), U8(2),
B(Star), R(1),
/* 77 S> */ B(LdaSmi), U8(5),
- /* 83 E> */ B(TestEqual), R(0),
+ /* 83 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
- /* 89 S> */ B(Jump), U8(16),
- /* 98 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ /* 89 S> */ B(Jump), U8(17),
+ /* 98 S> */ B(AddSmi), U8(1), R(0), U8(4),
B(Star), R(0),
/* 111 S> */ B(LdaSmi), U8(6),
- /* 117 E> */ B(TestEqual), R(0),
+ /* 117 E> */ B(TestEqual), R(0), U8(5),
B(JumpIfFalse), U8(4),
/* 123 S> */ B(Jump), U8(2),
/* 150 S> */ B(Ldar), R(1),
@@ -363,7 +363,7 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 43
+bytecode array length: 46
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
@@ -372,19 +372,19 @@ bytecodes: [
B(Star), R(1),
/* 56 E> */ B(StackCheck),
/* 63 S> */ B(LdaSmi), U8(10),
- B(Mul), R(1), U8(1),
+ B(Mul), R(1), U8(2),
B(Star), R(1),
/* 77 S> */ B(LdaSmi), U8(5),
- /* 83 E> */ B(TestEqual), R(0),
+ /* 83 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
- /* 89 S> */ B(Jump), U8(18),
- /* 98 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ /* 89 S> */ B(Jump), U8(20),
+ /* 98 S> */ B(AddSmi), U8(1), R(0), U8(4),
B(Star), R(0),
/* 111 S> */ B(LdaSmi), U8(6),
- /* 117 E> */ B(TestEqual), R(0),
+ /* 117 E> */ B(TestEqual), R(0), U8(5),
B(JumpIfFalse), U8(4),
/* 123 S> */ B(Jump), U8(2),
- B(Jump), U8(-30),
+ B(JumpLoop), U8(-32), U8(0),
/* 149 S> */ B(Ldar), R(1),
/* 159 S> */ B(Return),
]
@@ -404,23 +404,23 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 31
+bytecode array length: 34
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 E> */ B(StackCheck),
/* 58 S> */ B(LdaSmi), U8(1),
- /* 64 E> */ B(TestEqual), R(0),
+ /* 64 E> */ B(TestEqual), R(0), U8(2),
B(JumpIfFalse), U8(4),
- /* 70 S> */ B(Jump), U8(18),
+ /* 70 S> */ B(Jump), U8(20),
/* 79 S> */ B(LdaSmi), U8(2),
- /* 85 E> */ B(TestEqual), R(0),
+ /* 85 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
/* 91 S> */ B(Jump), U8(8),
- /* 103 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ /* 103 S> */ B(AddSmi), U8(1), R(0), U8(4),
B(Star), R(0),
- B(Jump), U8(-23),
+ B(JumpLoop), U8(-25), U8(0),
B(LdaUndefined),
/* 116 S> */ B(Return),
]
@@ -439,23 +439,23 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 31
+bytecode array length: 34
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 47 S> */ B(LdaZero),
B(Star), R(0),
/* 34 E> */ B(StackCheck),
/* 56 S> */ B(LdaSmi), U8(1),
- /* 62 E> */ B(TestEqual), R(0),
+ /* 62 E> */ B(TestEqual), R(0), U8(2),
B(JumpIfFalse), U8(4),
- /* 68 S> */ B(Jump), U8(18),
+ /* 68 S> */ B(Jump), U8(20),
/* 77 S> */ B(LdaSmi), U8(2),
- /* 83 E> */ B(TestEqual), R(0),
+ /* 83 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
/* 89 S> */ B(Jump), U8(8),
- /* 101 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ /* 101 S> */ B(AddSmi), U8(1), R(0), U8(4),
B(Star), R(0),
- B(Jump), U8(-23),
+ B(JumpLoop), U8(-25), U8(0),
B(LdaUndefined),
/* 114 S> */ B(Return),
]
@@ -474,23 +474,23 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 31
+bytecode array length: 34
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 E> */ B(StackCheck),
/* 68 S> */ B(LdaSmi), U8(1),
- /* 74 E> */ B(TestEqual), R(0),
+ /* 74 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
- /* 80 S> */ B(Jump), U8(18),
+ /* 80 S> */ B(Jump), U8(20),
/* 89 S> */ B(LdaSmi), U8(2),
- /* 95 E> */ B(TestEqual), R(0),
+ /* 95 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 101 S> */ B(Jump), U8(2),
- /* 55 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ /* 55 S> */ B(AddSmi), U8(1), R(0), U8(2),
B(Star), R(0),
- B(Jump), U8(-23),
+ B(JumpLoop), U8(-25), U8(0),
B(LdaUndefined),
/* 113 S> */ B(Return),
]
@@ -508,23 +508,23 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 31
+bytecode array length: 34
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 47 S> */ B(LdaZero),
B(Star), R(0),
/* 34 E> */ B(StackCheck),
/* 66 S> */ B(LdaSmi), U8(1),
- /* 72 E> */ B(TestEqual), R(0),
+ /* 72 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
- /* 78 S> */ B(Jump), U8(18),
+ /* 78 S> */ B(Jump), U8(20),
/* 87 S> */ B(LdaSmi), U8(2),
- /* 93 E> */ B(TestEqual), R(0),
+ /* 93 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 99 S> */ B(Jump), U8(2),
- /* 53 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ /* 53 S> */ B(AddSmi), U8(1), R(0), U8(2),
B(Star), R(0),
- B(Jump), U8(-23),
+ B(JumpLoop), U8(-25), U8(0),
B(LdaUndefined),
/* 111 S> */ B(Return),
]
@@ -543,7 +543,7 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 32
+bytecode array length: 34
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
@@ -551,15 +551,15 @@ bytecodes: [
/* 58 S> */ B(LdaZero),
B(Star), R(1),
/* 63 S> */ B(LdaSmi), U8(100),
- /* 63 E> */ B(TestLessThan), R(1),
- B(JumpIfFalse), U8(19),
+ /* 63 E> */ B(TestLessThan), R(1), U8(2),
+ B(JumpIfFalse), U8(20),
/* 45 E> */ B(StackCheck),
- /* 85 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ /* 85 S> */ B(AddSmi), U8(1), R(0), U8(4),
B(Star), R(0),
/* 98 S> */ B(Jump), U8(2),
- /* 72 S> */ B(AddSmi), U8(1), R(1), U8(1),
+ /* 72 S> */ B(AddSmi), U8(1), R(1), U8(3),
B(Star), R(1),
- B(Jump), U8(-21),
+ B(JumpLoop), U8(-22), U8(0),
B(LdaUndefined),
/* 110 S> */ B(Return),
]
@@ -578,7 +578,7 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 32
+bytecode array length: 33
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
@@ -586,15 +586,15 @@ bytecodes: [
/* 58 S> */ B(LdaSmi), U8(10),
B(Star), R(1),
/* 62 S> */ B(Ldar), R(1),
- B(JumpIfToBooleanFalse), U8(18),
+ B(JumpIfToBooleanFalse), U8(19),
/* 45 E> */ B(StackCheck),
/* 74 S> */ B(LdaSmi), U8(12),
- B(Mul), R(0), U8(2),
+ B(Mul), R(0), U8(3),
B(Star), R(0),
/* 67 S> */ B(Ldar), R(1),
- B(Dec), U8(1),
+ B(Dec), U8(2),
B(Star), R(1),
- B(Jump), U8(-18),
+ B(JumpLoop), U8(-18), U8(0),
/* 88 S> */ B(Ldar), R(0),
/* 98 S> */ B(Return),
]
@@ -639,7 +639,7 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 33
+bytecode array length: 35
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
@@ -647,16 +647,16 @@ bytecodes: [
/* 58 S> */ B(LdaZero),
B(Star), R(1),
/* 45 E> */ B(StackCheck),
- /* 76 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ /* 76 S> */ B(AddSmi), U8(1), R(0), U8(3),
B(Star), R(0),
/* 89 S> */ B(LdaSmi), U8(20),
- /* 95 E> */ B(TestEqual), R(0),
+ /* 95 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
- /* 102 S> */ B(Jump), U8(10),
+ /* 102 S> */ B(Jump), U8(11),
/* 69 S> */ B(Ldar), R(1),
- B(Inc), U8(1),
+ B(Inc), U8(2),
B(Star), R(1),
- B(Jump), U8(-21),
+ B(JumpLoop), U8(-22), U8(0),
/* 112 S> */ B(Ldar), R(0),
/* 122 S> */ B(Return),
]
@@ -679,25 +679,25 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 97
+bytecode array length: 104
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(1),
/* 52 S> */ B(Ldar), R(1),
- B(JumpIfToBooleanFalse), U8(89),
+ B(JumpIfToBooleanFalse), U8(96),
/* 45 E> */ B(StackCheck),
B(Ldar), R(closure),
B(CreateBlockContext), U8(0),
B(PushContext), R(3),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateClosure), U8(1), U8(2),
B(Star), R(0),
/* 73 S> */ B(LdaSmi), U8(1),
- /* 73 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 73 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
B(Mov), R(0), R(2),
- /* 106 S> */ B(LdaContextSlot), R(context), U8(4),
+ /* 106 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(2),
B(Star), R(4),
@@ -705,30 +705,30 @@ bytecodes: [
B(JumpIfToBooleanFalse), U8(8),
/* 113 S> */ B(PopContext), R(3),
B(PopContext), R(3),
- B(Jump), U8(41),
- /* 126 S> */ B(LdaContextSlot), R(context), U8(4),
+ B(Jump), U8(44),
+ /* 126 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(2),
B(Star), R(4),
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
- B(Inc), U8(1),
+ B(Inc), U8(2),
B(Star), R(4),
- /* 127 E> */ B(LdaContextSlot), R(context), U8(4),
+ /* 127 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(2),
B(Star), R(5),
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
B(Ldar), R(4),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(PopContext), R(3),
- B(Jump), U8(-89),
+ B(JumpLoop), U8(-95), U8(0),
B(LdaUndefined),
/* 137 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["z"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
index bae9bd4da3..276c8daacb 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -24,7 +22,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
- /* 56 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ /* 56 S> */ B(AddSmi), U8(1), R(0), U8(2),
B(Star), R(0),
/* 69 S> */ B(Jump), U8(2),
/* 97 S> */ B(Ldar), R(0),
@@ -48,9 +46,9 @@ snippet: "
}
return sum;
"
-frame size: 5
+frame size: 4
parameter count: 1
-bytecode array length: 64
+bytecode array length: 69
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaZero),
@@ -58,33 +56,33 @@ bytecodes: [
/* 71 S> */ B(LdaZero),
B(Star), R(1),
/* 76 S> */ B(LdaSmi), U8(10),
- /* 76 E> */ B(TestLessThan), R(1),
- B(JumpIfFalse), U8(50),
+ /* 76 E> */ B(TestLessThan), R(1), U8(2),
+ B(JumpIfFalse), U8(54),
/* 58 E> */ B(StackCheck),
/* 106 S> */ B(LdaZero),
B(Star), R(2),
/* 111 S> */ B(LdaSmi), U8(3),
- /* 111 E> */ B(TestLessThan), R(2),
- B(JumpIfFalse), U8(32),
+ /* 111 E> */ B(TestLessThan), R(2), U8(4),
+ B(JumpIfFalse), U8(34),
/* 93 E> */ B(StackCheck),
/* 129 S> */ B(Ldar), R(0),
- B(Inc), U8(3),
+ B(Inc), U8(6),
B(Star), R(0),
/* 142 S> */ B(Ldar), R(2),
- /* 150 E> */ B(Add), R(1), U8(4),
- B(Star), R(4),
+ /* 150 E> */ B(Add), R(1), U8(7),
+ B(Star), R(3),
B(LdaSmi), U8(12),
- /* 152 E> */ B(TestEqual), R(4),
+ /* 152 E> */ B(TestEqual), R(3), U8(8),
B(JumpIfFalse), U8(4),
- /* 161 S> */ B(Jump), U8(18),
+ /* 161 S> */ B(Jump), U8(20),
/* 118 S> */ B(Ldar), R(2),
- B(Inc), U8(2),
+ B(Inc), U8(5),
B(Star), R(2),
- B(Jump), U8(-34),
+ B(JumpLoop), U8(-36), U8(1),
/* 84 S> */ B(Ldar), R(1),
- B(Inc), U8(1),
+ B(Inc), U8(3),
B(Star), R(1),
- B(Jump), U8(-52),
+ B(JumpLoop), U8(-56), U8(0),
/* 188 S> */ B(Ldar), R(0),
/* 200 S> */ B(Return),
]
@@ -103,18 +101,18 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 32
+bytecode array length: 34
bytecodes: [
/* 30 E> */ B(StackCheck),
B(Ldar), R(closure),
B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateClosure), U8(1), U8(2),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), U8(10),
- /* 53 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 53 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
B(Mov), R(0), R(1),
B(Ldar), R(0),
/* 88 S> */ B(Jump), U8(2),
@@ -123,8 +121,8 @@ bytecodes: [
/* 103 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -144,60 +142,60 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 107
+bytecode array length: 116
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(2),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
B(Ldar), R(closure),
B(CreateBlockContext), U8(0),
B(PushContext), R(3),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateClosure), U8(1), U8(2),
B(Star), R(0),
/* 76 S> */ B(LdaSmi), U8(2),
- /* 76 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 76 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
B(Mov), R(0), R(1),
- /* 118 S> */ B(LdaContextSlot), R(context), U8(4),
+ /* 118 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(2),
B(Star), R(4),
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
B(JumpIfToBooleanFalse), U8(6),
/* 125 S> */ B(PopContext), R(3),
- B(Jump), U8(27),
+ B(Jump), U8(29),
/* 142 S> */ B(LdaSmi), U8(3),
B(Star), R(4),
- /* 144 E> */ B(LdaContextSlot), R(context), U8(4),
+ /* 144 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(2),
B(Star), R(5),
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
B(Ldar), R(4),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(PopContext), R(3),
/* 155 S> */ B(LdaSmi), U8(4),
B(Star), R(4),
- /* 157 E> */ B(LdaContextSlot), R(context), U8(4),
+ /* 157 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(3),
B(Star), R(5),
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
B(Ldar), R(4),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(LdaUndefined),
/* 162 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["y"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
index 45fb07ad08..49e6f71265 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: no
test function name: f
@@ -20,8 +18,8 @@ bytecode array length: 12
bytecodes: [
/* 27 E> */ B(StackCheck),
/* 32 S> */ B(LdrUndefined), R(1),
- B(LdrGlobal), U8(3), R(0),
- /* 39 E> */ B(Call), R(0), R(1), U8(1), U8(1),
+ B(LdrGlobal), U8(4), R(0),
+ /* 39 E> */ B(Call), R(0), R(1), U8(1), U8(2),
/* 44 S> */ B(Return),
]
constant pool: [
@@ -41,14 +39,14 @@ bytecode array length: 24
bytecodes: [
/* 34 E> */ B(StackCheck),
/* 39 S> */ B(LdrUndefined), R(1),
- B(LdrGlobal), U8(3), R(0),
+ B(LdrGlobal), U8(4), R(0),
B(LdaSmi), U8(1),
B(Star), R(2),
B(LdaSmi), U8(2),
B(Star), R(3),
B(LdaSmi), U8(3),
B(Star), R(4),
- /* 46 E> */ B(Call), R(0), R(1), U8(4), U8(1),
+ /* 46 E> */ B(Call), R(0), R(1), U8(4), U8(2),
/* 58 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
index 9438503ae4..b238d95954 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -13,22 +11,22 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 86
+bytecode array length: 89
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(5),
+ B(StaContextSlot), R(context), U8(6), U8(0),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(6),
+ B(StaContextSlot), R(context), U8(5), U8(0),
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateClosure), U8(0), U8(2),
/* 36 E> */ B(StaLookupSlotSloppy), U8(1),
/* 52 S> */ B(LdaConstant), U8(2),
- B(Star), R(3),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(Star), R(4),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
B(LdaConstant), U8(3),
B(Star), R(3),
B(LdaZero),
@@ -46,14 +44,14 @@ bytecodes: [
/* 62 S> */ B(LdaConstant), U8(1),
B(Star), R(3),
B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
- /* 69 E> */ B(Call), R(1), R(2), U8(1), U8(3),
+ /* 69 E> */ B(Call), R(1), R(2), U8(1), U8(4),
/* 74 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["eval"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
index 2ee9613b59..56f4f3ae59 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: no
test function name: f
@@ -16,12 +14,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 12
bytecodes: [
/* 45 E> */ B(StackCheck),
- /* 50 S> */ B(LdrGlobal), U8(3), R(0),
+ /* 50 S> */ B(LdrGlobal), U8(4), R(0),
B(Ldar), R(0),
- /* 57 E> */ B(New), R(0), R(0), U8(0),
+ /* 57 E> */ B(New), R(0), R(0), U8(0), U8(2),
/* 68 S> */ B(Return),
]
constant pool: [
@@ -37,14 +35,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 16
bytecodes: [
/* 58 E> */ B(StackCheck),
- /* 63 S> */ B(LdrGlobal), U8(3), R(0),
+ /* 63 S> */ B(LdrGlobal), U8(4), R(0),
B(LdaSmi), U8(3),
B(Star), R(1),
B(Ldar), R(0),
- /* 70 E> */ B(New), R(0), R(1), U8(1),
+ /* 70 E> */ B(New), R(0), R(1), U8(1), U8(2),
/* 82 S> */ B(Return),
]
constant pool: [
@@ -65,10 +63,10 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 23
+bytecode array length: 24
bytecodes: [
/* 100 E> */ B(StackCheck),
- /* 105 S> */ B(LdrGlobal), U8(3), R(0),
+ /* 105 S> */ B(LdrGlobal), U8(4), R(0),
B(LdaSmi), U8(3),
B(Star), R(1),
B(LdaSmi), U8(4),
@@ -76,7 +74,7 @@ bytecodes: [
B(LdaSmi), U8(5),
B(Star), R(3),
B(Ldar), R(0),
- /* 112 E> */ B(New), R(0), R(1), U8(3),
+ /* 112 E> */ B(New), R(0), R(1), U8(3), U8(2),
/* 130 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
index aa2a994507..3d4f5f7cc7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: no
test function name: f
@@ -78,13 +76,13 @@ bytecode array length: 14
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 15 S> */ B(LdrUndefined), R(0),
- B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(CreateArrayLiteral), U8(0), U8(0), U8(9),
B(Star), R(1),
- B(CallJSRuntime), U8(134), R(0), U8(2),
+ B(CallJSRuntime), U8(141), R(0), U8(2),
/* 44 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
index 865a4c3000..8a381f803f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: no
test function name: test
@@ -29,20 +27,20 @@ bytecodes: [
B(Mov), R(closure), R(0),
/* 99 E> */ B(StackCheck),
/* 104 S> */ B(LdaConstant), U8(0),
- /* 111 E> */ B(LdrKeyedProperty), R(closure), U8(3), R(4),
+ /* 111 E> */ B(LdrKeyedProperty), R(closure), U8(4), R(4),
B(LdaConstant), U8(1),
B(Star), R(5),
B(Mov), R(this), R(3),
B(CallRuntime), U16(Runtime::kLoadFromSuper), R(3), U8(3),
B(Star), R(1),
- /* 117 E> */ B(Call), R(1), R(this), U8(1), U8(1),
- B(Star), R(3),
- B(AddSmi), U8(1), R(3), U8(7),
+ /* 117 E> */ B(Call), R(1), R(this), U8(1), U8(2),
+ B(Star), R(1),
+ B(AddSmi), U8(1), R(1), U8(8),
/* 131 S> */ B(Return),
]
constant pool: [
- InstanceType::SYMBOL_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["method"],
]
handlers: [
]
@@ -69,7 +67,7 @@ bytecodes: [
B(Mov), R(closure), R(0),
/* 125 E> */ B(StackCheck),
/* 130 S> */ B(LdaConstant), U8(0),
- /* 130 E> */ B(LdrKeyedProperty), R(closure), U8(1), R(2),
+ /* 130 E> */ B(LdrKeyedProperty), R(closure), U8(2), R(2),
B(LdaConstant), U8(1),
B(Star), R(3),
B(LdaSmi), U8(2),
@@ -77,7 +75,7 @@ bytecodes: [
B(Mov), R(this), R(1),
/* 138 E> */ B(CallRuntime), U16(Runtime::kStoreToSuper_Strict), R(1), U8(4),
/* 143 S> */ B(LdaConstant), U8(0),
- /* 150 E> */ B(LdrKeyedProperty), R(closure), U8(3), R(2),
+ /* 150 E> */ B(LdrKeyedProperty), R(closure), U8(4), R(2),
B(LdaConstant), U8(1),
B(Star), R(3),
B(Mov), R(this), R(1),
@@ -85,8 +83,8 @@ bytecodes: [
/* 159 S> */ B(Return),
]
constant pool: [
- InstanceType::SYMBOL_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -106,17 +104,18 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 79
+bytecode array length: 82
bytecodes: [
B(Mov), R(closure), R(1),
B(Mov), R(new_target), R(0),
+ B(Ldar), R(new_target),
/* 113 E> */ B(StackCheck),
- /* 118 S> */ B(CallRuntime), U16(Runtime::k_GetSuperConstructor), R(closure), U8(1),
+ /* 118 S> */ B(CallRuntime), U16(Runtime::k_GetSuperConstructor), R(1), U8(1),
B(Star), R(2),
B(LdaSmi), U8(1),
B(Star), R(3),
- B(Ldar), R(new_target),
- /* 118 E> */ B(New), R(2), R(3), U8(1),
+ B(Ldar), R(0),
+ /* 118 E> */ B(New), R(2), R(3), U8(1), U8(0),
B(Star), R(2),
B(Ldar), R(this),
B(JumpIfNotHole), U8(4),
@@ -132,7 +131,7 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
B(Star), R(2),
B(LdaSmi), U8(2),
- /* 136 E> */ B(StaNamedPropertyStrict), R(2), U8(1), U8(4),
+ /* 136 E> */ B(StaNamedPropertyStrict), R(2), U8(1), U8(5),
B(Ldar), R(this),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(0),
@@ -141,8 +140,8 @@ bytecodes: [
/* 141 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["this"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["y_"],
]
handlers: [
]
@@ -162,15 +161,16 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 75
+bytecode array length: 78
bytecodes: [
B(Mov), R(closure), R(1),
B(Mov), R(new_target), R(0),
+ B(Ldar), R(new_target),
/* 112 E> */ B(StackCheck),
- /* 117 S> */ B(CallRuntime), U16(Runtime::k_GetSuperConstructor), R(closure), U8(1),
+ /* 117 S> */ B(CallRuntime), U16(Runtime::k_GetSuperConstructor), R(1), U8(1),
B(Star), R(2),
- B(Ldar), R(new_target),
- /* 117 E> */ B(New), R(2), R(0), U8(0),
+ B(Ldar), R(0),
+ /* 117 E> */ B(New), R(2), R(0), U8(0), U8(0),
B(Star), R(2),
B(Ldar), R(this),
B(JumpIfNotHole), U8(4),
@@ -186,7 +186,7 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
B(Star), R(2),
B(LdaSmi), U8(2),
- /* 134 E> */ B(StaNamedPropertyStrict), R(2), U8(1), U8(4),
+ /* 134 E> */ B(StaNamedPropertyStrict), R(2), U8(1), U8(5),
B(Ldar), R(this),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(0),
@@ -195,8 +195,8 @@ bytecodes: [
/* 139 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["this"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["y_"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
index d7ebabc8e4..f1a15639a8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -33,7 +31,7 @@ bytecodes: [
B(Star), R(6),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
B(Star), R(3),
- B(LdrNamedProperty), R(3), U8(1), U8(1), R(4),
+ B(LdrNamedProperty), R(3), U8(1), U8(2), R(4),
B(LdaConstant), U8(2),
B(ToName), R(6),
B(CreateClosure), U8(3), U8(2),
@@ -52,10 +50,10 @@ bytecodes: [
/* 149 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["speak"],
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -86,7 +84,7 @@ bytecodes: [
B(Star), R(6),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
B(Star), R(3),
- B(LdrNamedProperty), R(3), U8(1), U8(1), R(4),
+ B(LdrNamedProperty), R(3), U8(1), U8(2), R(4),
B(LdaConstant), U8(2),
B(ToName), R(6),
B(CreateClosure), U8(3), U8(2),
@@ -105,10 +103,10 @@ bytecodes: [
/* 149 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["speak"],
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -124,7 +122,7 @@ snippet: "
"
frame size: 11
parameter count: 1
-bytecode array length: 123
+bytecode array length: 128
bytecodes: [
B(CreateFunctionContext), U8(2),
B(PushContext), R(3),
@@ -132,9 +130,9 @@ bytecodes: [
B(Star), R(2),
/* 30 E> */ B(StackCheck),
/* 43 S> */ B(LdaConstant), U8(0),
- /* 43 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 43 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
/* 57 S> */ B(LdaConstant), U8(1),
- /* 57 E> */ B(StaContextSlot), R(context), U8(5),
+ /* 57 E> */ B(StaContextSlot), R(context), U8(5), U8(0),
B(LdaTheHole),
B(Star), R(0),
/* 62 S> */ B(LdaTheHole),
@@ -147,8 +145,8 @@ bytecodes: [
B(Star), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
B(Star), R(4),
- B(LdrNamedProperty), R(4), U8(3), U8(1), R(5),
- /* 75 E> */ B(LdaContextSlot), R(context), U8(4),
+ B(LdrNamedProperty), R(4), U8(3), U8(2), R(5),
+ /* 75 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(ToName), R(7),
B(CreateClosure), U8(4), U8(2),
B(Star), R(8),
@@ -158,10 +156,10 @@ bytecodes: [
B(Star), R(10),
B(Mov), R(5), R(6),
B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(6), U8(5),
- /* 106 E> */ B(LdaContextSlot), R(context), U8(5),
+ /* 106 E> */ B(LdaContextSlot), R(context), U8(5), U8(0),
B(ToName), R(7),
B(LdaConstant), U8(3),
- B(TestEqualStrict), R(7),
+ B(TestEqualStrict), R(7), U8(0),
B(Mov), R(4), R(6),
B(JumpIfToBooleanFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
@@ -178,12 +176,12 @@ bytecodes: [
/* 129 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -196,7 +194,7 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 72
+bytecode array length: 74
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(3),
@@ -204,7 +202,7 @@ bytecodes: [
B(Star), R(2),
/* 30 E> */ B(StackCheck),
/* 46 S> */ B(LdaZero),
- /* 46 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 46 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
B(LdaTheHole),
B(Star), R(0),
/* 49 S> */ B(LdaTheHole),
@@ -217,23 +215,23 @@ bytecodes: [
B(Star), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
B(Star), R(4),
- B(LdrNamedProperty), R(4), U8(1), U8(1), R(5),
+ B(LdrNamedProperty), R(4), U8(1), U8(2), R(5),
B(CallRuntime), U16(Runtime::kToFastProperties), R(4), U8(1),
B(Star), R(0),
B(Star), R(1),
B(Star), R(2),
/* 87 S> */ B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(2),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
B(Star), R(4),
- /* 94 E> */ B(New), R(4), R(0), U8(0),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(Star), R(4),
+ /* 94 E> */ B(New), R(4), R(0), U8(0), U8(4),
/* 103 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
index 873857a613..053bce6e0f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -18,7 +16,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
- /* 45 S> */ B(AddSmi), U8(2), R(0), U8(1),
+ /* 45 S> */ B(AddSmi), U8(2), R(0), U8(2),
B(Mov), R(0), R(1),
B(Star), R(0),
B(LdaUndefined),
@@ -41,7 +39,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), U8(2),
- B(Div), R(0), U8(1),
+ B(Div), R(0), U8(2),
B(Mov), R(0), R(1),
B(Star), R(0),
B(LdaUndefined),
@@ -63,16 +61,16 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
B(Mov), R(1), R(0),
- /* 54 S> */ B(LdrNamedProperty), R(0), U8(1), U8(1), R(2),
+ /* 54 S> */ B(LdrNamedProperty), R(0), U8(1), U8(2), R(2),
B(LdaSmi), U8(2),
- B(Mul), R(2), U8(3),
- /* 61 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(4),
+ B(Mul), R(2), U8(4),
+ /* 61 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(5),
B(LdaUndefined),
/* 67 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
]
handlers: [
]
@@ -90,15 +88,15 @@ bytecodes: [
B(Mov), R(1), R(0),
/* 52 S> */ B(LdaSmi), U8(1),
B(Star), R(2),
- B(LdrKeyedProperty), R(0), U8(1), R(3),
+ B(LdrKeyedProperty), R(0), U8(2), R(3),
B(LdaSmi), U8(2),
- B(BitwiseXor), R(3), U8(3),
- /* 57 E> */ B(StaKeyedPropertySloppy), R(0), R(2), U8(4),
+ B(BitwiseXor), R(3), U8(4),
+ /* 57 E> */ B(StaKeyedPropertySloppy), R(0), R(2), U8(5),
B(LdaUndefined),
/* 63 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -109,22 +107,22 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 26
+bytecode array length: 29
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
/* 45 S> */ B(CreateClosure), U8(0), U8(2),
- /* 75 S> */ B(LdrContextSlot), R(context), U8(4), R(1),
- B(BitwiseOrSmi), U8(24), R(1), U8(1),
- /* 77 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 75 S> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(1),
+ B(BitwiseOrSmi), U8(24), R(1), U8(2),
+ /* 77 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
B(LdaUndefined),
/* 84 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
index ddaf989ca3..049de5a8b3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: yes
---
@@ -47,13 +45,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 18
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaZero),
B(Star), R(0),
B(LdaSmi), U8(1),
- /* 43 E> */ B(TestLessThan), R(0),
+ /* 43 E> */ B(TestLessThan), R(0), U8(2),
B(JumpIfFalse), U8(6),
B(LdaSmi), U8(2),
B(Jump), U8(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
index f2120cf876..107844cf6a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: yes
---
@@ -48,7 +46,7 @@ bytecodes: [
/* 58 S> */ B(Return),
]
constant pool: [
- "x",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -77,7 +75,7 @@ bytecodes: [
/* 55 S> */ B(Return),
]
constant pool: [
- "x",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -107,7 +105,7 @@ bytecodes: [
/* 56 S> */ B(Return),
]
constant pool: [
- "x",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
index 8bc1afcf37..f1b696bdff 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -13,22 +11,22 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 21
+bytecode array length: 23
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), U8(10),
- /* 44 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 44 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
B(LdaUndefined),
/* 74 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -39,18 +37,18 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 34
+bytecode array length: 37
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), U8(10),
- /* 44 E> */ B(StaContextSlot), R(context), U8(4),
- /* 74 S> */ B(LdaContextSlot), R(context), U8(4),
+ /* 44 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 74 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(1),
B(Star), R(2),
@@ -58,8 +56,8 @@ bytecodes: [
/* 84 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -70,30 +68,30 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 42
+bytecode array length: 45
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 47 S> */ B(LdaSmi), U8(20),
B(Star), R(2),
- /* 47 E> */ B(LdaContextSlot), R(context), U8(4),
+ /* 47 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(1),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
- /* 47 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 47 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
B(LdaUndefined),
/* 80 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -104,20 +102,20 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 44
+bytecode array length: 47
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), U8(10),
- /* 44 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 44 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
/* 48 S> */ B(LdaSmi), U8(20),
B(Star), R(2),
- /* 50 E> */ B(LdaContextSlot), R(context), U8(4),
+ /* 50 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(1),
B(Star), R(3),
@@ -127,8 +125,8 @@ bytecodes: [
/* 82 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
index f07e5ce4d7..4e65f63fa2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: no
test function name: f
@@ -15,18 +13,18 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 14
+bytecode array length: 15
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
/* 10 E> */ B(StackCheck),
/* 19 S> */ B(CreateClosure), U8(0), U8(2),
/* 52 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -38,20 +36,20 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 19
+bytecode array length: 21
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
/* 10 E> */ B(StackCheck),
/* 27 S> */ B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
- /* 53 S> */ B(LdaContextSlot), R(context), U8(4),
+ /* 53 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
/* 66 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -63,20 +61,20 @@ snippet: "
"
frame size: 1
parameter count: 5
-bytecode array length: 19
+bytecode array length: 21
bytecodes: [
B(CreateFunctionContext), U8(2),
B(PushContext), R(0),
B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(5),
+ B(StaContextSlot), R(context), U8(5), U8(0),
B(Ldar), R(arg2),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
/* 10 E> */ B(StackCheck),
/* 29 S> */ B(CreateClosure), U8(0), U8(2),
/* 61 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -88,18 +86,18 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 15
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
/* 10 E> */ B(StackCheck),
/* 26 S> */ B(Ldar), R(this),
- /* 26 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 26 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
/* 32 S> */ B(CreateClosure), U8(0), U8(2),
/* 65 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
index b3226e0d64..2eb52731bb 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -22,7 +20,7 @@ bytecodes: [
/* 71 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -33,18 +31,18 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 15
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
/* 45 S> */ B(CreateClosure), U8(0), U8(2),
/* 75 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -55,20 +53,20 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 19
+bytecode array length: 21
bytecodes: [
B(CreateFunctionContext), U8(2),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
/* 53 S> */ B(LdaSmi), U8(2),
- /* 53 E> */ B(StaContextSlot), R(context), U8(5),
+ /* 53 E> */ B(StaContextSlot), R(context), U8(5), U8(0),
/* 56 S> */ B(CreateClosure), U8(0), U8(2),
/* 92 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -79,7 +77,7 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 21
+bytecode array length: 22
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
@@ -87,12 +85,12 @@ bytecodes: [
/* 41 S> */ B(LdrUndefined), R(2),
B(CreateClosure), U8(0), U8(2),
B(Star), R(1),
- /* 64 E> */ B(Call), R(1), R(2), U8(1), U8(1),
- /* 68 S> */ B(LdaContextSlot), R(context), U8(4),
+ /* 64 E> */ B(Call), R(1), R(2), U8(1), U8(2),
+ /* 68 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
/* 78 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -105,29 +103,29 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 35
+bytecode array length: 39
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
/* 30 E> */ B(StackCheck),
/* 56 S> */ B(LdaSmi), U8(1),
- /* 56 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 56 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
B(Ldar), R(closure),
B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
/* 69 S> */ B(LdaSmi), U8(2),
- /* 69 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 69 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
/* 72 S> */ B(CreateClosure), U8(1), U8(2),
B(PopContext), R(0),
/* 104 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -384,528 +382,531 @@ snippet: "
var a246 = 0;
var a247 = 0;
var a248 = 0;
+ var a249 = 0;
eval();
var b = 100;
return b
"
frame size: 3
parameter count: 1
-bytecode array length: 1040
+bytecode array length: 1305
bytecodes: [
- B(CreateFunctionContext), U8(253),
+ B(CreateFunctionContext), U8(254),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateUnmappedArguments),
- B(StaContextSlot), R(context), U8(5),
+ B(Wide), B(StaContextSlot), R16(context), U16(257), U16(0),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(6),
+ B(StaContextSlot), R(context), U8(5), U8(0),
/* 30 E> */ B(StackCheck),
/* 57 S> */ B(LdaZero),
- /* 57 E> */ B(StaContextSlot), R(context), U8(7),
+ /* 57 E> */ B(StaContextSlot), R(context), U8(6), U8(0),
/* 69 S> */ B(LdaZero),
- /* 69 E> */ B(StaContextSlot), R(context), U8(8),
+ /* 69 E> */ B(StaContextSlot), R(context), U8(7), U8(0),
/* 81 S> */ B(LdaZero),
- /* 81 E> */ B(StaContextSlot), R(context), U8(9),
+ /* 81 E> */ B(StaContextSlot), R(context), U8(8), U8(0),
/* 93 S> */ B(LdaZero),
- /* 93 E> */ B(StaContextSlot), R(context), U8(10),
+ /* 93 E> */ B(StaContextSlot), R(context), U8(9), U8(0),
/* 105 S> */ B(LdaZero),
- /* 105 E> */ B(StaContextSlot), R(context), U8(11),
+ /* 105 E> */ B(StaContextSlot), R(context), U8(10), U8(0),
/* 117 S> */ B(LdaZero),
- /* 117 E> */ B(StaContextSlot), R(context), U8(12),
+ /* 117 E> */ B(StaContextSlot), R(context), U8(11), U8(0),
/* 129 S> */ B(LdaZero),
- /* 129 E> */ B(StaContextSlot), R(context), U8(13),
+ /* 129 E> */ B(StaContextSlot), R(context), U8(12), U8(0),
/* 141 S> */ B(LdaZero),
- /* 141 E> */ B(StaContextSlot), R(context), U8(14),
+ /* 141 E> */ B(StaContextSlot), R(context), U8(13), U8(0),
/* 153 S> */ B(LdaZero),
- /* 153 E> */ B(StaContextSlot), R(context), U8(15),
+ /* 153 E> */ B(StaContextSlot), R(context), U8(14), U8(0),
/* 165 S> */ B(LdaZero),
- /* 165 E> */ B(StaContextSlot), R(context), U8(16),
+ /* 165 E> */ B(StaContextSlot), R(context), U8(15), U8(0),
/* 178 S> */ B(LdaZero),
- /* 178 E> */ B(StaContextSlot), R(context), U8(17),
+ /* 178 E> */ B(StaContextSlot), R(context), U8(16), U8(0),
/* 191 S> */ B(LdaZero),
- /* 191 E> */ B(StaContextSlot), R(context), U8(18),
+ /* 191 E> */ B(StaContextSlot), R(context), U8(17), U8(0),
/* 204 S> */ B(LdaZero),
- /* 204 E> */ B(StaContextSlot), R(context), U8(19),
+ /* 204 E> */ B(StaContextSlot), R(context), U8(18), U8(0),
/* 217 S> */ B(LdaZero),
- /* 217 E> */ B(StaContextSlot), R(context), U8(20),
+ /* 217 E> */ B(StaContextSlot), R(context), U8(19), U8(0),
/* 230 S> */ B(LdaZero),
- /* 230 E> */ B(StaContextSlot), R(context), U8(21),
+ /* 230 E> */ B(StaContextSlot), R(context), U8(20), U8(0),
/* 243 S> */ B(LdaZero),
- /* 243 E> */ B(StaContextSlot), R(context), U8(22),
+ /* 243 E> */ B(StaContextSlot), R(context), U8(21), U8(0),
/* 256 S> */ B(LdaZero),
- /* 256 E> */ B(StaContextSlot), R(context), U8(23),
+ /* 256 E> */ B(StaContextSlot), R(context), U8(22), U8(0),
/* 269 S> */ B(LdaZero),
- /* 269 E> */ B(StaContextSlot), R(context), U8(24),
+ /* 269 E> */ B(StaContextSlot), R(context), U8(23), U8(0),
/* 282 S> */ B(LdaZero),
- /* 282 E> */ B(StaContextSlot), R(context), U8(25),
+ /* 282 E> */ B(StaContextSlot), R(context), U8(24), U8(0),
/* 295 S> */ B(LdaZero),
- /* 295 E> */ B(StaContextSlot), R(context), U8(26),
+ /* 295 E> */ B(StaContextSlot), R(context), U8(25), U8(0),
/* 308 S> */ B(LdaZero),
- /* 308 E> */ B(StaContextSlot), R(context), U8(27),
+ /* 308 E> */ B(StaContextSlot), R(context), U8(26), U8(0),
/* 321 S> */ B(LdaZero),
- /* 321 E> */ B(StaContextSlot), R(context), U8(28),
+ /* 321 E> */ B(StaContextSlot), R(context), U8(27), U8(0),
/* 334 S> */ B(LdaZero),
- /* 334 E> */ B(StaContextSlot), R(context), U8(29),
+ /* 334 E> */ B(StaContextSlot), R(context), U8(28), U8(0),
/* 347 S> */ B(LdaZero),
- /* 347 E> */ B(StaContextSlot), R(context), U8(30),
+ /* 347 E> */ B(StaContextSlot), R(context), U8(29), U8(0),
/* 360 S> */ B(LdaZero),
- /* 360 E> */ B(StaContextSlot), R(context), U8(31),
+ /* 360 E> */ B(StaContextSlot), R(context), U8(30), U8(0),
/* 373 S> */ B(LdaZero),
- /* 373 E> */ B(StaContextSlot), R(context), U8(32),
+ /* 373 E> */ B(StaContextSlot), R(context), U8(31), U8(0),
/* 386 S> */ B(LdaZero),
- /* 386 E> */ B(StaContextSlot), R(context), U8(33),
+ /* 386 E> */ B(StaContextSlot), R(context), U8(32), U8(0),
/* 399 S> */ B(LdaZero),
- /* 399 E> */ B(StaContextSlot), R(context), U8(34),
+ /* 399 E> */ B(StaContextSlot), R(context), U8(33), U8(0),
/* 412 S> */ B(LdaZero),
- /* 412 E> */ B(StaContextSlot), R(context), U8(35),
+ /* 412 E> */ B(StaContextSlot), R(context), U8(34), U8(0),
/* 425 S> */ B(LdaZero),
- /* 425 E> */ B(StaContextSlot), R(context), U8(36),
+ /* 425 E> */ B(StaContextSlot), R(context), U8(35), U8(0),
/* 438 S> */ B(LdaZero),
- /* 438 E> */ B(StaContextSlot), R(context), U8(37),
+ /* 438 E> */ B(StaContextSlot), R(context), U8(36), U8(0),
/* 451 S> */ B(LdaZero),
- /* 451 E> */ B(StaContextSlot), R(context), U8(38),
+ /* 451 E> */ B(StaContextSlot), R(context), U8(37), U8(0),
/* 464 S> */ B(LdaZero),
- /* 464 E> */ B(StaContextSlot), R(context), U8(39),
+ /* 464 E> */ B(StaContextSlot), R(context), U8(38), U8(0),
/* 477 S> */ B(LdaZero),
- /* 477 E> */ B(StaContextSlot), R(context), U8(40),
+ /* 477 E> */ B(StaContextSlot), R(context), U8(39), U8(0),
/* 490 S> */ B(LdaZero),
- /* 490 E> */ B(StaContextSlot), R(context), U8(41),
+ /* 490 E> */ B(StaContextSlot), R(context), U8(40), U8(0),
/* 503 S> */ B(LdaZero),
- /* 503 E> */ B(StaContextSlot), R(context), U8(42),
+ /* 503 E> */ B(StaContextSlot), R(context), U8(41), U8(0),
/* 516 S> */ B(LdaZero),
- /* 516 E> */ B(StaContextSlot), R(context), U8(43),
+ /* 516 E> */ B(StaContextSlot), R(context), U8(42), U8(0),
/* 529 S> */ B(LdaZero),
- /* 529 E> */ B(StaContextSlot), R(context), U8(44),
+ /* 529 E> */ B(StaContextSlot), R(context), U8(43), U8(0),
/* 542 S> */ B(LdaZero),
- /* 542 E> */ B(StaContextSlot), R(context), U8(45),
+ /* 542 E> */ B(StaContextSlot), R(context), U8(44), U8(0),
/* 555 S> */ B(LdaZero),
- /* 555 E> */ B(StaContextSlot), R(context), U8(46),
+ /* 555 E> */ B(StaContextSlot), R(context), U8(45), U8(0),
/* 568 S> */ B(LdaZero),
- /* 568 E> */ B(StaContextSlot), R(context), U8(47),
+ /* 568 E> */ B(StaContextSlot), R(context), U8(46), U8(0),
/* 581 S> */ B(LdaZero),
- /* 581 E> */ B(StaContextSlot), R(context), U8(48),
+ /* 581 E> */ B(StaContextSlot), R(context), U8(47), U8(0),
/* 594 S> */ B(LdaZero),
- /* 594 E> */ B(StaContextSlot), R(context), U8(49),
+ /* 594 E> */ B(StaContextSlot), R(context), U8(48), U8(0),
/* 607 S> */ B(LdaZero),
- /* 607 E> */ B(StaContextSlot), R(context), U8(50),
+ /* 607 E> */ B(StaContextSlot), R(context), U8(49), U8(0),
/* 620 S> */ B(LdaZero),
- /* 620 E> */ B(StaContextSlot), R(context), U8(51),
+ /* 620 E> */ B(StaContextSlot), R(context), U8(50), U8(0),
/* 633 S> */ B(LdaZero),
- /* 633 E> */ B(StaContextSlot), R(context), U8(52),
+ /* 633 E> */ B(StaContextSlot), R(context), U8(51), U8(0),
/* 646 S> */ B(LdaZero),
- /* 646 E> */ B(StaContextSlot), R(context), U8(53),
+ /* 646 E> */ B(StaContextSlot), R(context), U8(52), U8(0),
/* 659 S> */ B(LdaZero),
- /* 659 E> */ B(StaContextSlot), R(context), U8(54),
+ /* 659 E> */ B(StaContextSlot), R(context), U8(53), U8(0),
/* 672 S> */ B(LdaZero),
- /* 672 E> */ B(StaContextSlot), R(context), U8(55),
+ /* 672 E> */ B(StaContextSlot), R(context), U8(54), U8(0),
/* 685 S> */ B(LdaZero),
- /* 685 E> */ B(StaContextSlot), R(context), U8(56),
+ /* 685 E> */ B(StaContextSlot), R(context), U8(55), U8(0),
/* 698 S> */ B(LdaZero),
- /* 698 E> */ B(StaContextSlot), R(context), U8(57),
+ /* 698 E> */ B(StaContextSlot), R(context), U8(56), U8(0),
/* 711 S> */ B(LdaZero),
- /* 711 E> */ B(StaContextSlot), R(context), U8(58),
+ /* 711 E> */ B(StaContextSlot), R(context), U8(57), U8(0),
/* 724 S> */ B(LdaZero),
- /* 724 E> */ B(StaContextSlot), R(context), U8(59),
+ /* 724 E> */ B(StaContextSlot), R(context), U8(58), U8(0),
/* 737 S> */ B(LdaZero),
- /* 737 E> */ B(StaContextSlot), R(context), U8(60),
+ /* 737 E> */ B(StaContextSlot), R(context), U8(59), U8(0),
/* 750 S> */ B(LdaZero),
- /* 750 E> */ B(StaContextSlot), R(context), U8(61),
+ /* 750 E> */ B(StaContextSlot), R(context), U8(60), U8(0),
/* 763 S> */ B(LdaZero),
- /* 763 E> */ B(StaContextSlot), R(context), U8(62),
+ /* 763 E> */ B(StaContextSlot), R(context), U8(61), U8(0),
/* 776 S> */ B(LdaZero),
- /* 776 E> */ B(StaContextSlot), R(context), U8(63),
+ /* 776 E> */ B(StaContextSlot), R(context), U8(62), U8(0),
/* 789 S> */ B(LdaZero),
- /* 789 E> */ B(StaContextSlot), R(context), U8(64),
+ /* 789 E> */ B(StaContextSlot), R(context), U8(63), U8(0),
/* 802 S> */ B(LdaZero),
- /* 802 E> */ B(StaContextSlot), R(context), U8(65),
+ /* 802 E> */ B(StaContextSlot), R(context), U8(64), U8(0),
/* 815 S> */ B(LdaZero),
- /* 815 E> */ B(StaContextSlot), R(context), U8(66),
+ /* 815 E> */ B(StaContextSlot), R(context), U8(65), U8(0),
/* 828 S> */ B(LdaZero),
- /* 828 E> */ B(StaContextSlot), R(context), U8(67),
+ /* 828 E> */ B(StaContextSlot), R(context), U8(66), U8(0),
/* 841 S> */ B(LdaZero),
- /* 841 E> */ B(StaContextSlot), R(context), U8(68),
+ /* 841 E> */ B(StaContextSlot), R(context), U8(67), U8(0),
/* 854 S> */ B(LdaZero),
- /* 854 E> */ B(StaContextSlot), R(context), U8(69),
+ /* 854 E> */ B(StaContextSlot), R(context), U8(68), U8(0),
/* 867 S> */ B(LdaZero),
- /* 867 E> */ B(StaContextSlot), R(context), U8(70),
+ /* 867 E> */ B(StaContextSlot), R(context), U8(69), U8(0),
/* 880 S> */ B(LdaZero),
- /* 880 E> */ B(StaContextSlot), R(context), U8(71),
+ /* 880 E> */ B(StaContextSlot), R(context), U8(70), U8(0),
/* 893 S> */ B(LdaZero),
- /* 893 E> */ B(StaContextSlot), R(context), U8(72),
+ /* 893 E> */ B(StaContextSlot), R(context), U8(71), U8(0),
/* 906 S> */ B(LdaZero),
- /* 906 E> */ B(StaContextSlot), R(context), U8(73),
+ /* 906 E> */ B(StaContextSlot), R(context), U8(72), U8(0),
/* 919 S> */ B(LdaZero),
- /* 919 E> */ B(StaContextSlot), R(context), U8(74),
+ /* 919 E> */ B(StaContextSlot), R(context), U8(73), U8(0),
/* 932 S> */ B(LdaZero),
- /* 932 E> */ B(StaContextSlot), R(context), U8(75),
+ /* 932 E> */ B(StaContextSlot), R(context), U8(74), U8(0),
/* 945 S> */ B(LdaZero),
- /* 945 E> */ B(StaContextSlot), R(context), U8(76),
+ /* 945 E> */ B(StaContextSlot), R(context), U8(75), U8(0),
/* 958 S> */ B(LdaZero),
- /* 958 E> */ B(StaContextSlot), R(context), U8(77),
+ /* 958 E> */ B(StaContextSlot), R(context), U8(76), U8(0),
/* 971 S> */ B(LdaZero),
- /* 971 E> */ B(StaContextSlot), R(context), U8(78),
+ /* 971 E> */ B(StaContextSlot), R(context), U8(77), U8(0),
/* 984 S> */ B(LdaZero),
- /* 984 E> */ B(StaContextSlot), R(context), U8(79),
+ /* 984 E> */ B(StaContextSlot), R(context), U8(78), U8(0),
/* 997 S> */ B(LdaZero),
- /* 997 E> */ B(StaContextSlot), R(context), U8(80),
+ /* 997 E> */ B(StaContextSlot), R(context), U8(79), U8(0),
/* 1010 S> */ B(LdaZero),
- /* 1010 E> */ B(StaContextSlot), R(context), U8(81),
+ /* 1010 E> */ B(StaContextSlot), R(context), U8(80), U8(0),
/* 1023 S> */ B(LdaZero),
- /* 1023 E> */ B(StaContextSlot), R(context), U8(82),
+ /* 1023 E> */ B(StaContextSlot), R(context), U8(81), U8(0),
/* 1036 S> */ B(LdaZero),
- /* 1036 E> */ B(StaContextSlot), R(context), U8(83),
+ /* 1036 E> */ B(StaContextSlot), R(context), U8(82), U8(0),
/* 1049 S> */ B(LdaZero),
- /* 1049 E> */ B(StaContextSlot), R(context), U8(84),
+ /* 1049 E> */ B(StaContextSlot), R(context), U8(83), U8(0),
/* 1062 S> */ B(LdaZero),
- /* 1062 E> */ B(StaContextSlot), R(context), U8(85),
+ /* 1062 E> */ B(StaContextSlot), R(context), U8(84), U8(0),
/* 1075 S> */ B(LdaZero),
- /* 1075 E> */ B(StaContextSlot), R(context), U8(86),
+ /* 1075 E> */ B(StaContextSlot), R(context), U8(85), U8(0),
/* 1088 S> */ B(LdaZero),
- /* 1088 E> */ B(StaContextSlot), R(context), U8(87),
+ /* 1088 E> */ B(StaContextSlot), R(context), U8(86), U8(0),
/* 1101 S> */ B(LdaZero),
- /* 1101 E> */ B(StaContextSlot), R(context), U8(88),
+ /* 1101 E> */ B(StaContextSlot), R(context), U8(87), U8(0),
/* 1114 S> */ B(LdaZero),
- /* 1114 E> */ B(StaContextSlot), R(context), U8(89),
+ /* 1114 E> */ B(StaContextSlot), R(context), U8(88), U8(0),
/* 1127 S> */ B(LdaZero),
- /* 1127 E> */ B(StaContextSlot), R(context), U8(90),
+ /* 1127 E> */ B(StaContextSlot), R(context), U8(89), U8(0),
/* 1140 S> */ B(LdaZero),
- /* 1140 E> */ B(StaContextSlot), R(context), U8(91),
+ /* 1140 E> */ B(StaContextSlot), R(context), U8(90), U8(0),
/* 1153 S> */ B(LdaZero),
- /* 1153 E> */ B(StaContextSlot), R(context), U8(92),
+ /* 1153 E> */ B(StaContextSlot), R(context), U8(91), U8(0),
/* 1166 S> */ B(LdaZero),
- /* 1166 E> */ B(StaContextSlot), R(context), U8(93),
+ /* 1166 E> */ B(StaContextSlot), R(context), U8(92), U8(0),
/* 1179 S> */ B(LdaZero),
- /* 1179 E> */ B(StaContextSlot), R(context), U8(94),
+ /* 1179 E> */ B(StaContextSlot), R(context), U8(93), U8(0),
/* 1192 S> */ B(LdaZero),
- /* 1192 E> */ B(StaContextSlot), R(context), U8(95),
+ /* 1192 E> */ B(StaContextSlot), R(context), U8(94), U8(0),
/* 1205 S> */ B(LdaZero),
- /* 1205 E> */ B(StaContextSlot), R(context), U8(96),
+ /* 1205 E> */ B(StaContextSlot), R(context), U8(95), U8(0),
/* 1218 S> */ B(LdaZero),
- /* 1218 E> */ B(StaContextSlot), R(context), U8(97),
+ /* 1218 E> */ B(StaContextSlot), R(context), U8(96), U8(0),
/* 1231 S> */ B(LdaZero),
- /* 1231 E> */ B(StaContextSlot), R(context), U8(98),
+ /* 1231 E> */ B(StaContextSlot), R(context), U8(97), U8(0),
/* 1244 S> */ B(LdaZero),
- /* 1244 E> */ B(StaContextSlot), R(context), U8(99),
+ /* 1244 E> */ B(StaContextSlot), R(context), U8(98), U8(0),
/* 1257 S> */ B(LdaZero),
- /* 1257 E> */ B(StaContextSlot), R(context), U8(100),
+ /* 1257 E> */ B(StaContextSlot), R(context), U8(99), U8(0),
/* 1270 S> */ B(LdaZero),
- /* 1270 E> */ B(StaContextSlot), R(context), U8(101),
+ /* 1270 E> */ B(StaContextSlot), R(context), U8(100), U8(0),
/* 1283 S> */ B(LdaZero),
- /* 1283 E> */ B(StaContextSlot), R(context), U8(102),
+ /* 1283 E> */ B(StaContextSlot), R(context), U8(101), U8(0),
/* 1296 S> */ B(LdaZero),
- /* 1296 E> */ B(StaContextSlot), R(context), U8(103),
+ /* 1296 E> */ B(StaContextSlot), R(context), U8(102), U8(0),
/* 1309 S> */ B(LdaZero),
- /* 1309 E> */ B(StaContextSlot), R(context), U8(104),
+ /* 1309 E> */ B(StaContextSlot), R(context), U8(103), U8(0),
/* 1322 S> */ B(LdaZero),
- /* 1322 E> */ B(StaContextSlot), R(context), U8(105),
+ /* 1322 E> */ B(StaContextSlot), R(context), U8(104), U8(0),
/* 1335 S> */ B(LdaZero),
- /* 1335 E> */ B(StaContextSlot), R(context), U8(106),
+ /* 1335 E> */ B(StaContextSlot), R(context), U8(105), U8(0),
/* 1349 S> */ B(LdaZero),
- /* 1349 E> */ B(StaContextSlot), R(context), U8(107),
+ /* 1349 E> */ B(StaContextSlot), R(context), U8(106), U8(0),
/* 1363 S> */ B(LdaZero),
- /* 1363 E> */ B(StaContextSlot), R(context), U8(108),
+ /* 1363 E> */ B(StaContextSlot), R(context), U8(107), U8(0),
/* 1377 S> */ B(LdaZero),
- /* 1377 E> */ B(StaContextSlot), R(context), U8(109),
+ /* 1377 E> */ B(StaContextSlot), R(context), U8(108), U8(0),
/* 1391 S> */ B(LdaZero),
- /* 1391 E> */ B(StaContextSlot), R(context), U8(110),
+ /* 1391 E> */ B(StaContextSlot), R(context), U8(109), U8(0),
/* 1405 S> */ B(LdaZero),
- /* 1405 E> */ B(StaContextSlot), R(context), U8(111),
+ /* 1405 E> */ B(StaContextSlot), R(context), U8(110), U8(0),
/* 1419 S> */ B(LdaZero),
- /* 1419 E> */ B(StaContextSlot), R(context), U8(112),
+ /* 1419 E> */ B(StaContextSlot), R(context), U8(111), U8(0),
/* 1433 S> */ B(LdaZero),
- /* 1433 E> */ B(StaContextSlot), R(context), U8(113),
+ /* 1433 E> */ B(StaContextSlot), R(context), U8(112), U8(0),
/* 1447 S> */ B(LdaZero),
- /* 1447 E> */ B(StaContextSlot), R(context), U8(114),
+ /* 1447 E> */ B(StaContextSlot), R(context), U8(113), U8(0),
/* 1461 S> */ B(LdaZero),
- /* 1461 E> */ B(StaContextSlot), R(context), U8(115),
+ /* 1461 E> */ B(StaContextSlot), R(context), U8(114), U8(0),
/* 1475 S> */ B(LdaZero),
- /* 1475 E> */ B(StaContextSlot), R(context), U8(116),
+ /* 1475 E> */ B(StaContextSlot), R(context), U8(115), U8(0),
/* 1489 S> */ B(LdaZero),
- /* 1489 E> */ B(StaContextSlot), R(context), U8(117),
+ /* 1489 E> */ B(StaContextSlot), R(context), U8(116), U8(0),
/* 1503 S> */ B(LdaZero),
- /* 1503 E> */ B(StaContextSlot), R(context), U8(118),
+ /* 1503 E> */ B(StaContextSlot), R(context), U8(117), U8(0),
/* 1517 S> */ B(LdaZero),
- /* 1517 E> */ B(StaContextSlot), R(context), U8(119),
+ /* 1517 E> */ B(StaContextSlot), R(context), U8(118), U8(0),
/* 1531 S> */ B(LdaZero),
- /* 1531 E> */ B(StaContextSlot), R(context), U8(120),
+ /* 1531 E> */ B(StaContextSlot), R(context), U8(119), U8(0),
/* 1545 S> */ B(LdaZero),
- /* 1545 E> */ B(StaContextSlot), R(context), U8(121),
+ /* 1545 E> */ B(StaContextSlot), R(context), U8(120), U8(0),
/* 1559 S> */ B(LdaZero),
- /* 1559 E> */ B(StaContextSlot), R(context), U8(122),
+ /* 1559 E> */ B(StaContextSlot), R(context), U8(121), U8(0),
/* 1573 S> */ B(LdaZero),
- /* 1573 E> */ B(StaContextSlot), R(context), U8(123),
+ /* 1573 E> */ B(StaContextSlot), R(context), U8(122), U8(0),
/* 1587 S> */ B(LdaZero),
- /* 1587 E> */ B(StaContextSlot), R(context), U8(124),
+ /* 1587 E> */ B(StaContextSlot), R(context), U8(123), U8(0),
/* 1601 S> */ B(LdaZero),
- /* 1601 E> */ B(StaContextSlot), R(context), U8(125),
+ /* 1601 E> */ B(StaContextSlot), R(context), U8(124), U8(0),
/* 1615 S> */ B(LdaZero),
- /* 1615 E> */ B(StaContextSlot), R(context), U8(126),
+ /* 1615 E> */ B(StaContextSlot), R(context), U8(125), U8(0),
/* 1629 S> */ B(LdaZero),
- /* 1629 E> */ B(StaContextSlot), R(context), U8(127),
+ /* 1629 E> */ B(StaContextSlot), R(context), U8(126), U8(0),
/* 1643 S> */ B(LdaZero),
- /* 1643 E> */ B(StaContextSlot), R(context), U8(128),
+ /* 1643 E> */ B(StaContextSlot), R(context), U8(127), U8(0),
/* 1657 S> */ B(LdaZero),
- /* 1657 E> */ B(StaContextSlot), R(context), U8(129),
+ /* 1657 E> */ B(StaContextSlot), R(context), U8(128), U8(0),
/* 1671 S> */ B(LdaZero),
- /* 1671 E> */ B(StaContextSlot), R(context), U8(130),
+ /* 1671 E> */ B(StaContextSlot), R(context), U8(129), U8(0),
/* 1685 S> */ B(LdaZero),
- /* 1685 E> */ B(StaContextSlot), R(context), U8(131),
+ /* 1685 E> */ B(StaContextSlot), R(context), U8(130), U8(0),
/* 1699 S> */ B(LdaZero),
- /* 1699 E> */ B(StaContextSlot), R(context), U8(132),
+ /* 1699 E> */ B(StaContextSlot), R(context), U8(131), U8(0),
/* 1713 S> */ B(LdaZero),
- /* 1713 E> */ B(StaContextSlot), R(context), U8(133),
+ /* 1713 E> */ B(StaContextSlot), R(context), U8(132), U8(0),
/* 1727 S> */ B(LdaZero),
- /* 1727 E> */ B(StaContextSlot), R(context), U8(134),
+ /* 1727 E> */ B(StaContextSlot), R(context), U8(133), U8(0),
/* 1741 S> */ B(LdaZero),
- /* 1741 E> */ B(StaContextSlot), R(context), U8(135),
+ /* 1741 E> */ B(StaContextSlot), R(context), U8(134), U8(0),
/* 1755 S> */ B(LdaZero),
- /* 1755 E> */ B(StaContextSlot), R(context), U8(136),
+ /* 1755 E> */ B(StaContextSlot), R(context), U8(135), U8(0),
/* 1769 S> */ B(LdaZero),
- /* 1769 E> */ B(StaContextSlot), R(context), U8(137),
+ /* 1769 E> */ B(StaContextSlot), R(context), U8(136), U8(0),
/* 1783 S> */ B(LdaZero),
- /* 1783 E> */ B(StaContextSlot), R(context), U8(138),
+ /* 1783 E> */ B(StaContextSlot), R(context), U8(137), U8(0),
/* 1797 S> */ B(LdaZero),
- /* 1797 E> */ B(StaContextSlot), R(context), U8(139),
+ /* 1797 E> */ B(StaContextSlot), R(context), U8(138), U8(0),
/* 1811 S> */ B(LdaZero),
- /* 1811 E> */ B(StaContextSlot), R(context), U8(140),
+ /* 1811 E> */ B(StaContextSlot), R(context), U8(139), U8(0),
/* 1825 S> */ B(LdaZero),
- /* 1825 E> */ B(StaContextSlot), R(context), U8(141),
+ /* 1825 E> */ B(StaContextSlot), R(context), U8(140), U8(0),
/* 1839 S> */ B(LdaZero),
- /* 1839 E> */ B(StaContextSlot), R(context), U8(142),
+ /* 1839 E> */ B(StaContextSlot), R(context), U8(141), U8(0),
/* 1853 S> */ B(LdaZero),
- /* 1853 E> */ B(StaContextSlot), R(context), U8(143),
+ /* 1853 E> */ B(StaContextSlot), R(context), U8(142), U8(0),
/* 1867 S> */ B(LdaZero),
- /* 1867 E> */ B(StaContextSlot), R(context), U8(144),
+ /* 1867 E> */ B(StaContextSlot), R(context), U8(143), U8(0),
/* 1881 S> */ B(LdaZero),
- /* 1881 E> */ B(StaContextSlot), R(context), U8(145),
+ /* 1881 E> */ B(StaContextSlot), R(context), U8(144), U8(0),
/* 1895 S> */ B(LdaZero),
- /* 1895 E> */ B(StaContextSlot), R(context), U8(146),
+ /* 1895 E> */ B(StaContextSlot), R(context), U8(145), U8(0),
/* 1909 S> */ B(LdaZero),
- /* 1909 E> */ B(StaContextSlot), R(context), U8(147),
+ /* 1909 E> */ B(StaContextSlot), R(context), U8(146), U8(0),
/* 1923 S> */ B(LdaZero),
- /* 1923 E> */ B(StaContextSlot), R(context), U8(148),
+ /* 1923 E> */ B(StaContextSlot), R(context), U8(147), U8(0),
/* 1937 S> */ B(LdaZero),
- /* 1937 E> */ B(StaContextSlot), R(context), U8(149),
+ /* 1937 E> */ B(StaContextSlot), R(context), U8(148), U8(0),
/* 1951 S> */ B(LdaZero),
- /* 1951 E> */ B(StaContextSlot), R(context), U8(150),
+ /* 1951 E> */ B(StaContextSlot), R(context), U8(149), U8(0),
/* 1965 S> */ B(LdaZero),
- /* 1965 E> */ B(StaContextSlot), R(context), U8(151),
+ /* 1965 E> */ B(StaContextSlot), R(context), U8(150), U8(0),
/* 1979 S> */ B(LdaZero),
- /* 1979 E> */ B(StaContextSlot), R(context), U8(152),
+ /* 1979 E> */ B(StaContextSlot), R(context), U8(151), U8(0),
/* 1993 S> */ B(LdaZero),
- /* 1993 E> */ B(StaContextSlot), R(context), U8(153),
+ /* 1993 E> */ B(StaContextSlot), R(context), U8(152), U8(0),
/* 2007 S> */ B(LdaZero),
- /* 2007 E> */ B(StaContextSlot), R(context), U8(154),
+ /* 2007 E> */ B(StaContextSlot), R(context), U8(153), U8(0),
/* 2021 S> */ B(LdaZero),
- /* 2021 E> */ B(StaContextSlot), R(context), U8(155),
+ /* 2021 E> */ B(StaContextSlot), R(context), U8(154), U8(0),
/* 2035 S> */ B(LdaZero),
- /* 2035 E> */ B(StaContextSlot), R(context), U8(156),
+ /* 2035 E> */ B(StaContextSlot), R(context), U8(155), U8(0),
/* 2049 S> */ B(LdaZero),
- /* 2049 E> */ B(StaContextSlot), R(context), U8(157),
+ /* 2049 E> */ B(StaContextSlot), R(context), U8(156), U8(0),
/* 2063 S> */ B(LdaZero),
- /* 2063 E> */ B(StaContextSlot), R(context), U8(158),
+ /* 2063 E> */ B(StaContextSlot), R(context), U8(157), U8(0),
/* 2077 S> */ B(LdaZero),
- /* 2077 E> */ B(StaContextSlot), R(context), U8(159),
+ /* 2077 E> */ B(StaContextSlot), R(context), U8(158), U8(0),
/* 2091 S> */ B(LdaZero),
- /* 2091 E> */ B(StaContextSlot), R(context), U8(160),
+ /* 2091 E> */ B(StaContextSlot), R(context), U8(159), U8(0),
/* 2105 S> */ B(LdaZero),
- /* 2105 E> */ B(StaContextSlot), R(context), U8(161),
+ /* 2105 E> */ B(StaContextSlot), R(context), U8(160), U8(0),
/* 2119 S> */ B(LdaZero),
- /* 2119 E> */ B(StaContextSlot), R(context), U8(162),
+ /* 2119 E> */ B(StaContextSlot), R(context), U8(161), U8(0),
/* 2133 S> */ B(LdaZero),
- /* 2133 E> */ B(StaContextSlot), R(context), U8(163),
+ /* 2133 E> */ B(StaContextSlot), R(context), U8(162), U8(0),
/* 2147 S> */ B(LdaZero),
- /* 2147 E> */ B(StaContextSlot), R(context), U8(164),
+ /* 2147 E> */ B(StaContextSlot), R(context), U8(163), U8(0),
/* 2161 S> */ B(LdaZero),
- /* 2161 E> */ B(StaContextSlot), R(context), U8(165),
+ /* 2161 E> */ B(StaContextSlot), R(context), U8(164), U8(0),
/* 2175 S> */ B(LdaZero),
- /* 2175 E> */ B(StaContextSlot), R(context), U8(166),
+ /* 2175 E> */ B(StaContextSlot), R(context), U8(165), U8(0),
/* 2189 S> */ B(LdaZero),
- /* 2189 E> */ B(StaContextSlot), R(context), U8(167),
+ /* 2189 E> */ B(StaContextSlot), R(context), U8(166), U8(0),
/* 2203 S> */ B(LdaZero),
- /* 2203 E> */ B(StaContextSlot), R(context), U8(168),
+ /* 2203 E> */ B(StaContextSlot), R(context), U8(167), U8(0),
/* 2217 S> */ B(LdaZero),
- /* 2217 E> */ B(StaContextSlot), R(context), U8(169),
+ /* 2217 E> */ B(StaContextSlot), R(context), U8(168), U8(0),
/* 2231 S> */ B(LdaZero),
- /* 2231 E> */ B(StaContextSlot), R(context), U8(170),
+ /* 2231 E> */ B(StaContextSlot), R(context), U8(169), U8(0),
/* 2245 S> */ B(LdaZero),
- /* 2245 E> */ B(StaContextSlot), R(context), U8(171),
+ /* 2245 E> */ B(StaContextSlot), R(context), U8(170), U8(0),
/* 2259 S> */ B(LdaZero),
- /* 2259 E> */ B(StaContextSlot), R(context), U8(172),
+ /* 2259 E> */ B(StaContextSlot), R(context), U8(171), U8(0),
/* 2273 S> */ B(LdaZero),
- /* 2273 E> */ B(StaContextSlot), R(context), U8(173),
+ /* 2273 E> */ B(StaContextSlot), R(context), U8(172), U8(0),
/* 2287 S> */ B(LdaZero),
- /* 2287 E> */ B(StaContextSlot), R(context), U8(174),
+ /* 2287 E> */ B(StaContextSlot), R(context), U8(173), U8(0),
/* 2301 S> */ B(LdaZero),
- /* 2301 E> */ B(StaContextSlot), R(context), U8(175),
+ /* 2301 E> */ B(StaContextSlot), R(context), U8(174), U8(0),
/* 2315 S> */ B(LdaZero),
- /* 2315 E> */ B(StaContextSlot), R(context), U8(176),
+ /* 2315 E> */ B(StaContextSlot), R(context), U8(175), U8(0),
/* 2329 S> */ B(LdaZero),
- /* 2329 E> */ B(StaContextSlot), R(context), U8(177),
+ /* 2329 E> */ B(StaContextSlot), R(context), U8(176), U8(0),
/* 2343 S> */ B(LdaZero),
- /* 2343 E> */ B(StaContextSlot), R(context), U8(178),
+ /* 2343 E> */ B(StaContextSlot), R(context), U8(177), U8(0),
/* 2357 S> */ B(LdaZero),
- /* 2357 E> */ B(StaContextSlot), R(context), U8(179),
+ /* 2357 E> */ B(StaContextSlot), R(context), U8(178), U8(0),
/* 2371 S> */ B(LdaZero),
- /* 2371 E> */ B(StaContextSlot), R(context), U8(180),
+ /* 2371 E> */ B(StaContextSlot), R(context), U8(179), U8(0),
/* 2385 S> */ B(LdaZero),
- /* 2385 E> */ B(StaContextSlot), R(context), U8(181),
+ /* 2385 E> */ B(StaContextSlot), R(context), U8(180), U8(0),
/* 2399 S> */ B(LdaZero),
- /* 2399 E> */ B(StaContextSlot), R(context), U8(182),
+ /* 2399 E> */ B(StaContextSlot), R(context), U8(181), U8(0),
/* 2413 S> */ B(LdaZero),
- /* 2413 E> */ B(StaContextSlot), R(context), U8(183),
+ /* 2413 E> */ B(StaContextSlot), R(context), U8(182), U8(0),
/* 2427 S> */ B(LdaZero),
- /* 2427 E> */ B(StaContextSlot), R(context), U8(184),
+ /* 2427 E> */ B(StaContextSlot), R(context), U8(183), U8(0),
/* 2441 S> */ B(LdaZero),
- /* 2441 E> */ B(StaContextSlot), R(context), U8(185),
+ /* 2441 E> */ B(StaContextSlot), R(context), U8(184), U8(0),
/* 2455 S> */ B(LdaZero),
- /* 2455 E> */ B(StaContextSlot), R(context), U8(186),
+ /* 2455 E> */ B(StaContextSlot), R(context), U8(185), U8(0),
/* 2469 S> */ B(LdaZero),
- /* 2469 E> */ B(StaContextSlot), R(context), U8(187),
+ /* 2469 E> */ B(StaContextSlot), R(context), U8(186), U8(0),
/* 2483 S> */ B(LdaZero),
- /* 2483 E> */ B(StaContextSlot), R(context), U8(188),
+ /* 2483 E> */ B(StaContextSlot), R(context), U8(187), U8(0),
/* 2497 S> */ B(LdaZero),
- /* 2497 E> */ B(StaContextSlot), R(context), U8(189),
+ /* 2497 E> */ B(StaContextSlot), R(context), U8(188), U8(0),
/* 2511 S> */ B(LdaZero),
- /* 2511 E> */ B(StaContextSlot), R(context), U8(190),
+ /* 2511 E> */ B(StaContextSlot), R(context), U8(189), U8(0),
/* 2525 S> */ B(LdaZero),
- /* 2525 E> */ B(StaContextSlot), R(context), U8(191),
+ /* 2525 E> */ B(StaContextSlot), R(context), U8(190), U8(0),
/* 2539 S> */ B(LdaZero),
- /* 2539 E> */ B(StaContextSlot), R(context), U8(192),
+ /* 2539 E> */ B(StaContextSlot), R(context), U8(191), U8(0),
/* 2553 S> */ B(LdaZero),
- /* 2553 E> */ B(StaContextSlot), R(context), U8(193),
+ /* 2553 E> */ B(StaContextSlot), R(context), U8(192), U8(0),
/* 2567 S> */ B(LdaZero),
- /* 2567 E> */ B(StaContextSlot), R(context), U8(194),
+ /* 2567 E> */ B(StaContextSlot), R(context), U8(193), U8(0),
/* 2581 S> */ B(LdaZero),
- /* 2581 E> */ B(StaContextSlot), R(context), U8(195),
+ /* 2581 E> */ B(StaContextSlot), R(context), U8(194), U8(0),
/* 2595 S> */ B(LdaZero),
- /* 2595 E> */ B(StaContextSlot), R(context), U8(196),
+ /* 2595 E> */ B(StaContextSlot), R(context), U8(195), U8(0),
/* 2609 S> */ B(LdaZero),
- /* 2609 E> */ B(StaContextSlot), R(context), U8(197),
+ /* 2609 E> */ B(StaContextSlot), R(context), U8(196), U8(0),
/* 2623 S> */ B(LdaZero),
- /* 2623 E> */ B(StaContextSlot), R(context), U8(198),
+ /* 2623 E> */ B(StaContextSlot), R(context), U8(197), U8(0),
/* 2637 S> */ B(LdaZero),
- /* 2637 E> */ B(StaContextSlot), R(context), U8(199),
+ /* 2637 E> */ B(StaContextSlot), R(context), U8(198), U8(0),
/* 2651 S> */ B(LdaZero),
- /* 2651 E> */ B(StaContextSlot), R(context), U8(200),
+ /* 2651 E> */ B(StaContextSlot), R(context), U8(199), U8(0),
/* 2665 S> */ B(LdaZero),
- /* 2665 E> */ B(StaContextSlot), R(context), U8(201),
+ /* 2665 E> */ B(StaContextSlot), R(context), U8(200), U8(0),
/* 2679 S> */ B(LdaZero),
- /* 2679 E> */ B(StaContextSlot), R(context), U8(202),
+ /* 2679 E> */ B(StaContextSlot), R(context), U8(201), U8(0),
/* 2693 S> */ B(LdaZero),
- /* 2693 E> */ B(StaContextSlot), R(context), U8(203),
+ /* 2693 E> */ B(StaContextSlot), R(context), U8(202), U8(0),
/* 2707 S> */ B(LdaZero),
- /* 2707 E> */ B(StaContextSlot), R(context), U8(204),
+ /* 2707 E> */ B(StaContextSlot), R(context), U8(203), U8(0),
/* 2721 S> */ B(LdaZero),
- /* 2721 E> */ B(StaContextSlot), R(context), U8(205),
+ /* 2721 E> */ B(StaContextSlot), R(context), U8(204), U8(0),
/* 2735 S> */ B(LdaZero),
- /* 2735 E> */ B(StaContextSlot), R(context), U8(206),
+ /* 2735 E> */ B(StaContextSlot), R(context), U8(205), U8(0),
/* 2749 S> */ B(LdaZero),
- /* 2749 E> */ B(StaContextSlot), R(context), U8(207),
+ /* 2749 E> */ B(StaContextSlot), R(context), U8(206), U8(0),
/* 2763 S> */ B(LdaZero),
- /* 2763 E> */ B(StaContextSlot), R(context), U8(208),
+ /* 2763 E> */ B(StaContextSlot), R(context), U8(207), U8(0),
/* 2777 S> */ B(LdaZero),
- /* 2777 E> */ B(StaContextSlot), R(context), U8(209),
+ /* 2777 E> */ B(StaContextSlot), R(context), U8(208), U8(0),
/* 2791 S> */ B(LdaZero),
- /* 2791 E> */ B(StaContextSlot), R(context), U8(210),
+ /* 2791 E> */ B(StaContextSlot), R(context), U8(209), U8(0),
/* 2805 S> */ B(LdaZero),
- /* 2805 E> */ B(StaContextSlot), R(context), U8(211),
+ /* 2805 E> */ B(StaContextSlot), R(context), U8(210), U8(0),
/* 2819 S> */ B(LdaZero),
- /* 2819 E> */ B(StaContextSlot), R(context), U8(212),
+ /* 2819 E> */ B(StaContextSlot), R(context), U8(211), U8(0),
/* 2833 S> */ B(LdaZero),
- /* 2833 E> */ B(StaContextSlot), R(context), U8(213),
+ /* 2833 E> */ B(StaContextSlot), R(context), U8(212), U8(0),
/* 2847 S> */ B(LdaZero),
- /* 2847 E> */ B(StaContextSlot), R(context), U8(214),
+ /* 2847 E> */ B(StaContextSlot), R(context), U8(213), U8(0),
/* 2861 S> */ B(LdaZero),
- /* 2861 E> */ B(StaContextSlot), R(context), U8(215),
+ /* 2861 E> */ B(StaContextSlot), R(context), U8(214), U8(0),
/* 2875 S> */ B(LdaZero),
- /* 2875 E> */ B(StaContextSlot), R(context), U8(216),
+ /* 2875 E> */ B(StaContextSlot), R(context), U8(215), U8(0),
/* 2889 S> */ B(LdaZero),
- /* 2889 E> */ B(StaContextSlot), R(context), U8(217),
+ /* 2889 E> */ B(StaContextSlot), R(context), U8(216), U8(0),
/* 2903 S> */ B(LdaZero),
- /* 2903 E> */ B(StaContextSlot), R(context), U8(218),
+ /* 2903 E> */ B(StaContextSlot), R(context), U8(217), U8(0),
/* 2917 S> */ B(LdaZero),
- /* 2917 E> */ B(StaContextSlot), R(context), U8(219),
+ /* 2917 E> */ B(StaContextSlot), R(context), U8(218), U8(0),
/* 2931 S> */ B(LdaZero),
- /* 2931 E> */ B(StaContextSlot), R(context), U8(220),
+ /* 2931 E> */ B(StaContextSlot), R(context), U8(219), U8(0),
/* 2945 S> */ B(LdaZero),
- /* 2945 E> */ B(StaContextSlot), R(context), U8(221),
+ /* 2945 E> */ B(StaContextSlot), R(context), U8(220), U8(0),
/* 2959 S> */ B(LdaZero),
- /* 2959 E> */ B(StaContextSlot), R(context), U8(222),
+ /* 2959 E> */ B(StaContextSlot), R(context), U8(221), U8(0),
/* 2973 S> */ B(LdaZero),
- /* 2973 E> */ B(StaContextSlot), R(context), U8(223),
+ /* 2973 E> */ B(StaContextSlot), R(context), U8(222), U8(0),
/* 2987 S> */ B(LdaZero),
- /* 2987 E> */ B(StaContextSlot), R(context), U8(224),
+ /* 2987 E> */ B(StaContextSlot), R(context), U8(223), U8(0),
/* 3001 S> */ B(LdaZero),
- /* 3001 E> */ B(StaContextSlot), R(context), U8(225),
+ /* 3001 E> */ B(StaContextSlot), R(context), U8(224), U8(0),
/* 3015 S> */ B(LdaZero),
- /* 3015 E> */ B(StaContextSlot), R(context), U8(226),
+ /* 3015 E> */ B(StaContextSlot), R(context), U8(225), U8(0),
/* 3029 S> */ B(LdaZero),
- /* 3029 E> */ B(StaContextSlot), R(context), U8(227),
+ /* 3029 E> */ B(StaContextSlot), R(context), U8(226), U8(0),
/* 3043 S> */ B(LdaZero),
- /* 3043 E> */ B(StaContextSlot), R(context), U8(228),
+ /* 3043 E> */ B(StaContextSlot), R(context), U8(227), U8(0),
/* 3057 S> */ B(LdaZero),
- /* 3057 E> */ B(StaContextSlot), R(context), U8(229),
+ /* 3057 E> */ B(StaContextSlot), R(context), U8(228), U8(0),
/* 3071 S> */ B(LdaZero),
- /* 3071 E> */ B(StaContextSlot), R(context), U8(230),
+ /* 3071 E> */ B(StaContextSlot), R(context), U8(229), U8(0),
/* 3085 S> */ B(LdaZero),
- /* 3085 E> */ B(StaContextSlot), R(context), U8(231),
+ /* 3085 E> */ B(StaContextSlot), R(context), U8(230), U8(0),
/* 3099 S> */ B(LdaZero),
- /* 3099 E> */ B(StaContextSlot), R(context), U8(232),
+ /* 3099 E> */ B(StaContextSlot), R(context), U8(231), U8(0),
/* 3113 S> */ B(LdaZero),
- /* 3113 E> */ B(StaContextSlot), R(context), U8(233),
+ /* 3113 E> */ B(StaContextSlot), R(context), U8(232), U8(0),
/* 3127 S> */ B(LdaZero),
- /* 3127 E> */ B(StaContextSlot), R(context), U8(234),
+ /* 3127 E> */ B(StaContextSlot), R(context), U8(233), U8(0),
/* 3141 S> */ B(LdaZero),
- /* 3141 E> */ B(StaContextSlot), R(context), U8(235),
+ /* 3141 E> */ B(StaContextSlot), R(context), U8(234), U8(0),
/* 3155 S> */ B(LdaZero),
- /* 3155 E> */ B(StaContextSlot), R(context), U8(236),
+ /* 3155 E> */ B(StaContextSlot), R(context), U8(235), U8(0),
/* 3169 S> */ B(LdaZero),
- /* 3169 E> */ B(StaContextSlot), R(context), U8(237),
+ /* 3169 E> */ B(StaContextSlot), R(context), U8(236), U8(0),
/* 3183 S> */ B(LdaZero),
- /* 3183 E> */ B(StaContextSlot), R(context), U8(238),
+ /* 3183 E> */ B(StaContextSlot), R(context), U8(237), U8(0),
/* 3197 S> */ B(LdaZero),
- /* 3197 E> */ B(StaContextSlot), R(context), U8(239),
+ /* 3197 E> */ B(StaContextSlot), R(context), U8(238), U8(0),
/* 3211 S> */ B(LdaZero),
- /* 3211 E> */ B(StaContextSlot), R(context), U8(240),
+ /* 3211 E> */ B(StaContextSlot), R(context), U8(239), U8(0),
/* 3225 S> */ B(LdaZero),
- /* 3225 E> */ B(StaContextSlot), R(context), U8(241),
+ /* 3225 E> */ B(StaContextSlot), R(context), U8(240), U8(0),
/* 3239 S> */ B(LdaZero),
- /* 3239 E> */ B(StaContextSlot), R(context), U8(242),
+ /* 3239 E> */ B(StaContextSlot), R(context), U8(241), U8(0),
/* 3253 S> */ B(LdaZero),
- /* 3253 E> */ B(StaContextSlot), R(context), U8(243),
+ /* 3253 E> */ B(StaContextSlot), R(context), U8(242), U8(0),
/* 3267 S> */ B(LdaZero),
- /* 3267 E> */ B(StaContextSlot), R(context), U8(244),
+ /* 3267 E> */ B(StaContextSlot), R(context), U8(243), U8(0),
/* 3281 S> */ B(LdaZero),
- /* 3281 E> */ B(StaContextSlot), R(context), U8(245),
+ /* 3281 E> */ B(StaContextSlot), R(context), U8(244), U8(0),
/* 3295 S> */ B(LdaZero),
- /* 3295 E> */ B(StaContextSlot), R(context), U8(246),
+ /* 3295 E> */ B(StaContextSlot), R(context), U8(245), U8(0),
/* 3309 S> */ B(LdaZero),
- /* 3309 E> */ B(StaContextSlot), R(context), U8(247),
+ /* 3309 E> */ B(StaContextSlot), R(context), U8(246), U8(0),
/* 3323 S> */ B(LdaZero),
- /* 3323 E> */ B(StaContextSlot), R(context), U8(248),
+ /* 3323 E> */ B(StaContextSlot), R(context), U8(247), U8(0),
/* 3337 S> */ B(LdaZero),
- /* 3337 E> */ B(StaContextSlot), R(context), U8(249),
+ /* 3337 E> */ B(StaContextSlot), R(context), U8(248), U8(0),
/* 3351 S> */ B(LdaZero),
- /* 3351 E> */ B(StaContextSlot), R(context), U8(250),
+ /* 3351 E> */ B(StaContextSlot), R(context), U8(249), U8(0),
/* 3365 S> */ B(LdaZero),
- /* 3365 E> */ B(StaContextSlot), R(context), U8(251),
+ /* 3365 E> */ B(StaContextSlot), R(context), U8(250), U8(0),
/* 3379 S> */ B(LdaZero),
- /* 3379 E> */ B(StaContextSlot), R(context), U8(252),
+ /* 3379 E> */ B(StaContextSlot), R(context), U8(251), U8(0),
/* 3393 S> */ B(LdaZero),
- /* 3393 E> */ B(StaContextSlot), R(context), U8(253),
+ /* 3393 E> */ B(StaContextSlot), R(context), U8(252), U8(0),
/* 3407 S> */ B(LdaZero),
- /* 3407 E> */ B(StaContextSlot), R(context), U8(254),
+ /* 3407 E> */ B(StaContextSlot), R(context), U8(253), U8(0),
/* 3421 S> */ B(LdaZero),
- /* 3421 E> */ B(StaContextSlot), R(context), U8(255),
- /* 3424 S> */ B(LdrUndefined), R(2),
- /* 3424 E> */ B(LdrGlobal), U8(1), R(1),
- /* 3424 E> */ B(Call), R(1), R(2), U8(1), U8(0),
- /* 3440 S> */ B(LdaSmi), U8(100),
- /* 3440 E> */ B(Wide), B(StaContextSlot), R16(context), U16(256),
- /* 3445 S> */ B(Wide), B(LdaContextSlot), R16(context), U16(256),
- /* 3454 S> */ B(Return),
+ /* 3421 E> */ B(StaContextSlot), R(context), U8(254), U8(0),
+ /* 3435 S> */ B(LdaZero),
+ /* 3435 E> */ B(StaContextSlot), R(context), U8(255), U8(0),
+ /* 3438 S> */ B(LdrUndefined), R(2),
+ /* 3438 E> */ B(LdrGlobal), U8(2), R(1),
+ /* 3438 E> */ B(Call), R(1), R(2), U8(1), U8(0),
+ /* 3454 S> */ B(LdaSmi), U8(100),
+ /* 3454 E> */ B(Wide), B(StaContextSlot), R16(context), U16(256), U16(0),
+ /* 3459 S> */ B(Wide), B(LdaContextSlot), R16(context), U16(256), U16(0),
+ /* 3468 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
index aef4e1456c..29e0ec3582 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -18,7 +16,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
- /* 45 S> */ B(Inc), U8(1),
+ /* 45 S> */ B(Inc), U8(2),
B(Star), R(0),
/* 57 S> */ B(Return),
]
@@ -39,7 +37,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 45 S> */ B(ToNumber), R(1),
- B(Inc), U8(1),
+ B(Inc), U8(2),
B(Star), R(0),
B(Ldar), R(1),
/* 57 S> */ B(Return),
@@ -60,7 +58,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
- /* 45 S> */ B(Dec), U8(1),
+ /* 45 S> */ B(Dec), U8(2),
B(Star), R(0),
/* 57 S> */ B(Return),
]
@@ -81,7 +79,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 45 S> */ B(ToNumber), R(1),
- B(Dec), U8(1),
+ B(Dec), U8(2),
B(Star), R(0),
B(Ldar), R(1),
/* 57 S> */ B(Return),
@@ -102,16 +100,16 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
B(Mov), R(1), R(0),
- /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
+ /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
B(ToNumber), R(2),
- B(Inc), U8(5),
- /* 66 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(3),
+ B(Inc), U8(6),
+ /* 66 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(4),
B(Ldar), R(2),
/* 70 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["val"],
]
handlers: [
]
@@ -127,14 +125,14 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
B(Mov), R(1), R(0),
- /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
- B(Dec), U8(5),
- /* 65 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(3),
+ /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
+ B(Dec), U8(6),
+ /* 65 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(4),
/* 70 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["val"],
]
handlers: [
]
@@ -153,16 +151,16 @@ bytecodes: [
/* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(1), R(2),
B(Mov), R(2), R(1),
/* 72 S> */ B(Ldar), R(0),
- /* 81 E> */ B(LdaKeyedProperty), R(1), U8(1),
+ /* 81 E> */ B(LdaKeyedProperty), R(1), U8(2),
B(ToNumber), R(4),
- B(Dec), U8(5),
- /* 86 E> */ B(StaKeyedPropertySloppy), R(1), R(0), U8(3),
+ B(Dec), U8(6),
+ /* 86 E> */ B(StaKeyedPropertySloppy), R(1), R(0), U8(4),
B(Ldar), R(4),
/* 90 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["var"],
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -181,14 +179,14 @@ bytecodes: [
/* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(1), R(2),
B(Mov), R(2), R(1),
/* 72 S> */ B(Ldar), R(0),
- /* 83 E> */ B(LdaKeyedProperty), R(1), U8(1),
- B(Inc), U8(5),
- /* 87 E> */ B(StaKeyedPropertySloppy), R(1), R(0), U8(3),
+ /* 83 E> */ B(LdaKeyedProperty), R(1), U8(2),
+ B(Inc), U8(6),
+ /* 87 E> */ B(StaKeyedPropertySloppy), R(1), R(0), U8(4),
/* 90 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["var"],
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -199,22 +197,22 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 24
+bytecode array length: 27
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
/* 53 S> */ B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
- /* 78 S> */ B(LdaContextSlot), R(context), U8(4),
- B(Inc), U8(1),
- /* 87 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 78 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
+ B(Inc), U8(2),
+ /* 87 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
/* 90 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -225,24 +223,24 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 28
+bytecode array length: 31
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
/* 53 S> */ B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
- /* 78 S> */ B(LdaContextSlot), R(context), U8(4),
+ /* 78 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(ToNumber), R(2),
- B(Dec), U8(1),
- /* 86 E> */ B(StaContextSlot), R(context), U8(4),
+ B(Dec), U8(2),
+ /* 86 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
B(Ldar), R(2),
/* 90 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -258,18 +256,18 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
- /* 55 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ /* 55 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(9),
B(Star), R(1),
/* 63 S> */ B(Ldar), R(0),
B(ToNumber), R(3),
- B(Inc), U8(1),
+ B(Inc), U8(2),
B(Star), R(0),
B(LdaSmi), U8(2),
- /* 79 E> */ B(StaKeyedPropertySloppy), R(1), R(3), U8(2),
+ /* 79 E> */ B(StaKeyedPropertySloppy), R(1), R(3), U8(3),
/* 84 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
index 1668c81302..1c12767e09 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: no
test function name: f
@@ -41,7 +39,7 @@ bytecodes: [
B(Star), R(0),
/* 10 E> */ B(StackCheck),
/* 15 S> */ B(LdaZero),
- /* 31 E> */ B(LdaKeyedProperty), R(0), U8(1),
+ /* 31 E> */ B(LdaKeyedProperty), R(0), U8(2),
/* 36 S> */ B(Return),
]
constant pool: [
@@ -76,17 +74,17 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 18
+bytecode array length: 19
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateMappedArguments),
B(Star), R(0),
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaZero),
- /* 32 E> */ B(LdaKeyedProperty), R(0), U8(1),
+ /* 32 E> */ B(LdaKeyedProperty), R(0), U8(2),
/* 37 S> */ B(Return),
]
constant pool: [
@@ -101,16 +99,16 @@ snippet: "
"
frame size: 2
parameter count: 4
-bytecode array length: 25
+bytecode array length: 28
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(1),
B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(6),
+ B(StaContextSlot), R(context), U8(6), U8(0),
B(Ldar), R(arg1),
- B(StaContextSlot), R(context), U8(5),
+ B(StaContextSlot), R(context), U8(5), U8(0),
B(Ldar), R(arg2),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateMappedArguments),
B(Star), R(0),
/* 10 E> */ B(StackCheck),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
index afa349ac3e..c960237f09 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: no
test function name: f
@@ -67,7 +65,7 @@ bytecodes: [
/* 10 E> */ B(StackCheck),
B(Mov), R(arg0), R(1),
/* 29 S> */ B(LdaZero),
- /* 44 E> */ B(LdaKeyedProperty), R(0), U8(1),
+ /* 44 E> */ B(LdaKeyedProperty), R(0), U8(2),
/* 49 S> */ B(Return),
]
constant pool: [
@@ -80,23 +78,23 @@ snippet: "
function f(a, ...restArgs) { return restArgs[0] + arguments[0]; }
f();
"
-frame size: 5
+frame size: 4
parameter count: 2
bytecode array length: 26
bytecodes: [
B(CreateUnmappedArguments),
- B(Star), R(0),
+ B(Star), R(2),
B(CreateRestParameter),
- B(Star), R(1),
+ B(Star), R(0),
B(LdaTheHole),
- B(Star), R(2),
+ B(Star), R(1),
/* 10 E> */ B(StackCheck),
- B(Mov), R(arg0), R(2),
+ B(Mov), R(arg0), R(1),
/* 29 S> */ B(LdaZero),
- /* 44 E> */ B(LdrKeyedProperty), R(1), U8(1), R(4),
+ /* 44 E> */ B(LdrKeyedProperty), R(0), U8(2), R(3),
B(LdaZero),
- /* 59 E> */ B(LdaKeyedProperty), R(0), U8(3),
- B(Add), R(4), U8(5),
+ /* 59 E> */ B(LdaKeyedProperty), R(2), U8(4),
+ B(Add), R(3), U8(6),
/* 64 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
index 2530404379..6c4a7b5ac2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: yes
---
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
index 9d16d06aff..a61e993e52 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: no
wrap: no
top level: yes
@@ -34,8 +32,8 @@ bytecodes: [
/* 10 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
]
handlers: [
]
@@ -59,7 +57,7 @@ bytecodes: [
/* 15 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -88,13 +86,13 @@ bytecodes: [
B(Star), R(3),
B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
/* 11 S> */ B(LdaSmi), U8(2),
- /* 12 E> */ B(StaGlobalSloppy), U8(1), U8(3),
+ /* 12 E> */ B(StaGlobalSloppy), U8(1), U8(4),
B(Star), R(0),
/* 15 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
]
handlers: [
]
@@ -116,13 +114,13 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kDeclareGlobalsForInterpreter), R(1), U8(3),
/* 0 E> */ B(StackCheck),
/* 16 S> */ B(LdrUndefined), R(2),
- B(LdrGlobal), U8(1), R(1),
- /* 16 E> */ B(Call), R(1), R(2), U8(1), U8(3),
+ B(LdrGlobal), U8(2), R(1),
+ /* 16 E> */ B(Call), R(1), R(2), U8(1), U8(4),
B(Star), R(0),
/* 20 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
index aeebe7a3c2..d7d60aa26f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -23,8 +21,8 @@ bytecodes: [
/* 75 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -45,8 +43,8 @@ bytecodes: [
/* 89 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -67,7 +65,7 @@ bytecodes: [
/* 76 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -100,23 +98,23 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 27
+bytecode array length: 29
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
/* 56 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
B(Ldar), R(1),
- /* 56 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 56 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
/* 64 S> */ B(CreateClosure), U8(1), U8(2),
- /* 93 S> */ B(LdrContextSlot), R(context), U8(4), R(1),
+ /* 93 S> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(1),
B(LdaSmi), U8(1),
B(DeletePropertyStrict), R(1),
/* 113 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
index dcc72134b2..6869dcfa2c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: no
test function name: f
@@ -31,7 +29,7 @@ bytecodes: [
/* 25 S> */ B(Return),
]
constant pool: [
- "x",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -82,7 +80,7 @@ bytecodes: [
/* 32 S> */ B(Return),
]
constant pool: [
- "z",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["z"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden
index ac0b2ee8d4..60e585f974 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: yes
---
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
index e04e131928..08a5aaa871 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: yes
do expressions: yes
@@ -36,9 +34,9 @@ bytecode array length: 13
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 55 S> */ B(LdaSmi), U8(100),
- B(Star), R(1),
- /* 42 S> */ B(LdrUndefined), R(0),
- B(Ldar), R(0),
+ B(Star), R(0),
+ /* 42 S> */ B(LdrUndefined), R(1),
+ B(Ldar), R(1),
B(Star), R(2),
/* 63 S> */ B(Nop),
/* 73 S> */ B(Return),
@@ -59,10 +57,10 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 34 E> */ B(StackCheck),
/* 56 S> */ B(LdaSmi), U8(10),
- B(Star), R(1),
- /* 69 S> */ B(Inc), U8(1),
- B(Star), R(1),
B(Star), R(0),
+ /* 69 S> */ B(Inc), U8(2),
+ B(Star), R(0),
+ B(Star), R(1),
/* 74 S> */ B(Jump), U8(2),
B(LdaUndefined),
/* 94 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
index f8ee37a398..07bd99c1f0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: yes
---
@@ -13,20 +11,20 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 66
+bytecode array length: 69
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(5),
+ B(StaContextSlot), R(context), U8(6), U8(0),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(6),
+ B(StaContextSlot), R(context), U8(5), U8(0),
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaConstant), U8(0),
- B(Star), R(3),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(Star), R(4),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
B(LdaConstant), U8(1),
B(Star), R(3),
B(LdaZero),
@@ -44,8 +42,8 @@ bytecodes: [
/* 53 S> */ B(Return),
]
constant pool: [
- "eval",
- "1;",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["eval"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["1;"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
index b6a8df8636..a23bb90226 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -65,34 +63,34 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 44
+bytecode array length: 45
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
- B(Star), R(1),
- /* 68 S> */ B(JumpIfUndefined), U8(37),
- B(JumpIfNull), U8(35),
+ B(Star), R(0),
+ /* 68 S> */ B(JumpIfUndefined), U8(38),
+ B(JumpIfNull), U8(36),
B(ToObject), R(3),
B(ForInPrepare), R(3), R(4),
B(LdaZero),
B(Star), R(7),
- /* 63 S> */ B(ForInDone), R(7), R(6),
- B(JumpIfTrue), U8(22),
- B(ForInNext), R(3), R(7), R(4), U8(1),
+ /* 63 S> */ B(ForInContinue), R(7), R(6),
+ B(JumpIfFalse), U8(23),
+ B(ForInNext), R(3), R(7), R(4), U8(2),
B(JumpIfUndefined), U8(9),
- B(Star), R(0),
+ B(Star), R(1),
/* 54 E> */ B(StackCheck),
B(Star), R(2),
/* 73 S> */ B(Nop),
/* 85 S> */ B(Return),
B(ForInStep), R(7),
B(Star), R(7),
- B(Jump), U8(-23),
+ B(JumpLoop), U8(-23), U8(0),
B(LdaUndefined),
/* 85 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["potatoes"],
]
handlers: [
]
@@ -104,37 +102,37 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 55
+bytecode array length: 56
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
- B(Star), R(1),
- /* 59 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
- B(JumpIfUndefined), U8(45),
- B(JumpIfNull), U8(43),
+ B(Star), R(0),
+ /* 59 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(9),
+ B(JumpIfUndefined), U8(46),
+ B(JumpIfNull), U8(44),
B(ToObject), R(3),
B(ForInPrepare), R(3), R(4),
B(LdaZero),
B(Star), R(7),
- /* 54 S> */ B(ForInDone), R(7), R(6),
- B(JumpIfTrue), U8(30),
- B(ForInNext), R(3), R(7), R(4), U8(2),
+ /* 54 S> */ B(ForInContinue), R(7), R(6),
+ B(JumpIfFalse), U8(31),
+ B(ForInNext), R(3), R(7), R(4), U8(3),
B(JumpIfUndefined), U8(17),
- B(Star), R(0),
+ B(Star), R(1),
/* 45 E> */ B(StackCheck),
B(Star), R(2),
- /* 70 S> */ B(Ldar), R(0),
- /* 75 E> */ B(Add), R(1), U8(1),
- B(Mov), R(1), R(8),
- B(Star), R(1),
+ /* 70 S> */ B(Ldar), R(1),
+ /* 75 E> */ B(Add), R(0), U8(2),
+ B(Mov), R(0), R(8),
+ B(Star), R(0),
/* 72 E> */ B(ForInStep), R(7),
B(Star), R(7),
- B(Jump), U8(-31),
+ B(JumpLoop), U8(-31), U8(0),
B(LdaUndefined),
/* 80 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -147,49 +145,49 @@ snippet: "
if (x['a'] == 20) break;
}
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 80
+bytecode array length: 83
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
B(Mov), R(1), R(0),
- /* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(3),
- B(JumpIfUndefined), U8(65),
- B(JumpIfNull), U8(63),
+ /* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(9),
+ B(JumpIfUndefined), U8(68),
+ B(JumpIfNull), U8(66),
B(ToObject), R(1),
B(ForInPrepare), R(1), R(2),
B(LdaZero),
B(Star), R(5),
- /* 68 S> */ B(ForInDone), R(5), R(4),
- B(JumpIfTrue), U8(50),
- B(ForInNext), R(1), R(5), R(2), U8(9),
- B(JumpIfUndefined), U8(37),
+ /* 68 S> */ B(ForInContinue), R(5), R(4),
+ B(JumpIfFalse), U8(53),
+ B(ForInNext), R(1), R(5), R(2), U8(12),
+ B(JumpIfUndefined), U8(39),
B(Star), R(6),
- /* 67 E> */ B(StaNamedPropertySloppy), R(0), U8(2), U8(7),
+ /* 67 E> */ B(StaNamedPropertySloppy), R(0), U8(2), U8(10),
/* 62 E> */ B(StackCheck),
/* 95 S> */ B(Nop),
- /* 100 E> */ B(LdrNamedProperty), R(0), U8(2), U8(3), R(7),
+ /* 100 E> */ B(LdrNamedProperty), R(0), U8(2), U8(4), R(6),
B(LdaSmi), U8(10),
- /* 106 E> */ B(TestEqual), R(7),
+ /* 106 E> */ B(TestEqual), R(6), U8(6),
B(JumpIfFalse), U8(4),
- /* 113 S> */ B(Jump), U8(16),
+ /* 113 S> */ B(Jump), U8(17),
/* 125 S> */ B(Nop),
- /* 130 E> */ B(LdrNamedProperty), R(0), U8(2), U8(5), R(7),
+ /* 130 E> */ B(LdrNamedProperty), R(0), U8(2), U8(7), R(6),
B(LdaSmi), U8(20),
- /* 136 E> */ B(TestEqual), R(7),
+ /* 136 E> */ B(TestEqual), R(6), U8(9),
B(JumpIfFalse), U8(4),
- /* 143 S> */ B(Jump), U8(8),
+ /* 143 S> */ B(Jump), U8(9),
B(ForInStep), R(5),
B(Star), R(5),
- B(Jump), U8(-51),
+ B(JumpLoop), U8(-53), U8(0),
B(LdaUndefined),
/* 152 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
]
handlers: [
]
@@ -201,40 +199,40 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 61
+bytecode array length: 62
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ /* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(9),
B(Star), R(0),
- /* 72 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(3),
- B(JumpIfUndefined), U8(48),
- B(JumpIfNull), U8(46),
+ /* 72 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(9),
+ B(JumpIfUndefined), U8(49),
+ B(JumpIfNull), U8(47),
B(ToObject), R(1),
B(ForInPrepare), R(1), R(2),
B(LdaZero),
B(Star), R(5),
- /* 65 S> */ B(ForInDone), R(5), R(4),
- B(JumpIfTrue), U8(33),
- B(ForInNext), R(1), R(5), R(2), U8(7),
+ /* 65 S> */ B(ForInContinue), R(5), R(4),
+ B(JumpIfFalse), U8(34),
+ B(ForInNext), R(1), R(5), R(2), U8(8),
B(JumpIfUndefined), U8(20),
B(Star), R(6),
B(LdaZero),
B(Star), R(8),
B(Ldar), R(6),
- /* 64 E> */ B(StaKeyedPropertySloppy), R(0), R(8), U8(5),
+ /* 64 E> */ B(StaKeyedPropertySloppy), R(0), R(8), U8(6),
/* 59 E> */ B(StackCheck),
/* 83 S> */ B(LdaSmi), U8(3),
- /* 91 E> */ B(LdaKeyedProperty), R(0), U8(3),
+ /* 91 E> */ B(LdaKeyedProperty), R(0), U8(4),
/* 98 S> */ B(Return),
B(ForInStep), R(5),
B(Star), R(5),
- B(Jump), U8(-34),
+ B(JumpLoop), U8(-34), U8(0),
B(LdaUndefined),
/* 98 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 01121e5017..3ede3ec597 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -13,49 +11,49 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 268
+bytecode array length: 279
bytecodes: [
/* 30 E> */ B(StackCheck),
B(LdaZero),
- B(Star), R(3),
+ B(Star), R(4),
B(Mov), R(context), R(11),
B(Mov), R(context), R(12),
- /* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ /* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(9),
B(Star), R(14),
B(LdaConstant), U8(1),
- /* 48 E> */ B(LdrKeyedProperty), R(14), U8(3), R(13),
- /* 48 E> */ B(Call), R(13), R(14), U8(1), U8(1),
- B(Star), R(1),
- /* 45 S> */ B(LdrNamedProperty), R(1), U8(2), U8(7), R(14),
- /* 45 E> */ B(Call), R(14), R(1), U8(1), U8(5),
+ /* 48 E> */ B(LdrKeyedProperty), R(14), U8(4), R(13),
+ /* 48 E> */ B(Call), R(13), R(14), U8(1), U8(2),
B(Star), R(2),
- /* 45 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
+ /* 45 S> */ B(LdrNamedProperty), R(2), U8(2), U8(8), R(14),
+ /* 45 E> */ B(Call), R(14), R(2), U8(1), U8(6),
+ B(Star), R(3),
+ /* 45 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
- B(LdaNamedProperty), R(2), U8(3), U8(9),
- B(JumpIfToBooleanTrue), U8(23),
- B(LdrNamedProperty), R(2), U8(4), U8(11), R(4),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
+ B(LdaNamedProperty), R(3), U8(3), U8(10),
+ B(JumpIfToBooleanTrue), U8(24),
+ B(LdrNamedProperty), R(3), U8(4), U8(12), R(5),
B(LdaSmi), U8(2),
- B(Star), R(3),
- B(Mov), R(4), R(0),
+ B(Star), R(4),
+ B(Mov), R(5), R(0),
/* 34 E> */ B(StackCheck),
- B(Mov), R(0), R(7),
+ B(Mov), R(0), R(1),
B(LdaZero),
- B(Star), R(3),
- B(Jump), U8(-49),
- B(Jump), U8(34),
+ B(Star), R(4),
+ B(JumpLoop), U8(-49), U8(0),
+ B(Jump), U8(37),
B(Star), R(13),
B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(5),
+ B(CreateCatchContext), R(13), U8(5), U8(6),
B(Star), R(12),
B(PushContext), R(8),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(3),
+ B(TestEqualStrict), R(4), U8(14),
B(JumpIfFalse), U8(6),
B(LdaSmi), U8(1),
- B(Star), R(3),
- B(LdrContextSlot), R(context), U8(4), R(13),
+ B(Star), R(4),
+ B(LdrContextSlot), R(context), U8(4), U8(0), R(13),
B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
B(PopContext), R(8),
B(LdaSmi), U8(-1),
@@ -67,57 +65,57 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Star), R(11),
B(LdaZero),
- B(TestEqualStrict), R(3),
- B(JumpIfTrue), U8(116),
+ B(TestEqualStrict), R(4), U8(15),
+ B(JumpIfTrue), U8(121),
B(LdaUndefined),
- B(TestEqualStrict), R(1),
- B(JumpIfTrue), U8(111),
- B(LdrNamedProperty), R(1), U8(6), U8(13), R(5),
+ B(TestEqualStrict), R(2), U8(16),
+ B(JumpIfTrue), U8(115),
+ B(LdrNamedProperty), R(2), U8(7), U8(17), R(6),
B(LdaNull),
- B(TestEqual), R(5),
+ B(TestEqual), R(6), U8(19),
B(JumpIfFalse), U8(4),
- B(Jump), U8(99),
+ B(Jump), U8(102),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(3),
- B(JumpIfFalse), U8(68),
- B(Ldar), R(5),
+ B(TestEqualStrict), R(4), U8(20),
+ B(JumpIfFalse), U8(70),
+ B(Ldar), R(6),
B(TypeOf),
B(Star), R(12),
- B(LdaConstant), U8(7),
- B(TestEqualStrict), R(12),
+ B(LdaConstant), U8(8),
+ B(TestEqualStrict), R(12), U8(21),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), U16(129),
+ B(Wide), B(LdaSmi), U16(130),
B(Star), R(12),
- B(LdaConstant), U8(8),
+ B(LdaConstant), U8(9),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
B(Throw),
B(Mov), R(context), R(12),
- B(Mov), R(5), R(13),
- B(Mov), R(1), R(14),
+ B(Mov), R(6), R(13),
+ B(Mov), R(2), R(14),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
- B(Jump), U8(22),
+ B(Jump), U8(23),
B(Star), R(13),
B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(5),
+ B(CreateCatchContext), R(13), U8(5), U8(10),
B(Star), R(12),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Ldar), R(12),
B(PushContext), R(8),
B(PopContext), R(8),
B(Jump), U8(27),
- B(Mov), R(5), R(12),
- B(Mov), R(1), R(13),
+ B(Mov), R(6), R(12),
+ B(Mov), R(2), R(13),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(Star), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(11), U8(1),
B(LdaZero),
- B(TestEqualStrict), R(9),
+ B(TestEqualStrict), R(9), U8(0),
B(JumpIfTrue), U8(4),
B(Jump), U8(5),
B(Ldar), R(10),
@@ -126,20 +124,22 @@ bytecodes: [
/* 62 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SYMBOL_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["function"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ FIXED_ARRAY_TYPE,
]
handlers: [
- [7, 114, 120],
- [10, 80, 82],
- [192, 202, 204],
+ [7, 118, 124],
+ [10, 81, 83],
+ [201, 211, 213],
]
---
@@ -149,50 +149,50 @@ snippet: "
"
frame size: 16
parameter count: 1
-bytecode array length: 279
+bytecode array length: 290
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
- B(Star), R(7),
+ B(Star), R(0),
B(LdaZero),
- B(Star), R(3),
+ B(Star), R(5),
B(Mov), R(context), R(12),
B(Mov), R(context), R(13),
/* 68 S> */ B(LdaConstant), U8(1),
- /* 68 E> */ B(LdrKeyedProperty), R(7), U8(3), R(14),
- /* 68 E> */ B(Call), R(14), R(7), U8(1), U8(1),
- B(Star), R(1),
- /* 65 S> */ B(LdrNamedProperty), R(1), U8(2), U8(7), R(15),
- /* 65 E> */ B(Call), R(15), R(1), U8(1), U8(5),
- B(Star), R(2),
- /* 65 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
+ /* 68 E> */ B(LdrKeyedProperty), R(0), U8(4), R(14),
+ /* 68 E> */ B(Call), R(14), R(0), U8(1), U8(2),
+ B(Star), R(3),
+ /* 65 S> */ B(LdrNamedProperty), R(3), U8(2), U8(8), R(15),
+ /* 65 E> */ B(Call), R(15), R(3), U8(1), U8(6),
+ B(Star), R(4),
+ /* 65 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(4), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
- B(LdaNamedProperty), R(2), U8(3), U8(9),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(4), U8(1),
+ B(LdaNamedProperty), R(4), U8(3), U8(10),
B(JumpIfToBooleanTrue), U8(26),
- B(LdrNamedProperty), R(2), U8(4), U8(11), R(4),
+ B(LdrNamedProperty), R(4), U8(4), U8(12), R(6),
B(LdaSmi), U8(2),
- B(Star), R(3),
- B(Mov), R(4), R(0),
+ B(Star), R(5),
+ B(Mov), R(6), R(1),
/* 54 E> */ B(StackCheck),
- B(Mov), R(0), R(8),
+ B(Mov), R(1), R(2),
/* 73 S> */ B(LdaZero),
B(Star), R(10),
- B(Mov), R(0), R(11),
- B(Jump), U8(48),
- B(Jump), U8(34),
+ B(Mov), R(1), R(11),
+ B(Jump), U8(51),
+ B(Jump), U8(37),
B(Star), R(14),
B(Ldar), R(closure),
- B(CreateCatchContext), R(14), U8(5),
+ B(CreateCatchContext), R(14), U8(5), U8(6),
B(Star), R(13),
B(PushContext), R(9),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(3),
+ B(TestEqualStrict), R(5), U8(14),
B(JumpIfFalse), U8(6),
B(LdaSmi), U8(1),
- B(Star), R(3),
- B(LdrContextSlot), R(context), U8(4), R(14),
+ B(Star), R(5),
+ B(LdrContextSlot), R(context), U8(4), U8(0), R(14),
B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
B(PopContext), R(9),
B(LdaSmi), U8(-1),
@@ -204,60 +204,60 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Star), R(12),
B(LdaZero),
- B(TestEqualStrict), R(3),
- B(JumpIfTrue), U8(116),
+ B(TestEqualStrict), R(5), U8(15),
+ B(JumpIfTrue), U8(121),
B(LdaUndefined),
- B(TestEqualStrict), R(1),
- B(JumpIfTrue), U8(111),
- B(LdrNamedProperty), R(1), U8(6), U8(13), R(5),
+ B(TestEqualStrict), R(3), U8(16),
+ B(JumpIfTrue), U8(115),
+ B(LdrNamedProperty), R(3), U8(7), U8(17), R(7),
B(LdaNull),
- B(TestEqual), R(5),
+ B(TestEqual), R(7), U8(19),
B(JumpIfFalse), U8(4),
- B(Jump), U8(99),
+ B(Jump), U8(102),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(3),
- B(JumpIfFalse), U8(68),
- B(Ldar), R(5),
+ B(TestEqualStrict), R(5), U8(20),
+ B(JumpIfFalse), U8(70),
+ B(Ldar), R(7),
B(TypeOf),
B(Star), R(13),
- B(LdaConstant), U8(7),
- B(TestEqualStrict), R(13),
+ B(LdaConstant), U8(8),
+ B(TestEqualStrict), R(13), U8(21),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), U16(129),
+ B(Wide), B(LdaSmi), U16(130),
B(Star), R(13),
- B(LdaConstant), U8(8),
+ B(LdaConstant), U8(9),
B(Star), R(14),
B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
B(Throw),
B(Mov), R(context), R(13),
- B(Mov), R(5), R(14),
- B(Mov), R(1), R(15),
+ B(Mov), R(7), R(14),
+ B(Mov), R(3), R(15),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
- B(Jump), U8(22),
+ B(Jump), U8(23),
B(Star), R(14),
B(Ldar), R(closure),
- B(CreateCatchContext), R(14), U8(5),
+ B(CreateCatchContext), R(14), U8(5), U8(10),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Ldar), R(13),
B(PushContext), R(9),
B(PopContext), R(9),
B(Jump), U8(27),
- B(Mov), R(5), R(13),
- B(Mov), R(1), R(14),
+ B(Mov), R(7), R(13),
+ B(Mov), R(3), R(14),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
- B(Star), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
+ B(Star), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(12), U8(1),
B(LdaZero),
- B(TestEqualStrict), R(10),
- B(JumpIfTrue), U8(10),
+ B(TestEqualStrict), R(10), U8(0),
+ B(JumpIfTrue), U8(11),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(10),
+ B(TestEqualStrict), R(10), U8(0),
B(JumpIfTrue), U8(7),
B(Jump), U8(8),
B(Ldar), R(11),
@@ -268,20 +268,22 @@ bytecodes: [
/* 85 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SYMBOL_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["potatoes"],
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["function"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ FIXED_ARRAY_TYPE,
]
handlers: [
- [11, 115, 121],
+ [11, 118, 124],
[14, 81, 83],
- [194, 204, 206],
+ [202, 212, 214],
]
---
@@ -293,57 +295,57 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 284
+bytecode array length: 297
bytecodes: [
/* 30 E> */ B(StackCheck),
B(LdaZero),
- B(Star), R(3),
+ B(Star), R(4),
B(Mov), R(context), R(11),
B(Mov), R(context), R(12),
- /* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ /* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(9),
B(Star), R(14),
B(LdaConstant), U8(1),
- /* 48 E> */ B(LdrKeyedProperty), R(14), U8(3), R(13),
- /* 48 E> */ B(Call), R(13), R(14), U8(1), U8(1),
- B(Star), R(1),
- /* 45 S> */ B(LdrNamedProperty), R(1), U8(2), U8(7), R(14),
- /* 45 E> */ B(Call), R(14), R(1), U8(1), U8(5),
+ /* 48 E> */ B(LdrKeyedProperty), R(14), U8(4), R(13),
+ /* 48 E> */ B(Call), R(13), R(14), U8(1), U8(2),
B(Star), R(2),
- /* 45 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
+ /* 45 S> */ B(LdrNamedProperty), R(2), U8(2), U8(8), R(14),
+ /* 45 E> */ B(Call), R(14), R(2), U8(1), U8(6),
+ B(Star), R(3),
+ /* 45 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
- B(LdaNamedProperty), R(2), U8(3), U8(9),
- B(JumpIfToBooleanTrue), U8(39),
- B(LdrNamedProperty), R(2), U8(4), U8(11), R(4),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
+ B(LdaNamedProperty), R(3), U8(3), U8(10),
+ B(JumpIfToBooleanTrue), U8(42),
+ B(LdrNamedProperty), R(3), U8(4), U8(12), R(5),
B(LdaSmi), U8(2),
- B(Star), R(3),
- B(Mov), R(4), R(0),
+ B(Star), R(4),
+ B(Mov), R(5), R(0),
/* 34 E> */ B(StackCheck),
- B(Mov), R(0), R(7),
+ B(Mov), R(0), R(1),
/* 66 S> */ B(LdaSmi), U8(10),
- /* 72 E> */ B(TestEqual), R(7),
+ /* 72 E> */ B(TestEqual), R(1), U8(14),
B(JumpIfFalse), U8(4),
- /* 79 S> */ B(Jump), U8(13),
+ /* 79 S> */ B(Jump), U8(14),
/* 91 S> */ B(LdaSmi), U8(20),
- /* 97 E> */ B(TestEqual), R(7),
+ /* 97 E> */ B(TestEqual), R(1), U8(15),
B(JumpIfFalse), U8(4),
- /* 104 S> */ B(Jump), U8(7),
+ /* 104 S> */ B(Jump), U8(8),
B(LdaZero),
- B(Star), R(3),
- B(Jump), U8(-65),
- B(Jump), U8(34),
+ B(Star), R(4),
+ B(JumpLoop), U8(-67), U8(0),
+ B(Jump), U8(37),
B(Star), R(13),
B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(5),
+ B(CreateCatchContext), R(13), U8(5), U8(6),
B(Star), R(12),
B(PushContext), R(8),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(3),
+ B(TestEqualStrict), R(4), U8(16),
B(JumpIfFalse), U8(6),
B(LdaSmi), U8(1),
- B(Star), R(3),
- B(LdrContextSlot), R(context), U8(4), R(13),
+ B(Star), R(4),
+ B(LdrContextSlot), R(context), U8(4), U8(0), R(13),
B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
B(PopContext), R(8),
B(LdaSmi), U8(-1),
@@ -355,57 +357,57 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Star), R(11),
B(LdaZero),
- B(TestEqualStrict), R(3),
- B(JumpIfTrue), U8(116),
+ B(TestEqualStrict), R(4), U8(17),
+ B(JumpIfTrue), U8(121),
B(LdaUndefined),
- B(TestEqualStrict), R(1),
- B(JumpIfTrue), U8(111),
- B(LdrNamedProperty), R(1), U8(6), U8(13), R(5),
+ B(TestEqualStrict), R(2), U8(18),
+ B(JumpIfTrue), U8(115),
+ B(LdrNamedProperty), R(2), U8(7), U8(19), R(6),
B(LdaNull),
- B(TestEqual), R(5),
+ B(TestEqual), R(6), U8(21),
B(JumpIfFalse), U8(4),
- B(Jump), U8(99),
+ B(Jump), U8(102),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(3),
- B(JumpIfFalse), U8(68),
- B(Ldar), R(5),
+ B(TestEqualStrict), R(4), U8(22),
+ B(JumpIfFalse), U8(70),
+ B(Ldar), R(6),
B(TypeOf),
B(Star), R(12),
- B(LdaConstant), U8(7),
- B(TestEqualStrict), R(12),
+ B(LdaConstant), U8(8),
+ B(TestEqualStrict), R(12), U8(23),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), U16(129),
+ B(Wide), B(LdaSmi), U16(130),
B(Star), R(12),
- B(LdaConstant), U8(8),
+ B(LdaConstant), U8(9),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
B(Throw),
B(Mov), R(context), R(12),
- B(Mov), R(5), R(13),
- B(Mov), R(1), R(14),
+ B(Mov), R(6), R(13),
+ B(Mov), R(2), R(14),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
- B(Jump), U8(22),
+ B(Jump), U8(23),
B(Star), R(13),
B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(5),
+ B(CreateCatchContext), R(13), U8(5), U8(10),
B(Star), R(12),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Ldar), R(12),
B(PushContext), R(8),
B(PopContext), R(8),
B(Jump), U8(27),
- B(Mov), R(5), R(12),
- B(Mov), R(1), R(13),
+ B(Mov), R(6), R(12),
+ B(Mov), R(2), R(13),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(Star), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(11), U8(1),
B(LdaZero),
- B(TestEqualStrict), R(9),
+ B(TestEqualStrict), R(9), U8(0),
B(JumpIfTrue), U8(4),
B(Jump), U8(5),
B(Ldar), R(10),
@@ -414,20 +416,22 @@ bytecodes: [
/* 113 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SYMBOL_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["function"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ FIXED_ARRAY_TYPE,
]
handlers: [
- [7, 130, 136],
- [10, 96, 98],
- [208, 218, 220],
+ [7, 136, 142],
+ [10, 99, 101],
+ [219, 229, 231],
]
---
@@ -437,53 +441,53 @@ snippet: "
"
frame size: 14
parameter count: 1
-bytecode array length: 292
+bytecode array length: 303
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(8),
- B(Mov), R(8), R(6),
+ B(Mov), R(8), R(0),
B(LdaZero),
- B(Star), R(2),
+ B(Star), R(3),
B(Mov), R(context), R(10),
B(Mov), R(context), R(11),
- /* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(3),
+ /* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(9),
B(Star), R(13),
B(LdaConstant), U8(2),
- /* 77 E> */ B(LdrKeyedProperty), R(13), U8(3), R(12),
- /* 77 E> */ B(Call), R(12), R(13), U8(1), U8(1),
- B(Star), R(0),
- /* 74 S> */ B(LdrNamedProperty), R(0), U8(3), U8(7), R(13),
- /* 74 E> */ B(Call), R(13), R(0), U8(1), U8(5),
+ /* 77 E> */ B(LdrKeyedProperty), R(13), U8(4), R(12),
+ /* 77 E> */ B(Call), R(12), R(13), U8(1), U8(2),
B(Star), R(1),
- /* 74 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(1), U8(1),
+ /* 74 S> */ B(LdrNamedProperty), R(1), U8(3), U8(8), R(13),
+ /* 74 E> */ B(Call), R(13), R(1), U8(1), U8(6),
+ B(Star), R(2),
+ /* 74 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(1), U8(1),
- B(LdaNamedProperty), R(1), U8(4), U8(9),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
+ B(LdaNamedProperty), R(2), U8(4), U8(10),
B(JumpIfToBooleanTrue), U8(29),
- /* 67 E> */ B(LdrNamedProperty), R(1), U8(5), U8(11), R(3),
+ /* 67 E> */ B(LdrNamedProperty), R(2), U8(5), U8(12), R(4),
B(LdaSmi), U8(2),
- B(Star), R(2),
- B(Ldar), R(3),
- B(StaNamedPropertySloppy), R(6), U8(6), U8(13),
+ B(Star), R(3),
+ B(Ldar), R(4),
+ B(StaNamedPropertySloppy), R(0), U8(6), U8(14),
/* 62 E> */ B(StackCheck),
/* 88 S> */ B(Nop),
- /* 96 E> */ B(LdrNamedProperty), R(6), U8(6), U8(15), R(9),
+ /* 96 E> */ B(LdrNamedProperty), R(0), U8(6), U8(16), R(9),
B(LdaZero),
B(Star), R(8),
- B(Jump), U8(48),
- B(Jump), U8(34),
+ B(Jump), U8(51),
+ B(Jump), U8(37),
B(Star), R(12),
B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(7),
+ B(CreateCatchContext), R(12), U8(7), U8(8),
B(Star), R(11),
B(PushContext), R(7),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(2),
+ B(TestEqualStrict), R(3), U8(18),
B(JumpIfFalse), U8(6),
B(LdaSmi), U8(1),
- B(Star), R(2),
- B(LdrContextSlot), R(context), U8(4), R(12),
+ B(Star), R(3),
+ B(LdrContextSlot), R(context), U8(4), U8(0), R(12),
B(CallRuntime), U16(Runtime::kReThrow), R(12), U8(1),
B(PopContext), R(7),
B(LdaSmi), U8(-1),
@@ -495,60 +499,60 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Star), R(10),
B(LdaZero),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(116),
+ B(TestEqualStrict), R(3), U8(19),
+ B(JumpIfTrue), U8(121),
B(LdaUndefined),
- B(TestEqualStrict), R(0),
- B(JumpIfTrue), U8(111),
- B(LdrNamedProperty), R(0), U8(8), U8(17), R(4),
+ B(TestEqualStrict), R(1), U8(20),
+ B(JumpIfTrue), U8(115),
+ B(LdrNamedProperty), R(1), U8(9), U8(21), R(5),
B(LdaNull),
- B(TestEqual), R(4),
+ B(TestEqual), R(5), U8(23),
B(JumpIfFalse), U8(4),
- B(Jump), U8(99),
+ B(Jump), U8(102),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(2),
- B(JumpIfFalse), U8(68),
- B(Ldar), R(4),
+ B(TestEqualStrict), R(3), U8(24),
+ B(JumpIfFalse), U8(70),
+ B(Ldar), R(5),
B(TypeOf),
B(Star), R(11),
- B(LdaConstant), U8(9),
- B(TestEqualStrict), R(11),
+ B(LdaConstant), U8(10),
+ B(TestEqualStrict), R(11), U8(25),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), U16(129),
+ B(Wide), B(LdaSmi), U16(130),
B(Star), R(11),
- B(LdaConstant), U8(10),
+ B(LdaConstant), U8(11),
B(Star), R(12),
B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
B(Throw),
B(Mov), R(context), R(11),
- B(Mov), R(4), R(12),
- B(Mov), R(0), R(13),
+ B(Mov), R(5), R(12),
+ B(Mov), R(1), R(13),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(Jump), U8(22),
+ B(Jump), U8(23),
B(Star), R(12),
B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(7),
+ B(CreateCatchContext), R(12), U8(7), U8(12),
B(Star), R(11),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Ldar), R(11),
B(PushContext), R(7),
B(PopContext), R(7),
B(Jump), U8(27),
- B(Mov), R(4), R(11),
- B(Mov), R(0), R(12),
+ B(Mov), R(5), R(11),
+ B(Mov), R(1), R(12),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
- B(Star), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
+ B(Star), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(10), U8(1),
B(LdaZero),
- B(TestEqualStrict), R(8),
- B(JumpIfTrue), U8(10),
+ B(TestEqualStrict), R(8), U8(0),
+ B(JumpIfTrue), U8(11),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(8),
+ B(TestEqualStrict), R(8), U8(0),
B(JumpIfTrue), U8(7),
B(Jump), U8(8),
B(Ldar), R(9),
@@ -559,21 +563,23 @@ bytecodes: [
/* 105 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SYMBOL_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["function"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ FIXED_ARRAY_TYPE,
]
handlers: [
- [15, 128, 134],
+ [15, 131, 137],
[18, 94, 96],
- [207, 217, 219],
+ [215, 225, 227],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
index fd04c713a4..9a81b88a03 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -20,7 +18,7 @@ bytecodes: [
/* 55 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -37,11 +35,11 @@ bytecodes: [
/* 34 S> */ B(LdrUndefined), R(1),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
- /* 56 E> */ B(Call), R(0), R(1), U8(1), U8(1),
+ /* 56 E> */ B(Call), R(0), R(1), U8(1), U8(2),
/* 59 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -60,11 +58,11 @@ bytecodes: [
B(Star), R(0),
B(LdaSmi), U8(1),
B(Star), R(2),
- /* 67 E> */ B(Call), R(0), R(1), U8(2), U8(1),
+ /* 67 E> */ B(Call), R(0), R(1), U8(2), U8(2),
/* 71 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index 57dbfd153d..840aa9ae42 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: no
test function name: f
@@ -15,15 +13,15 @@ snippet: "
"
frame size: 11
parameter count: 1
-bytecode array length: 201
+bytecode array length: 212
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(20),
+ B(JumpIfUndefined), U8(21),
B(ResumeGenerator), R(new_target),
B(Star), R(1),
B(LdaZero),
- B(TestEqualStrict), R(1),
- B(JumpIfTrue), U8(57),
+ B(TestEqualStrict), R(1), U8(0),
+ B(JumpIfTrue), U8(61),
B(LdaSmi), U8(76),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
@@ -32,16 +30,16 @@ bytecodes: [
B(CreateFunctionContext), U8(2),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
/* 11 E> */ B(StackCheck),
B(Mov), R(context), R(4),
- /* 11 E> */ B(LdrContextSlot), R(context), U8(4), R(6),
+ /* 11 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(6),
B(Ldar), R(6),
B(Mov), R(closure), R(5),
B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(5), U8(2),
- B(StaContextSlot), R(context), U8(5),
+ B(StaContextSlot), R(context), U8(5), U8(0),
B(Star), R(5),
- B(LdrContextSlot), R(context), U8(5), R(6),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(6),
B(LdaZero),
B(SuspendGenerator), R(6),
B(Ldar), R(5),
@@ -53,10 +51,10 @@ bytecodes: [
B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(6), U8(1),
B(Star), R(8),
B(LdaZero),
- B(TestEqualStrict), R(8),
- B(JumpIfTrue), U8(31),
+ B(TestEqualStrict), R(8), U8(0),
+ B(JumpIfTrue), U8(32),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(8),
+ B(TestEqualStrict), R(8), U8(0),
B(JumpIfTrue), U8(22),
B(Jump), U8(2),
B(LdaTrue),
@@ -85,17 +83,17 @@ bytecodes: [
B(Star), R(2),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Star), R(4),
- B(LdrContextSlot), R(context), U8(5), R(5),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(5),
B(CallRuntime), U16(Runtime::k_GeneratorClose), R(5), U8(1),
B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
B(LdaZero),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(16),
+ B(TestEqualStrict), R(2), U8(0),
+ B(JumpIfTrue), U8(18),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(13),
+ B(TestEqualStrict), R(2), U8(0),
+ B(JumpIfTrue), U8(14),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(2),
+ B(TestEqualStrict), R(2), U8(0),
B(JumpIfTrue), U8(10),
B(Jump), U8(11),
B(Ldar), R(3),
@@ -110,7 +108,7 @@ bytecodes: [
constant pool: [
]
handlers: [
- [39, 138, 144],
+ [41, 145, 151],
]
---
@@ -120,17 +118,17 @@ snippet: "
"
frame size: 11
parameter count: 1
-bytecode array length: 294
+bytecode array length: 310
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(26),
+ B(JumpIfUndefined), U8(28),
B(ResumeGenerator), R(new_target),
B(Star), R(1),
B(LdaZero),
- B(TestEqualStrict), R(1),
- B(JumpIfTrue), U8(63),
+ B(TestEqualStrict), R(1), U8(0),
+ B(JumpIfTrue), U8(68),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(1),
+ B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrueConstant), U8(0),
B(LdaSmi), U8(76),
B(Star), R(2),
@@ -140,16 +138,16 @@ bytecodes: [
B(CreateFunctionContext), U8(2),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
/* 11 E> */ B(StackCheck),
B(Mov), R(context), R(4),
- /* 11 E> */ B(LdrContextSlot), R(context), U8(4), R(6),
+ /* 11 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(6),
B(Ldar), R(6),
B(Mov), R(closure), R(5),
B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(5), U8(2),
- B(StaContextSlot), R(context), U8(5),
+ B(StaContextSlot), R(context), U8(5), U8(0),
B(Star), R(5),
- B(LdrContextSlot), R(context), U8(5), R(6),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(6),
B(LdaZero),
B(SuspendGenerator), R(6),
B(Ldar), R(5),
@@ -161,10 +159,10 @@ bytecodes: [
B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(6), U8(1),
B(Star), R(8),
B(LdaZero),
- B(TestEqualStrict), R(8),
- B(JumpIfTrue), U8(31),
+ B(TestEqualStrict), R(8), U8(0),
+ B(JumpIfTrue), U8(32),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(8),
+ B(TestEqualStrict), R(8), U8(0),
B(JumpIfTrue), U8(22),
B(Jump), U8(2),
B(LdaTrue),
@@ -174,7 +172,7 @@ bytecodes: [
B(Star), R(3),
B(LdaZero),
B(Star), R(2),
- B(Jump), U8(113),
+ B(Jump), U8(116),
B(Ldar), R(7),
/* 11 E> */ B(Throw),
/* 16 S> */ B(LdaSmi), U8(42),
@@ -182,34 +180,34 @@ bytecodes: [
B(LdaFalse),
B(Star), R(6),
B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(5), U8(2),
- B(Star), R(7),
- B(LdrContextSlot), R(context), U8(5), R(5),
+ B(Star), R(5),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(6),
B(LdaSmi), U8(1),
- B(SuspendGenerator), R(5),
- B(Ldar), R(7),
+ B(SuspendGenerator), R(6),
+ B(Ldar), R(5),
/* 25 S> */ B(Return),
B(LdaSmi), U8(-2),
B(Star), R(1),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(5), U8(1),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(5), U8(1),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(6), U8(1),
+ B(Star), R(7),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(6), U8(1),
B(Star), R(8),
B(LdaZero),
- B(TestEqualStrict), R(8),
- B(JumpIfTrue), U8(32),
+ B(TestEqualStrict), R(8), U8(0),
+ B(JumpIfTrue), U8(33),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(8),
+ B(TestEqualStrict), R(8), U8(0),
B(JumpIfTrue), U8(23),
B(Jump), U8(2),
B(LdaTrue),
B(Star), R(10),
- B(Mov), R(6), R(9),
+ B(Mov), R(7), R(9),
B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(9), U8(2),
B(Star), R(3),
B(LdaSmi), U8(1),
B(Star), R(2),
B(Jump), U8(35),
- B(Ldar), R(6),
+ B(Ldar), R(7),
/* 16 E> */ B(Throw),
B(LdrUndefined), R(5),
B(LdaTrue),
@@ -227,20 +225,20 @@ bytecodes: [
B(Star), R(2),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Star), R(4),
- B(LdrContextSlot), R(context), U8(5), R(5),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(5),
B(CallRuntime), U16(Runtime::k_GeneratorClose), R(5), U8(1),
B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
B(LdaZero),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(22),
+ B(TestEqualStrict), R(2), U8(0),
+ B(JumpIfTrue), U8(25),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(19),
+ B(TestEqualStrict), R(2), U8(0),
+ B(JumpIfTrue), U8(21),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(16),
+ B(TestEqualStrict), R(2), U8(0),
+ B(JumpIfTrue), U8(17),
B(LdaSmi), U8(3),
- B(TestEqualStrict), R(2),
+ B(TestEqualStrict), R(2), U8(0),
B(JumpIfTrue), U8(13),
B(Jump), U8(14),
B(Ldar), R(3),
@@ -255,10 +253,10 @@ bytecodes: [
/* 25 S> */ B(Return),
]
constant pool: [
- kInstanceTypeDontCare,
+ Smi [141],
]
handlers: [
- [45, 222, 228],
+ [48, 233, 239],
]
---
@@ -266,339 +264,341 @@ snippet: "
function* f() { for (let x of [42]) yield x }
f();
"
-frame size: 18
+frame size: 17
parameter count: 1
-bytecode array length: 742
+bytecode array length: 805
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(26),
+ B(JumpIfUndefined), U8(28),
B(ResumeGenerator), R(new_target),
- B(Star), R(4),
+ B(Star), R(3),
B(LdaZero),
- B(TestEqualStrict), R(4),
- B(JumpIfTrue), U8(63),
+ B(TestEqualStrict), R(3), U8(0),
+ B(JumpIfTrue), U8(68),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(4),
+ B(TestEqualStrict), R(3), U8(0),
B(JumpIfTrueConstant), U8(3),
B(LdaSmi), U8(76),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kAbort), R(5), U8(1),
- B(LdaSmi), U8(-2),
B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kAbort), R(4), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(3),
B(CreateFunctionContext), U8(9),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
/* 11 E> */ B(StackCheck),
- B(Mov), R(context), R(7),
- /* 11 E> */ B(LdrContextSlot), R(context), U8(4), R(9),
- B(Ldar), R(9),
- B(Mov), R(closure), R(8),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(8), U8(2),
- B(StaContextSlot), R(context), U8(5),
- B(Star), R(8),
- B(LdrContextSlot), R(context), U8(5), R(9),
- B(LdaZero),
- B(SuspendGenerator), R(9),
+ B(Mov), R(context), R(6),
+ /* 11 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(8),
B(Ldar), R(8),
+ B(Mov), R(closure), R(7),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(7), U8(2),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ B(Star), R(7),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(8),
+ B(LdaZero),
+ B(SuspendGenerator), R(8),
+ B(Ldar), R(7),
/* 44 S> */ B(Return),
B(LdaSmi), U8(-2),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(9), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(8), U8(1),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(8), U8(1),
B(Star), R(10),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(9), U8(1),
- B(Star), R(11),
B(LdaZero),
- B(TestEqualStrict), R(11),
- B(JumpIfTrue), U8(31),
+ B(TestEqualStrict), R(10), U8(0),
+ B(JumpIfTrue), U8(32),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(11),
+ B(TestEqualStrict), R(10), U8(0),
B(JumpIfTrue), U8(22),
B(Jump), U8(2),
B(LdaTrue),
- B(Star), R(13),
- B(Mov), R(10), R(12),
- B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(12), U8(2),
- B(Star), R(6),
- B(LdaZero),
+ B(Star), R(12),
+ B(Mov), R(9), R(11),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(11), U8(2),
B(Star), R(5),
- B(JumpConstant), U8(17),
- B(Ldar), R(10),
+ B(LdaZero),
+ B(Star), R(4),
+ B(JumpConstant), U8(20),
+ B(Ldar), R(9),
/* 11 E> */ B(Throw),
B(Ldar), R(closure),
B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(LdaZero),
- B(StaContextSlot), R(1), U8(9),
+ B(StaContextSlot), R(1), U8(9), U8(0),
+ B(Mov), R(context), R(9),
B(Mov), R(context), R(10),
- B(Mov), R(context), R(11),
- /* 30 S> */ B(CreateArrayLiteral), U8(1), U8(0), U8(3),
- B(Star), R(13),
+ /* 30 S> */ B(CreateArrayLiteral), U8(1), U8(0), U8(9),
+ B(Star), R(12),
B(LdaConstant), U8(2),
- /* 30 E> */ B(LdrKeyedProperty), R(13), U8(3), R(12),
- /* 30 E> */ B(Call), R(12), R(13), U8(1), U8(1),
- /* 30 E> */ B(StaContextSlot), R(1), U8(7),
+ /* 30 E> */ B(LdrKeyedProperty), R(12), U8(4), R(11),
+ /* 30 E> */ B(Call), R(11), R(12), U8(1), U8(2),
+ /* 30 E> */ B(StaContextSlot), R(1), U8(7), U8(0),
B(LdaSmi), U8(-2),
- B(TestEqual), R(4),
- B(JumpIfTrue), U8(17),
+ B(TestEqual), R(3), U8(0),
+ B(JumpIfTrue), U8(18),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(4),
+ B(TestEqualStrict), R(3), U8(0),
B(JumpIfTrueConstant), U8(9),
B(LdaSmi), U8(76),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
- /* 27 S> */ B(LdrContextSlot), R(1), U8(7), R(14),
- B(LdrNamedProperty), R(14), U8(4), U8(7), R(13),
- /* 27 E> */ B(Call), R(13), R(14), U8(1), U8(5),
- /* 27 E> */ B(StaContextSlot), R(1), U8(8),
- B(Star), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(12), U8(1),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kAbort), R(11), U8(1),
+ /* 27 S> */ B(LdrContextSlot), R(1), U8(7), U8(0), R(13),
+ B(LdrNamedProperty), R(13), U8(4), U8(8), R(12),
+ /* 27 E> */ B(Call), R(12), R(13), U8(1), U8(6),
+ /* 27 E> */ B(StaContextSlot), R(1), U8(8), U8(0),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(11),
- B(LdrContextSlot), R(1), U8(8), R(12),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(LdrContextSlot), R(1), U8(8), R(12),
- B(LdaNamedProperty), R(12), U8(5), U8(9),
+ B(JumpIfFalse), U8(12),
+ B(LdrContextSlot), R(1), U8(8), U8(0), R(11),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(LdrContextSlot), R(1), U8(8), U8(0), R(11),
+ B(LdaNamedProperty), R(11), U8(5), U8(10),
B(JumpIfToBooleanTrueConstant), U8(10),
- B(LdrContextSlot), R(1), U8(8), R(12),
- B(LdaNamedProperty), R(12), U8(6), U8(11),
- B(StaContextSlot), R(1), U8(10),
+ B(LdrContextSlot), R(1), U8(8), U8(0), R(11),
+ B(LdaNamedProperty), R(11), U8(6), U8(12),
+ B(StaContextSlot), R(1), U8(10), U8(0),
B(LdaSmi), U8(2),
- B(StaContextSlot), R(1), U8(9),
- B(LdaContextSlot), R(1), U8(10),
- B(StaContextSlot), R(1), U8(6),
+ B(StaContextSlot), R(1), U8(9), U8(0),
+ B(LdaContextSlot), R(1), U8(10), U8(0),
+ B(StaContextSlot), R(1), U8(6), U8(0),
/* 16 E> */ B(StackCheck),
B(Ldar), R(closure),
B(CreateBlockContext), U8(7),
B(PushContext), R(2),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(LdaContextSlot), R(1), U8(6),
- B(StaContextSlot), R(context), U8(4),
- /* 36 S> */ B(LdaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ B(LdaContextSlot), R(1), U8(6), U8(0),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 36 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(8),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(13), U8(1),
- B(Star), R(12),
+ B(Star), R(11),
B(LdaFalse),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(12), U8(2),
- B(Star), R(14),
- B(LdrContextSlot), R(1), U8(5), R(12),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(11), U8(2),
+ B(Star), R(11),
+ B(LdrContextSlot), R(1), U8(5), U8(0), R(12),
B(LdaSmi), U8(1),
B(SuspendGenerator), R(12),
- B(Ldar), R(14),
+ B(Ldar), R(11),
/* 44 S> */ B(Return),
B(LdaSmi), U8(-2),
- B(Star), R(4),
+ B(Star), R(3),
B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(12), U8(1),
B(Star), R(13),
B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(12), U8(1),
- B(Star), R(15),
+ B(Star), R(14),
B(LdaZero),
- B(TestEqualStrict), R(15),
- B(JumpIfTrue), U8(43),
+ B(TestEqualStrict), R(14), U8(0),
+ B(JumpIfTrue), U8(44),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(15),
+ B(TestEqualStrict), R(14), U8(0),
B(JumpIfTrue), U8(34),
B(Jump), U8(2),
B(LdaTrue),
- B(Star), R(17),
- B(Mov), R(13), R(16),
- B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(16), U8(2),
+ B(Star), R(16),
+ B(Mov), R(13), R(15),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(15), U8(2),
B(PopContext), R(2),
B(PopContext), R(2),
B(PopContext), R(2),
B(PopContext), R(2),
B(PopContext), R(2),
B(PopContext), R(2),
- B(Star), R(9),
- B(LdaZero),
B(Star), R(8),
- B(Jump), U8(68),
+ B(LdaZero),
+ B(Star), R(7),
+ B(Jump), U8(74),
B(Ldar), R(13),
/* 36 E> */ B(Throw),
- B(Ldar), R(13),
B(PopContext), R(2),
B(LdaZero),
- B(StaContextSlot), R(1), U8(9),
- B(Wide), B(Jump), U16(-215),
- B(Jump), U8(39),
- B(Star), R(12),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(11),
+ B(StaContextSlot), R(1), U8(9), U8(0),
+ B(Wide), B(JumpLoop), U16(-232), U16(0),
+ B(Jump), U8(44),
B(Star), R(11),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(11), U8(11), U8(12),
+ B(Star), R(10),
B(PushContext), R(2),
- B(LdrContextSlot), R(0), U8(9), R(12),
+ B(LdrContextSlot), R(1), U8(9), U8(0), R(11),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(12),
- B(JumpIfFalse), U8(7),
+ B(TestEqualStrict), R(11), U8(14),
+ B(JumpIfFalse), U8(8),
B(LdaSmi), U8(1),
- B(StaContextSlot), R(0), U8(9),
- B(LdrContextSlot), R(context), U8(4), R(12),
- B(CallRuntime), U16(Runtime::kReThrow), R(12), U8(1),
+ B(StaContextSlot), R(1), U8(9), U8(0),
+ B(LdrContextSlot), R(context), U8(4), U8(0), R(11),
+ B(CallRuntime), U16(Runtime::kReThrow), R(11), U8(1),
B(PopContext), R(2),
B(LdaSmi), U8(-1),
- B(Star), R(8),
+ B(Star), R(7),
B(Jump), U8(8),
- B(Star), R(9),
- B(LdaSmi), U8(1),
B(Star), R(8),
+ B(LdaSmi), U8(1),
+ B(Star), R(7),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Star), R(10),
- B(LdrContextSlot), R(1), U8(9), R(11),
+ B(Star), R(9),
+ B(LdrContextSlot), R(1), U8(9), U8(0), R(10),
B(LdaZero),
- B(TestEqualStrict), R(11),
- B(JumpIfTrueConstant), U8(15),
- B(LdrContextSlot), R(1), U8(7), R(11),
+ B(TestEqualStrict), R(10), U8(15),
+ B(JumpIfTrueConstant), U8(18),
+ B(LdrContextSlot), R(1), U8(7), U8(0), R(10),
B(LdaUndefined),
- B(TestEqualStrict), R(11),
- B(JumpIfTrueConstant), U8(16),
- B(LdrContextSlot), R(1), U8(7), R(11),
- B(LdaNamedProperty), R(11), U8(12), U8(13),
- B(StaContextSlot), R(1), U8(11),
- B(LdrContextSlot), R(1), U8(11), R(11),
+ B(TestEqualStrict), R(10), U8(16),
+ B(JumpIfTrueConstant), U8(19),
+ B(LdrContextSlot), R(1), U8(7), U8(0), R(10),
+ B(LdaNamedProperty), R(10), U8(13), U8(17),
+ B(StaContextSlot), R(1), U8(11), U8(0),
+ B(LdrContextSlot), R(1), U8(11), U8(0), R(10),
B(LdaNull),
- B(TestEqual), R(11),
+ B(TestEqual), R(10), U8(19),
B(JumpIfFalse), U8(4),
- B(Jump), U8(117),
- B(LdrContextSlot), R(1), U8(9), R(11),
+ B(JumpConstant), U8(17),
+ B(LdrContextSlot), R(1), U8(9), U8(0), R(10),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(11),
- B(JumpIfFalse), U8(71),
- B(LdaContextSlot), R(1), U8(11),
+ B(TestEqualStrict), R(10), U8(20),
+ B(JumpIfFalse), U8(76),
+ B(LdaContextSlot), R(1), U8(11), U8(0),
B(TypeOf),
- B(Star), R(11),
- B(LdaConstant), U8(13),
- B(TestEqualStrict), R(11),
+ B(Star), R(10),
+ B(LdaConstant), U8(14),
+ B(TestEqualStrict), R(10), U8(21),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), U16(129),
+ B(Wide), B(LdaSmi), U16(130),
+ B(Star), R(10),
+ B(LdaConstant), U8(15),
B(Star), R(11),
- B(LdaConstant), U8(14),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(10), U8(2),
B(Throw),
- B(Mov), R(context), R(11),
- B(LdrContextSlot), R(1), U8(11), R(12),
- B(LdrContextSlot), R(1), U8(7), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(Jump), U8(22),
- B(Star), R(12),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(11),
+ B(Mov), R(context), R(10),
+ B(LdrContextSlot), R(1), U8(11), U8(0), R(11),
+ B(LdrContextSlot), R(1), U8(7), U8(0), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
+ B(Jump), U8(23),
B(Star), R(11),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(11), U8(11), U8(16),
+ B(Star), R(10),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Ldar), R(11),
+ B(Ldar), R(10),
B(PushContext), R(2),
B(PopContext), R(2),
- B(Jump), U8(38),
- B(LdrContextSlot), R(1), U8(11), R(11),
- B(LdrContextSlot), R(1), U8(7), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
- B(StaContextSlot), R(1), U8(12),
- B(LdrContextSlot), R(1), U8(12), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
+ B(Jump), U8(43),
+ B(LdrContextSlot), R(1), U8(11), U8(0), R(10),
+ B(LdrContextSlot), R(1), U8(7), U8(0), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(10), U8(2),
+ B(StaContextSlot), R(1), U8(12), U8(0),
+ B(LdrContextSlot), R(1), U8(12), U8(0), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(11),
- B(LdrContextSlot), R(1), U8(12), R(11),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(10), U8(1),
+ B(Jump), U8(12),
+ B(LdrContextSlot), R(1), U8(12), U8(0), R(10),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(9), U8(1),
B(LdaZero),
- B(TestEqualStrict), R(8),
- B(JumpIfTrue), U8(10),
+ B(TestEqualStrict), R(7), U8(0),
+ B(JumpIfTrue), U8(11),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(8),
+ B(TestEqualStrict), R(7), U8(0),
B(JumpIfTrue), U8(17),
B(Jump), U8(28),
B(PopContext), R(1),
B(PopContext), R(1),
B(LdaSmi), U8(1),
- B(Star), R(5),
- B(Mov), R(9), R(6),
+ B(Star), R(4),
+ B(Mov), R(8), R(5),
B(Jump), U8(47),
B(PopContext), R(1),
B(PopContext), R(1),
B(LdaSmi), U8(2),
- B(Star), R(5),
- B(Mov), R(9), R(6),
+ B(Star), R(4),
+ B(Mov), R(8), R(5),
B(Jump), U8(34),
B(PopContext), R(1),
- B(LdrUndefined), R(8),
+ B(LdrUndefined), R(7),
B(LdaTrue),
- B(Star), R(9),
- B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(8), U8(2),
- B(Star), R(6),
- B(LdaSmi), U8(3),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(7), U8(2),
B(Star), R(5),
+ B(LdaSmi), U8(3),
+ B(Star), R(4),
B(Jump), U8(14),
B(LdaSmi), U8(-1),
- B(Star), R(5),
+ B(Star), R(4),
B(Jump), U8(8),
- B(Star), R(6),
- B(LdaSmi), U8(4),
B(Star), R(5),
+ B(LdaSmi), U8(4),
+ B(Star), R(4),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Star), R(7),
- B(LdrContextSlot), R(context), U8(5), R(8),
- B(CallRuntime), U16(Runtime::k_GeneratorClose), R(8), U8(1),
- B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(7), U8(1),
+ B(Star), R(6),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(7),
+ B(CallRuntime), U16(Runtime::k_GeneratorClose), R(7), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(6), U8(1),
B(LdaZero),
- B(TestEqualStrict), R(5),
- B(JumpIfTrue), U8(28),
+ B(TestEqualStrict), R(4), U8(0),
+ B(JumpIfTrue), U8(32),
B(LdaSmi), U8(1),
- B(TestEqualStrict), R(5),
- B(JumpIfTrue), U8(25),
+ B(TestEqualStrict), R(4), U8(0),
+ B(JumpIfTrue), U8(28),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(5),
- B(JumpIfTrue), U8(22),
+ B(TestEqualStrict), R(4), U8(0),
+ B(JumpIfTrue), U8(24),
B(LdaSmi), U8(3),
- B(TestEqualStrict), R(5),
- B(JumpIfTrue), U8(19),
+ B(TestEqualStrict), R(4), U8(0),
+ B(JumpIfTrue), U8(20),
B(LdaSmi), U8(4),
- B(TestEqualStrict), R(5),
+ B(TestEqualStrict), R(4), U8(0),
B(JumpIfTrue), U8(16),
B(Jump), U8(17),
- B(Ldar), R(6),
+ B(Ldar), R(5),
/* 44 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(5),
/* 44 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(5),
B(ReThrow),
- B(Ldar), R(6),
+ B(Ldar), R(5),
/* 44 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(5),
B(ReThrow),
B(LdaUndefined),
/* 44 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SYMBOL_TYPE,
- kInstanceTypeDontCare,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- kInstanceTypeDontCare,
- kInstanceTypeDontCare,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- kInstanceTypeDontCare,
- kInstanceTypeDontCare,
- kInstanceTypeDontCare,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SYMBOL_TYPE,
+ Smi [158],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
+ Smi [146],
+ Smi [167],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["function"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ FIXED_ARRAY_TYPE,
+ Smi [129],
+ Smi [166],
+ Smi [155],
+ Smi [601],
]
handlers: [
- [45, 661, 667],
- [143, 423, 429],
- [146, 384, 386],
- [525, 537, 539],
+ [48, 718, 724],
+ [153, 458, 464],
+ [156, 414, 416],
+ [572, 586, 588],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
index 166f7f0351..f222e9034b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: no
test function name: f
@@ -19,13 +17,13 @@ parameter count: 1
bytecode array length: 12
bytecodes: [
/* 26 E> */ B(StackCheck),
- /* 31 S> */ B(LdrGlobal), U8(1), R(0),
- B(BitwiseAndSmi), U8(1), R(0), U8(3),
- /* 45 E> */ B(StaGlobalSloppy), U8(0), U8(4),
+ /* 31 S> */ B(LdrGlobal), U8(2), R(0),
+ B(BitwiseAndSmi), U8(1), R(0), U8(4),
+ /* 45 E> */ B(StaGlobalSloppy), U8(0), U8(5),
/* 51 S> */ B(Return),
]
constant pool: [
- "global",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["global"],
]
handlers: [
]
@@ -41,13 +39,13 @@ parameter count: 1
bytecode array length: 12
bytecodes: [
/* 27 E> */ B(StackCheck),
- /* 32 S> */ B(LdrGlobal), U8(1), R(0),
- B(AddSmi), U8(1), R(0), U8(3),
- /* 51 E> */ B(StaGlobalSloppy), U8(0), U8(4),
+ /* 32 S> */ B(LdrGlobal), U8(2), R(0),
+ B(AddSmi), U8(1), R(0), U8(4),
+ /* 51 E> */ B(StaGlobalSloppy), U8(0), U8(5),
/* 57 S> */ B(Return),
]
constant pool: [
- "unallocated",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["unallocated"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
index 6a2406ad12..2c6616bb58 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: no
test function name: f
@@ -19,13 +17,13 @@ parameter count: 1
bytecode array length: 9
bytecodes: [
/* 26 E> */ B(StackCheck),
- /* 31 S> */ B(LdaGlobal), U8(1),
- B(Inc), U8(5),
- /* 40 E> */ B(StaGlobalSloppy), U8(0), U8(3),
+ /* 31 S> */ B(LdaGlobal), U8(2),
+ B(Inc), U8(6),
+ /* 40 E> */ B(StaGlobalSloppy), U8(0), U8(4),
/* 48 S> */ B(Return),
]
constant pool: [
- "global",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["global"],
]
handlers: [
]
@@ -41,15 +39,15 @@ parameter count: 1
bytecode array length: 13
bytecodes: [
/* 26 E> */ B(StackCheck),
- /* 31 S> */ B(LdaGlobal), U8(1),
+ /* 31 S> */ B(LdaGlobal), U8(2),
B(ToNumber), R(0),
- B(Dec), U8(5),
- /* 44 E> */ B(StaGlobalSloppy), U8(0), U8(3),
+ B(Dec), U8(6),
+ /* 44 E> */ B(StaGlobalSloppy), U8(0), U8(4),
B(Ldar), R(0),
/* 48 S> */ B(Return),
]
constant pool: [
- "global",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["global"],
]
handlers: [
]
@@ -65,13 +63,13 @@ parameter count: 1
bytecode array length: 9
bytecodes: [
/* 27 E> */ B(StackCheck),
- /* 46 S> */ B(LdaGlobal), U8(1),
- B(Dec), U8(5),
- /* 55 E> */ B(StaGlobalStrict), U8(0), U8(3),
+ /* 46 S> */ B(LdaGlobal), U8(2),
+ B(Dec), U8(6),
+ /* 55 E> */ B(StaGlobalStrict), U8(0), U8(4),
/* 68 S> */ B(Return),
]
constant pool: [
- "unallocated",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["unallocated"],
]
handlers: [
]
@@ -87,15 +85,15 @@ parameter count: 1
bytecode array length: 13
bytecodes: [
/* 27 E> */ B(StackCheck),
- /* 32 S> */ B(LdaGlobal), U8(1),
+ /* 32 S> */ B(LdaGlobal), U8(2),
B(ToNumber), R(0),
- B(Inc), U8(5),
- /* 50 E> */ B(StaGlobalSloppy), U8(0), U8(3),
+ B(Inc), U8(6),
+ /* 50 E> */ B(StaGlobalSloppy), U8(0), U8(4),
B(Ldar), R(0),
/* 54 S> */ B(Return),
]
constant pool: [
- "unallocated",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["unallocated"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
index adead06c5c..66583f3389 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: no
test function name: f
@@ -21,13 +19,13 @@ parameter count: 1
bytecode array length: 9
bytecodes: [
/* 32 E> */ B(StackCheck),
- /* 39 S> */ B(LdrGlobal), U8(1), R(0),
+ /* 39 S> */ B(LdrGlobal), U8(2), R(0),
B(LdaConstant), U8(0),
B(DeletePropertySloppy), R(0),
/* 58 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -46,7 +44,7 @@ parameter count: 1
bytecode array length: 9
bytecodes: [
/* 28 E> */ B(StackCheck),
- /* 51 S> */ B(LdrGlobal), U8(1), R(0),
+ /* 51 S> */ B(LdrGlobal), U8(2), R(0),
B(LdaSmi), U8(1),
B(DeletePropertyStrict), R(0),
/* 71 S> */ B(Return),
@@ -66,17 +64,17 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 16
bytecodes: [
/* 32 E> */ B(StackCheck),
- /* 39 S> */ B(LdrContextSlot), R(context), U8(3), R(0),
- B(LdrContextSlot), R(0), U8(2), R(1),
+ /* 39 S> */ B(LdrContextSlot), R(context), U8(3), U8(0), R(0),
+ B(LdrContextSlot), R(0), U8(2), U8(0), R(1),
B(LdaConstant), U8(0),
B(DeletePropertySloppy), R(1),
/* 56 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
]
handlers: [
]
@@ -91,17 +89,17 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 16
bytecodes: [
/* 18 E> */ B(StackCheck),
- /* 25 S> */ B(LdrContextSlot), R(context), U8(3), R(0),
- B(LdrContextSlot), R(0), U8(2), R(1),
+ /* 25 S> */ B(LdrContextSlot), R(context), U8(3), U8(0), R(0),
+ B(LdrContextSlot), R(0), U8(2), U8(0), R(1),
B(LdaConstant), U8(0),
B(DeletePropertySloppy), R(1),
/* 42 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
index f70321aa99..14bf7088e6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: yes
---
@@ -20,7 +18,7 @@ bytecodes: [
/* 46 S> */ B(Return),
]
constant pool: [
- 1.2,
+ HEAP_NUMBER_TYPE [1.2],
]
handlers: [
]
@@ -40,8 +38,8 @@ bytecodes: [
/* 59 S> */ B(Return),
]
constant pool: [
- 1.2,
- 2.6,
+ HEAP_NUMBER_TYPE [1.2],
+ HEAP_NUMBER_TYPE [2.6],
]
handlers: [
]
@@ -61,8 +59,8 @@ bytecodes: [
/* 61 S> */ B(Return),
]
constant pool: [
- 3.14,
- 3.14,
+ HEAP_NUMBER_TYPE [3.14],
+ HEAP_NUMBER_TYPE [3.14],
]
handlers: [
]
@@ -850,263 +848,263 @@ bytecodes: [
/* 2867 S> */ B(Return),
]
constant pool: [
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 1.414,
- 3.14,
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [1.414],
+ HEAP_NUMBER_TYPE [3.14],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
index c375fb1e1e..17ee039b91 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: no
test function name: f
@@ -122,7 +120,7 @@ bytecodes: [
/* 25 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 30 S> */ B(JumpIfToBooleanFalse), U8(13),
- /* 43 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ /* 43 S> */ B(AddSmi), U8(1), R(0), U8(2),
B(Mov), R(0), R(1),
B(Star), R(0),
B(Jump), U8(5),
@@ -149,11 +147,11 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 18
+bytecode array length: 19
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 18 S> */ B(LdaZero),
- /* 24 E> */ B(TestLessThanOrEqual), R(arg0),
+ /* 24 E> */ B(TestLessThanOrEqual), R(arg0), U8(2),
B(JumpIfFalse), U8(7),
/* 36 S> */ B(Wide), B(LdaSmi), U16(200),
/* 80 S> */ B(Return),
@@ -260,7 +258,7 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 409
+bytecode array length: 410
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 24 S> */ B(LdaZero),
@@ -268,7 +266,7 @@ bytecodes: [
/* 35 S> */ B(LdaZero),
B(Star), R(1),
/* 38 S> */ B(LdaConstant), U8(0),
- /* 44 E> */ B(TestEqualStrict), R(0),
+ /* 44 E> */ B(TestEqualStrict), R(0), U8(2),
B(JumpIfFalseConstant), U8(1),
/* 58 S> */ B(Mov), R(0), R(1),
/* 65 S> */ B(Mov), R(1), R(0),
@@ -406,8 +404,8 @@ bytecodes: [
/* 1117 S> */ B(Return),
]
constant pool: [
- InstanceType::HEAP_NUMBER_TYPE,
- kInstanceTypeDontCare,
+ HEAP_NUMBER_TYPE [0.01],
+ Smi [391],
]
handlers: [
]
@@ -632,7 +630,7 @@ bytecodes: [
/* 1112 S> */ B(Return),
]
constant pool: [
- kInstanceTypeDontCare,
+ Smi [391],
]
handlers: [
]
@@ -654,36 +652,36 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 75
+bytecode array length: 81
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 21 S> */ B(Ldar), R(arg1),
- /* 27 E> */ B(TestEqual), R(arg0),
+ /* 27 E> */ B(TestEqual), R(arg0), U8(2),
B(JumpIfFalse), U8(5),
/* 35 S> */ B(LdaSmi), U8(1),
/* 262 S> */ B(Return),
/* 49 S> */ B(Ldar), R(arg1),
- /* 55 E> */ B(TestEqualStrict), R(arg0),
+ /* 55 E> */ B(TestEqualStrict), R(arg0), U8(3),
B(JumpIfFalse), U8(5),
/* 64 S> */ B(LdaSmi), U8(1),
/* 262 S> */ B(Return),
/* 78 S> */ B(Ldar), R(arg1),
- /* 84 E> */ B(TestLessThan), R(arg0),
+ /* 84 E> */ B(TestLessThan), R(arg0), U8(4),
B(JumpIfFalse), U8(5),
/* 91 S> */ B(LdaSmi), U8(1),
/* 262 S> */ B(Return),
/* 105 S> */ B(Ldar), R(arg1),
- /* 111 E> */ B(TestGreaterThan), R(arg0),
+ /* 111 E> */ B(TestGreaterThan), R(arg0), U8(5),
B(JumpIfFalse), U8(5),
/* 118 S> */ B(LdaSmi), U8(1),
/* 262 S> */ B(Return),
/* 132 S> */ B(Ldar), R(arg1),
- /* 138 E> */ B(TestLessThanOrEqual), R(arg0),
+ /* 138 E> */ B(TestLessThanOrEqual), R(arg0), U8(6),
B(JumpIfFalse), U8(5),
/* 146 S> */ B(LdaSmi), U8(1),
/* 262 S> */ B(Return),
/* 160 S> */ B(Ldar), R(arg1),
- /* 166 E> */ B(TestGreaterThanOrEqual), R(arg0),
+ /* 166 E> */ B(TestGreaterThanOrEqual), R(arg0), U8(7),
B(JumpIfFalse), U8(5),
/* 174 S> */ B(LdaSmi), U8(1),
/* 262 S> */ B(Return),
@@ -752,22 +750,22 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 32
+bytecode array length: 36
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 21 S> */ B(Ldar), R(arg1),
- /* 27 E> */ B(TestEqual), R(arg0),
- B(JumpIfTrue), U8(7),
+ /* 27 E> */ B(TestEqual), R(arg0), U8(2),
+ B(JumpIfTrue), U8(8),
B(LdaZero),
- /* 37 E> */ B(TestLessThan), R(arg0),
+ /* 37 E> */ B(TestLessThan), R(arg0), U8(3),
B(JumpIfFalse), U8(5),
/* 48 S> */ B(LdaSmi), U8(1),
/* 133 S> */ B(Return),
/* 67 S> */ B(LdaZero),
- /* 73 E> */ B(TestGreaterThan), R(arg0),
- B(JumpIfFalse), U8(9),
+ /* 73 E> */ B(TestGreaterThan), R(arg0), U8(4),
+ B(JumpIfFalse), U8(10),
B(LdaZero),
- /* 82 E> */ B(TestGreaterThan), R(arg1),
+ /* 82 E> */ B(TestGreaterThan), R(arg1), U8(5),
B(JumpIfFalse), U8(4),
/* 93 S> */ B(LdaZero),
/* 133 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
index 6ac81a606b..f71907ba4d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: yes
---
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden
index a5efe58049..99e7eac9c2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: yes
---
@@ -329,7 +327,7 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 1408
+bytecode array length: 1412
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
@@ -959,337 +957,337 @@ bytecodes: [
/* 4103 S> */ B(LdaZero),
B(Star), R(1),
/* 4108 S> */ B(LdaSmi), U8(3),
- /* 4108 E> */ B(TestLessThan), R(1),
- B(Wide), B(JumpIfFalse), U16(36),
+ /* 4108 E> */ B(TestLessThan), R(1), U8(2),
+ B(Wide), B(JumpIfFalse), U16(39),
/* 4090 E> */ B(StackCheck),
/* 4122 S> */ B(LdaSmi), U8(1),
- /* 4128 E> */ B(TestEqual), R(1),
+ /* 4128 E> */ B(TestEqual), R(1), U8(4),
B(Wide), B(JumpIfFalse), U16(7),
- /* 4134 S> */ B(Wide), B(Jump), U16(15),
+ /* 4134 S> */ B(Wide), B(Jump), U16(16),
/* 4146 S> */ B(LdaSmi), U8(2),
- /* 4152 E> */ B(TestEqual), R(1),
+ /* 4152 E> */ B(TestEqual), R(1), U8(5),
B(Wide), B(JumpIfFalse), U16(7),
- /* 4158 S> */ B(Wide), B(Jump), U16(11),
+ /* 4158 S> */ B(Wide), B(Jump), U16(12),
/* 4114 S> */ B(Ldar), R(1),
- B(Inc), U8(1),
+ B(Inc), U8(3),
B(Star), R(1),
- B(Jump), U8(-39),
+ B(JumpLoop), U8(-42), U8(0),
/* 4167 S> */ B(LdaSmi), U8(3),
/* 4177 S> */ B(Return),
]
constant pool: [
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.1,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.2,
- 0.3,
- 0.3,
- 0.3,
- 0.3,
- 0.3,
- 0.3,
- 0.3,
- 0.3,
- 0.3,
- 0.3,
- 0.3,
- 0.3,
- 0.3,
- 0.3,
- 0.3,
- 0.3,
- 0.4,
- 0.4,
- 0.4,
- 0.4,
- 0.4,
- 0.4,
- 0.4,
- 0.4,
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.1],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.2],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.3],
+ HEAP_NUMBER_TYPE [0.4],
+ HEAP_NUMBER_TYPE [0.4],
+ HEAP_NUMBER_TYPE [0.4],
+ HEAP_NUMBER_TYPE [0.4],
+ HEAP_NUMBER_TYPE [0.4],
+ HEAP_NUMBER_TYPE [0.4],
+ HEAP_NUMBER_TYPE [0.4],
+ HEAP_NUMBER_TYPE [0.4],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
index 4dbbdafd5e..d9d8f79e0a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: yes
---
@@ -48,7 +46,7 @@ bytecodes: [
/* 56 S> */ B(Return),
]
constant pool: [
- "x",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -59,7 +57,7 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 26
+bytecode array length: 29
bytecodes: [
B(LdaTheHole),
B(Star), R(0),
@@ -72,11 +70,12 @@ bytecodes: [
B(Star), R(2),
/* 45 E> */ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
B(Mov), R(1), R(0),
+ B(Mov), R(1), R(0),
B(LdaUndefined),
/* 52 S> */ B(Return),
]
constant pool: [
- "x",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -106,7 +105,7 @@ bytecodes: [
/* 54 S> */ B(Return),
]
constant pool: [
- "x",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
index 0b25fbf329..eb2a5c6b47 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -13,22 +11,22 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 21
+bytecode array length: 23
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
B(LdaUndefined),
/* 72 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -39,18 +37,18 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 34
+bytecode array length: 37
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4),
- /* 72 S> */ B(LdaContextSlot), R(context), U8(4),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 72 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(1),
B(Star), R(2),
@@ -58,8 +56,8 @@ bytecodes: [
/* 82 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -70,31 +68,31 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 42
+bytecode array length: 46
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaSmi), U8(20),
B(Star), R(2),
- /* 45 E> */ B(LdaContextSlot), R(context), U8(4),
+ /* 45 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(1),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
B(Ldar), R(2),
- B(StaContextSlot), R(context), U8(4),
- /* 45 E> */ B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 45 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
B(LdaUndefined),
/* 78 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -105,32 +103,32 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 44
+bytecode array length: 48
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
/* 46 S> */ B(LdaSmi), U8(20),
B(Star), R(2),
- /* 48 E> */ B(LdaContextSlot), R(context), U8(4),
+ /* 48 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(1),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
B(Ldar), R(2),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(LdaUndefined),
/* 80 S> */ B(Return),
]
constant pool: [
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
index dd9f714394..9c1c1b3701 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: no
test function name: f
@@ -19,7 +17,7 @@ parameter count: 1
bytecode array length: 4
bytecodes: [
/* 21 E> */ B(StackCheck),
- /* 26 S> */ B(LdaGlobal), U8(1),
+ /* 26 S> */ B(LdaGlobal), U8(2),
/* 36 S> */ B(Return),
]
constant pool: [
@@ -38,7 +36,7 @@ parameter count: 1
bytecode array length: 4
bytecodes: [
/* 27 E> */ B(StackCheck),
- /* 32 S> */ B(LdaGlobal), U8(1),
+ /* 32 S> */ B(LdaGlobal), U8(2),
/* 42 S> */ B(Return),
]
constant pool: [
@@ -57,7 +55,7 @@ parameter count: 1
bytecode array length: 4
bytecodes: [
/* 17 E> */ B(StackCheck),
- /* 22 S> */ B(LdaGlobal), U8(1),
+ /* 22 S> */ B(LdaGlobal), U8(2),
/* 32 S> */ B(Return),
]
constant pool: [
@@ -203,270 +201,270 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 646
+bytecode array length: 650
bytecodes: [
/* 17 E> */ B(StackCheck),
/* 25 S> */ B(Nop),
- /* 26 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(1),
+ /* 26 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
/* 35 S> */ B(Nop),
- /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
+ /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
/* 45 S> */ B(Nop),
- /* 46 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
+ /* 46 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(6),
/* 55 S> */ B(Nop),
- /* 56 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(7),
+ /* 56 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(8),
/* 65 S> */ B(Nop),
- /* 66 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
+ /* 66 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(10),
/* 75 S> */ B(Nop),
- /* 76 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(11),
+ /* 76 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(12),
/* 85 S> */ B(Nop),
- /* 86 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(13),
+ /* 86 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(14),
/* 95 S> */ B(Nop),
- /* 96 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(15),
+ /* 96 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(16),
/* 105 S> */ B(Nop),
- /* 106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(17),
+ /* 106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(18),
/* 115 S> */ B(Nop),
- /* 116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(19),
+ /* 116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(20),
/* 125 S> */ B(Nop),
- /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(21),
+ /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(22),
/* 135 S> */ B(Nop),
- /* 136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(23),
+ /* 136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(24),
/* 145 S> */ B(Nop),
- /* 146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(25),
+ /* 146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(26),
/* 155 S> */ B(Nop),
- /* 156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(27),
+ /* 156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(28),
/* 165 S> */ B(Nop),
- /* 166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(29),
+ /* 166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(30),
/* 175 S> */ B(Nop),
- /* 176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(31),
+ /* 176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(32),
/* 185 S> */ B(Nop),
- /* 186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(33),
+ /* 186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(34),
/* 195 S> */ B(Nop),
- /* 196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(35),
+ /* 196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(36),
/* 205 S> */ B(Nop),
- /* 206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(37),
+ /* 206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(38),
/* 215 S> */ B(Nop),
- /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(39),
+ /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(40),
/* 225 S> */ B(Nop),
- /* 226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(41),
+ /* 226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(42),
/* 235 S> */ B(Nop),
- /* 236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(43),
+ /* 236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(44),
/* 245 S> */ B(Nop),
- /* 246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(45),
+ /* 246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(46),
/* 255 S> */ B(Nop),
- /* 256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(47),
+ /* 256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(48),
/* 265 S> */ B(Nop),
- /* 266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(49),
+ /* 266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(50),
/* 275 S> */ B(Nop),
- /* 276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(51),
+ /* 276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(52),
/* 285 S> */ B(Nop),
- /* 286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(53),
+ /* 286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(54),
/* 295 S> */ B(Nop),
- /* 296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(55),
+ /* 296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(56),
/* 305 S> */ B(Nop),
- /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(57),
+ /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(58),
/* 315 S> */ B(Nop),
- /* 316 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(59),
+ /* 316 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(60),
/* 325 S> */ B(Nop),
- /* 326 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(61),
+ /* 326 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(62),
/* 335 S> */ B(Nop),
- /* 336 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(63),
+ /* 336 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(64),
/* 345 S> */ B(Nop),
- /* 346 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(65),
+ /* 346 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(66),
/* 355 S> */ B(Nop),
- /* 356 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(67),
+ /* 356 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(68),
/* 365 S> */ B(Nop),
- /* 366 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(69),
+ /* 366 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(70),
/* 375 S> */ B(Nop),
- /* 376 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(71),
+ /* 376 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(72),
/* 385 S> */ B(Nop),
- /* 386 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(73),
+ /* 386 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(74),
/* 395 S> */ B(Nop),
- /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(75),
+ /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(76),
/* 405 S> */ B(Nop),
- /* 406 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(77),
+ /* 406 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(78),
/* 415 S> */ B(Nop),
- /* 416 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(79),
+ /* 416 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(80),
/* 425 S> */ B(Nop),
- /* 426 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(81),
+ /* 426 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(82),
/* 435 S> */ B(Nop),
- /* 436 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(83),
+ /* 436 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(84),
/* 445 S> */ B(Nop),
- /* 446 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(85),
+ /* 446 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(86),
/* 455 S> */ B(Nop),
- /* 456 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(87),
+ /* 456 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(88),
/* 465 S> */ B(Nop),
- /* 466 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(89),
+ /* 466 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(90),
/* 475 S> */ B(Nop),
- /* 476 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(91),
+ /* 476 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(92),
/* 485 S> */ B(Nop),
- /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(93),
+ /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(94),
/* 495 S> */ B(Nop),
- /* 496 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(95),
+ /* 496 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(96),
/* 505 S> */ B(Nop),
- /* 506 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(97),
+ /* 506 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(98),
/* 515 S> */ B(Nop),
- /* 516 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(99),
+ /* 516 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(100),
/* 525 S> */ B(Nop),
- /* 526 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(101),
+ /* 526 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(102),
/* 535 S> */ B(Nop),
- /* 536 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(103),
+ /* 536 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(104),
/* 545 S> */ B(Nop),
- /* 546 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(105),
+ /* 546 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(106),
/* 555 S> */ B(Nop),
- /* 556 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(107),
+ /* 556 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(108),
/* 565 S> */ B(Nop),
- /* 566 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(109),
+ /* 566 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(110),
/* 575 S> */ B(Nop),
- /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(111),
+ /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(112),
/* 585 S> */ B(Nop),
- /* 586 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(113),
+ /* 586 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(114),
/* 595 S> */ B(Nop),
- /* 596 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(115),
+ /* 596 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(116),
/* 605 S> */ B(Nop),
- /* 606 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(117),
+ /* 606 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(118),
/* 615 S> */ B(Nop),
- /* 616 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(119),
+ /* 616 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(120),
/* 625 S> */ B(Nop),
- /* 626 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(121),
+ /* 626 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(122),
/* 635 S> */ B(Nop),
- /* 636 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(123),
+ /* 636 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(124),
/* 645 S> */ B(Nop),
- /* 646 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(125),
+ /* 646 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(126),
/* 655 S> */ B(Nop),
- /* 656 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(127),
+ /* 656 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(128),
/* 665 S> */ B(Nop),
- /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(129),
+ /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(130),
/* 675 S> */ B(Nop),
- /* 676 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(131),
+ /* 676 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(132),
/* 685 S> */ B(Nop),
- /* 686 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(133),
+ /* 686 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(134),
/* 695 S> */ B(Nop),
- /* 696 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(135),
+ /* 696 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(136),
/* 705 S> */ B(Nop),
- /* 706 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(137),
+ /* 706 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(138),
/* 715 S> */ B(Nop),
- /* 716 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(139),
+ /* 716 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(140),
/* 725 S> */ B(Nop),
- /* 726 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(141),
+ /* 726 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(142),
/* 735 S> */ B(Nop),
- /* 736 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(143),
+ /* 736 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(144),
/* 745 S> */ B(Nop),
- /* 746 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(145),
+ /* 746 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(146),
/* 755 S> */ B(Nop),
- /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(147),
+ /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(148),
/* 765 S> */ B(Nop),
- /* 766 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(149),
+ /* 766 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(150),
/* 775 S> */ B(Nop),
- /* 776 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(151),
+ /* 776 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(152),
/* 785 S> */ B(Nop),
- /* 786 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(153),
+ /* 786 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(154),
/* 795 S> */ B(Nop),
- /* 796 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(155),
+ /* 796 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(156),
/* 805 S> */ B(Nop),
- /* 806 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(157),
+ /* 806 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(158),
/* 815 S> */ B(Nop),
- /* 816 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(159),
+ /* 816 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(160),
/* 825 S> */ B(Nop),
- /* 826 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(161),
+ /* 826 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(162),
/* 835 S> */ B(Nop),
- /* 836 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(163),
+ /* 836 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(164),
/* 845 S> */ B(Nop),
- /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(165),
+ /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(166),
/* 855 S> */ B(Nop),
- /* 856 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(167),
+ /* 856 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(168),
/* 865 S> */ B(Nop),
- /* 866 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(169),
+ /* 866 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(170),
/* 875 S> */ B(Nop),
- /* 876 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(171),
+ /* 876 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(172),
/* 885 S> */ B(Nop),
- /* 886 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(173),
+ /* 886 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(174),
/* 895 S> */ B(Nop),
- /* 896 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(175),
+ /* 896 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(176),
/* 905 S> */ B(Nop),
- /* 906 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(177),
+ /* 906 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(178),
/* 915 S> */ B(Nop),
- /* 916 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(179),
+ /* 916 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(180),
/* 925 S> */ B(Nop),
- /* 926 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(181),
+ /* 926 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(182),
/* 935 S> */ B(Nop),
- /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(183),
+ /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(184),
/* 945 S> */ B(Nop),
- /* 946 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(185),
+ /* 946 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(186),
/* 955 S> */ B(Nop),
- /* 956 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(187),
+ /* 956 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(188),
/* 965 S> */ B(Nop),
- /* 966 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(189),
+ /* 966 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(190),
/* 975 S> */ B(Nop),
- /* 976 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(191),
+ /* 976 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(192),
/* 985 S> */ B(Nop),
- /* 986 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(193),
+ /* 986 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(194),
/* 995 S> */ B(Nop),
- /* 996 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(195),
+ /* 996 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(196),
/* 1005 S> */ B(Nop),
- /* 1006 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(197),
+ /* 1006 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(198),
/* 1015 S> */ B(Nop),
- /* 1016 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(199),
+ /* 1016 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(200),
/* 1025 S> */ B(Nop),
- /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(201),
+ /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(202),
/* 1035 S> */ B(Nop),
- /* 1036 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(203),
+ /* 1036 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(204),
/* 1045 S> */ B(Nop),
- /* 1046 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(205),
+ /* 1046 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(206),
/* 1055 S> */ B(Nop),
- /* 1056 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(207),
+ /* 1056 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(208),
/* 1065 S> */ B(Nop),
- /* 1066 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(209),
+ /* 1066 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(210),
/* 1075 S> */ B(Nop),
- /* 1076 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(211),
+ /* 1076 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(212),
/* 1085 S> */ B(Nop),
- /* 1086 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(213),
+ /* 1086 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(214),
/* 1095 S> */ B(Nop),
- /* 1096 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(215),
+ /* 1096 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(216),
/* 1105 S> */ B(Nop),
- /* 1106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(217),
+ /* 1106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(218),
/* 1115 S> */ B(Nop),
- /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(219),
+ /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(220),
/* 1125 S> */ B(Nop),
- /* 1126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(221),
+ /* 1126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(222),
/* 1135 S> */ B(Nop),
- /* 1136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(223),
+ /* 1136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(224),
/* 1145 S> */ B(Nop),
- /* 1146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(225),
+ /* 1146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(226),
/* 1155 S> */ B(Nop),
- /* 1156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(227),
+ /* 1156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(228),
/* 1165 S> */ B(Nop),
- /* 1166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(229),
+ /* 1166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(230),
/* 1175 S> */ B(Nop),
- /* 1176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(231),
+ /* 1176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(232),
/* 1185 S> */ B(Nop),
- /* 1186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(233),
+ /* 1186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(234),
/* 1195 S> */ B(Nop),
- /* 1196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(235),
+ /* 1196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(236),
/* 1205 S> */ B(Nop),
- /* 1206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(237),
+ /* 1206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(238),
/* 1215 S> */ B(Nop),
- /* 1216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(239),
+ /* 1216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(240),
/* 1225 S> */ B(Nop),
- /* 1226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(241),
+ /* 1226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(242),
/* 1235 S> */ B(Nop),
- /* 1236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(243),
+ /* 1236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(244),
/* 1245 S> */ B(Nop),
- /* 1246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(245),
+ /* 1246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(246),
/* 1255 S> */ B(Nop),
- /* 1256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(247),
+ /* 1256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(248),
/* 1265 S> */ B(Nop),
- /* 1266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(249),
+ /* 1266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(250),
/* 1275 S> */ B(Nop),
- /* 1276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(251),
+ /* 1276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(252),
/* 1285 S> */ B(Nop),
- /* 1286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(253),
+ /* 1286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(254),
/* 1295 S> */ B(Nop),
- /* 1296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(255),
- /* 1305 S> */ B(Wide), B(LdaGlobal), U16(257),
+ /* 1296 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(256),
+ /* 1305 S> */ B(Wide), B(LdaGlobal), U16(258),
/* 1315 S> */ B(Return),
]
constant pool: [
- "name",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
index b8c8c5fa72..fae86a673d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: yes
---
@@ -33,13 +31,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 14
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), U8(1),
- /* 55 E> */ B(TestEqual), R(0),
+ /* 55 E> */ B(TestEqual), R(0), U8(2),
B(JumpIfTrue), U8(4),
B(LdaSmi), U8(3),
/* 67 S> */ B(Return),
@@ -75,13 +73,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 13
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(LdaZero),
- /* 55 E> */ B(TestEqual), R(0),
+ /* 55 E> */ B(TestEqual), R(0), U8(2),
B(JumpIfFalse), U8(4),
B(LdaSmi), U8(3),
/* 67 S> */ B(Return),
@@ -319,7 +317,7 @@ bytecodes: [
/* 624 S> */ B(Return),
]
constant pool: [
- 260,
+ Smi [260],
]
handlers: [
]
@@ -505,7 +503,7 @@ bytecodes: [
/* 624 S> */ B(Return),
]
constant pool: [
- 260,
+ Smi [260],
]
handlers: [
]
@@ -548,7 +546,7 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 278
+bytecode array length: 279
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
@@ -558,7 +556,7 @@ bytecodes: [
/* 60 S> */ B(LdaSmi), U8(3),
B(Star), R(2),
/* 63 S> */ B(LdaSmi), U8(3),
- /* 73 E> */ B(TestGreaterThan), R(0),
+ /* 73 E> */ B(TestGreaterThan), R(0), U8(2),
B(JumpIfTrueConstant), U8(0),
B(LdaSmi), U8(1),
B(Star), R(1),
@@ -692,7 +690,7 @@ bytecodes: [
/* 630 S> */ B(Return),
]
constant pool: [
- 260,
+ Smi [260],
]
handlers: [
]
@@ -735,7 +733,7 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 277
+bytecode array length: 278
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
@@ -745,7 +743,7 @@ bytecodes: [
/* 60 S> */ B(LdaSmi), U8(3),
B(Star), R(2),
/* 63 S> */ B(LdaSmi), U8(5),
- /* 73 E> */ B(TestLessThan), R(0),
+ /* 73 E> */ B(TestLessThan), R(0), U8(2),
B(JumpIfFalseConstant), U8(0),
B(LdaSmi), U8(1),
B(Star), R(1),
@@ -879,7 +877,7 @@ bytecodes: [
/* 630 S> */ B(Return),
]
constant pool: [
- 260,
+ Smi [260],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
index ed13d254ac..acef8f74ad 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
@@ -3,9 +3,8 @@
#
---
-pool type: string
-execute: yes
wrap: yes
+test function name: f
---
snippet: "
@@ -13,41 +12,41 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 68
+bytecode array length: 73
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(5),
+ B(StaContextSlot), R(context), U8(6), U8(0),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(6),
- /* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdaConstant), U8(0),
- B(Star), R(3),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 10 E> */ B(StackCheck),
+ /* 14 S> */ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
B(LdaConstant), U8(1),
B(Star), R(3),
B(LdaZero),
B(Star), R(7),
- B(LdaSmi), U8(30),
+ B(LdaSmi), U8(10),
B(Star), R(8),
- B(LdaSmi), U8(34),
+ B(LdaSmi), U8(14),
B(Star), R(9),
B(Mov), R(1), R(4),
B(Mov), R(3), R(5),
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(1),
- /* 34 E> */ B(Call), R(1), R(2), U8(2), U8(0),
- /* 55 S> */ B(LdaLookupSlot), U8(2),
- /* 65 S> */ B(Return),
+ /* 14 E> */ B(Call), R(1), R(2), U8(2), U8(0),
+ /* 35 S> */ B(LdaLookupGlobalSlot), U8(2), U8(4), U8(1),
+ /* 45 S> */ B(Return),
]
constant pool: [
- "eval",
- "var x = 10;",
- "x",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["eval"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["var x = 10;"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -58,42 +57,42 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 69
+bytecode array length: 74
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(5),
+ B(StaContextSlot), R(context), U8(6), U8(0),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(6),
- /* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdaConstant), U8(0),
- B(Star), R(3),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 10 E> */ B(StackCheck),
+ /* 14 S> */ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
B(LdaConstant), U8(1),
B(Star), R(3),
B(LdaZero),
B(Star), R(7),
- B(LdaSmi), U8(30),
+ B(LdaSmi), U8(10),
B(Star), R(8),
- B(LdaSmi), U8(34),
+ B(LdaSmi), U8(14),
B(Star), R(9),
B(Mov), R(1), R(4),
B(Mov), R(3), R(5),
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(1),
- /* 34 E> */ B(Call), R(1), R(2), U8(2), U8(0),
- /* 55 S> */ B(LdaLookupSlotInsideTypeof), U8(2),
+ /* 14 E> */ B(Call), R(1), R(2), U8(2), U8(0),
+ /* 35 S> */ B(LdaLookupGlobalSlotInsideTypeof), U8(2), U8(4), U8(1),
B(TypeOf),
- /* 72 S> */ B(Return),
+ /* 52 S> */ B(Return),
]
constant pool: [
- "eval",
- "var x = 10;",
- "x",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["eval"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["var x = 10;"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -104,42 +103,142 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 70
+bytecode array length: 73
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
+ B(StaContextSlot), R(context), U8(4), U8(0),
B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(5),
+ B(StaContextSlot), R(context), U8(6), U8(0),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(6),
- /* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdaSmi), U8(20),
- /* 36 E> */ B(StaLookupSlotSloppy), U8(0),
- /* 42 S> */ B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 10 E> */ B(StackCheck),
+ /* 14 S> */ B(LdaSmi), U8(20),
+ /* 16 E> */ B(StaLookupSlotSloppy), U8(0),
+ /* 22 S> */ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
B(LdaConstant), U8(2),
B(Star), R(3),
B(LdaZero),
B(Star), R(7),
- B(LdaSmi), U8(30),
+ B(LdaSmi), U8(10),
+ B(Star), R(8),
+ B(LdaSmi), U8(29),
+ B(Star), R(9),
+ B(Mov), R(1), R(4),
+ B(Mov), R(3), R(5),
+ B(Mov), R(closure), R(6),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
+ B(Star), R(1),
+ /* 29 E> */ B(Call), R(1), R(2), U8(2), U8(0),
+ /* 39 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["eval"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 20;
+ f = function(){
+ eval('var x = 10');
+ return x;
+ }
+ f();
+"
+frame size: 10
+parameter count: 1
+bytecode array length: 73
+bytecodes: [
+ B(CreateFunctionContext), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ B(CreateMappedArguments),
+ B(StaContextSlot), R(context), U8(6), U8(0),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 38 E> */ B(StackCheck),
+ /* 44 S> */ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(7),
+ B(LdaSmi), U8(38),
+ B(Star), R(8),
+ B(LdaSmi), U8(44),
+ B(Star), R(9),
+ B(Mov), R(1), R(4),
+ B(Mov), R(3), R(5),
+ B(Mov), R(closure), R(6),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
+ B(Star), R(1),
+ /* 44 E> */ B(Call), R(1), R(2), U8(2), U8(0),
+ /* 66 S> */ B(LdaLookupContextSlot), U8(2), U8(6), U8(1),
+ /* 76 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["eval"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["var x = 10"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ x = 20;
+ f = function(){
+ eval('var x = 10');
+ return x;
+ }
+ f();
+"
+frame size: 10
+parameter count: 1
+bytecode array length: 73
+bytecodes: [
+ B(CreateFunctionContext), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ B(CreateMappedArguments),
+ B(StaContextSlot), R(context), U8(6), U8(0),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 34 E> */ B(StackCheck),
+ /* 40 S> */ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(7),
+ B(LdaSmi), U8(34),
B(Star), R(8),
- B(LdaSmi), U8(49),
+ B(LdaSmi), U8(40),
B(Star), R(9),
B(Mov), R(1), R(4),
B(Mov), R(3), R(5),
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(1),
- /* 49 E> */ B(Call), R(1), R(2), U8(2), U8(0),
- /* 59 S> */ B(Return),
+ /* 40 E> */ B(Call), R(1), R(2), U8(2), U8(0),
+ /* 62 S> */ B(LdaLookupGlobalSlot), U8(2), U8(4), U8(1),
+ /* 72 S> */ B(Return),
]
constant pool: [
- "x",
- "eval",
- "",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["eval"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["var x = 10"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
index 41476311e8..ce915d51c0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: no
test function name: f
@@ -19,14 +17,14 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 4
+bytecode array length: 6
bytecodes: [
/* 10 E> */ B(StackCheck),
- /* 15 S> */ B(LdaLookupSlot), U8(0),
+ /* 15 S> */ B(LdaLookupGlobalSlot), U8(0), U8(2), U8(1),
/* 25 S> */ B(Return),
]
constant pool: [
- "x",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -51,7 +49,7 @@ bytecodes: [
/* 23 S> */ B(Return),
]
constant pool: [
- "x",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -76,7 +74,7 @@ bytecodes: [
/* 37 S> */ B(Return),
]
constant pool: [
- "x",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -92,15 +90,15 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 5
+bytecode array length: 7
bytecodes: [
/* 10 E> */ B(StackCheck),
- /* 15 S> */ B(LdaLookupSlotInsideTypeof), U8(0),
+ /* 15 S> */ B(LdaLookupGlobalSlotInsideTypeof), U8(0), U8(2), U8(1),
B(TypeOf),
/* 32 S> */ B(Return),
]
constant pool: [
- "x",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden
index a668d62452..f7e64f4864 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: no
test function name: f
@@ -279,7 +277,7 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 1030
+bytecode array length: 1034
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 22 S> */ B(LdaConstant), U8(0),
@@ -794,267 +792,267 @@ bytecodes: [
B(Star), R(0),
/* 3082 S> */ B(LdaConstant), U8(255),
B(Star), R(0),
- /* 3086 S> */ B(Wide), B(LdaLookupSlot), U16(256),
+ /* 3086 S> */ B(Wide), B(LdaLookupGlobalSlot), U16(256), U16(2), U16(1),
/* 3095 S> */ B(Return),
]
constant pool: [
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -1330,7 +1328,7 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 1031
+bytecode array length: 1035
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 22 S> */ B(LdaConstant), U8(0),
@@ -1845,268 +1843,268 @@ bytecodes: [
B(Star), R(0),
/* 3082 S> */ B(LdaConstant), U8(255),
B(Star), R(0),
- /* 3086 S> */ B(Wide), B(LdaLookupSlotInsideTypeof), U16(256),
+ /* 3086 S> */ B(Wide), B(LdaLookupGlobalSlotInsideTypeof), U16(256), U16(2), U16(1),
B(TypeOf),
/* 3102 S> */ B(Return),
]
constant pool: [
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -2903,263 +2901,263 @@ bytecodes: [
/* 3093 S> */ B(Return),
]
constant pool: [
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -3957,263 +3955,263 @@ bytecodes: [
/* 3106 S> */ B(Return),
]
constant pool: [
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ HEAP_NUMBER_TYPE [2.3],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
new file mode 100644
index 0000000000..62dbeb7ada
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
@@ -0,0 +1,896 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: no
+module: yes
+top level: yes
+
+---
+snippet: "
+ import \"bar\";
+"
+frame size: 8
+parameter count: 2
+bytecode array length: 133
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(21),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(1), U8(0),
+ B(JumpIfTrue), U8(71),
+ B(LdaSmi), U8(76),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(3),
+ B(Ldar), R(3),
+ B(Mov), R(closure), R(2),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ B(Star), R(2),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(3),
+ B(LdaZero),
+ B(SuspendGenerator), R(3),
+ B(Ldar), R(2),
+ /* 13 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Star), R(5),
+ B(LdaZero),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(26),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(16),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(6), U8(2),
+ /* 13 S> */ B(Return),
+ B(Ldar), R(4),
+ /* 0 E> */ B(Throw),
+ B(LdaUndefined),
+ /* 13 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ import {foo} from \"bar\";
+"
+frame size: 8
+parameter count: 2
+bytecode array length: 133
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(21),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(1), U8(0),
+ B(JumpIfTrue), U8(71),
+ B(LdaSmi), U8(76),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(3),
+ B(Ldar), R(3),
+ B(Mov), R(closure), R(2),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ B(Star), R(2),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(3),
+ B(LdaZero),
+ B(SuspendGenerator), R(3),
+ B(Ldar), R(2),
+ /* 24 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Star), R(5),
+ B(LdaZero),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(26),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(16),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(6), U8(2),
+ /* 24 S> */ B(Return),
+ B(Ldar), R(4),
+ /* 0 E> */ B(Throw),
+ B(LdaUndefined),
+ /* 24 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ import {foo as goo} from \"bar\";
+ goo(42);
+ { let x; { goo(42) } };
+"
+frame size: 9
+parameter count: 2
+bytecode array length: 223
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(21),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(2),
+ B(LdaZero),
+ B(TestEqualStrict), R(2), U8(0),
+ B(JumpIfTrue), U8(71),
+ B(LdaSmi), U8(76),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(2),
+ B(LdaConstant), U8(0),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(4),
+ B(Ldar), R(4),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ B(Star), R(3),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(4),
+ B(LdaZero),
+ B(SuspendGenerator), R(4),
+ B(Ldar), R(3),
+ /* 64 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(4), U8(1),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(4), U8(1),
+ B(Star), R(6),
+ B(LdaZero),
+ B(TestEqualStrict), R(6), U8(0),
+ B(JumpIfTrue), U8(26),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(6), U8(0),
+ B(JumpIfTrue), U8(16),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(8),
+ B(Mov), R(5), R(7),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(7), U8(2),
+ /* 64 S> */ B(Return),
+ B(Ldar), R(5),
+ /* 0 E> */ B(Throw),
+ /* 32 S> */ B(LdrUndefined), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(6),
+ B(LdaZero),
+ B(Star), R(7),
+ /* 32 E> */ B(CallRuntime), U16(Runtime::kLoadModuleImport), R(6), U8(2),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(8), U8(1),
+ B(Star), R(3),
+ B(LdaSmi), U8(42),
+ B(Star), R(5),
+ /* 32 E> */ B(Call), R(3), R(4), U8(2), U8(2),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(3),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 47 S> */ B(LdaUndefined),
+ /* 47 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 52 S> */ B(LdrUndefined), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(6),
+ B(LdaZero),
+ B(Star), R(7),
+ /* 52 E> */ B(CallRuntime), U16(Runtime::kLoadModuleImport), R(6), U8(2),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(8), U8(1),
+ B(Star), R(3),
+ B(LdaSmi), U8(42),
+ B(Star), R(5),
+ /* 52 E> */ B(Call), R(3), R(4), U8(2), U8(4),
+ B(PopContext), R(1),
+ B(LdaUndefined),
+ /* 64 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["goo"],
+ FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ export var foo = 42;
+ foo++;
+ { let x; { foo++ } };
+"
+frame size: 9
+parameter count: 2
+bytecode array length: 208
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(21),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(2),
+ B(LdaZero),
+ B(TestEqualStrict), R(2), U8(0),
+ B(JumpIfTrue), U8(71),
+ B(LdaSmi), U8(76),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(2),
+ B(LdaConstant), U8(0),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(4),
+ B(Ldar), R(4),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ B(Star), R(3),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(4),
+ B(LdaZero),
+ B(SuspendGenerator), R(4),
+ B(Ldar), R(3),
+ /* 49 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(4), U8(1),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(4), U8(1),
+ B(Star), R(6),
+ B(LdaZero),
+ B(TestEqualStrict), R(6), U8(0),
+ B(JumpIfTrue), U8(26),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(6), U8(0),
+ B(JumpIfTrue), U8(16),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(8),
+ B(Mov), R(5), R(7),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(7), U8(2),
+ /* 49 S> */ B(Return),
+ B(Ldar), R(5),
+ /* 0 E> */ B(Throw),
+ /* 17 S> */ B(LdaSmi), U8(42),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ /* 17 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ /* 21 S> */ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kLoadModuleExport), R(3), U8(1),
+ B(Inc), U8(2),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ /* 24 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(2),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 34 S> */ B(LdaUndefined),
+ /* 34 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 39 S> */ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kLoadModuleExport), R(3), U8(1),
+ B(Inc), U8(3),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ /* 42 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ B(PopContext), R(1),
+ B(LdaUndefined),
+ /* 49 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
+ FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ export let foo = 42;
+ foo++;
+ { let x; { foo++ } };
+"
+frame size: 9
+parameter count: 2
+bytecode array length: 242
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(21),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(2),
+ B(LdaZero),
+ B(TestEqualStrict), R(2), U8(0),
+ B(JumpIfTrue), U8(83),
+ B(LdaSmi), U8(76),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(2),
+ B(LdaConstant), U8(0),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ B(LdaTheHole),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(4),
+ B(Ldar), R(4),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ B(Star), R(3),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(4),
+ B(LdaZero),
+ B(SuspendGenerator), R(4),
+ B(Ldar), R(3),
+ /* 49 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(4), U8(1),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(4), U8(1),
+ B(Star), R(6),
+ B(LdaZero),
+ B(TestEqualStrict), R(6), U8(0),
+ B(JumpIfTrue), U8(26),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(6), U8(0),
+ B(JumpIfTrue), U8(16),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(8),
+ B(Mov), R(5), R(7),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(7), U8(2),
+ /* 49 S> */ B(Return),
+ B(Ldar), R(5),
+ /* 0 E> */ B(Throw),
+ /* 17 S> */ B(LdaSmi), U8(42),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ /* 17 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ /* 21 S> */ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kLoadModuleExport), R(3), U8(1),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(Inc), U8(2),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ /* 24 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(2),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 34 S> */ B(LdaUndefined),
+ /* 34 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 39 S> */ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kLoadModuleExport), R(3), U8(1),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(Inc), U8(3),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ /* 42 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ B(PopContext), R(1),
+ B(LdaUndefined),
+ /* 49 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
+ FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ export const foo = 42;
+ foo++;
+ { let x; { foo++ } };
+"
+frame size: 9
+parameter count: 2
+bytecode array length: 230
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(21),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(2),
+ B(LdaZero),
+ B(TestEqualStrict), R(2), U8(0),
+ B(JumpIfTrue), U8(83),
+ B(LdaSmi), U8(76),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(2),
+ B(LdaConstant), U8(0),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ B(LdaTheHole),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(4),
+ B(Ldar), R(4),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ B(Star), R(3),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(4),
+ B(LdaZero),
+ B(SuspendGenerator), R(4),
+ B(Ldar), R(3),
+ /* 51 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(4), U8(1),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(4), U8(1),
+ B(Star), R(6),
+ B(LdaZero),
+ B(TestEqualStrict), R(6), U8(0),
+ B(JumpIfTrue), U8(26),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(6), U8(0),
+ B(JumpIfTrue), U8(16),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(8),
+ B(Mov), R(5), R(7),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(7), U8(2),
+ /* 51 S> */ B(Return),
+ B(Ldar), R(5),
+ /* 0 E> */ B(Throw),
+ /* 19 S> */ B(LdaSmi), U8(42),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ /* 19 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ /* 23 S> */ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kLoadModuleExport), R(3), U8(1),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(Inc), U8(2),
+ /* 26 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(2),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 36 S> */ B(LdaUndefined),
+ /* 36 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 41 S> */ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kLoadModuleExport), R(3), U8(1),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(Inc), U8(3),
+ /* 44 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
+ B(PopContext), R(1),
+ B(LdaUndefined),
+ /* 51 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
+ FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ export default (function () {});
+"
+frame size: 8
+parameter count: 2
+bytecode array length: 159
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(21),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(1), U8(0),
+ B(JumpIfTrue), U8(83),
+ B(LdaSmi), U8(76),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ B(LdaTheHole),
+ B(Star), R(3),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(2), U8(2),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(3),
+ B(Ldar), R(3),
+ B(Mov), R(closure), R(2),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ B(Star), R(2),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(3),
+ B(LdaZero),
+ B(SuspendGenerator), R(3),
+ B(Ldar), R(2),
+ /* 32 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Star), R(5),
+ B(LdaZero),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(26),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(16),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(6), U8(2),
+ /* 32 S> */ B(Return),
+ B(Ldar), R(4),
+ /* 0 E> */ B(Throw),
+ B(CreateClosure), U8(2), U8(0),
+ B(Star), R(3),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(2), U8(2),
+ B(LdaUndefined),
+ /* 32 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["default"],
+ SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ export default (class {});
+"
+frame size: 8
+parameter count: 2
+bytecode array length: 196
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(21),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(1), U8(0),
+ B(JumpIfTrue), U8(83),
+ B(LdaSmi), U8(76),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ B(LdaTheHole),
+ B(Star), R(3),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(2), U8(2),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(3),
+ B(Ldar), R(3),
+ B(Mov), R(closure), R(2),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ B(Star), R(2),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(3),
+ B(LdaZero),
+ B(SuspendGenerator), R(3),
+ B(Ldar), R(2),
+ /* 26 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Star), R(5),
+ B(LdaZero),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(26),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(16),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(6), U8(2),
+ /* 26 S> */ B(Return),
+ B(Ldar), R(4),
+ /* 0 E> */ B(Throw),
+ /* 16 S> */ B(LdaTheHole),
+ B(Star), R(2),
+ B(CreateClosure), U8(2), U8(0),
+ B(Star), R(3),
+ B(LdaSmi), U8(16),
+ B(Star), R(4),
+ B(LdaSmi), U8(24),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(2), U8(4),
+ B(Star), R(2),
+ B(LdrNamedProperty), R(2), U8(3), U8(2), R(3),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
+ B(StaContextSlot), R(context), U8(6), U8(0),
+ /* 16 E> */ B(LdrContextSlot), R(context), U8(6), U8(0), R(3),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(2), U8(2),
+ B(LdaUndefined),
+ /* 26 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["default"],
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ export {foo as goo} from \"bar\"
+"
+frame size: 8
+parameter count: 2
+bytecode array length: 133
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(21),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(1), U8(0),
+ B(JumpIfTrue), U8(71),
+ B(LdaSmi), U8(76),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(3),
+ B(Ldar), R(3),
+ B(Mov), R(closure), R(2),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ B(Star), R(2),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(3),
+ B(LdaZero),
+ B(SuspendGenerator), R(3),
+ B(Ldar), R(2),
+ /* 30 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Star), R(5),
+ B(LdaZero),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(26),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(16),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(6), U8(2),
+ /* 30 S> */ B(Return),
+ B(Ldar), R(4),
+ /* 0 E> */ B(Throw),
+ B(LdaUndefined),
+ /* 30 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ export * from \"bar\"
+"
+frame size: 8
+parameter count: 2
+bytecode array length: 133
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(21),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(1), U8(0),
+ B(JumpIfTrue), U8(71),
+ B(LdaSmi), U8(76),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 0 E> */ B(StackCheck),
+ /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(3),
+ B(Ldar), R(3),
+ B(Mov), R(closure), R(2),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
+ B(StaContextSlot), R(context), U8(5), U8(0),
+ B(Star), R(2),
+ B(LdrContextSlot), R(context), U8(5), U8(0), R(3),
+ B(LdaZero),
+ B(SuspendGenerator), R(3),
+ B(Ldar), R(2),
+ /* 19 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Star), R(5),
+ B(LdaZero),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(26),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(16),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(6), U8(2),
+ /* 19 S> */ B(Return),
+ B(Ldar), R(4),
+ /* 0 E> */ B(Throw),
+ B(LdaUndefined),
+ /* 19 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden
index 090fb0bb7a..f2a8bacc27 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -13,9 +11,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 9
bytecodes: [
B(Mov), R(new_target), R(0),
+ B(Ldar), R(new_target),
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(Ldar), R(0),
/* 53 S> */ B(Return),
@@ -31,9 +30,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 6
+bytecode array length: 8
bytecodes: [
B(Mov), R(new_target), R(0),
+ B(Ldar), R(new_target),
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaUndefined),
/* 46 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
index b9c7d0ca4d..7f81d82a2d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -21,7 +19,7 @@ bytecodes: [
/* 46 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -40,7 +38,7 @@ bytecodes: [
/* 71 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -57,13 +55,13 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
- /* 75 E> */ B(StaNamedPropertySloppy), R(1), U8(1), U8(1),
+ /* 75 E> */ B(StaNamedPropertySloppy), R(1), U8(1), U8(2),
B(Ldar), R(1),
/* 80 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["val"],
]
handlers: [
]
@@ -80,14 +78,14 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
- /* 67 E> */ B(AddSmi), U8(1), R(0), U8(1),
- B(StaNamedPropertySloppy), R(1), U8(1), U8(2),
+ /* 67 E> */ B(AddSmi), U8(1), R(0), U8(2),
+ B(StaNamedPropertySloppy), R(1), U8(1), U8(3),
B(Ldar), R(1),
/* 76 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["val"],
]
handlers: [
]
@@ -103,14 +101,14 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(0),
B(CreateClosure), U8(1), U8(2),
- B(StaNamedPropertySloppy), R(0), U8(2), U8(1),
+ B(StaNamedPropertySloppy), R(0), U8(2), U8(2),
B(Ldar), R(0),
/* 67 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["func"],
]
handlers: [
]
@@ -126,14 +124,14 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(0),
B(CreateClosure), U8(1), U8(2),
- B(StaNamedPropertySloppy), R(0), U8(2), U8(1),
+ B(StaNamedPropertySloppy), R(0), U8(2), U8(2),
B(Ldar), R(0),
/* 68 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["func"],
]
handlers: [
]
@@ -158,13 +156,13 @@ bytecodes: [
B(Star), R(5),
B(Mov), R(0), R(1),
B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
- B(Ldar), R(0),
+ B(Ldar), R(1),
/* 68 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -189,14 +187,14 @@ bytecodes: [
B(Star), R(5),
B(Mov), R(0), R(1),
B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
- B(Ldar), R(0),
+ B(Ldar), R(1),
/* 102 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -221,13 +219,13 @@ bytecodes: [
B(Star), R(5),
B(Mov), R(0), R(1),
B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
- B(Ldar), R(0),
+ B(Ldar), R(1),
/* 74 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
@@ -251,11 +249,11 @@ bytecodes: [
B(Mov), R(1), R(2),
B(Mov), R(0), R(4),
/* 57 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
- B(Ldar), R(1),
+ B(Ldar), R(2),
/* 62 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -274,11 +272,11 @@ bytecodes: [
B(Star), R(2),
B(Mov), R(0), R(1),
B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(1), U8(2),
- B(Ldar), R(0),
+ B(Ldar), R(1),
/* 62 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -304,12 +302,12 @@ bytecodes: [
B(Star), R(6),
B(Mov), R(1), R(2),
B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
- B(Ldar), R(1),
+ B(Ldar), R(2),
/* 69 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["test"],
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -326,7 +324,7 @@ bytecodes: [
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
/* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(1), R(1),
- /* 64 E> */ B(StaNamedPropertySloppy), R(1), U8(2), U8(1),
+ /* 64 E> */ B(StaNamedPropertySloppy), R(1), U8(2), U8(2),
/* 68 E> */ B(ToName), R(3),
B(LdaSmi), U8(1),
B(Star), R(4),
@@ -336,13 +334,13 @@ bytecodes: [
B(Star), R(6),
B(Mov), R(1), R(2),
B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
- B(Ldar), R(1),
+ B(Ldar), R(2),
/* 77 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["test"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["val"],
]
handlers: [
]
@@ -353,7 +351,7 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 49
+bytecode array length: 46
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
@@ -369,15 +367,14 @@ bytecodes: [
B(Mov), R(1), R(2),
B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
B(CreateObjectLiteral), U8(1), U8(0), U8(35), R(4),
- B(Mov), R(1), R(2),
B(Mov), R(4), R(3),
B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(2), U8(2),
- B(Ldar), R(1),
+ B(Ldar), R(2),
/* 84 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["test"],
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
@@ -388,7 +385,7 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 73
+bytecode array length: 67
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
@@ -409,7 +406,6 @@ bytecodes: [
B(Star), R(4),
B(LdaZero),
B(Star), R(5),
- B(Mov), R(1), R(2),
B(CallRuntime), U16(Runtime::kDefineGetterPropertyUnchecked), R(2), U8(4),
B(LdaConstant), U8(3),
B(ToName), R(3),
@@ -417,18 +413,17 @@ bytecodes: [
B(Star), R(4),
B(LdaZero),
B(Star), R(5),
- B(Mov), R(1), R(2),
B(CallRuntime), U16(Runtime::kDefineSetterPropertyUnchecked), R(2), U8(4),
- B(Ldar), R(1),
+ B(Ldar), R(2),
/* 99 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["val"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden
index 62b1ace69d..b281cb7336 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -790,263 +788,263 @@ bytecodes: [
/* 2638 S> */ B(Return),
]
constant pool: [
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ FIXED_ARRAY_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
index e58694f982..397b0de724 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: no
test function name: f
@@ -20,15 +18,14 @@ snippet: "
var f = new Outer().getInnerFunc();
f();
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
/* 97 E> */ B(StackCheck),
- /* 102 S> */ B(LdrContextSlot), R(context), U8(1), R(0),
- B(LdrContextSlot), R(0), U8(4), R(1),
- /* 120 E> */ B(LdaContextSlot), R(context), U8(4),
- B(Mul), R(1), U8(1),
+ /* 102 S> */ B(LdrContextSlot), R(context), U8(4), U8(1), R(0),
+ /* 120 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
+ B(Mul), R(0), U8(2),
/* 130 S> */ B(Return),
]
constant pool: [
@@ -48,15 +45,13 @@ snippet: "
var f = new Outer().getInnerFunc();
f();
"
-frame size: 2
+frame size: 0
parameter count: 1
-bytecode array length: 16
+bytecode array length: 11
bytecodes: [
/* 97 E> */ B(StackCheck),
- /* 102 S> */ B(LdrContextSlot), R(context), U8(4), R(0),
- /* 111 E> */ B(LdrContextSlot), R(context), U8(1), R(1),
- B(Ldar), R(0),
- B(StaContextSlot), R(1), U8(4),
+ /* 102 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
+ /* 111 E> */ B(StaContextSlot), R(context), U8(4), U8(1),
B(LdaUndefined),
/* 123 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden
index 71b6df7687..01c3ad0694 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: no
test function name: f
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
index aadf2dec01..adcf911cdd 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: yes
---
@@ -37,7 +35,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
- /* 45 S> */ B(AddSmi), U8(3), R(0), U8(1),
+ /* 45 S> */ B(AddSmi), U8(3), R(0), U8(2),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -56,7 +54,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
- /* 45 S> */ B(SubSmi), U8(3), R(0), U8(1),
+ /* 45 S> */ B(SubSmi), U8(3), R(0), U8(2),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -76,7 +74,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(4),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), U8(3),
- B(Mul), R(0), U8(1),
+ B(Mul), R(0), U8(2),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -96,7 +94,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(4),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), U8(3),
- B(Div), R(0), U8(1),
+ B(Div), R(0), U8(2),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -116,7 +114,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(4),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), U8(3),
- B(Mod), R(0), U8(1),
+ B(Mod), R(0), U8(2),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -135,7 +133,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
- /* 45 S> */ B(BitwiseOrSmi), U8(2), R(0), U8(1),
+ /* 45 S> */ B(BitwiseOrSmi), U8(2), R(0), U8(2),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -155,7 +153,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), U8(2),
- B(BitwiseXor), R(0), U8(1),
+ B(BitwiseXor), R(0), U8(2),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -174,7 +172,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
- /* 45 S> */ B(BitwiseAndSmi), U8(2), R(0), U8(1),
+ /* 45 S> */ B(BitwiseAndSmi), U8(2), R(0), U8(2),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -193,7 +191,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
B(Star), R(0),
- /* 46 S> */ B(ShiftLeftSmi), U8(3), R(0), U8(1),
+ /* 46 S> */ B(ShiftLeftSmi), U8(3), R(0), U8(2),
/* 61 S> */ B(Return),
]
constant pool: [
@@ -212,7 +210,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
B(Star), R(0),
- /* 46 S> */ B(ShiftRightSmi), U8(3), R(0), U8(1),
+ /* 46 S> */ B(ShiftRightSmi), U8(3), R(0), U8(2),
/* 61 S> */ B(Return),
]
constant pool: [
@@ -232,7 +230,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(10),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), U8(3),
- B(ShiftRightLogical), R(0), U8(1),
+ B(ShiftRightLogical), R(0), U8(2),
/* 62 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden
index 7eaaa88d05..48db168d03 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: yes
---
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
index 23501bd4b9..96c0428c6c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: no
test function name: f
@@ -19,12 +17,12 @@ bytecode array length: 13
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(Nop),
- /* 24 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(3), R(0),
- /* 25 E> */ B(Call), R(0), R(arg0), U8(1), U8(1),
+ /* 24 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(4), R(0),
+ /* 25 E> */ B(Call), R(0), R(arg0), U8(1), U8(2),
/* 33 S> */ B(Return),
]
constant pool: [
- "func",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["func"],
]
handlers: [
]
@@ -40,16 +38,16 @@ bytecode array length: 24
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 22 S> */ B(Nop),
- /* 30 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(3), R(0),
+ /* 30 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(4), R(0),
B(Ldar), R(0),
B(Mov), R(arg0), R(1),
B(Mov), R(arg1), R(2),
B(Mov), R(arg2), R(3),
- /* 31 E> */ B(Call), R(0), R(1), U8(3), U8(1),
+ /* 31 E> */ B(Call), R(0), R(1), U8(3), U8(2),
/* 43 S> */ B(Return),
]
constant pool: [
- "func",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["func"],
]
handlers: [
]
@@ -65,17 +63,17 @@ bytecode array length: 26
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 19 S> */ B(Nop),
- /* 27 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(3), R(0),
+ /* 27 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(4), R(0),
B(Ldar), R(arg1),
- /* 37 E> */ B(Add), R(arg1), U8(5),
+ /* 37 E> */ B(Add), R(arg1), U8(6),
B(Star), R(2),
B(Mov), R(arg0), R(1),
B(Mov), R(arg1), R(3),
- /* 28 E> */ B(Call), R(0), R(1), U8(3), U8(1),
+ /* 28 E> */ B(Call), R(0), R(1), U8(3), U8(2),
/* 44 S> */ B(Return),
]
constant pool: [
- "func",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["func"],
]
handlers: [
]
@@ -216,272 +214,272 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 663
+bytecode array length: 667
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 17 S> */ B(Nop),
- /* 18 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(1),
+ /* 18 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
/* 26 S> */ B(Nop),
- /* 27 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
+ /* 27 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
/* 35 S> */ B(Nop),
- /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
+ /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(6),
/* 44 S> */ B(Nop),
- /* 45 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(7),
+ /* 45 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(8),
/* 53 S> */ B(Nop),
- /* 54 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
+ /* 54 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(10),
/* 62 S> */ B(Nop),
- /* 63 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(11),
+ /* 63 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(12),
/* 71 S> */ B(Nop),
- /* 72 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(13),
+ /* 72 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(14),
/* 80 S> */ B(Nop),
- /* 81 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(15),
+ /* 81 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(16),
/* 89 S> */ B(Nop),
- /* 90 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(17),
+ /* 90 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(18),
/* 98 S> */ B(Nop),
- /* 99 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(19),
+ /* 99 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(20),
/* 107 S> */ B(Nop),
- /* 108 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(21),
+ /* 108 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(22),
/* 116 S> */ B(Nop),
- /* 117 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(23),
+ /* 117 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(24),
/* 125 S> */ B(Nop),
- /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(25),
+ /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(26),
/* 134 S> */ B(Nop),
- /* 135 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(27),
+ /* 135 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(28),
/* 143 S> */ B(Nop),
- /* 144 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(29),
+ /* 144 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(30),
/* 152 S> */ B(Nop),
- /* 153 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(31),
+ /* 153 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(32),
/* 161 S> */ B(Nop),
- /* 162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(33),
+ /* 162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(34),
/* 170 S> */ B(Nop),
- /* 171 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(35),
+ /* 171 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(36),
/* 179 S> */ B(Nop),
- /* 180 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(37),
+ /* 180 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(38),
/* 188 S> */ B(Nop),
- /* 189 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(39),
+ /* 189 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(40),
/* 197 S> */ B(Nop),
- /* 198 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(41),
+ /* 198 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(42),
/* 206 S> */ B(Nop),
- /* 207 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(43),
+ /* 207 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(44),
/* 215 S> */ B(Nop),
- /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(45),
+ /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(46),
/* 224 S> */ B(Nop),
- /* 225 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(47),
+ /* 225 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(48),
/* 233 S> */ B(Nop),
- /* 234 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(49),
+ /* 234 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(50),
/* 242 S> */ B(Nop),
- /* 243 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(51),
+ /* 243 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(52),
/* 251 S> */ B(Nop),
- /* 252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(53),
+ /* 252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(54),
/* 260 S> */ B(Nop),
- /* 261 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(55),
+ /* 261 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(56),
/* 269 S> */ B(Nop),
- /* 270 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(57),
+ /* 270 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(58),
/* 278 S> */ B(Nop),
- /* 279 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(59),
+ /* 279 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(60),
/* 287 S> */ B(Nop),
- /* 288 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(61),
+ /* 288 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(62),
/* 296 S> */ B(Nop),
- /* 297 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(63),
+ /* 297 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(64),
/* 305 S> */ B(Nop),
- /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(65),
+ /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(66),
/* 314 S> */ B(Nop),
- /* 315 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(67),
+ /* 315 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(68),
/* 323 S> */ B(Nop),
- /* 324 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(69),
+ /* 324 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(70),
/* 332 S> */ B(Nop),
- /* 333 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(71),
+ /* 333 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(72),
/* 341 S> */ B(Nop),
- /* 342 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(73),
+ /* 342 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(74),
/* 350 S> */ B(Nop),
- /* 351 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(75),
+ /* 351 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(76),
/* 359 S> */ B(Nop),
- /* 360 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(77),
+ /* 360 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(78),
/* 368 S> */ B(Nop),
- /* 369 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(79),
+ /* 369 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(80),
/* 377 S> */ B(Nop),
- /* 378 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(81),
+ /* 378 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(82),
/* 386 S> */ B(Nop),
- /* 387 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(83),
+ /* 387 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(84),
/* 395 S> */ B(Nop),
- /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(85),
+ /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(86),
/* 404 S> */ B(Nop),
- /* 405 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(87),
+ /* 405 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(88),
/* 413 S> */ B(Nop),
- /* 414 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(89),
+ /* 414 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(90),
/* 422 S> */ B(Nop),
- /* 423 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(91),
+ /* 423 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(92),
/* 431 S> */ B(Nop),
- /* 432 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(93),
+ /* 432 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(94),
/* 440 S> */ B(Nop),
- /* 441 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(95),
+ /* 441 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(96),
/* 449 S> */ B(Nop),
- /* 450 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(97),
+ /* 450 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(98),
/* 458 S> */ B(Nop),
- /* 459 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(99),
+ /* 459 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(100),
/* 467 S> */ B(Nop),
- /* 468 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(101),
+ /* 468 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(102),
/* 476 S> */ B(Nop),
- /* 477 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(103),
+ /* 477 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(104),
/* 485 S> */ B(Nop),
- /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(105),
+ /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(106),
/* 494 S> */ B(Nop),
- /* 495 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(107),
+ /* 495 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(108),
/* 503 S> */ B(Nop),
- /* 504 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(109),
+ /* 504 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(110),
/* 512 S> */ B(Nop),
- /* 513 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(111),
+ /* 513 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(112),
/* 521 S> */ B(Nop),
- /* 522 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(113),
+ /* 522 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(114),
/* 530 S> */ B(Nop),
- /* 531 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(115),
+ /* 531 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(116),
/* 539 S> */ B(Nop),
- /* 540 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(117),
+ /* 540 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(118),
/* 548 S> */ B(Nop),
- /* 549 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(119),
+ /* 549 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(120),
/* 557 S> */ B(Nop),
- /* 558 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(121),
+ /* 558 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(122),
/* 566 S> */ B(Nop),
- /* 567 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(123),
+ /* 567 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(124),
/* 575 S> */ B(Nop),
- /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(125),
+ /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(126),
/* 584 S> */ B(Nop),
- /* 585 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(127),
+ /* 585 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(128),
/* 593 S> */ B(Nop),
- /* 594 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(129),
+ /* 594 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(130),
/* 602 S> */ B(Nop),
- /* 603 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(131),
+ /* 603 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(132),
/* 611 S> */ B(Nop),
- /* 612 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(133),
+ /* 612 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(134),
/* 620 S> */ B(Nop),
- /* 621 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(135),
+ /* 621 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(136),
/* 629 S> */ B(Nop),
- /* 630 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(137),
+ /* 630 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(138),
/* 638 S> */ B(Nop),
- /* 639 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(139),
+ /* 639 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(140),
/* 647 S> */ B(Nop),
- /* 648 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(141),
+ /* 648 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(142),
/* 656 S> */ B(Nop),
- /* 657 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(143),
+ /* 657 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(144),
/* 665 S> */ B(Nop),
- /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(145),
+ /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(146),
/* 674 S> */ B(Nop),
- /* 675 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(147),
+ /* 675 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(148),
/* 683 S> */ B(Nop),
- /* 684 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(149),
+ /* 684 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(150),
/* 692 S> */ B(Nop),
- /* 693 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(151),
+ /* 693 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(152),
/* 701 S> */ B(Nop),
- /* 702 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(153),
+ /* 702 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(154),
/* 710 S> */ B(Nop),
- /* 711 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(155),
+ /* 711 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(156),
/* 719 S> */ B(Nop),
- /* 720 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(157),
+ /* 720 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(158),
/* 728 S> */ B(Nop),
- /* 729 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(159),
+ /* 729 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(160),
/* 737 S> */ B(Nop),
- /* 738 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(161),
+ /* 738 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(162),
/* 746 S> */ B(Nop),
- /* 747 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(163),
+ /* 747 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(164),
/* 755 S> */ B(Nop),
- /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(165),
+ /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(166),
/* 764 S> */ B(Nop),
- /* 765 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(167),
+ /* 765 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(168),
/* 773 S> */ B(Nop),
- /* 774 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(169),
+ /* 774 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(170),
/* 782 S> */ B(Nop),
- /* 783 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(171),
+ /* 783 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(172),
/* 791 S> */ B(Nop),
- /* 792 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(173),
+ /* 792 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(174),
/* 800 S> */ B(Nop),
- /* 801 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(175),
+ /* 801 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(176),
/* 809 S> */ B(Nop),
- /* 810 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(177),
+ /* 810 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(178),
/* 818 S> */ B(Nop),
- /* 819 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(179),
+ /* 819 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(180),
/* 827 S> */ B(Nop),
- /* 828 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(181),
+ /* 828 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(182),
/* 836 S> */ B(Nop),
- /* 837 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(183),
+ /* 837 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(184),
/* 845 S> */ B(Nop),
- /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(185),
+ /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(186),
/* 854 S> */ B(Nop),
- /* 855 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(187),
+ /* 855 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(188),
/* 863 S> */ B(Nop),
- /* 864 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(189),
+ /* 864 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(190),
/* 872 S> */ B(Nop),
- /* 873 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(191),
+ /* 873 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(192),
/* 881 S> */ B(Nop),
- /* 882 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(193),
+ /* 882 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(194),
/* 890 S> */ B(Nop),
- /* 891 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(195),
+ /* 891 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(196),
/* 899 S> */ B(Nop),
- /* 900 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(197),
+ /* 900 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(198),
/* 908 S> */ B(Nop),
- /* 909 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(199),
+ /* 909 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(200),
/* 917 S> */ B(Nop),
- /* 918 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(201),
+ /* 918 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(202),
/* 926 S> */ B(Nop),
- /* 927 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(203),
+ /* 927 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(204),
/* 935 S> */ B(Nop),
- /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(205),
+ /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(206),
/* 944 S> */ B(Nop),
- /* 945 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(207),
+ /* 945 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(208),
/* 953 S> */ B(Nop),
- /* 954 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(209),
+ /* 954 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(210),
/* 962 S> */ B(Nop),
- /* 963 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(211),
+ /* 963 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(212),
/* 971 S> */ B(Nop),
- /* 972 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(213),
+ /* 972 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(214),
/* 980 S> */ B(Nop),
- /* 981 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(215),
+ /* 981 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(216),
/* 989 S> */ B(Nop),
- /* 990 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(217),
+ /* 990 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(218),
/* 998 S> */ B(Nop),
- /* 999 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(219),
+ /* 999 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(220),
/* 1007 S> */ B(Nop),
- /* 1008 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(221),
+ /* 1008 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(222),
/* 1016 S> */ B(Nop),
- /* 1017 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(223),
+ /* 1017 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(224),
/* 1025 S> */ B(Nop),
- /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(225),
+ /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(226),
/* 1034 S> */ B(Nop),
- /* 1035 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(227),
+ /* 1035 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(228),
/* 1043 S> */ B(Nop),
- /* 1044 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(229),
+ /* 1044 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(230),
/* 1052 S> */ B(Nop),
- /* 1053 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(231),
+ /* 1053 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(232),
/* 1061 S> */ B(Nop),
- /* 1062 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(233),
+ /* 1062 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(234),
/* 1070 S> */ B(Nop),
- /* 1071 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(235),
+ /* 1071 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(236),
/* 1079 S> */ B(Nop),
- /* 1080 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(237),
+ /* 1080 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(238),
/* 1088 S> */ B(Nop),
- /* 1089 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(239),
+ /* 1089 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(240),
/* 1097 S> */ B(Nop),
- /* 1098 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(241),
+ /* 1098 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(242),
/* 1106 S> */ B(Nop),
- /* 1107 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(243),
+ /* 1107 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(244),
/* 1115 S> */ B(Nop),
- /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(245),
+ /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(246),
/* 1124 S> */ B(Nop),
- /* 1125 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(247),
+ /* 1125 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(248),
/* 1133 S> */ B(Nop),
- /* 1134 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(249),
+ /* 1134 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(250),
/* 1142 S> */ B(Nop),
- /* 1143 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(251),
+ /* 1143 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(252),
/* 1151 S> */ B(Nop),
- /* 1152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(253),
+ /* 1152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(254),
/* 1160 S> */ B(Nop),
- /* 1161 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(255),
+ /* 1161 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(256),
/* 1169 S> */ B(Nop),
- /* 1177 E> */ B(Wide), B(LdrNamedProperty), R16(arg0), U16(0), U16(259), R16(0),
- /* 1178 E> */ B(Wide), B(Call), R16(0), R16(arg0), U16(1), U16(257),
+ /* 1177 E> */ B(Wide), B(LdrNamedProperty), R16(arg0), U16(0), U16(260), R16(0),
+ /* 1178 E> */ B(Wide), B(Call), R16(0), R16(arg0), U16(1), U16(258),
/* 1186 S> */ B(Return),
]
constant pool: [
- "func",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["func"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
index cee0357ab8..09f073e859 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: no
test function name: f
@@ -19,11 +17,11 @@ bytecode array length: 7
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(Nop),
- /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(1),
+ /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
/* 31 S> */ B(Return),
]
constant pool: [
- "name",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
]
handlers: [
]
@@ -39,11 +37,11 @@ bytecode array length: 7
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(Nop),
- /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(1),
+ /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
/* 33 S> */ B(Return),
]
constant pool: [
- "key",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["key"],
]
handlers: [
]
@@ -59,7 +57,7 @@ bytecode array length: 7
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaSmi), U8(100),
- /* 24 E> */ B(LdaKeyedProperty), R(arg0), U8(1),
+ /* 24 E> */ B(LdaKeyedProperty), R(arg0), U8(2),
/* 31 S> */ B(Return),
]
constant pool: [
@@ -78,7 +76,7 @@ bytecode array length: 7
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 19 S> */ B(Ldar), R(arg1),
- /* 28 E> */ B(LdaKeyedProperty), R(arg0), U8(1),
+ /* 28 E> */ B(LdaKeyedProperty), R(arg0), U8(2),
/* 32 S> */ B(Return),
]
constant pool: [
@@ -97,14 +95,14 @@ bytecode array length: 15
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 25 S> */ B(Nop),
- /* 25 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(1), R(0),
+ /* 25 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(2), R(0),
B(Ldar), R(0),
/* 32 S> */ B(LdaSmi), U8(-124),
- /* 40 E> */ B(LdaKeyedProperty), R(arg0), U8(3),
+ /* 40 E> */ B(LdaKeyedProperty), R(arg0), U8(4),
/* 48 S> */ B(Return),
]
constant pool: [
- "name",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
]
handlers: [
]
@@ -247,399 +245,399 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 1035
+bytecode array length: 1040
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 27 S> */ B(Nop),
- /* 32 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(1), R(0),
+ /* 32 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(2), R(0),
B(Ldar), R(0),
/* 41 S> */ B(Nop),
- /* 46 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(3), R(0),
+ /* 46 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(4), R(0),
B(Ldar), R(0),
/* 55 S> */ B(Nop),
- /* 60 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(5), R(0),
+ /* 60 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(6), R(0),
B(Ldar), R(0),
/* 69 S> */ B(Nop),
- /* 74 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(7), R(0),
+ /* 74 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(8), R(0),
B(Ldar), R(0),
/* 83 S> */ B(Nop),
- /* 88 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(9), R(0),
+ /* 88 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(10), R(0),
B(Ldar), R(0),
/* 97 S> */ B(Nop),
- /* 102 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(11), R(0),
+ /* 102 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(12), R(0),
B(Ldar), R(0),
/* 111 S> */ B(Nop),
- /* 116 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(13), R(0),
+ /* 116 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(14), R(0),
B(Ldar), R(0),
/* 125 S> */ B(Nop),
- /* 130 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(15), R(0),
+ /* 130 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(16), R(0),
B(Ldar), R(0),
/* 139 S> */ B(Nop),
- /* 144 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(17), R(0),
+ /* 144 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(18), R(0),
B(Ldar), R(0),
/* 153 S> */ B(Nop),
- /* 158 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(19), R(0),
+ /* 158 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(20), R(0),
B(Ldar), R(0),
/* 167 S> */ B(Nop),
- /* 172 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(21), R(0),
+ /* 172 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(22), R(0),
B(Ldar), R(0),
/* 181 S> */ B(Nop),
- /* 186 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(23), R(0),
+ /* 186 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(24), R(0),
B(Ldar), R(0),
/* 195 S> */ B(Nop),
- /* 200 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(25), R(0),
+ /* 200 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(26), R(0),
B(Ldar), R(0),
/* 209 S> */ B(Nop),
- /* 214 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(27), R(0),
+ /* 214 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(28), R(0),
B(Ldar), R(0),
/* 223 S> */ B(Nop),
- /* 228 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(29), R(0),
+ /* 228 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(30), R(0),
B(Ldar), R(0),
/* 237 S> */ B(Nop),
- /* 242 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(31), R(0),
+ /* 242 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(32), R(0),
B(Ldar), R(0),
/* 251 S> */ B(Nop),
- /* 256 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(33), R(0),
+ /* 256 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(34), R(0),
B(Ldar), R(0),
/* 265 S> */ B(Nop),
- /* 270 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(35), R(0),
+ /* 270 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(36), R(0),
B(Ldar), R(0),
/* 279 S> */ B(Nop),
- /* 284 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(37), R(0),
+ /* 284 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(38), R(0),
B(Ldar), R(0),
/* 293 S> */ B(Nop),
- /* 298 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(39), R(0),
+ /* 298 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(40), R(0),
B(Ldar), R(0),
/* 307 S> */ B(Nop),
- /* 312 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(41), R(0),
+ /* 312 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(42), R(0),
B(Ldar), R(0),
/* 321 S> */ B(Nop),
- /* 326 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(43), R(0),
+ /* 326 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(44), R(0),
B(Ldar), R(0),
/* 335 S> */ B(Nop),
- /* 340 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(45), R(0),
+ /* 340 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(46), R(0),
B(Ldar), R(0),
/* 349 S> */ B(Nop),
- /* 354 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(47), R(0),
+ /* 354 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(48), R(0),
B(Ldar), R(0),
/* 363 S> */ B(Nop),
- /* 368 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(49), R(0),
+ /* 368 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(50), R(0),
B(Ldar), R(0),
/* 377 S> */ B(Nop),
- /* 382 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(51), R(0),
+ /* 382 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(52), R(0),
B(Ldar), R(0),
/* 391 S> */ B(Nop),
- /* 396 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(53), R(0),
+ /* 396 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(54), R(0),
B(Ldar), R(0),
/* 405 S> */ B(Nop),
- /* 410 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(55), R(0),
+ /* 410 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(56), R(0),
B(Ldar), R(0),
/* 419 S> */ B(Nop),
- /* 424 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(57), R(0),
+ /* 424 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(58), R(0),
B(Ldar), R(0),
/* 433 S> */ B(Nop),
- /* 438 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(59), R(0),
+ /* 438 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(60), R(0),
B(Ldar), R(0),
/* 447 S> */ B(Nop),
- /* 452 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(61), R(0),
+ /* 452 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(62), R(0),
B(Ldar), R(0),
/* 461 S> */ B(Nop),
- /* 466 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(63), R(0),
+ /* 466 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(64), R(0),
B(Ldar), R(0),
/* 475 S> */ B(Nop),
- /* 480 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(65), R(0),
+ /* 480 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(66), R(0),
B(Ldar), R(0),
/* 489 S> */ B(Nop),
- /* 494 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(67), R(0),
+ /* 494 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(68), R(0),
B(Ldar), R(0),
/* 503 S> */ B(Nop),
- /* 508 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(69), R(0),
+ /* 508 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(70), R(0),
B(Ldar), R(0),
/* 517 S> */ B(Nop),
- /* 522 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(71), R(0),
+ /* 522 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(72), R(0),
B(Ldar), R(0),
/* 531 S> */ B(Nop),
- /* 536 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(73), R(0),
+ /* 536 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(74), R(0),
B(Ldar), R(0),
/* 545 S> */ B(Nop),
- /* 550 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(75), R(0),
+ /* 550 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(76), R(0),
B(Ldar), R(0),
/* 559 S> */ B(Nop),
- /* 564 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(77), R(0),
+ /* 564 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(78), R(0),
B(Ldar), R(0),
/* 573 S> */ B(Nop),
- /* 578 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(79), R(0),
+ /* 578 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(80), R(0),
B(Ldar), R(0),
/* 587 S> */ B(Nop),
- /* 592 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(81), R(0),
+ /* 592 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(82), R(0),
B(Ldar), R(0),
/* 601 S> */ B(Nop),
- /* 606 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(83), R(0),
+ /* 606 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(84), R(0),
B(Ldar), R(0),
/* 615 S> */ B(Nop),
- /* 620 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(85), R(0),
+ /* 620 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(86), R(0),
B(Ldar), R(0),
/* 629 S> */ B(Nop),
- /* 634 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(87), R(0),
+ /* 634 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(88), R(0),
B(Ldar), R(0),
/* 643 S> */ B(Nop),
- /* 648 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(89), R(0),
+ /* 648 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(90), R(0),
B(Ldar), R(0),
/* 657 S> */ B(Nop),
- /* 662 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(91), R(0),
+ /* 662 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(92), R(0),
B(Ldar), R(0),
/* 671 S> */ B(Nop),
- /* 676 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(93), R(0),
+ /* 676 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(94), R(0),
B(Ldar), R(0),
/* 685 S> */ B(Nop),
- /* 690 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(95), R(0),
+ /* 690 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(96), R(0),
B(Ldar), R(0),
/* 699 S> */ B(Nop),
- /* 704 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(97), R(0),
+ /* 704 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(98), R(0),
B(Ldar), R(0),
/* 713 S> */ B(Nop),
- /* 718 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(99), R(0),
+ /* 718 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(100), R(0),
B(Ldar), R(0),
/* 727 S> */ B(Nop),
- /* 732 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(101), R(0),
+ /* 732 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(102), R(0),
B(Ldar), R(0),
/* 741 S> */ B(Nop),
- /* 746 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(103), R(0),
+ /* 746 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(104), R(0),
B(Ldar), R(0),
/* 755 S> */ B(Nop),
- /* 760 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(105), R(0),
+ /* 760 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(106), R(0),
B(Ldar), R(0),
/* 769 S> */ B(Nop),
- /* 774 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(107), R(0),
+ /* 774 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(108), R(0),
B(Ldar), R(0),
/* 783 S> */ B(Nop),
- /* 788 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(109), R(0),
+ /* 788 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(110), R(0),
B(Ldar), R(0),
/* 797 S> */ B(Nop),
- /* 802 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(111), R(0),
+ /* 802 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(112), R(0),
B(Ldar), R(0),
/* 811 S> */ B(Nop),
- /* 816 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(113), R(0),
+ /* 816 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(114), R(0),
B(Ldar), R(0),
/* 825 S> */ B(Nop),
- /* 830 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(115), R(0),
+ /* 830 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(116), R(0),
B(Ldar), R(0),
/* 839 S> */ B(Nop),
- /* 844 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(117), R(0),
+ /* 844 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(118), R(0),
B(Ldar), R(0),
/* 853 S> */ B(Nop),
- /* 858 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(119), R(0),
+ /* 858 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(120), R(0),
B(Ldar), R(0),
/* 867 S> */ B(Nop),
- /* 872 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(121), R(0),
+ /* 872 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(122), R(0),
B(Ldar), R(0),
/* 881 S> */ B(Nop),
- /* 886 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(123), R(0),
+ /* 886 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(124), R(0),
B(Ldar), R(0),
/* 895 S> */ B(Nop),
- /* 900 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(125), R(0),
+ /* 900 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(126), R(0),
B(Ldar), R(0),
/* 909 S> */ B(Nop),
- /* 914 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(127), R(0),
+ /* 914 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(128), R(0),
B(Ldar), R(0),
/* 923 S> */ B(Nop),
- /* 928 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(129), R(0),
+ /* 928 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(130), R(0),
B(Ldar), R(0),
/* 937 S> */ B(Nop),
- /* 942 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(131), R(0),
+ /* 942 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(132), R(0),
B(Ldar), R(0),
/* 951 S> */ B(Nop),
- /* 956 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(133), R(0),
+ /* 956 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(134), R(0),
B(Ldar), R(0),
/* 965 S> */ B(Nop),
- /* 970 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(135), R(0),
+ /* 970 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(136), R(0),
B(Ldar), R(0),
/* 979 S> */ B(Nop),
- /* 984 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(137), R(0),
+ /* 984 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(138), R(0),
B(Ldar), R(0),
/* 993 S> */ B(Nop),
- /* 998 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(139), R(0),
+ /* 998 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(140), R(0),
B(Ldar), R(0),
/* 1007 S> */ B(Nop),
- /* 1012 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(141), R(0),
+ /* 1012 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(142), R(0),
B(Ldar), R(0),
/* 1021 S> */ B(Nop),
- /* 1026 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(143), R(0),
+ /* 1026 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(144), R(0),
B(Ldar), R(0),
/* 1035 S> */ B(Nop),
- /* 1040 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(145), R(0),
+ /* 1040 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(146), R(0),
B(Ldar), R(0),
/* 1049 S> */ B(Nop),
- /* 1054 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(147), R(0),
+ /* 1054 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(148), R(0),
B(Ldar), R(0),
/* 1063 S> */ B(Nop),
- /* 1068 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(149), R(0),
+ /* 1068 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(150), R(0),
B(Ldar), R(0),
/* 1077 S> */ B(Nop),
- /* 1082 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(151), R(0),
+ /* 1082 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(152), R(0),
B(Ldar), R(0),
/* 1091 S> */ B(Nop),
- /* 1096 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(153), R(0),
+ /* 1096 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(154), R(0),
B(Ldar), R(0),
/* 1105 S> */ B(Nop),
- /* 1110 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(155), R(0),
+ /* 1110 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(156), R(0),
B(Ldar), R(0),
/* 1119 S> */ B(Nop),
- /* 1124 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(157), R(0),
+ /* 1124 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(158), R(0),
B(Ldar), R(0),
/* 1133 S> */ B(Nop),
- /* 1138 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(159), R(0),
+ /* 1138 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(160), R(0),
B(Ldar), R(0),
/* 1147 S> */ B(Nop),
- /* 1152 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(161), R(0),
+ /* 1152 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(162), R(0),
B(Ldar), R(0),
/* 1161 S> */ B(Nop),
- /* 1166 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(163), R(0),
+ /* 1166 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(164), R(0),
B(Ldar), R(0),
/* 1175 S> */ B(Nop),
- /* 1180 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(165), R(0),
+ /* 1180 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(166), R(0),
B(Ldar), R(0),
/* 1189 S> */ B(Nop),
- /* 1194 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(167), R(0),
+ /* 1194 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(168), R(0),
B(Ldar), R(0),
/* 1203 S> */ B(Nop),
- /* 1208 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(169), R(0),
+ /* 1208 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(170), R(0),
B(Ldar), R(0),
/* 1217 S> */ B(Nop),
- /* 1222 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(171), R(0),
+ /* 1222 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(172), R(0),
B(Ldar), R(0),
/* 1231 S> */ B(Nop),
- /* 1236 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(173), R(0),
+ /* 1236 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(174), R(0),
B(Ldar), R(0),
/* 1245 S> */ B(Nop),
- /* 1250 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(175), R(0),
+ /* 1250 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(176), R(0),
B(Ldar), R(0),
/* 1259 S> */ B(Nop),
- /* 1264 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(177), R(0),
+ /* 1264 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(178), R(0),
B(Ldar), R(0),
/* 1273 S> */ B(Nop),
- /* 1278 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(179), R(0),
+ /* 1278 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(180), R(0),
B(Ldar), R(0),
/* 1287 S> */ B(Nop),
- /* 1292 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(181), R(0),
+ /* 1292 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(182), R(0),
B(Ldar), R(0),
/* 1301 S> */ B(Nop),
- /* 1306 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(183), R(0),
+ /* 1306 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(184), R(0),
B(Ldar), R(0),
/* 1315 S> */ B(Nop),
- /* 1320 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(185), R(0),
+ /* 1320 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(186), R(0),
B(Ldar), R(0),
/* 1329 S> */ B(Nop),
- /* 1334 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(187), R(0),
+ /* 1334 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(188), R(0),
B(Ldar), R(0),
/* 1343 S> */ B(Nop),
- /* 1348 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(189), R(0),
+ /* 1348 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(190), R(0),
B(Ldar), R(0),
/* 1357 S> */ B(Nop),
- /* 1362 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(191), R(0),
+ /* 1362 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(192), R(0),
B(Ldar), R(0),
/* 1371 S> */ B(Nop),
- /* 1376 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(193), R(0),
+ /* 1376 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(194), R(0),
B(Ldar), R(0),
/* 1385 S> */ B(Nop),
- /* 1390 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(195), R(0),
+ /* 1390 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(196), R(0),
B(Ldar), R(0),
/* 1399 S> */ B(Nop),
- /* 1404 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(197), R(0),
+ /* 1404 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(198), R(0),
B(Ldar), R(0),
/* 1413 S> */ B(Nop),
- /* 1418 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(199), R(0),
+ /* 1418 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(200), R(0),
B(Ldar), R(0),
/* 1427 S> */ B(Nop),
- /* 1432 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(201), R(0),
+ /* 1432 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(202), R(0),
B(Ldar), R(0),
/* 1441 S> */ B(Nop),
- /* 1446 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(203), R(0),
+ /* 1446 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(204), R(0),
B(Ldar), R(0),
/* 1455 S> */ B(Nop),
- /* 1460 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(205), R(0),
+ /* 1460 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(206), R(0),
B(Ldar), R(0),
/* 1469 S> */ B(Nop),
- /* 1474 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(207), R(0),
+ /* 1474 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(208), R(0),
B(Ldar), R(0),
/* 1483 S> */ B(Nop),
- /* 1488 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(209), R(0),
+ /* 1488 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(210), R(0),
B(Ldar), R(0),
/* 1497 S> */ B(Nop),
- /* 1502 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(211), R(0),
+ /* 1502 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(212), R(0),
B(Ldar), R(0),
/* 1511 S> */ B(Nop),
- /* 1516 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(213), R(0),
+ /* 1516 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(214), R(0),
B(Ldar), R(0),
/* 1525 S> */ B(Nop),
- /* 1530 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(215), R(0),
+ /* 1530 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(216), R(0),
B(Ldar), R(0),
/* 1539 S> */ B(Nop),
- /* 1544 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(217), R(0),
+ /* 1544 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(218), R(0),
B(Ldar), R(0),
/* 1553 S> */ B(Nop),
- /* 1558 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(219), R(0),
+ /* 1558 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(220), R(0),
B(Ldar), R(0),
/* 1567 S> */ B(Nop),
- /* 1572 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(221), R(0),
+ /* 1572 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(222), R(0),
B(Ldar), R(0),
/* 1581 S> */ B(Nop),
- /* 1586 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(223), R(0),
+ /* 1586 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(224), R(0),
B(Ldar), R(0),
/* 1595 S> */ B(Nop),
- /* 1600 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(225), R(0),
+ /* 1600 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(226), R(0),
B(Ldar), R(0),
/* 1609 S> */ B(Nop),
- /* 1614 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(227), R(0),
+ /* 1614 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(228), R(0),
B(Ldar), R(0),
/* 1623 S> */ B(Nop),
- /* 1628 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(229), R(0),
+ /* 1628 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(230), R(0),
B(Ldar), R(0),
/* 1637 S> */ B(Nop),
- /* 1642 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(231), R(0),
+ /* 1642 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(232), R(0),
B(Ldar), R(0),
/* 1651 S> */ B(Nop),
- /* 1656 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(233), R(0),
+ /* 1656 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(234), R(0),
B(Ldar), R(0),
/* 1665 S> */ B(Nop),
- /* 1670 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(235), R(0),
+ /* 1670 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(236), R(0),
B(Ldar), R(0),
/* 1679 S> */ B(Nop),
- /* 1684 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(237), R(0),
+ /* 1684 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(238), R(0),
B(Ldar), R(0),
/* 1693 S> */ B(Nop),
- /* 1698 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(239), R(0),
+ /* 1698 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(240), R(0),
B(Ldar), R(0),
/* 1707 S> */ B(Nop),
- /* 1712 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(241), R(0),
+ /* 1712 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(242), R(0),
B(Ldar), R(0),
/* 1721 S> */ B(Nop),
- /* 1726 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(243), R(0),
+ /* 1726 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(244), R(0),
B(Ldar), R(0),
/* 1735 S> */ B(Nop),
- /* 1740 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(245), R(0),
+ /* 1740 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(246), R(0),
B(Ldar), R(0),
/* 1749 S> */ B(Nop),
- /* 1754 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(247), R(0),
+ /* 1754 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(248), R(0),
B(Ldar), R(0),
/* 1763 S> */ B(Nop),
- /* 1768 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(249), R(0),
+ /* 1768 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(250), R(0),
B(Ldar), R(0),
/* 1777 S> */ B(Nop),
- /* 1782 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(251), R(0),
+ /* 1782 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(252), R(0),
B(Ldar), R(0),
/* 1791 S> */ B(Nop),
- /* 1796 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(253), R(0),
+ /* 1796 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(254), R(0),
B(Ldar), R(0),
/* 1805 S> */ B(Nop),
- /* 1810 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(255), R(0),
+ /* 1810 E> */ B(Wide), B(LdrNamedProperty), R16(arg0), U16(0), U16(256), R16(0),
B(Ldar), R(0),
/* 1819 S> */ B(Nop),
- /* 1827 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(257),
+ /* 1827 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(258),
/* 1834 S> */ B(Return),
]
constant pool: [
- "name",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
]
handlers: [
]
@@ -782,395 +780,395 @@ snippet: "
"
frame size: 1
parameter count: 3
-bytecode array length: 1034
+bytecode array length: 1038
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 30 S> */ B(Ldar), R(arg1),
- /* 36 E> */ B(LdrKeyedProperty), R(arg0), U8(1), R(0),
+ /* 36 E> */ B(LdrKeyedProperty), R(arg0), U8(2), R(0),
B(Ldar), R(0),
/* 42 S> */ B(Ldar), R(arg1),
- /* 48 E> */ B(LdrKeyedProperty), R(arg0), U8(3), R(0),
+ /* 48 E> */ B(LdrKeyedProperty), R(arg0), U8(4), R(0),
B(Ldar), R(0),
/* 54 S> */ B(Ldar), R(arg1),
- /* 60 E> */ B(LdrKeyedProperty), R(arg0), U8(5), R(0),
+ /* 60 E> */ B(LdrKeyedProperty), R(arg0), U8(6), R(0),
B(Ldar), R(0),
/* 66 S> */ B(Ldar), R(arg1),
- /* 72 E> */ B(LdrKeyedProperty), R(arg0), U8(7), R(0),
+ /* 72 E> */ B(LdrKeyedProperty), R(arg0), U8(8), R(0),
B(Ldar), R(0),
/* 78 S> */ B(Ldar), R(arg1),
- /* 84 E> */ B(LdrKeyedProperty), R(arg0), U8(9), R(0),
+ /* 84 E> */ B(LdrKeyedProperty), R(arg0), U8(10), R(0),
B(Ldar), R(0),
/* 90 S> */ B(Ldar), R(arg1),
- /* 96 E> */ B(LdrKeyedProperty), R(arg0), U8(11), R(0),
+ /* 96 E> */ B(LdrKeyedProperty), R(arg0), U8(12), R(0),
B(Ldar), R(0),
/* 102 S> */ B(Ldar), R(arg1),
- /* 108 E> */ B(LdrKeyedProperty), R(arg0), U8(13), R(0),
+ /* 108 E> */ B(LdrKeyedProperty), R(arg0), U8(14), R(0),
B(Ldar), R(0),
/* 114 S> */ B(Ldar), R(arg1),
- /* 120 E> */ B(LdrKeyedProperty), R(arg0), U8(15), R(0),
+ /* 120 E> */ B(LdrKeyedProperty), R(arg0), U8(16), R(0),
B(Ldar), R(0),
/* 126 S> */ B(Ldar), R(arg1),
- /* 132 E> */ B(LdrKeyedProperty), R(arg0), U8(17), R(0),
+ /* 132 E> */ B(LdrKeyedProperty), R(arg0), U8(18), R(0),
B(Ldar), R(0),
/* 138 S> */ B(Ldar), R(arg1),
- /* 144 E> */ B(LdrKeyedProperty), R(arg0), U8(19), R(0),
+ /* 144 E> */ B(LdrKeyedProperty), R(arg0), U8(20), R(0),
B(Ldar), R(0),
/* 150 S> */ B(Ldar), R(arg1),
- /* 156 E> */ B(LdrKeyedProperty), R(arg0), U8(21), R(0),
+ /* 156 E> */ B(LdrKeyedProperty), R(arg0), U8(22), R(0),
B(Ldar), R(0),
/* 162 S> */ B(Ldar), R(arg1),
- /* 168 E> */ B(LdrKeyedProperty), R(arg0), U8(23), R(0),
+ /* 168 E> */ B(LdrKeyedProperty), R(arg0), U8(24), R(0),
B(Ldar), R(0),
/* 174 S> */ B(Ldar), R(arg1),
- /* 180 E> */ B(LdrKeyedProperty), R(arg0), U8(25), R(0),
+ /* 180 E> */ B(LdrKeyedProperty), R(arg0), U8(26), R(0),
B(Ldar), R(0),
/* 186 S> */ B(Ldar), R(arg1),
- /* 192 E> */ B(LdrKeyedProperty), R(arg0), U8(27), R(0),
+ /* 192 E> */ B(LdrKeyedProperty), R(arg0), U8(28), R(0),
B(Ldar), R(0),
/* 198 S> */ B(Ldar), R(arg1),
- /* 204 E> */ B(LdrKeyedProperty), R(arg0), U8(29), R(0),
+ /* 204 E> */ B(LdrKeyedProperty), R(arg0), U8(30), R(0),
B(Ldar), R(0),
/* 210 S> */ B(Ldar), R(arg1),
- /* 216 E> */ B(LdrKeyedProperty), R(arg0), U8(31), R(0),
+ /* 216 E> */ B(LdrKeyedProperty), R(arg0), U8(32), R(0),
B(Ldar), R(0),
/* 222 S> */ B(Ldar), R(arg1),
- /* 228 E> */ B(LdrKeyedProperty), R(arg0), U8(33), R(0),
+ /* 228 E> */ B(LdrKeyedProperty), R(arg0), U8(34), R(0),
B(Ldar), R(0),
/* 234 S> */ B(Ldar), R(arg1),
- /* 240 E> */ B(LdrKeyedProperty), R(arg0), U8(35), R(0),
+ /* 240 E> */ B(LdrKeyedProperty), R(arg0), U8(36), R(0),
B(Ldar), R(0),
/* 246 S> */ B(Ldar), R(arg1),
- /* 252 E> */ B(LdrKeyedProperty), R(arg0), U8(37), R(0),
+ /* 252 E> */ B(LdrKeyedProperty), R(arg0), U8(38), R(0),
B(Ldar), R(0),
/* 258 S> */ B(Ldar), R(arg1),
- /* 264 E> */ B(LdrKeyedProperty), R(arg0), U8(39), R(0),
+ /* 264 E> */ B(LdrKeyedProperty), R(arg0), U8(40), R(0),
B(Ldar), R(0),
/* 270 S> */ B(Ldar), R(arg1),
- /* 276 E> */ B(LdrKeyedProperty), R(arg0), U8(41), R(0),
+ /* 276 E> */ B(LdrKeyedProperty), R(arg0), U8(42), R(0),
B(Ldar), R(0),
/* 282 S> */ B(Ldar), R(arg1),
- /* 288 E> */ B(LdrKeyedProperty), R(arg0), U8(43), R(0),
+ /* 288 E> */ B(LdrKeyedProperty), R(arg0), U8(44), R(0),
B(Ldar), R(0),
/* 294 S> */ B(Ldar), R(arg1),
- /* 300 E> */ B(LdrKeyedProperty), R(arg0), U8(45), R(0),
+ /* 300 E> */ B(LdrKeyedProperty), R(arg0), U8(46), R(0),
B(Ldar), R(0),
/* 306 S> */ B(Ldar), R(arg1),
- /* 312 E> */ B(LdrKeyedProperty), R(arg0), U8(47), R(0),
+ /* 312 E> */ B(LdrKeyedProperty), R(arg0), U8(48), R(0),
B(Ldar), R(0),
/* 318 S> */ B(Ldar), R(arg1),
- /* 324 E> */ B(LdrKeyedProperty), R(arg0), U8(49), R(0),
+ /* 324 E> */ B(LdrKeyedProperty), R(arg0), U8(50), R(0),
B(Ldar), R(0),
/* 330 S> */ B(Ldar), R(arg1),
- /* 336 E> */ B(LdrKeyedProperty), R(arg0), U8(51), R(0),
+ /* 336 E> */ B(LdrKeyedProperty), R(arg0), U8(52), R(0),
B(Ldar), R(0),
/* 342 S> */ B(Ldar), R(arg1),
- /* 348 E> */ B(LdrKeyedProperty), R(arg0), U8(53), R(0),
+ /* 348 E> */ B(LdrKeyedProperty), R(arg0), U8(54), R(0),
B(Ldar), R(0),
/* 354 S> */ B(Ldar), R(arg1),
- /* 360 E> */ B(LdrKeyedProperty), R(arg0), U8(55), R(0),
+ /* 360 E> */ B(LdrKeyedProperty), R(arg0), U8(56), R(0),
B(Ldar), R(0),
/* 366 S> */ B(Ldar), R(arg1),
- /* 372 E> */ B(LdrKeyedProperty), R(arg0), U8(57), R(0),
+ /* 372 E> */ B(LdrKeyedProperty), R(arg0), U8(58), R(0),
B(Ldar), R(0),
/* 378 S> */ B(Ldar), R(arg1),
- /* 384 E> */ B(LdrKeyedProperty), R(arg0), U8(59), R(0),
+ /* 384 E> */ B(LdrKeyedProperty), R(arg0), U8(60), R(0),
B(Ldar), R(0),
/* 390 S> */ B(Ldar), R(arg1),
- /* 396 E> */ B(LdrKeyedProperty), R(arg0), U8(61), R(0),
+ /* 396 E> */ B(LdrKeyedProperty), R(arg0), U8(62), R(0),
B(Ldar), R(0),
/* 402 S> */ B(Ldar), R(arg1),
- /* 408 E> */ B(LdrKeyedProperty), R(arg0), U8(63), R(0),
+ /* 408 E> */ B(LdrKeyedProperty), R(arg0), U8(64), R(0),
B(Ldar), R(0),
/* 414 S> */ B(Ldar), R(arg1),
- /* 420 E> */ B(LdrKeyedProperty), R(arg0), U8(65), R(0),
+ /* 420 E> */ B(LdrKeyedProperty), R(arg0), U8(66), R(0),
B(Ldar), R(0),
/* 426 S> */ B(Ldar), R(arg1),
- /* 432 E> */ B(LdrKeyedProperty), R(arg0), U8(67), R(0),
+ /* 432 E> */ B(LdrKeyedProperty), R(arg0), U8(68), R(0),
B(Ldar), R(0),
/* 438 S> */ B(Ldar), R(arg1),
- /* 444 E> */ B(LdrKeyedProperty), R(arg0), U8(69), R(0),
+ /* 444 E> */ B(LdrKeyedProperty), R(arg0), U8(70), R(0),
B(Ldar), R(0),
/* 450 S> */ B(Ldar), R(arg1),
- /* 456 E> */ B(LdrKeyedProperty), R(arg0), U8(71), R(0),
+ /* 456 E> */ B(LdrKeyedProperty), R(arg0), U8(72), R(0),
B(Ldar), R(0),
/* 462 S> */ B(Ldar), R(arg1),
- /* 468 E> */ B(LdrKeyedProperty), R(arg0), U8(73), R(0),
+ /* 468 E> */ B(LdrKeyedProperty), R(arg0), U8(74), R(0),
B(Ldar), R(0),
/* 474 S> */ B(Ldar), R(arg1),
- /* 480 E> */ B(LdrKeyedProperty), R(arg0), U8(75), R(0),
+ /* 480 E> */ B(LdrKeyedProperty), R(arg0), U8(76), R(0),
B(Ldar), R(0),
/* 486 S> */ B(Ldar), R(arg1),
- /* 492 E> */ B(LdrKeyedProperty), R(arg0), U8(77), R(0),
+ /* 492 E> */ B(LdrKeyedProperty), R(arg0), U8(78), R(0),
B(Ldar), R(0),
/* 498 S> */ B(Ldar), R(arg1),
- /* 504 E> */ B(LdrKeyedProperty), R(arg0), U8(79), R(0),
+ /* 504 E> */ B(LdrKeyedProperty), R(arg0), U8(80), R(0),
B(Ldar), R(0),
/* 510 S> */ B(Ldar), R(arg1),
- /* 516 E> */ B(LdrKeyedProperty), R(arg0), U8(81), R(0),
+ /* 516 E> */ B(LdrKeyedProperty), R(arg0), U8(82), R(0),
B(Ldar), R(0),
/* 522 S> */ B(Ldar), R(arg1),
- /* 528 E> */ B(LdrKeyedProperty), R(arg0), U8(83), R(0),
+ /* 528 E> */ B(LdrKeyedProperty), R(arg0), U8(84), R(0),
B(Ldar), R(0),
/* 534 S> */ B(Ldar), R(arg1),
- /* 540 E> */ B(LdrKeyedProperty), R(arg0), U8(85), R(0),
+ /* 540 E> */ B(LdrKeyedProperty), R(arg0), U8(86), R(0),
B(Ldar), R(0),
/* 546 S> */ B(Ldar), R(arg1),
- /* 552 E> */ B(LdrKeyedProperty), R(arg0), U8(87), R(0),
+ /* 552 E> */ B(LdrKeyedProperty), R(arg0), U8(88), R(0),
B(Ldar), R(0),
/* 558 S> */ B(Ldar), R(arg1),
- /* 564 E> */ B(LdrKeyedProperty), R(arg0), U8(89), R(0),
+ /* 564 E> */ B(LdrKeyedProperty), R(arg0), U8(90), R(0),
B(Ldar), R(0),
/* 570 S> */ B(Ldar), R(arg1),
- /* 576 E> */ B(LdrKeyedProperty), R(arg0), U8(91), R(0),
+ /* 576 E> */ B(LdrKeyedProperty), R(arg0), U8(92), R(0),
B(Ldar), R(0),
/* 582 S> */ B(Ldar), R(arg1),
- /* 588 E> */ B(LdrKeyedProperty), R(arg0), U8(93), R(0),
+ /* 588 E> */ B(LdrKeyedProperty), R(arg0), U8(94), R(0),
B(Ldar), R(0),
/* 594 S> */ B(Ldar), R(arg1),
- /* 600 E> */ B(LdrKeyedProperty), R(arg0), U8(95), R(0),
+ /* 600 E> */ B(LdrKeyedProperty), R(arg0), U8(96), R(0),
B(Ldar), R(0),
/* 606 S> */ B(Ldar), R(arg1),
- /* 612 E> */ B(LdrKeyedProperty), R(arg0), U8(97), R(0),
+ /* 612 E> */ B(LdrKeyedProperty), R(arg0), U8(98), R(0),
B(Ldar), R(0),
/* 618 S> */ B(Ldar), R(arg1),
- /* 624 E> */ B(LdrKeyedProperty), R(arg0), U8(99), R(0),
+ /* 624 E> */ B(LdrKeyedProperty), R(arg0), U8(100), R(0),
B(Ldar), R(0),
/* 630 S> */ B(Ldar), R(arg1),
- /* 636 E> */ B(LdrKeyedProperty), R(arg0), U8(101), R(0),
+ /* 636 E> */ B(LdrKeyedProperty), R(arg0), U8(102), R(0),
B(Ldar), R(0),
/* 642 S> */ B(Ldar), R(arg1),
- /* 648 E> */ B(LdrKeyedProperty), R(arg0), U8(103), R(0),
+ /* 648 E> */ B(LdrKeyedProperty), R(arg0), U8(104), R(0),
B(Ldar), R(0),
/* 654 S> */ B(Ldar), R(arg1),
- /* 660 E> */ B(LdrKeyedProperty), R(arg0), U8(105), R(0),
+ /* 660 E> */ B(LdrKeyedProperty), R(arg0), U8(106), R(0),
B(Ldar), R(0),
/* 666 S> */ B(Ldar), R(arg1),
- /* 672 E> */ B(LdrKeyedProperty), R(arg0), U8(107), R(0),
+ /* 672 E> */ B(LdrKeyedProperty), R(arg0), U8(108), R(0),
B(Ldar), R(0),
/* 678 S> */ B(Ldar), R(arg1),
- /* 684 E> */ B(LdrKeyedProperty), R(arg0), U8(109), R(0),
+ /* 684 E> */ B(LdrKeyedProperty), R(arg0), U8(110), R(0),
B(Ldar), R(0),
/* 690 S> */ B(Ldar), R(arg1),
- /* 696 E> */ B(LdrKeyedProperty), R(arg0), U8(111), R(0),
+ /* 696 E> */ B(LdrKeyedProperty), R(arg0), U8(112), R(0),
B(Ldar), R(0),
/* 702 S> */ B(Ldar), R(arg1),
- /* 708 E> */ B(LdrKeyedProperty), R(arg0), U8(113), R(0),
+ /* 708 E> */ B(LdrKeyedProperty), R(arg0), U8(114), R(0),
B(Ldar), R(0),
/* 714 S> */ B(Ldar), R(arg1),
- /* 720 E> */ B(LdrKeyedProperty), R(arg0), U8(115), R(0),
+ /* 720 E> */ B(LdrKeyedProperty), R(arg0), U8(116), R(0),
B(Ldar), R(0),
/* 726 S> */ B(Ldar), R(arg1),
- /* 732 E> */ B(LdrKeyedProperty), R(arg0), U8(117), R(0),
+ /* 732 E> */ B(LdrKeyedProperty), R(arg0), U8(118), R(0),
B(Ldar), R(0),
/* 738 S> */ B(Ldar), R(arg1),
- /* 744 E> */ B(LdrKeyedProperty), R(arg0), U8(119), R(0),
+ /* 744 E> */ B(LdrKeyedProperty), R(arg0), U8(120), R(0),
B(Ldar), R(0),
/* 750 S> */ B(Ldar), R(arg1),
- /* 756 E> */ B(LdrKeyedProperty), R(arg0), U8(121), R(0),
+ /* 756 E> */ B(LdrKeyedProperty), R(arg0), U8(122), R(0),
B(Ldar), R(0),
/* 762 S> */ B(Ldar), R(arg1),
- /* 768 E> */ B(LdrKeyedProperty), R(arg0), U8(123), R(0),
+ /* 768 E> */ B(LdrKeyedProperty), R(arg0), U8(124), R(0),
B(Ldar), R(0),
/* 774 S> */ B(Ldar), R(arg1),
- /* 780 E> */ B(LdrKeyedProperty), R(arg0), U8(125), R(0),
+ /* 780 E> */ B(LdrKeyedProperty), R(arg0), U8(126), R(0),
B(Ldar), R(0),
/* 786 S> */ B(Ldar), R(arg1),
- /* 792 E> */ B(LdrKeyedProperty), R(arg0), U8(127), R(0),
+ /* 792 E> */ B(LdrKeyedProperty), R(arg0), U8(128), R(0),
B(Ldar), R(0),
/* 798 S> */ B(Ldar), R(arg1),
- /* 804 E> */ B(LdrKeyedProperty), R(arg0), U8(129), R(0),
+ /* 804 E> */ B(LdrKeyedProperty), R(arg0), U8(130), R(0),
B(Ldar), R(0),
/* 810 S> */ B(Ldar), R(arg1),
- /* 816 E> */ B(LdrKeyedProperty), R(arg0), U8(131), R(0),
+ /* 816 E> */ B(LdrKeyedProperty), R(arg0), U8(132), R(0),
B(Ldar), R(0),
/* 822 S> */ B(Ldar), R(arg1),
- /* 828 E> */ B(LdrKeyedProperty), R(arg0), U8(133), R(0),
+ /* 828 E> */ B(LdrKeyedProperty), R(arg0), U8(134), R(0),
B(Ldar), R(0),
/* 834 S> */ B(Ldar), R(arg1),
- /* 840 E> */ B(LdrKeyedProperty), R(arg0), U8(135), R(0),
+ /* 840 E> */ B(LdrKeyedProperty), R(arg0), U8(136), R(0),
B(Ldar), R(0),
/* 846 S> */ B(Ldar), R(arg1),
- /* 852 E> */ B(LdrKeyedProperty), R(arg0), U8(137), R(0),
+ /* 852 E> */ B(LdrKeyedProperty), R(arg0), U8(138), R(0),
B(Ldar), R(0),
/* 858 S> */ B(Ldar), R(arg1),
- /* 864 E> */ B(LdrKeyedProperty), R(arg0), U8(139), R(0),
+ /* 864 E> */ B(LdrKeyedProperty), R(arg0), U8(140), R(0),
B(Ldar), R(0),
/* 870 S> */ B(Ldar), R(arg1),
- /* 876 E> */ B(LdrKeyedProperty), R(arg0), U8(141), R(0),
+ /* 876 E> */ B(LdrKeyedProperty), R(arg0), U8(142), R(0),
B(Ldar), R(0),
/* 882 S> */ B(Ldar), R(arg1),
- /* 888 E> */ B(LdrKeyedProperty), R(arg0), U8(143), R(0),
+ /* 888 E> */ B(LdrKeyedProperty), R(arg0), U8(144), R(0),
B(Ldar), R(0),
/* 894 S> */ B(Ldar), R(arg1),
- /* 900 E> */ B(LdrKeyedProperty), R(arg0), U8(145), R(0),
+ /* 900 E> */ B(LdrKeyedProperty), R(arg0), U8(146), R(0),
B(Ldar), R(0),
/* 906 S> */ B(Ldar), R(arg1),
- /* 912 E> */ B(LdrKeyedProperty), R(arg0), U8(147), R(0),
+ /* 912 E> */ B(LdrKeyedProperty), R(arg0), U8(148), R(0),
B(Ldar), R(0),
/* 918 S> */ B(Ldar), R(arg1),
- /* 924 E> */ B(LdrKeyedProperty), R(arg0), U8(149), R(0),
+ /* 924 E> */ B(LdrKeyedProperty), R(arg0), U8(150), R(0),
B(Ldar), R(0),
/* 930 S> */ B(Ldar), R(arg1),
- /* 936 E> */ B(LdrKeyedProperty), R(arg0), U8(151), R(0),
+ /* 936 E> */ B(LdrKeyedProperty), R(arg0), U8(152), R(0),
B(Ldar), R(0),
/* 942 S> */ B(Ldar), R(arg1),
- /* 948 E> */ B(LdrKeyedProperty), R(arg0), U8(153), R(0),
+ /* 948 E> */ B(LdrKeyedProperty), R(arg0), U8(154), R(0),
B(Ldar), R(0),
/* 954 S> */ B(Ldar), R(arg1),
- /* 960 E> */ B(LdrKeyedProperty), R(arg0), U8(155), R(0),
+ /* 960 E> */ B(LdrKeyedProperty), R(arg0), U8(156), R(0),
B(Ldar), R(0),
/* 966 S> */ B(Ldar), R(arg1),
- /* 972 E> */ B(LdrKeyedProperty), R(arg0), U8(157), R(0),
+ /* 972 E> */ B(LdrKeyedProperty), R(arg0), U8(158), R(0),
B(Ldar), R(0),
/* 978 S> */ B(Ldar), R(arg1),
- /* 984 E> */ B(LdrKeyedProperty), R(arg0), U8(159), R(0),
+ /* 984 E> */ B(LdrKeyedProperty), R(arg0), U8(160), R(0),
B(Ldar), R(0),
/* 990 S> */ B(Ldar), R(arg1),
- /* 996 E> */ B(LdrKeyedProperty), R(arg0), U8(161), R(0),
+ /* 996 E> */ B(LdrKeyedProperty), R(arg0), U8(162), R(0),
B(Ldar), R(0),
/* 1002 S> */ B(Ldar), R(arg1),
- /* 1008 E> */ B(LdrKeyedProperty), R(arg0), U8(163), R(0),
+ /* 1008 E> */ B(LdrKeyedProperty), R(arg0), U8(164), R(0),
B(Ldar), R(0),
/* 1014 S> */ B(Ldar), R(arg1),
- /* 1020 E> */ B(LdrKeyedProperty), R(arg0), U8(165), R(0),
+ /* 1020 E> */ B(LdrKeyedProperty), R(arg0), U8(166), R(0),
B(Ldar), R(0),
/* 1026 S> */ B(Ldar), R(arg1),
- /* 1032 E> */ B(LdrKeyedProperty), R(arg0), U8(167), R(0),
+ /* 1032 E> */ B(LdrKeyedProperty), R(arg0), U8(168), R(0),
B(Ldar), R(0),
/* 1038 S> */ B(Ldar), R(arg1),
- /* 1044 E> */ B(LdrKeyedProperty), R(arg0), U8(169), R(0),
+ /* 1044 E> */ B(LdrKeyedProperty), R(arg0), U8(170), R(0),
B(Ldar), R(0),
/* 1050 S> */ B(Ldar), R(arg1),
- /* 1056 E> */ B(LdrKeyedProperty), R(arg0), U8(171), R(0),
+ /* 1056 E> */ B(LdrKeyedProperty), R(arg0), U8(172), R(0),
B(Ldar), R(0),
/* 1062 S> */ B(Ldar), R(arg1),
- /* 1068 E> */ B(LdrKeyedProperty), R(arg0), U8(173), R(0),
+ /* 1068 E> */ B(LdrKeyedProperty), R(arg0), U8(174), R(0),
B(Ldar), R(0),
/* 1074 S> */ B(Ldar), R(arg1),
- /* 1080 E> */ B(LdrKeyedProperty), R(arg0), U8(175), R(0),
+ /* 1080 E> */ B(LdrKeyedProperty), R(arg0), U8(176), R(0),
B(Ldar), R(0),
/* 1086 S> */ B(Ldar), R(arg1),
- /* 1092 E> */ B(LdrKeyedProperty), R(arg0), U8(177), R(0),
+ /* 1092 E> */ B(LdrKeyedProperty), R(arg0), U8(178), R(0),
B(Ldar), R(0),
/* 1098 S> */ B(Ldar), R(arg1),
- /* 1104 E> */ B(LdrKeyedProperty), R(arg0), U8(179), R(0),
+ /* 1104 E> */ B(LdrKeyedProperty), R(arg0), U8(180), R(0),
B(Ldar), R(0),
/* 1110 S> */ B(Ldar), R(arg1),
- /* 1116 E> */ B(LdrKeyedProperty), R(arg0), U8(181), R(0),
+ /* 1116 E> */ B(LdrKeyedProperty), R(arg0), U8(182), R(0),
B(Ldar), R(0),
/* 1122 S> */ B(Ldar), R(arg1),
- /* 1128 E> */ B(LdrKeyedProperty), R(arg0), U8(183), R(0),
+ /* 1128 E> */ B(LdrKeyedProperty), R(arg0), U8(184), R(0),
B(Ldar), R(0),
/* 1134 S> */ B(Ldar), R(arg1),
- /* 1140 E> */ B(LdrKeyedProperty), R(arg0), U8(185), R(0),
+ /* 1140 E> */ B(LdrKeyedProperty), R(arg0), U8(186), R(0),
B(Ldar), R(0),
/* 1146 S> */ B(Ldar), R(arg1),
- /* 1152 E> */ B(LdrKeyedProperty), R(arg0), U8(187), R(0),
+ /* 1152 E> */ B(LdrKeyedProperty), R(arg0), U8(188), R(0),
B(Ldar), R(0),
/* 1158 S> */ B(Ldar), R(arg1),
- /* 1164 E> */ B(LdrKeyedProperty), R(arg0), U8(189), R(0),
+ /* 1164 E> */ B(LdrKeyedProperty), R(arg0), U8(190), R(0),
B(Ldar), R(0),
/* 1170 S> */ B(Ldar), R(arg1),
- /* 1176 E> */ B(LdrKeyedProperty), R(arg0), U8(191), R(0),
+ /* 1176 E> */ B(LdrKeyedProperty), R(arg0), U8(192), R(0),
B(Ldar), R(0),
/* 1182 S> */ B(Ldar), R(arg1),
- /* 1188 E> */ B(LdrKeyedProperty), R(arg0), U8(193), R(0),
+ /* 1188 E> */ B(LdrKeyedProperty), R(arg0), U8(194), R(0),
B(Ldar), R(0),
/* 1194 S> */ B(Ldar), R(arg1),
- /* 1200 E> */ B(LdrKeyedProperty), R(arg0), U8(195), R(0),
+ /* 1200 E> */ B(LdrKeyedProperty), R(arg0), U8(196), R(0),
B(Ldar), R(0),
/* 1206 S> */ B(Ldar), R(arg1),
- /* 1212 E> */ B(LdrKeyedProperty), R(arg0), U8(197), R(0),
+ /* 1212 E> */ B(LdrKeyedProperty), R(arg0), U8(198), R(0),
B(Ldar), R(0),
/* 1218 S> */ B(Ldar), R(arg1),
- /* 1224 E> */ B(LdrKeyedProperty), R(arg0), U8(199), R(0),
+ /* 1224 E> */ B(LdrKeyedProperty), R(arg0), U8(200), R(0),
B(Ldar), R(0),
/* 1230 S> */ B(Ldar), R(arg1),
- /* 1236 E> */ B(LdrKeyedProperty), R(arg0), U8(201), R(0),
+ /* 1236 E> */ B(LdrKeyedProperty), R(arg0), U8(202), R(0),
B(Ldar), R(0),
/* 1242 S> */ B(Ldar), R(arg1),
- /* 1248 E> */ B(LdrKeyedProperty), R(arg0), U8(203), R(0),
+ /* 1248 E> */ B(LdrKeyedProperty), R(arg0), U8(204), R(0),
B(Ldar), R(0),
/* 1254 S> */ B(Ldar), R(arg1),
- /* 1260 E> */ B(LdrKeyedProperty), R(arg0), U8(205), R(0),
+ /* 1260 E> */ B(LdrKeyedProperty), R(arg0), U8(206), R(0),
B(Ldar), R(0),
/* 1266 S> */ B(Ldar), R(arg1),
- /* 1272 E> */ B(LdrKeyedProperty), R(arg0), U8(207), R(0),
+ /* 1272 E> */ B(LdrKeyedProperty), R(arg0), U8(208), R(0),
B(Ldar), R(0),
/* 1278 S> */ B(Ldar), R(arg1),
- /* 1284 E> */ B(LdrKeyedProperty), R(arg0), U8(209), R(0),
+ /* 1284 E> */ B(LdrKeyedProperty), R(arg0), U8(210), R(0),
B(Ldar), R(0),
/* 1290 S> */ B(Ldar), R(arg1),
- /* 1296 E> */ B(LdrKeyedProperty), R(arg0), U8(211), R(0),
+ /* 1296 E> */ B(LdrKeyedProperty), R(arg0), U8(212), R(0),
B(Ldar), R(0),
/* 1302 S> */ B(Ldar), R(arg1),
- /* 1308 E> */ B(LdrKeyedProperty), R(arg0), U8(213), R(0),
+ /* 1308 E> */ B(LdrKeyedProperty), R(arg0), U8(214), R(0),
B(Ldar), R(0),
/* 1314 S> */ B(Ldar), R(arg1),
- /* 1320 E> */ B(LdrKeyedProperty), R(arg0), U8(215), R(0),
+ /* 1320 E> */ B(LdrKeyedProperty), R(arg0), U8(216), R(0),
B(Ldar), R(0),
/* 1326 S> */ B(Ldar), R(arg1),
- /* 1332 E> */ B(LdrKeyedProperty), R(arg0), U8(217), R(0),
+ /* 1332 E> */ B(LdrKeyedProperty), R(arg0), U8(218), R(0),
B(Ldar), R(0),
/* 1338 S> */ B(Ldar), R(arg1),
- /* 1344 E> */ B(LdrKeyedProperty), R(arg0), U8(219), R(0),
+ /* 1344 E> */ B(LdrKeyedProperty), R(arg0), U8(220), R(0),
B(Ldar), R(0),
/* 1350 S> */ B(Ldar), R(arg1),
- /* 1356 E> */ B(LdrKeyedProperty), R(arg0), U8(221), R(0),
+ /* 1356 E> */ B(LdrKeyedProperty), R(arg0), U8(222), R(0),
B(Ldar), R(0),
/* 1362 S> */ B(Ldar), R(arg1),
- /* 1368 E> */ B(LdrKeyedProperty), R(arg0), U8(223), R(0),
+ /* 1368 E> */ B(LdrKeyedProperty), R(arg0), U8(224), R(0),
B(Ldar), R(0),
/* 1374 S> */ B(Ldar), R(arg1),
- /* 1380 E> */ B(LdrKeyedProperty), R(arg0), U8(225), R(0),
+ /* 1380 E> */ B(LdrKeyedProperty), R(arg0), U8(226), R(0),
B(Ldar), R(0),
/* 1386 S> */ B(Ldar), R(arg1),
- /* 1392 E> */ B(LdrKeyedProperty), R(arg0), U8(227), R(0),
+ /* 1392 E> */ B(LdrKeyedProperty), R(arg0), U8(228), R(0),
B(Ldar), R(0),
/* 1398 S> */ B(Ldar), R(arg1),
- /* 1404 E> */ B(LdrKeyedProperty), R(arg0), U8(229), R(0),
+ /* 1404 E> */ B(LdrKeyedProperty), R(arg0), U8(230), R(0),
B(Ldar), R(0),
/* 1410 S> */ B(Ldar), R(arg1),
- /* 1416 E> */ B(LdrKeyedProperty), R(arg0), U8(231), R(0),
+ /* 1416 E> */ B(LdrKeyedProperty), R(arg0), U8(232), R(0),
B(Ldar), R(0),
/* 1422 S> */ B(Ldar), R(arg1),
- /* 1428 E> */ B(LdrKeyedProperty), R(arg0), U8(233), R(0),
+ /* 1428 E> */ B(LdrKeyedProperty), R(arg0), U8(234), R(0),
B(Ldar), R(0),
/* 1434 S> */ B(Ldar), R(arg1),
- /* 1440 E> */ B(LdrKeyedProperty), R(arg0), U8(235), R(0),
+ /* 1440 E> */ B(LdrKeyedProperty), R(arg0), U8(236), R(0),
B(Ldar), R(0),
/* 1446 S> */ B(Ldar), R(arg1),
- /* 1452 E> */ B(LdrKeyedProperty), R(arg0), U8(237), R(0),
+ /* 1452 E> */ B(LdrKeyedProperty), R(arg0), U8(238), R(0),
B(Ldar), R(0),
/* 1458 S> */ B(Ldar), R(arg1),
- /* 1464 E> */ B(LdrKeyedProperty), R(arg0), U8(239), R(0),
+ /* 1464 E> */ B(LdrKeyedProperty), R(arg0), U8(240), R(0),
B(Ldar), R(0),
/* 1470 S> */ B(Ldar), R(arg1),
- /* 1476 E> */ B(LdrKeyedProperty), R(arg0), U8(241), R(0),
+ /* 1476 E> */ B(LdrKeyedProperty), R(arg0), U8(242), R(0),
B(Ldar), R(0),
/* 1482 S> */ B(Ldar), R(arg1),
- /* 1488 E> */ B(LdrKeyedProperty), R(arg0), U8(243), R(0),
+ /* 1488 E> */ B(LdrKeyedProperty), R(arg0), U8(244), R(0),
B(Ldar), R(0),
/* 1494 S> */ B(Ldar), R(arg1),
- /* 1500 E> */ B(LdrKeyedProperty), R(arg0), U8(245), R(0),
+ /* 1500 E> */ B(LdrKeyedProperty), R(arg0), U8(246), R(0),
B(Ldar), R(0),
/* 1506 S> */ B(Ldar), R(arg1),
- /* 1512 E> */ B(LdrKeyedProperty), R(arg0), U8(247), R(0),
+ /* 1512 E> */ B(LdrKeyedProperty), R(arg0), U8(248), R(0),
B(Ldar), R(0),
/* 1518 S> */ B(Ldar), R(arg1),
- /* 1524 E> */ B(LdrKeyedProperty), R(arg0), U8(249), R(0),
+ /* 1524 E> */ B(LdrKeyedProperty), R(arg0), U8(250), R(0),
B(Ldar), R(0),
/* 1530 S> */ B(Ldar), R(arg1),
- /* 1536 E> */ B(LdrKeyedProperty), R(arg0), U8(251), R(0),
+ /* 1536 E> */ B(LdrKeyedProperty), R(arg0), U8(252), R(0),
B(Ldar), R(0),
/* 1542 S> */ B(Ldar), R(arg1),
- /* 1548 E> */ B(LdrKeyedProperty), R(arg0), U8(253), R(0),
+ /* 1548 E> */ B(LdrKeyedProperty), R(arg0), U8(254), R(0),
B(Ldar), R(0),
/* 1554 S> */ B(Ldar), R(arg1),
- /* 1560 E> */ B(LdrKeyedProperty), R(arg0), U8(255), R(0),
+ /* 1560 E> */ B(Wide), B(LdrKeyedProperty), R16(arg0), U16(256), R16(0),
B(Ldar), R(0),
/* 1566 S> */ B(Ldar), R(arg1),
- /* 1575 E> */ B(Wide), B(LdaKeyedProperty), R16(arg0), U16(257),
+ /* 1575 E> */ B(Wide), B(LdaKeyedProperty), R16(arg0), U16(258),
/* 1579 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
index 7f456cf4fc..ed71814500 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: no
test function name: f
@@ -19,13 +17,13 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaConstant), U8(0),
- /* 23 E> */ B(StaNamedPropertySloppy), R(arg0), U8(1), U8(1),
+ /* 23 E> */ B(StaNamedPropertySloppy), R(arg0), U8(1), U8(2),
B(LdaUndefined),
/* 32 S> */ B(Return),
]
constant pool: [
- "val",
- "name",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["val"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
]
handlers: [
]
@@ -41,13 +39,13 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaConstant), U8(0),
- /* 25 E> */ B(StaNamedPropertySloppy), R(arg0), U8(1), U8(1),
+ /* 25 E> */ B(StaNamedPropertySloppy), R(arg0), U8(1), U8(2),
B(LdaUndefined),
/* 34 S> */ B(Return),
]
constant pool: [
- "val",
- "key",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["val"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["key"],
]
handlers: [
]
@@ -65,12 +63,12 @@ bytecodes: [
/* 16 S> */ B(LdaSmi), U8(100),
B(Star), R(1),
B(LdaConstant), U8(0),
- /* 23 E> */ B(StaKeyedPropertySloppy), R(arg0), R(1), U8(1),
+ /* 23 E> */ B(StaKeyedPropertySloppy), R(arg0), R(1), U8(2),
B(LdaUndefined),
/* 32 S> */ B(Return),
]
constant pool: [
- "val",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["val"],
]
handlers: [
]
@@ -86,12 +84,12 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 19 S> */ B(LdaConstant), U8(0),
- /* 24 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(1),
+ /* 24 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(2),
B(LdaUndefined),
/* 33 S> */ B(Return),
]
constant pool: [
- "val",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["val"],
]
handlers: [
]
@@ -107,13 +105,13 @@ bytecode array length: 12
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaSmi), U8(-124),
- /* 26 E> */ B(LdaKeyedProperty), R(arg0), U8(1),
- /* 23 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(3),
+ /* 26 E> */ B(LdaKeyedProperty), R(arg0), U8(2),
+ /* 23 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(4),
B(LdaUndefined),
/* 34 S> */ B(Return),
]
constant pool: [
- "name",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
]
handlers: [
]
@@ -129,13 +127,13 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 30 S> */ B(LdaConstant), U8(0),
- /* 37 E> */ B(StaNamedPropertyStrict), R(arg0), U8(1), U8(1),
+ /* 37 E> */ B(StaNamedPropertyStrict), R(arg0), U8(1), U8(2),
B(LdaUndefined),
/* 46 S> */ B(Return),
]
constant pool: [
- "val",
- "name",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["val"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
]
handlers: [
]
@@ -151,12 +149,12 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 33 S> */ B(LdaConstant), U8(0),
- /* 38 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(1),
+ /* 38 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(2),
B(LdaUndefined),
/* 47 S> */ B(Return),
]
constant pool: [
- "val",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["val"],
]
handlers: [
]
@@ -298,272 +296,272 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 781
+bytecode array length: 785
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 18 S> */ B(LdaSmi), U8(1),
- /* 25 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(1),
+ /* 25 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(2),
/* 32 S> */ B(LdaSmi), U8(1),
- /* 39 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(3),
+ /* 39 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(4),
/* 46 S> */ B(LdaSmi), U8(1),
- /* 53 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(5),
+ /* 53 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(6),
/* 60 S> */ B(LdaSmi), U8(1),
- /* 67 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(7),
+ /* 67 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(8),
/* 74 S> */ B(LdaSmi), U8(1),
- /* 81 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(9),
+ /* 81 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(10),
/* 88 S> */ B(LdaSmi), U8(1),
- /* 95 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(11),
+ /* 95 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(12),
/* 102 S> */ B(LdaSmi), U8(1),
- /* 109 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(13),
+ /* 109 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(14),
/* 116 S> */ B(LdaSmi), U8(1),
- /* 123 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(15),
+ /* 123 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(16),
/* 130 S> */ B(LdaSmi), U8(1),
- /* 137 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(17),
+ /* 137 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(18),
/* 144 S> */ B(LdaSmi), U8(1),
- /* 151 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(19),
+ /* 151 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(20),
/* 158 S> */ B(LdaSmi), U8(1),
- /* 165 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(21),
+ /* 165 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(22),
/* 172 S> */ B(LdaSmi), U8(1),
- /* 179 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(23),
+ /* 179 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(24),
/* 186 S> */ B(LdaSmi), U8(1),
- /* 193 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(25),
+ /* 193 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(26),
/* 200 S> */ B(LdaSmi), U8(1),
- /* 207 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(27),
+ /* 207 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(28),
/* 214 S> */ B(LdaSmi), U8(1),
- /* 221 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(29),
+ /* 221 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(30),
/* 228 S> */ B(LdaSmi), U8(1),
- /* 235 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(31),
+ /* 235 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(32),
/* 242 S> */ B(LdaSmi), U8(1),
- /* 249 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(33),
+ /* 249 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(34),
/* 256 S> */ B(LdaSmi), U8(1),
- /* 263 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(35),
+ /* 263 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(36),
/* 270 S> */ B(LdaSmi), U8(1),
- /* 277 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(37),
+ /* 277 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(38),
/* 284 S> */ B(LdaSmi), U8(1),
- /* 291 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(39),
+ /* 291 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(40),
/* 298 S> */ B(LdaSmi), U8(1),
- /* 305 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(41),
+ /* 305 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(42),
/* 312 S> */ B(LdaSmi), U8(1),
- /* 319 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(43),
+ /* 319 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(44),
/* 326 S> */ B(LdaSmi), U8(1),
- /* 333 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(45),
+ /* 333 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(46),
/* 340 S> */ B(LdaSmi), U8(1),
- /* 347 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(47),
+ /* 347 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(48),
/* 354 S> */ B(LdaSmi), U8(1),
- /* 361 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(49),
+ /* 361 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(50),
/* 368 S> */ B(LdaSmi), U8(1),
- /* 375 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(51),
+ /* 375 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(52),
/* 382 S> */ B(LdaSmi), U8(1),
- /* 389 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(53),
+ /* 389 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(54),
/* 396 S> */ B(LdaSmi), U8(1),
- /* 403 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(55),
+ /* 403 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(56),
/* 410 S> */ B(LdaSmi), U8(1),
- /* 417 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(57),
+ /* 417 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(58),
/* 424 S> */ B(LdaSmi), U8(1),
- /* 431 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(59),
+ /* 431 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(60),
/* 438 S> */ B(LdaSmi), U8(1),
- /* 445 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(61),
+ /* 445 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(62),
/* 452 S> */ B(LdaSmi), U8(1),
- /* 459 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(63),
+ /* 459 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(64),
/* 466 S> */ B(LdaSmi), U8(1),
- /* 473 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(65),
+ /* 473 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(66),
/* 480 S> */ B(LdaSmi), U8(1),
- /* 487 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(67),
+ /* 487 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(68),
/* 494 S> */ B(LdaSmi), U8(1),
- /* 501 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(69),
+ /* 501 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(70),
/* 508 S> */ B(LdaSmi), U8(1),
- /* 515 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(71),
+ /* 515 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(72),
/* 522 S> */ B(LdaSmi), U8(1),
- /* 529 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(73),
+ /* 529 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(74),
/* 536 S> */ B(LdaSmi), U8(1),
- /* 543 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(75),
+ /* 543 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(76),
/* 550 S> */ B(LdaSmi), U8(1),
- /* 557 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(77),
+ /* 557 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(78),
/* 564 S> */ B(LdaSmi), U8(1),
- /* 571 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(79),
+ /* 571 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(80),
/* 578 S> */ B(LdaSmi), U8(1),
- /* 585 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(81),
+ /* 585 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(82),
/* 592 S> */ B(LdaSmi), U8(1),
- /* 599 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(83),
+ /* 599 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(84),
/* 606 S> */ B(LdaSmi), U8(1),
- /* 613 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(85),
+ /* 613 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(86),
/* 620 S> */ B(LdaSmi), U8(1),
- /* 627 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(87),
+ /* 627 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(88),
/* 634 S> */ B(LdaSmi), U8(1),
- /* 641 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(89),
+ /* 641 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(90),
/* 648 S> */ B(LdaSmi), U8(1),
- /* 655 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(91),
+ /* 655 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(92),
/* 662 S> */ B(LdaSmi), U8(1),
- /* 669 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(93),
+ /* 669 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(94),
/* 676 S> */ B(LdaSmi), U8(1),
- /* 683 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(95),
+ /* 683 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(96),
/* 690 S> */ B(LdaSmi), U8(1),
- /* 697 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(97),
+ /* 697 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(98),
/* 704 S> */ B(LdaSmi), U8(1),
- /* 711 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(99),
+ /* 711 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(100),
/* 718 S> */ B(LdaSmi), U8(1),
- /* 725 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(101),
+ /* 725 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(102),
/* 732 S> */ B(LdaSmi), U8(1),
- /* 739 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(103),
+ /* 739 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(104),
/* 746 S> */ B(LdaSmi), U8(1),
- /* 753 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(105),
+ /* 753 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(106),
/* 760 S> */ B(LdaSmi), U8(1),
- /* 767 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(107),
+ /* 767 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(108),
/* 774 S> */ B(LdaSmi), U8(1),
- /* 781 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(109),
+ /* 781 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(110),
/* 788 S> */ B(LdaSmi), U8(1),
- /* 795 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(111),
+ /* 795 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(112),
/* 802 S> */ B(LdaSmi), U8(1),
- /* 809 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(113),
+ /* 809 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(114),
/* 816 S> */ B(LdaSmi), U8(1),
- /* 823 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(115),
+ /* 823 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(116),
/* 830 S> */ B(LdaSmi), U8(1),
- /* 837 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(117),
+ /* 837 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(118),
/* 844 S> */ B(LdaSmi), U8(1),
- /* 851 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(119),
+ /* 851 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(120),
/* 858 S> */ B(LdaSmi), U8(1),
- /* 865 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(121),
+ /* 865 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(122),
/* 872 S> */ B(LdaSmi), U8(1),
- /* 879 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(123),
+ /* 879 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(124),
/* 886 S> */ B(LdaSmi), U8(1),
- /* 893 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(125),
+ /* 893 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(126),
/* 900 S> */ B(LdaSmi), U8(1),
- /* 907 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(127),
+ /* 907 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(128),
/* 914 S> */ B(LdaSmi), U8(1),
- /* 921 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(129),
+ /* 921 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(130),
/* 928 S> */ B(LdaSmi), U8(1),
- /* 935 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(131),
+ /* 935 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(132),
/* 942 S> */ B(LdaSmi), U8(1),
- /* 949 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(133),
+ /* 949 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(134),
/* 956 S> */ B(LdaSmi), U8(1),
- /* 963 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(135),
+ /* 963 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(136),
/* 970 S> */ B(LdaSmi), U8(1),
- /* 977 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(137),
+ /* 977 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(138),
/* 984 S> */ B(LdaSmi), U8(1),
- /* 991 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(139),
+ /* 991 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(140),
/* 998 S> */ B(LdaSmi), U8(1),
- /* 1005 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(141),
+ /* 1005 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(142),
/* 1012 S> */ B(LdaSmi), U8(1),
- /* 1019 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(143),
+ /* 1019 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(144),
/* 1026 S> */ B(LdaSmi), U8(1),
- /* 1033 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(145),
+ /* 1033 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(146),
/* 1040 S> */ B(LdaSmi), U8(1),
- /* 1047 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(147),
+ /* 1047 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(148),
/* 1054 S> */ B(LdaSmi), U8(1),
- /* 1061 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(149),
+ /* 1061 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(150),
/* 1068 S> */ B(LdaSmi), U8(1),
- /* 1075 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(151),
+ /* 1075 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(152),
/* 1082 S> */ B(LdaSmi), U8(1),
- /* 1089 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(153),
+ /* 1089 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(154),
/* 1096 S> */ B(LdaSmi), U8(1),
- /* 1103 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(155),
+ /* 1103 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(156),
/* 1110 S> */ B(LdaSmi), U8(1),
- /* 1117 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(157),
+ /* 1117 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(158),
/* 1124 S> */ B(LdaSmi), U8(1),
- /* 1131 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(159),
+ /* 1131 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(160),
/* 1138 S> */ B(LdaSmi), U8(1),
- /* 1145 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(161),
+ /* 1145 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(162),
/* 1152 S> */ B(LdaSmi), U8(1),
- /* 1159 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(163),
+ /* 1159 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(164),
/* 1166 S> */ B(LdaSmi), U8(1),
- /* 1173 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(165),
+ /* 1173 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(166),
/* 1180 S> */ B(LdaSmi), U8(1),
- /* 1187 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(167),
+ /* 1187 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(168),
/* 1194 S> */ B(LdaSmi), U8(1),
- /* 1201 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(169),
+ /* 1201 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(170),
/* 1208 S> */ B(LdaSmi), U8(1),
- /* 1215 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(171),
+ /* 1215 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(172),
/* 1222 S> */ B(LdaSmi), U8(1),
- /* 1229 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(173),
+ /* 1229 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(174),
/* 1236 S> */ B(LdaSmi), U8(1),
- /* 1243 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(175),
+ /* 1243 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(176),
/* 1250 S> */ B(LdaSmi), U8(1),
- /* 1257 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(177),
+ /* 1257 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(178),
/* 1264 S> */ B(LdaSmi), U8(1),
- /* 1271 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(179),
+ /* 1271 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(180),
/* 1278 S> */ B(LdaSmi), U8(1),
- /* 1285 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(181),
+ /* 1285 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(182),
/* 1292 S> */ B(LdaSmi), U8(1),
- /* 1299 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(183),
+ /* 1299 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(184),
/* 1306 S> */ B(LdaSmi), U8(1),
- /* 1313 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(185),
+ /* 1313 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(186),
/* 1320 S> */ B(LdaSmi), U8(1),
- /* 1327 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(187),
+ /* 1327 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(188),
/* 1334 S> */ B(LdaSmi), U8(1),
- /* 1341 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(189),
+ /* 1341 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(190),
/* 1348 S> */ B(LdaSmi), U8(1),
- /* 1355 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(191),
+ /* 1355 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(192),
/* 1362 S> */ B(LdaSmi), U8(1),
- /* 1369 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(193),
+ /* 1369 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(194),
/* 1376 S> */ B(LdaSmi), U8(1),
- /* 1383 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(195),
+ /* 1383 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(196),
/* 1390 S> */ B(LdaSmi), U8(1),
- /* 1397 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(197),
+ /* 1397 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(198),
/* 1404 S> */ B(LdaSmi), U8(1),
- /* 1411 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(199),
+ /* 1411 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(200),
/* 1418 S> */ B(LdaSmi), U8(1),
- /* 1425 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(201),
+ /* 1425 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(202),
/* 1432 S> */ B(LdaSmi), U8(1),
- /* 1439 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(203),
+ /* 1439 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(204),
/* 1446 S> */ B(LdaSmi), U8(1),
- /* 1453 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(205),
+ /* 1453 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(206),
/* 1460 S> */ B(LdaSmi), U8(1),
- /* 1467 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(207),
+ /* 1467 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(208),
/* 1474 S> */ B(LdaSmi), U8(1),
- /* 1481 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(209),
+ /* 1481 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(210),
/* 1488 S> */ B(LdaSmi), U8(1),
- /* 1495 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(211),
+ /* 1495 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(212),
/* 1502 S> */ B(LdaSmi), U8(1),
- /* 1509 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(213),
+ /* 1509 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(214),
/* 1516 S> */ B(LdaSmi), U8(1),
- /* 1523 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(215),
+ /* 1523 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(216),
/* 1530 S> */ B(LdaSmi), U8(1),
- /* 1537 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(217),
+ /* 1537 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(218),
/* 1544 S> */ B(LdaSmi), U8(1),
- /* 1551 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(219),
+ /* 1551 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(220),
/* 1558 S> */ B(LdaSmi), U8(1),
- /* 1565 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(221),
+ /* 1565 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(222),
/* 1572 S> */ B(LdaSmi), U8(1),
- /* 1579 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(223),
+ /* 1579 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(224),
/* 1586 S> */ B(LdaSmi), U8(1),
- /* 1593 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(225),
+ /* 1593 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(226),
/* 1600 S> */ B(LdaSmi), U8(1),
- /* 1607 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(227),
+ /* 1607 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(228),
/* 1614 S> */ B(LdaSmi), U8(1),
- /* 1621 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(229),
+ /* 1621 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(230),
/* 1628 S> */ B(LdaSmi), U8(1),
- /* 1635 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(231),
+ /* 1635 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(232),
/* 1642 S> */ B(LdaSmi), U8(1),
- /* 1649 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(233),
+ /* 1649 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(234),
/* 1656 S> */ B(LdaSmi), U8(1),
- /* 1663 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(235),
+ /* 1663 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(236),
/* 1670 S> */ B(LdaSmi), U8(1),
- /* 1677 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(237),
+ /* 1677 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(238),
/* 1684 S> */ B(LdaSmi), U8(1),
- /* 1691 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(239),
+ /* 1691 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(240),
/* 1698 S> */ B(LdaSmi), U8(1),
- /* 1705 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(241),
+ /* 1705 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(242),
/* 1712 S> */ B(LdaSmi), U8(1),
- /* 1719 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(243),
+ /* 1719 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(244),
/* 1726 S> */ B(LdaSmi), U8(1),
- /* 1733 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(245),
+ /* 1733 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(246),
/* 1740 S> */ B(LdaSmi), U8(1),
- /* 1747 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(247),
+ /* 1747 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(248),
/* 1754 S> */ B(LdaSmi), U8(1),
- /* 1761 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(249),
+ /* 1761 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(250),
/* 1768 S> */ B(LdaSmi), U8(1),
- /* 1775 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(251),
+ /* 1775 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(252),
/* 1782 S> */ B(LdaSmi), U8(1),
- /* 1789 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(253),
+ /* 1789 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(254),
/* 1796 S> */ B(LdaSmi), U8(1),
- /* 1803 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(255),
+ /* 1803 E> */ B(Wide), B(StaNamedPropertySloppy), R16(arg0), U16(0), U16(256),
/* 1810 S> */ B(LdaSmi), U8(2),
- /* 1817 E> */ B(Wide), B(StaNamedPropertySloppy), R16(arg0), U16(0), U16(257),
+ /* 1817 E> */ B(Wide), B(StaNamedPropertySloppy), R16(arg0), U16(0), U16(258),
B(LdaUndefined),
/* 1822 S> */ B(Return),
]
constant pool: [
- "name",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
]
handlers: [
]
@@ -706,272 +704,272 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 781
+bytecode array length: 785
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 33 S> */ B(LdaSmi), U8(1),
- /* 40 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(1),
+ /* 40 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(2),
/* 47 S> */ B(LdaSmi), U8(1),
- /* 54 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(3),
+ /* 54 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(4),
/* 61 S> */ B(LdaSmi), U8(1),
- /* 68 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(5),
+ /* 68 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(6),
/* 75 S> */ B(LdaSmi), U8(1),
- /* 82 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(7),
+ /* 82 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(8),
/* 89 S> */ B(LdaSmi), U8(1),
- /* 96 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(9),
+ /* 96 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(10),
/* 103 S> */ B(LdaSmi), U8(1),
- /* 110 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(11),
+ /* 110 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(12),
/* 117 S> */ B(LdaSmi), U8(1),
- /* 124 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(13),
+ /* 124 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(14),
/* 131 S> */ B(LdaSmi), U8(1),
- /* 138 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(15),
+ /* 138 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(16),
/* 145 S> */ B(LdaSmi), U8(1),
- /* 152 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(17),
+ /* 152 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(18),
/* 159 S> */ B(LdaSmi), U8(1),
- /* 166 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(19),
+ /* 166 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(20),
/* 173 S> */ B(LdaSmi), U8(1),
- /* 180 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(21),
+ /* 180 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(22),
/* 187 S> */ B(LdaSmi), U8(1),
- /* 194 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(23),
+ /* 194 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(24),
/* 201 S> */ B(LdaSmi), U8(1),
- /* 208 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(25),
+ /* 208 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(26),
/* 215 S> */ B(LdaSmi), U8(1),
- /* 222 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(27),
+ /* 222 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(28),
/* 229 S> */ B(LdaSmi), U8(1),
- /* 236 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(29),
+ /* 236 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(30),
/* 243 S> */ B(LdaSmi), U8(1),
- /* 250 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(31),
+ /* 250 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(32),
/* 257 S> */ B(LdaSmi), U8(1),
- /* 264 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(33),
+ /* 264 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(34),
/* 271 S> */ B(LdaSmi), U8(1),
- /* 278 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(35),
+ /* 278 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(36),
/* 285 S> */ B(LdaSmi), U8(1),
- /* 292 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(37),
+ /* 292 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(38),
/* 299 S> */ B(LdaSmi), U8(1),
- /* 306 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(39),
+ /* 306 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(40),
/* 313 S> */ B(LdaSmi), U8(1),
- /* 320 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(41),
+ /* 320 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(42),
/* 327 S> */ B(LdaSmi), U8(1),
- /* 334 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(43),
+ /* 334 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(44),
/* 341 S> */ B(LdaSmi), U8(1),
- /* 348 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(45),
+ /* 348 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(46),
/* 355 S> */ B(LdaSmi), U8(1),
- /* 362 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(47),
+ /* 362 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(48),
/* 369 S> */ B(LdaSmi), U8(1),
- /* 376 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(49),
+ /* 376 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(50),
/* 383 S> */ B(LdaSmi), U8(1),
- /* 390 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(51),
+ /* 390 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(52),
/* 397 S> */ B(LdaSmi), U8(1),
- /* 404 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(53),
+ /* 404 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(54),
/* 411 S> */ B(LdaSmi), U8(1),
- /* 418 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(55),
+ /* 418 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(56),
/* 425 S> */ B(LdaSmi), U8(1),
- /* 432 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(57),
+ /* 432 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(58),
/* 439 S> */ B(LdaSmi), U8(1),
- /* 446 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(59),
+ /* 446 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(60),
/* 453 S> */ B(LdaSmi), U8(1),
- /* 460 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(61),
+ /* 460 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(62),
/* 467 S> */ B(LdaSmi), U8(1),
- /* 474 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(63),
+ /* 474 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(64),
/* 481 S> */ B(LdaSmi), U8(1),
- /* 488 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(65),
+ /* 488 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(66),
/* 495 S> */ B(LdaSmi), U8(1),
- /* 502 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(67),
+ /* 502 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(68),
/* 509 S> */ B(LdaSmi), U8(1),
- /* 516 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(69),
+ /* 516 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(70),
/* 523 S> */ B(LdaSmi), U8(1),
- /* 530 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(71),
+ /* 530 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(72),
/* 537 S> */ B(LdaSmi), U8(1),
- /* 544 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(73),
+ /* 544 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(74),
/* 551 S> */ B(LdaSmi), U8(1),
- /* 558 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(75),
+ /* 558 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(76),
/* 565 S> */ B(LdaSmi), U8(1),
- /* 572 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(77),
+ /* 572 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(78),
/* 579 S> */ B(LdaSmi), U8(1),
- /* 586 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(79),
+ /* 586 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(80),
/* 593 S> */ B(LdaSmi), U8(1),
- /* 600 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(81),
+ /* 600 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(82),
/* 607 S> */ B(LdaSmi), U8(1),
- /* 614 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(83),
+ /* 614 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(84),
/* 621 S> */ B(LdaSmi), U8(1),
- /* 628 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(85),
+ /* 628 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(86),
/* 635 S> */ B(LdaSmi), U8(1),
- /* 642 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(87),
+ /* 642 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(88),
/* 649 S> */ B(LdaSmi), U8(1),
- /* 656 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(89),
+ /* 656 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(90),
/* 663 S> */ B(LdaSmi), U8(1),
- /* 670 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(91),
+ /* 670 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(92),
/* 677 S> */ B(LdaSmi), U8(1),
- /* 684 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(93),
+ /* 684 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(94),
/* 691 S> */ B(LdaSmi), U8(1),
- /* 698 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(95),
+ /* 698 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(96),
/* 705 S> */ B(LdaSmi), U8(1),
- /* 712 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(97),
+ /* 712 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(98),
/* 719 S> */ B(LdaSmi), U8(1),
- /* 726 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(99),
+ /* 726 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(100),
/* 733 S> */ B(LdaSmi), U8(1),
- /* 740 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(101),
+ /* 740 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(102),
/* 747 S> */ B(LdaSmi), U8(1),
- /* 754 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(103),
+ /* 754 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(104),
/* 761 S> */ B(LdaSmi), U8(1),
- /* 768 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(105),
+ /* 768 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(106),
/* 775 S> */ B(LdaSmi), U8(1),
- /* 782 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(107),
+ /* 782 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(108),
/* 789 S> */ B(LdaSmi), U8(1),
- /* 796 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(109),
+ /* 796 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(110),
/* 803 S> */ B(LdaSmi), U8(1),
- /* 810 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(111),
+ /* 810 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(112),
/* 817 S> */ B(LdaSmi), U8(1),
- /* 824 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(113),
+ /* 824 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(114),
/* 831 S> */ B(LdaSmi), U8(1),
- /* 838 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(115),
+ /* 838 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(116),
/* 845 S> */ B(LdaSmi), U8(1),
- /* 852 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(117),
+ /* 852 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(118),
/* 859 S> */ B(LdaSmi), U8(1),
- /* 866 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(119),
+ /* 866 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(120),
/* 873 S> */ B(LdaSmi), U8(1),
- /* 880 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(121),
+ /* 880 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(122),
/* 887 S> */ B(LdaSmi), U8(1),
- /* 894 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(123),
+ /* 894 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(124),
/* 901 S> */ B(LdaSmi), U8(1),
- /* 908 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(125),
+ /* 908 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(126),
/* 915 S> */ B(LdaSmi), U8(1),
- /* 922 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(127),
+ /* 922 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(128),
/* 929 S> */ B(LdaSmi), U8(1),
- /* 936 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(129),
+ /* 936 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(130),
/* 943 S> */ B(LdaSmi), U8(1),
- /* 950 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(131),
+ /* 950 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(132),
/* 957 S> */ B(LdaSmi), U8(1),
- /* 964 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(133),
+ /* 964 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(134),
/* 971 S> */ B(LdaSmi), U8(1),
- /* 978 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(135),
+ /* 978 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(136),
/* 985 S> */ B(LdaSmi), U8(1),
- /* 992 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(137),
+ /* 992 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(138),
/* 999 S> */ B(LdaSmi), U8(1),
- /* 1006 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(139),
+ /* 1006 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(140),
/* 1013 S> */ B(LdaSmi), U8(1),
- /* 1020 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(141),
+ /* 1020 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(142),
/* 1027 S> */ B(LdaSmi), U8(1),
- /* 1034 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(143),
+ /* 1034 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(144),
/* 1041 S> */ B(LdaSmi), U8(1),
- /* 1048 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(145),
+ /* 1048 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(146),
/* 1055 S> */ B(LdaSmi), U8(1),
- /* 1062 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(147),
+ /* 1062 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(148),
/* 1069 S> */ B(LdaSmi), U8(1),
- /* 1076 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(149),
+ /* 1076 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(150),
/* 1083 S> */ B(LdaSmi), U8(1),
- /* 1090 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(151),
+ /* 1090 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(152),
/* 1097 S> */ B(LdaSmi), U8(1),
- /* 1104 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(153),
+ /* 1104 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(154),
/* 1111 S> */ B(LdaSmi), U8(1),
- /* 1118 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(155),
+ /* 1118 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(156),
/* 1125 S> */ B(LdaSmi), U8(1),
- /* 1132 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(157),
+ /* 1132 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(158),
/* 1139 S> */ B(LdaSmi), U8(1),
- /* 1146 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(159),
+ /* 1146 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(160),
/* 1153 S> */ B(LdaSmi), U8(1),
- /* 1160 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(161),
+ /* 1160 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(162),
/* 1167 S> */ B(LdaSmi), U8(1),
- /* 1174 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(163),
+ /* 1174 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(164),
/* 1181 S> */ B(LdaSmi), U8(1),
- /* 1188 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(165),
+ /* 1188 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(166),
/* 1195 S> */ B(LdaSmi), U8(1),
- /* 1202 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(167),
+ /* 1202 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(168),
/* 1209 S> */ B(LdaSmi), U8(1),
- /* 1216 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(169),
+ /* 1216 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(170),
/* 1223 S> */ B(LdaSmi), U8(1),
- /* 1230 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(171),
+ /* 1230 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(172),
/* 1237 S> */ B(LdaSmi), U8(1),
- /* 1244 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(173),
+ /* 1244 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(174),
/* 1251 S> */ B(LdaSmi), U8(1),
- /* 1258 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(175),
+ /* 1258 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(176),
/* 1265 S> */ B(LdaSmi), U8(1),
- /* 1272 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(177),
+ /* 1272 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(178),
/* 1279 S> */ B(LdaSmi), U8(1),
- /* 1286 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(179),
+ /* 1286 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(180),
/* 1293 S> */ B(LdaSmi), U8(1),
- /* 1300 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(181),
+ /* 1300 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(182),
/* 1307 S> */ B(LdaSmi), U8(1),
- /* 1314 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(183),
+ /* 1314 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(184),
/* 1321 S> */ B(LdaSmi), U8(1),
- /* 1328 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(185),
+ /* 1328 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(186),
/* 1335 S> */ B(LdaSmi), U8(1),
- /* 1342 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(187),
+ /* 1342 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(188),
/* 1349 S> */ B(LdaSmi), U8(1),
- /* 1356 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(189),
+ /* 1356 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(190),
/* 1363 S> */ B(LdaSmi), U8(1),
- /* 1370 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(191),
+ /* 1370 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(192),
/* 1377 S> */ B(LdaSmi), U8(1),
- /* 1384 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(193),
+ /* 1384 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(194),
/* 1391 S> */ B(LdaSmi), U8(1),
- /* 1398 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(195),
+ /* 1398 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(196),
/* 1405 S> */ B(LdaSmi), U8(1),
- /* 1412 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(197),
+ /* 1412 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(198),
/* 1419 S> */ B(LdaSmi), U8(1),
- /* 1426 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(199),
+ /* 1426 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(200),
/* 1433 S> */ B(LdaSmi), U8(1),
- /* 1440 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(201),
+ /* 1440 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(202),
/* 1447 S> */ B(LdaSmi), U8(1),
- /* 1454 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(203),
+ /* 1454 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(204),
/* 1461 S> */ B(LdaSmi), U8(1),
- /* 1468 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(205),
+ /* 1468 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(206),
/* 1475 S> */ B(LdaSmi), U8(1),
- /* 1482 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(207),
+ /* 1482 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(208),
/* 1489 S> */ B(LdaSmi), U8(1),
- /* 1496 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(209),
+ /* 1496 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(210),
/* 1503 S> */ B(LdaSmi), U8(1),
- /* 1510 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(211),
+ /* 1510 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(212),
/* 1517 S> */ B(LdaSmi), U8(1),
- /* 1524 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(213),
+ /* 1524 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(214),
/* 1531 S> */ B(LdaSmi), U8(1),
- /* 1538 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(215),
+ /* 1538 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(216),
/* 1545 S> */ B(LdaSmi), U8(1),
- /* 1552 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(217),
+ /* 1552 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(218),
/* 1559 S> */ B(LdaSmi), U8(1),
- /* 1566 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(219),
+ /* 1566 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(220),
/* 1573 S> */ B(LdaSmi), U8(1),
- /* 1580 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(221),
+ /* 1580 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(222),
/* 1587 S> */ B(LdaSmi), U8(1),
- /* 1594 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(223),
+ /* 1594 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(224),
/* 1601 S> */ B(LdaSmi), U8(1),
- /* 1608 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(225),
+ /* 1608 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(226),
/* 1615 S> */ B(LdaSmi), U8(1),
- /* 1622 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(227),
+ /* 1622 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(228),
/* 1629 S> */ B(LdaSmi), U8(1),
- /* 1636 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(229),
+ /* 1636 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(230),
/* 1643 S> */ B(LdaSmi), U8(1),
- /* 1650 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(231),
+ /* 1650 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(232),
/* 1657 S> */ B(LdaSmi), U8(1),
- /* 1664 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(233),
+ /* 1664 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(234),
/* 1671 S> */ B(LdaSmi), U8(1),
- /* 1678 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(235),
+ /* 1678 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(236),
/* 1685 S> */ B(LdaSmi), U8(1),
- /* 1692 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(237),
+ /* 1692 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(238),
/* 1699 S> */ B(LdaSmi), U8(1),
- /* 1706 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(239),
+ /* 1706 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(240),
/* 1713 S> */ B(LdaSmi), U8(1),
- /* 1720 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(241),
+ /* 1720 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(242),
/* 1727 S> */ B(LdaSmi), U8(1),
- /* 1734 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(243),
+ /* 1734 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(244),
/* 1741 S> */ B(LdaSmi), U8(1),
- /* 1748 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(245),
+ /* 1748 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(246),
/* 1755 S> */ B(LdaSmi), U8(1),
- /* 1762 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(247),
+ /* 1762 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(248),
/* 1769 S> */ B(LdaSmi), U8(1),
- /* 1776 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(249),
+ /* 1776 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(250),
/* 1783 S> */ B(LdaSmi), U8(1),
- /* 1790 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(251),
+ /* 1790 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(252),
/* 1797 S> */ B(LdaSmi), U8(1),
- /* 1804 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(253),
+ /* 1804 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(254),
/* 1811 S> */ B(LdaSmi), U8(1),
- /* 1818 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(255),
+ /* 1818 E> */ B(Wide), B(StaNamedPropertyStrict), R16(arg0), U16(0), U16(256),
/* 1825 S> */ B(LdaSmi), U8(2),
- /* 1832 E> */ B(Wide), B(StaNamedPropertyStrict), R16(arg0), U16(0), U16(257),
+ /* 1832 E> */ B(Wide), B(StaNamedPropertyStrict), R16(arg0), U16(0), U16(258),
B(LdaUndefined),
/* 1837 S> */ B(Return),
]
constant pool: [
- "name",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
]
handlers: [
]
@@ -1113,267 +1111,267 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 781
+bytecode array length: 785
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 21 S> */ B(LdaSmi), U8(1),
- /* 26 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(1),
+ /* 26 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(2),
/* 33 S> */ B(LdaSmi), U8(1),
- /* 38 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(3),
+ /* 38 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(4),
/* 45 S> */ B(LdaSmi), U8(1),
- /* 50 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(5),
+ /* 50 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(6),
/* 57 S> */ B(LdaSmi), U8(1),
- /* 62 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(7),
+ /* 62 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(8),
/* 69 S> */ B(LdaSmi), U8(1),
- /* 74 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(9),
+ /* 74 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(10),
/* 81 S> */ B(LdaSmi), U8(1),
- /* 86 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(11),
+ /* 86 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(12),
/* 93 S> */ B(LdaSmi), U8(1),
- /* 98 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(13),
+ /* 98 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(14),
/* 105 S> */ B(LdaSmi), U8(1),
- /* 110 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(15),
+ /* 110 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(16),
/* 117 S> */ B(LdaSmi), U8(1),
- /* 122 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(17),
+ /* 122 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(18),
/* 129 S> */ B(LdaSmi), U8(1),
- /* 134 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(19),
+ /* 134 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(20),
/* 141 S> */ B(LdaSmi), U8(1),
- /* 146 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(21),
+ /* 146 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(22),
/* 153 S> */ B(LdaSmi), U8(1),
- /* 158 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(23),
+ /* 158 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(24),
/* 165 S> */ B(LdaSmi), U8(1),
- /* 170 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(25),
+ /* 170 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(26),
/* 177 S> */ B(LdaSmi), U8(1),
- /* 182 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(27),
+ /* 182 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(28),
/* 189 S> */ B(LdaSmi), U8(1),
- /* 194 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(29),
+ /* 194 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(30),
/* 201 S> */ B(LdaSmi), U8(1),
- /* 206 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(31),
+ /* 206 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(32),
/* 213 S> */ B(LdaSmi), U8(1),
- /* 218 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(33),
+ /* 218 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(34),
/* 225 S> */ B(LdaSmi), U8(1),
- /* 230 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(35),
+ /* 230 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(36),
/* 237 S> */ B(LdaSmi), U8(1),
- /* 242 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(37),
+ /* 242 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(38),
/* 249 S> */ B(LdaSmi), U8(1),
- /* 254 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(39),
+ /* 254 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(40),
/* 261 S> */ B(LdaSmi), U8(1),
- /* 266 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(41),
+ /* 266 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(42),
/* 273 S> */ B(LdaSmi), U8(1),
- /* 278 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(43),
+ /* 278 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(44),
/* 285 S> */ B(LdaSmi), U8(1),
- /* 290 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(45),
+ /* 290 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(46),
/* 297 S> */ B(LdaSmi), U8(1),
- /* 302 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(47),
+ /* 302 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(48),
/* 309 S> */ B(LdaSmi), U8(1),
- /* 314 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(49),
+ /* 314 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(50),
/* 321 S> */ B(LdaSmi), U8(1),
- /* 326 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(51),
+ /* 326 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(52),
/* 333 S> */ B(LdaSmi), U8(1),
- /* 338 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(53),
+ /* 338 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(54),
/* 345 S> */ B(LdaSmi), U8(1),
- /* 350 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(55),
+ /* 350 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(56),
/* 357 S> */ B(LdaSmi), U8(1),
- /* 362 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(57),
+ /* 362 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(58),
/* 369 S> */ B(LdaSmi), U8(1),
- /* 374 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(59),
+ /* 374 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(60),
/* 381 S> */ B(LdaSmi), U8(1),
- /* 386 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(61),
+ /* 386 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(62),
/* 393 S> */ B(LdaSmi), U8(1),
- /* 398 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(63),
+ /* 398 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(64),
/* 405 S> */ B(LdaSmi), U8(1),
- /* 410 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(65),
+ /* 410 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(66),
/* 417 S> */ B(LdaSmi), U8(1),
- /* 422 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(67),
+ /* 422 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(68),
/* 429 S> */ B(LdaSmi), U8(1),
- /* 434 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(69),
+ /* 434 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(70),
/* 441 S> */ B(LdaSmi), U8(1),
- /* 446 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(71),
+ /* 446 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(72),
/* 453 S> */ B(LdaSmi), U8(1),
- /* 458 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(73),
+ /* 458 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(74),
/* 465 S> */ B(LdaSmi), U8(1),
- /* 470 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(75),
+ /* 470 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(76),
/* 477 S> */ B(LdaSmi), U8(1),
- /* 482 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(77),
+ /* 482 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(78),
/* 489 S> */ B(LdaSmi), U8(1),
- /* 494 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(79),
+ /* 494 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(80),
/* 501 S> */ B(LdaSmi), U8(1),
- /* 506 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(81),
+ /* 506 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(82),
/* 513 S> */ B(LdaSmi), U8(1),
- /* 518 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(83),
+ /* 518 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(84),
/* 525 S> */ B(LdaSmi), U8(1),
- /* 530 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(85),
+ /* 530 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(86),
/* 537 S> */ B(LdaSmi), U8(1),
- /* 542 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(87),
+ /* 542 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(88),
/* 549 S> */ B(LdaSmi), U8(1),
- /* 554 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(89),
+ /* 554 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(90),
/* 561 S> */ B(LdaSmi), U8(1),
- /* 566 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(91),
+ /* 566 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(92),
/* 573 S> */ B(LdaSmi), U8(1),
- /* 578 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(93),
+ /* 578 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(94),
/* 585 S> */ B(LdaSmi), U8(1),
- /* 590 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(95),
+ /* 590 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(96),
/* 597 S> */ B(LdaSmi), U8(1),
- /* 602 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(97),
+ /* 602 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(98),
/* 609 S> */ B(LdaSmi), U8(1),
- /* 614 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(99),
+ /* 614 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(100),
/* 621 S> */ B(LdaSmi), U8(1),
- /* 626 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(101),
+ /* 626 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(102),
/* 633 S> */ B(LdaSmi), U8(1),
- /* 638 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(103),
+ /* 638 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(104),
/* 645 S> */ B(LdaSmi), U8(1),
- /* 650 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(105),
+ /* 650 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(106),
/* 657 S> */ B(LdaSmi), U8(1),
- /* 662 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(107),
+ /* 662 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(108),
/* 669 S> */ B(LdaSmi), U8(1),
- /* 674 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(109),
+ /* 674 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(110),
/* 681 S> */ B(LdaSmi), U8(1),
- /* 686 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(111),
+ /* 686 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(112),
/* 693 S> */ B(LdaSmi), U8(1),
- /* 698 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(113),
+ /* 698 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(114),
/* 705 S> */ B(LdaSmi), U8(1),
- /* 710 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(115),
+ /* 710 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(116),
/* 717 S> */ B(LdaSmi), U8(1),
- /* 722 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(117),
+ /* 722 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(118),
/* 729 S> */ B(LdaSmi), U8(1),
- /* 734 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(119),
+ /* 734 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(120),
/* 741 S> */ B(LdaSmi), U8(1),
- /* 746 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(121),
+ /* 746 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(122),
/* 753 S> */ B(LdaSmi), U8(1),
- /* 758 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(123),
+ /* 758 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(124),
/* 765 S> */ B(LdaSmi), U8(1),
- /* 770 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(125),
+ /* 770 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(126),
/* 777 S> */ B(LdaSmi), U8(1),
- /* 782 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(127),
+ /* 782 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(128),
/* 789 S> */ B(LdaSmi), U8(1),
- /* 794 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(129),
+ /* 794 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(130),
/* 801 S> */ B(LdaSmi), U8(1),
- /* 806 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(131),
+ /* 806 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(132),
/* 813 S> */ B(LdaSmi), U8(1),
- /* 818 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(133),
+ /* 818 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(134),
/* 825 S> */ B(LdaSmi), U8(1),
- /* 830 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(135),
+ /* 830 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(136),
/* 837 S> */ B(LdaSmi), U8(1),
- /* 842 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(137),
+ /* 842 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(138),
/* 849 S> */ B(LdaSmi), U8(1),
- /* 854 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(139),
+ /* 854 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(140),
/* 861 S> */ B(LdaSmi), U8(1),
- /* 866 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(141),
+ /* 866 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(142),
/* 873 S> */ B(LdaSmi), U8(1),
- /* 878 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(143),
+ /* 878 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(144),
/* 885 S> */ B(LdaSmi), U8(1),
- /* 890 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(145),
+ /* 890 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(146),
/* 897 S> */ B(LdaSmi), U8(1),
- /* 902 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(147),
+ /* 902 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(148),
/* 909 S> */ B(LdaSmi), U8(1),
- /* 914 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(149),
+ /* 914 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(150),
/* 921 S> */ B(LdaSmi), U8(1),
- /* 926 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(151),
+ /* 926 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(152),
/* 933 S> */ B(LdaSmi), U8(1),
- /* 938 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(153),
+ /* 938 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(154),
/* 945 S> */ B(LdaSmi), U8(1),
- /* 950 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(155),
+ /* 950 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(156),
/* 957 S> */ B(LdaSmi), U8(1),
- /* 962 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(157),
+ /* 962 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(158),
/* 969 S> */ B(LdaSmi), U8(1),
- /* 974 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(159),
+ /* 974 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(160),
/* 981 S> */ B(LdaSmi), U8(1),
- /* 986 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(161),
+ /* 986 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(162),
/* 993 S> */ B(LdaSmi), U8(1),
- /* 998 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(163),
+ /* 998 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(164),
/* 1005 S> */ B(LdaSmi), U8(1),
- /* 1010 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(165),
+ /* 1010 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(166),
/* 1017 S> */ B(LdaSmi), U8(1),
- /* 1022 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(167),
+ /* 1022 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(168),
/* 1029 S> */ B(LdaSmi), U8(1),
- /* 1034 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(169),
+ /* 1034 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(170),
/* 1041 S> */ B(LdaSmi), U8(1),
- /* 1046 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(171),
+ /* 1046 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(172),
/* 1053 S> */ B(LdaSmi), U8(1),
- /* 1058 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(173),
+ /* 1058 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(174),
/* 1065 S> */ B(LdaSmi), U8(1),
- /* 1070 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(175),
+ /* 1070 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(176),
/* 1077 S> */ B(LdaSmi), U8(1),
- /* 1082 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(177),
+ /* 1082 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(178),
/* 1089 S> */ B(LdaSmi), U8(1),
- /* 1094 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(179),
+ /* 1094 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(180),
/* 1101 S> */ B(LdaSmi), U8(1),
- /* 1106 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(181),
+ /* 1106 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(182),
/* 1113 S> */ B(LdaSmi), U8(1),
- /* 1118 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(183),
+ /* 1118 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(184),
/* 1125 S> */ B(LdaSmi), U8(1),
- /* 1130 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(185),
+ /* 1130 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(186),
/* 1137 S> */ B(LdaSmi), U8(1),
- /* 1142 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(187),
+ /* 1142 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(188),
/* 1149 S> */ B(LdaSmi), U8(1),
- /* 1154 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(189),
+ /* 1154 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(190),
/* 1161 S> */ B(LdaSmi), U8(1),
- /* 1166 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(191),
+ /* 1166 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(192),
/* 1173 S> */ B(LdaSmi), U8(1),
- /* 1178 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(193),
+ /* 1178 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(194),
/* 1185 S> */ B(LdaSmi), U8(1),
- /* 1190 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(195),
+ /* 1190 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(196),
/* 1197 S> */ B(LdaSmi), U8(1),
- /* 1202 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(197),
+ /* 1202 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(198),
/* 1209 S> */ B(LdaSmi), U8(1),
- /* 1214 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(199),
+ /* 1214 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(200),
/* 1221 S> */ B(LdaSmi), U8(1),
- /* 1226 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(201),
+ /* 1226 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(202),
/* 1233 S> */ B(LdaSmi), U8(1),
- /* 1238 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(203),
+ /* 1238 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(204),
/* 1245 S> */ B(LdaSmi), U8(1),
- /* 1250 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(205),
+ /* 1250 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(206),
/* 1257 S> */ B(LdaSmi), U8(1),
- /* 1262 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(207),
+ /* 1262 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(208),
/* 1269 S> */ B(LdaSmi), U8(1),
- /* 1274 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(209),
+ /* 1274 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(210),
/* 1281 S> */ B(LdaSmi), U8(1),
- /* 1286 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(211),
+ /* 1286 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(212),
/* 1293 S> */ B(LdaSmi), U8(1),
- /* 1298 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(213),
+ /* 1298 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(214),
/* 1305 S> */ B(LdaSmi), U8(1),
- /* 1310 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(215),
+ /* 1310 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(216),
/* 1317 S> */ B(LdaSmi), U8(1),
- /* 1322 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(217),
+ /* 1322 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(218),
/* 1329 S> */ B(LdaSmi), U8(1),
- /* 1334 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(219),
+ /* 1334 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(220),
/* 1341 S> */ B(LdaSmi), U8(1),
- /* 1346 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(221),
+ /* 1346 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(222),
/* 1353 S> */ B(LdaSmi), U8(1),
- /* 1358 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(223),
+ /* 1358 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(224),
/* 1365 S> */ B(LdaSmi), U8(1),
- /* 1370 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(225),
+ /* 1370 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(226),
/* 1377 S> */ B(LdaSmi), U8(1),
- /* 1382 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(227),
+ /* 1382 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(228),
/* 1389 S> */ B(LdaSmi), U8(1),
- /* 1394 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(229),
+ /* 1394 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(230),
/* 1401 S> */ B(LdaSmi), U8(1),
- /* 1406 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(231),
+ /* 1406 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(232),
/* 1413 S> */ B(LdaSmi), U8(1),
- /* 1418 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(233),
+ /* 1418 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(234),
/* 1425 S> */ B(LdaSmi), U8(1),
- /* 1430 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(235),
+ /* 1430 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(236),
/* 1437 S> */ B(LdaSmi), U8(1),
- /* 1442 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(237),
+ /* 1442 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(238),
/* 1449 S> */ B(LdaSmi), U8(1),
- /* 1454 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(239),
+ /* 1454 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(240),
/* 1461 S> */ B(LdaSmi), U8(1),
- /* 1466 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(241),
+ /* 1466 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(242),
/* 1473 S> */ B(LdaSmi), U8(1),
- /* 1478 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(243),
+ /* 1478 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(244),
/* 1485 S> */ B(LdaSmi), U8(1),
- /* 1490 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(245),
+ /* 1490 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(246),
/* 1497 S> */ B(LdaSmi), U8(1),
- /* 1502 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(247),
+ /* 1502 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(248),
/* 1509 S> */ B(LdaSmi), U8(1),
- /* 1514 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(249),
+ /* 1514 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(250),
/* 1521 S> */ B(LdaSmi), U8(1),
- /* 1526 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(251),
+ /* 1526 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(252),
/* 1533 S> */ B(LdaSmi), U8(1),
- /* 1538 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(253),
+ /* 1538 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(254),
/* 1545 S> */ B(LdaSmi), U8(1),
- /* 1550 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(255),
+ /* 1550 E> */ B(Wide), B(StaKeyedPropertySloppy), R16(arg0), R16(arg1), U16(256),
/* 1557 S> */ B(LdaSmi), U8(2),
- /* 1562 E> */ B(Wide), B(StaKeyedPropertySloppy), R16(arg0), R16(arg1), U16(257),
+ /* 1562 E> */ B(Wide), B(StaKeyedPropertySloppy), R16(arg0), R16(arg1), U16(258),
B(LdaUndefined),
/* 1567 S> */ B(Return),
]
@@ -1520,267 +1518,267 @@ snippet: "
"
frame size: 0
parameter count: 3
-bytecode array length: 781
+bytecode array length: 785
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 37 S> */ B(LdaSmi), U8(1),
- /* 42 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(1),
+ /* 42 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(2),
/* 49 S> */ B(LdaSmi), U8(1),
- /* 54 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(3),
+ /* 54 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(4),
/* 61 S> */ B(LdaSmi), U8(1),
- /* 66 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(5),
+ /* 66 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(6),
/* 73 S> */ B(LdaSmi), U8(1),
- /* 78 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(7),
+ /* 78 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(8),
/* 85 S> */ B(LdaSmi), U8(1),
- /* 90 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(9),
+ /* 90 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(10),
/* 97 S> */ B(LdaSmi), U8(1),
- /* 102 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(11),
+ /* 102 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(12),
/* 109 S> */ B(LdaSmi), U8(1),
- /* 114 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(13),
+ /* 114 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(14),
/* 121 S> */ B(LdaSmi), U8(1),
- /* 126 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(15),
+ /* 126 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(16),
/* 133 S> */ B(LdaSmi), U8(1),
- /* 138 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(17),
+ /* 138 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(18),
/* 145 S> */ B(LdaSmi), U8(1),
- /* 150 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(19),
+ /* 150 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(20),
/* 157 S> */ B(LdaSmi), U8(1),
- /* 162 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(21),
+ /* 162 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(22),
/* 169 S> */ B(LdaSmi), U8(1),
- /* 174 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(23),
+ /* 174 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(24),
/* 181 S> */ B(LdaSmi), U8(1),
- /* 186 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(25),
+ /* 186 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(26),
/* 193 S> */ B(LdaSmi), U8(1),
- /* 198 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(27),
+ /* 198 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(28),
/* 205 S> */ B(LdaSmi), U8(1),
- /* 210 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(29),
+ /* 210 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(30),
/* 217 S> */ B(LdaSmi), U8(1),
- /* 222 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(31),
+ /* 222 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(32),
/* 229 S> */ B(LdaSmi), U8(1),
- /* 234 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(33),
+ /* 234 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(34),
/* 241 S> */ B(LdaSmi), U8(1),
- /* 246 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(35),
+ /* 246 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(36),
/* 253 S> */ B(LdaSmi), U8(1),
- /* 258 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(37),
+ /* 258 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(38),
/* 265 S> */ B(LdaSmi), U8(1),
- /* 270 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(39),
+ /* 270 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(40),
/* 277 S> */ B(LdaSmi), U8(1),
- /* 282 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(41),
+ /* 282 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(42),
/* 289 S> */ B(LdaSmi), U8(1),
- /* 294 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(43),
+ /* 294 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(44),
/* 301 S> */ B(LdaSmi), U8(1),
- /* 306 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(45),
+ /* 306 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(46),
/* 313 S> */ B(LdaSmi), U8(1),
- /* 318 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(47),
+ /* 318 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(48),
/* 325 S> */ B(LdaSmi), U8(1),
- /* 330 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(49),
+ /* 330 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(50),
/* 337 S> */ B(LdaSmi), U8(1),
- /* 342 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(51),
+ /* 342 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(52),
/* 349 S> */ B(LdaSmi), U8(1),
- /* 354 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(53),
+ /* 354 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(54),
/* 361 S> */ B(LdaSmi), U8(1),
- /* 366 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(55),
+ /* 366 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(56),
/* 373 S> */ B(LdaSmi), U8(1),
- /* 378 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(57),
+ /* 378 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(58),
/* 385 S> */ B(LdaSmi), U8(1),
- /* 390 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(59),
+ /* 390 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(60),
/* 397 S> */ B(LdaSmi), U8(1),
- /* 402 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(61),
+ /* 402 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(62),
/* 409 S> */ B(LdaSmi), U8(1),
- /* 414 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(63),
+ /* 414 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(64),
/* 421 S> */ B(LdaSmi), U8(1),
- /* 426 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(65),
+ /* 426 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(66),
/* 433 S> */ B(LdaSmi), U8(1),
- /* 438 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(67),
+ /* 438 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(68),
/* 445 S> */ B(LdaSmi), U8(1),
- /* 450 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(69),
+ /* 450 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(70),
/* 457 S> */ B(LdaSmi), U8(1),
- /* 462 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(71),
+ /* 462 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(72),
/* 469 S> */ B(LdaSmi), U8(1),
- /* 474 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(73),
+ /* 474 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(74),
/* 481 S> */ B(LdaSmi), U8(1),
- /* 486 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(75),
+ /* 486 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(76),
/* 493 S> */ B(LdaSmi), U8(1),
- /* 498 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(77),
+ /* 498 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(78),
/* 505 S> */ B(LdaSmi), U8(1),
- /* 510 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(79),
+ /* 510 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(80),
/* 517 S> */ B(LdaSmi), U8(1),
- /* 522 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(81),
+ /* 522 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(82),
/* 529 S> */ B(LdaSmi), U8(1),
- /* 534 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(83),
+ /* 534 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(84),
/* 541 S> */ B(LdaSmi), U8(1),
- /* 546 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(85),
+ /* 546 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(86),
/* 553 S> */ B(LdaSmi), U8(1),
- /* 558 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(87),
+ /* 558 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(88),
/* 565 S> */ B(LdaSmi), U8(1),
- /* 570 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(89),
+ /* 570 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(90),
/* 577 S> */ B(LdaSmi), U8(1),
- /* 582 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(91),
+ /* 582 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(92),
/* 589 S> */ B(LdaSmi), U8(1),
- /* 594 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(93),
+ /* 594 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(94),
/* 601 S> */ B(LdaSmi), U8(1),
- /* 606 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(95),
+ /* 606 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(96),
/* 613 S> */ B(LdaSmi), U8(1),
- /* 618 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(97),
+ /* 618 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(98),
/* 625 S> */ B(LdaSmi), U8(1),
- /* 630 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(99),
+ /* 630 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(100),
/* 637 S> */ B(LdaSmi), U8(1),
- /* 642 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(101),
+ /* 642 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(102),
/* 649 S> */ B(LdaSmi), U8(1),
- /* 654 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(103),
+ /* 654 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(104),
/* 661 S> */ B(LdaSmi), U8(1),
- /* 666 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(105),
+ /* 666 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(106),
/* 673 S> */ B(LdaSmi), U8(1),
- /* 678 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(107),
+ /* 678 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(108),
/* 685 S> */ B(LdaSmi), U8(1),
- /* 690 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(109),
+ /* 690 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(110),
/* 697 S> */ B(LdaSmi), U8(1),
- /* 702 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(111),
+ /* 702 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(112),
/* 709 S> */ B(LdaSmi), U8(1),
- /* 714 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(113),
+ /* 714 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(114),
/* 721 S> */ B(LdaSmi), U8(1),
- /* 726 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(115),
+ /* 726 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(116),
/* 733 S> */ B(LdaSmi), U8(1),
- /* 738 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(117),
+ /* 738 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(118),
/* 745 S> */ B(LdaSmi), U8(1),
- /* 750 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(119),
+ /* 750 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(120),
/* 757 S> */ B(LdaSmi), U8(1),
- /* 762 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(121),
+ /* 762 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(122),
/* 769 S> */ B(LdaSmi), U8(1),
- /* 774 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(123),
+ /* 774 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(124),
/* 781 S> */ B(LdaSmi), U8(1),
- /* 786 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(125),
+ /* 786 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(126),
/* 793 S> */ B(LdaSmi), U8(1),
- /* 798 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(127),
+ /* 798 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(128),
/* 805 S> */ B(LdaSmi), U8(1),
- /* 810 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(129),
+ /* 810 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(130),
/* 817 S> */ B(LdaSmi), U8(1),
- /* 822 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(131),
+ /* 822 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(132),
/* 829 S> */ B(LdaSmi), U8(1),
- /* 834 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(133),
+ /* 834 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(134),
/* 841 S> */ B(LdaSmi), U8(1),
- /* 846 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(135),
+ /* 846 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(136),
/* 853 S> */ B(LdaSmi), U8(1),
- /* 858 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(137),
+ /* 858 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(138),
/* 865 S> */ B(LdaSmi), U8(1),
- /* 870 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(139),
+ /* 870 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(140),
/* 877 S> */ B(LdaSmi), U8(1),
- /* 882 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(141),
+ /* 882 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(142),
/* 889 S> */ B(LdaSmi), U8(1),
- /* 894 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(143),
+ /* 894 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(144),
/* 901 S> */ B(LdaSmi), U8(1),
- /* 906 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(145),
+ /* 906 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(146),
/* 913 S> */ B(LdaSmi), U8(1),
- /* 918 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(147),
+ /* 918 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(148),
/* 925 S> */ B(LdaSmi), U8(1),
- /* 930 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(149),
+ /* 930 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(150),
/* 937 S> */ B(LdaSmi), U8(1),
- /* 942 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(151),
+ /* 942 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(152),
/* 949 S> */ B(LdaSmi), U8(1),
- /* 954 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(153),
+ /* 954 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(154),
/* 961 S> */ B(LdaSmi), U8(1),
- /* 966 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(155),
+ /* 966 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(156),
/* 973 S> */ B(LdaSmi), U8(1),
- /* 978 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(157),
+ /* 978 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(158),
/* 985 S> */ B(LdaSmi), U8(1),
- /* 990 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(159),
+ /* 990 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(160),
/* 997 S> */ B(LdaSmi), U8(1),
- /* 1002 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(161),
+ /* 1002 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(162),
/* 1009 S> */ B(LdaSmi), U8(1),
- /* 1014 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(163),
+ /* 1014 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(164),
/* 1021 S> */ B(LdaSmi), U8(1),
- /* 1026 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(165),
+ /* 1026 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(166),
/* 1033 S> */ B(LdaSmi), U8(1),
- /* 1038 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(167),
+ /* 1038 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(168),
/* 1045 S> */ B(LdaSmi), U8(1),
- /* 1050 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(169),
+ /* 1050 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(170),
/* 1057 S> */ B(LdaSmi), U8(1),
- /* 1062 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(171),
+ /* 1062 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(172),
/* 1069 S> */ B(LdaSmi), U8(1),
- /* 1074 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(173),
+ /* 1074 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(174),
/* 1081 S> */ B(LdaSmi), U8(1),
- /* 1086 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(175),
+ /* 1086 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(176),
/* 1093 S> */ B(LdaSmi), U8(1),
- /* 1098 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(177),
+ /* 1098 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(178),
/* 1105 S> */ B(LdaSmi), U8(1),
- /* 1110 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(179),
+ /* 1110 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(180),
/* 1117 S> */ B(LdaSmi), U8(1),
- /* 1122 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(181),
+ /* 1122 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(182),
/* 1129 S> */ B(LdaSmi), U8(1),
- /* 1134 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(183),
+ /* 1134 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(184),
/* 1141 S> */ B(LdaSmi), U8(1),
- /* 1146 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(185),
+ /* 1146 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(186),
/* 1153 S> */ B(LdaSmi), U8(1),
- /* 1158 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(187),
+ /* 1158 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(188),
/* 1165 S> */ B(LdaSmi), U8(1),
- /* 1170 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(189),
+ /* 1170 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(190),
/* 1177 S> */ B(LdaSmi), U8(1),
- /* 1182 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(191),
+ /* 1182 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(192),
/* 1189 S> */ B(LdaSmi), U8(1),
- /* 1194 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(193),
+ /* 1194 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(194),
/* 1201 S> */ B(LdaSmi), U8(1),
- /* 1206 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(195),
+ /* 1206 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(196),
/* 1213 S> */ B(LdaSmi), U8(1),
- /* 1218 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(197),
+ /* 1218 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(198),
/* 1225 S> */ B(LdaSmi), U8(1),
- /* 1230 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(199),
+ /* 1230 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(200),
/* 1237 S> */ B(LdaSmi), U8(1),
- /* 1242 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(201),
+ /* 1242 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(202),
/* 1249 S> */ B(LdaSmi), U8(1),
- /* 1254 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(203),
+ /* 1254 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(204),
/* 1261 S> */ B(LdaSmi), U8(1),
- /* 1266 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(205),
+ /* 1266 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(206),
/* 1273 S> */ B(LdaSmi), U8(1),
- /* 1278 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(207),
+ /* 1278 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(208),
/* 1285 S> */ B(LdaSmi), U8(1),
- /* 1290 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(209),
+ /* 1290 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(210),
/* 1297 S> */ B(LdaSmi), U8(1),
- /* 1302 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(211),
+ /* 1302 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(212),
/* 1309 S> */ B(LdaSmi), U8(1),
- /* 1314 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(213),
+ /* 1314 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(214),
/* 1321 S> */ B(LdaSmi), U8(1),
- /* 1326 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(215),
+ /* 1326 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(216),
/* 1333 S> */ B(LdaSmi), U8(1),
- /* 1338 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(217),
+ /* 1338 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(218),
/* 1345 S> */ B(LdaSmi), U8(1),
- /* 1350 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(219),
+ /* 1350 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(220),
/* 1357 S> */ B(LdaSmi), U8(1),
- /* 1362 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(221),
+ /* 1362 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(222),
/* 1369 S> */ B(LdaSmi), U8(1),
- /* 1374 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(223),
+ /* 1374 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(224),
/* 1381 S> */ B(LdaSmi), U8(1),
- /* 1386 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(225),
+ /* 1386 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(226),
/* 1393 S> */ B(LdaSmi), U8(1),
- /* 1398 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(227),
+ /* 1398 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(228),
/* 1405 S> */ B(LdaSmi), U8(1),
- /* 1410 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(229),
+ /* 1410 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(230),
/* 1417 S> */ B(LdaSmi), U8(1),
- /* 1422 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(231),
+ /* 1422 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(232),
/* 1429 S> */ B(LdaSmi), U8(1),
- /* 1434 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(233),
+ /* 1434 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(234),
/* 1441 S> */ B(LdaSmi), U8(1),
- /* 1446 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(235),
+ /* 1446 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(236),
/* 1453 S> */ B(LdaSmi), U8(1),
- /* 1458 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(237),
+ /* 1458 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(238),
/* 1465 S> */ B(LdaSmi), U8(1),
- /* 1470 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(239),
+ /* 1470 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(240),
/* 1477 S> */ B(LdaSmi), U8(1),
- /* 1482 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(241),
+ /* 1482 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(242),
/* 1489 S> */ B(LdaSmi), U8(1),
- /* 1494 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(243),
+ /* 1494 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(244),
/* 1501 S> */ B(LdaSmi), U8(1),
- /* 1506 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(245),
+ /* 1506 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(246),
/* 1513 S> */ B(LdaSmi), U8(1),
- /* 1518 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(247),
+ /* 1518 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(248),
/* 1525 S> */ B(LdaSmi), U8(1),
- /* 1530 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(249),
+ /* 1530 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(250),
/* 1537 S> */ B(LdaSmi), U8(1),
- /* 1542 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(251),
+ /* 1542 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(252),
/* 1549 S> */ B(LdaSmi), U8(1),
- /* 1554 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(253),
+ /* 1554 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(254),
/* 1561 S> */ B(LdaSmi), U8(1),
- /* 1566 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(255),
+ /* 1566 E> */ B(Wide), B(StaKeyedPropertyStrict), R16(arg0), R16(arg1), U16(256),
/* 1573 S> */ B(LdaSmi), U8(2),
- /* 1578 E> */ B(Wide), B(StaKeyedPropertyStrict), R16(arg0), R16(arg1), U16(257),
+ /* 1578 E> */ B(Wide), B(StaKeyedPropertyStrict), R16(arg0), R16(arg1), U16(258),
B(LdaUndefined),
/* 1583 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
index 3637f78230..03973619fd 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: yes
---
@@ -20,7 +18,7 @@ bytecodes: [
/* 49 S> */ B(Return),
]
constant pool: [
- "ab+d",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["ab+d"],
]
handlers: [
]
@@ -38,7 +36,7 @@ bytecodes: [
/* 58 S> */ B(Return),
]
constant pool: [
- "(\u005cw+)\u005cs(\u005cw+)",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["(\u005cw+)\u005cs(\u005cw+)"],
]
handlers: [
]
@@ -54,16 +52,16 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(0), U8(0),
B(Star), R(1),
- /* 47 E> */ B(LdrNamedProperty), R(1), U8(1), U8(3), R(0),
+ /* 47 E> */ B(LdrNamedProperty), R(1), U8(1), U8(4), R(0),
B(LdaConstant), U8(2),
B(Star), R(2),
- /* 48 E> */ B(Call), R(0), R(1), U8(2), U8(1),
+ /* 48 E> */ B(Call), R(0), R(1), U8(2), U8(2),
/* 62 S> */ B(Return),
]
constant pool: [
- "ab+d",
- "exec",
- "abdd",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["ab+d"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["exec"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["abdd"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden
index 3c5499b4cf..3eb79ba725 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
@@ -789,263 +787,263 @@ bytecodes: [
/* 2616 S> */ B(Return),
]
constant pool: [
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::HEAP_NUMBER_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ HEAP_NUMBER_TYPE [1.23],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["ab+d"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
index c632a76e69..de6e8935b3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: yes
---
@@ -18,20 +16,20 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 26
+bytecode array length: 28
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 48 E> */ B(StackCheck),
/* 64 S> */ B(Ldar), R(0),
- /* 78 E> */ B(Add), R(0), U8(1),
+ /* 78 E> */ B(Add), R(0), U8(2),
B(Star), R(0),
/* 86 S> */ B(LdaSmi), U8(10),
- /* 95 E> */ B(TestGreaterThan), R(0),
+ /* 95 E> */ B(TestGreaterThan), R(0), U8(3),
B(JumpIfFalse), U8(4),
- /* 101 S> */ B(Jump), U8(4),
- B(Jump), U8(-16),
+ /* 101 S> */ B(Jump), U8(5),
+ B(JumpLoop), U8(-17), U8(0),
/* 110 S> */ B(Ldar), R(0),
/* 123 S> */ B(Return),
]
@@ -51,17 +49,17 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 23
+bytecode array length: 24
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 48 E> */ B(StackCheck),
/* 55 S> */ B(Nop),
- /* 69 E> */ B(Add), R(0), U8(1),
+ /* 69 E> */ B(Add), R(0), U8(2),
B(Star), R(0),
/* 77 S> */ B(LdaSmi), U8(10),
- /* 86 E> */ B(TestGreaterThan), R(0),
+ /* 86 E> */ B(TestGreaterThan), R(0), U8(3),
B(JumpIfFalse), U8(4),
/* 92 S> */ B(Jump), U8(2),
/* 118 S> */ B(Ldar), R(0),
@@ -86,7 +84,7 @@ bytecodes: [
/* 45 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 50 S> */ B(Nop),
- /* 64 E> */ B(Add), R(0), U8(1),
+ /* 64 E> */ B(Add), R(0), U8(2),
B(Star), R(0),
/* 72 S> */ B(Nop),
/* 85 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
index 5cc49b2035..422fa12cb1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: no
test function name: f
@@ -20,12 +18,12 @@ bytecode array length: 8
bytecodes: [
/* 21 E> */ B(StackCheck),
/* 26 S> */ B(LdaSmi), U8(2),
- /* 28 E> */ B(StaGlobalSloppy), U8(0), U8(1),
+ /* 28 E> */ B(StaGlobalSloppy), U8(0), U8(2),
B(LdaUndefined),
/* 33 S> */ B(Return),
]
constant pool: [
- "a",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
]
handlers: [
]
@@ -41,12 +39,12 @@ bytecode array length: 8
bytecodes: [
/* 26 E> */ B(StackCheck),
/* 32 S> */ B(Ldar), R(arg0),
- /* 34 E> */ B(StaGlobalSloppy), U8(0), U8(1),
+ /* 34 E> */ B(StaGlobalSloppy), U8(0), U8(2),
B(LdaUndefined),
/* 39 S> */ B(Return),
]
constant pool: [
- "a",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
]
handlers: [
]
@@ -63,12 +61,12 @@ bytecode array length: 8
bytecodes: [
/* 35 E> */ B(StackCheck),
/* 40 S> */ B(LdaSmi), U8(2),
- /* 42 E> */ B(StaGlobalStrict), U8(0), U8(1),
+ /* 42 E> */ B(StaGlobalStrict), U8(0), U8(2),
B(LdaUndefined),
/* 47 S> */ B(Return),
]
constant pool: [
- "a",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
]
handlers: [
]
@@ -85,12 +83,12 @@ bytecode array length: 8
bytecodes: [
/* 17 E> */ B(StackCheck),
/* 22 S> */ B(LdaSmi), U8(2),
- /* 24 E> */ B(StaGlobalSloppy), U8(0), U8(1),
+ /* 24 E> */ B(StaGlobalSloppy), U8(0), U8(2),
B(LdaUndefined),
/* 29 S> */ B(Return),
]
constant pool: [
- "a",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
]
handlers: [
]
@@ -233,273 +231,273 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 651
+bytecode array length: 655
bytecodes: [
/* 17 E> */ B(StackCheck),
/* 25 S> */ B(Nop),
- /* 26 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(1),
+ /* 26 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
/* 35 S> */ B(Nop),
- /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
+ /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
/* 45 S> */ B(Nop),
- /* 46 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
+ /* 46 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(6),
/* 55 S> */ B(Nop),
- /* 56 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(7),
+ /* 56 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(8),
/* 65 S> */ B(Nop),
- /* 66 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
+ /* 66 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(10),
/* 75 S> */ B(Nop),
- /* 76 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(11),
+ /* 76 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(12),
/* 85 S> */ B(Nop),
- /* 86 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(13),
+ /* 86 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(14),
/* 95 S> */ B(Nop),
- /* 96 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(15),
+ /* 96 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(16),
/* 105 S> */ B(Nop),
- /* 106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(17),
+ /* 106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(18),
/* 115 S> */ B(Nop),
- /* 116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(19),
+ /* 116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(20),
/* 125 S> */ B(Nop),
- /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(21),
+ /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(22),
/* 135 S> */ B(Nop),
- /* 136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(23),
+ /* 136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(24),
/* 145 S> */ B(Nop),
- /* 146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(25),
+ /* 146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(26),
/* 155 S> */ B(Nop),
- /* 156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(27),
+ /* 156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(28),
/* 165 S> */ B(Nop),
- /* 166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(29),
+ /* 166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(30),
/* 175 S> */ B(Nop),
- /* 176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(31),
+ /* 176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(32),
/* 185 S> */ B(Nop),
- /* 186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(33),
+ /* 186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(34),
/* 195 S> */ B(Nop),
- /* 196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(35),
+ /* 196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(36),
/* 205 S> */ B(Nop),
- /* 206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(37),
+ /* 206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(38),
/* 215 S> */ B(Nop),
- /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(39),
+ /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(40),
/* 225 S> */ B(Nop),
- /* 226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(41),
+ /* 226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(42),
/* 235 S> */ B(Nop),
- /* 236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(43),
+ /* 236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(44),
/* 245 S> */ B(Nop),
- /* 246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(45),
+ /* 246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(46),
/* 255 S> */ B(Nop),
- /* 256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(47),
+ /* 256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(48),
/* 265 S> */ B(Nop),
- /* 266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(49),
+ /* 266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(50),
/* 275 S> */ B(Nop),
- /* 276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(51),
+ /* 276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(52),
/* 285 S> */ B(Nop),
- /* 286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(53),
+ /* 286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(54),
/* 295 S> */ B(Nop),
- /* 296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(55),
+ /* 296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(56),
/* 305 S> */ B(Nop),
- /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(57),
+ /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(58),
/* 315 S> */ B(Nop),
- /* 316 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(59),
+ /* 316 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(60),
/* 325 S> */ B(Nop),
- /* 326 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(61),
+ /* 326 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(62),
/* 335 S> */ B(Nop),
- /* 336 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(63),
+ /* 336 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(64),
/* 345 S> */ B(Nop),
- /* 346 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(65),
+ /* 346 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(66),
/* 355 S> */ B(Nop),
- /* 356 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(67),
+ /* 356 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(68),
/* 365 S> */ B(Nop),
- /* 366 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(69),
+ /* 366 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(70),
/* 375 S> */ B(Nop),
- /* 376 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(71),
+ /* 376 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(72),
/* 385 S> */ B(Nop),
- /* 386 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(73),
+ /* 386 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(74),
/* 395 S> */ B(Nop),
- /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(75),
+ /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(76),
/* 405 S> */ B(Nop),
- /* 406 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(77),
+ /* 406 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(78),
/* 415 S> */ B(Nop),
- /* 416 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(79),
+ /* 416 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(80),
/* 425 S> */ B(Nop),
- /* 426 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(81),
+ /* 426 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(82),
/* 435 S> */ B(Nop),
- /* 436 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(83),
+ /* 436 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(84),
/* 445 S> */ B(Nop),
- /* 446 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(85),
+ /* 446 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(86),
/* 455 S> */ B(Nop),
- /* 456 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(87),
+ /* 456 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(88),
/* 465 S> */ B(Nop),
- /* 466 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(89),
+ /* 466 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(90),
/* 475 S> */ B(Nop),
- /* 476 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(91),
+ /* 476 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(92),
/* 485 S> */ B(Nop),
- /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(93),
+ /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(94),
/* 495 S> */ B(Nop),
- /* 496 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(95),
+ /* 496 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(96),
/* 505 S> */ B(Nop),
- /* 506 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(97),
+ /* 506 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(98),
/* 515 S> */ B(Nop),
- /* 516 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(99),
+ /* 516 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(100),
/* 525 S> */ B(Nop),
- /* 526 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(101),
+ /* 526 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(102),
/* 535 S> */ B(Nop),
- /* 536 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(103),
+ /* 536 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(104),
/* 545 S> */ B(Nop),
- /* 546 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(105),
+ /* 546 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(106),
/* 555 S> */ B(Nop),
- /* 556 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(107),
+ /* 556 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(108),
/* 565 S> */ B(Nop),
- /* 566 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(109),
+ /* 566 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(110),
/* 575 S> */ B(Nop),
- /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(111),
+ /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(112),
/* 585 S> */ B(Nop),
- /* 586 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(113),
+ /* 586 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(114),
/* 595 S> */ B(Nop),
- /* 596 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(115),
+ /* 596 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(116),
/* 605 S> */ B(Nop),
- /* 606 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(117),
+ /* 606 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(118),
/* 615 S> */ B(Nop),
- /* 616 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(119),
+ /* 616 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(120),
/* 625 S> */ B(Nop),
- /* 626 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(121),
+ /* 626 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(122),
/* 635 S> */ B(Nop),
- /* 636 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(123),
+ /* 636 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(124),
/* 645 S> */ B(Nop),
- /* 646 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(125),
+ /* 646 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(126),
/* 655 S> */ B(Nop),
- /* 656 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(127),
+ /* 656 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(128),
/* 665 S> */ B(Nop),
- /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(129),
+ /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(130),
/* 675 S> */ B(Nop),
- /* 676 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(131),
+ /* 676 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(132),
/* 685 S> */ B(Nop),
- /* 686 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(133),
+ /* 686 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(134),
/* 695 S> */ B(Nop),
- /* 696 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(135),
+ /* 696 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(136),
/* 705 S> */ B(Nop),
- /* 706 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(137),
+ /* 706 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(138),
/* 715 S> */ B(Nop),
- /* 716 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(139),
+ /* 716 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(140),
/* 725 S> */ B(Nop),
- /* 726 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(141),
+ /* 726 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(142),
/* 735 S> */ B(Nop),
- /* 736 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(143),
+ /* 736 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(144),
/* 745 S> */ B(Nop),
- /* 746 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(145),
+ /* 746 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(146),
/* 755 S> */ B(Nop),
- /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(147),
+ /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(148),
/* 765 S> */ B(Nop),
- /* 766 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(149),
+ /* 766 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(150),
/* 775 S> */ B(Nop),
- /* 776 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(151),
+ /* 776 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(152),
/* 785 S> */ B(Nop),
- /* 786 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(153),
+ /* 786 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(154),
/* 795 S> */ B(Nop),
- /* 796 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(155),
+ /* 796 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(156),
/* 805 S> */ B(Nop),
- /* 806 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(157),
+ /* 806 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(158),
/* 815 S> */ B(Nop),
- /* 816 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(159),
+ /* 816 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(160),
/* 825 S> */ B(Nop),
- /* 826 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(161),
+ /* 826 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(162),
/* 835 S> */ B(Nop),
- /* 836 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(163),
+ /* 836 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(164),
/* 845 S> */ B(Nop),
- /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(165),
+ /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(166),
/* 855 S> */ B(Nop),
- /* 856 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(167),
+ /* 856 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(168),
/* 865 S> */ B(Nop),
- /* 866 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(169),
+ /* 866 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(170),
/* 875 S> */ B(Nop),
- /* 876 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(171),
+ /* 876 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(172),
/* 885 S> */ B(Nop),
- /* 886 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(173),
+ /* 886 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(174),
/* 895 S> */ B(Nop),
- /* 896 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(175),
+ /* 896 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(176),
/* 905 S> */ B(Nop),
- /* 906 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(177),
+ /* 906 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(178),
/* 915 S> */ B(Nop),
- /* 916 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(179),
+ /* 916 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(180),
/* 925 S> */ B(Nop),
- /* 926 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(181),
+ /* 926 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(182),
/* 935 S> */ B(Nop),
- /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(183),
+ /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(184),
/* 945 S> */ B(Nop),
- /* 946 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(185),
+ /* 946 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(186),
/* 955 S> */ B(Nop),
- /* 956 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(187),
+ /* 956 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(188),
/* 965 S> */ B(Nop),
- /* 966 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(189),
+ /* 966 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(190),
/* 975 S> */ B(Nop),
- /* 976 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(191),
+ /* 976 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(192),
/* 985 S> */ B(Nop),
- /* 986 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(193),
+ /* 986 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(194),
/* 995 S> */ B(Nop),
- /* 996 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(195),
+ /* 996 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(196),
/* 1005 S> */ B(Nop),
- /* 1006 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(197),
+ /* 1006 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(198),
/* 1015 S> */ B(Nop),
- /* 1016 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(199),
+ /* 1016 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(200),
/* 1025 S> */ B(Nop),
- /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(201),
+ /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(202),
/* 1035 S> */ B(Nop),
- /* 1036 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(203),
+ /* 1036 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(204),
/* 1045 S> */ B(Nop),
- /* 1046 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(205),
+ /* 1046 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(206),
/* 1055 S> */ B(Nop),
- /* 1056 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(207),
+ /* 1056 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(208),
/* 1065 S> */ B(Nop),
- /* 1066 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(209),
+ /* 1066 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(210),
/* 1075 S> */ B(Nop),
- /* 1076 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(211),
+ /* 1076 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(212),
/* 1085 S> */ B(Nop),
- /* 1086 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(213),
+ /* 1086 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(214),
/* 1095 S> */ B(Nop),
- /* 1096 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(215),
+ /* 1096 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(216),
/* 1105 S> */ B(Nop),
- /* 1106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(217),
+ /* 1106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(218),
/* 1115 S> */ B(Nop),
- /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(219),
+ /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(220),
/* 1125 S> */ B(Nop),
- /* 1126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(221),
+ /* 1126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(222),
/* 1135 S> */ B(Nop),
- /* 1136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(223),
+ /* 1136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(224),
/* 1145 S> */ B(Nop),
- /* 1146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(225),
+ /* 1146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(226),
/* 1155 S> */ B(Nop),
- /* 1156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(227),
+ /* 1156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(228),
/* 1165 S> */ B(Nop),
- /* 1166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(229),
+ /* 1166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(230),
/* 1175 S> */ B(Nop),
- /* 1176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(231),
+ /* 1176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(232),
/* 1185 S> */ B(Nop),
- /* 1186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(233),
+ /* 1186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(234),
/* 1195 S> */ B(Nop),
- /* 1196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(235),
+ /* 1196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(236),
/* 1205 S> */ B(Nop),
- /* 1206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(237),
+ /* 1206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(238),
/* 1215 S> */ B(Nop),
- /* 1216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(239),
+ /* 1216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(240),
/* 1225 S> */ B(Nop),
- /* 1226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(241),
+ /* 1226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(242),
/* 1235 S> */ B(Nop),
- /* 1236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(243),
+ /* 1236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(244),
/* 1245 S> */ B(Nop),
- /* 1246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(245),
+ /* 1246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(246),
/* 1255 S> */ B(Nop),
- /* 1256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(247),
+ /* 1256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(248),
/* 1265 S> */ B(Nop),
- /* 1266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(249),
+ /* 1266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(250),
/* 1275 S> */ B(Nop),
- /* 1276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(251),
+ /* 1276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(252),
/* 1285 S> */ B(Nop),
- /* 1286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(253),
+ /* 1286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(254),
/* 1295 S> */ B(Nop),
- /* 1296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(255),
+ /* 1296 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(256),
/* 1305 S> */ B(LdaSmi), U8(2),
- /* 1307 E> */ B(Wide), B(StaGlobalSloppy), U16(1), U16(257),
+ /* 1307 E> */ B(Wide), B(StaGlobalSloppy), U16(1), U16(258),
B(LdaUndefined),
/* 1312 S> */ B(Return),
]
constant pool: [
- "name",
- "a",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
]
handlers: [
]
@@ -643,273 +641,273 @@ snippet: "
"
frame size: 0
parameter count: 2
-bytecode array length: 651
+bytecode array length: 655
bytecodes: [
/* 17 E> */ B(StackCheck),
/* 41 S> */ B(Nop),
- /* 42 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(1),
+ /* 42 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
/* 51 S> */ B(Nop),
- /* 52 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
+ /* 52 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
/* 61 S> */ B(Nop),
- /* 62 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
+ /* 62 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(6),
/* 71 S> */ B(Nop),
- /* 72 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(7),
+ /* 72 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(8),
/* 81 S> */ B(Nop),
- /* 82 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
+ /* 82 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(10),
/* 91 S> */ B(Nop),
- /* 92 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(11),
+ /* 92 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(12),
/* 101 S> */ B(Nop),
- /* 102 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(13),
+ /* 102 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(14),
/* 111 S> */ B(Nop),
- /* 112 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(15),
+ /* 112 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(16),
/* 121 S> */ B(Nop),
- /* 122 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(17),
+ /* 122 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(18),
/* 131 S> */ B(Nop),
- /* 132 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(19),
+ /* 132 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(20),
/* 141 S> */ B(Nop),
- /* 142 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(21),
+ /* 142 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(22),
/* 151 S> */ B(Nop),
- /* 152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(23),
+ /* 152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(24),
/* 161 S> */ B(Nop),
- /* 162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(25),
+ /* 162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(26),
/* 171 S> */ B(Nop),
- /* 172 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(27),
+ /* 172 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(28),
/* 181 S> */ B(Nop),
- /* 182 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(29),
+ /* 182 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(30),
/* 191 S> */ B(Nop),
- /* 192 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(31),
+ /* 192 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(32),
/* 201 S> */ B(Nop),
- /* 202 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(33),
+ /* 202 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(34),
/* 211 S> */ B(Nop),
- /* 212 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(35),
+ /* 212 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(36),
/* 221 S> */ B(Nop),
- /* 222 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(37),
+ /* 222 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(38),
/* 231 S> */ B(Nop),
- /* 232 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(39),
+ /* 232 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(40),
/* 241 S> */ B(Nop),
- /* 242 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(41),
+ /* 242 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(42),
/* 251 S> */ B(Nop),
- /* 252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(43),
+ /* 252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(44),
/* 261 S> */ B(Nop),
- /* 262 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(45),
+ /* 262 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(46),
/* 271 S> */ B(Nop),
- /* 272 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(47),
+ /* 272 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(48),
/* 281 S> */ B(Nop),
- /* 282 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(49),
+ /* 282 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(50),
/* 291 S> */ B(Nop),
- /* 292 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(51),
+ /* 292 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(52),
/* 301 S> */ B(Nop),
- /* 302 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(53),
+ /* 302 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(54),
/* 311 S> */ B(Nop),
- /* 312 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(55),
+ /* 312 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(56),
/* 321 S> */ B(Nop),
- /* 322 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(57),
+ /* 322 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(58),
/* 331 S> */ B(Nop),
- /* 332 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(59),
+ /* 332 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(60),
/* 341 S> */ B(Nop),
- /* 342 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(61),
+ /* 342 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(62),
/* 351 S> */ B(Nop),
- /* 352 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(63),
+ /* 352 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(64),
/* 361 S> */ B(Nop),
- /* 362 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(65),
+ /* 362 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(66),
/* 371 S> */ B(Nop),
- /* 372 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(67),
+ /* 372 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(68),
/* 381 S> */ B(Nop),
- /* 382 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(69),
+ /* 382 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(70),
/* 391 S> */ B(Nop),
- /* 392 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(71),
+ /* 392 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(72),
/* 401 S> */ B(Nop),
- /* 402 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(73),
+ /* 402 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(74),
/* 411 S> */ B(Nop),
- /* 412 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(75),
+ /* 412 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(76),
/* 421 S> */ B(Nop),
- /* 422 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(77),
+ /* 422 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(78),
/* 431 S> */ B(Nop),
- /* 432 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(79),
+ /* 432 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(80),
/* 441 S> */ B(Nop),
- /* 442 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(81),
+ /* 442 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(82),
/* 451 S> */ B(Nop),
- /* 452 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(83),
+ /* 452 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(84),
/* 461 S> */ B(Nop),
- /* 462 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(85),
+ /* 462 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(86),
/* 471 S> */ B(Nop),
- /* 472 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(87),
+ /* 472 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(88),
/* 481 S> */ B(Nop),
- /* 482 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(89),
+ /* 482 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(90),
/* 491 S> */ B(Nop),
- /* 492 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(91),
+ /* 492 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(92),
/* 501 S> */ B(Nop),
- /* 502 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(93),
+ /* 502 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(94),
/* 511 S> */ B(Nop),
- /* 512 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(95),
+ /* 512 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(96),
/* 521 S> */ B(Nop),
- /* 522 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(97),
+ /* 522 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(98),
/* 531 S> */ B(Nop),
- /* 532 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(99),
+ /* 532 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(100),
/* 541 S> */ B(Nop),
- /* 542 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(101),
+ /* 542 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(102),
/* 551 S> */ B(Nop),
- /* 552 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(103),
+ /* 552 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(104),
/* 561 S> */ B(Nop),
- /* 562 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(105),
+ /* 562 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(106),
/* 571 S> */ B(Nop),
- /* 572 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(107),
+ /* 572 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(108),
/* 581 S> */ B(Nop),
- /* 582 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(109),
+ /* 582 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(110),
/* 591 S> */ B(Nop),
- /* 592 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(111),
+ /* 592 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(112),
/* 601 S> */ B(Nop),
- /* 602 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(113),
+ /* 602 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(114),
/* 611 S> */ B(Nop),
- /* 612 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(115),
+ /* 612 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(116),
/* 621 S> */ B(Nop),
- /* 622 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(117),
+ /* 622 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(118),
/* 631 S> */ B(Nop),
- /* 632 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(119),
+ /* 632 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(120),
/* 641 S> */ B(Nop),
- /* 642 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(121),
+ /* 642 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(122),
/* 651 S> */ B(Nop),
- /* 652 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(123),
+ /* 652 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(124),
/* 661 S> */ B(Nop),
- /* 662 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(125),
+ /* 662 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(126),
/* 671 S> */ B(Nop),
- /* 672 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(127),
+ /* 672 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(128),
/* 681 S> */ B(Nop),
- /* 682 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(129),
+ /* 682 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(130),
/* 691 S> */ B(Nop),
- /* 692 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(131),
+ /* 692 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(132),
/* 701 S> */ B(Nop),
- /* 702 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(133),
+ /* 702 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(134),
/* 711 S> */ B(Nop),
- /* 712 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(135),
+ /* 712 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(136),
/* 721 S> */ B(Nop),
- /* 722 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(137),
+ /* 722 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(138),
/* 731 S> */ B(Nop),
- /* 732 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(139),
+ /* 732 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(140),
/* 741 S> */ B(Nop),
- /* 742 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(141),
+ /* 742 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(142),
/* 751 S> */ B(Nop),
- /* 752 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(143),
+ /* 752 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(144),
/* 761 S> */ B(Nop),
- /* 762 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(145),
+ /* 762 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(146),
/* 771 S> */ B(Nop),
- /* 772 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(147),
+ /* 772 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(148),
/* 781 S> */ B(Nop),
- /* 782 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(149),
+ /* 782 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(150),
/* 791 S> */ B(Nop),
- /* 792 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(151),
+ /* 792 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(152),
/* 801 S> */ B(Nop),
- /* 802 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(153),
+ /* 802 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(154),
/* 811 S> */ B(Nop),
- /* 812 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(155),
+ /* 812 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(156),
/* 821 S> */ B(Nop),
- /* 822 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(157),
+ /* 822 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(158),
/* 831 S> */ B(Nop),
- /* 832 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(159),
+ /* 832 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(160),
/* 841 S> */ B(Nop),
- /* 842 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(161),
+ /* 842 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(162),
/* 851 S> */ B(Nop),
- /* 852 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(163),
+ /* 852 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(164),
/* 861 S> */ B(Nop),
- /* 862 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(165),
+ /* 862 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(166),
/* 871 S> */ B(Nop),
- /* 872 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(167),
+ /* 872 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(168),
/* 881 S> */ B(Nop),
- /* 882 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(169),
+ /* 882 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(170),
/* 891 S> */ B(Nop),
- /* 892 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(171),
+ /* 892 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(172),
/* 901 S> */ B(Nop),
- /* 902 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(173),
+ /* 902 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(174),
/* 911 S> */ B(Nop),
- /* 912 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(175),
+ /* 912 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(176),
/* 921 S> */ B(Nop),
- /* 922 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(177),
+ /* 922 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(178),
/* 931 S> */ B(Nop),
- /* 932 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(179),
+ /* 932 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(180),
/* 941 S> */ B(Nop),
- /* 942 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(181),
+ /* 942 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(182),
/* 951 S> */ B(Nop),
- /* 952 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(183),
+ /* 952 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(184),
/* 961 S> */ B(Nop),
- /* 962 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(185),
+ /* 962 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(186),
/* 971 S> */ B(Nop),
- /* 972 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(187),
+ /* 972 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(188),
/* 981 S> */ B(Nop),
- /* 982 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(189),
+ /* 982 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(190),
/* 991 S> */ B(Nop),
- /* 992 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(191),
+ /* 992 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(192),
/* 1001 S> */ B(Nop),
- /* 1002 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(193),
+ /* 1002 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(194),
/* 1011 S> */ B(Nop),
- /* 1012 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(195),
+ /* 1012 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(196),
/* 1021 S> */ B(Nop),
- /* 1022 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(197),
+ /* 1022 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(198),
/* 1031 S> */ B(Nop),
- /* 1032 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(199),
+ /* 1032 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(200),
/* 1041 S> */ B(Nop),
- /* 1042 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(201),
+ /* 1042 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(202),
/* 1051 S> */ B(Nop),
- /* 1052 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(203),
+ /* 1052 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(204),
/* 1061 S> */ B(Nop),
- /* 1062 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(205),
+ /* 1062 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(206),
/* 1071 S> */ B(Nop),
- /* 1072 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(207),
+ /* 1072 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(208),
/* 1081 S> */ B(Nop),
- /* 1082 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(209),
+ /* 1082 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(210),
/* 1091 S> */ B(Nop),
- /* 1092 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(211),
+ /* 1092 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(212),
/* 1101 S> */ B(Nop),
- /* 1102 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(213),
+ /* 1102 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(214),
/* 1111 S> */ B(Nop),
- /* 1112 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(215),
+ /* 1112 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(216),
/* 1121 S> */ B(Nop),
- /* 1122 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(217),
+ /* 1122 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(218),
/* 1131 S> */ B(Nop),
- /* 1132 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(219),
+ /* 1132 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(220),
/* 1141 S> */ B(Nop),
- /* 1142 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(221),
+ /* 1142 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(222),
/* 1151 S> */ B(Nop),
- /* 1152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(223),
+ /* 1152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(224),
/* 1161 S> */ B(Nop),
- /* 1162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(225),
+ /* 1162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(226),
/* 1171 S> */ B(Nop),
- /* 1172 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(227),
+ /* 1172 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(228),
/* 1181 S> */ B(Nop),
- /* 1182 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(229),
+ /* 1182 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(230),
/* 1191 S> */ B(Nop),
- /* 1192 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(231),
+ /* 1192 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(232),
/* 1201 S> */ B(Nop),
- /* 1202 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(233),
+ /* 1202 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(234),
/* 1211 S> */ B(Nop),
- /* 1212 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(235),
+ /* 1212 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(236),
/* 1221 S> */ B(Nop),
- /* 1222 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(237),
+ /* 1222 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(238),
/* 1231 S> */ B(Nop),
- /* 1232 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(239),
+ /* 1232 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(240),
/* 1241 S> */ B(Nop),
- /* 1242 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(241),
+ /* 1242 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(242),
/* 1251 S> */ B(Nop),
- /* 1252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(243),
+ /* 1252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(244),
/* 1261 S> */ B(Nop),
- /* 1262 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(245),
+ /* 1262 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(246),
/* 1271 S> */ B(Nop),
- /* 1272 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(247),
+ /* 1272 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(248),
/* 1281 S> */ B(Nop),
- /* 1282 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(249),
+ /* 1282 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(250),
/* 1291 S> */ B(Nop),
- /* 1292 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(251),
+ /* 1292 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(252),
/* 1301 S> */ B(Nop),
- /* 1302 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(253),
+ /* 1302 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(254),
/* 1311 S> */ B(Nop),
- /* 1312 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(255),
+ /* 1312 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(256),
/* 1321 S> */ B(LdaSmi), U8(2),
- /* 1323 E> */ B(Wide), B(StaGlobalStrict), U16(1), U16(257),
+ /* 1323 E> */ B(Wide), B(StaGlobalStrict), U16(1), U16(258),
B(LdaUndefined),
/* 1328 S> */ B(Return),
]
constant pool: [
- "name",
- "a",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["name"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
index c28ac2a8c9..260153b8e6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: yes
---
@@ -20,7 +18,7 @@ bytecodes: [
/* 61 S> */ B(Return),
]
constant pool: [
- "This is a string",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["This is a string"],
]
handlers: [
]
@@ -40,8 +38,8 @@ bytecodes: [
/* 82 S> */ B(Return),
]
constant pool: [
- "First string",
- "Second string",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["First string"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["Second string"],
]
handlers: [
]
@@ -61,7 +59,7 @@ bytecodes: [
/* 79 S> */ B(Return),
]
constant pool: [
- "Same string",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["Same string"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
index bc16a7b964..9334dbebfd 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: yes
---
@@ -17,18 +15,18 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 32
+bytecode array length: 34
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- B(Star), R(1),
B(Star), R(0),
+ B(Star), R(1),
/* 45 S> */ B(LdaSmi), U8(1),
- B(TestEqualStrict), R(0),
+ B(TestEqualStrict), R(1), U8(2),
B(Mov), R(0), R(2),
- B(JumpIfToBooleanTrue), U8(10),
+ B(JumpIfToBooleanTrue), U8(11),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(2),
+ B(TestEqualStrict), R(2), U8(3),
B(JumpIfTrue), U8(7),
B(Jump), U8(8),
/* 66 S> */ B(LdaSmi), U8(2),
@@ -53,25 +51,25 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 38
+bytecode array length: 40
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- B(Star), R(1),
B(Star), R(0),
+ B(Star), R(1),
/* 45 S> */ B(LdaSmi), U8(1),
- B(TestEqualStrict), R(0),
+ B(TestEqualStrict), R(1), U8(2),
B(Mov), R(0), R(2),
- B(JumpIfToBooleanTrue), U8(10),
+ B(JumpIfToBooleanTrue), U8(11),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(2),
+ B(TestEqualStrict), R(2), U8(3),
B(JumpIfTrue), U8(10),
B(Jump), U8(14),
/* 66 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 73 S> */ B(Jump), U8(8),
/* 89 S> */ B(LdaSmi), U8(3),
- B(Star), R(1),
+ B(Star), R(0),
/* 96 S> */ B(Jump), U8(2),
B(LdaUndefined),
/* 105 S> */ B(Return),
@@ -91,24 +89,24 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 36
+bytecode array length: 38
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- B(Star), R(1),
B(Star), R(0),
+ B(Star), R(1),
/* 45 S> */ B(LdaSmi), U8(1),
- B(TestEqualStrict), R(0),
+ B(TestEqualStrict), R(1), U8(2),
B(Mov), R(0), R(2),
- B(JumpIfToBooleanTrue), U8(10),
+ B(JumpIfToBooleanTrue), U8(11),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(2),
+ B(TestEqualStrict), R(2), U8(3),
B(JumpIfTrue), U8(8),
B(Jump), U8(12),
/* 66 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 98 S> */ B(LdaSmi), U8(3),
- B(Star), R(1),
+ B(Star), R(0),
/* 105 S> */ B(Jump), U8(2),
B(LdaUndefined),
/* 114 S> */ B(Return),
@@ -129,24 +127,24 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 36
+bytecode array length: 38
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- B(Star), R(1),
B(Star), R(0),
+ B(Star), R(1),
/* 45 S> */ B(LdaSmi), U8(2),
- B(TestEqualStrict), R(0),
+ B(TestEqualStrict), R(1), U8(2),
B(Mov), R(0), R(2),
- B(JumpIfToBooleanTrue), U8(10),
+ B(JumpIfToBooleanTrue), U8(11),
B(LdaSmi), U8(3),
- B(TestEqualStrict), R(2),
+ B(TestEqualStrict), R(2), U8(3),
B(JumpIfTrue), U8(6),
B(Jump), U8(6),
/* 66 S> */ B(Jump), U8(10),
/* 82 S> */ B(Jump), U8(8),
/* 99 S> */ B(LdaSmi), U8(1),
- B(Star), R(1),
+ B(Star), R(0),
/* 106 S> */ B(Jump), U8(2),
B(LdaUndefined),
/* 115 S> */ B(Return),
@@ -167,29 +165,29 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 45
+bytecode array length: 47
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- B(Star), R(1),
- /* 42 E> */ B(TypeOf),
B(Star), R(0),
+ /* 42 E> */ B(TypeOf),
+ B(Star), R(1),
/* 45 S> */ B(LdaSmi), U8(2),
- B(TestEqualStrict), R(0),
- B(Mov), R(0), R(2),
- B(JumpIfToBooleanTrue), U8(10),
+ B(TestEqualStrict), R(1), U8(2),
+ B(Mov), R(1), R(2),
+ B(JumpIfToBooleanTrue), U8(11),
B(LdaSmi), U8(3),
- B(TestEqualStrict), R(2),
+ B(TestEqualStrict), R(2), U8(3),
B(JumpIfTrue), U8(10),
B(Jump), U8(14),
/* 74 S> */ B(LdaSmi), U8(1),
- B(Star), R(1),
+ B(Star), R(0),
/* 81 S> */ B(Jump), U8(14),
/* 97 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 104 S> */ B(Jump), U8(8),
/* 121 S> */ B(LdaSmi), U8(3),
- B(Star), R(1),
+ B(Star), R(0),
/* 128 S> */ B(Jump), U8(2),
B(LdaUndefined),
/* 137 S> */ B(Return),
@@ -209,22 +207,22 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 31
+bytecode array length: 32
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- B(Star), R(1),
B(Star), R(0),
+ B(Star), R(1),
/* 45 S> */ B(TypeOf),
- B(TestEqualStrict), R(0),
+ B(TestEqualStrict), R(1), U8(2),
B(Mov), R(0), R(2),
B(JumpIfToBooleanTrue), U8(4),
B(Jump), U8(8),
/* 74 S> */ B(LdaSmi), U8(1),
- B(Star), R(1),
+ B(Star), R(0),
/* 81 S> */ B(Jump), U8(8),
/* 98 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 105 S> */ B(Jump), U8(2),
B(LdaUndefined),
/* 114 S> */ B(Return),
@@ -311,158 +309,158 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 290
+bytecode array length: 292
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- B(Star), R(1),
B(Star), R(0),
+ B(Star), R(1),
/* 45 S> */ B(LdaSmi), U8(1),
- B(TestEqualStrict), R(0),
+ B(TestEqualStrict), R(1), U8(2),
B(Mov), R(0), R(2),
- B(JumpIfToBooleanTrue), U8(10),
+ B(JumpIfToBooleanTrue), U8(11),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(2),
+ B(TestEqualStrict), R(2), U8(3),
B(JumpIfTrueConstant), U8(0),
B(JumpConstant), U8(1),
/* 68 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 77 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 86 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 95 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 104 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 113 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 122 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 131 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 140 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 149 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 158 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 167 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 176 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 185 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 194 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 203 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 212 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 221 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 230 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 239 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 248 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 257 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 266 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 275 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 284 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 293 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 302 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 311 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 320 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 329 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 338 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 347 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 356 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 365 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 374 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 383 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 392 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 401 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 410 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 419 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 428 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 437 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 446 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 455 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 464 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 473 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 482 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 491 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 500 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 509 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 518 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 527 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 536 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 545 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 554 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 563 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 572 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 581 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 590 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 599 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 608 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 617 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 626 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 635 S> */ B(LdaSmi), U8(2),
- B(Star), R(1),
+ B(Star), R(0),
/* 644 S> */ B(Jump), U8(8),
/* 662 S> */ B(LdaSmi), U8(3),
- B(Star), R(1),
+ B(Star), R(0),
/* 671 S> */ B(Jump), U8(2),
B(LdaUndefined),
/* 680 S> */ B(Return),
]
constant pool: [
- 262,
- 266,
+ Smi [262],
+ Smi [266],
]
handlers: [
]
@@ -481,35 +479,35 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 59
+bytecode array length: 62
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- B(Star), R(2),
B(Star), R(0),
+ B(Star), R(2),
/* 45 S> */ B(LdaSmi), U8(1),
- B(TestEqualStrict), R(0),
+ B(TestEqualStrict), R(2), U8(5),
B(Mov), R(0), R(3),
- B(JumpIfToBooleanTrue), U8(10),
+ B(JumpIfToBooleanTrue), U8(11),
B(LdaSmi), U8(2),
- B(TestEqualStrict), R(3),
- B(JumpIfTrue), U8(33),
- B(Jump), U8(35),
- /* 77 E> */ B(AddSmi), U8(1), R(2), U8(1),
+ B(TestEqualStrict), R(3), U8(6),
+ B(JumpIfTrue), U8(34),
+ B(Jump), U8(36),
+ /* 77 E> */ B(AddSmi), U8(1), R(0), U8(2),
B(Star), R(1),
/* 70 S> */ B(LdaSmi), U8(2),
- B(TestEqualStrict), R(1),
+ B(TestEqualStrict), R(1), U8(3),
B(Mov), R(1), R(4),
B(JumpIfToBooleanTrue), U8(4),
B(Jump), U8(8),
/* 101 S> */ B(LdaSmi), U8(1),
- B(Star), R(2),
+ B(Star), R(0),
/* 108 S> */ B(Jump), U8(8),
/* 131 S> */ B(LdaSmi), U8(2),
- B(Star), R(2),
+ B(Star), R(0),
/* 138 S> */ B(Jump), U8(2),
/* 176 S> */ B(LdaSmi), U8(3),
- B(Star), R(2),
+ B(Star), R(0),
B(LdaUndefined),
/* 185 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
index 582c087341..5d1c7c9143 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: no
test function name: f
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
index 4e7a0bc225..15afe2f4eb 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: yes
---
@@ -37,7 +35,7 @@ bytecodes: [
/* 34 E> */ B(Throw),
]
constant pool: [
- "Error",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["Error"],
]
handlers: [
]
@@ -60,7 +58,7 @@ bytecodes: [
/* 72 S> */ B(Return),
]
constant pool: [
- "Error",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["Error"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
index 03f8b5496d..59052b85d8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
@@ -3,8 +3,6 @@
#
---
-pool type: mixed
-execute: no
wrap: no
top level: yes
@@ -29,18 +27,18 @@ bytecodes: [
B(CreateObjectLiteral), U8(2), U8(0), U8(1), R(4),
B(Star), R(2),
B(CreateClosure), U8(3), U8(0),
- B(StaNamedPropertySloppy), R(4), U8(4), U8(3),
+ B(StaNamedPropertySloppy), R(4), U8(4), U8(4),
B(Mov), R(4), R(3),
B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
B(LdaUndefined),
/* 33 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["func"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
index 17d4ef0fd4..c2f6113e71 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: yes
---
@@ -13,16 +11,16 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 34
+bytecode array length: 35
bytecodes: [
/* 30 E> */ B(StackCheck),
B(Mov), R(context), R(1),
/* 40 S> */ B(LdaSmi), U8(1),
/* 75 S> */ B(Return),
- B(Jump), U8(25),
+ B(Jump), U8(26),
B(Star), R(2),
B(Ldar), R(closure),
- B(CreateCatchContext), R(2), U8(0),
+ B(CreateCatchContext), R(2), U8(0), U8(1),
B(Star), R(1),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Ldar), R(1),
@@ -34,7 +32,8 @@ bytecodes: [
/* 75 S> */ B(Return),
]
constant pool: [
- "e",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["e"],
+ FIXED_ARRAY_TYPE,
]
handlers: [
[4, 7, 9],
@@ -48,16 +47,16 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 65
+bytecode array length: 67
bytecodes: [
/* 30 E> */ B(StackCheck),
B(Mov), R(context), R(2),
/* 47 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
- B(Jump), U8(22),
+ B(Jump), U8(23),
B(Star), R(3),
B(Ldar), R(closure),
- /* 49 E> */ B(CreateCatchContext), R(3), U8(0),
+ /* 49 E> */ B(CreateCatchContext), R(3), U8(0), U8(1),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Ldar), R(2),
@@ -66,10 +65,10 @@ bytecodes: [
B(Mov), R(context), R(2),
/* 75 S> */ B(LdaSmi), U8(2),
B(Star), R(0),
- B(Jump), U8(26),
+ B(Jump), U8(27),
B(Star), R(3),
B(Ldar), R(closure),
- /* 77 E> */ B(CreateCatchContext), R(3), U8(1),
+ /* 77 E> */ B(CreateCatchContext), R(3), U8(2), U8(3),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Ldar), R(2),
@@ -81,11 +80,13 @@ bytecodes: [
/* 103 S> */ B(Return),
]
constant pool: [
- "e1",
- "e2",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["e1"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["e2"],
+ FIXED_ARRAY_TYPE,
]
handlers: [
[4, 8, 10],
- [33, 37, 39],
+ [34, 38, 40],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
index a42f90c844..61deb6e69c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: yes
---
@@ -14,7 +12,7 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 51
+bytecode array length: 52
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
@@ -34,7 +32,7 @@ bytecodes: [
B(Star), R(0),
/* 72 E> */ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(3), U8(1),
B(LdaZero),
- B(TestEqualStrict), R(1),
+ B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(4),
B(Jump), U8(5),
B(Ldar), R(2),
@@ -55,7 +53,7 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 80
+bytecode array length: 82
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
@@ -64,10 +62,10 @@ bytecodes: [
B(Mov), R(context), R(5),
/* 51 S> */ B(LdaSmi), U8(2),
B(Star), R(0),
- B(Jump), U8(26),
+ B(Jump), U8(27),
B(Star), R(6),
B(Ldar), R(closure),
- /* 53 E> */ B(CreateCatchContext), R(6), U8(0),
+ /* 53 E> */ B(CreateCatchContext), R(6), U8(0), U8(1),
B(Star), R(5),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Ldar), R(5),
@@ -87,7 +85,7 @@ bytecodes: [
B(Star), R(0),
/* 92 E> */ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
B(LdaZero),
- B(TestEqualStrict), R(2),
+ B(TestEqualStrict), R(2), U8(0),
B(JumpIfTrue), U8(4),
B(Jump), U8(5),
B(Ldar), R(3),
@@ -96,10 +94,11 @@ bytecodes: [
/* 99 S> */ B(Return),
]
constant pool: [
- "e",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["e"],
+ FIXED_ARRAY_TYPE,
]
handlers: [
- [8, 41, 47],
+ [8, 42, 48],
[11, 15, 17],
]
@@ -111,7 +110,7 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 105
+bytecode array length: 108
bytecodes: [
/* 30 E> */ B(StackCheck),
B(Mov), R(context), R(4),
@@ -119,10 +118,10 @@ bytecodes: [
B(Mov), R(context), R(6),
/* 55 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
- B(Jump), U8(26),
+ B(Jump), U8(27),
B(Star), R(7),
B(Ldar), R(closure),
- /* 57 E> */ B(CreateCatchContext), R(7), U8(0),
+ /* 57 E> */ B(CreateCatchContext), R(7), U8(0), U8(1),
B(Star), R(6),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Ldar), R(6),
@@ -130,10 +129,10 @@ bytecodes: [
/* 74 S> */ B(LdaSmi), U8(2),
B(Star), R(0),
B(PopContext), R(1),
- B(Jump), U8(26),
+ B(Jump), U8(27),
B(Star), R(6),
B(Ldar), R(closure),
- /* 76 E> */ B(CreateCatchContext), R(6), U8(0),
+ /* 76 E> */ B(CreateCatchContext), R(6), U8(0), U8(2),
B(Star), R(5),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Ldar), R(5),
@@ -153,7 +152,7 @@ bytecodes: [
B(Star), R(0),
/* 116 E> */ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
B(LdaZero),
- B(TestEqualStrict), R(2),
+ B(TestEqualStrict), R(2), U8(0),
B(JumpIfTrue), U8(4),
B(Jump), U8(5),
B(Ldar), R(3),
@@ -162,11 +161,13 @@ bytecodes: [
/* 123 S> */ B(Return),
]
constant pool: [
- "e",
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["e"],
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
]
handlers: [
- [4, 66, 72],
- [7, 40, 42],
+ [4, 68, 74],
+ [7, 41, 43],
[10, 14, 16],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
index 1fe9354b6f..5e15e5981e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
@@ -3,8 +3,6 @@
#
---
-pool type: string
-execute: yes
wrap: no
test function name: f
@@ -44,7 +42,7 @@ parameter count: 1
bytecode array length: 5
bytecodes: [
/* 22 E> */ B(StackCheck),
- /* 28 S> */ B(LdaGlobalInsideTypeof), U8(1),
+ /* 28 S> */ B(LdaGlobalInsideTypeof), U8(2),
B(TypeOf),
/* 46 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
index 0e2c767256..c9f8790384 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: yes
---
@@ -17,18 +15,18 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 22
+bytecode array length: 24
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 54 S> */ B(LdaSmi), U8(10),
- /* 54 E> */ B(TestEqual), R(0),
- B(JumpIfTrue), U8(11),
+ /* 54 E> */ B(TestEqual), R(0), U8(2),
+ B(JumpIfTrue), U8(12),
/* 45 E> */ B(StackCheck),
- /* 65 S> */ B(AddSmi), U8(10), R(0), U8(1),
+ /* 65 S> */ B(AddSmi), U8(10), R(0), U8(3),
B(Star), R(0),
- B(Jump), U8(-13),
+ B(JumpLoop), U8(-14), U8(0),
/* 79 S> */ B(Ldar), R(0),
/* 89 S> */ B(Return),
]
@@ -47,7 +45,7 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 18
+bytecode array length: 22
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaFalse),
@@ -57,8 +55,9 @@ bytecodes: [
B(ToBooleanLogicalNot),
B(Star), R(0),
/* 74 S> */ B(LdaFalse),
- /* 74 E> */ B(TestEqual), R(0),
- B(JumpIfTrue), U8(-9),
+ /* 74 E> */ B(TestEqual), R(0), U8(2),
+ B(JumpIfFalse), U8(5),
+ B(JumpLoop), U8(-12), U8(0),
/* 85 S> */ B(Ldar), R(0),
/* 95 S> */ B(Return),
]
@@ -80,7 +79,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(101),
B(Star), R(0),
/* 47 S> */ B(LdaSmi), U8(3),
- B(Mul), R(0), U8(1),
+ B(Mul), R(0), U8(2),
B(LdaUndefined),
/* 67 S> */ B(Return),
]
@@ -95,7 +94,7 @@ snippet: "
var y = void (x * x - 1);
return y;
"
-frame size: 4
+frame size: 3
parameter count: 1
bytecode array length: 23
bytecodes: [
@@ -103,9 +102,9 @@ bytecodes: [
/* 42 S> */ B(Wide), B(LdaSmi), U16(1234),
B(Star), R(0),
/* 56 S> */ B(Nop),
- /* 66 E> */ B(Mul), R(0), U8(1),
- B(Star), R(3),
- B(SubSmi), U8(1), R(3), U8(2),
+ /* 66 E> */ B(Mul), R(0), U8(2),
+ B(Star), R(2),
+ B(SubSmi), U8(1), R(2), U8(3),
B(LdrUndefined), R(1),
B(Ldar), R(1),
/* 74 S> */ B(Nop),
@@ -129,7 +128,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(13),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), U8(-1),
- B(BitwiseXor), R(0), U8(1),
+ B(BitwiseXor), R(0), U8(2),
/* 57 S> */ B(Return),
]
constant pool: [
@@ -150,7 +149,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(13),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), U8(1),
- B(Mul), R(0), U8(1),
+ B(Mul), R(0), U8(2),
/* 57 S> */ B(Return),
]
constant pool: [
@@ -171,7 +170,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(13),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), U8(-1),
- B(Mul), R(0), U8(1),
+ B(Mul), R(0), U8(2),
/* 57 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
index a39a1cf6aa..fc7d322e60 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
@@ -3,8 +3,6 @@
#
---
-pool type: number
-execute: yes
wrap: yes
---
@@ -523,11 +521,11 @@ snippet: "
"
frame size: 157
parameter count: 1
-bytecode array length: 17
+bytecode array length: 18
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 1494 S> */ B(LdaSmi), U8(3),
- /* 1501 E> */ B(TestGreaterThan), R(2),
+ /* 1501 E> */ B(TestGreaterThan), R(2), U8(2),
B(JumpIfFalse), U8(7),
/* 1508 S> */ B(Wide), B(Ldar), R16(129),
/* 1536 S> */ B(Return),
@@ -705,18 +703,18 @@ snippet: "
"
frame size: 157
parameter count: 1
-bytecode array length: 34
+bytecode array length: 37
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 1503 S> */ B(LdaZero),
B(Star), R(0),
/* 1506 S> */ B(LdaSmi), U8(3),
- /* 1515 E> */ B(Wide), B(TestEqual), R16(129),
+ /* 1515 E> */ B(Wide), B(TestEqual), R16(129), U16(2),
B(JumpIfFalse), U8(10),
/* 1534 S> */ B(Wide), B(Mov), R16(0), R16(129),
B(Ldar), R(0),
/* 1540 S> */ B(LdaSmi), U8(3),
- /* 1547 E> */ B(TestGreaterThan), R(2),
+ /* 1547 E> */ B(TestGreaterThan), R(2), U8(3),
B(JumpIfFalse), U8(5),
/* 1554 S> */ B(Ldar), R(0),
/* 1580 S> */ B(Return),
@@ -893,7 +891,7 @@ snippet: "
"
frame size: 158
parameter count: 1
-bytecode array length: 53
+bytecode array length: 56
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 1503 S> */ B(LdaZero),
@@ -903,17 +901,17 @@ bytecodes: [
/* 1523 S> */ B(LdaZero),
B(Wide), B(Star), R16(128),
/* 1538 S> */ B(LdaSmi), U8(64),
- /* 1538 E> */ B(Wide), B(TestLessThan), R16(128),
- B(JumpIfFalse), U8(30),
+ /* 1538 E> */ B(Wide), B(TestLessThan), R16(128), U16(2),
+ B(JumpIfFalse), U8(31),
/* 1518 E> */ B(StackCheck),
/* 1555 S> */ B(Wide), B(Ldar), R16(128),
- /* 1561 E> */ B(Add), R(1), U8(2),
+ /* 1561 E> */ B(Add), R(1), U8(4),
B(Wide), B(Mov), R16(1), R16(157),
B(Star), R(1),
/* 1548 S> */ B(Wide), B(Ldar), R16(128),
- B(Inc), U8(1),
+ B(Inc), U8(3),
B(Wide), B(Star), R16(128),
- B(Jump), U8(-34),
+ B(JumpLoop), U8(-36), U8(0),
/* 1567 S> */ B(Wide), B(Ldar), R16(128),
/* 1580 S> */ B(Return),
]
@@ -1087,7 +1085,7 @@ snippet: "
"
frame size: 163
parameter count: 1
-bytecode array length: 84
+bytecode array length: 85
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 1503 S> */ B(Wide), B(LdaSmi), U16(1234),
@@ -1095,25 +1093,25 @@ bytecodes: [
/* 1518 S> */ B(LdaZero),
B(Star), R(1),
/* 1534 S> */ B(Ldar), R(0),
- B(JumpIfUndefined), U8(69),
- B(JumpIfNull), U8(67),
+ B(JumpIfUndefined), U8(70),
+ B(JumpIfNull), U8(68),
B(Wide), B(ToObject), R16(157),
B(Wide), B(ForInPrepare), R16(157), R16(158),
B(LdaZero),
B(Wide), B(Star), R16(161),
- /* 1526 S> */ B(Wide), B(ForInDone), R16(161), R16(160),
- B(JumpIfTrue), U8(44),
- B(Wide), B(ForInNext), R16(157), R16(161), R16(158), U16(2),
+ /* 1526 S> */ B(Wide), B(ForInContinue), R16(161), R16(160),
+ B(JumpIfFalse), U8(45),
+ B(Wide), B(ForInNext), R16(157), R16(161), R16(158), U16(3),
B(JumpIfUndefined), U8(22),
B(Wide), B(Star), R16(128),
/* 1521 E> */ B(StackCheck),
/* 1541 S> */ B(Wide), B(Ldar), R16(128),
- /* 1547 E> */ B(Add), R(1), U8(1),
+ /* 1547 E> */ B(Add), R(1), U8(2),
B(Wide), B(Mov), R16(1), R16(162),
B(Star), R(1),
/* 1544 E> */ B(Wide), B(ForInStep), R16(161),
B(Wide), B(Star), R16(161),
- B(Jump), U8(-48),
+ B(JumpLoop), U8(-48), U8(0),
/* 1553 S> */ B(Ldar), R(1),
/* 1564 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
index b5a0df5da2..963c71f184 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
@@ -3,32 +3,31 @@
#
---
-pool type: mixed
-execute: yes
wrap: yes
---
snippet: "
with ({x:42}) { return x; }
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 21
+bytecode array length: 22
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
B(Ldar), R(1),
- B(ToObject), R(2),
+ B(ToObject), R(1),
B(Ldar), R(closure),
- B(CreateWithContext), R(2),
+ B(CreateWithContext), R(1), U8(1),
B(PushContext), R(0),
- /* 50 S> */ B(LdaLookupSlot), U8(1),
+ /* 50 S> */ B(LdaLookupSlot), U8(2),
B(PopContext), R(0),
/* 62 S> */ B(Return),
]
constant pool: [
- InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index dd03c24b3c..e5dca853a5 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -13,7 +13,6 @@
#include "include/v8.h"
#include "src/base/logging.h"
-#include "src/compiler.h"
#include "src/interpreter/interpreter.h"
#ifdef V8_OS_POSIX
@@ -41,12 +40,10 @@ class ProgramOptions final {
read_from_stdin_(false),
rebaseline_(false),
wrap_(true),
- execute_(true),
+ module_(false),
top_level_(false),
do_expressions_(false),
- verbose_(false),
- const_pool_type_(
- BytecodeExpectationsPrinter::ConstantPoolType::kMixed) {}
+ verbose_(false) {}
bool Validate() const;
void UpdateFromHeader(std::istream& stream); // NOLINT
@@ -61,14 +58,11 @@ class ProgramOptions final {
}
bool rebaseline() const { return rebaseline_; }
bool wrap() const { return wrap_; }
- bool execute() const { return execute_; }
+ bool module() const { return module_; }
bool top_level() const { return top_level_; }
bool do_expressions() const { return do_expressions_; }
bool verbose() const { return verbose_; }
bool suppress_runtime_errors() const { return rebaseline_ && !verbose_; }
- BytecodeExpectationsPrinter::ConstantPoolType const_pool_type() const {
- return const_pool_type_;
- }
std::vector<std::string> input_filenames() const { return input_filenames_; }
std::string output_filename() const { return output_filename_; }
std::string test_function_name() const { return test_function_name_; }
@@ -80,11 +74,10 @@ class ProgramOptions final {
bool read_from_stdin_;
bool rebaseline_;
bool wrap_;
- bool execute_;
+ bool module_;
bool top_level_;
bool do_expressions_;
bool verbose_;
- BytecodeExpectationsPrinter::ConstantPoolType const_pool_type_;
std::vector<std::string> input_filenames_;
std::string output_filename_;
std::string test_function_name_;
@@ -106,33 +99,6 @@ class V8InitializationScope final {
DISALLOW_COPY_AND_ASSIGN(V8InitializationScope);
};
-BytecodeExpectationsPrinter::ConstantPoolType ParseConstantPoolType(
- const char* type_string) {
- if (strcmp(type_string, "number") == 0) {
- return BytecodeExpectationsPrinter::ConstantPoolType::kNumber;
- } else if (strcmp(type_string, "string") == 0) {
- return BytecodeExpectationsPrinter::ConstantPoolType::kString;
- } else if (strcmp(type_string, "mixed") == 0) {
- return BytecodeExpectationsPrinter::ConstantPoolType::kMixed;
- }
- return BytecodeExpectationsPrinter::ConstantPoolType::kUnknown;
-}
-
-const char* ConstantPoolTypeToString(
- BytecodeExpectationsPrinter::ConstantPoolType type) {
- switch (type) {
- case BytecodeExpectationsPrinter::ConstantPoolType::kNumber:
- return "number";
- case BytecodeExpectationsPrinter::ConstantPoolType::kMixed:
- return "mixed";
- case BytecodeExpectationsPrinter::ConstantPoolType::kString:
- return "string";
- default:
- UNREACHABLE();
- return nullptr;
- }
-}
-
bool ParseBoolean(const char* string) {
if (strcmp(string, "yes") == 0) {
return true;
@@ -161,15 +127,14 @@ bool CollectGoldenFiles(std::vector<std::string>* golden_file_list,
DIR* directory = opendir(directory_path);
if (!directory) return false;
- dirent entry_buffer;
- dirent* entry;
-
- while (readdir_r(directory, &entry_buffer, &entry) == 0 && entry) {
+ dirent* entry = readdir(directory);
+ while (entry) {
if (StrEndsWith(entry->d_name, ".golden")) {
std::string golden_filename(kGoldenFilesPath);
golden_filename += entry->d_name;
golden_file_list->push_back(golden_filename);
}
+ entry = readdir(directory);
}
closedir(directory);
@@ -188,16 +153,14 @@ ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
options.print_help_ = true;
} else if (strcmp(argv[i], "--raw-js") == 0) {
options.read_raw_js_snippet_ = true;
- } else if (strncmp(argv[i], "--pool-type=", 12) == 0) {
- options.const_pool_type_ = ParseConstantPoolType(argv[i] + 12);
} else if (strcmp(argv[i], "--stdin") == 0) {
options.read_from_stdin_ = true;
} else if (strcmp(argv[i], "--rebaseline") == 0) {
options.rebaseline_ = true;
} else if (strcmp(argv[i], "--no-wrap") == 0) {
options.wrap_ = false;
- } else if (strcmp(argv[i], "--no-execute") == 0) {
- options.execute_ = false;
+ } else if (strcmp(argv[i], "--module") == 0) {
+ options.module_ = true;
} else if (strcmp(argv[i], "--top-level") == 0) {
options.top_level_ = true;
} else if (strcmp(argv[i], "--do-expressions") == 0) {
@@ -239,12 +202,6 @@ bool ProgramOptions::Validate() const {
if (parsing_failed_) return false;
if (print_help_) return true;
- if (const_pool_type_ ==
- BytecodeExpectationsPrinter::ConstantPoolType::kUnknown) {
- REPORT_ERROR("Unknown constant pool type.");
- return false;
- }
-
if (!read_from_stdin_ && input_filenames_.empty()) {
REPORT_ERROR("No input file specified.");
return false;
@@ -282,6 +239,12 @@ bool ProgramOptions::Validate() const {
return false;
}
+ if (module_ && (!top_level_ || wrap_)) {
+ REPORT_ERROR(
+ "The flag --module currently requires --top-level and --no-wrap.");
+ return false;
+ }
+
return true;
}
@@ -294,10 +257,8 @@ void ProgramOptions::UpdateFromHeader(std::istream& stream) {
}
while (std::getline(stream, line)) {
- if (line.compare(0, 11, "pool type: ") == 0) {
- const_pool_type_ = ParseConstantPoolType(line.c_str() + 11);
- } else if (line.compare(0, 9, "execute: ") == 0) {
- execute_ = ParseBoolean(line.c_str() + 9);
+ if (line.compare(0, 8, "module: ") == 0) {
+ module_ = ParseBoolean(line.c_str() + 8);
} else if (line.compare(0, 6, "wrap: ") == 0) {
wrap_ = ParseBoolean(line.c_str() + 6);
} else if (line.compare(0, 20, "test function name: ") == 0) {
@@ -319,15 +280,13 @@ void ProgramOptions::UpdateFromHeader(std::istream& stream) {
void ProgramOptions::PrintHeader(std::ostream& stream) const { // NOLINT
stream << "---"
- "\npool type: "
- << ConstantPoolTypeToString(const_pool_type_)
- << "\nexecute: " << BooleanToString(execute_)
<< "\nwrap: " << BooleanToString(wrap_);
if (!test_function_name_.empty()) {
stream << "\ntest function name: " << test_function_name_;
}
+ if (module_) stream << "\nmodule: yes";
if (top_level_) stream << "\ntop level: yes";
if (do_expressions_) stream << "\ndo expressions: yes";
@@ -425,10 +384,9 @@ void GenerateExpectationsFile(std::ostream& stream, // NOLINT
v8::Local<v8::Context> context = v8::Context::New(platform.isolate());
v8::Context::Scope context_scope(context);
- BytecodeExpectationsPrinter printer(platform.isolate(),
- options.const_pool_type());
+ BytecodeExpectationsPrinter printer(platform.isolate());
printer.set_wrap(options.wrap());
- printer.set_execute(options.execute());
+ printer.set_module(options.module());
printer.set_top_level(options.top_level());
if (!options.test_function_name().empty()) {
printer.set_test_function_name(options.test_function_name());
@@ -482,7 +440,7 @@ void PrintUsage(const char* exec_path) {
" --stdin Read from standard input instead of file.\n"
" --rebaseline Rebaseline input snippet file.\n"
" --no-wrap Do not wrap the snippet in a function.\n"
- " --no-execute Do not execute after compilation.\n"
+ " --module Compile as JavaScript module.\n"
" --test-function-name=foo "
"Specify the name of the test function.\n"
" --top-level Process top level code, not the top-level function.\n"
@@ -494,9 +452,9 @@ void PrintUsage(const char* exec_path) {
" Specify the type of the entries in the constant pool "
"(default: mixed).\n"
"\n"
- "When using --rebaseline, flags --no-wrap, --no-execute, "
- "--test-function-name\nand --pool-type will be overridden by the "
- "options specified in the input file\nheader.\n\n"
+ "When using --rebaseline, flags --no-wrap, --test-function-name \n"
+ "and --pool-type will be overridden by the options specified in \n"
+ "the input file header.\n\n"
"Each raw JavaScript file is interpreted as a single snippet.\n\n"
"This tool is intended as a help in writing tests.\n"
"Please, DO NOT blindly copy and paste the output "
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index d82bad228b..fbcd297dd6 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -6,7 +6,6 @@
#include "src/v8.h"
-#include "src/compiler.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/interpreter.h"
@@ -57,12 +56,13 @@ namespace interpreter {
#define REPEAT_64_UNIQUE_VARS() REPEAT_32_UNIQUE_VARS() REPEAT_32_UNIQUE_VARS()
#define REPEAT_128_UNIQUE_VARS() REPEAT_64_UNIQUE_VARS() REPEAT_64_UNIQUE_VARS()
-#define REPEAT_249_UNIQUE_VARS() \
+#define REPEAT_250_UNIQUE_VARS() \
REPEAT_128_UNIQUE_VARS() \
REPEAT_64_UNIQUE_VARS() \
REPEAT_32_UNIQUE_VARS() \
REPEAT_16_UNIQUE_VARS() \
REPEAT_8_UNIQUE_VARS() \
+ UNIQUE_VAR() \
UNIQUE_VAR()
static const char* kGoldenFileDirectory =
@@ -72,7 +72,6 @@ class InitializedIgnitionHandleScope : public InitializedHandleScope {
public:
InitializedIgnitionHandleScope() {
i::FLAG_ignition = true;
- i::FLAG_ignition_osr = false; // TODO(4764): Disabled for now.
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
CcTest::i_isolate()->interpreter()->Initialize();
@@ -152,12 +151,9 @@ bool CompareTexts(const std::string& generated, const std::string& expected) {
} while (true);
}
-using ConstantPoolType = BytecodeExpectationsPrinter::ConstantPoolType;
-
TEST(PrimitiveReturnStatements) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"",
@@ -188,8 +184,7 @@ TEST(PrimitiveReturnStatements) {
TEST(PrimitiveExpressions) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var x = 0; return x;\n",
@@ -224,8 +219,7 @@ TEST(PrimitiveExpressions) {
TEST(LogicalExpressions) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var x = 0; return x || 3;\n",
@@ -268,8 +262,7 @@ TEST(LogicalExpressions) {
TEST(Parameters) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -295,8 +288,7 @@ TEST(Parameters) {
TEST(IntegerConstants) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return 12345678;\n",
@@ -311,8 +303,7 @@ TEST(IntegerConstants) {
TEST(HeapNumberConstants) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return 1.2;\n",
@@ -331,8 +322,7 @@ TEST(HeapNumberConstants) {
TEST(StringConstants) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return \"This is a string\";\n",
@@ -347,8 +337,7 @@ TEST(StringConstants) {
TEST(PropertyLoads) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -391,8 +380,7 @@ TEST(PropertyLoads) {
TEST(PropertyStores) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -457,8 +445,7 @@ TEST(PropertyStores) {
TEST(PropertyCall) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -485,8 +472,7 @@ TEST(PropertyCall) {
TEST(LoadGlobal) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -518,8 +504,7 @@ TEST(LoadGlobal) {
TEST(StoreGlobal) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -563,8 +548,7 @@ TEST(StoreGlobal) {
TEST(CallGlobal) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -584,8 +568,7 @@ TEST(CallGlobal) {
TEST(CallRuntime) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -609,8 +592,7 @@ TEST(CallRuntime) {
TEST(IfConditions) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -724,11 +706,9 @@ TEST(IfConditions) {
TEST(DeclareGlobals) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
- printer.set_execute(false);
printer.set_top_level(true);
const char* snippets[] = {
@@ -749,8 +729,7 @@ TEST(DeclareGlobals) {
TEST(BreakableBlocks) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var x = 0;\n"
@@ -796,8 +775,7 @@ TEST(BreakableBlocks) {
TEST(BasicLoops) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var x = 0;\n"
"while (false) { x = 99; break; continue; }\n"
@@ -950,8 +928,7 @@ TEST(BasicLoops) {
TEST(JumpsRequiringConstantWideOperands) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
REPEAT_256("var x = 0.1;\n")
REPEAT_32("var x = 0.2;\n")
@@ -970,8 +947,7 @@ TEST(JumpsRequiringConstantWideOperands) {
TEST(UnaryOperators) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var x = 0;\n"
"while (x != 10) {\n"
@@ -1008,8 +984,7 @@ TEST(UnaryOperators) {
TEST(Typeof) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -1031,8 +1006,7 @@ TEST(Typeof) {
TEST(Delete) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = {x:13, y:14}; return delete a.x;\n",
@@ -1057,8 +1031,7 @@ TEST(Delete) {
TEST(GlobalDelete) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -1095,8 +1068,7 @@ TEST(GlobalDelete) {
TEST(FunctionLiterals) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return function(){ }\n",
@@ -1112,8 +1084,7 @@ TEST(FunctionLiterals) {
TEST(RegExpLiterals) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return /ab+d/;\n",
@@ -1129,8 +1100,7 @@ TEST(RegExpLiterals) {
TEST(RegExpLiteralsWide) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a;" //
@@ -1144,8 +1114,7 @@ TEST(RegExpLiteralsWide) {
TEST(ArrayLiterals) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return [ 1, 2 ];\n",
@@ -1163,8 +1132,7 @@ TEST(ArrayLiterals) {
TEST(ArrayLiteralsWide) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a;" //
@@ -1178,8 +1146,7 @@ TEST(ArrayLiteralsWide) {
TEST(ObjectLiterals) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return { };\n",
@@ -1219,8 +1186,7 @@ TEST(ObjectLiterals) {
TEST(ObjectLiteralsWide) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a;" //
REPEAT_256("\na = 1.23;") //
@@ -1233,11 +1199,9 @@ TEST(ObjectLiteralsWide) {
TEST(TopLevelObjectLiterals) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
- printer.set_execute(false);
printer.set_top_level(true);
const char* snippets[] = {
@@ -1250,8 +1214,7 @@ TEST(TopLevelObjectLiterals) {
TEST(TryCatch) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"try { return 1; } catch(e) { return 2; }\n",
@@ -1267,8 +1230,7 @@ TEST(TryCatch) {
TEST(TryFinally) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = 1;\n"
"try { a = 2; } finally { a = 3; }\n",
@@ -1287,8 +1249,7 @@ TEST(TryFinally) {
TEST(Throw) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"throw 1;\n",
@@ -1303,8 +1264,7 @@ TEST(Throw) {
TEST(CallNew) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -1338,8 +1298,7 @@ TEST(ContextVariables) {
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS + 3 + 249 == 256);
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a; return function() { a = 1; };\n",
@@ -1354,7 +1313,7 @@ TEST(ContextVariables) {
"{ let b = 2; return function() { a + b; }; }\n",
"'use strict';\n"
- REPEAT_249_UNIQUE_VARS()
+ REPEAT_250_UNIQUE_VARS()
"eval();\n"
"var b = 100;\n"
"return b\n",
@@ -1366,8 +1325,7 @@ TEST(ContextVariables) {
TEST(ContextParameters) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -1387,8 +1345,7 @@ TEST(ContextParameters) {
TEST(OuterContextVariables) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -1418,8 +1375,7 @@ TEST(OuterContextVariables) {
TEST(CountOperators) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = 1; return ++a;\n",
@@ -1450,8 +1406,7 @@ TEST(CountOperators) {
TEST(GlobalCountOperators) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -1479,8 +1434,7 @@ TEST(GlobalCountOperators) {
TEST(CompoundExpressions) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = 1; a += 2;\n",
@@ -1499,8 +1453,7 @@ TEST(CompoundExpressions) {
TEST(GlobalCompoundExpressions) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -1520,8 +1473,7 @@ TEST(GlobalCompoundExpressions) {
TEST(CreateArguments) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -1545,8 +1497,7 @@ TEST(CreateArguments) {
TEST(CreateRestParameter) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -1566,8 +1517,7 @@ TEST(CreateRestParameter) {
TEST(ForIn) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"for (var p in null) {}\n",
@@ -1597,8 +1547,7 @@ TEST(ForIn) {
TEST(ForOf) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"for (var p of [0, 1, 2]) {}\n",
@@ -1620,8 +1569,7 @@ TEST(ForOf) {
TEST(Conditional) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return 1 ? 2 : 3;\n",
@@ -1639,8 +1587,7 @@ TEST(Conditional) {
TEST(Switch) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = 1;\n"
"switch(a) {\n"
@@ -1707,8 +1654,7 @@ TEST(Switch) {
TEST(BasicBlockToBoolean) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = 1; if (a || a < 0) { return 1; }\n",
@@ -1723,8 +1669,7 @@ TEST(BasicBlockToBoolean) {
TEST(DeadCodeRemoval) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return; var a = 1; a();\n",
@@ -1741,8 +1686,7 @@ TEST(DeadCodeRemoval) {
TEST(ThisFunction) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -1760,8 +1704,7 @@ TEST(ThisFunction) {
TEST(NewTarget) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return new.target;\n",
@@ -1775,8 +1718,7 @@ TEST(NewTarget) {
TEST(RemoveRedundantLdar) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var ld_a = 1;\n" // This test is to check Ldar does not
"while(true) {\n" // get removed if the preceding Star is
@@ -1803,8 +1745,7 @@ TEST(RemoveRedundantLdar) {
TEST(AssignmentsInBinaryExpression) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var x = 0, y = 1;\n"
"return (x = 2, y = 3, x = 4, y = 5);\n",
@@ -1844,8 +1785,7 @@ TEST(AssignmentsInBinaryExpression) {
TEST(Eval) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return eval('1;');\n",
};
@@ -1856,16 +1796,32 @@ TEST(Eval) {
TEST(LookupSlot) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+ printer.set_test_function_name("f");
+ // clang-format off
const char* snippets[] = {
"eval('var x = 10;'); return x;\n",
"eval('var x = 10;'); return typeof x;\n",
"x = 20; return eval('');\n",
+
+ "var x = 20;\n"
+ "f = function(){\n"
+ " eval('var x = 10');\n"
+ " return x;\n"
+ "}\n"
+ "f();\n",
+
+ "x = 20;\n"
+ "f = function(){\n"
+ " eval('var x = 10');\n"
+ " return x;\n"
+ "}\n"
+ "f();\n"
};
+ // clang-format on
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("LookupSlot.golden")));
@@ -1873,8 +1829,7 @@ TEST(LookupSlot) {
TEST(CallLookupSlot) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"g = function(){}; eval(''); return g();\n",
};
@@ -1887,8 +1842,7 @@ TEST(CallLookupSlot) {
TEST(LookupSlotInEval) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -1917,8 +1871,7 @@ TEST(LookupSlotInEval) {
TEST(LookupSlotWideInEval) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -1953,8 +1906,7 @@ TEST(LookupSlotWideInEval) {
TEST(DeleteLookupSlotInEval) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -1990,8 +1942,7 @@ TEST(WideRegisters) {
std::string prologue(os.str());
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"x0 = x127;\n"
"return x0;\n",
@@ -2033,8 +1984,7 @@ TEST(WideRegisters) {
TEST(ConstVariable) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"const x = 10;\n",
@@ -2051,8 +2001,7 @@ TEST(ConstVariable) {
TEST(LetVariable) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"let x = 10;\n",
@@ -2071,8 +2020,7 @@ TEST(ConstVariableContextSlot) {
// TODO(mythria): Add tests for initialization of this via super calls.
// TODO(mythria): Add tests that walk the context chain.
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"const x = 10; function f1() {return x;}\n",
@@ -2089,8 +2037,7 @@ TEST(ConstVariableContextSlot) {
TEST(LetVariableContextSlot) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"let x = 10; function f1() {return x;}\n",
@@ -2110,8 +2057,7 @@ TEST(DoExpression) {
FLAG_harmony_do_expressions = true;
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = do { }; return a;\n",
@@ -2128,8 +2074,7 @@ TEST(DoExpression) {
TEST(WithStatement) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"with ({x:42}) { return x; }\n",
};
@@ -2140,8 +2085,7 @@ TEST(WithStatement) {
TEST(DoDebugger) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kString);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"debugger;\n",
};
@@ -2152,8 +2096,7 @@ TEST(DoDebugger) {
TEST(ClassDeclarations) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"class Person {\n"
" constructor(name) { this.name = name; }\n"
@@ -2183,8 +2126,7 @@ TEST(ClassDeclarations) {
TEST(ClassAndSuperClass) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("test");
const char* snippets[] = {
@@ -2242,8 +2184,7 @@ TEST(ClassAndSuperClass) {
TEST(Generators) {
InitializedIgnitionHandleScope scope;
- BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kMixed);
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -2262,6 +2203,47 @@ TEST(Generators) {
LoadGolden("Generators.golden")));
}
+TEST(Modules) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+ printer.set_wrap(false);
+ printer.set_module(true);
+ printer.set_top_level(true);
+
+ const char* snippets[] = {
+ "import \"bar\";\n",
+
+ "import {foo} from \"bar\";\n",
+
+ "import {foo as goo} from \"bar\";\n"
+ "goo(42);\n"
+ "{ let x; { goo(42) } };\n",
+
+ "export var foo = 42;\n"
+ "foo++;\n"
+ "{ let x; { foo++ } };\n",
+
+ "export let foo = 42;\n"
+ "foo++;\n"
+ "{ let x; { foo++ } };\n",
+
+ "export const foo = 42;\n"
+ "foo++;\n"
+ "{ let x; { foo++ } };\n",
+
+ "export default (function () {});\n",
+
+ "export default (class {});\n",
+
+ "export {foo as goo} from \"bar\"\n",
+
+ "export * from \"bar\"\n",
+ };
+
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("Modules.golden")));
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
index 9591e2810e..3cb2beffd4 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
@@ -26,8 +26,8 @@ class InvokeIntrinsicHelper {
Handle<Object> Invoke(A... args) {
CHECK(IntrinsicsHelper::IsSupported(function_id_));
BytecodeArrayBuilder builder(isolate_, zone_, sizeof...(args), 0, 0);
- builder.CallRuntime(function_id_, builder.Parameter(0), sizeof...(args))
- .Return();
+ RegisterList reg_list(builder.Parameter(0).index(), sizeof...(args));
+ builder.CallRuntime(function_id_, reg_list).Return();
InterpreterTester tester(isolate_, builder.ToBytecodeArray(isolate_));
auto callable = tester.GetCallable<A...>();
return callable(args...).ToHandleChecked();
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 9572a2d731..77c146edaf 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <tuple>
+
#include "src/v8.h"
#include "src/execution.h"
@@ -267,7 +269,7 @@ TEST(InterpreterShiftOpsSmi) {
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::TypeFeedbackVector> vector =
NewTypeFeedbackVector(isolate, &feedback_spec);
@@ -306,7 +308,7 @@ TEST(InterpreterBinaryOpsSmi) {
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::TypeFeedbackVector> vector =
NewTypeFeedbackVector(isolate, &feedback_spec);
@@ -347,7 +349,7 @@ TEST(InterpreterBinaryOpsHeapNumber) {
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::TypeFeedbackVector> vector =
NewTypeFeedbackVector(isolate, &feedback_spec);
@@ -384,34 +386,42 @@ TEST(InterpreterStringAdd) {
Handle<Object> lhs;
Handle<Object> rhs;
Handle<Object> expected_value;
+ int32_t expected_feedback;
} test_cases[] = {
{factory->NewStringFromStaticChars("a"),
factory->NewStringFromStaticChars("b"),
- factory->NewStringFromStaticChars("ab")},
+ factory->NewStringFromStaticChars("ab"),
+ BinaryOperationFeedback::kString},
{factory->NewStringFromStaticChars("aaaaaa"),
factory->NewStringFromStaticChars("b"),
- factory->NewStringFromStaticChars("aaaaaab")},
+ factory->NewStringFromStaticChars("aaaaaab"),
+ BinaryOperationFeedback::kString},
{factory->NewStringFromStaticChars("aaa"),
factory->NewStringFromStaticChars("bbbbb"),
- factory->NewStringFromStaticChars("aaabbbbb")},
+ factory->NewStringFromStaticChars("aaabbbbb"),
+ BinaryOperationFeedback::kString},
{factory->NewStringFromStaticChars(""),
factory->NewStringFromStaticChars("b"),
- factory->NewStringFromStaticChars("b")},
+ factory->NewStringFromStaticChars("b"),
+ BinaryOperationFeedback::kString},
{factory->NewStringFromStaticChars("a"),
factory->NewStringFromStaticChars(""),
- factory->NewStringFromStaticChars("a")},
+ factory->NewStringFromStaticChars("a"),
+ BinaryOperationFeedback::kString},
{factory->NewStringFromStaticChars("1.11"), factory->NewHeapNumber(2.5),
- factory->NewStringFromStaticChars("1.112.5")},
+ factory->NewStringFromStaticChars("1.112.5"),
+ BinaryOperationFeedback::kAny},
{factory->NewStringFromStaticChars("-1.11"), factory->NewHeapNumber(2.56),
- factory->NewStringFromStaticChars("-1.112.56")},
+ factory->NewStringFromStaticChars("-1.112.56"),
+ BinaryOperationFeedback::kAny},
{factory->NewStringFromStaticChars(""), factory->NewHeapNumber(2.5),
- factory->NewStringFromStaticChars("2.5")},
+ factory->NewStringFromStaticChars("2.5"), BinaryOperationFeedback::kAny},
};
for (size_t i = 0; i < arraysize(test_cases); i++) {
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::TypeFeedbackVector> vector =
NewTypeFeedbackVector(isolate, &feedback_spec);
@@ -423,10 +433,15 @@ TEST(InterpreterStringAdd) {
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(isolate, bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*test_cases[i].expected_value));
+
+ Object* feedback = vector->Get(slot);
+ CHECK(feedback->IsSmi());
+ CHECK_EQ(test_cases[i].expected_feedback,
+ static_cast<Smi*>(feedback)->value());
}
}
@@ -461,13 +476,13 @@ TEST(InterpreterParameter8) {
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 8, 0, 0);
FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot1 = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot3 = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot4 = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot5 = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot6 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot1 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot2 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot3 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot4 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot5 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot6 = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::TypeFeedbackVector> vector =
NewTypeFeedbackVector(isolate, &feedback_spec);
@@ -630,7 +645,7 @@ TEST(InterpreterBinaryOpTypeFeedback) {
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
i::FeedbackVectorSpec feedback_spec(&zone);
- i::FeedbackVectorSlot slot0 = feedback_spec.AddGeneralSlot();
+ i::FeedbackVectorSlot slot0 = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(isolate, &feedback_spec);
@@ -734,7 +749,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
i::FeedbackVectorSpec feedback_spec(&zone);
- i::FeedbackVectorSlot slot0 = feedback_spec.AddGeneralSlot();
+ i::FeedbackVectorSlot slot0 = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(isolate, &feedback_spec);
@@ -784,10 +799,10 @@ TEST(InterpreterUnaryOpFeedback) {
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 4, 0, 0);
i::FeedbackVectorSpec feedback_spec(&zone);
- i::FeedbackVectorSlot slot0 = feedback_spec.AddGeneralSlot();
- i::FeedbackVectorSlot slot1 = feedback_spec.AddGeneralSlot();
- i::FeedbackVectorSlot slot2 = feedback_spec.AddGeneralSlot();
- i::FeedbackVectorSlot slot3 = feedback_spec.AddGeneralSlot();
+ i::FeedbackVectorSlot slot0 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ i::FeedbackVectorSlot slot1 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ i::FeedbackVectorSlot slot2 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ i::FeedbackVectorSlot slot3 = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(isolate, &feedback_spec);
@@ -848,9 +863,9 @@ TEST(InterpreterBitwiseTypeFeedback) {
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 4, 0, 0);
i::FeedbackVectorSpec feedback_spec(&zone);
- i::FeedbackVectorSlot slot0 = feedback_spec.AddGeneralSlot();
- i::FeedbackVectorSlot slot1 = feedback_spec.AddGeneralSlot();
- i::FeedbackVectorSlot slot2 = feedback_spec.AddGeneralSlot();
+ i::FeedbackVectorSlot slot0 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ i::FeedbackVectorSlot slot1 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ i::FeedbackVectorSlot slot2 = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(isolate, &feedback_spec);
@@ -1234,12 +1249,13 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
// Check with no args.
{
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
-
+ Register reg = builder.register_allocator()->NewRegister();
+ RegisterList args = builder.register_allocator()->NewRegisterList(1);
builder.LoadNamedProperty(builder.Parameter(0), name, slot_index)
- .StoreAccumulatorInRegister(Register(0));
+ .StoreAccumulatorInRegister(reg)
+ .MoveRegister(builder.Parameter(0), args[0]);
- builder.Call(Register(0), builder.Parameter(0), 1, call_slot_index,
- tail_call_mode);
+ builder.Call(reg, args, call_slot_index, tail_call_mode);
builder.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -1256,11 +1272,12 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
// Check that receiver is passed properly.
{
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
-
+ Register reg = builder.register_allocator()->NewRegister();
+ RegisterList args = builder.register_allocator()->NewRegisterList(1);
builder.LoadNamedProperty(builder.Parameter(0), name, slot_index)
- .StoreAccumulatorInRegister(Register(0));
- builder.Call(Register(0), builder.Parameter(0), 1, call_slot_index,
- tail_call_mode);
+ .StoreAccumulatorInRegister(reg)
+ .MoveRegister(builder.Parameter(0), args[0]);
+ builder.Call(reg, args, call_slot_index, tail_call_mode);
builder.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -1279,17 +1296,19 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
// Check with two parameters (+ receiver).
{
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 4);
+ Register reg = builder.register_allocator()->NewRegister();
+ RegisterList args = builder.register_allocator()->NewRegisterList(3);
builder.LoadNamedProperty(builder.Parameter(0), name, slot_index)
- .StoreAccumulatorInRegister(Register(0))
+ .StoreAccumulatorInRegister(reg)
.LoadAccumulatorWithRegister(builder.Parameter(0))
- .StoreAccumulatorInRegister(Register(1))
+ .StoreAccumulatorInRegister(args[0])
.LoadLiteral(Smi::FromInt(51))
- .StoreAccumulatorInRegister(Register(2))
+ .StoreAccumulatorInRegister(args[1])
.LoadLiteral(Smi::FromInt(11))
- .StoreAccumulatorInRegister(Register(3));
+ .StoreAccumulatorInRegister(args[2]);
- builder.Call(Register(0), Register(1), 3, call_slot_index, tail_call_mode);
+ builder.Call(reg, args, call_slot_index, tail_call_mode);
builder.Return();
@@ -1309,33 +1328,35 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
// Check with 10 parameters (+ receiver).
{
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 12);
+ Register reg = builder.register_allocator()->NewRegister();
+ RegisterList args = builder.register_allocator()->NewRegisterList(11);
builder.LoadNamedProperty(builder.Parameter(0), name, slot_index)
- .StoreAccumulatorInRegister(Register(0))
+ .StoreAccumulatorInRegister(reg)
.LoadAccumulatorWithRegister(builder.Parameter(0))
- .StoreAccumulatorInRegister(Register(1))
+ .StoreAccumulatorInRegister(args[0])
.LoadLiteral(factory->NewStringFromAsciiChecked("a"))
- .StoreAccumulatorInRegister(Register(2))
+ .StoreAccumulatorInRegister(args[1])
.LoadLiteral(factory->NewStringFromAsciiChecked("b"))
- .StoreAccumulatorInRegister(Register(3))
+ .StoreAccumulatorInRegister(args[2])
.LoadLiteral(factory->NewStringFromAsciiChecked("c"))
- .StoreAccumulatorInRegister(Register(4))
+ .StoreAccumulatorInRegister(args[3])
.LoadLiteral(factory->NewStringFromAsciiChecked("d"))
- .StoreAccumulatorInRegister(Register(5))
+ .StoreAccumulatorInRegister(args[4])
.LoadLiteral(factory->NewStringFromAsciiChecked("e"))
- .StoreAccumulatorInRegister(Register(6))
+ .StoreAccumulatorInRegister(args[5])
.LoadLiteral(factory->NewStringFromAsciiChecked("f"))
- .StoreAccumulatorInRegister(Register(7))
+ .StoreAccumulatorInRegister(args[6])
.LoadLiteral(factory->NewStringFromAsciiChecked("g"))
- .StoreAccumulatorInRegister(Register(8))
+ .StoreAccumulatorInRegister(args[7])
.LoadLiteral(factory->NewStringFromAsciiChecked("h"))
- .StoreAccumulatorInRegister(Register(9))
+ .StoreAccumulatorInRegister(args[8])
.LoadLiteral(factory->NewStringFromAsciiChecked("i"))
- .StoreAccumulatorInRegister(Register(10))
+ .StoreAccumulatorInRegister(args[9])
.LoadLiteral(factory->NewStringFromAsciiChecked("j"))
- .StoreAccumulatorInRegister(Register(11));
+ .StoreAccumulatorInRegister(args[10]);
- builder.Call(Register(0), Register(1), 11, call_slot_index, tail_call_mode);
+ builder.Call(reg, args, call_slot_index, tail_call_mode);
builder.Return();
@@ -1390,9 +1411,9 @@ TEST(InterpreterJumps) {
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 2);
FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot1 = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot1 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot2 = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::TypeFeedbackVector> vector =
NewTypeFeedbackVector(isolate, &feedback_spec);
@@ -1408,7 +1429,7 @@ TEST(InterpreterJumps) {
.Jump(&label[2]);
SetRegister(builder, reg, 2048, scratch).Bind(&label[1]);
IncrementRegister(builder, reg, 2, scratch, vector->GetIndex(slot1))
- .Jump(&label[0]);
+ .JumpLoop(&label[0], 0);
SetRegister(builder, reg, 4096, scratch).Bind(&label[2]);
IncrementRegister(builder, reg, 4, scratch, vector->GetIndex(slot2))
.LoadAccumulatorWithRegister(reg)
@@ -1429,11 +1450,11 @@ TEST(InterpreterConditionalJumps) {
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 2);
FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot1 = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot3 = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot4 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot1 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot2 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot3 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot4 = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::TypeFeedbackVector> vector =
NewTypeFeedbackVector(isolate, &feedback_spec);
@@ -1479,11 +1500,11 @@ TEST(InterpreterConditionalJumps2) {
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 2);
FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot1 = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot2 = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot3 = feedback_spec.AddGeneralSlot();
- FeedbackVectorSlot slot4 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot1 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot2 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot3 = feedback_spec.AddInterpreterBinaryOpICSlot();
+ FeedbackVectorSlot slot4 = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::TypeFeedbackVector> vector =
NewTypeFeedbackVector(isolate, &feedback_spec);
@@ -1529,7 +1550,7 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
Zone zone(isolate->allocator());
FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::TypeFeedbackVector> vector =
NewTypeFeedbackVector(isolate, &feedback_spec);
@@ -1671,22 +1692,32 @@ TEST(InterpreterSmiComparisons) {
for (size_t j = 0; j < arraysize(inputs); j++) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
+ Zone zone(isolate->allocator());
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 1);
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddInterpreterCompareICSlot();
+ Handle<i::TypeFeedbackVector> vector =
+ NewTypeFeedbackVector(isolate, &feedback_spec);
+
Register r0(0);
builder.LoadLiteral(Smi::FromInt(inputs[i]))
.StoreAccumulatorInRegister(r0)
.LoadLiteral(Smi::FromInt(inputs[j]))
- .CompareOperation(comparison, r0)
+ .CompareOperation(comparison, r0, vector->GetIndex(slot))
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(isolate, bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
CHECK_EQ(return_value->BooleanValue(),
CompareC(comparison, inputs[i], inputs[j]));
+ Object* feedback = vector->Get(slot);
+ CHECK(feedback->IsSmi());
+ CHECK_EQ(CompareOperationFeedback::kSignedSmall,
+ static_cast<Smi*>(feedback)->value());
}
}
}
@@ -1708,22 +1739,32 @@ TEST(InterpreterHeapNumberComparisons) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
+ Zone zone(isolate->allocator());
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 1);
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddInterpreterCompareICSlot();
+ Handle<i::TypeFeedbackVector> vector =
+ NewTypeFeedbackVector(isolate, &feedback_spec);
+
Register r0(0);
builder.LoadLiteral(factory->NewHeapNumber(inputs[i]))
.StoreAccumulatorInRegister(r0)
.LoadLiteral(factory->NewHeapNumber(inputs[j]))
- .CompareOperation(comparison, r0)
+ .CompareOperation(comparison, r0, vector->GetIndex(slot))
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(isolate, bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
CHECK_EQ(return_value->BooleanValue(),
CompareC(comparison, inputs[i], inputs[j]));
+ Object* feedback = vector->Get(slot);
+ CHECK(feedback->IsSmi());
+ CHECK_EQ(CompareOperationFeedback::kNumber,
+ static_cast<Smi*>(feedback)->value());
}
}
}
@@ -1734,6 +1775,7 @@ TEST(InterpreterStringComparisons) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
+ Zone zone(isolate->allocator());
std::string inputs[] = {"A", "abc", "z", "", "Foo!", "Foo"};
@@ -1744,21 +1786,31 @@ TEST(InterpreterStringComparisons) {
CanonicalHandleScope canonical(isolate);
const char* lhs = inputs[i].c_str();
const char* rhs = inputs[j].c_str();
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddInterpreterCompareICSlot();
+ Handle<i::TypeFeedbackVector> vector =
+ NewTypeFeedbackVector(isolate, &feedback_spec);
+
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 1);
Register r0(0);
builder.LoadLiteral(factory->NewStringFromAsciiChecked(lhs))
.StoreAccumulatorInRegister(r0)
.LoadLiteral(factory->NewStringFromAsciiChecked(rhs))
- .CompareOperation(comparison, r0)
+ .CompareOperation(comparison, r0, vector->GetIndex(slot))
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(isolate, bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
CHECK_EQ(return_value->BooleanValue(),
CompareC(comparison, inputs[i], inputs[j]));
+ Object* feedback = vector->Get(slot);
+ CHECK(feedback->IsSmi());
+ CHECK_EQ(CompareOperationFeedback::kAny,
+ static_cast<Smi*>(feedback)->value());
}
}
}
@@ -1789,6 +1841,12 @@ TEST(InterpreterMixedComparisons) {
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 1);
+ Zone zone(isolate->allocator());
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddInterpreterCompareICSlot();
+ Handle<i::TypeFeedbackVector> vector =
+ NewTypeFeedbackVector(isolate, &feedback_spec);
Register r0(0);
if (pass == 0) {
@@ -1796,25 +1854,29 @@ TEST(InterpreterMixedComparisons) {
builder.LoadLiteral(factory->NewNumber(lhs))
.StoreAccumulatorInRegister(r0)
.LoadLiteral(factory->NewStringFromAsciiChecked(rhs_cstr))
- .CompareOperation(comparison, r0)
+ .CompareOperation(comparison, r0, vector->GetIndex(slot))
.Return();
} else {
// Comparison with HeapNumber on the rhs and String on the lhs
builder.LoadLiteral(factory->NewStringFromAsciiChecked(lhs_cstr))
.StoreAccumulatorInRegister(r0)
.LoadLiteral(factory->NewNumber(rhs))
- .CompareOperation(comparison, r0)
+ .CompareOperation(comparison, r0, vector->GetIndex(slot))
.Return();
}
Handle<BytecodeArray> bytecode_array =
builder.ToBytecodeArray(isolate);
- InterpreterTester tester(isolate, bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
CHECK_EQ(return_value->BooleanValue(),
CompareC(comparison, lhs, rhs, true));
+ Object* feedback = vector->Get(slot);
+ CHECK(feedback->IsSmi());
+ CHECK_EQ(CompareOperationFeedback::kAny,
+ static_cast<Smi*>(feedback)->value());
}
}
}
@@ -1910,7 +1972,7 @@ TEST(InterpreterInstanceOf) {
builder.LoadLiteral(cases[i]);
builder.StoreAccumulatorInRegister(r0)
.LoadLiteral(func)
- .CompareOperation(Token::Value::INSTANCEOF, r0)
+ .CompareOperation(Token::Value::INSTANCEOF, r0, 0)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -1940,7 +2002,7 @@ TEST(InterpreterTestIn) {
builder.LoadLiteral(factory->NewStringFromAsciiChecked(properties[i]))
.StoreAccumulatorInRegister(r0)
.LoadLiteral(Handle<Object>::cast(array))
- .CompareOperation(Token::Value::IN, r0)
+ .CompareOperation(Token::Value::IN, r0, 0)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -2069,12 +2131,13 @@ TEST(InterpreterCallRuntime) {
Isolate* isolate = handles.main_isolate();
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 2);
+ RegisterList args = builder.register_allocator()->NewRegisterList(2);
builder.LoadLiteral(Smi::FromInt(15))
- .StoreAccumulatorInRegister(Register(0))
+ .StoreAccumulatorInRegister(args[0])
.LoadLiteral(Smi::FromInt(40))
- .StoreAccumulatorInRegister(Register(1))
- .CallRuntime(Runtime::kAdd, Register(0), 2)
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kAdd, args)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -2093,7 +2156,7 @@ TEST(InterpreterInvokeIntrinsic) {
builder.LoadLiteral(Smi::FromInt(15))
.StoreAccumulatorInRegister(Register(0))
- .CallRuntime(Runtime::kInlineIsArray, Register(0), 1)
+ .CallRuntime(Runtime::kInlineIsArray, Register(0))
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -3821,6 +3884,88 @@ TEST(InterpreterLookupSlot) {
}
}
+TEST(InterpreterLookupContextSlot) {
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+
+ const char* inner_function_prologue = "function inner() {";
+ const char* inner_function_epilogue = "};";
+ const char* outer_function_epilogue = "return inner();";
+
+ std::tuple<const char*, const char*, Handle<Object>> lookup_slot[] = {
+ // Eval in inner context.
+ std::make_tuple("var x = 0;", "eval(''); return x;",
+ handle(Smi::FromInt(0), isolate)),
+ std::make_tuple("var x = 0;", "eval('var x = 1'); return x;",
+ handle(Smi::FromInt(1), isolate)),
+ std::make_tuple("var x = 0;",
+ "'use strict'; eval('var x = 1'); return x;",
+ handle(Smi::FromInt(0), isolate)),
+ // Eval in outer context.
+ std::make_tuple("var x = 0; eval('');", "return x;",
+ handle(Smi::FromInt(0), isolate)),
+ std::make_tuple("var x = 0; eval('var x = 1');", "return x;",
+ handle(Smi::FromInt(1), isolate)),
+ std::make_tuple("'use strict'; var x = 0; eval('var x = 1');",
+ "return x;", handle(Smi::FromInt(0), isolate)),
+ };
+
+ for (size_t i = 0; i < arraysize(lookup_slot); i++) {
+ std::string body = std::string(std::get<0>(lookup_slot[i])) +
+ std::string(inner_function_prologue) +
+ std::string(std::get<1>(lookup_slot[i])) +
+ std::string(inner_function_epilogue) +
+ std::string(outer_function_epilogue);
+ std::string script = InterpreterTester::SourceForBody(body.c_str());
+
+ InterpreterTester tester(isolate, script.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*std::get<2>(lookup_slot[i])));
+ }
+}
+
+TEST(InterpreterLookupGlobalSlot) {
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+
+ const char* inner_function_prologue = "function inner() {";
+ const char* inner_function_epilogue = "};";
+ const char* outer_function_epilogue = "return inner();";
+
+ std::tuple<const char*, const char*, Handle<Object>> lookup_slot[] = {
+ // Eval in inner context.
+ std::make_tuple("x = 0;", "eval(''); return x;",
+ handle(Smi::FromInt(0), isolate)),
+ std::make_tuple("x = 0;", "eval('var x = 1'); return x;",
+ handle(Smi::FromInt(1), isolate)),
+ std::make_tuple("x = 0;", "'use strict'; eval('var x = 1'); return x;",
+ handle(Smi::FromInt(0), isolate)),
+ // Eval in outer context.
+ std::make_tuple("x = 0; eval('');", "return x;",
+ handle(Smi::FromInt(0), isolate)),
+ std::make_tuple("x = 0; eval('var x = 1');", "return x;",
+ handle(Smi::FromInt(1), isolate)),
+ std::make_tuple("'use strict'; x = 0; eval('var x = 1');", "return x;",
+ handle(Smi::FromInt(0), isolate)),
+ };
+
+ for (size_t i = 0; i < arraysize(lookup_slot); i++) {
+ std::string body = std::string(std::get<0>(lookup_slot[i])) +
+ std::string(inner_function_prologue) +
+ std::string(std::get<1>(lookup_slot[i])) +
+ std::string(inner_function_epilogue) +
+ std::string(outer_function_epilogue);
+ std::string script = InterpreterTester::SourceForBody(body.c_str());
+
+ InterpreterTester tester(isolate, script.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*std::get<2>(lookup_slot[i])));
+ }
+}
TEST(InterpreterCallLookupSlot) {
HandleAndZoneScope handles;
diff --git a/deps/v8/test/cctest/interpreter/test-source-positions.cc b/deps/v8/test/cctest/interpreter/test-source-positions.cc
index 3161f92db9..2b7f5c368a 100644
--- a/deps/v8/test/cctest/interpreter/test-source-positions.cc
+++ b/deps/v8/test/cctest/interpreter/test-source-positions.cc
@@ -9,7 +9,6 @@
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
-#include "src/parsing/parser.h"
#include "test/cctest/cctest.h"
#include "test/cctest/interpreter/source-position-matcher.h"
diff --git a/deps/v8/test/cctest/libplatform/test-tracing.cc b/deps/v8/test/cctest/libplatform/test-tracing.cc
index 2e15d6af9e..66fa0e05a5 100644
--- a/deps/v8/test/cctest/libplatform/test-tracing.cc
+++ b/deps/v8/test/cctest/libplatform/test-tracing.cc
@@ -35,7 +35,8 @@ TEST(TestTraceObject) {
TraceObject trace_object;
uint8_t category_enabled_flag = 41;
trace_object.Initialize('X', &category_enabled_flag, "Test.Trace",
- "Test.Scope", 42, 123, 0, NULL, NULL, NULL, 0);
+ "Test.Scope", 42, 123, 0, nullptr, nullptr, nullptr,
+ nullptr, 0);
CHECK_EQ('X', trace_object.phase());
CHECK_EQ(category_enabled_flag, *trace_object.category_enabled_flag());
CHECK_EQ(std::string("Test.Trace"), std::string(trace_object.name()));
@@ -44,6 +45,19 @@ TEST(TestTraceObject) {
CHECK_EQ(0, trace_object.cpu_duration());
}
+class ConvertableToTraceFormatMock : public v8::ConvertableToTraceFormat {
+ public:
+ explicit ConvertableToTraceFormatMock(int value) : value_(value) {}
+ void AppendAsTraceFormat(std::string* out) const override {
+ *out += "[" + std::to_string(value_) + "," + std::to_string(value_) + "]";
+ }
+
+ private:
+ int value_;
+
+ DISALLOW_COPY_AND_ASSIGN(ConvertableToTraceFormatMock);
+};
+
class MockTraceWriter : public TraceWriter {
public:
void AppendTraceEvent(TraceObject* trace_event) override {
@@ -75,7 +89,8 @@ TEST(TestTraceBufferRingBuffer) {
TraceObject* trace_object = ring_buffer->AddTraceEvent(&handles[i]);
CHECK_NOT_NULL(trace_object);
trace_object->Initialize('X', &category_enabled_flag, names[i].c_str(),
- "Test.Scope", 42, 123, 0, NULL, NULL, NULL, 0);
+ "Test.Scope", 42, 123, 0, nullptr, nullptr,
+ nullptr, nullptr, 0);
trace_object = ring_buffer->GetEventByHandle(handles[i]);
CHECK_NOT_NULL(trace_object);
CHECK_EQ('X', trace_object->phase());
@@ -128,13 +143,13 @@ TEST(TestJSONTraceWriter) {
TraceObject trace_object;
trace_object.InitializeForTesting(
'X', tracing_controller.GetCategoryGroupEnabled("v8-cat"), "Test0",
- v8::internal::tracing::kGlobalScope, 42, 123, 0, NULL, NULL, NULL,
- TRACE_EVENT_FLAG_HAS_ID, 11, 22, 100, 50, 33, 44);
+ v8::internal::tracing::kGlobalScope, 42, 123, 0, nullptr, nullptr,
+ nullptr, nullptr, TRACE_EVENT_FLAG_HAS_ID, 11, 22, 100, 50, 33, 44);
writer->AppendTraceEvent(&trace_object);
trace_object.InitializeForTesting(
'Y', tracing_controller.GetCategoryGroupEnabled("v8-cat"), "Test1",
- v8::internal::tracing::kGlobalScope, 43, 456, 0, NULL, NULL, NULL, 0,
- 55, 66, 110, 55, 77, 88);
+ v8::internal::tracing::kGlobalScope, 43, 456, 0, nullptr, nullptr,
+ nullptr, nullptr, 0, 55, 66, 110, 55, 77, 88);
writer->AppendTraceEvent(&trace_object);
tracing_controller.StopTracing();
}
@@ -264,6 +279,14 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
mm = "CHANGED";
mmm = "CHANGED";
+ TRACE_EVENT_INSTANT1("v8", "v8.Test", TRACE_EVENT_SCOPE_THREAD, "a1",
+ new ConvertableToTraceFormatMock(42));
+ std::unique_ptr<ConvertableToTraceFormatMock> trace_event_arg(
+ new ConvertableToTraceFormatMock(42));
+ TRACE_EVENT_INSTANT2("v8", "v8.Test", TRACE_EVENT_SCOPE_THREAD, "a1",
+ std::move(trace_event_arg), "a2",
+ new ConvertableToTraceFormatMock(123));
+
tracing_controller.StopTracing();
}
@@ -274,7 +297,7 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
GetJSONStrings(all_names, trace_str, "\"name\"", "\"", "\"");
GetJSONStrings(all_cats, trace_str, "\"cat\"", "\"", "\"");
- CHECK_EQ(all_args.size(), 22);
+ CHECK_EQ(all_args.size(), 24);
CHECK_EQ(all_args[0], "\"aa\":11");
CHECK_EQ(all_args[1], "\"bb\":22");
CHECK_EQ(all_args[2], "\"cc\":33");
@@ -303,6 +326,81 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
CHECK_EQ(all_names[20], "INIT");
CHECK_EQ(all_names[21], "INIT");
CHECK_EQ(all_args[21], "\"mm1\":\"INIT\",\"mm2\":\"\\\"INIT\\\"\"");
+ CHECK_EQ(all_args[22], "\"a1\":[42,42]");
+ CHECK_EQ(all_args[23], "\"a1\":[42,42],\"a2\":[123,123]");
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
+
+namespace {
+
+class TraceStateObserverImpl : public Platform::TraceStateObserver {
+ public:
+ void OnTraceEnabled() override { ++enabled_count; }
+ void OnTraceDisabled() override { ++disabled_count; }
+
+ int enabled_count = 0;
+ int disabled_count = 0;
+};
+
+} // namespace
+
+TEST(TracingObservers) {
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ v8::Platform* default_platform = v8::platform::CreateDefaultPlatform();
+ i::V8::SetPlatformForTesting(default_platform);
+
+ v8::platform::tracing::TracingController tracing_controller;
+ v8::platform::SetTracingController(default_platform, &tracing_controller);
+ MockTraceWriter* writer = new MockTraceWriter();
+ v8::platform::tracing::TraceBuffer* ring_buffer =
+ v8::platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(1,
+ writer);
+ tracing_controller.Initialize(ring_buffer);
+ v8::platform::tracing::TraceConfig* trace_config =
+ new v8::platform::tracing::TraceConfig();
+ trace_config->AddIncludedCategory("v8");
+
+ TraceStateObserverImpl observer;
+ default_platform->AddTraceStateObserver(&observer);
+
+ CHECK_EQ(0, observer.enabled_count);
+ CHECK_EQ(0, observer.disabled_count);
+
+ tracing_controller.StartTracing(trace_config);
+
+ CHECK_EQ(1, observer.enabled_count);
+ CHECK_EQ(0, observer.disabled_count);
+
+ TraceStateObserverImpl observer2;
+ default_platform->AddTraceStateObserver(&observer2);
+
+ CHECK_EQ(1, observer2.enabled_count);
+ CHECK_EQ(0, observer2.disabled_count);
+
+ default_platform->RemoveTraceStateObserver(&observer2);
+
+ CHECK_EQ(1, observer2.enabled_count);
+ CHECK_EQ(0, observer2.disabled_count);
+
+ tracing_controller.StopTracing();
+
+ CHECK_EQ(1, observer.enabled_count);
+ CHECK_EQ(1, observer.disabled_count);
+ CHECK_EQ(1, observer2.enabled_count);
+ CHECK_EQ(0, observer2.disabled_count);
+
+ default_platform->RemoveTraceStateObserver(&observer);
+
+ CHECK_EQ(1, observer.enabled_count);
+ CHECK_EQ(1, observer.disabled_count);
+
+ trace_config = new v8::platform::tracing::TraceConfig();
+ tracing_controller.StartTracing(trace_config);
+ tracing_controller.StopTracing();
+
+ CHECK_EQ(1, observer.enabled_count);
+ CHECK_EQ(1, observer.disabled_count);
i::V8::SetPlatformForTesting(old_platform);
}
diff --git a/deps/v8/test/cctest/libsampler/test-sampler.cc b/deps/v8/test/cctest/libsampler/test-sampler.cc
index b88d347914..2ec3b870df 100644
--- a/deps/v8/test/cctest/libsampler/test-sampler.cc
+++ b/deps/v8/test/cctest/libsampler/test-sampler.cc
@@ -6,6 +6,7 @@
#include "src/libsampler/sampler.h"
#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/parsing/test-scanner-streams.cc b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
new file mode 100644
index 0000000000..fffd1200f2
--- /dev/null
+++ b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
@@ -0,0 +1,448 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/factory.h" // for i::Factory::NewExternalStringFrom*Byte
+#include "src/objects-inl.h"
+#include "src/parsing/scanner-character-streams.h"
+#include "src/parsing/scanner.h"
+#include "src/type-feedback-vector-inl.h" // for include "src/factory.h"
+#include "test/cctest/cctest.h"
+
+namespace {
+
+// Implement ExternalSourceStream based on const char**.
+// This will take each string as one chunk. The last chunk must be empty.
+class ChunkSource : public v8::ScriptCompiler::ExternalSourceStream {
+ public:
+ explicit ChunkSource(const char** chunks) : current_(0) {
+ do {
+ chunks_.push_back(
+ {reinterpret_cast<const uint8_t*>(*chunks), strlen(*chunks)});
+ chunks++;
+ } while (chunks_.back().len > 0);
+ }
+ ChunkSource(const uint8_t* data, size_t len, bool extra_chunky)
+ : current_(0) {
+ // If extra_chunky, we'll use increasingly large chunk sizes.
+ // If not, we'll have a single chunk of full length.
+ size_t chunk_size = extra_chunky ? 1 : len;
+ for (size_t i = 0; i < len; i += chunk_size, chunk_size *= 2) {
+ chunks_.push_back({data + i, i::Min(chunk_size, len - i)});
+ }
+ chunks_.push_back({nullptr, 0});
+ }
+ ~ChunkSource() {}
+ bool SetBookmark() override { return false; }
+ void ResetToBookmark() override {}
+ size_t GetMoreData(const uint8_t** src) override {
+ DCHECK_LT(current_, chunks_.size());
+ Chunk& next = chunks_[current_++];
+ uint8_t* chunk = new uint8_t[next.len];
+ i::MemMove(chunk, next.ptr, next.len);
+ *src = chunk;
+ return next.len;
+ }
+
+ private:
+ struct Chunk {
+ const uint8_t* ptr;
+ size_t len;
+ };
+ std::vector<Chunk> chunks_;
+ size_t current_;
+};
+
+class TestExternalResource : public v8::String::ExternalStringResource {
+ public:
+ explicit TestExternalResource(uint16_t* data, int length)
+ : data_(data), length_(static_cast<size_t>(length)) {}
+
+ ~TestExternalResource() {}
+
+ const uint16_t* data() const { return data_; }
+ size_t length() const { return length_; }
+
+ private:
+ uint16_t* data_;
+ size_t length_;
+};
+
+class TestExternalOneByteResource
+ : public v8::String::ExternalOneByteStringResource {
+ public:
+ TestExternalOneByteResource(const char* data, size_t length)
+ : data_(data), length_(length) {}
+
+ const char* data() const { return data_; }
+ size_t length() const { return length_; }
+
+ private:
+ const char* data_;
+ size_t length_;
+};
+
+// A test string with all lengths of utf-8 encodings.
+const char unicode_utf8[] =
+ "abc" // 3x ascii
+ "\xc3\xa4" // a Umlaut, code point 228
+ "\xe2\xa8\xa0" // >> (math symbol), code point 10784
+ "\xf0\x9f\x92\xa9" // best character, code point 128169,
+ // as utf-16 surrogates: 55357 56489
+ "def"; // 3x ascii again.
+const uint16_t unicode_ucs2[] = {97, 98, 99, 228, 10784, 55357,
+ 56489, 100, 101, 102, 0};
+
+} // anonymous namespace
+
+TEST(Utf8StreamAsciiOnly) {
+ const char* chunks[] = {"abc", "def", "ghi", ""};
+ ChunkSource chunk_source(chunks);
+ std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
+ v8::internal::ScannerStream::For(
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
+
+ // Read the data without dying.
+ v8::internal::uc32 c;
+ do {
+ c = stream->Advance();
+ } while (c != v8::internal::Utf16CharacterStream::kEndOfInput);
+}
+
+TEST(Utf8StreamBOM) {
+ // Construct test string w/ UTF-8 BOM (byte order mark)
+ char data[3 + arraysize(unicode_utf8)] = {"\xef\xbb\xbf"};
+ strncpy(data + 3, unicode_utf8, arraysize(unicode_utf8));
+
+ const char* chunks[] = {data, "\0"};
+ ChunkSource chunk_source(chunks);
+ std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
+ v8::internal::ScannerStream::For(
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
+
+ // Read the data without tripping over the BOM.
+ for (size_t i = 0; unicode_ucs2[i]; i++) {
+ CHECK_EQ(unicode_ucs2[i], stream->Advance());
+ }
+ CHECK_EQ(v8::internal::Utf16CharacterStream::kEndOfInput, stream->Advance());
+
+ // Make sure seek works.
+ stream->Seek(0);
+ CHECK_EQ(unicode_ucs2[0], stream->Advance());
+
+ stream->Seek(5);
+ CHECK_EQ(unicode_ucs2[5], stream->Advance());
+}
+
+TEST(Utf8SplitBOM) {
+ // Construct chunks with a BOM split into two chunks.
+ char partial_bom[] = "\xef\xbb";
+ char data[1 + arraysize(unicode_utf8)] = {"\xbf"};
+ strncpy(data + 1, unicode_utf8, arraysize(unicode_utf8));
+
+ {
+ const char* chunks[] = {partial_bom, data, "\0"};
+ ChunkSource chunk_source(chunks);
+ std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
+ v8::internal::ScannerStream::For(
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
+
+ // Read the data without tripping over the BOM.
+ for (size_t i = 0; unicode_ucs2[i]; i++) {
+ CHECK_EQ(unicode_ucs2[i], stream->Advance());
+ }
+ }
+
+ // And now with single-byte BOM chunks.
+ char bom_byte_1[] = "\xef";
+ char bom_byte_2[] = "\xbb";
+ {
+ const char* chunks[] = {bom_byte_1, bom_byte_2, data, "\0"};
+ ChunkSource chunk_source(chunks);
+ std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
+ v8::internal::ScannerStream::For(
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
+
+ // Read the data without tripping over the BOM.
+ for (size_t i = 0; unicode_ucs2[i]; i++) {
+ CHECK_EQ(unicode_ucs2[i], stream->Advance());
+ }
+ }
+}
+
+TEST(Utf8ChunkBoundaries) {
+ // Test utf-8 parsing at chunk boundaries.
+
+ // Split the test string at each byte and pass it to the stream. This way,
+ // we'll have a split at each possible boundary.
+ size_t len = strlen(unicode_utf8);
+ char buffer[arraysize(unicode_utf8) + 3];
+ for (size_t i = 1; i < len; i++) {
+ // Copy source string into buffer, splitting it at i.
+ // Then add three chunks, 0..i-1, i..strlen-1, empty.
+ strncpy(buffer, unicode_utf8, i);
+ strncpy(buffer + i + 1, unicode_utf8 + i, len - i);
+ buffer[i] = '\0';
+ buffer[len + 1] = '\0';
+ buffer[len + 2] = '\0';
+ const char* chunks[] = {buffer, buffer + i + 1, buffer + len + 2};
+
+ ChunkSource chunk_source(chunks);
+ std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
+ v8::internal::ScannerStream::For(
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
+
+ for (size_t i = 0; unicode_ucs2[i]; i++) {
+ CHECK_EQ(unicode_ucs2[i], stream->Advance());
+ }
+ CHECK_EQ(v8::internal::Utf16CharacterStream::kEndOfInput,
+ stream->Advance());
+ }
+}
+
+TEST(Utf8SingleByteChunks) {
+ // Have each byte as a single-byte chunk.
+ size_t len = strlen(unicode_utf8);
+ char buffer[arraysize(unicode_utf8) + 4];
+ for (size_t i = 1; i < len - 1; i++) {
+ // Copy source string into buffer, make a single-byte chunk at i.
+ strncpy(buffer, unicode_utf8, i);
+ strncpy(buffer + i + 3, unicode_utf8 + i + 1, len - i - 1);
+ buffer[i] = '\0';
+ buffer[i + 1] = unicode_utf8[i];
+ buffer[i + 2] = '\0';
+ buffer[len + 2] = '\0';
+ buffer[len + 3] = '\0';
+ const char* chunks[] = {buffer, buffer + i + 1, buffer + i + 3,
+ buffer + len + 3};
+
+ ChunkSource chunk_source(chunks);
+ std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
+ v8::internal::ScannerStream::For(
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
+
+ for (size_t j = 0; unicode_ucs2[j]; j++) {
+ CHECK_EQ(unicode_ucs2[j], stream->Advance());
+ }
+ CHECK_EQ(v8::internal::Utf16CharacterStream::kEndOfInput,
+ stream->Advance());
+ }
+}
+
+#define CHECK_EQU(v1, v2) CHECK_EQ(static_cast<int>(v1), static_cast<int>(v2))
+
+void TestCharacterStream(const char* reference, i::Utf16CharacterStream* stream,
+ unsigned length, unsigned start, unsigned end) {
+ // Read streams one char at a time
+ unsigned i;
+ for (i = start; i < end; i++) {
+ CHECK_EQU(i, stream->pos());
+ CHECK_EQU(reference[i], stream->Advance());
+ }
+ CHECK_EQU(end, stream->pos());
+
+ // Pushback, re-read, pushback again.
+ while (i > end / 4) {
+ int32_t c0 = reference[i - 1];
+ CHECK_EQU(i, stream->pos());
+ stream->Back();
+ i--;
+ CHECK_EQU(i, stream->pos());
+ int32_t c1 = stream->Advance();
+ i++;
+ CHECK_EQU(i, stream->pos());
+ CHECK_EQ(c0, c1);
+ stream->Back();
+ i--;
+ CHECK_EQU(i, stream->pos());
+ }
+
+ // Seek + read streams one char at a time.
+ unsigned halfway = end / 2;
+ stream->Seek(stream->pos() + halfway - i);
+ for (i = halfway; i < end; i++) {
+ CHECK_EQU(i, stream->pos());
+ CHECK_EQU(reference[i], stream->Advance());
+ }
+ CHECK_EQU(i, stream->pos());
+ CHECK_LT(stream->Advance(), 0);
+
+ // Seek back, then seek beyond end of stream.
+ stream->Seek(start);
+ if (start < length) {
+ CHECK_EQU(stream->Advance(), reference[start]);
+ } else {
+ CHECK_LT(stream->Advance(), 0);
+ }
+ stream->Seek(length + 5);
+ CHECK_LT(stream->Advance(), 0);
+}
+
+#undef CHECK_EQU
+
+void TestCharacterStreams(const char* one_byte_source, unsigned length,
+ unsigned start = 0, unsigned end = 0) {
+ if (end == 0) end = length;
+
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+
+ // 2-byte external string
+ std::unique_ptr<i::uc16[]> uc16_buffer(new i::uc16[length]);
+ i::Vector<const i::uc16> two_byte_vector(uc16_buffer.get(),
+ static_cast<int>(length));
+ {
+ for (unsigned i = 0; i < length; i++) {
+ uc16_buffer[i] = static_cast<i::uc16>(one_byte_source[i]);
+ }
+ TestExternalResource resource(uc16_buffer.get(), length);
+ i::Handle<i::String> uc16_string(
+ factory->NewExternalStringFromTwoByte(&resource).ToHandleChecked());
+ std::unique_ptr<i::Utf16CharacterStream> uc16_stream(
+ i::ScannerStream::For(uc16_string, start, end));
+ TestCharacterStream(one_byte_source, uc16_stream.get(), length, start, end);
+ }
+
+ // 1-byte external string
+ i::Vector<const char> one_byte_vector(one_byte_source,
+ static_cast<int>(length));
+ i::Handle<i::String> one_byte_string =
+ factory->NewStringFromAscii(one_byte_vector).ToHandleChecked();
+ {
+ TestExternalOneByteResource one_byte_resource(one_byte_source, length);
+ i::Handle<i::String> ext_one_byte_string(
+ factory->NewExternalStringFromOneByte(&one_byte_resource)
+ .ToHandleChecked());
+ std::unique_ptr<i::Utf16CharacterStream> one_byte_stream(
+ i::ScannerStream::For(ext_one_byte_string, start, end));
+ TestCharacterStream(one_byte_source, one_byte_stream.get(), length, start,
+ end);
+ }
+
+ // 1-byte generic i::String
+ {
+ std::unique_ptr<i::Utf16CharacterStream> string_stream(
+ i::ScannerStream::For(one_byte_string, start, end));
+ TestCharacterStream(one_byte_source, string_stream.get(), length, start,
+ end);
+ }
+
+ // 2-byte generic i::String
+ {
+ i::Handle<i::String> two_byte_string =
+ factory->NewStringFromTwoByte(two_byte_vector).ToHandleChecked();
+ std::unique_ptr<i::Utf16CharacterStream> two_byte_string_stream(
+ i::ScannerStream::For(two_byte_string, start, end));
+ TestCharacterStream(one_byte_source, two_byte_string_stream.get(), length,
+ start, end);
+ }
+
+ // Streaming has no notion of start/end, so let's skip streaming tests for
+ // these cases.
+ if (start != 0 || end != length) return;
+
+ // 1-byte streaming stream, single + many chunks.
+ {
+ const uint8_t* data =
+ reinterpret_cast<const uint8_t*>(one_byte_vector.begin());
+ const uint8_t* data_end =
+ reinterpret_cast<const uint8_t*>(one_byte_vector.end());
+
+ ChunkSource single_chunk(data, data_end - data, false);
+ std::unique_ptr<i::Utf16CharacterStream> one_byte_streaming_stream(
+ i::ScannerStream::For(&single_chunk,
+ v8::ScriptCompiler::StreamedSource::ONE_BYTE));
+ TestCharacterStream(one_byte_source, one_byte_streaming_stream.get(),
+ length, start, end);
+
+ ChunkSource many_chunks(data, data_end - data, true);
+ one_byte_streaming_stream.reset(i::ScannerStream::For(
+ &many_chunks, v8::ScriptCompiler::StreamedSource::ONE_BYTE));
+ TestCharacterStream(one_byte_source, one_byte_streaming_stream.get(),
+ length, start, end);
+ }
+
+ // UTF-8 streaming stream, single + many chunks.
+ {
+ const uint8_t* data =
+ reinterpret_cast<const uint8_t*>(one_byte_vector.begin());
+ const uint8_t* data_end =
+ reinterpret_cast<const uint8_t*>(one_byte_vector.end());
+ ChunkSource chunks(data, data_end - data, false);
+ std::unique_ptr<i::Utf16CharacterStream> utf8_streaming_stream(
+ i::ScannerStream::For(&chunks,
+ v8::ScriptCompiler::StreamedSource::UTF8));
+ TestCharacterStream(one_byte_source, utf8_streaming_stream.get(), length,
+ start, end);
+
+ ChunkSource many_chunks(data, data_end - data, true);
+ utf8_streaming_stream.reset(i::ScannerStream::For(
+ &many_chunks, v8::ScriptCompiler::StreamedSource::UTF8));
+ TestCharacterStream(one_byte_source, utf8_streaming_stream.get(), length,
+ start, end);
+ }
+
+ // 2-byte streaming stream, single + many chunks.
+ {
+ const uint8_t* data =
+ reinterpret_cast<const uint8_t*>(two_byte_vector.begin());
+ const uint8_t* data_end =
+ reinterpret_cast<const uint8_t*>(two_byte_vector.end());
+ ChunkSource chunks(data, data_end - data, false);
+ std::unique_ptr<i::Utf16CharacterStream> two_byte_streaming_stream(
+ i::ScannerStream::For(&chunks,
+ v8::ScriptCompiler::StreamedSource::TWO_BYTE));
+ TestCharacterStream(one_byte_source, two_byte_streaming_stream.get(),
+ length, start, end);
+
+ ChunkSource many_chunks(data, data_end - data, true);
+ two_byte_streaming_stream.reset(i::ScannerStream::For(
+ &many_chunks, v8::ScriptCompiler::StreamedSource::TWO_BYTE));
+ TestCharacterStream(one_byte_source, two_byte_streaming_stream.get(),
+ length, start, end);
+ }
+}
+
+TEST(CharacterStreams) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handles(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ TestCharacterStreams("abcdefghi", 9);
+ TestCharacterStreams("abc\0\n\r\x7f", 7);
+ TestCharacterStreams("\0", 1);
+ TestCharacterStreams("", 0);
+
+ // 4k large buffer.
+ char buffer[4096 + 1];
+ for (unsigned i = 0; i < arraysize(buffer); i++) {
+ buffer[i] = static_cast<char>(i & 0x7F);
+ }
+ buffer[arraysize(buffer) - 1] = '\0';
+ TestCharacterStreams(buffer, arraysize(buffer) - 1);
+ TestCharacterStreams(buffer, arraysize(buffer) - 1, 576, 3298);
+}
+
+// Regression test for crbug.com/651333. Read invalid utf-8.
+TEST(Regress651333) {
+ const uint8_t bytes[] =
+ "A\xf1"
+ "ad"; // Anad, with n == n-with-tilde.
+ const uint16_t unicode[] = {65, 65533, 97, 100};
+
+ // Run the test for all sub-strings 0..N of bytes, to make sure we hit the
+ // error condition in and at chunk boundaries.
+ for (size_t len = 0; len < arraysize(bytes); len++) {
+ // Read len bytes from bytes, and compare against the expected unicode
+ // characters. Expect kBadChar ( == Unicode replacement char == code point
+ // 65533) instead of the incorrectly coded Latin1 char.
+ ChunkSource chunks(bytes, len, false);
+ std::unique_ptr<i::Utf16CharacterStream> stream(i::ScannerStream::For(
+ &chunks, v8::ScriptCompiler::StreamedSource::UTF8));
+ for (size_t i = 0; i < len; i++) {
+ CHECK_EQ(unicode[i], stream->Advance());
+ }
+ CHECK_EQ(i::Utf16CharacterStream::kEndOfInput, stream->Advance());
+ }
+}
diff --git a/deps/v8/test/cctest/parsing/test-scanner.cc b/deps/v8/test/cctest/parsing/test-scanner.cc
new file mode 100644
index 0000000000..2577aa5868
--- /dev/null
+++ b/deps/v8/test/cctest/parsing/test-scanner.cc
@@ -0,0 +1,87 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests v8::internal::Scanner. Note that presently most unit tests for the
+// Scanner are in cctest/test-parsing.cc, rather than here.
+
+#include "src/handles-inl.h"
+#include "src/parsing/scanner-character-streams.h"
+#include "src/parsing/scanner.h"
+#include "src/unicode-cache.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+namespace {
+
+const char src_simple[] = "function foo() { var x = 2 * a() + b; }";
+
+std::unique_ptr<Scanner> make_scanner(const char* src) {
+ std::unique_ptr<Scanner> scanner(new Scanner(new UnicodeCache()));
+ scanner->Initialize(ScannerStream::ForTesting(src).release());
+ return scanner;
+}
+
+} // anonymous namespace
+
+// DCHECK_TOK checks token equality, but by checking for equality of the token
+// names. That should have the same result, but has much nicer error messaages.
+#define DCHECK_TOK(a, b) DCHECK_EQ(Token::Name(a), Token::Name(b))
+
+TEST(Bookmarks) {
+ // Scan through the given source and record the tokens for use as reference
+ // below.
+ std::vector<Token::Value> tokens;
+ {
+ auto scanner = make_scanner(src_simple);
+ do {
+ tokens.push_back(scanner->Next());
+ } while (scanner->current_token() != Token::EOS);
+ }
+
+ // For each position:
+ // - Scan through file,
+ // - set a bookmark once the position is reached,
+ // - scan a bit more,
+ // - reset to the bookmark, and
+ // - scan until the end.
+ // At each step, compare to the reference token sequence generated above.
+ for (size_t bookmark_pos = 0; bookmark_pos < tokens.size(); bookmark_pos++) {
+ auto scanner = make_scanner(src_simple);
+ Scanner::BookmarkScope bookmark(scanner.get());
+
+ for (size_t i = 0; i < std::min(bookmark_pos + 10, tokens.size()); i++) {
+ if (i == bookmark_pos) {
+ bookmark.Set();
+ }
+ DCHECK_TOK(tokens[i], scanner->Next());
+ }
+
+ bookmark.Apply();
+ for (size_t i = bookmark_pos; i < tokens.size(); i++) {
+ DCHECK_TOK(tokens[i], scanner->Next());
+ }
+ }
+}
+
+TEST(AllThePushbacks) {
+ const struct {
+ const char* src;
+ const Token::Value tokens[5]; // Large enough for any of the test cases.
+ } test_cases[] = {
+ {"<-x", {Token::LT, Token::SUB, Token::IDENTIFIER, Token::EOS}},
+ {"<!x", {Token::LT, Token::NOT, Token::IDENTIFIER, Token::EOS}},
+ {"<!-x",
+ {Token::LT, Token::NOT, Token::SUB, Token::IDENTIFIER, Token::EOS}},
+ {"<!-- xx -->\nx", {Token::IDENTIFIER, Token::EOS}},
+ };
+
+ for (const auto& test_case : test_cases) {
+ auto scanner = make_scanner(test_case.src);
+ for (size_t i = 0; test_case.tokens[i] != Token::EOS; i++) {
+ DCHECK_TOK(test_case.tokens[i], scanner->Next());
+ }
+ DCHECK_TOK(Token::EOS, scanner->Next());
+ }
+}
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 9667afb703..63c25c5b8c 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -319,7 +319,7 @@ static void CheckAccessorArgsCorrect(
CHECK(info.Data()
->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("data"))
.FromJust());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(info.GetIsolate() == CcTest::isolate());
CHECK(info.This() == info.Holder());
CHECK(info.Data()
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index 6e4c6028e9..572487976e 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -13,7 +13,6 @@
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/objects.h"
-#include "src/parsing/parser.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
@@ -239,6 +238,26 @@ void CheckThisNamedPropertyHandler(
.FromJust());
}
+void CheckThisIndexedPropertyDefiner(
+ uint32_t index, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyDefiner));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
+}
+
+void CheckThisNamedPropertyDefiner(
+ Local<Name> property, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyDefiner));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
+}
+
void CheckThisIndexedPropertySetter(
uint32_t index, Local<Value> value,
const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -249,6 +268,23 @@ void CheckThisIndexedPropertySetter(
.FromJust());
}
+void CheckThisIndexedPropertyDescriptor(
+ uint32_t index, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyDescriptor));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
+}
+
+void CheckThisNamedPropertyDescriptor(
+ Local<Name> property, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyDescriptor));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
+}
void CheckThisNamedPropertySetter(
Local<Name> property, Local<Value> value,
@@ -341,11 +377,284 @@ void InterceptorHasOwnPropertyGetter(
void InterceptorHasOwnPropertyGetterGC(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+}
+
+} // namespace
+
+int query_counter_int = 0;
+
+namespace {
+void QueryCallback(Local<Name> property,
+ const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ query_counter_int++;
+}
+
+} // namespace
+
+// Examples that show when the query callback is triggered.
+THREADED_TEST(QueryInterceptor) {
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(0, 0, QueryCallback));
+ LocalContext env;
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ CHECK_EQ(query_counter_int, 0);
+ v8::Local<Value> result =
+ v8_compile("Object.getOwnPropertyDescriptor(obj, 'x');")
+ ->Run(env.local())
+ .ToLocalChecked();
+ CHECK_EQ(query_counter_int, 1);
+ CHECK_EQ(v8::PropertyAttribute::None,
+ static_cast<v8::PropertyAttribute>(
+ result->Int32Value(env.local()).FromJust()));
+
+ v8_compile("Object.defineProperty(obj, 'not_enum', {value: 17});")
+ ->Run(env.local())
+ .ToLocalChecked();
+ CHECK_EQ(query_counter_int, 2);
+
+ v8_compile(
+ "Object.defineProperty(obj, 'enum', {value: 17, enumerable: true, "
+ "writable: true});")
+ ->Run(env.local())
+ .ToLocalChecked();
+ CHECK_EQ(query_counter_int, 3);
+
+ CHECK(v8_compile("obj.propertyIsEnumerable('enum');")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK_EQ(query_counter_int, 4);
+
+ CHECK(!v8_compile("obj.propertyIsEnumerable('not_enum');")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK_EQ(query_counter_int, 5);
+
+ CHECK(v8_compile("obj.hasOwnProperty('enum');")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK_EQ(query_counter_int, 5);
+
+ CHECK(v8_compile("obj.hasOwnProperty('not_enum');")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK_EQ(query_counter_int, 5);
+
+ CHECK(!v8_compile("obj.hasOwnProperty('x');")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK_EQ(query_counter_int, 6);
+
+ CHECK(!v8_compile("obj.propertyIsEnumerable('undef');")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK_EQ(query_counter_int, 7);
+
+ v8_compile("Object.defineProperty(obj, 'enum', {value: 42});")
+ ->Run(env.local())
+ .ToLocalChecked();
+ CHECK_EQ(query_counter_int, 8);
+
+ v8_compile("Object.isFrozen('obj.x');")->Run(env.local()).ToLocalChecked();
+ CHECK_EQ(query_counter_int, 8);
+}
+
+bool get_was_called = false;
+bool set_was_called = false;
+
+int set_was_called_counter = 0;
+
+namespace {
+void GetterCallback(Local<Name> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ get_was_called = true;
+}
+
+void SetterCallback(Local<Name> property, Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ set_was_called = true;
+ set_was_called_counter++;
+}
+
+} // namespace
+
+// Check that get callback is called in defineProperty with accessor descriptor.
+THREADED_TEST(DefinerCallbackAccessorInterceptor) {
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(GetterCallback, SetterCallback));
+ LocalContext env;
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+
+ get_was_called = false;
+ set_was_called = false;
+
+ v8_compile("Object.defineProperty(obj, 'x', {set: function() {return 17;}});")
+ ->Run(env.local())
+ .ToLocalChecked();
+ CHECK_EQ(get_was_called, true);
+ CHECK_EQ(set_was_called, false);
+}
+
+// Check that set callback is called for function declarations.
+THREADED_TEST(SetterCallbackFunctionDeclarationInterceptor) {
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+
+ v8::Local<ObjectTemplate> object_template = templ->InstanceTemplate();
+ object_template->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(nullptr, SetterCallback));
+ v8::Local<v8::Context> ctx =
+ v8::Context::New(CcTest::isolate(), nullptr, object_template);
+
+ set_was_called_counter = 0;
+
+ // Declare function.
+ v8::Local<v8::String> code = v8_str("function x() {return 42;}; x();");
+ CHECK_EQ(42, v8::Script::Compile(ctx, code)
+ .ToLocalChecked()
+ ->Run(ctx)
+ .ToLocalChecked()
+ ->Int32Value(ctx)
+ .FromJust());
+ CHECK_EQ(set_was_called_counter, 1);
+
+ // Redeclare function.
+ code = v8_str("function x() {return 43;}; x();");
+ CHECK_EQ(43, v8::Script::Compile(ctx, code)
+ .ToLocalChecked()
+ ->Run(ctx)
+ .ToLocalChecked()
+ ->Int32Value(ctx)
+ .FromJust());
+ CHECK_EQ(set_was_called_counter, 2);
+
+ // Redefine function.
+ code = v8_str("x = function() {return 44;}; x();");
+ CHECK_EQ(44, v8::Script::Compile(ctx, code)
+ .ToLocalChecked()
+ ->Run(ctx)
+ .ToLocalChecked()
+ ->Int32Value(ctx)
+ .FromJust());
+ CHECK_EQ(set_was_called_counter, 3);
+}
+
+// Check that function re-declarations throw if they are read-only.
+THREADED_TEST(SetterCallbackFunctionDeclarationInterceptorThrow) {
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+
+ v8::Local<ObjectTemplate> object_template = templ->InstanceTemplate();
+ object_template->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(nullptr, SetterCallback));
+ v8::Local<v8::Context> ctx =
+ v8::Context::New(CcTest::isolate(), nullptr, object_template);
+
+ set_was_called = false;
+
+ v8::Local<v8::String> code = v8_str(
+ "function x() {return 42;};"
+ "Object.defineProperty(this, 'x', {"
+ "configurable: false, "
+ "writable: false});"
+ "x();");
+ CHECK_EQ(42, v8::Script::Compile(ctx, code)
+ .ToLocalChecked()
+ ->Run(ctx)
+ .ToLocalChecked()
+ ->Int32Value(ctx)
+ .FromJust());
+
+ CHECK_EQ(set_was_called, true);
+
+ v8::TryCatch try_catch(CcTest::isolate());
+ set_was_called = false;
+
+ // Redeclare function that is read-only.
+ code = v8_str("function x() {return 43;};");
+ CHECK(v8::Script::Compile(ctx, code).ToLocalChecked()->Run(ctx).IsEmpty());
+ CHECK(try_catch.HasCaught());
+
+ CHECK_EQ(set_was_called, false);
+}
+
+bool get_was_called_in_order = false;
+bool define_was_called_in_order = false;
+
+namespace {
+
+void GetterCallbackOrder(Local<Name> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ get_was_called_in_order = true;
+ CHECK_EQ(define_was_called_in_order, true);
+ info.GetReturnValue().Set(property);
+}
+
+void DefinerCallbackOrder(Local<Name> property,
+ const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK_EQ(get_was_called_in_order, false); // Define called before get.
+ define_was_called_in_order = true;
}
} // namespace
+// Check that definer callback is called before getter callback.
+THREADED_TEST(DefinerCallbackGetAndDefine) {
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ GetterCallbackOrder, SetterCallback, 0, 0, 0, DefinerCallbackOrder));
+ LocalContext env;
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+
+ CHECK_EQ(get_was_called_in_order, false);
+ CHECK_EQ(define_was_called_in_order, false);
+
+ v8_compile("Object.defineProperty(obj, 'x', {set: function() {return 17;}});")
+ ->Run(env.local())
+ .ToLocalChecked();
+ CHECK_EQ(get_was_called_in_order, true);
+ CHECK_EQ(define_was_called_in_order, true);
+}
THREADED_TEST(InterceptorHasOwnProperty) {
LocalContext context;
@@ -1103,6 +1412,503 @@ THREADED_TEST(NamedPropertyHandlerGetter) {
.FromJust());
}
+namespace {
+void NotInterceptingPropertyDefineCallback(
+ Local<Name> name, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ // Do not intercept by not calling info.GetReturnValue().Set().
+}
+
+void InterceptingPropertyDefineCallback(
+ Local<Name> name, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ // Intercept the callback by setting a non-empty handle
+ info.GetReturnValue().Set(name);
+}
+
+void CheckDescriptorInDefineCallback(
+ Local<Name> name, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(!desc.has_writable());
+ CHECK(!desc.has_value());
+ CHECK(!desc.has_enumerable());
+ CHECK(desc.has_configurable());
+ CHECK(!desc.configurable());
+ CHECK(desc.has_get());
+ CHECK(desc.get()->IsFunction());
+ CHECK(desc.has_set());
+ CHECK(desc.set()->IsUndefined());
+ // intercept the callback by setting a non-empty handle
+ info.GetReturnValue().Set(name);
+}
+} // namespace
+
+THREADED_TEST(PropertyDefinerCallback) {
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+
+ { // Intercept defineProperty()
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ 0, 0, 0, 0, 0, NotInterceptingPropertyDefineCallback));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ const char* code =
+ "obj.x = 17; "
+ "Object.defineProperty(obj, 'x', {value: 42});"
+ "obj.x;";
+ CHECK_EQ(42, v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
+
+ { // Intercept defineProperty() for correct accessor descriptor
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ 0, 0, 0, 0, 0, CheckDescriptorInDefineCallback));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ const char* code =
+ "obj.x = 17; "
+ "Object.defineProperty(obj, 'x', {"
+ "get: function(){ return 42; }, "
+ "set: undefined,"
+ "configurable: 0"
+ "});"
+ "obj.x;";
+ CHECK_EQ(17, v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
+
+ { // Do not intercept defineProperty()
+ v8::Local<v8::FunctionTemplate> templ2 =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ2->InstanceTemplate()->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(
+ 0, 0, 0, 0, 0, InterceptingPropertyDefineCallback));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ2->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+
+ const char* code =
+ "obj.x = 17; "
+ "Object.defineProperty(obj, 'x', {value: 42});"
+ "obj.x;";
+ CHECK_EQ(17, v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
+}
+
+namespace {
+void NotInterceptingPropertyDefineCallbackIndexed(
+ uint32_t index, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ // Do not intercept by not calling info.GetReturnValue().Set()
+}
+
+void InterceptingPropertyDefineCallbackIndexed(
+ uint32_t index, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ // intercept the callback by setting a non-empty handle
+ info.GetReturnValue().Set(index);
+}
+
+void CheckDescriptorInDefineCallbackIndexed(
+ uint32_t index, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(!desc.has_writable());
+ CHECK(!desc.has_value());
+ CHECK(desc.has_enumerable());
+ CHECK(desc.enumerable());
+ CHECK(!desc.has_configurable());
+ CHECK(desc.has_get());
+ CHECK(desc.get()->IsFunction());
+ CHECK(desc.has_set());
+ CHECK(desc.set()->IsUndefined());
+ // intercept the callback by setting a non-empty handle
+ info.GetReturnValue().Set(index);
+}
+} // namespace
+
+THREADED_TEST(PropertyDefinerCallbackIndexed) {
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+
+ { // Intercept defineProperty()
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(
+ v8::IndexedPropertyHandlerConfiguration(
+ 0, 0, 0, 0, 0, NotInterceptingPropertyDefineCallbackIndexed));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ const char* code =
+ "obj[2] = 17; "
+ "Object.defineProperty(obj, 2, {value: 42});"
+ "obj[2];";
+ CHECK_EQ(42, v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
+
+ { // Intercept defineProperty() for correct accessor descriptor
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(
+ v8::IndexedPropertyHandlerConfiguration(
+ 0, 0, 0, 0, 0, CheckDescriptorInDefineCallbackIndexed));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ const char* code =
+ "obj[2] = 17; "
+ "Object.defineProperty(obj, 2, {"
+ "get: function(){ return 42; }, "
+ "set: undefined,"
+ "enumerable: true"
+ "});"
+ "obj[2];";
+ CHECK_EQ(17, v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
+
+ { // Do not intercept defineProperty()
+ v8::Local<v8::FunctionTemplate> templ2 =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ2->InstanceTemplate()->SetHandler(
+ v8::IndexedPropertyHandlerConfiguration(
+ 0, 0, 0, 0, 0, InterceptingPropertyDefineCallbackIndexed));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ2->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+
+ const char* code =
+ "obj[2] = 17; "
+ "Object.defineProperty(obj, 2, {value: 42});"
+ "obj[2];";
+ CHECK_EQ(17, v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
+}
+
+// Test that freeze() is intercepted.
+THREADED_TEST(PropertyDefinerCallbackForFreeze) {
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ 0, 0, 0, 0, 0, InterceptingPropertyDefineCallback));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ const char* code =
+ "obj.x = 17; "
+ "Object.freeze(obj.x); "
+ "Object.isFrozen(obj.x);";
+
+ CHECK(v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+}
+
+// Check that the descriptor passed to the callback is enumerable.
+namespace {
+void CheckEnumerablePropertyDefineCallback(
+ Local<Name> name, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(desc.has_value());
+ CHECK_EQ(42, desc.value()
+ ->Int32Value(info.GetIsolate()->GetCurrentContext())
+ .FromJust());
+ CHECK(desc.has_enumerable());
+ CHECK(desc.enumerable());
+ CHECK(!desc.has_writable());
+
+ // intercept the callback by setting a non-empty handle
+ info.GetReturnValue().Set(name);
+}
+} // namespace
+THREADED_TEST(PropertyDefinerCallbackEnumerable) {
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ 0, 0, 0, 0, 0, CheckEnumerablePropertyDefineCallback));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ const char* code =
+ "obj.x = 17; "
+ "Object.defineProperty(obj, 'x', {value: 42, enumerable: true});"
+ "obj.x;";
+ CHECK_EQ(17, v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+}
+
+// Check that the descriptor passed to the callback is configurable.
+namespace {
+void CheckConfigurablePropertyDefineCallback(
+ Local<Name> name, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(desc.has_value());
+ CHECK_EQ(42, desc.value()
+ ->Int32Value(info.GetIsolate()->GetCurrentContext())
+ .FromJust());
+ CHECK(desc.has_configurable());
+ CHECK(desc.configurable());
+
+ // intercept the callback by setting a non-empty handle
+ info.GetReturnValue().Set(name);
+}
+} // namespace
+THREADED_TEST(PropertyDefinerCallbackConfigurable) {
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ 0, 0, 0, 0, 0, CheckConfigurablePropertyDefineCallback));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ const char* code =
+ "obj.x = 17; "
+ "Object.defineProperty(obj, 'x', {value: 42, configurable: true});"
+ "obj.x;";
+ CHECK_EQ(17, v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+}
+
+// Check that the descriptor passed to the callback is writable.
+namespace {
+void CheckWritablePropertyDefineCallback(
+ Local<Name> name, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(desc.has_writable());
+ CHECK(desc.writable());
+
+ // intercept the callback by setting a non-empty handle
+ info.GetReturnValue().Set(name);
+}
+} // namespace
+THREADED_TEST(PropertyDefinerCallbackWritable) {
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ 0, 0, 0, 0, 0, CheckWritablePropertyDefineCallback));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ const char* code =
+ "obj.x = 17; "
+ "Object.defineProperty(obj, 'x', {value: 42, writable: true});"
+ "obj.x;";
+ CHECK_EQ(17, v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+}
+
+// Check that the descriptor passed to the callback has a getter.
+namespace {
+void CheckGetterPropertyDefineCallback(
+ Local<Name> name, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(desc.has_get());
+ CHECK(!desc.has_set());
+ // intercept the callback by setting a non-empty handle
+ info.GetReturnValue().Set(name);
+}
+} // namespace
+THREADED_TEST(PropertyDefinerCallbackWithGetter) {
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ 0, 0, 0, 0, 0, CheckGetterPropertyDefineCallback));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ const char* code =
+ "obj.x = 17;"
+ "Object.defineProperty(obj, 'x', {get: function() {return 42;}});"
+ "obj.x;";
+ CHECK_EQ(17, v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+}
+
+// Check that the descriptor passed to the callback has a setter.
+namespace {
+void CheckSetterPropertyDefineCallback(
+ Local<Name> name, const v8::PropertyDescriptor& desc,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(desc.has_set());
+ CHECK(!desc.has_get());
+ // intercept the callback by setting a non-empty handle
+ info.GetReturnValue().Set(name);
+}
+} // namespace
+THREADED_TEST(PropertyDefinerCallbackWithSetter) {
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ 0, 0, 0, 0, 0, CheckSetterPropertyDefineCallback));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ const char* code =
+ "Object.defineProperty(obj, 'x', {set: function() {return 42;}});"
+ "obj.x = 17;";
+ CHECK_EQ(17, v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+}
+
+namespace {
+void EmptyPropertyDescriptorCallback(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ // Do not intercept by not calling info.GetReturnValue().Set().
+}
+
+void InterceptingPropertyDescriptorCallback(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ // Intercept the callback by setting a different descriptor.
+ const char* code =
+ "var desc = {value: 42};"
+ "desc;";
+ Local<Value> descriptor = v8_compile(code)
+ ->Run(info.GetIsolate()->GetCurrentContext())
+ .ToLocalChecked();
+ info.GetReturnValue().Set(descriptor);
+}
+} // namespace
+
+THREADED_TEST(PropertyDescriptorCallback) {
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+
+ { // Normal behavior of getOwnPropertyDescriptor() with empty callback.
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ 0, 0, EmptyPropertyDescriptorCallback, 0, 0, 0));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ const char* code =
+ "obj.x = 17; "
+ "var desc = Object.getOwnPropertyDescriptor(obj, 'x');"
+ "desc.value;";
+ CHECK_EQ(17, v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
+
+ { // Intercept getOwnPropertyDescriptor().
+ v8::Local<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ 0, 0, InterceptingPropertyDescriptorCallback, 0, 0, 0));
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ const char* code =
+ "obj.x = 17; "
+ "var desc = Object.getOwnPropertyDescriptor(obj, 'x');"
+ "desc.value;";
+ CHECK_EQ(42, v8_compile(code)
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ }
+}
int echo_indexed_call_count = 0;
@@ -1145,7 +1951,6 @@ THREADED_TEST(PropertyHandlerInPrototype) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- // Set up a prototype chain with three interceptors.
v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->InstanceTemplate()->SetHandler(v8::IndexedPropertyHandlerConfiguration(
CheckThisIndexedPropertyHandler, CheckThisIndexedPropertySetter,
@@ -1194,6 +1999,63 @@ THREADED_TEST(PropertyHandlerInPrototype) {
CompileRun("for (var p in obj) ;");
}
+TEST(PropertyHandlerInPrototypeWithDefine) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ templ->InstanceTemplate()->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+ CheckThisIndexedPropertyHandler, CheckThisIndexedPropertySetter,
+ CheckThisIndexedPropertyDescriptor, CheckThisIndexedPropertyDeleter,
+ CheckThisIndexedPropertyEnumerator, CheckThisIndexedPropertyDefiner));
+
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ CheckThisNamedPropertyHandler, CheckThisNamedPropertySetter,
+ CheckThisNamedPropertyDescriptor, CheckThisNamedPropertyDeleter,
+ CheckThisNamedPropertyEnumerator, CheckThisNamedPropertyDefiner));
+
+ bottom = templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+ Local<v8::Object> top = templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+ Local<v8::Object> middle = templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+
+ bottom->SetPrototype(env.local(), middle).FromJust();
+ middle->SetPrototype(env.local(), top).FromJust();
+ env->Global()->Set(env.local(), v8_str("obj"), bottom).FromJust();
+
+ // Indexed and named get.
+ CompileRun("obj[0]");
+ CompileRun("obj.x");
+
+ // Indexed and named set.
+ CompileRun("obj[1] = 42");
+ CompileRun("obj.y = 42");
+
+ // Indexed and named deleter.
+ CompileRun("delete obj[0]");
+ CompileRun("delete obj.x");
+
+ // Enumerators.
+ CompileRun("for (var p in obj) ;");
+
+ // Indexed and named definer.
+ CompileRun("Object.defineProperty(obj, 2, {});");
+ CompileRun("Object.defineProperty(obj, 'z', {});");
+
+ // Indexed and named propertyDescriptor.
+ CompileRun("Object.getOwnPropertyDescriptor(obj, 2);");
+ CompileRun("Object.getOwnPropertyDescriptor(obj, 'z');");
+}
+
bool is_bootstrapping = false;
static void PrePropertyHandlerGet(
@@ -3789,6 +4651,9 @@ THREADED_TEST(NonMaskingInterceptorOwnProperty) {
ExpectInt32("obj.whatever", 239);
CompileRun("obj.whatever = 4;");
+
+ // obj.whatever exists, thus it is not affected by the non-masking
+ // interceptor.
ExpectInt32("obj.whatever", 4);
CompileRun("delete obj.whatever;");
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 484d2f3226..8317a06aa2 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -47,7 +47,7 @@
#include "src/execution.h"
#include "src/futex-emulation.h"
#include "src/objects.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/preparse-data.h"
#include "src/profiler/cpu-profiler.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
@@ -453,11 +453,11 @@ THREADED_TEST(ScriptUsingStringResource) {
CHECK_EQ(static_cast<const String::ExternalStringResourceBase*>(resource),
source->GetExternalStringResourceBase(&encoding));
CHECK_EQ(String::TWO_BYTE_ENCODING, encoding);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(0, dispose_count);
}
CcTest::i_isolate()->compilation_cache()->Clear();
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(1, dispose_count);
}
@@ -484,11 +484,11 @@ THREADED_TEST(ScriptUsingOneByteStringResource) {
Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(0, dispose_count);
}
CcTest::i_isolate()->compilation_cache()->Clear();
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(1, dispose_count);
}
@@ -504,8 +504,8 @@ THREADED_TEST(ScriptMakingExternalString) {
v8::NewStringType::kNormal)
.ToLocalChecked();
// Trigger GCs so that the newly allocated string moves to old gen.
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
CHECK_EQ(source->IsExternal(), false);
CHECK_EQ(source->IsExternalOneByte(), false);
String::Encoding encoding = String::UNKNOWN_ENCODING;
@@ -518,11 +518,11 @@ THREADED_TEST(ScriptMakingExternalString) {
Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(0, dispose_count);
}
CcTest::i_isolate()->compilation_cache()->Clear();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(1, dispose_count);
}
@@ -535,8 +535,8 @@ THREADED_TEST(ScriptMakingExternalOneByteString) {
v8::HandleScope scope(env->GetIsolate());
Local<String> source = v8_str(c_source);
// Trigger GCs so that the newly allocated string moves to old gen.
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
bool success = source->MakeExternal(
new TestOneByteResource(i::StrDup(c_source), &dispose_count));
CHECK(success);
@@ -544,11 +544,11 @@ THREADED_TEST(ScriptMakingExternalOneByteString) {
Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(0, dispose_count);
}
CcTest::i_isolate()->compilation_cache()->Clear();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(1, dispose_count);
}
@@ -558,8 +558,8 @@ TEST(MakingExternalStringConditions) {
v8::HandleScope scope(env->GetIsolate());
// Free some space in the new space so that we can check freshness.
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
uint16_t* two_byte_string = AsciiToTwoByteString("s1");
Local<String> local_string =
@@ -571,8 +571,8 @@ TEST(MakingExternalStringConditions) {
// We should refuse to externalize new space strings.
CHECK(!local_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
// Old space strings should be accepted.
CHECK(local_string->CanMakeExternal());
}
@@ -583,15 +583,15 @@ TEST(MakingExternalOneByteStringConditions) {
v8::HandleScope scope(env->GetIsolate());
// Free some space in the new space so that we can check freshness.
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
Local<String> local_string = v8_str("s1");
// We should refuse to externalize new space strings.
CHECK(!local_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
// Old space strings should be accepted.
CHECK(local_string->CanMakeExternal());
}
@@ -612,8 +612,8 @@ TEST(MakingExternalUnalignedOneByteString) {
// Trigger GCs so that the newly allocated string moves to old gen.
i::heap::SimulateFullSpace(CcTest::heap()->old_space());
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
// Turn into external string with unaligned resource data.
const char* c_cons = "_abcdefghijklmnopqrstuvwxyz";
@@ -626,8 +626,8 @@ TEST(MakingExternalUnalignedOneByteString) {
CHECK(success);
// Trigger GCs and force evacuation.
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage(i::Heap::kReduceMemoryFootprintMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kReduceMemoryFootprintMask);
}
@@ -642,14 +642,14 @@ THREADED_TEST(UsingExternalString) {
.ToLocalChecked();
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
// Trigger GCs so that the newly allocated string moves to old gen.
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol =
factory->InternalizeString(istring);
CHECK(isymbol->IsInternalizedString());
}
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
@@ -665,14 +665,14 @@ THREADED_TEST(UsingExternalOneByteString) {
.ToLocalChecked();
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
// Trigger GCs so that the newly allocated string moves to old gen.
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol =
factory->InternalizeString(istring);
CHECK(isymbol->IsInternalizedString());
}
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
@@ -739,12 +739,12 @@ THREADED_TEST(ScavengeExternalString) {
new TestResource(two_byte_string, &dispose_count))
.ToLocalChecked();
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
in_new_space = CcTest::heap()->InNewSpace(*istring);
CHECK(in_new_space || CcTest::heap()->old_space()->Contains(*istring));
CHECK_EQ(0, dispose_count);
}
- CcTest::heap()->CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_SPACE);
+ CcTest::CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_SPACE);
CHECK_EQ(1, dispose_count);
}
@@ -763,12 +763,12 @@ THREADED_TEST(ScavengeExternalOneByteString) {
new TestOneByteResource(i::StrDup(one_byte_string), &dispose_count))
.ToLocalChecked();
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
in_new_space = CcTest::heap()->InNewSpace(*istring);
CHECK(in_new_space || CcTest::heap()->old_space()->Contains(*istring));
CHECK_EQ(0, dispose_count);
}
- CcTest::heap()->CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_SPACE);
+ CcTest::CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_SPACE);
CHECK_EQ(1, dispose_count);
}
@@ -812,11 +812,11 @@ TEST(ExternalStringWithDisposeHandling) {
Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(0, TestOneByteResourceWithDisposeControl::dispose_count);
}
CcTest::i_isolate()->compilation_cache()->Clear();
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(1, TestOneByteResourceWithDisposeControl::dispose_calls);
CHECK_EQ(0, TestOneByteResourceWithDisposeControl::dispose_count);
@@ -835,11 +835,11 @@ TEST(ExternalStringWithDisposeHandling) {
Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(0, TestOneByteResourceWithDisposeControl::dispose_count);
}
CcTest::i_isolate()->compilation_cache()->Clear();
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(1, TestOneByteResourceWithDisposeControl::dispose_calls);
CHECK_EQ(1, TestOneByteResourceWithDisposeControl::dispose_count);
}
@@ -897,8 +897,8 @@ THREADED_TEST(StringConcat) {
CHECK_EQ(68, value->Int32Value(env.local()).FromJust());
}
CcTest::i_isolate()->compilation_cache()->Clear();
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
@@ -1581,6 +1581,26 @@ THREADED_TEST(IsGeneratorFunctionOrObject) {
CHECK(!func->IsGeneratorObject());
}
+THREADED_TEST(IsAsyncFunction) {
+ i::FLAG_harmony_async_await = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ CompileRun("async function foo() {}");
+ v8::Local<Value> foo = CompileRun("foo");
+
+ CHECK(foo->IsAsyncFunction());
+ CHECK(foo->IsFunction());
+ CHECK(!foo->IsGeneratorFunction());
+ CHECK(!foo->IsGeneratorObject());
+
+ CompileRun("function bar() {}");
+ v8::Local<Value> bar = CompileRun("bar");
+
+ CHECK(!bar->IsAsyncFunction());
+ CHECK(bar->IsFunction());
+}
THREADED_TEST(ArgumentsObject) {
LocalContext env;
@@ -2651,7 +2671,7 @@ static void CheckAlignedPointerInInternalField(Local<v8::Object> obj,
void* value) {
CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
obj->SetAlignedPointerInInternalField(0, value);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(value, obj->GetAlignedPointerFromInternalField(0));
}
@@ -2707,14 +2727,14 @@ THREADED_TEST(SetAlignedPointerInInternalFields) {
void* values[] = {heap_allocated_1, heap_allocated_2};
obj->SetAlignedPointerInInternalFields(2, indices, values);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(heap_allocated_1, obj->GetAlignedPointerFromInternalField(0));
CHECK_EQ(heap_allocated_2, obj->GetAlignedPointerFromInternalField(1));
indices[0] = 1;
indices[1] = 0;
obj->SetAlignedPointerInInternalFields(2, indices, values);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(heap_allocated_2, obj->GetAlignedPointerFromInternalField(0));
CHECK_EQ(heap_allocated_1, obj->GetAlignedPointerFromInternalField(1));
@@ -2726,7 +2746,7 @@ static void CheckAlignedPointerInEmbedderData(LocalContext* env, int index,
void* value) {
CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
(*env)->SetAlignedPointerInEmbedderData(index, value);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(value, (*env)->GetAlignedPointerFromEmbedderData(index));
}
@@ -2756,7 +2776,7 @@ THREADED_TEST(EmbedderDataAlignedPointers) {
for (int i = 0; i < 100; i++) {
env->SetAlignedPointerInEmbedderData(i, AlignedTestPointer(i));
}
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
for (int i = 0; i < 100; i++) {
CHECK_EQ(AlignedTestPointer(i), env->GetAlignedPointerFromEmbedderData(i));
}
@@ -2788,7 +2808,7 @@ THREADED_TEST(IdentityHash) {
// Ensure that the test starts with an fresh heap to test whether the hash
// code is based on the address.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
Local<v8::Object> obj = v8::Object::New(isolate);
int hash = obj->GetIdentityHash();
int hash1 = obj->GetIdentityHash();
@@ -2798,7 +2818,7 @@ THREADED_TEST(IdentityHash) {
// objects should not be assigned the same hash code. If the test below fails
// the random number generator should be evaluated.
CHECK_NE(hash, hash2);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
int hash3 = v8::Object::New(isolate)->GetIdentityHash();
// Make sure that the identity hash is not based on the initial address of
// the object alone. If the test below fails the random number generator
@@ -2874,7 +2894,7 @@ TEST(SymbolIdentityHash) {
int hash = symbol->GetIdentityHash();
int hash1 = symbol->GetIdentityHash();
CHECK_EQ(hash, hash1);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
int hash3 = symbol->GetIdentityHash();
CHECK_EQ(hash, hash3);
}
@@ -2885,7 +2905,7 @@ TEST(SymbolIdentityHash) {
int hash = js_symbol->GetIdentityHash();
int hash1 = js_symbol->GetIdentityHash();
CHECK_EQ(hash, hash1);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
int hash3 = js_symbol->GetIdentityHash();
CHECK_EQ(hash, hash3);
}
@@ -2901,7 +2921,7 @@ TEST(StringIdentityHash) {
int hash = str->GetIdentityHash();
int hash1 = str->GetIdentityHash();
CHECK_EQ(hash, hash1);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
int hash3 = str->GetIdentityHash();
CHECK_EQ(hash, hash3);
@@ -2921,7 +2941,7 @@ THREADED_TEST(SymbolProperties) {
v8::Local<v8::Symbol> sym2 = v8::Symbol::New(isolate, v8_str("my-symbol"));
v8::Local<v8::Symbol> sym3 = v8::Symbol::New(isolate, v8_str("sym3"));
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Check basic symbol functionality.
CHECK(sym1->IsSymbol());
@@ -2990,7 +3010,7 @@ THREADED_TEST(SymbolProperties) {
CHECK_EQ(num_props + 1,
obj->GetPropertyNames(env.local()).ToLocalChecked()->Length());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(obj->SetAccessor(env.local(), sym3, SymbolAccessorGetter,
SymbolAccessorSetter)
@@ -3100,7 +3120,7 @@ THREADED_TEST(PrivatePropertiesOnProxies) {
v8::Local<v8::Private> priv2 =
v8::Private::New(isolate, v8_str("my-private"));
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(priv2->Name()
->Equals(env.local(),
@@ -3142,7 +3162,7 @@ THREADED_TEST(PrivatePropertiesOnProxies) {
CHECK_EQ(num_props + 1,
proxy->GetPropertyNames(env.local()).ToLocalChecked()->Length());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Add another property and delete it afterwards to force the object in
// slow case.
@@ -3194,7 +3214,7 @@ THREADED_TEST(PrivateProperties) {
v8::Local<v8::Private> priv2 =
v8::Private::New(isolate, v8_str("my-private"));
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(priv2->Name()
->Equals(env.local(),
@@ -3236,7 +3256,7 @@ THREADED_TEST(PrivateProperties) {
CHECK_EQ(num_props + 1,
obj->GetPropertyNames(env.local()).ToLocalChecked()->Length());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Add another property and delete it afterwards to force the object in
// slow case.
@@ -3385,7 +3405,7 @@ THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
CheckInternalFieldsAreZero(ab);
CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
CHECK(!ab->IsExternal());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
ScopedArrayBufferContents ab_contents(ab->Externalize());
CHECK(ab->IsExternal());
@@ -3661,7 +3681,7 @@ THREADED_TEST(SharedArrayBuffer_ApiInternalToExternal) {
CheckInternalFieldsAreZero(ab);
CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
CHECK(!ab->IsExternal());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
ScopedSharedArrayBufferContents ab_contents(ab->Externalize());
CHECK(ab->IsExternal());
@@ -3778,7 +3798,7 @@ THREADED_TEST(HiddenProperties) {
v8::Local<v8::String> empty = v8_str("");
v8::Local<v8::String> prop_name = v8_str("prop_name");
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Make sure delete of a non-existent hidden value works
obj->DeletePrivate(env.local(), key).FromJust();
@@ -3796,7 +3816,7 @@ THREADED_TEST(HiddenProperties) {
->Int32Value(env.local())
.FromJust());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Make sure we do not find the hidden property.
CHECK(!obj->Has(env.local(), empty).FromJust());
@@ -3820,7 +3840,7 @@ THREADED_TEST(HiddenProperties) {
->Int32Value(env.local())
.FromJust());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Add another property and delete it afterwards to force the object in
// slow case.
@@ -3844,7 +3864,7 @@ THREADED_TEST(HiddenProperties) {
->Int32Value(env.local())
.FromJust());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK(obj->SetPrivate(env.local(), key, v8::Integer::New(isolate, 2002))
.FromJust());
@@ -4135,7 +4155,7 @@ void SecondPassCallback(const v8::WeakCallbackInfo<TwoPassCallbackData>& data) {
if (!trigger_gc) return;
auto data_2 = new TwoPassCallbackData(data.GetIsolate(), instance_counter);
data_2->SetWeak();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
@@ -4156,7 +4176,7 @@ TEST(TwoPassPhantomCallbacks) {
data->SetWeak();
}
CHECK_EQ(static_cast<int>(kLength), instance_counter);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
EmptyMessageQueues(isolate);
CHECK_EQ(0, instance_counter);
}
@@ -4175,7 +4195,7 @@ TEST(TwoPassPhantomCallbacksNestedGc) {
array[10]->MarkTriggerGc();
array[15]->MarkTriggerGc();
CHECK_EQ(static_cast<int>(kLength), instance_counter);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
EmptyMessageQueues(isolate);
CHECK_EQ(0, instance_counter);
}
@@ -4286,8 +4306,7 @@ void TestGlobalValueMap() {
}
CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
if (map.IsWeak()) {
- CcTest::i_isolate()->heap()->CollectAllGarbage(
- i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
} else {
map.Clear();
}
@@ -4518,9 +4537,7 @@ THREADED_TEST(ApiObjectGroups) {
iso->SetReferenceFromGroup(id2, g2c1.handle);
}
// Do a single full GC, ensure incremental marking is stopped.
- v8::internal::Heap* heap =
- reinterpret_cast<v8::internal::Isolate*>(iso)->heap();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// All object should be alive.
CHECK_EQ(0, counter.NumberOfWeakCalls());
@@ -4545,7 +4562,7 @@ THREADED_TEST(ApiObjectGroups) {
iso->SetReferenceFromGroup(id2, g2c1.handle);
}
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// All objects should be gone. 5 global handles in total.
CHECK_EQ(5, counter.NumberOfWeakCalls());
@@ -4556,7 +4573,7 @@ THREADED_TEST(ApiObjectGroups) {
g2c1.handle.SetWeak(&g2c1, &WeakPointerCallback,
v8::WeakCallbackType::kParameter);
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(7, counter.NumberOfWeakCalls());
}
@@ -4623,9 +4640,7 @@ THREADED_TEST(ApiObjectGroupsForSubtypes) {
iso->SetReferenceFromGroup(id2, g2c1.handle);
}
// Do a single full GC, ensure incremental marking is stopped.
- v8::internal::Heap* heap =
- reinterpret_cast<v8::internal::Isolate*>(iso)->heap();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// All object should be alive.
CHECK_EQ(0, counter.NumberOfWeakCalls());
@@ -4650,7 +4665,7 @@ THREADED_TEST(ApiObjectGroupsForSubtypes) {
iso->SetReferenceFromGroup(id2, g2c1.handle);
}
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// All objects should be gone. 5 global handles in total.
CHECK_EQ(5, counter.NumberOfWeakCalls());
@@ -4661,7 +4676,7 @@ THREADED_TEST(ApiObjectGroupsForSubtypes) {
g2c1.handle.SetWeak(&g2c1, &WeakPointerCallback,
v8::WeakCallbackType::kParameter);
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(7, counter.NumberOfWeakCalls());
}
@@ -4746,9 +4761,7 @@ THREADED_TEST(ApiObjectGroupsCycle) {
iso->SetReferenceFromGroup(id4, g1s1.handle);
}
// Do a single full GC
- v8::internal::Heap* heap =
- reinterpret_cast<v8::internal::Isolate*>(iso)->heap();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// All object should be alive.
CHECK_EQ(0, counter.NumberOfWeakCalls());
@@ -4777,7 +4790,7 @@ THREADED_TEST(ApiObjectGroupsCycle) {
iso->SetReferenceFromGroup(id4, g1s1.handle);
}
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// All objects should be gone. 9 global handles in total.
CHECK_EQ(9, counter.NumberOfWeakCalls());
@@ -5077,7 +5090,7 @@ TEST(NativeWeakMap) {
CHECK(value->Equals(env.local(), weak_map->Get(obj2)).FromJust());
CHECK(value->Equals(env.local(), weak_map->Get(sym1)).FromJust());
}
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
{
HandleScope scope(isolate);
CHECK(value->Equals(env.local(), weak_map->Get(local1)).FromJust());
@@ -5099,7 +5112,7 @@ TEST(NativeWeakMap) {
s1.handle.SetWeak(&s1, &WeakPointerCallback,
v8::WeakCallbackType::kParameter);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(3, counter.NumberOfWeakCalls());
CHECK(o1.handle.IsEmpty());
@@ -7458,13 +7471,11 @@ TEST(ExceptionExtensions) {
CHECK(context.IsEmpty());
}
-
static const char* kNativeCallInExtensionSource =
"function call_runtime_last_index_of(x) {"
- " return %StringLastIndexOf(x, 'bob', 10);"
+ " return %StringLastIndexOf(x, 'bob');"
"}";
-
static const char* kNativeCallTest =
"call_runtime_last_index_of('bobbobboellebobboellebobbob');";
@@ -7478,7 +7489,7 @@ TEST(NativeCallInExtensions) {
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Local<Value> result = CompileRun(kNativeCallTest);
- CHECK(result->Equals(context, v8::Integer::New(CcTest::isolate(), 3))
+ CHECK(result->Equals(context, v8::Integer::New(CcTest::isolate(), 24))
.FromJust());
}
@@ -7770,9 +7781,9 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
b->Set(context, v8_str("x"), a).FromJust();
}
if (global_gc) {
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
} else {
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
}
// We are relying on this creating a big flag array and reserving the space
// up front.
@@ -7792,9 +7803,9 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
object_b.handle.MarkIndependent();
CHECK(object_b.handle.IsIndependent());
if (global_gc) {
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
} else {
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
}
// A single GC should be enough to reclaim the memory, since we are using
// phantom handles.
@@ -7891,9 +7902,9 @@ void InternalFieldCallback(bool global_gc) {
}
}
if (global_gc) {
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
} else {
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
}
CHECK_EQ(1729, t1->x());
@@ -7938,9 +7949,9 @@ void v8::internal::HeapTester::ResetWeakHandle(bool global_gc) {
object_a.handle.Reset(iso, a);
object_b.handle.Reset(iso, b);
if (global_gc) {
- CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
} else {
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
}
}
@@ -7956,9 +7967,9 @@ void v8::internal::HeapTester::ResetWeakHandle(bool global_gc) {
CHECK(object_b.handle.IsIndependent());
}
if (global_gc) {
- CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
} else {
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
}
CHECK(object_a.flag);
CHECK(object_b.flag);
@@ -7970,12 +7981,11 @@ THREADED_HEAP_TEST(ResetWeakHandle) {
v8::internal::HeapTester::ResetWeakHandle(true);
}
+static void InvokeScavenge() { CcTest::CollectGarbage(i::NEW_SPACE); }
-static void InvokeScavenge() { CcTest::heap()->CollectGarbage(i::NEW_SPACE); }
-
-
-static void InvokeMarkSweep() { CcTest::heap()->CollectAllGarbage(); }
-
+static void InvokeMarkSweep() {
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+}
static void ForceScavenge2(
const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
@@ -8051,7 +8061,7 @@ static void ArgumentsTestCallback(
CHECK(v8::Integer::New(isolate, 3)->Equals(context, args[2]).FromJust());
CHECK(v8::Undefined(isolate)->Equals(context, args[3]).FromJust());
v8::HandleScope scope(args.GetIsolate());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
@@ -9381,7 +9391,7 @@ static bool security_check_with_gc_called;
static bool SecurityTestCallbackWithGC(Local<v8::Context> accessing_context,
Local<v8::Object> accessed_object,
Local<v8::Value> data) {
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
security_check_with_gc_called = true;
return true;
}
@@ -12169,7 +12179,7 @@ static void InterceptorCallICFastApi(
reinterpret_cast<int*>(v8::External::Cast(*info.Data())->Value());
++(*call_count);
if ((*call_count) % 20 == 0) {
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
}
@@ -12226,8 +12236,8 @@ static void GenerateSomeGarbage() {
void DirectApiCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
static int count = 0;
if (count++ % 3 == 0) {
- CcTest::heap()->CollectAllGarbage();
- // This should move the stub
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ // This should move the stub
GenerateSomeGarbage(); // This should ensure the old stub memory is flushed
}
}
@@ -12296,7 +12306,7 @@ static int p_getter_count_3;
static Local<Value> DoDirectGetter() {
if (++p_getter_count_3 % 3 == 0) {
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
GenerateSomeGarbage();
}
return v8_str("Direct Getter Result");
@@ -14016,8 +14026,8 @@ static void CheckSurvivingGlobalObjectsCount(int expected) {
// the first garbage collection but some of the maps have already
// been marked at that point. Therefore some of the maps are not
// collected until the second garbage collection.
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
int count = GetGlobalObjectsCount();
#ifdef DEBUG
if (count != expected) CcTest::heap()->TracePathToGlobal();
@@ -14118,7 +14128,8 @@ TEST(WeakCallbackApi) {
handle, WeakApiCallback, v8::WeakCallbackType::kParameter);
}
reinterpret_cast<i::Isolate*>(isolate)->heap()->CollectAllGarbage(
- i::Heap::kAbortIncrementalMarkingMask);
+ i::Heap::kAbortIncrementalMarkingMask,
+ i::GarbageCollectionReason::kTesting);
// Verify disposed.
CHECK_EQ(initial_handles, globals->global_handles_count());
}
@@ -14160,7 +14171,7 @@ THREADED_TEST(NewPersistentHandleFromWeakCallback) {
handle1.SetWeak(&handle1, NewPersistentHandleCallback1,
v8::WeakCallbackType::kParameter);
handle2.Reset();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
@@ -14170,7 +14181,7 @@ v8::Persistent<v8::Object> to_be_disposed;
void DisposeAndForceGcCallback2(
const v8::WeakCallbackInfo<v8::Persistent<v8::Object>>& data) {
to_be_disposed.Reset();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
@@ -14194,7 +14205,7 @@ THREADED_TEST(DoNotUseDeletedNodesInSecondLevelGc) {
handle1.SetWeak(&handle1, DisposeAndForceGcCallback1,
v8::WeakCallbackType::kParameter);
to_be_disposed.Reset(isolate, handle2);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
void DisposingCallback(
@@ -14232,7 +14243,7 @@ THREADED_TEST(NoGlobalHandlesOrphaningDueToWeakCallback) {
v8::WeakCallbackType::kParameter);
handle3.SetWeak(&handle3, HandleCreatingCallback1,
v8::WeakCallbackType::kParameter);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
EmptyMessageQueues(isolate);
}
@@ -14282,11 +14293,6 @@ THREADED_TEST(NestedHandleScopeAndContexts) {
}
-static bool MatchPointers(void* key1, void* key2) {
- return key1 == key2;
-}
-
-
struct SymbolInfo {
size_t id;
size_t size;
@@ -14793,14 +14799,14 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
i::Heap* heap = i_isolate->heap();
// Start with a clean slate.
- heap->CollectAllAvailableGarbage("TestSetJitCodeEventHandler_Prepare");
+ heap->CollectAllAvailableGarbage(i::GarbageCollectionReason::kTesting);
{
v8::HandleScope scope(isolate);
- v8::base::HashMap code(MatchPointers);
+ v8::base::HashMap code;
code_map = &code;
- v8::base::HashMap lineinfo(MatchPointers);
+ v8::base::HashMap lineinfo;
jitcode_line_info = &lineinfo;
saw_bar = 0;
@@ -14837,7 +14843,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
}
// Force code movement.
- heap->CollectAllAvailableGarbage("TestSetJitCodeEventHandler_Move");
+ heap->CollectAllAvailableGarbage(i::GarbageCollectionReason::kTesting);
isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
@@ -14863,10 +14869,10 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
CompileRun(script);
// Now get code through initial iteration.
- v8::base::HashMap code(MatchPointers);
+ v8::base::HashMap code;
code_map = &code;
- v8::base::HashMap lineinfo(MatchPointers);
+ v8::base::HashMap lineinfo;
jitcode_line_info = &lineinfo;
isolate->SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting,
@@ -14887,8 +14893,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
isolate->Dispose();
}
-
-THREADED_TEST(ExternalAllocatedMemory) {
+TEST(ExternalAllocatedMemory) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer(isolate);
v8::Local<Context> env(Context::New(isolate));
@@ -14899,7 +14904,8 @@ THREADED_TEST(ExternalAllocatedMemory) {
isolate->AdjustAmountOfExternalAllocatedMemory(kSize));
CHECK_EQ(baseline,
isolate->AdjustAmountOfExternalAllocatedMemory(-kSize));
- const int64_t kTriggerGCSize = i::kExternalAllocationLimit + 1;
+ const int64_t kTriggerGCSize =
+ CcTest::i_isolate()->heap()->external_memory_hard_limit() + 1;
CHECK_EQ(baseline + kTriggerGCSize,
isolate->AdjustAmountOfExternalAllocatedMemory(kTriggerGCSize));
CHECK_EQ(baseline,
@@ -14911,7 +14917,8 @@ TEST(Regress51719) {
i::FLAG_incremental_marking = false;
CcTest::InitializeVM();
- const int64_t kTriggerGCSize = i::kExternalAllocationLimit + 1;
+ const int64_t kTriggerGCSize =
+ CcTest::i_isolate()->heap()->external_memory_hard_limit() + 1;
v8::Isolate* isolate = CcTest::isolate();
isolate->AdjustAmountOfExternalAllocatedMemory(kTriggerGCSize);
}
@@ -16004,6 +16011,292 @@ TEST(DefineOwnProperty) {
}
}
+TEST(DefineProperty) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ v8::Local<v8::Name> p;
+
+ CompileRun(
+ "var a = {};"
+ "var b = [];"
+ "Object.defineProperty(a, 'v1', {value: 23});"
+ "Object.defineProperty(a, 'v2', {value: 23, configurable: true});");
+
+ v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(
+ env->Global()->Get(env.local(), v8_str("a")).ToLocalChecked());
+ v8::Local<v8::Array> arr = v8::Local<v8::Array>::Cast(
+ env->Global()->Get(env.local(), v8_str("b")).ToLocalChecked());
+
+ v8::PropertyDescriptor desc(v8_num(42));
+ {
+ // Use a data descriptor.
+
+ // Cannot change a non-configurable property.
+ p = v8_str("v1");
+ v8::TryCatch try_catch(isolate);
+ CHECK(!obj->DefineProperty(env.local(), p, desc).FromJust());
+ CHECK(!try_catch.HasCaught());
+ v8::Local<v8::Value> val = obj->Get(env.local(), p).ToLocalChecked();
+ CHECK(val->IsNumber());
+ CHECK_EQ(23.0, val->NumberValue(env.local()).FromJust());
+
+ // Change a configurable property.
+ p = v8_str("v2");
+ obj->DefineProperty(env.local(), p, desc).FromJust();
+ CHECK(obj->DefineProperty(env.local(), p, desc).FromJust());
+ CHECK(!try_catch.HasCaught());
+ val = obj->Get(env.local(), p).ToLocalChecked();
+ CHECK(val->IsNumber());
+ CHECK_EQ(42.0, val->NumberValue(env.local()).FromJust());
+
+ // Check that missing writable has default value false.
+ p = v8_str("v12");
+ CHECK(obj->DefineProperty(env.local(), p, desc).FromJust());
+ CHECK(!try_catch.HasCaught());
+ val = obj->Get(env.local(), p).ToLocalChecked();
+ CHECK(val->IsNumber());
+ CHECK_EQ(42.0, val->NumberValue(env.local()).FromJust());
+ v8::PropertyDescriptor desc2(v8_num(43));
+ CHECK(!obj->DefineProperty(env.local(), p, desc2).FromJust());
+ val = obj->Get(env.local(), p).ToLocalChecked();
+ CHECK_EQ(42.0, val->NumberValue(env.local()).FromJust());
+ CHECK(!try_catch.HasCaught());
+ }
+
+ {
+ // Set a regular property.
+ p = v8_str("v3");
+ v8::TryCatch try_catch(isolate);
+ CHECK(obj->DefineProperty(env.local(), p, desc).FromJust());
+ CHECK(!try_catch.HasCaught());
+ v8::Local<v8::Value> val = obj->Get(env.local(), p).ToLocalChecked();
+ CHECK(val->IsNumber());
+ CHECK_EQ(42.0, val->NumberValue(env.local()).FromJust());
+ }
+
+ {
+ // Set an indexed property.
+ v8::TryCatch try_catch(isolate);
+ CHECK(obj->DefineProperty(env.local(), v8_str("1"), desc).FromJust());
+ CHECK(!try_catch.HasCaught());
+ v8::Local<v8::Value> val = obj->Get(env.local(), 1).ToLocalChecked();
+ CHECK(val->IsNumber());
+ CHECK_EQ(42.0, val->NumberValue(env.local()).FromJust());
+ }
+
+ {
+ // No special case when changing array length.
+ v8::TryCatch try_catch(isolate);
+ // Use a writable descriptor, otherwise the next test, that changes
+ // the array length will fail.
+ v8::PropertyDescriptor desc(v8_num(42), true);
+ CHECK(arr->DefineProperty(env.local(), v8_str("length"), desc).FromJust());
+ CHECK(!try_catch.HasCaught());
+ }
+
+ {
+ // Special cases for arrays: index exceeds the array's length.
+ v8::TryCatch try_catch(isolate);
+ CHECK(arr->DefineProperty(env.local(), v8_str("100"), desc).FromJust());
+ CHECK(!try_catch.HasCaught());
+ CHECK_EQ(101U, arr->Length());
+ v8::Local<v8::Value> val = arr->Get(env.local(), 100).ToLocalChecked();
+ CHECK(val->IsNumber());
+ CHECK_EQ(42.0, val->NumberValue(env.local()).FromJust());
+
+ // Set an existing entry.
+ CHECK(arr->DefineProperty(env.local(), v8_str("0"), desc).FromJust());
+ CHECK(!try_catch.HasCaught());
+ val = arr->Get(env.local(), 0).ToLocalChecked();
+ CHECK(val->IsNumber());
+ CHECK_EQ(42.0, val->NumberValue(env.local()).FromJust());
+ }
+
+ {
+ // Use a generic descriptor.
+ v8::PropertyDescriptor desc_generic;
+
+ p = v8_str("v4");
+ v8::TryCatch try_catch(isolate);
+ CHECK(obj->DefineProperty(env.local(), p, desc_generic).FromJust());
+ CHECK(!try_catch.HasCaught());
+ v8::Local<v8::Value> val = obj->Get(env.local(), p).ToLocalChecked();
+ CHECK(val->IsUndefined());
+
+ obj->Set(env.local(), p, v8_num(1)).FromJust();
+ CHECK(!try_catch.HasCaught());
+
+ val = obj->Get(env.local(), p).ToLocalChecked();
+ CHECK(val->IsUndefined());
+ CHECK(!try_catch.HasCaught());
+ }
+
+ {
+ // Use a data descriptor with undefined value.
+ v8::PropertyDescriptor desc_empty(v8::Undefined(isolate));
+
+ v8::TryCatch try_catch(isolate);
+ CHECK(obj->DefineProperty(env.local(), p, desc_empty).FromJust());
+ CHECK(!try_catch.HasCaught());
+ v8::Local<v8::Value> val = obj->Get(env.local(), p).ToLocalChecked();
+ CHECK(val->IsUndefined());
+ CHECK(!try_catch.HasCaught());
+ }
+
+ {
+ // Use a descriptor with attribute == v8::ReadOnly.
+ v8::PropertyDescriptor desc_read_only(v8_num(42), false);
+ desc_read_only.set_enumerable(true);
+ desc_read_only.set_configurable(true);
+
+ p = v8_str("v5");
+ v8::TryCatch try_catch(isolate);
+ CHECK(obj->DefineProperty(env.local(), p, desc_read_only).FromJust());
+ CHECK(!try_catch.HasCaught());
+ v8::Local<v8::Value> val = obj->Get(env.local(), p).ToLocalChecked();
+ CHECK(val->IsNumber());
+ CHECK_EQ(42.0, val->NumberValue(env.local()).FromJust());
+ CHECK_EQ(v8::ReadOnly,
+ obj->GetPropertyAttributes(env.local(), p).FromJust());
+ CHECK(!try_catch.HasCaught());
+ }
+
+ {
+ // Use an accessor descriptor with empty handles.
+ v8::PropertyDescriptor desc_empty(v8::Undefined(isolate),
+ v8::Undefined(isolate));
+
+ p = v8_str("v6");
+ v8::TryCatch try_catch(isolate);
+ CHECK(obj->DefineProperty(env.local(), p, desc_empty).FromJust());
+ CHECK(!try_catch.HasCaught());
+ v8::Local<v8::Value> val = obj->Get(env.local(), p).ToLocalChecked();
+ CHECK(val->IsUndefined());
+ CHECK(!try_catch.HasCaught());
+ }
+
+ {
+ // Use an accessor descriptor.
+ CompileRun(
+ "var set = function(x) {this.val = 2*x;};"
+ "var get = function() {return this.val || 0;};");
+
+ v8::Local<v8::Function> get = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(env.local(), v8_str("get")).ToLocalChecked());
+ v8::Local<v8::Function> set = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(env.local(), v8_str("set")).ToLocalChecked());
+ v8::PropertyDescriptor desc(get, set);
+
+ p = v8_str("v7");
+ v8::TryCatch try_catch(isolate);
+ CHECK(obj->DefineProperty(env.local(), p, desc).FromJust());
+ CHECK(!try_catch.HasCaught());
+
+ v8::Local<v8::Value> val = obj->Get(env.local(), p).ToLocalChecked();
+ CHECK(val->IsNumber());
+ CHECK_EQ(0.0, val->NumberValue(env.local()).FromJust());
+ CHECK(!try_catch.HasCaught());
+
+ obj->Set(env.local(), p, v8_num(7)).FromJust();
+ CHECK(!try_catch.HasCaught());
+
+ val = obj->Get(env.local(), p).ToLocalChecked();
+ CHECK(val->IsNumber());
+ CHECK_EQ(14.0, val->NumberValue(env.local()).FromJust());
+ CHECK(!try_catch.HasCaught());
+ }
+
+ {
+ // Redefine an existing property.
+
+ // desc = {value: 42, enumerable: true}
+ v8::PropertyDescriptor desc(v8_num(42));
+ desc.set_enumerable(true);
+
+ p = v8_str("v8");
+ v8::TryCatch try_catch(isolate);
+ CHECK(obj->DefineProperty(env.local(), p, desc).FromJust());
+ CHECK(!try_catch.HasCaught());
+
+ // desc = {enumerable: true}
+ v8::PropertyDescriptor desc_true((v8::Local<v8::Value>()));
+ desc_true.set_enumerable(true);
+
+ // Successful redefinition because all present attributes have the same
+ // value as the current descriptor.
+ CHECK(obj->DefineProperty(env.local(), p, desc_true).FromJust());
+ CHECK(!try_catch.HasCaught());
+
+ // desc = {}
+ v8::PropertyDescriptor desc_empty;
+ // Successful redefinition because no attributes are overwritten in the
+ // current descriptor.
+ CHECK(obj->DefineProperty(env.local(), p, desc_empty).FromJust());
+ CHECK(!try_catch.HasCaught());
+
+ // desc = {enumerable: false}
+ v8::PropertyDescriptor desc_false((v8::Local<v8::Value>()));
+ desc_false.set_enumerable(false);
+ // Not successful because we cannot define a different value for enumerable.
+ CHECK(!obj->DefineProperty(env.local(), p, desc_false).FromJust());
+ CHECK(!try_catch.HasCaught());
+ }
+
+ {
+ // Redefine a property that has a getter.
+ CompileRun("var get = function() {};");
+ v8::Local<v8::Function> get = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(env.local(), v8_str("get")).ToLocalChecked());
+
+ // desc = {get: function() {}}
+ v8::PropertyDescriptor desc(get, v8::Local<v8::Function>());
+ v8::TryCatch try_catch(isolate);
+
+ p = v8_str("v9");
+ CHECK(obj->DefineProperty(env.local(), p, desc).FromJust());
+ CHECK(!try_catch.HasCaught());
+
+ // desc_empty = {}
+ // Successful because we are not redefining the current getter.
+ v8::PropertyDescriptor desc_empty;
+ CHECK(obj->DefineProperty(env.local(), p, desc_empty).FromJust());
+ CHECK(!try_catch.HasCaught());
+
+ // desc = {get: function() {}}
+ // Successful because we redefine the getter with its current value.
+ CHECK(obj->DefineProperty(env.local(), p, desc).FromJust());
+ CHECK(!try_catch.HasCaught());
+
+ // desc = {get: undefined}
+ v8::PropertyDescriptor desc_undefined(v8::Undefined(isolate),
+ v8::Local<v8::Function>());
+ // Not successful because we cannot redefine with the current value of get
+ // with undefined.
+ CHECK(!obj->DefineProperty(env.local(), p, desc_undefined).FromJust());
+ CHECK(!try_catch.HasCaught());
+ }
+
+ CompileRun("Object.freeze(a);");
+ {
+ // We cannot change non-extensible objects.
+ v8::TryCatch try_catch(isolate);
+ CHECK(!obj->DefineProperty(env.local(), v8_str("v10"), desc).FromJust());
+ CHECK(!try_catch.HasCaught());
+ }
+
+ v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+ templ->SetAccessCheckCallback(AccessAlwaysBlocked);
+ v8::Local<v8::Object> access_checked =
+ templ->NewInstance(env.local()).ToLocalChecked();
+ {
+ v8::TryCatch try_catch(isolate);
+ CHECK(access_checked->DefineProperty(env.local(), v8_str("v11"), desc)
+ .IsNothing());
+ CHECK(try_catch.HasCaught());
+ }
+}
THREADED_TEST(GetCurrentContextWhenNotInContext) {
i::Isolate* isolate = CcTest::i_isolate();
@@ -16177,7 +16470,7 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
"}"
"sum;");
// Force GC to trigger verification.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(28, result->Int32Value(context).FromJust());
// Make sure out-of-range loads do not throw.
@@ -16393,12 +16686,12 @@ static void FixedTypedArrayTestHelper(i::ExternalArrayType array_type,
CHECK_EQ(FixedTypedArrayClass::kInstanceType,
fixed_array->map()->instance_type());
CHECK_EQ(kElementCount, fixed_array->length());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
for (int i = 0; i < kElementCount; i++) {
fixed_array->set(i, static_cast<ElementType>(i));
}
// Force GC to trigger verification.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
for (int i = 0; i < kElementCount; i++) {
CHECK_EQ(static_cast<int64_t>(static_cast<ElementType>(i)),
static_cast<int64_t>(fixed_array->get_scalar(i)));
@@ -16588,10 +16881,10 @@ THREADED_TEST(SkipArrayBufferBackingStoreDuringGC) {
Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, store_ptr, 8);
// Should not crash
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Should not move the pointer
CHECK_EQ(ab->GetContents().Data(), store_ptr);
@@ -16609,15 +16902,15 @@ THREADED_TEST(SkipArrayBufferDuringScavenge) {
reinterpret_cast<uint8_t*>(*reinterpret_cast<uintptr_t*>(*tmp));
// Make `store_ptr` point to from space
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
// Create ArrayBuffer with pointer-that-cannot-be-visited in the backing store
Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, store_ptr, 8);
// Should not crash,
// i.e. backing store pointer should not be treated as a heap object pointer
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
// Use `ab` to silence compiler warning
CHECK_EQ(ab->GetContents().Data(), store_ptr);
@@ -17918,7 +18211,8 @@ TEST(TestIdleNotification) {
bool finished = false;
for (int i = 0; i < 200 && !finished; i++) {
if (i < 10 && CcTest::heap()->incremental_marking()->IsStopped()) {
- CcTest::heap()->StartIdleIncrementalMarking();
+ CcTest::heap()->StartIdleIncrementalMarking(
+ i::GarbageCollectionReason::kTesting);
}
finished = env->GetIsolate()->IdleNotificationDeadline(
(v8::base::TimeTicks::HighResolutionNow().ToInternalValue() /
@@ -17937,7 +18231,7 @@ TEST(TestIdleNotification) {
TEST(Regress2333) {
LocalContext env;
for (int i = 0; i < 3; i++) {
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
}
}
@@ -18075,7 +18369,7 @@ TEST(ExternalizeOldSpaceTwoByteCons) {
->ToString(env.local())
.ToLocalChecked();
CHECK(v8::Utils::OpenHandle(*cons)->IsConsString());
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK(CcTest::heap()->old_space()->Contains(*v8::Utils::OpenHandle(*cons)));
TestResource* resource = new TestResource(
@@ -18099,7 +18393,7 @@ TEST(ExternalizeOldSpaceOneByteCons) {
->ToString(env.local())
.ToLocalChecked();
CHECK(v8::Utils::OpenHandle(*cons)->IsConsString());
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK(CcTest::heap()->old_space()->Contains(*v8::Utils::OpenHandle(*cons)));
TestOneByteResource* resource =
@@ -18143,7 +18437,7 @@ TEST(VisitExternalStrings) {
v8::Local<v8::String> string3 =
v8::String::NewExternalTwoByte(env->GetIsolate(), resource[3])
.ToLocalChecked();
- CcTest::heap()->CollectAllAvailableGarbage(); // Tenure string.
+ CcTest::CollectAllAvailableGarbage(); // Tenure string.
// Turn into a symbol.
i::Handle<i::String> string3_i = v8::Utils::OpenHandle(*string3);
CHECK(!CcTest::i_isolate()->factory()->InternalizeString(
@@ -18230,7 +18524,7 @@ TEST(ExternalInternalizedStringCollectedAtGC) {
// Garbage collector deals swift blows to evil.
CcTest::i_isolate()->compilation_cache()->Clear();
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
// Ring has been destroyed. Free Peoples of Middle-earth Rejoice.
CHECK_EQ(1, destroyed);
@@ -18431,7 +18725,7 @@ TEST(Regress528) {
other_context->Enter();
CompileRun(source_simple);
other_context->Exit();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
@@ -18453,7 +18747,7 @@ TEST(Regress528) {
other_context->Enter();
CompileRun(source_eval);
other_context->Exit();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
@@ -18480,7 +18774,7 @@ TEST(Regress528) {
other_context->Enter();
CompileRun(source_exception);
other_context->Exit();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
@@ -19097,8 +19391,7 @@ void PrologueCallbackAlloc(v8::Isolate* isolate,
Local<Object> obj = Object::New(isolate);
CHECK(!obj.IsEmpty());
- CcTest::heap()->CollectAllGarbage(
- i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
}
@@ -19117,8 +19410,7 @@ void EpilogueCallbackAlloc(v8::Isolate* isolate,
Local<Object> obj = Object::New(isolate);
CHECK(!obj.IsEmpty());
- CcTest::heap()->CollectAllGarbage(
- i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
}
@@ -19131,26 +19423,26 @@ TEST(GCCallbacksOld) {
context->GetIsolate()->AddGCEpilogueCallback(EpilogueCallback);
CHECK_EQ(0, prologue_call_count);
CHECK_EQ(0, epilogue_call_count);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(1, prologue_call_count);
CHECK_EQ(1, epilogue_call_count);
context->GetIsolate()->AddGCPrologueCallback(PrologueCallbackSecond);
context->GetIsolate()->AddGCEpilogueCallback(EpilogueCallbackSecond);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(1, prologue_call_count_second);
CHECK_EQ(1, epilogue_call_count_second);
context->GetIsolate()->RemoveGCPrologueCallback(PrologueCallback);
context->GetIsolate()->RemoveGCEpilogueCallback(EpilogueCallback);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
CHECK_EQ(2, epilogue_call_count_second);
context->GetIsolate()->RemoveGCPrologueCallback(PrologueCallbackSecond);
context->GetIsolate()->RemoveGCEpilogueCallback(EpilogueCallbackSecond);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
@@ -19166,26 +19458,26 @@ TEST(GCCallbacks) {
isolate->AddGCEpilogueCallback(EpilogueCallback);
CHECK_EQ(0, prologue_call_count);
CHECK_EQ(0, epilogue_call_count);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(1, prologue_call_count);
CHECK_EQ(1, epilogue_call_count);
isolate->AddGCPrologueCallback(PrologueCallbackSecond);
isolate->AddGCEpilogueCallback(EpilogueCallbackSecond);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(1, prologue_call_count_second);
CHECK_EQ(1, epilogue_call_count_second);
isolate->RemoveGCPrologueCallback(PrologueCallback);
isolate->RemoveGCEpilogueCallback(EpilogueCallback);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
CHECK_EQ(2, epilogue_call_count_second);
isolate->RemoveGCPrologueCallback(PrologueCallbackSecond);
isolate->RemoveGCEpilogueCallback(EpilogueCallbackSecond);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
@@ -19195,8 +19487,7 @@ TEST(GCCallbacks) {
CHECK_EQ(0, epilogue_call_count_alloc);
isolate->AddGCPrologueCallback(PrologueCallbackAlloc);
isolate->AddGCEpilogueCallback(EpilogueCallbackAlloc);
- CcTest::heap()->CollectAllGarbage(
- i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(1, prologue_call_count_alloc);
CHECK_EQ(1, epilogue_call_count_alloc);
isolate->RemoveGCPrologueCallback(PrologueCallbackAlloc);
@@ -19374,7 +19665,7 @@ TEST(ContainsOnlyOneByte) {
void FailedAccessCheckCallbackGC(Local<v8::Object> target,
v8::AccessType type,
Local<v8::Value> data) {
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CcTest::isolate()->ThrowException(
v8::Exception::Error(v8_str("cross context")));
}
@@ -19817,12 +20108,10 @@ class InitDefaultIsolateThread : public v8::base::Thread {
void Run() {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- const intptr_t pageSizeMult =
- v8::internal::Page::kPageSize / v8::internal::MB;
switch (testCase_) {
case SetResourceConstraints: {
- create_params.constraints.set_max_semi_space_size(1 * pageSizeMult);
- create_params.constraints.set_max_old_space_size(4 * pageSizeMult);
+ create_params.constraints.set_max_semi_space_size(1);
+ create_params.constraints.set_max_old_space_size(6);
break;
}
default:
@@ -19999,7 +20288,7 @@ TEST(DontDeleteCellLoadIC) {
"})()",
"ReferenceError: cell is not defined");
CompileRun("cell = \"new_second\";");
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
ExpectString("readCell()", "new_second");
ExpectString("readCell()", "new_second");
}
@@ -20069,8 +20358,8 @@ TEST(PersistentHandleInNewSpaceVisitor) {
object1.SetWrapperClassId(42);
CHECK_EQ(42, object1.WrapperClassId());
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
v8::Persistent<v8::Object> object2(isolate, v8::Object::New(isolate));
CHECK_EQ(0, object2.WrapperClassId());
@@ -20747,7 +21036,7 @@ THREADED_TEST(Regress1516) {
CHECK_LE(1, elements);
// We have to abort incremental marking here to abandon black pages.
- CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
CHECK_GT(elements, CountLiveMapsInMapCache(CcTest::i_isolate()->context()));
}
@@ -21488,12 +21777,15 @@ int* LookupCounter(const char* name) {
const char* kMegamorphicTestProgram =
"function CreateClass(name) {\n"
" var src = \n"
- " ` function ${name}() {};` +\n"
+ " ` function ${name}() { this.a = 0; };` +\n"
" ` ${name}.prototype.foo = function() {};` +\n"
" ` ${name};\\n`;\n"
" return (0, eval)(src);\n"
"}\n"
- "function fooify(obj) { obj.foo(); };\n"
+ "function trigger_ics(obj, v) {\n"
+ " obj.foo();\n"
+ " obj.a = v;\n"
+ "};\n"
"var objs = [];\n"
"for (var i = 0; i < 50; i++) {\n"
" var Class = CreateClass('Class' + i);\n"
@@ -21502,7 +21794,7 @@ const char* kMegamorphicTestProgram =
"}\n"
"for (var i = 0; i < 1000; i++) {\n"
" for (var obj of objs) {\n"
- " fooify(obj);\n"
+ " trigger_ics(obj, 1);\n"
" }\n"
"}\n";
@@ -21538,6 +21830,7 @@ void TestStubCache(bool primary) {
i::CodeStub::LoadICTF, i::CodeStub::LoadICTrampolineTF,
i::CodeStub::KeyedLoadIC, i::CodeStub::KeyedLoadICTrampoline,
i::CodeStub::StoreIC, i::CodeStub::StoreICTrampoline,
+ i::CodeStub::StoreICTF, i::CodeStub::StoreICTrampolineTF,
i::CodeStub::KeyedStoreIC, i::CodeStub::KeyedStoreICTrampoline,
};
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -21558,17 +21851,18 @@ void TestStubCache(bool primary) {
int updates = updates_counter - initial_updates;
const int kClassesCount = 50;
const int kIterationsCount = 1000;
- CHECK_LE(kClassesCount, updates);
+ const int kICKinds = 2; // LoadIC and StoreIC
+ CHECK_LE(kClassesCount * kICKinds, updates);
// Check that updates and misses counts are bounded.
// If there are too many updates then most likely the stub cache does not
// work properly.
- CHECK_LE(updates, kClassesCount * 2);
- CHECK_LE(1, misses);
- CHECK_LE(misses, kClassesCount * 2);
+ CHECK_LE(updates, kClassesCount * 2 * kICKinds);
+ CHECK_LE(kICKinds, misses);
+ CHECK_LE(misses, kClassesCount * 2 * kICKinds);
// 2 is for PREMONOMORPHIC and MONOMORPHIC states,
// 4 is for POLYMORPHIC states,
// and all the others probes are for MEGAMORPHIC state.
- CHECK_EQ(kIterationsCount * kClassesCount - 2 - 4, probes);
+ CHECK_EQ((kIterationsCount * kClassesCount - 2 - 4) * kICKinds, probes);
}
isolate->Dispose();
}
@@ -23339,6 +23633,140 @@ TEST(EventLogging) {
CHECK_EQ(1, last_event_status);
}
+TEST(PropertyDescriptor) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ { // empty descriptor
+ v8::PropertyDescriptor desc;
+ CHECK(!desc.has_value());
+ CHECK(!desc.has_set());
+ CHECK(!desc.has_get());
+ CHECK(!desc.has_enumerable());
+ CHECK(!desc.has_configurable());
+ CHECK(!desc.has_writable());
+ }
+ {
+ // data descriptor
+ v8::PropertyDescriptor desc(v8_num(42));
+ desc.set_enumerable(false);
+ CHECK(desc.value() == v8_num(42));
+ CHECK(desc.has_value());
+ CHECK(!desc.has_set());
+ CHECK(!desc.has_get());
+ CHECK(desc.has_enumerable());
+ CHECK(!desc.enumerable());
+ CHECK(!desc.has_configurable());
+ CHECK(!desc.has_writable());
+ }
+ {
+ // data descriptor
+ v8::PropertyDescriptor desc(v8_num(42));
+ desc.set_configurable(true);
+ CHECK(desc.value() == v8_num(42));
+ CHECK(desc.has_value());
+ CHECK(!desc.has_set());
+ CHECK(!desc.has_get());
+ CHECK(desc.has_configurable());
+ CHECK(desc.configurable());
+ CHECK(!desc.has_enumerable());
+ CHECK(!desc.has_writable());
+ }
+ {
+ // data descriptor
+ v8::PropertyDescriptor desc(v8_num(42));
+ desc.set_configurable(false);
+ CHECK(desc.value() == v8_num(42));
+ CHECK(desc.has_value());
+ CHECK(!desc.has_set());
+ CHECK(!desc.has_get());
+ CHECK(desc.has_configurable());
+ CHECK(!desc.configurable());
+ CHECK(!desc.has_enumerable());
+ CHECK(!desc.has_writable());
+ }
+ {
+ // data descriptor
+ v8::PropertyDescriptor desc(v8_num(42), false);
+ CHECK(desc.value() == v8_num(42));
+ CHECK(desc.has_value());
+ CHECK(!desc.has_set());
+ CHECK(!desc.has_get());
+ CHECK(!desc.has_enumerable());
+ CHECK(!desc.has_configurable());
+ CHECK(desc.has_writable());
+ CHECK(!desc.writable());
+ }
+ {
+ // data descriptor
+ v8::PropertyDescriptor desc(v8::Local<v8::Value>(), true);
+ CHECK(!desc.has_value());
+ CHECK(!desc.has_set());
+ CHECK(!desc.has_get());
+ CHECK(!desc.has_enumerable());
+ CHECK(!desc.has_configurable());
+ CHECK(desc.has_writable());
+ CHECK(desc.writable());
+ }
+ {
+ // accessor descriptor
+ CompileRun("var set = function() {return 43;};");
+
+ v8::Local<v8::Function> set =
+ v8::Local<v8::Function>::Cast(context->Global()
+ ->Get(context.local(), v8_str("set"))
+ .ToLocalChecked());
+ v8::PropertyDescriptor desc(v8::Undefined(isolate), set);
+ desc.set_configurable(false);
+ CHECK(!desc.has_value());
+ CHECK(desc.has_get());
+ CHECK(desc.get() == v8::Undefined(isolate));
+ CHECK(desc.has_set());
+ CHECK(desc.set() == set);
+ CHECK(!desc.has_enumerable());
+ CHECK(desc.has_configurable());
+ CHECK(!desc.configurable());
+ CHECK(!desc.has_writable());
+ }
+ {
+ // accessor descriptor with Proxy
+ CompileRun(
+ "var set = new Proxy(function() {}, {});"
+ "var get = undefined;");
+
+ v8::Local<v8::Value> get =
+ v8::Local<v8::Value>::Cast(context->Global()
+ ->Get(context.local(), v8_str("get"))
+ .ToLocalChecked());
+ v8::Local<v8::Function> set =
+ v8::Local<v8::Function>::Cast(context->Global()
+ ->Get(context.local(), v8_str("set"))
+ .ToLocalChecked());
+ v8::PropertyDescriptor desc(get, set);
+ desc.set_configurable(false);
+ CHECK(!desc.has_value());
+ CHECK(desc.get() == v8::Undefined(isolate));
+ CHECK(desc.has_get());
+ CHECK(desc.set() == set);
+ CHECK(desc.has_set());
+ CHECK(!desc.has_enumerable());
+ CHECK(desc.has_configurable());
+ CHECK(!desc.configurable());
+ CHECK(!desc.has_writable());
+ }
+ {
+ // accessor descriptor with empty function handle
+ v8::Local<v8::Function> get = v8::Local<v8::Function>();
+ v8::PropertyDescriptor desc(get, get);
+ CHECK(!desc.has_value());
+ CHECK(!desc.has_get());
+ CHECK(!desc.has_set());
+ CHECK(!desc.has_enumerable());
+ CHECK(!desc.has_configurable());
+ CHECK(!desc.has_writable());
+ }
+}
TEST(Promises) {
LocalContext context;
@@ -24141,7 +24569,7 @@ TEST(StreamingUtf8ScriptWithSplitCharactersValidEdgeCases) {
TEST(StreamingUtf8ScriptWithSplitCharactersInvalidEdgeCases) {
// Test cases where a UTF-8 character is split over several chunks. Those
// cases are not supported (the embedder should give the data in big enough
- // chunks), but we shouldn't crash, just produce a parse error.
+ // chunks), but we shouldn't crash and parse this just fine.
const char* reference = "\xec\x92\x81";
char chunk1[] =
"function foo() {\n"
@@ -24158,7 +24586,7 @@ TEST(StreamingUtf8ScriptWithSplitCharactersInvalidEdgeCases) {
chunk3[0] = reference[2];
const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", NULL};
- RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, false);
+ RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
diff --git a/deps/v8/test/cctest/test-api.h b/deps/v8/test/cctest/test-api.h
index f9a335a7f4..6194a11405 100644
--- a/deps/v8/test/cctest/test-api.h
+++ b/deps/v8/test/cctest/test-api.h
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/api.h"
#include "src/isolate.h"
#include "src/vm-state.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-array-list.cc b/deps/v8/test/cctest/test-array-list.cc
index abcbf110e7..9f2970dd6c 100644
--- a/deps/v8/test/cctest/test-array-list.cc
+++ b/deps/v8/test/cctest/test-array-list.cc
@@ -7,6 +7,13 @@
#include "src/v8.h"
#include "src/factory.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
namespace {
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 93a19c1a14..de024f8869 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -30,11 +30,11 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
-#include "src/arm/assembler-arm-inl.h"
#include "src/arm/simulator-arm.h"
#include "src/base/utils/random-number-generator.h"
#include "src/disassembler.h"
#include "src/factory.h"
+#include "src/macro-assembler.h"
#include "src/ostreams.h"
using namespace v8::base;
@@ -244,9 +244,8 @@ TEST(4) {
Assembler assm(isolate, NULL, 0);
Label L, C;
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatureScope scope(&assm, VFP3);
+ if (CpuFeatures::IsSupported(VFPv3)) {
+ CpuFeatureScope scope(&assm, VFPv3);
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
@@ -267,12 +266,12 @@ TEST(4) {
// Load t.x and t.y, switch values, and store back to the struct.
__ vldr(s0, r4, offsetof(T, x));
- __ vldr(s31, r4, offsetof(T, y));
- __ vmov(s16, s0);
- __ vmov(s0, s31);
- __ vmov(s31, s16);
+ __ vldr(s1, r4, offsetof(T, y));
+ __ vmov(s2, s0);
+ __ vmov(s0, s1);
+ __ vmov(s1, s2);
__ vstr(s0, r4, offsetof(T, x));
- __ vstr(s31, r4, offsetof(T, y));
+ __ vstr(s1, r4, offsetof(T, y));
// Move a literal into a register that can be encoded in the instruction.
__ vmov(d4, 1.0);
@@ -285,13 +284,13 @@ TEST(4) {
// Convert from floating point to integer.
__ vmov(d4, 2.0);
- __ vcvt_s32_f64(s31, d4);
- __ vstr(s31, r4, offsetof(T, i));
+ __ vcvt_s32_f64(s1, d4);
+ __ vstr(s1, r4, offsetof(T, i));
// Convert from integer to floating point.
__ mov(lr, Operand(42));
- __ vmov(s31, lr);
- __ vcvt_f64_s32(d4, s31);
+ __ vmov(s1, lr);
+ __ vcvt_f64_s32(d4, s1);
__ vstr(d4, r4, offsetof(T, f));
// Convert from fixed point to floating point.
@@ -450,62 +449,57 @@ static void TestRoundingMode(VCVTTypes types,
Assembler assm(isolate, NULL, 0);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatureScope scope(&assm, VFP3);
-
- Label wrong_exception;
-
- __ vmrs(r1);
- // Set custom FPSCR.
- __ bic(r2, r1, Operand(kVFPRoundingModeMask | kVFPExceptionMask));
- __ orr(r2, r2, Operand(mode));
- __ vmsr(r2);
-
- // Load value, convert, and move back result to r0 if everything went well.
- __ vmov(d1, value);
- switch (types) {
- case s32_f64:
- __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
- break;
-
- case u32_f64:
- __ vcvt_u32_f64(s0, d1, kFPSCRRounding);
- break;
-
- default:
- UNREACHABLE();
- break;
- }
- // Check for vfp exceptions
- __ vmrs(r2);
- __ tst(r2, Operand(kVFPExceptionMask));
- // Check that we behaved as expected.
- __ b(&wrong_exception,
- expected_exception ? eq : ne);
- // There was no exception. Retrieve the result and return.
- __ vmov(r0, s0);
- __ mov(pc, Operand(lr));
+ Label wrong_exception;
- // The exception behaviour is not what we expected.
- // Load a special value and return.
- __ bind(&wrong_exception);
- __ mov(r0, Operand(11223344));
- __ mov(pc, Operand(lr));
+ __ vmrs(r1);
+ // Set custom FPSCR.
+ __ bic(r2, r1, Operand(kVFPRoundingModeMask | kVFPExceptionMask));
+ __ orr(r2, r2, Operand(mode));
+ __ vmsr(r2);
+
+ // Load value, convert, and move back result to r0 if everything went well.
+ __ vmov(d1, value);
+ switch (types) {
+ case s32_f64:
+ __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
+ break;
+
+ case u32_f64:
+ __ vcvt_u32_f64(s0, d1, kFPSCRRounding);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ // Check for vfp exceptions
+ __ vmrs(r2);
+ __ tst(r2, Operand(kVFPExceptionMask));
+ // Check that we behaved as expected.
+ __ b(&wrong_exception, expected_exception ? eq : ne);
+ // There was no exception. Retrieve the result and return.
+ __ vmov(r0, s0);
+ __ mov(pc, Operand(lr));
- CodeDesc desc;
- assm.GetCode(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ // The exception behaviour is not what we expected.
+ // Load a special value and return.
+ __ bind(&wrong_exception);
+ __ mov(r0, Operand(11223344));
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef DEBUG
- OFStream os(stdout);
- code->Print(os);
+ OFStream os(stdout);
+ code->Print(os);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
- int res =
- reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
- ::printf("res = %d\n", res);
- CHECK_EQ(expected, res);
- }
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ int res =
+ reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+ ::printf("res = %d\n", res);
+ CHECK_EQ(expected, res);
}
@@ -1051,9 +1045,8 @@ TEST(13) {
Assembler assm(isolate, NULL, 0);
Label L, C;
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatureScope scope(&assm, VFP3);
+ if (CpuFeatures::IsSupported(VFPv3)) {
+ CpuFeatureScope scope(&assm, VFPv3);
__ stm(db_w, sp, r4.bit() | lr.bit());
@@ -2381,6 +2374,400 @@ TEST(ARMv8_vsel) {
}
}
+TEST(ARMv8_vminmax_f64) {
+ // Test the vminnm and vmaxnm floating point instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ struct Inputs {
+ double left_;
+ double right_;
+ };
+
+ struct Results {
+ double vminnm_;
+ double vmaxnm_;
+ };
+
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(&assm, ARMv8);
+
+ // Create a helper function:
+ // void TestVminmax(const Inputs* inputs,
+ // Results* results);
+ __ vldr(d1, r0, offsetof(Inputs, left_));
+ __ vldr(d2, r0, offsetof(Inputs, right_));
+
+ __ vminnm(d0, d1, d2);
+ __ vstr(d0, r1, offsetof(Results, vminnm_));
+ __ vmaxnm(d0, d1, d2);
+ __ vstr(d0, r1, offsetof(Results, vmaxnm_));
+
+ __ bx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F4 f = FUNCTION_CAST<F4>(code->entry());
+ Object* dummy = nullptr;
+ USE(dummy);
+
+#define CHECK_VMINMAX(left, right, vminnm, vmaxnm) \
+ do { \
+ Inputs inputs = {left, right}; \
+ Results results; \
+ dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
+ /* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
+ CHECK_EQ(bit_cast<uint64_t>(vminnm), bit_cast<uint64_t>(results.vminnm_)); \
+ CHECK_EQ(bit_cast<uint64_t>(vmaxnm), bit_cast<uint64_t>(results.vmaxnm_)); \
+ } while (0);
+
+ double nan_a = bit_cast<double>(UINT64_C(0x7ff8000000000001));
+ double nan_b = bit_cast<double>(UINT64_C(0x7ff8000000000002));
+
+ CHECK_VMINMAX(1.0, -1.0, -1.0, 1.0);
+ CHECK_VMINMAX(-1.0, 1.0, -1.0, 1.0);
+ CHECK_VMINMAX(0.0, -1.0, -1.0, 0.0);
+ CHECK_VMINMAX(-1.0, 0.0, -1.0, 0.0);
+ CHECK_VMINMAX(-0.0, -1.0, -1.0, -0.0);
+ CHECK_VMINMAX(-1.0, -0.0, -1.0, -0.0);
+ CHECK_VMINMAX(0.0, 1.0, 0.0, 1.0);
+ CHECK_VMINMAX(1.0, 0.0, 0.0, 1.0);
+
+ CHECK_VMINMAX(0.0, 0.0, 0.0, 0.0);
+ CHECK_VMINMAX(-0.0, -0.0, -0.0, -0.0);
+ CHECK_VMINMAX(-0.0, 0.0, -0.0, 0.0);
+ CHECK_VMINMAX(0.0, -0.0, -0.0, 0.0);
+
+ CHECK_VMINMAX(0.0, nan_a, 0.0, 0.0);
+ CHECK_VMINMAX(nan_a, 0.0, 0.0, 0.0);
+ CHECK_VMINMAX(nan_a, nan_b, nan_a, nan_a);
+ CHECK_VMINMAX(nan_b, nan_a, nan_b, nan_b);
+
+#undef CHECK_VMINMAX
+ }
+}
+
+TEST(ARMv8_vminmax_f32) {
+ // Test the vminnm and vmaxnm floating point instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ struct Inputs {
+ float left_;
+ float right_;
+ };
+
+ struct Results {
+ float vminnm_;
+ float vmaxnm_;
+ };
+
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(&assm, ARMv8);
+
+ // Create a helper function:
+ // void TestVminmax(const Inputs* inputs,
+ // Results* results);
+ __ vldr(s1, r0, offsetof(Inputs, left_));
+ __ vldr(s2, r0, offsetof(Inputs, right_));
+
+ __ vminnm(s0, s1, s2);
+ __ vstr(s0, r1, offsetof(Results, vminnm_));
+ __ vmaxnm(s0, s1, s2);
+ __ vstr(s0, r1, offsetof(Results, vmaxnm_));
+
+ __ bx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F4 f = FUNCTION_CAST<F4>(code->entry());
+ Object* dummy = nullptr;
+ USE(dummy);
+
+#define CHECK_VMINMAX(left, right, vminnm, vmaxnm) \
+ do { \
+ Inputs inputs = {left, right}; \
+ Results results; \
+ dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
+ /* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
+ CHECK_EQ(bit_cast<uint32_t>(vminnm), bit_cast<uint32_t>(results.vminnm_)); \
+ CHECK_EQ(bit_cast<uint32_t>(vmaxnm), bit_cast<uint32_t>(results.vmaxnm_)); \
+ } while (0);
+
+ float nan_a = bit_cast<float>(UINT32_C(0x7fc00001));
+ float nan_b = bit_cast<float>(UINT32_C(0x7fc00002));
+
+ CHECK_VMINMAX(1.0f, -1.0f, -1.0f, 1.0f);
+ CHECK_VMINMAX(-1.0f, 1.0f, -1.0f, 1.0f);
+ CHECK_VMINMAX(0.0f, -1.0f, -1.0f, 0.0f);
+ CHECK_VMINMAX(-1.0f, 0.0f, -1.0f, 0.0f);
+ CHECK_VMINMAX(-0.0f, -1.0f, -1.0f, -0.0f);
+ CHECK_VMINMAX(-1.0f, -0.0f, -1.0f, -0.0f);
+ CHECK_VMINMAX(0.0f, 1.0f, 0.0f, 1.0f);
+ CHECK_VMINMAX(1.0f, 0.0f, 0.0f, 1.0f);
+
+ CHECK_VMINMAX(0.0f, 0.0f, 0.0f, 0.0f);
+ CHECK_VMINMAX(-0.0f, -0.0f, -0.0f, -0.0f);
+ CHECK_VMINMAX(-0.0f, 0.0f, -0.0f, 0.0f);
+ CHECK_VMINMAX(0.0f, -0.0f, -0.0f, 0.0f);
+
+ CHECK_VMINMAX(0.0f, nan_a, 0.0f, 0.0f);
+ CHECK_VMINMAX(nan_a, 0.0f, 0.0f, 0.0f);
+ CHECK_VMINMAX(nan_a, nan_b, nan_a, nan_a);
+ CHECK_VMINMAX(nan_b, nan_a, nan_b, nan_b);
+
+#undef CHECK_VMINMAX
+ }
+}
+
+template <typename T, typename Inputs, typename Results>
+static F4 GenerateMacroFloatMinMax(MacroAssembler& assm) {
+ T a = T::from_code(0); // d0/s0
+ T b = T::from_code(1); // d1/s1
+ T c = T::from_code(2); // d2/s2
+
+ // Create a helper function:
+ // void TestFloatMinMax(const Inputs* inputs,
+ // Results* results);
+ Label ool_min_abc, ool_min_aab, ool_min_aba;
+ Label ool_max_abc, ool_max_aab, ool_max_aba;
+
+ Label done_min_abc, done_min_aab, done_min_aba;
+ Label done_max_abc, done_max_aab, done_max_aba;
+
+ // a = min(b, c);
+ __ vldr(b, r0, offsetof(Inputs, left_));
+ __ vldr(c, r0, offsetof(Inputs, right_));
+ __ FloatMin(a, b, c, &ool_min_abc);
+ __ bind(&done_min_abc);
+ __ vstr(a, r1, offsetof(Results, min_abc_));
+
+ // a = min(a, b);
+ __ vldr(a, r0, offsetof(Inputs, left_));
+ __ vldr(b, r0, offsetof(Inputs, right_));
+ __ FloatMin(a, a, b, &ool_min_aab);
+ __ bind(&done_min_aab);
+ __ vstr(a, r1, offsetof(Results, min_aab_));
+
+ // a = min(b, a);
+ __ vldr(b, r0, offsetof(Inputs, left_));
+ __ vldr(a, r0, offsetof(Inputs, right_));
+ __ FloatMin(a, b, a, &ool_min_aba);
+ __ bind(&done_min_aba);
+ __ vstr(a, r1, offsetof(Results, min_aba_));
+
+ // a = max(b, c);
+ __ vldr(b, r0, offsetof(Inputs, left_));
+ __ vldr(c, r0, offsetof(Inputs, right_));
+ __ FloatMax(a, b, c, &ool_max_abc);
+ __ bind(&done_max_abc);
+ __ vstr(a, r1, offsetof(Results, max_abc_));
+
+ // a = max(a, b);
+ __ vldr(a, r0, offsetof(Inputs, left_));
+ __ vldr(b, r0, offsetof(Inputs, right_));
+ __ FloatMax(a, a, b, &ool_max_aab);
+ __ bind(&done_max_aab);
+ __ vstr(a, r1, offsetof(Results, max_aab_));
+
+ // a = max(b, a);
+ __ vldr(b, r0, offsetof(Inputs, left_));
+ __ vldr(a, r0, offsetof(Inputs, right_));
+ __ FloatMax(a, b, a, &ool_max_aba);
+ __ bind(&done_max_aba);
+ __ vstr(a, r1, offsetof(Results, max_aba_));
+
+ __ bx(lr);
+
+ // Generate out-of-line cases.
+ __ bind(&ool_min_abc);
+ __ FloatMinOutOfLine(a, b, c);
+ __ b(&done_min_abc);
+
+ __ bind(&ool_min_aab);
+ __ FloatMinOutOfLine(a, a, b);
+ __ b(&done_min_aab);
+
+ __ bind(&ool_min_aba);
+ __ FloatMinOutOfLine(a, b, a);
+ __ b(&done_min_aba);
+
+ __ bind(&ool_max_abc);
+ __ FloatMaxOutOfLine(a, b, c);
+ __ b(&done_max_abc);
+
+ __ bind(&ool_max_aab);
+ __ FloatMaxOutOfLine(a, a, b);
+ __ b(&done_max_aab);
+
+ __ bind(&ool_max_aba);
+ __ FloatMaxOutOfLine(a, b, a);
+ __ b(&done_max_aba);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = assm.isolate()->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ return FUNCTION_CAST<F4>(code->entry());
+}
+
+TEST(macro_float_minmax_f64) {
+ // Test the FloatMin and FloatMax macros.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, CodeObjectRequired::kYes);
+
+ struct Inputs {
+ double left_;
+ double right_;
+ };
+
+ struct Results {
+ // Check all register aliasing possibilities in order to exercise all
+ // code-paths in the macro assembler.
+ double min_abc_;
+ double min_aab_;
+ double min_aba_;
+ double max_abc_;
+ double max_aab_;
+ double max_aba_;
+ };
+
+ F4 f = GenerateMacroFloatMinMax<DwVfpRegister, Inputs, Results>(assm);
+
+ Object* dummy = nullptr;
+ USE(dummy);
+
+#define CHECK_MINMAX(left, right, min, max) \
+ do { \
+ Inputs inputs = {left, right}; \
+ Results results; \
+ dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
+ /* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
+ CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_abc_)); \
+ CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aab_)); \
+ CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aba_)); \
+ CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_abc_)); \
+ CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_aab_)); \
+ CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_aba_)); \
+ } while (0)
+
+ double nan_a = bit_cast<double>(UINT64_C(0x7ff8000000000001));
+ double nan_b = bit_cast<double>(UINT64_C(0x7ff8000000000002));
+
+ CHECK_MINMAX(1.0, -1.0, -1.0, 1.0);
+ CHECK_MINMAX(-1.0, 1.0, -1.0, 1.0);
+ CHECK_MINMAX(0.0, -1.0, -1.0, 0.0);
+ CHECK_MINMAX(-1.0, 0.0, -1.0, 0.0);
+ CHECK_MINMAX(-0.0, -1.0, -1.0, -0.0);
+ CHECK_MINMAX(-1.0, -0.0, -1.0, -0.0);
+ CHECK_MINMAX(0.0, 1.0, 0.0, 1.0);
+ CHECK_MINMAX(1.0, 0.0, 0.0, 1.0);
+
+ CHECK_MINMAX(0.0, 0.0, 0.0, 0.0);
+ CHECK_MINMAX(-0.0, -0.0, -0.0, -0.0);
+ CHECK_MINMAX(-0.0, 0.0, -0.0, 0.0);
+ CHECK_MINMAX(0.0, -0.0, -0.0, 0.0);
+
+ CHECK_MINMAX(0.0, nan_a, nan_a, nan_a);
+ CHECK_MINMAX(nan_a, 0.0, nan_a, nan_a);
+ CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a);
+ CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b);
+
+#undef CHECK_MINMAX
+}
+
+TEST(macro_float_minmax_f32) {
+ // Test the FloatMin and FloatMax macros.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, CodeObjectRequired::kYes);
+
+ struct Inputs {
+ float left_;
+ float right_;
+ };
+
+ struct Results {
+ // Check all register aliasing possibilities in order to exercise all
+ // code-paths in the macro assembler.
+ float min_abc_;
+ float min_aab_;
+ float min_aba_;
+ float max_abc_;
+ float max_aab_;
+ float max_aba_;
+ };
+
+ F4 f = GenerateMacroFloatMinMax<SwVfpRegister, Inputs, Results>(assm);
+ Object* dummy = nullptr;
+ USE(dummy);
+
+#define CHECK_MINMAX(left, right, min, max) \
+ do { \
+ Inputs inputs = {left, right}; \
+ Results results; \
+ dummy = CALL_GENERATED_CODE(isolate, f, &inputs, &results, 0, 0, 0); \
+ /* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
+ CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_abc_)); \
+ CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_aab_)); \
+ CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_aba_)); \
+ CHECK_EQ(bit_cast<uint32_t>(max), bit_cast<uint32_t>(results.max_abc_)); \
+ CHECK_EQ(bit_cast<uint32_t>(max), bit_cast<uint32_t>(results.max_aab_)); \
+ CHECK_EQ(bit_cast<uint32_t>(max), bit_cast<uint32_t>(results.max_aba_)); \
+ } while (0)
+
+ float nan_a = bit_cast<float>(UINT32_C(0x7fc00001));
+ float nan_b = bit_cast<float>(UINT32_C(0x7fc00002));
+
+ CHECK_MINMAX(1.0f, -1.0f, -1.0f, 1.0f);
+ CHECK_MINMAX(-1.0f, 1.0f, -1.0f, 1.0f);
+ CHECK_MINMAX(0.0f, -1.0f, -1.0f, 0.0f);
+ CHECK_MINMAX(-1.0f, 0.0f, -1.0f, 0.0f);
+ CHECK_MINMAX(-0.0f, -1.0f, -1.0f, -0.0f);
+ CHECK_MINMAX(-1.0f, -0.0f, -1.0f, -0.0f);
+ CHECK_MINMAX(0.0f, 1.0f, 0.0f, 1.0f);
+ CHECK_MINMAX(1.0f, 0.0f, 0.0f, 1.0f);
+
+ CHECK_MINMAX(0.0f, 0.0f, 0.0f, 0.0f);
+ CHECK_MINMAX(-0.0f, -0.0f, -0.0f, -0.0f);
+ CHECK_MINMAX(-0.0f, 0.0f, -0.0f, 0.0f);
+ CHECK_MINMAX(0.0f, -0.0f, -0.0f, 0.0f);
+
+ CHECK_MINMAX(0.0f, nan_a, nan_a, nan_a);
+ CHECK_MINMAX(nan_a, 0.0f, nan_a, nan_a);
+ CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a);
+ CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b);
+
+#undef CHECK_MINMAX
+}
+
TEST(unaligned_loads) {
// All supported ARM targets allow unaligned accesses.
CcTest::InitializeVM();
@@ -2495,6 +2882,55 @@ TEST(unaligned_stores) {
}
}
+TEST(vswp) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(isolate, NULL, 0);
+
+ typedef struct {
+ double result0;
+ double result1;
+ double result2;
+ double result3;
+ } T;
+ T t;
+
+ __ vmov(d0, 1.0);
+ __ vmov(d1, -1.0);
+ __ vswp(d0, d1);
+ __ vstr(d0, r0, offsetof(T, result0));
+ __ vstr(d1, r0, offsetof(T, result1));
+
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ __ vmov(d30, 1.0);
+ __ vmov(d31, -1.0);
+ __ vswp(d30, d31);
+ __ vstr(d30, r0, offsetof(T, result2));
+ __ vstr(d31, r0, offsetof(T, result3));
+ }
+
+ __ bx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(-1.0, t.result0);
+ CHECK_EQ(1.0, t.result1);
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ CHECK_EQ(-1.0, t.result2);
+ CHECK_EQ(1.0, t.result3);
+ }
+}
+
TEST(regress4292_b) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index e73b40e96b..08d3c606c0 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -5386,4 +5386,130 @@ TEST(Trampoline) {
CHECK_EQ(res, 0);
}
+template <class T>
+struct TestCaseMaddMsub {
+ T fr, fs, ft, fd_add, fd_sub;
+};
+
+template <typename T, typename F>
+void helper_madd_msub_maddf_msubf(F func) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+
+ T x = std::sqrt(static_cast<T>(2.0));
+ T y = std::sqrt(static_cast<T>(3.0));
+ T z = std::sqrt(static_cast<T>(5.0));
+ T x2 = 11.11, y2 = 22.22, z2 = 33.33;
+ TestCaseMaddMsub<T> test_cases[] = {
+ {x, y, z, 0.0, 0.0},
+ {x, y, -z, 0.0, 0.0},
+ {x, -y, z, 0.0, 0.0},
+ {x, -y, -z, 0.0, 0.0},
+ {-x, y, z, 0.0, 0.0},
+ {-x, y, -z, 0.0, 0.0},
+ {-x, -y, z, 0.0, 0.0},
+ {-x, -y, -z, 0.0, 0.0},
+ {-3.14, 0.2345, -123.000056, 0.0, 0.0},
+ {7.3, -23.257, -357.1357, 0.0, 0.0},
+ {x2, y2, z2, 0.0, 0.0},
+ {x2, y2, -z2, 0.0, 0.0},
+ {x2, -y2, z2, 0.0, 0.0},
+ {x2, -y2, -z2, 0.0, 0.0},
+ {-x2, y2, z2, 0.0, 0.0},
+ {-x2, y2, -z2, 0.0, 0.0},
+ {-x2, -y2, z2, 0.0, 0.0},
+ {-x2, -y2, -z2, 0.0, 0.0},
+ };
+
+ if (std::is_same<T, float>::value) {
+ __ lwc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
+ __ lwc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs)));
+ __ lwc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft)));
+ __ lwc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
+ } else if (std::is_same<T, double>::value) {
+ __ ldc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
+ __ ldc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs)));
+ __ ldc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft)));
+ __ ldc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
+ } else {
+ UNREACHABLE();
+ }
+
+ func(assm);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
+ TestCaseMaddMsub<T> tc;
+ for (size_t i = 0; i < kTableLength; i++) {
+ tc.fr = test_cases[i].fr;
+ tc.fs = test_cases[i].fs;
+ tc.ft = test_cases[i].ft;
+
+ (CALL_GENERATED_CODE(isolate, f, &tc, 0, 0, 0, 0));
+
+ T res_add = tc.fr + (tc.fs * tc.ft);
+ T res_sub = 0;
+ if (IsMipsArchVariant(kMips32r2)) {
+ res_sub = (tc.fs * tc.ft) - tc.fr;
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ res_sub = tc.fr - (tc.fs * tc.ft);
+ } else {
+ UNREACHABLE();
+ }
+
+ CHECK_EQ(tc.fd_add, res_add);
+ CHECK_EQ(tc.fd_sub, res_sub);
+ }
+}
+
+TEST(madd_msub_s) {
+ if (!IsMipsArchVariant(kMips32r2)) return;
+ helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) {
+ __ madd_s(f10, f4, f6, f8);
+ __ swc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add)));
+ __ msub_s(f16, f4, f6, f8);
+ __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub)));
+ });
+}
+
+TEST(madd_msub_d) {
+ if (!IsMipsArchVariant(kMips32r2)) return;
+ helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) {
+ __ madd_d(f10, f4, f6, f8);
+ __ sdc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
+ __ msub_d(f16, f4, f6, f8);
+ __ sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
+ });
+}
+
+TEST(maddf_msubf_s) {
+ if (!IsMipsArchVariant(kMips32r6)) return;
+ helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) {
+ __ maddf_s(f4, f6, f8);
+ __ swc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add)));
+ __ msubf_s(f16, f6, f8);
+ __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub)));
+ });
+}
+
+TEST(maddf_msubf_d) {
+ if (!IsMipsArchVariant(kMips32r6)) return;
+ helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) {
+ __ maddf_d(f4, f6, f8);
+ __ sdc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
+ __ msubf_d(f16, f6, f8);
+ __ sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
+ });
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index 9529dab242..b0315343b5 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -5934,5 +5934,128 @@ TEST(Trampoline) {
CHECK_EQ(res, 0);
}
+template <class T>
+struct TestCaseMaddMsub {
+ T fr, fs, ft, fd_add, fd_sub;
+};
+
+template <typename T, typename F>
+void helper_madd_msub_maddf_msubf(F func) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+
+ T x = std::sqrt(static_cast<T>(2.0));
+ T y = std::sqrt(static_cast<T>(3.0));
+ T z = std::sqrt(static_cast<T>(5.0));
+ T x2 = 11.11, y2 = 22.22, z2 = 33.33;
+ TestCaseMaddMsub<T> test_cases[] = {
+ {x, y, z, 0.0, 0.0},
+ {x, y, -z, 0.0, 0.0},
+ {x, -y, z, 0.0, 0.0},
+ {x, -y, -z, 0.0, 0.0},
+ {-x, y, z, 0.0, 0.0},
+ {-x, y, -z, 0.0, 0.0},
+ {-x, -y, z, 0.0, 0.0},
+ {-x, -y, -z, 0.0, 0.0},
+ {-3.14, 0.2345, -123.000056, 0.0, 0.0},
+ {7.3, -23.257, -357.1357, 0.0, 0.0},
+ {x2, y2, z2, 0.0, 0.0},
+ {x2, y2, -z2, 0.0, 0.0},
+ {x2, -y2, z2, 0.0, 0.0},
+ {x2, -y2, -z2, 0.0, 0.0},
+ {-x2, y2, z2, 0.0, 0.0},
+ {-x2, y2, -z2, 0.0, 0.0},
+ {-x2, -y2, z2, 0.0, 0.0},
+ {-x2, -y2, -z2, 0.0, 0.0},
+ };
+
+ if (std::is_same<T, float>::value) {
+ __ lwc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
+ __ lwc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs)));
+ __ lwc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft)));
+ __ lwc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
+ } else if (std::is_same<T, double>::value) {
+ __ ldc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
+ __ ldc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs)));
+ __ ldc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft)));
+ __ ldc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
+ } else {
+ UNREACHABLE();
+ }
+
+ func(assm);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
+ TestCaseMaddMsub<T> tc;
+ for (size_t i = 0; i < kTableLength; i++) {
+ tc.fr = test_cases[i].fr;
+ tc.fs = test_cases[i].fs;
+ tc.ft = test_cases[i].ft;
+
+ (CALL_GENERATED_CODE(isolate, f, &tc, 0, 0, 0, 0));
+
+ T res_add = tc.fr + (tc.fs * tc.ft);
+ T res_sub;
+ if (kArchVariant != kMips64r6) {
+ res_sub = (tc.fs * tc.ft) - tc.fr;
+ } else {
+ res_sub = tc.fr - (tc.fs * tc.ft);
+ }
+
+ CHECK_EQ(tc.fd_add, res_add);
+ CHECK_EQ(tc.fd_sub, res_sub);
+ }
+}
+
+TEST(madd_msub_s) {
+ if (kArchVariant == kMips64r6) return;
+ helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) {
+ __ madd_s(f10, f4, f6, f8);
+ __ swc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add)));
+ __ msub_s(f16, f4, f6, f8);
+ __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub)));
+ });
+}
+
+TEST(madd_msub_d) {
+ if (kArchVariant == kMips64r6) return;
+ helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) {
+ __ madd_d(f10, f4, f6, f8);
+ __ sdc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
+ __ msub_d(f16, f4, f6, f8);
+ __ sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
+ });
+}
+
+TEST(maddf_msubf_s) {
+ if (kArchVariant != kMips64r6) return;
+ helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) {
+ __ maddf_s(f4, f6, f8);
+ __ swc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add)));
+ __ msubf_s(f16, f6, f8);
+ __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub)));
+ });
+}
+
+TEST(maddf_msubf_d) {
+ if (kArchVariant != kMips64r6) return;
+ helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) {
+ __ maddf_d(f4, f6, f8);
+ __ sdc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
+ __ msubf_d(f16, f6, f8);
+ __ sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
+ });
+}
#undef __
diff --git a/deps/v8/test/cctest/test-ast-types.cc b/deps/v8/test/cctest/test-ast-types.cc
new file mode 100644
index 0000000000..39d2d70eb0
--- /dev/null
+++ b/deps/v8/test/cctest/test-ast-types.cc
@@ -0,0 +1,1904 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "src/crankshaft/hydrogen-types.h"
+#include "src/factory.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/ast/ast-types.h"
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
+#include "test/cctest/ast-types-fuzz.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+namespace {
+
+// Testing auxiliaries (breaking the Type abstraction).
+
+static bool IsInteger(double x) {
+ return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
+}
+
+static bool IsInteger(i::Object* x) {
+ return x->IsNumber() && IsInteger(x->Number());
+}
+
+typedef uint32_t bitset;
+
+struct Tests {
+ typedef AstTypes::TypeVector::iterator TypeIterator;
+ typedef AstTypes::MapVector::iterator MapIterator;
+ typedef AstTypes::ValueVector::iterator ValueIterator;
+
+ Isolate* isolate;
+ HandleScope scope;
+ Zone zone;
+ AstTypes T;
+
+ Tests()
+ : isolate(CcTest::InitIsolateOnce()),
+ scope(isolate),
+ zone(isolate->allocator()),
+ T(&zone, isolate, isolate->random_number_generator()) {}
+
+ bool IsBitset(AstType* type) { return type->IsBitsetForTesting(); }
+ bool IsUnion(AstType* type) { return type->IsUnionForTesting(); }
+ AstBitsetType::bitset AsBitset(AstType* type) {
+ return type->AsBitsetForTesting();
+ }
+ AstUnionType* AsUnion(AstType* type) { return type->AsUnionForTesting(); }
+
+ bool Equal(AstType* type1, AstType* type2) {
+ return type1->Equals(type2) &&
+ this->IsBitset(type1) == this->IsBitset(type2) &&
+ this->IsUnion(type1) == this->IsUnion(type2) &&
+ type1->NumClasses() == type2->NumClasses() &&
+ type1->NumConstants() == type2->NumConstants() &&
+ (!this->IsBitset(type1) ||
+ this->AsBitset(type1) == this->AsBitset(type2)) &&
+ (!this->IsUnion(type1) ||
+ this->AsUnion(type1)->LengthForTesting() ==
+ this->AsUnion(type2)->LengthForTesting());
+ }
+
+ void CheckEqual(AstType* type1, AstType* type2) {
+ CHECK(Equal(type1, type2));
+ }
+
+ void CheckSub(AstType* type1, AstType* type2) {
+ CHECK(type1->Is(type2));
+ CHECK(!type2->Is(type1));
+ if (this->IsBitset(type1) && this->IsBitset(type2)) {
+ CHECK(this->AsBitset(type1) != this->AsBitset(type2));
+ }
+ }
+
+ void CheckSubOrEqual(AstType* type1, AstType* type2) {
+ CHECK(type1->Is(type2));
+ if (this->IsBitset(type1) && this->IsBitset(type2)) {
+ CHECK((this->AsBitset(type1) | this->AsBitset(type2)) ==
+ this->AsBitset(type2));
+ }
+ }
+
+ void CheckUnordered(AstType* type1, AstType* type2) {
+ CHECK(!type1->Is(type2));
+ CHECK(!type2->Is(type1));
+ if (this->IsBitset(type1) && this->IsBitset(type2)) {
+ CHECK(this->AsBitset(type1) != this->AsBitset(type2));
+ }
+ }
+
+ void CheckOverlap(AstType* type1, AstType* type2) {
+ CHECK(type1->Maybe(type2));
+ CHECK(type2->Maybe(type1));
+ }
+
+ void CheckDisjoint(AstType* type1, AstType* type2) {
+ CHECK(!type1->Is(type2));
+ CHECK(!type2->Is(type1));
+ CHECK(!type1->Maybe(type2));
+ CHECK(!type2->Maybe(type1));
+ }
+
+ void IsSomeType() {
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* t = *it;
+ CHECK(1 ==
+ this->IsBitset(t) + t->IsClass() + t->IsConstant() + t->IsRange() +
+ this->IsUnion(t) + t->IsArray() + t->IsFunction() +
+ t->IsContext());
+ }
+ }
+
+ void Bitset() {
+ // None and Any are bitsets.
+ CHECK(this->IsBitset(T.None));
+ CHECK(this->IsBitset(T.Any));
+
+ CHECK(bitset(0) == this->AsBitset(T.None));
+ CHECK(bitset(0xfffffffeu) == this->AsBitset(T.Any));
+
+ // Union(T1, T2) is bitset for bitsets T1,T2
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* union12 = T.Union(type1, type2);
+ CHECK(!(this->IsBitset(type1) && this->IsBitset(type2)) ||
+ this->IsBitset(union12));
+ }
+ }
+
+ // Intersect(T1, T2) is bitset for bitsets T1,T2
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* intersect12 = T.Intersect(type1, type2);
+ CHECK(!(this->IsBitset(type1) && this->IsBitset(type2)) ||
+ this->IsBitset(intersect12));
+ }
+ }
+
+ // Union(T1, T2) is bitset if T2 is bitset and T1->Is(T2)
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* union12 = T.Union(type1, type2);
+ CHECK(!(this->IsBitset(type2) && type1->Is(type2)) ||
+ this->IsBitset(union12));
+ }
+ }
+
+ // Union(T1, T2) is bitwise disjunction for bitsets T1,T2
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* union12 = T.Union(type1, type2);
+ if (this->IsBitset(type1) && this->IsBitset(type2)) {
+ CHECK((this->AsBitset(type1) | this->AsBitset(type2)) ==
+ this->AsBitset(union12));
+ }
+ }
+ }
+
+ // Intersect(T1, T2) is bitwise conjunction for bitsets T1,T2 (modulo None)
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ if (this->IsBitset(type1) && this->IsBitset(type2)) {
+ AstType* intersect12 = T.Intersect(type1, type2);
+ bitset bits = this->AsBitset(type1) & this->AsBitset(type2);
+ CHECK(bits == this->AsBitset(intersect12));
+ }
+ }
+ }
+ }
+
+ void PointwiseRepresentation() {
+ // Check we can decompose type into semantics and representation and
+ // then compose it back to get an equivalent type.
+ int counter = 0;
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ counter++;
+ printf("Counter: %i\n", counter);
+ fflush(stdout);
+ AstType* type1 = *it1;
+ AstType* representation = T.Representation(type1);
+ AstType* semantic = T.Semantic(type1);
+ AstType* composed = T.Union(representation, semantic);
+ CHECK(type1->Equals(composed));
+ }
+
+ // Pointwiseness of Union.
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* representation1 = T.Representation(type1);
+ AstType* semantic1 = T.Semantic(type1);
+ AstType* representation2 = T.Representation(type2);
+ AstType* semantic2 = T.Semantic(type2);
+ AstType* direct_union = T.Union(type1, type2);
+ AstType* representation_union =
+ T.Union(representation1, representation2);
+ AstType* semantic_union = T.Union(semantic1, semantic2);
+ AstType* composed_union = T.Union(representation_union, semantic_union);
+ CHECK(direct_union->Equals(composed_union));
+ }
+ }
+
+ // Pointwiseness of Intersect.
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* representation1 = T.Representation(type1);
+ AstType* semantic1 = T.Semantic(type1);
+ AstType* representation2 = T.Representation(type2);
+ AstType* semantic2 = T.Semantic(type2);
+ AstType* direct_intersection = T.Intersect(type1, type2);
+ AstType* representation_intersection =
+ T.Intersect(representation1, representation2);
+ AstType* semantic_intersection = T.Intersect(semantic1, semantic2);
+ AstType* composed_intersection =
+ T.Union(representation_intersection, semantic_intersection);
+ CHECK(direct_intersection->Equals(composed_intersection));
+ }
+ }
+
+ // Pointwiseness of Is.
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* representation1 = T.Representation(type1);
+ AstType* semantic1 = T.Semantic(type1);
+ AstType* representation2 = T.Representation(type2);
+ AstType* semantic2 = T.Semantic(type2);
+ bool representation_is = representation1->Is(representation2);
+ bool semantic_is = semantic1->Is(semantic2);
+ bool direct_is = type1->Is(type2);
+ CHECK(direct_is == (semantic_is && representation_is));
+ }
+ }
+ }
+
+ void Class() {
+ // Constructor
+ for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
+ Handle<i::Map> map = *mt;
+ AstType* type = T.Class(map);
+ CHECK(type->IsClass());
+ }
+
+ // Map attribute
+ for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
+ Handle<i::Map> map = *mt;
+ AstType* type = T.Class(map);
+ CHECK(*map == *type->AsClass()->Map());
+ }
+
+ // Functionality & Injectivity: Class(M1) = Class(M2) iff M1 = M2
+ for (MapIterator mt1 = T.maps.begin(); mt1 != T.maps.end(); ++mt1) {
+ for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
+ Handle<i::Map> map1 = *mt1;
+ Handle<i::Map> map2 = *mt2;
+ AstType* type1 = T.Class(map1);
+ AstType* type2 = T.Class(map2);
+ CHECK(Equal(type1, type2) == (*map1 == *map2));
+ }
+ }
+ }
+
+ void Constant() {
+ // Constructor
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ Handle<i::Object> value = *vt;
+ AstType* type = T.Constant(value);
+ CHECK(type->IsConstant());
+ }
+
+ // Value attribute
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ Handle<i::Object> value = *vt;
+ AstType* type = T.Constant(value);
+ CHECK(*value == *type->AsConstant()->Value());
+ }
+
+ // Functionality & Injectivity: Constant(V1) = Constant(V2) iff V1 = V2
+ for (ValueIterator vt1 = T.values.begin(); vt1 != T.values.end(); ++vt1) {
+ for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
+ Handle<i::Object> value1 = *vt1;
+ Handle<i::Object> value2 = *vt2;
+ AstType* type1 = T.Constant(value1);
+ AstType* type2 = T.Constant(value2);
+ CHECK(Equal(type1, type2) == (*value1 == *value2));
+ }
+ }
+
+ // Typing of numbers
+ Factory* fac = isolate->factory();
+ CHECK(T.Constant(fac->NewNumber(0))->Is(T.UnsignedSmall));
+ CHECK(T.Constant(fac->NewNumber(1))->Is(T.UnsignedSmall));
+ CHECK(T.Constant(fac->NewNumber(0x3fffffff))->Is(T.UnsignedSmall));
+ CHECK(T.Constant(fac->NewNumber(-1))->Is(T.Negative31));
+ CHECK(T.Constant(fac->NewNumber(-0x3fffffff))->Is(T.Negative31));
+ CHECK(T.Constant(fac->NewNumber(-0x40000000))->Is(T.Negative31));
+ CHECK(T.Constant(fac->NewNumber(0x40000000))->Is(T.Unsigned31));
+ CHECK(!T.Constant(fac->NewNumber(0x40000000))->Is(T.Unsigned30));
+ CHECK(T.Constant(fac->NewNumber(0x7fffffff))->Is(T.Unsigned31));
+ CHECK(!T.Constant(fac->NewNumber(0x7fffffff))->Is(T.Unsigned30));
+ CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.Negative32));
+ CHECK(!T.Constant(fac->NewNumber(-0x40000001))->Is(T.Negative31));
+ CHECK(T.Constant(fac->NewNumber(-0x7fffffff))->Is(T.Negative32));
+ CHECK(!T.Constant(fac->NewNumber(-0x7fffffff - 1))->Is(T.Negative31));
+ if (SmiValuesAre31Bits()) {
+ CHECK(!T.Constant(fac->NewNumber(0x40000000))->Is(T.UnsignedSmall));
+ CHECK(!T.Constant(fac->NewNumber(0x7fffffff))->Is(T.UnsignedSmall));
+ CHECK(!T.Constant(fac->NewNumber(-0x40000001))->Is(T.SignedSmall));
+ CHECK(!T.Constant(fac->NewNumber(-0x7fffffff - 1))->Is(T.SignedSmall));
+ } else {
+ CHECK(SmiValuesAre32Bits());
+ CHECK(T.Constant(fac->NewNumber(0x40000000))->Is(T.UnsignedSmall));
+ CHECK(T.Constant(fac->NewNumber(0x7fffffff))->Is(T.UnsignedSmall));
+ CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.SignedSmall));
+ CHECK(T.Constant(fac->NewNumber(-0x7fffffff - 1))->Is(T.SignedSmall));
+ }
+ CHECK(T.Constant(fac->NewNumber(0x80000000u))->Is(T.Unsigned32));
+ CHECK(!T.Constant(fac->NewNumber(0x80000000u))->Is(T.Unsigned31));
+ CHECK(T.Constant(fac->NewNumber(0xffffffffu))->Is(T.Unsigned32));
+ CHECK(!T.Constant(fac->NewNumber(0xffffffffu))->Is(T.Unsigned31));
+ CHECK(T.Constant(fac->NewNumber(0xffffffffu + 1.0))->Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(0xffffffffu + 1.0))->Is(T.Integral32));
+ CHECK(T.Constant(fac->NewNumber(-0x7fffffff - 2.0))->Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(-0x7fffffff - 2.0))->Is(T.Integral32));
+ CHECK(T.Constant(fac->NewNumber(0.1))->Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(0.1))->Is(T.Integral32));
+ CHECK(T.Constant(fac->NewNumber(-10.1))->Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(-10.1))->Is(T.Integral32));
+ CHECK(T.Constant(fac->NewNumber(10e60))->Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(10e60))->Is(T.Integral32));
+ CHECK(T.Constant(fac->NewNumber(-1.0 * 0.0))->Is(T.MinusZero));
+ CHECK(T.Constant(fac->NewNumber(std::numeric_limits<double>::quiet_NaN()))
+ ->Is(T.NaN));
+ CHECK(T.Constant(fac->NewNumber(V8_INFINITY))->Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(V8_INFINITY))->Is(T.Integral32));
+ CHECK(T.Constant(fac->NewNumber(-V8_INFINITY))->Is(T.PlainNumber));
+ CHECK(!T.Constant(fac->NewNumber(-V8_INFINITY))->Is(T.Integral32));
+ }
+
+ void Range() {
+ // Constructor
+ for (ValueIterator i = T.integers.begin(); i != T.integers.end(); ++i) {
+ for (ValueIterator j = T.integers.begin(); j != T.integers.end(); ++j) {
+ double min = (*i)->Number();
+ double max = (*j)->Number();
+ if (min > max) std::swap(min, max);
+ AstType* type = T.Range(min, max);
+ CHECK(type->IsRange());
+ }
+ }
+
+ // Range attributes
+ for (ValueIterator i = T.integers.begin(); i != T.integers.end(); ++i) {
+ for (ValueIterator j = T.integers.begin(); j != T.integers.end(); ++j) {
+ double min = (*i)->Number();
+ double max = (*j)->Number();
+ if (min > max) std::swap(min, max);
+ AstType* type = T.Range(min, max);
+ CHECK(min == type->AsRange()->Min());
+ CHECK(max == type->AsRange()->Max());
+ }
+ }
+
+ // Functionality & Injectivity:
+ // Range(min1, max1) = Range(min2, max2) <=> min1 = min2 /\ max1 = max2
+ for (ValueIterator i1 = T.integers.begin(); i1 != T.integers.end(); ++i1) {
+ for (ValueIterator j1 = i1; j1 != T.integers.end(); ++j1) {
+ for (ValueIterator i2 = T.integers.begin(); i2 != T.integers.end();
+ ++i2) {
+ for (ValueIterator j2 = i2; j2 != T.integers.end(); ++j2) {
+ double min1 = (*i1)->Number();
+ double max1 = (*j1)->Number();
+ double min2 = (*i2)->Number();
+ double max2 = (*j2)->Number();
+ if (min1 > max1) std::swap(min1, max1);
+ if (min2 > max2) std::swap(min2, max2);
+ AstType* type1 = T.Range(min1, max1);
+ AstType* type2 = T.Range(min2, max2);
+ CHECK(Equal(type1, type2) == (min1 == min2 && max1 == max2));
+ }
+ }
+ }
+ }
+ }
+
+ void Context() {
+ // Constructor
+ for (int i = 0; i < 20; ++i) {
+ AstType* type = T.Random();
+ AstType* context = T.Context(type);
+ CHECK(context->IsContext());
+ }
+
+ // Attributes
+ for (int i = 0; i < 20; ++i) {
+ AstType* type = T.Random();
+ AstType* context = T.Context(type);
+ CheckEqual(type, context->AsContext()->Outer());
+ }
+
+ // Functionality & Injectivity: Context(T1) = Context(T2) iff T1 = T2
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 20; ++j) {
+ AstType* type1 = T.Random();
+ AstType* type2 = T.Random();
+ AstType* context1 = T.Context(type1);
+ AstType* context2 = T.Context(type2);
+ CHECK(Equal(context1, context2) == Equal(type1, type2));
+ }
+ }
+ }
+
+ void Array() {
+ // Constructor
+ for (int i = 0; i < 20; ++i) {
+ AstType* type = T.Random();
+ AstType* array = T.Array1(type);
+ CHECK(array->IsArray());
+ }
+
+ // Attributes
+ for (int i = 0; i < 20; ++i) {
+ AstType* type = T.Random();
+ AstType* array = T.Array1(type);
+ CheckEqual(type, array->AsArray()->Element());
+ }
+
+ // Functionality & Injectivity: Array(T1) = Array(T2) iff T1 = T2
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 20; ++j) {
+ AstType* type1 = T.Random();
+ AstType* type2 = T.Random();
+ AstType* array1 = T.Array1(type1);
+ AstType* array2 = T.Array1(type2);
+ CHECK(Equal(array1, array2) == Equal(type1, type2));
+ }
+ }
+ }
+
+ void Function() {
+ // Constructors
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 20; ++j) {
+ for (int k = 0; k < 20; ++k) {
+ AstType* type1 = T.Random();
+ AstType* type2 = T.Random();
+ AstType* type3 = T.Random();
+ AstType* function0 = T.Function0(type1, type2);
+ AstType* function1 = T.Function1(type1, type2, type3);
+ AstType* function2 = T.Function2(type1, type2, type3);
+ CHECK(function0->IsFunction());
+ CHECK(function1->IsFunction());
+ CHECK(function2->IsFunction());
+ }
+ }
+ }
+
+ // Attributes
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 20; ++j) {
+ for (int k = 0; k < 20; ++k) {
+ AstType* type1 = T.Random();
+ AstType* type2 = T.Random();
+ AstType* type3 = T.Random();
+ AstType* function0 = T.Function0(type1, type2);
+ AstType* function1 = T.Function1(type1, type2, type3);
+ AstType* function2 = T.Function2(type1, type2, type3);
+ CHECK_EQ(0, function0->AsFunction()->Arity());
+ CHECK_EQ(1, function1->AsFunction()->Arity());
+ CHECK_EQ(2, function2->AsFunction()->Arity());
+ CheckEqual(type1, function0->AsFunction()->Result());
+ CheckEqual(type1, function1->AsFunction()->Result());
+ CheckEqual(type1, function2->AsFunction()->Result());
+ CheckEqual(type2, function0->AsFunction()->Receiver());
+ CheckEqual(type2, function1->AsFunction()->Receiver());
+ CheckEqual(T.Any, function2->AsFunction()->Receiver());
+ CheckEqual(type3, function1->AsFunction()->Parameter(0));
+ CheckEqual(type2, function2->AsFunction()->Parameter(0));
+ CheckEqual(type3, function2->AsFunction()->Parameter(1));
+ }
+ }
+ }
+
+ // Functionality & Injectivity: Function(Ts1) = Function(Ts2) iff Ts1 = Ts2
+ for (int i = 0; i < 20; ++i) {
+ for (int j = 0; j < 20; ++j) {
+ for (int k = 0; k < 20; ++k) {
+ AstType* type1 = T.Random();
+ AstType* type2 = T.Random();
+ AstType* type3 = T.Random();
+ AstType* function01 = T.Function0(type1, type2);
+ AstType* function02 = T.Function0(type1, type3);
+ AstType* function03 = T.Function0(type3, type2);
+ AstType* function11 = T.Function1(type1, type2, type2);
+ AstType* function12 = T.Function1(type1, type2, type3);
+ AstType* function21 = T.Function2(type1, type2, type2);
+ AstType* function22 = T.Function2(type1, type2, type3);
+ AstType* function23 = T.Function2(type1, type3, type2);
+ CHECK(Equal(function01, function02) == Equal(type2, type3));
+ CHECK(Equal(function01, function03) == Equal(type1, type3));
+ CHECK(Equal(function11, function12) == Equal(type2, type3));
+ CHECK(Equal(function21, function22) == Equal(type2, type3));
+ CHECK(Equal(function21, function23) == Equal(type2, type3));
+ }
+ }
+ }
+ }
+
+ void Of() {
+ // Constant(V)->Is(Of(V))
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ Handle<i::Object> value = *vt;
+ AstType* const_type = T.Constant(value);
+ AstType* of_type = T.Of(value);
+ CHECK(const_type->Is(of_type));
+ }
+
+ // If Of(V)->Is(T), then Constant(V)->Is(T)
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ Handle<i::Object> value = *vt;
+ AstType* type = *it;
+ AstType* const_type = T.Constant(value);
+ AstType* of_type = T.Of(value);
+ CHECK(!of_type->Is(type) || const_type->Is(type));
+ }
+ }
+
+ // If Constant(V)->Is(T), then Of(V)->Is(T) or T->Maybe(Constant(V))
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ Handle<i::Object> value = *vt;
+ AstType* type = *it;
+ AstType* const_type = T.Constant(value);
+ AstType* of_type = T.Of(value);
+ CHECK(!const_type->Is(type) || of_type->Is(type) ||
+ type->Maybe(const_type));
+ }
+ }
+ }
+
+ void NowOf() {
+ // Constant(V)->NowIs(NowOf(V))
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ Handle<i::Object> value = *vt;
+ AstType* const_type = T.Constant(value);
+ AstType* nowof_type = T.NowOf(value);
+ CHECK(const_type->NowIs(nowof_type));
+ }
+
+ // NowOf(V)->Is(Of(V))
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ Handle<i::Object> value = *vt;
+ AstType* nowof_type = T.NowOf(value);
+ AstType* of_type = T.Of(value);
+ CHECK(nowof_type->Is(of_type));
+ }
+
+ // If NowOf(V)->NowIs(T), then Constant(V)->NowIs(T)
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ Handle<i::Object> value = *vt;
+ AstType* type = *it;
+ AstType* const_type = T.Constant(value);
+ AstType* nowof_type = T.NowOf(value);
+ CHECK(!nowof_type->NowIs(type) || const_type->NowIs(type));
+ }
+ }
+
+ // If Constant(V)->NowIs(T),
+ // then NowOf(V)->NowIs(T) or T->Maybe(Constant(V))
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ Handle<i::Object> value = *vt;
+ AstType* type = *it;
+ AstType* const_type = T.Constant(value);
+ AstType* nowof_type = T.NowOf(value);
+ CHECK(!const_type->NowIs(type) || nowof_type->NowIs(type) ||
+ type->Maybe(const_type));
+ }
+ }
+
+ // If Constant(V)->Is(T),
+ // then NowOf(V)->Is(T) or T->Maybe(Constant(V))
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ Handle<i::Object> value = *vt;
+ AstType* type = *it;
+ AstType* const_type = T.Constant(value);
+ AstType* nowof_type = T.NowOf(value);
+ CHECK(!const_type->Is(type) || nowof_type->Is(type) ||
+ type->Maybe(const_type));
+ }
+ }
+ }
+
+ void MinMax() {
+ // If b is regular numeric bitset, then Range(b->Min(), b->Max())->Is(b).
+ // TODO(neis): Need to ignore representation for this to be true.
+ /*
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ if (this->IsBitset(type) && type->Is(T.Number) &&
+ !type->Is(T.None) && !type->Is(T.NaN)) {
+ AstType* range = T.Range(
+ isolate->factory()->NewNumber(type->Min()),
+ isolate->factory()->NewNumber(type->Max()));
+ CHECK(range->Is(type));
+ }
+ }
+ */
+
+ // If b is regular numeric bitset, then b->Min() and b->Max() are integers.
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ if (this->IsBitset(type) && type->Is(T.Number) && !type->Is(T.NaN)) {
+ CHECK(IsInteger(type->Min()) && IsInteger(type->Max()));
+ }
+ }
+
+ // If b1 and b2 are regular numeric bitsets with b1->Is(b2), then
+ // b1->Min() >= b2->Min() and b1->Max() <= b2->Max().
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ if (this->IsBitset(type1) && type1->Is(type2) && type2->Is(T.Number) &&
+ !type1->Is(T.NaN) && !type2->Is(T.NaN)) {
+ CHECK(type1->Min() >= type2->Min());
+ CHECK(type1->Max() <= type2->Max());
+ }
+ }
+ }
+
+ // Lub(Range(x,y))->Min() <= x and y <= Lub(Range(x,y))->Max()
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ if (type->IsRange()) {
+ AstType* lub = AstBitsetType::NewForTesting(AstBitsetType::Lub(type));
+ CHECK(lub->Min() <= type->Min() && type->Max() <= lub->Max());
+ }
+ }
+
+ // Rangification: If T->Is(Range(-inf,+inf)) and T is inhabited, then
+ // T->Is(Range(T->Min(), T->Max())).
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ CHECK(!type->Is(T.Integer) || !type->IsInhabited() ||
+ type->Is(T.Range(type->Min(), type->Max())));
+ }
+ }
+
+ void BitsetGlb() {
+ // Lower: (T->BitsetGlb())->Is(T)
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ AstType* glb = AstBitsetType::NewForTesting(AstBitsetType::Glb(type));
+ CHECK(glb->Is(type));
+ }
+
+ // Greatest: If T1->IsBitset() and T1->Is(T2), then T1->Is(T2->BitsetGlb())
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* glb2 = AstBitsetType::NewForTesting(AstBitsetType::Glb(type2));
+ CHECK(!this->IsBitset(type1) || !type1->Is(type2) || type1->Is(glb2));
+ }
+ }
+
+ // Monotonicity: T1->Is(T2) implies (T1->BitsetGlb())->Is(T2->BitsetGlb())
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* glb1 = AstBitsetType::NewForTesting(AstBitsetType::Glb(type1));
+ AstType* glb2 = AstBitsetType::NewForTesting(AstBitsetType::Glb(type2));
+ CHECK(!type1->Is(type2) || glb1->Is(glb2));
+ }
+ }
+ }
+
+ void BitsetLub() {
+ // Upper: T->Is(T->BitsetLub())
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ AstType* lub = AstBitsetType::NewForTesting(AstBitsetType::Lub(type));
+ CHECK(type->Is(lub));
+ }
+
+ // Least: If T2->IsBitset() and T1->Is(T2), then (T1->BitsetLub())->Is(T2)
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* lub1 = AstBitsetType::NewForTesting(AstBitsetType::Lub(type1));
+ CHECK(!this->IsBitset(type2) || !type1->Is(type2) || lub1->Is(type2));
+ }
+ }
+
+ // Monotonicity: T1->Is(T2) implies (T1->BitsetLub())->Is(T2->BitsetLub())
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* lub1 = AstBitsetType::NewForTesting(AstBitsetType::Lub(type1));
+ AstType* lub2 = AstBitsetType::NewForTesting(AstBitsetType::Lub(type2));
+ CHECK(!type1->Is(type2) || lub1->Is(lub2));
+ }
+ }
+ }
+
+ void Is1() {
+ // Least Element (Bottom): None->Is(T)
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ CHECK(T.None->Is(type));
+ }
+
+ // Greatest Element (Top): T->Is(Any)
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ CHECK(type->Is(T.Any));
+ }
+
+ // Bottom Uniqueness: T->Is(None) implies T = None
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ if (type->Is(T.None)) CheckEqual(type, T.None);
+ }
+
+ // Top Uniqueness: Any->Is(T) implies T = Any
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ if (T.Any->Is(type)) CheckEqual(type, T.Any);
+ }
+
+ // Reflexivity: T->Is(T)
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ CHECK(type->Is(type));
+ }
+
+ // Transitivity: T1->Is(T2) and T2->Is(T3) implies T1->Is(T3)
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* type3 = *it3;
+ CHECK(!(type1->Is(type2) && type2->Is(type3)) || type1->Is(type3));
+ }
+ }
+ }
+
+ // Antisymmetry: T1->Is(T2) and T2->Is(T1) iff T1 = T2
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ CHECK((type1->Is(type2) && type2->Is(type1)) == Equal(type1, type2));
+ }
+ }
+
+ // (In-)Compatibilities.
+ for (TypeIterator i = T.types.begin(); i != T.types.end(); ++i) {
+ for (TypeIterator j = T.types.begin(); j != T.types.end(); ++j) {
+ AstType* type1 = *i;
+ AstType* type2 = *j;
+ CHECK(!type1->Is(type2) || this->IsBitset(type2) ||
+ this->IsUnion(type2) || this->IsUnion(type1) ||
+ (type1->IsClass() && type2->IsClass()) ||
+ (type1->IsConstant() && type2->IsConstant()) ||
+ (type1->IsConstant() && type2->IsRange()) ||
+ (this->IsBitset(type1) && type2->IsRange()) ||
+ (type1->IsRange() && type2->IsRange()) ||
+ (type1->IsContext() && type2->IsContext()) ||
+ (type1->IsArray() && type2->IsArray()) ||
+ (type1->IsFunction() && type2->IsFunction()) ||
+ !type1->IsInhabited());
+ }
+ }
+ }
+
+ void Is2() {
+ // Class(M1)->Is(Class(M2)) iff M1 = M2
+ for (MapIterator mt1 = T.maps.begin(); mt1 != T.maps.end(); ++mt1) {
+ for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
+ Handle<i::Map> map1 = *mt1;
+ Handle<i::Map> map2 = *mt2;
+ AstType* class_type1 = T.Class(map1);
+ AstType* class_type2 = T.Class(map2);
+ CHECK(class_type1->Is(class_type2) == (*map1 == *map2));
+ }
+ }
+
+ // Range(X1, Y1)->Is(Range(X2, Y2)) iff X1 >= X2 /\ Y1 <= Y2
+ for (ValueIterator i1 = T.integers.begin(); i1 != T.integers.end(); ++i1) {
+ for (ValueIterator j1 = i1; j1 != T.integers.end(); ++j1) {
+ for (ValueIterator i2 = T.integers.begin(); i2 != T.integers.end();
+ ++i2) {
+ for (ValueIterator j2 = i2; j2 != T.integers.end(); ++j2) {
+ double min1 = (*i1)->Number();
+ double max1 = (*j1)->Number();
+ double min2 = (*i2)->Number();
+ double max2 = (*j2)->Number();
+ if (min1 > max1) std::swap(min1, max1);
+ if (min2 > max2) std::swap(min2, max2);
+ AstType* type1 = T.Range(min1, max1);
+ AstType* type2 = T.Range(min2, max2);
+ CHECK(type1->Is(type2) == (min1 >= min2 && max1 <= max2));
+ }
+ }
+ }
+ }
+
+ // Constant(V1)->Is(Constant(V2)) iff V1 = V2
+ for (ValueIterator vt1 = T.values.begin(); vt1 != T.values.end(); ++vt1) {
+ for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
+ Handle<i::Object> value1 = *vt1;
+ Handle<i::Object> value2 = *vt2;
+ AstType* const_type1 = T.Constant(value1);
+ AstType* const_type2 = T.Constant(value2);
+ CHECK(const_type1->Is(const_type2) == (*value1 == *value2));
+ }
+ }
+
+ // Context(T1)->Is(Context(T2)) iff T1 = T2
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* outer1 = *it1;
+ AstType* outer2 = *it2;
+ AstType* type1 = T.Context(outer1);
+ AstType* type2 = T.Context(outer2);
+ CHECK(type1->Is(type2) == outer1->Equals(outer2));
+ }
+ }
+
+ // Array(T1)->Is(Array(T2)) iff T1 = T2
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* element1 = *it1;
+ AstType* element2 = *it2;
+ AstType* type1 = T.Array1(element1);
+ AstType* type2 = T.Array1(element2);
+ CHECK(type1->Is(type2) == element1->Equals(element2));
+ }
+ }
+
+ // Function0(S1, T1)->Is(Function0(S2, T2)) iff S1 = S2 and T1 = T2
+ for (TypeIterator i = T.types.begin(); i != T.types.end(); ++i) {
+ for (TypeIterator j = T.types.begin(); j != T.types.end(); ++j) {
+ AstType* result1 = *i;
+ AstType* receiver1 = *j;
+ AstType* type1 = T.Function0(result1, receiver1);
+ AstType* result2 = T.Random();
+ AstType* receiver2 = T.Random();
+ AstType* type2 = T.Function0(result2, receiver2);
+ CHECK(type1->Is(type2) ==
+ (result1->Equals(result2) && receiver1->Equals(receiver2)));
+ }
+ }
+
+ // Range-specific subtyping
+
+ // If IsInteger(v) then Constant(v)->Is(Range(v, v)).
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ if (type->IsConstant() && IsInteger(*type->AsConstant()->Value())) {
+ CHECK(type->Is(T.Range(type->AsConstant()->Value()->Number(),
+ type->AsConstant()->Value()->Number())));
+ }
+ }
+
+ // If Constant(x)->Is(Range(min,max)) then IsInteger(v) and min <= x <= max.
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ if (type1->IsConstant() && type2->IsRange() && type1->Is(type2)) {
+ double x = type1->AsConstant()->Value()->Number();
+ double min = type2->AsRange()->Min();
+ double max = type2->AsRange()->Max();
+ CHECK(IsInteger(x) && min <= x && x <= max);
+ }
+ }
+ }
+
+ // Lub(Range(x,y))->Is(T.Union(T.Integral32, T.OtherNumber))
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ if (type->IsRange()) {
+ AstType* lub = AstBitsetType::NewForTesting(AstBitsetType::Lub(type));
+ CHECK(lub->Is(T.PlainNumber));
+ }
+ }
+
+ // Subtyping between concrete basic types
+
+ CheckUnordered(T.Boolean, T.Null);
+ CheckUnordered(T.Undefined, T.Null);
+ CheckUnordered(T.Boolean, T.Undefined);
+
+ CheckSub(T.SignedSmall, T.Number);
+ CheckSub(T.Signed32, T.Number);
+ CheckSubOrEqual(T.SignedSmall, T.Signed32);
+ CheckUnordered(T.SignedSmall, T.MinusZero);
+ CheckUnordered(T.Signed32, T.Unsigned32);
+
+ CheckSub(T.UniqueName, T.Name);
+ CheckSub(T.String, T.Name);
+ CheckSub(T.InternalizedString, T.String);
+ CheckSub(T.InternalizedString, T.UniqueName);
+ CheckSub(T.InternalizedString, T.Name);
+ CheckSub(T.Symbol, T.UniqueName);
+ CheckSub(T.Symbol, T.Name);
+ CheckUnordered(T.String, T.UniqueName);
+ CheckUnordered(T.String, T.Symbol);
+ CheckUnordered(T.InternalizedString, T.Symbol);
+
+ CheckSub(T.Object, T.Receiver);
+ CheckSub(T.Proxy, T.Receiver);
+ CheckSub(T.OtherObject, T.Object);
+ CheckSub(T.OtherUndetectable, T.Object);
+ CheckSub(T.OtherObject, T.Object);
+
+ CheckUnordered(T.Object, T.Proxy);
+ CheckUnordered(T.OtherObject, T.Undetectable);
+
+ // Subtyping between concrete structural types
+
+ CheckSub(T.ObjectClass, T.Object);
+ CheckSub(T.ArrayClass, T.OtherObject);
+ CheckSub(T.UninitializedClass, T.Internal);
+ CheckUnordered(T.ObjectClass, T.ArrayClass);
+ CheckUnordered(T.UninitializedClass, T.Null);
+ CheckUnordered(T.UninitializedClass, T.Undefined);
+
+ CheckSub(T.SmiConstant, T.SignedSmall);
+ CheckSub(T.SmiConstant, T.Signed32);
+ CheckSub(T.SmiConstant, T.Number);
+ CheckSub(T.ObjectConstant1, T.Object);
+ CheckSub(T.ObjectConstant2, T.Object);
+ CheckSub(T.ArrayConstant, T.Object);
+ CheckSub(T.ArrayConstant, T.OtherObject);
+ CheckSub(T.ArrayConstant, T.Receiver);
+ CheckSub(T.UninitializedConstant, T.Internal);
+ CheckUnordered(T.ObjectConstant1, T.ObjectConstant2);
+ CheckUnordered(T.ObjectConstant1, T.ArrayConstant);
+ CheckUnordered(T.UninitializedConstant, T.Null);
+ CheckUnordered(T.UninitializedConstant, T.Undefined);
+
+ CheckUnordered(T.ObjectConstant1, T.ObjectClass);
+ CheckUnordered(T.ObjectConstant2, T.ObjectClass);
+ CheckUnordered(T.ObjectConstant1, T.ArrayClass);
+ CheckUnordered(T.ObjectConstant2, T.ArrayClass);
+ CheckUnordered(T.ArrayConstant, T.ObjectClass);
+
+ CheckSub(T.NumberArray, T.OtherObject);
+ CheckSub(T.NumberArray, T.Receiver);
+ CheckSub(T.NumberArray, T.Object);
+ CheckUnordered(T.StringArray, T.AnyArray);
+
+ CheckSub(T.MethodFunction, T.Object);
+ CheckSub(T.NumberFunction1, T.Object);
+ CheckUnordered(T.SignedFunction1, T.NumberFunction1);
+ CheckUnordered(T.NumberFunction1, T.NumberFunction2);
+ }
+
+ void NowIs() {
+ // Least Element (Bottom): None->NowIs(T)
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ CHECK(T.None->NowIs(type));
+ }
+
+ // Greatest Element (Top): T->NowIs(Any)
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ CHECK(type->NowIs(T.Any));
+ }
+
+ // Bottom Uniqueness: T->NowIs(None) implies T = None
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ if (type->NowIs(T.None)) CheckEqual(type, T.None);
+ }
+
+ // Top Uniqueness: Any->NowIs(T) implies T = Any
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ if (T.Any->NowIs(type)) CheckEqual(type, T.Any);
+ }
+
+ // Reflexivity: T->NowIs(T)
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ CHECK(type->NowIs(type));
+ }
+
+ // Transitivity: T1->NowIs(T2) and T2->NowIs(T3) implies T1->NowIs(T3)
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* type3 = *it3;
+ CHECK(!(type1->NowIs(type2) && type2->NowIs(type3)) ||
+ type1->NowIs(type3));
+ }
+ }
+ }
+
+ // Antisymmetry: T1->NowIs(T2) and T2->NowIs(T1) iff T1 = T2
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ CHECK((type1->NowIs(type2) && type2->NowIs(type1)) ==
+ Equal(type1, type2));
+ }
+ }
+
+ // T1->Is(T2) implies T1->NowIs(T2)
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ CHECK(!type1->Is(type2) || type1->NowIs(type2));
+ }
+ }
+
+ // Constant(V1)->NowIs(Constant(V2)) iff V1 = V2
+ for (ValueIterator vt1 = T.values.begin(); vt1 != T.values.end(); ++vt1) {
+ for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
+ Handle<i::Object> value1 = *vt1;
+ Handle<i::Object> value2 = *vt2;
+ AstType* const_type1 = T.Constant(value1);
+ AstType* const_type2 = T.Constant(value2);
+ CHECK(const_type1->NowIs(const_type2) == (*value1 == *value2));
+ }
+ }
+
+ // Class(M1)->NowIs(Class(M2)) iff M1 = M2
+ for (MapIterator mt1 = T.maps.begin(); mt1 != T.maps.end(); ++mt1) {
+ for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
+ Handle<i::Map> map1 = *mt1;
+ Handle<i::Map> map2 = *mt2;
+ AstType* class_type1 = T.Class(map1);
+ AstType* class_type2 = T.Class(map2);
+ CHECK(class_type1->NowIs(class_type2) == (*map1 == *map2));
+ }
+ }
+
+ // Constant(V)->NowIs(Class(M)) iff V has map M
+ for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ Handle<i::Map> map = *mt;
+ Handle<i::Object> value = *vt;
+ AstType* const_type = T.Constant(value);
+ AstType* class_type = T.Class(map);
+ CHECK((value->IsHeapObject() &&
+ i::HeapObject::cast(*value)->map() == *map) ==
+ const_type->NowIs(class_type));
+ }
+ }
+
+ // Class(M)->NowIs(Constant(V)) never
+ for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ Handle<i::Map> map = *mt;
+ Handle<i::Object> value = *vt;
+ AstType* const_type = T.Constant(value);
+ AstType* class_type = T.Class(map);
+ CHECK(!class_type->NowIs(const_type));
+ }
+ }
+ }
+
+ void Contains() {
+ // T->Contains(V) iff Constant(V)->Is(T)
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ AstType* type = *it;
+ Handle<i::Object> value = *vt;
+ AstType* const_type = T.Constant(value);
+ CHECK(type->Contains(value) == const_type->Is(type));
+ }
+ }
+ }
+
+ void NowContains() {
+ // T->NowContains(V) iff Constant(V)->NowIs(T)
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ AstType* type = *it;
+ Handle<i::Object> value = *vt;
+ AstType* const_type = T.Constant(value);
+ CHECK(type->NowContains(value) == const_type->NowIs(type));
+ }
+ }
+
+ // T->Contains(V) implies T->NowContains(V)
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ AstType* type = *it;
+ Handle<i::Object> value = *vt;
+ CHECK(!type->Contains(value) || type->NowContains(value));
+ }
+ }
+
+ // NowOf(V)->Is(T) implies T->NowContains(V)
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ AstType* type = *it;
+ Handle<i::Object> value = *vt;
+ AstType* nowof_type = T.Of(value);
+ CHECK(!nowof_type->NowIs(type) || type->NowContains(value));
+ }
+ }
+ }
+
+ void Maybe() {
+ // T->Maybe(Any) iff T inhabited
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ CHECK(type->Maybe(T.Any) == type->IsInhabited());
+ }
+
+ // T->Maybe(None) never
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ CHECK(!type->Maybe(T.None));
+ }
+
+ // Reflexivity upto Inhabitation: T->Maybe(T) iff T inhabited
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ CHECK(type->Maybe(type) == type->IsInhabited());
+ }
+
+ // Symmetry: T1->Maybe(T2) iff T2->Maybe(T1)
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ CHECK(type1->Maybe(type2) == type2->Maybe(type1));
+ }
+ }
+
+ // T1->Maybe(T2) implies T1, T2 inhabited
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ CHECK(!type1->Maybe(type2) ||
+ (type1->IsInhabited() && type2->IsInhabited()));
+ }
+ }
+
+ // T1->Maybe(T2) implies Intersect(T1, T2) inhabited
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* intersect12 = T.Intersect(type1, type2);
+ CHECK(!type1->Maybe(type2) || intersect12->IsInhabited());
+ }
+ }
+
+ // T1->Is(T2) and T1 inhabited implies T1->Maybe(T2)
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ CHECK(!(type1->Is(type2) && type1->IsInhabited()) ||
+ type1->Maybe(type2));
+ }
+ }
+
+ // Constant(V1)->Maybe(Constant(V2)) iff V1 = V2
+ for (ValueIterator vt1 = T.values.begin(); vt1 != T.values.end(); ++vt1) {
+ for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
+ Handle<i::Object> value1 = *vt1;
+ Handle<i::Object> value2 = *vt2;
+ AstType* const_type1 = T.Constant(value1);
+ AstType* const_type2 = T.Constant(value2);
+ CHECK(const_type1->Maybe(const_type2) == (*value1 == *value2));
+ }
+ }
+
+ // Class(M1)->Maybe(Class(M2)) iff M1 = M2
+ for (MapIterator mt1 = T.maps.begin(); mt1 != T.maps.end(); ++mt1) {
+ for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
+ Handle<i::Map> map1 = *mt1;
+ Handle<i::Map> map2 = *mt2;
+ AstType* class_type1 = T.Class(map1);
+ AstType* class_type2 = T.Class(map2);
+ CHECK(class_type1->Maybe(class_type2) == (*map1 == *map2));
+ }
+ }
+
+ // Constant(V)->Maybe(Class(M)) never
+ // This does NOT hold!
+ /*
+ for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ Handle<i::Map> map = *mt;
+ Handle<i::Object> value = *vt;
+ AstType* const_type = T.Constant(value);
+ AstType* class_type = T.Class(map);
+ CHECK(!const_type->Maybe(class_type));
+ }
+ }
+ */
+
+ // Class(M)->Maybe(Constant(V)) never
+ // This does NOT hold!
+ /*
+ for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
+ for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+ Handle<i::Map> map = *mt;
+ Handle<i::Object> value = *vt;
+ AstType* const_type = T.Constant(value);
+ AstType* class_type = T.Class(map);
+ CHECK(!class_type->Maybe(const_type));
+ }
+ }
+ */
+
+ // Basic types
+ CheckDisjoint(T.Boolean, T.Null);
+ CheckDisjoint(T.Undefined, T.Null);
+ CheckDisjoint(T.Boolean, T.Undefined);
+ CheckOverlap(T.SignedSmall, T.Number);
+ CheckOverlap(T.NaN, T.Number);
+ CheckDisjoint(T.Signed32, T.NaN);
+ CheckOverlap(T.UniqueName, T.Name);
+ CheckOverlap(T.String, T.Name);
+ CheckOverlap(T.InternalizedString, T.String);
+ CheckOverlap(T.InternalizedString, T.UniqueName);
+ CheckOverlap(T.InternalizedString, T.Name);
+ CheckOverlap(T.Symbol, T.UniqueName);
+ CheckOverlap(T.Symbol, T.Name);
+ CheckOverlap(T.String, T.UniqueName);
+ CheckDisjoint(T.String, T.Symbol);
+ CheckDisjoint(T.InternalizedString, T.Symbol);
+ CheckOverlap(T.Object, T.Receiver);
+ CheckOverlap(T.OtherObject, T.Object);
+ CheckOverlap(T.Proxy, T.Receiver);
+ CheckDisjoint(T.Object, T.Proxy);
+
+ // Structural types
+ CheckOverlap(T.ObjectClass, T.Object);
+ CheckOverlap(T.ArrayClass, T.Object);
+ CheckOverlap(T.ObjectClass, T.ObjectClass);
+ CheckOverlap(T.ArrayClass, T.ArrayClass);
+ CheckDisjoint(T.ObjectClass, T.ArrayClass);
+ CheckOverlap(T.SmiConstant, T.SignedSmall);
+ CheckOverlap(T.SmiConstant, T.Signed32);
+ CheckOverlap(T.SmiConstant, T.Number);
+ CheckOverlap(T.ObjectConstant1, T.Object);
+ CheckOverlap(T.ObjectConstant2, T.Object);
+ CheckOverlap(T.ArrayConstant, T.Object);
+ CheckOverlap(T.ArrayConstant, T.Receiver);
+ CheckOverlap(T.ObjectConstant1, T.ObjectConstant1);
+ CheckDisjoint(T.ObjectConstant1, T.ObjectConstant2);
+ CheckDisjoint(T.ObjectConstant1, T.ArrayConstant);
+ CheckOverlap(T.ObjectConstant1, T.ArrayClass);
+ CheckOverlap(T.ObjectConstant2, T.ArrayClass);
+ CheckOverlap(T.ArrayConstant, T.ObjectClass);
+ CheckOverlap(T.NumberArray, T.Receiver);
+ CheckDisjoint(T.NumberArray, T.AnyArray);
+ CheckDisjoint(T.NumberArray, T.StringArray);
+ CheckOverlap(T.MethodFunction, T.Object);
+ CheckDisjoint(T.SignedFunction1, T.NumberFunction1);
+ CheckDisjoint(T.SignedFunction1, T.NumberFunction2);
+ CheckDisjoint(T.NumberFunction1, T.NumberFunction2);
+ CheckDisjoint(T.SignedFunction1, T.MethodFunction);
+ CheckOverlap(T.ObjectConstant1, T.ObjectClass); // !!!
+ CheckOverlap(T.ObjectConstant2, T.ObjectClass); // !!!
+ CheckOverlap(T.NumberClass, T.Intersect(T.Number, T.Tagged)); // !!!
+ }
+
+ void Union1() {
+ // Identity: Union(T, None) = T
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ AstType* union_type = T.Union(type, T.None);
+ CheckEqual(union_type, type);
+ }
+
+ // Domination: Union(T, Any) = Any
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ AstType* union_type = T.Union(type, T.Any);
+ CheckEqual(union_type, T.Any);
+ }
+
+ // Idempotence: Union(T, T) = T
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ AstType* union_type = T.Union(type, type);
+ CheckEqual(union_type, type);
+ }
+
+ // Commutativity: Union(T1, T2) = Union(T2, T1)
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* union12 = T.Union(type1, type2);
+ AstType* union21 = T.Union(type2, type1);
+ CheckEqual(union12, union21);
+ }
+ }
+
+ // Associativity: Union(T1, Union(T2, T3)) = Union(Union(T1, T2), T3)
+ // This does NOT hold! For example:
+ // (Unsigned32 \/ Range(0,5)) \/ Range(-5,0) = Unsigned32 \/ Range(-5,0)
+ // Unsigned32 \/ (Range(0,5) \/ Range(-5,0)) = Unsigned32 \/ Range(-5,5)
+ /*
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* type3 = *it3;
+ AstType* union12 = T.Union(type1, type2);
+ AstType* union23 = T.Union(type2, type3);
+ AstType* union1_23 = T.Union(type1, union23);
+ AstType* union12_3 = T.Union(union12, type3);
+ CheckEqual(union1_23, union12_3);
+ }
+ }
+ }
+ */
+
+ // Meet: T1->Is(Union(T1, T2)) and T2->Is(Union(T1, T2))
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* union12 = T.Union(type1, type2);
+ CHECK(type1->Is(union12));
+ CHECK(type2->Is(union12));
+ }
+ }
+
+ // Upper Boundedness: T1->Is(T2) implies Union(T1, T2) = T2
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* union12 = T.Union(type1, type2);
+ if (type1->Is(type2)) CheckEqual(union12, type2);
+ }
+ }
+
+ // Monotonicity: T1->Is(T2) implies Union(T1, T3)->Is(Union(T2, T3))
+ // This does NOT hold. For example:
+ // Range(-5,-1) <= Signed32
+ // Range(-5,-1) \/ Range(1,5) = Range(-5,5) </= Signed32 \/ Range(1,5)
+ /*
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* type3 = *it3;
+ AstType* union13 = T.Union(type1, type3);
+ AstType* union23 = T.Union(type2, type3);
+ CHECK(!type1->Is(type2) || union13->Is(union23));
+ }
+ }
+ }
+ */
+ }
+
+ void Union2() {
+ // Monotonicity: T1->Is(T3) and T2->Is(T3) implies Union(T1, T2)->Is(T3)
+ // This does NOT hold. For example:
+ // Range(-2^33, -2^33) <= OtherNumber
+ // Range(2^33, 2^33) <= OtherNumber
+ // Range(-2^33, 2^33) </= OtherNumber
+ /*
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* type3 = *it3;
+ AstType* union12 = T.Union(type1, type2);
+ CHECK(!(type1->Is(type3) && type2->Is(type3)) || union12->Is(type3));
+ }
+ }
+ }
+ */
+ }
+
+ void Union3() {
+ // Monotonicity: T1->Is(T2) or T1->Is(T3) implies T1->Is(Union(T2, T3))
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ HandleScope scope(isolate);
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ for (TypeIterator it3 = it2; it3 != T.types.end(); ++it3) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* type3 = *it3;
+ AstType* union23 = T.Union(type2, type3);
+ CHECK(!(type1->Is(type2) || type1->Is(type3)) || type1->Is(union23));
+ }
+ }
+ }
+ }
+
+ void Union4() {
+ // Class-class
+ CheckSub(T.Union(T.ObjectClass, T.ArrayClass), T.Object);
+ CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.OtherObject);
+ CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.Receiver);
+ CheckDisjoint(T.Union(T.ObjectClass, T.ArrayClass), T.Number);
+
+ // Constant-constant
+ CheckSub(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.Object);
+ CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayConstant), T.OtherObject);
+ CheckUnordered(T.Union(T.ObjectConstant1, T.ObjectConstant2),
+ T.ObjectClass);
+ CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayConstant), T.OtherObject);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayConstant), T.Number);
+ CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayConstant),
+ T.ObjectClass); // !!!
+
+ // Bitset-array
+ CHECK(this->IsBitset(T.Union(T.AnyArray, T.Receiver)));
+ CHECK(this->IsUnion(T.Union(T.NumberArray, T.Number)));
+
+ CheckEqual(T.Union(T.AnyArray, T.Receiver), T.Receiver);
+ CheckEqual(T.Union(T.AnyArray, T.OtherObject), T.OtherObject);
+ CheckUnordered(T.Union(T.AnyArray, T.String), T.Receiver);
+ CheckOverlap(T.Union(T.NumberArray, T.String), T.Object);
+ CheckDisjoint(T.Union(T.NumberArray, T.String), T.Number);
+
+ // Bitset-function
+ CHECK(this->IsBitset(T.Union(T.MethodFunction, T.Object)));
+ CHECK(this->IsUnion(T.Union(T.NumberFunction1, T.Number)));
+
+ CheckEqual(T.Union(T.MethodFunction, T.Object), T.Object);
+ CheckUnordered(T.Union(T.NumberFunction1, T.String), T.Object);
+ CheckOverlap(T.Union(T.NumberFunction2, T.String), T.Object);
+ CheckDisjoint(T.Union(T.NumberFunction1, T.String), T.Number);
+
+ // Bitset-class
+ CheckSub(T.Union(T.ObjectClass, T.SignedSmall),
+ T.Union(T.Object, T.Number));
+ CheckSub(T.Union(T.ObjectClass, T.OtherObject), T.Object);
+ CheckUnordered(T.Union(T.ObjectClass, T.String), T.OtherObject);
+ CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object);
+ CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number);
+
+ // Bitset-constant
+ CheckSub(T.Union(T.ObjectConstant1, T.Signed32),
+ T.Union(T.Object, T.Number));
+ CheckSub(T.Union(T.ObjectConstant1, T.OtherObject), T.Object);
+ CheckUnordered(T.Union(T.ObjectConstant1, T.String), T.OtherObject);
+ CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number);
+
+ // Class-constant
+ CheckSub(T.Union(T.ObjectConstant1, T.ArrayClass), T.Object);
+ CheckUnordered(T.ObjectClass, T.Union(T.ObjectConstant1, T.ArrayClass));
+ CheckSub(T.Union(T.ObjectConstant1, T.ArrayClass),
+ T.Union(T.Receiver, T.Object));
+ CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayClass), T.ArrayConstant);
+ CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectConstant2);
+ CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayClass),
+ T.ObjectClass); // !!!
+
+ // Bitset-union
+ CheckSub(T.NaN,
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number));
+ CheckSub(T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Signed32),
+ T.Union(T.ObjectConstant1, T.Union(T.Number, T.ArrayClass)));
+
+ // Class-union
+ CheckSub(T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)),
+ T.Object);
+ CheckEqual(T.Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.ArrayClass),
+ T.Union(T.ArrayClass, T.ObjectConstant2));
+
+ // Constant-union
+ CheckEqual(T.Union(T.ObjectConstant1,
+ T.Union(T.ObjectConstant1, T.ObjectConstant2)),
+ T.Union(T.ObjectConstant2, T.ObjectConstant1));
+ CheckEqual(
+ T.Union(T.Union(T.ArrayConstant, T.ObjectConstant2), T.ObjectConstant1),
+ T.Union(T.ObjectConstant2,
+ T.Union(T.ArrayConstant, T.ObjectConstant1)));
+
+ // Array-union
+ CheckEqual(T.Union(T.AnyArray, T.Union(T.NumberArray, T.AnyArray)),
+ T.Union(T.AnyArray, T.NumberArray));
+ CheckSub(T.Union(T.AnyArray, T.NumberArray), T.OtherObject);
+
+ // Function-union
+ CheckEqual(T.Union(T.NumberFunction1, T.NumberFunction2),
+ T.Union(T.NumberFunction2, T.NumberFunction1));
+ CheckSub(T.Union(T.SignedFunction1, T.MethodFunction), T.Object);
+
+ // Union-union
+ CheckEqual(T.Union(T.Union(T.ObjectConstant2, T.ObjectConstant1),
+ T.Union(T.ObjectConstant1, T.ObjectConstant2)),
+ T.Union(T.ObjectConstant2, T.ObjectConstant1));
+ CheckEqual(T.Union(T.Union(T.Number, T.ArrayClass),
+ T.Union(T.SignedSmall, T.Receiver)),
+ T.Union(T.Number, T.Receiver));
+ }
+
+ void Intersect() {
+ // Identity: Intersect(T, Any) = T
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ AstType* intersect_type = T.Intersect(type, T.Any);
+ CheckEqual(intersect_type, type);
+ }
+
+ // Domination: Intersect(T, None) = None
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ AstType* intersect_type = T.Intersect(type, T.None);
+ CheckEqual(intersect_type, T.None);
+ }
+
+ // Idempotence: Intersect(T, T) = T
+ for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+ AstType* type = *it;
+ AstType* intersect_type = T.Intersect(type, type);
+ CheckEqual(intersect_type, type);
+ }
+
+ // Commutativity: Intersect(T1, T2) = Intersect(T2, T1)
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* intersect12 = T.Intersect(type1, type2);
+ AstType* intersect21 = T.Intersect(type2, type1);
+ CheckEqual(intersect12, intersect21);
+ }
+ }
+
+ // Associativity:
+ // Intersect(T1, Intersect(T2, T3)) = Intersect(Intersect(T1, T2), T3)
+ // This does NOT hold. For example:
+ // (Class(..stringy1..) /\ Class(..stringy2..)) /\ Constant(..string..) =
+ // None
+ // Class(..stringy1..) /\ (Class(..stringy2..) /\ Constant(..string..)) =
+ // Constant(..string..)
+ /*
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* type3 = *it3;
+ AstType* intersect12 = T.Intersect(type1, type2);
+ AstType* intersect23 = T.Intersect(type2, type3);
+ AstType* intersect1_23 = T.Intersect(type1, intersect23);
+ AstType* intersect12_3 = T.Intersect(intersect12, type3);
+ CheckEqual(intersect1_23, intersect12_3);
+ }
+ }
+ }
+ */
+
+ // Join: Intersect(T1, T2)->Is(T1) and Intersect(T1, T2)->Is(T2)
+ // This does NOT hold. For example:
+ // Class(..stringy..) /\ Constant(..string..) = Constant(..string..)
+ // Currently, not even the disjunction holds:
+ // Class(Internal/TaggedPtr) /\ (Any/Untagged \/ Context(..)) =
+ // Class(Internal/TaggedPtr) \/ Context(..)
+ /*
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* intersect12 = T.Intersect(type1, type2);
+ CHECK(intersect12->Is(type1));
+ CHECK(intersect12->Is(type2));
+ }
+ }
+ */
+
+ // Lower Boundedness: T1->Is(T2) implies Intersect(T1, T2) = T1
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* intersect12 = T.Intersect(type1, type2);
+ if (type1->Is(type2)) CheckEqual(intersect12, type1);
+ }
+ }
+
+ // Monotonicity: T1->Is(T2) implies Intersect(T1, T3)->Is(Intersect(T2, T3))
+ // This does NOT hold. For example:
+ // Class(OtherObject/TaggedPtr) <= Any/TaggedPtr
+ // Class(OtherObject/TaggedPtr) /\ Any/UntaggedInt1 = Class(..)
+ // Any/TaggedPtr /\ Any/UntaggedInt1 = None
+ /*
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* type3 = *it3;
+ AstType* intersect13 = T.Intersect(type1, type3);
+ AstType* intersect23 = T.Intersect(type2, type3);
+ CHECK(!type1->Is(type2) || intersect13->Is(intersect23));
+ }
+ }
+ }
+ */
+
+ // Monotonicity: T1->Is(T3) or T2->Is(T3) implies Intersect(T1, T2)->Is(T3)
+ // This does NOT hold. For example:
+ // Class(..stringy..) <= Class(..stringy..)
+ // Class(..stringy..) /\ Constant(..string..) = Constant(..string..)
+ // Constant(..string..) </= Class(..stringy..)
+ /*
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* type3 = *it3;
+ AstType* intersect12 = T.Intersect(type1, type2);
+ CHECK(!(type1->Is(type3) || type2->Is(type3)) ||
+ intersect12->Is(type3));
+ }
+ }
+ }
+ */
+
+ // Monotonicity: T1->Is(T2) and T1->Is(T3) implies T1->Is(Intersect(T2, T3))
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ HandleScope scope(isolate);
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* type3 = *it3;
+ AstType* intersect23 = T.Intersect(type2, type3);
+ CHECK(!(type1->Is(type2) && type1->Is(type3)) ||
+ type1->Is(intersect23));
+ }
+ }
+ }
+
+ // Bitset-class
+ CheckEqual(T.Intersect(T.ObjectClass, T.Object), T.ObjectClass);
+ CheckEqual(T.Semantic(T.Intersect(T.ObjectClass, T.Number)), T.None);
+
+ // Bitset-array
+ CheckEqual(T.Intersect(T.NumberArray, T.Object), T.NumberArray);
+ CheckEqual(T.Semantic(T.Intersect(T.AnyArray, T.Proxy)), T.None);
+
+ // Bitset-function
+ CheckEqual(T.Intersect(T.MethodFunction, T.Object), T.MethodFunction);
+ CheckEqual(T.Semantic(T.Intersect(T.NumberFunction1, T.Proxy)), T.None);
+
+ // Bitset-union
+ CheckEqual(T.Intersect(T.Object, T.Union(T.ObjectConstant1, T.ObjectClass)),
+ T.Union(T.ObjectConstant1, T.ObjectClass));
+ CheckEqual(T.Semantic(T.Intersect(T.Union(T.ArrayClass, T.ObjectConstant1),
+ T.Number)),
+ T.None);
+
+ // Class-constant
+ CHECK(T.Intersect(T.ObjectConstant1, T.ObjectClass)->IsInhabited()); // !!!
+ CHECK(T.Intersect(T.ArrayClass, T.ObjectConstant2)->IsInhabited());
+
+ // Array-union
+ CheckEqual(T.Intersect(T.NumberArray, T.Union(T.NumberArray, T.ArrayClass)),
+ T.NumberArray);
+ CheckEqual(T.Intersect(T.AnyArray, T.Union(T.Object, T.SmiConstant)),
+ T.AnyArray);
+ CHECK(!T.Intersect(T.Union(T.AnyArray, T.ArrayConstant), T.NumberArray)
+ ->IsInhabited());
+
+ // Function-union
+ CheckEqual(
+ T.Intersect(T.MethodFunction, T.Union(T.String, T.MethodFunction)),
+ T.MethodFunction);
+ CheckEqual(T.Intersect(T.NumberFunction1, T.Union(T.Object, T.SmiConstant)),
+ T.NumberFunction1);
+ CHECK(!T.Intersect(T.Union(T.MethodFunction, T.Name), T.NumberFunction2)
+ ->IsInhabited());
+
+ // Class-union
+ CheckEqual(
+ T.Intersect(T.ArrayClass, T.Union(T.ObjectConstant2, T.ArrayClass)),
+ T.ArrayClass);
+ CheckEqual(T.Intersect(T.ArrayClass, T.Union(T.Object, T.SmiConstant)),
+ T.ArrayClass);
+ CHECK(T.Intersect(T.Union(T.ObjectClass, T.ArrayConstant), T.ArrayClass)
+ ->IsInhabited()); // !!!
+
+ // Constant-union
+ CheckEqual(T.Intersect(T.ObjectConstant1,
+ T.Union(T.ObjectConstant1, T.ObjectConstant2)),
+ T.ObjectConstant1);
+ CheckEqual(T.Intersect(T.SmiConstant, T.Union(T.Number, T.ObjectConstant2)),
+ T.SmiConstant);
+ CHECK(
+ T.Intersect(T.Union(T.ArrayConstant, T.ObjectClass), T.ObjectConstant1)
+ ->IsInhabited()); // !!!
+
+ // Union-union
+ CheckEqual(T.Intersect(T.Union(T.Number, T.ArrayClass),
+ T.Union(T.SignedSmall, T.Receiver)),
+ T.Union(T.SignedSmall, T.ArrayClass));
+ CheckEqual(T.Intersect(T.Union(T.Number, T.ObjectClass),
+ T.Union(T.Signed32, T.OtherObject)),
+ T.Union(T.Signed32, T.ObjectClass));
+ CheckEqual(T.Intersect(T.Union(T.ObjectConstant2, T.ObjectConstant1),
+ T.Union(T.ObjectConstant1, T.ObjectConstant2)),
+ T.Union(T.ObjectConstant2, T.ObjectConstant1));
+ CheckEqual(
+ T.Intersect(T.Union(T.ArrayClass,
+ T.Union(T.ObjectConstant2, T.ObjectConstant1)),
+ T.Union(T.ObjectConstant1,
+ T.Union(T.ArrayConstant, T.ObjectConstant2))),
+ T.Union(T.ArrayConstant,
+ T.Union(T.ObjectConstant2, T.ObjectConstant1))); // !!!
+ }
+
+ void Distributivity() {
+ // Union(T1, Intersect(T2, T3)) = Intersect(Union(T1, T2), Union(T1, T3))
+ // This does NOT hold. For example:
+ // Untagged \/ (Untagged /\ Class(../Tagged)) = Untagged \/ Class(../Tagged)
+ // (Untagged \/ Untagged) /\ (Untagged \/ Class(../Tagged)) =
+ // Untagged /\ (Untagged \/ Class(../Tagged)) = Untagged
+ // because Untagged <= Untagged \/ Class(../Tagged)
+ /*
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* type3 = *it3;
+ AstType* union12 = T.Union(type1, type2);
+ AstType* union13 = T.Union(type1, type3);
+ AstType* intersect23 = T.Intersect(type2, type3);
+ AstType* union1_23 = T.Union(type1, intersect23);
+ AstType* intersect12_13 = T.Intersect(union12, union13);
+ CHECK(Equal(union1_23, intersect12_13));
+ }
+ }
+ }
+ */
+
+ // Intersect(T1, Union(T2, T3)) = Union(Intersect(T1, T2), Intersect(T1,T3))
+ // This does NOT hold. For example:
+ // Untagged /\ (Untagged \/ Class(../Tagged)) = Untagged
+ // (Untagged /\ Untagged) \/ (Untagged /\ Class(../Tagged)) =
+ // Untagged \/ Class(../Tagged)
+ /*
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ AstType* type3 = *it3;
+ AstType* intersect12 = T.Intersect(type1, type2);
+ AstType* intersect13 = T.Intersect(type1, type3);
+ AstType* union23 = T.Union(type2, type3);
+ AstType* intersect1_23 = T.Intersect(type1, union23);
+ AstType* union12_13 = T.Union(intersect12, intersect13);
+ CHECK(Equal(intersect1_23, union12_13));
+ }
+ }
+ }
+ */
+ }
+
+ void GetRange() {
+ // GetRange(Range(a, b)) = Range(a, b).
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ AstType* type1 = *it1;
+ if (type1->IsRange()) {
+ AstRangeType* range = type1->GetRange()->AsRange();
+ CHECK(type1->Min() == range->Min());
+ CHECK(type1->Max() == range->Max());
+ }
+ }
+
+ // GetRange(Union(Constant(x), Range(min,max))) == Range(min, max).
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ if (type1->IsConstant() && type2->IsRange()) {
+ AstType* u = T.Union(type1, type2);
+
+ CHECK(type2->Min() == u->GetRange()->Min());
+ CHECK(type2->Max() == u->GetRange()->Max());
+ }
+ }
+ }
+ }
+
+ void HTypeFromType() {
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ AstType* type1 = *it1;
+ AstType* type2 = *it2;
+ HType htype1 = HType::FromType(type1);
+ HType htype2 = HType::FromType(type2);
+ CHECK(!type1->Is(type2) || htype1.IsSubtypeOf(htype2));
+ }
+ }
+ }
+};
+
+} // namespace
+
+TEST(AstIsSomeType_zone) { Tests().IsSomeType(); }
+
+TEST(AstPointwiseRepresentation_zone) { Tests().PointwiseRepresentation(); }
+
+TEST(AstBitsetType_zone) { Tests().Bitset(); }
+
+TEST(AstClassType_zone) { Tests().Class(); }
+
+TEST(AstConstantType_zone) { Tests().Constant(); }
+
+TEST(AstRangeType_zone) { Tests().Range(); }
+
+TEST(AstArrayType_zone) { Tests().Array(); }
+
+TEST(AstFunctionType_zone) { Tests().Function(); }
+
+TEST(AstOf_zone) { Tests().Of(); }
+
+TEST(AstNowOf_zone) { Tests().NowOf(); }
+
+TEST(AstMinMax_zone) { Tests().MinMax(); }
+
+TEST(AstBitsetGlb_zone) { Tests().BitsetGlb(); }
+
+TEST(AstBitsetLub_zone) { Tests().BitsetLub(); }
+
+TEST(AstIs1_zone) { Tests().Is1(); }
+
+TEST(AstIs2_zone) { Tests().Is2(); }
+
+TEST(AstNowIs_zone) { Tests().NowIs(); }
+
+TEST(AstContains_zone) { Tests().Contains(); }
+
+TEST(AstNowContains_zone) { Tests().NowContains(); }
+
+TEST(AstMaybe_zone) { Tests().Maybe(); }
+
+TEST(AstUnion1_zone) { Tests().Union1(); }
+
+TEST(AstUnion2_zone) { Tests().Union2(); }
+
+TEST(AstUnion3_zone) { Tests().Union3(); }
+
+TEST(AstUnion4_zone) { Tests().Union4(); }
+
+TEST(AstIntersect_zone) { Tests().Intersect(); }
+
+TEST(AstDistributivity_zone) { Tests().Distributivity(); }
+
+TEST(AstGetRange_zone) { Tests().GetRange(); }
+
+TEST(AstHTypeFromType_zone) { Tests().HTypeFromType(); }
diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc
index c2cc89828e..5512bfce96 100644
--- a/deps/v8/test/cctest/test-ast.cc
+++ b/deps/v8/test/cctest/test-ast.cc
@@ -30,6 +30,7 @@
#include "src/v8.h"
#include "src/ast/ast.h"
+#include "src/zone/accounting-allocator.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -38,7 +39,7 @@ TEST(List) {
List<AstNode*>* list = new List<AstNode*>(0);
CHECK_EQ(0, list->length());
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
AstValueFactory value_factory(&zone, 0);
AstNodeFactory factory(&value_factory);
@@ -58,3 +59,43 @@ TEST(List) {
CHECK_EQ(0, list->length());
delete list;
}
+
+TEST(ConcatStrings) {
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator);
+ AstValueFactory value_factory(&zone, 0);
+
+ const AstRawString* one_byte = value_factory.GetOneByteString("a");
+
+ uint16_t two_byte_buffer[] = {
+ 0x3b1,
+ };
+ const AstRawString* two_byte = value_factory.GetTwoByteString(
+ Vector<const uint16_t>(two_byte_buffer, 1));
+
+ const AstRawString* expectation = value_factory.GetOneByteString("aa");
+ const AstRawString* result = value_factory.ConcatStrings(one_byte, one_byte);
+ CHECK(result->is_one_byte());
+ CHECK_EQ(expectation, result);
+
+ uint16_t expectation_buffer_one_two[] = {'a', 0x3b1};
+ expectation = value_factory.GetTwoByteString(
+ Vector<const uint16_t>(expectation_buffer_one_two, 2));
+ result = value_factory.ConcatStrings(one_byte, two_byte);
+ CHECK(!result->is_one_byte());
+ CHECK_EQ(expectation, result);
+
+ uint16_t expectation_buffer_two_one[] = {0x3b1, 'a'};
+ expectation = value_factory.GetTwoByteString(
+ Vector<const uint16_t>(expectation_buffer_two_one, 2));
+ result = value_factory.ConcatStrings(two_byte, one_byte);
+ CHECK(!result->is_one_byte());
+ CHECK_EQ(expectation, result);
+
+ uint16_t expectation_buffer_two_two[] = {0x3b1, 0x3b1};
+ expectation = value_factory.GetTwoByteString(
+ Vector<const uint16_t>(expectation_buffer_two_two, 2));
+ result = value_factory.ConcatStrings(two_byte, two_byte);
+ CHECK(!result->is_one_byte());
+ CHECK_EQ(expectation, result);
+}
diff --git a/deps/v8/test/cctest/test-bit-vector.cc b/deps/v8/test/cctest/test-bit-vector.cc
index 6b9fbc7ee2..99c5a68d45 100644
--- a/deps/v8/test/cctest/test-bit-vector.cc
+++ b/deps/v8/test/cctest/test-bit-vector.cc
@@ -35,7 +35,7 @@
using namespace v8::internal;
TEST(BitVector) {
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
{
BitVector v(15, &zone);
diff --git a/deps/v8/test/cctest/test-code-cache.cc b/deps/v8/test/cctest/test-code-cache.cc
index 817fa15b34..b0f020d960 100644
--- a/deps/v8/test/cctest/test-code-cache.cc
+++ b/deps/v8/test/cctest/test-code-cache.cc
@@ -4,8 +4,17 @@
#include "src/v8.h"
+#include "src/factory.h"
+#include "src/isolate.h"
#include "src/list.h"
#include "src/objects.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-code-layout.cc b/deps/v8/test/cctest/test-code-layout.cc
index a88c8783e6..7e80dd6417 100644
--- a/deps/v8/test/cctest/test-code-layout.cc
+++ b/deps/v8/test/cctest/test-code-layout.cc
@@ -2,7 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/factory.h"
+#include "src/isolate.h"
#include "src/objects.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 2d153e3822..588f430131 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -3,6 +3,9 @@
// found in the LICENSE file.
#include "src/base/utils/random-number-generator.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/compiler/node.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "test/cctest/compiler/code-assembler-tester.h"
@@ -132,7 +135,7 @@ TEST(TryToName) {
Label passed(&m), failed(&m);
Label if_keyisindex(&m), if_keyisunique(&m), if_bailout(&m);
- Variable var_index(&m, MachineRepresentation::kWord32);
+ Variable var_index(&m, MachineType::PointerRepresentation());
m.TryToName(key, &if_keyisindex, &var_index, &if_keyisunique, &if_bailout);
@@ -140,8 +143,8 @@ TEST(TryToName) {
m.GotoUnless(
m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kKeyIsIndex))),
&failed);
- m.Branch(m.Word32Equal(m.SmiToWord32(expected_arg), var_index.value()),
- &passed, &failed);
+ m.Branch(m.WordEqual(m.SmiUntag(expected_arg), var_index.value()), &passed,
+ &failed);
m.Bind(&if_keyisunique);
m.GotoUnless(
@@ -181,9 +184,17 @@ TEST(TryToName) {
}
{
- // TryToName(<negative smi>) => bailout.
+ // TryToName(<negative smi>) => if_keyisindex: smi value.
+ // A subsequent bounds check needs to take care of this case.
Handle<Object> key(Smi::FromInt(-1), isolate);
- ft.CheckTrue(key, expect_bailout);
+ ft.CheckTrue(key, expect_index, key);
+ }
+
+ {
+ // TryToName(<heap number with int value>) => if_keyisindex: number.
+ Handle<Object> key(isolate->factory()->NewHeapNumber(153));
+ Handle<Object> index(Smi::FromInt(153), isolate);
+ ft.CheckTrue(key, expect_index, index);
}
{
@@ -206,6 +217,31 @@ TEST(TryToName) {
}
{
+ // TryToName(<internalized uncacheable number string>) => bailout
+ Handle<Object> key =
+ isolate->factory()->InternalizeUtf8String("4294967294");
+ ft.CheckTrue(key, expect_bailout);
+ }
+
+ {
+ // TryToName(<non-internalized number string>) => if_keyisindex: number.
+ Handle<String> key = isolate->factory()->NewStringFromAsciiChecked("153");
+ uint32_t dummy;
+ CHECK(key->AsArrayIndex(&dummy));
+ CHECK(key->HasHashCode());
+ CHECK(!key->IsInternalizedString());
+ Handle<Object> index(Smi::FromInt(153), isolate);
+ ft.CheckTrue(key, expect_index, index);
+ }
+
+ {
+ // TryToName(<number string without cached index>) => bailout.
+ Handle<String> key = isolate->factory()->NewStringFromAsciiChecked("153");
+ CHECK(!key->HasHashCode());
+ ft.CheckTrue(key, expect_bailout);
+ }
+
+ {
// TryToName(<non-internalized string>) => bailout.
Handle<Object> key = isolate->factory()->NewStringFromAsciiChecked("test");
ft.CheckTrue(key, expect_bailout);
@@ -232,7 +268,7 @@ void TestNameDictionaryLookup() {
Label passed(&m), failed(&m);
Label if_found(&m), if_not_found(&m);
- Variable var_name_index(&m, MachineRepresentation::kWord32);
+ Variable var_name_index(&m, MachineType::PointerRepresentation());
m.NameDictionaryLookup<Dictionary>(dictionary, unique_name, &if_found,
&var_name_index, &if_not_found);
@@ -338,7 +374,7 @@ void TestNumberDictionaryLookup() {
Label passed(&m), failed(&m);
Label if_found(&m), if_not_found(&m);
- Variable var_entry(&m, MachineRepresentation::kWord32);
+ Variable var_entry(&m, MachineType::PointerRepresentation());
m.NumberDictionaryLookup<Dictionary>(dictionary, key, &if_found, &var_entry,
&if_not_found);
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index aef10f1f7a..bce3fb2394 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -30,10 +30,11 @@
#include "src/v8.h"
+#include "src/api.h"
#include "src/compiler.h"
#include "src/disasm.h"
+#include "src/factory.h"
#include "src/interpreter/interpreter.h"
-#include "src/parsing/parser.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -841,3 +842,26 @@ TEST(IgnitionEntryTrampolineSelfHealing) {
CHECK_NE(*isolate->builtins()->InterpreterEntryTrampoline(), f2->code());
CHECK_EQ(23.0, GetGlobalProperty("result2")->Number());
}
+
+TEST(InvocationCount) {
+ FLAG_allow_natives_syntax = true;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ CompileRun(
+ "function bar() {};"
+ "function foo() { return bar(); };"
+ "foo();");
+ Handle<JSFunction> foo = Handle<JSFunction>::cast(GetGlobalProperty("foo"));
+ CHECK_EQ(1, foo->feedback_vector()->invocation_count());
+ CompileRun("foo()");
+ CHECK_EQ(2, foo->feedback_vector()->invocation_count());
+ CompileRun("bar()");
+ CHECK_EQ(2, foo->feedback_vector()->invocation_count());
+ CompileRun("foo(); foo()");
+ CHECK_EQ(4, foo->feedback_vector()->invocation_count());
+ CompileRun("%BaselineFunctionOnNextCall(foo);");
+ CompileRun("foo();");
+ CHECK_EQ(5, foo->feedback_vector()->invocation_count());
+}
diff --git a/deps/v8/test/cctest/test-conversions.cc b/deps/v8/test/cctest/test-conversions.cc
index 9569dc8678..87dc99c3f0 100644
--- a/deps/v8/test/cctest/test-conversions.cc
+++ b/deps/v8/test/cctest/test-conversions.cc
@@ -27,9 +27,20 @@
#include <stdlib.h>
-#include "src/v8.h"
-
#include "src/base/platform/platform.h"
+#include "src/conversions.h"
+#include "src/factory.h"
+#include "src/isolate.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+#include "src/objects.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
+#include "src/unicode-cache.h"
+#include "src/v8.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 83c4e33c45..2f92f54d37 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -1023,6 +1023,7 @@ TEST(BoundFunctionCall) {
// This tests checks distribution of the samples through the source lines.
static void TickLines(bool optimize) {
+ if (!optimize) i::FLAG_crankshaft = false;
CcTest::InitializeVM();
LocalContext env;
i::FLAG_allow_natives_syntax = true;
@@ -1032,10 +1033,15 @@ static void TickLines(bool optimize) {
i::HandleScope scope(isolate);
i::EmbeddedVector<char, 512> script;
+ i::EmbeddedVector<char, 64> optimize_call;
const char* func_name = "func";
- const char* opt_func =
- optimize ? "%OptimizeFunctionOnNextCall" : "%NeverOptimizeFunction";
+ if (optimize) {
+ i::SNPrintF(optimize_call, "%%OptimizeFunctionOnNextCall(%s);\n",
+ func_name);
+ } else {
+ optimize_call[0] = '\0';
+ }
i::SNPrintF(script,
"function %s() {\n"
" var n = 0;\n"
@@ -1045,10 +1051,10 @@ static void TickLines(bool optimize) {
" n += m * m * m;\n"
" }\n"
"}\n"
- "%s();"
- "%s(%s);\n"
+ "%s();\n"
+ "%s"
"%s();\n",
- func_name, func_name, opt_func, func_name, func_name);
+ func_name, func_name, optimize_call.start(), func_name);
CompileRun(script.start());
@@ -1164,7 +1170,7 @@ TEST(FunctionCallSample) {
// Collect garbage that might have be generated while installing
// extensions.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CompileRun(call_function_test_source);
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
@@ -1671,9 +1677,11 @@ static void CheckFunctionDetails(v8::Isolate* isolate,
int script_id, int line, int column) {
v8::Local<v8::Context> context = isolate->GetCurrentContext();
CHECK(v8_str(name)->Equals(context, node->GetFunctionName()).FromJust());
+ CHECK_EQ(0, strcmp(name, node->GetFunctionNameStr()));
CHECK(v8_str(script_name)
->Equals(context, node->GetScriptResourceName())
.FromJust());
+ CHECK_EQ(0, strcmp(script_name, node->GetScriptResourceNameStr()));
CHECK_EQ(script_id, node->GetScriptId());
CHECK_EQ(line, node->GetLineNumber());
CHECK_EQ(column, node->GetColumnNumber());
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index a745949fbd..aa9f9f7ca1 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -25,9 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
-
+#include "src/date.h"
#include "src/global-handles.h"
+#include "src/isolate.h"
+#include "src/v8.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 925eaf4c27..9d63e7b3a6 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -371,8 +371,8 @@ void CheckDebuggerUnloaded(bool check_functions) {
CHECK(!CcTest::i_isolate()->debug()->debug_info_list_);
// Collect garbage to ensure weak handles are cleared.
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(Heap::kMakeHeapIterableMask);
// Iterate the head and check that there are no debugger related objects left.
HeapIterator iterator(CcTest::heap());
@@ -800,10 +800,10 @@ static void DebugEventBreakPointCollectGarbage(
break_point_hit_count++;
if (break_point_hit_count % 2 == 0) {
// Scavenge.
- CcTest::heap()->CollectGarbage(v8::internal::NEW_SPACE);
+ CcTest::CollectGarbage(v8::internal::NEW_SPACE);
} else {
// Mark sweep compact.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
}
}
@@ -824,7 +824,7 @@ static void DebugEventBreak(
// Run the garbage collector to enforce heap verification if option
// --verify-heap is set.
- CcTest::heap()->CollectGarbage(v8::internal::NEW_SPACE);
+ CcTest::CollectGarbage(v8::internal::NEW_SPACE);
// Set the break flag again to come back here as soon as possible.
v8::Debug::DebugBreak(CcTest::isolate());
@@ -1217,12 +1217,12 @@ static void CallAndGC(v8::Local<v8::Context> context,
CHECK_EQ(1 + i * 3, break_point_hit_count);
// Scavenge and call function.
- CcTest::heap()->CollectGarbage(v8::internal::NEW_SPACE);
+ CcTest::CollectGarbage(v8::internal::NEW_SPACE);
f->Call(context, recv, 0, NULL).ToLocalChecked();
CHECK_EQ(2 + i * 3, break_point_hit_count);
// Mark sweep (and perhaps compact) and call function.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
f->Call(context, recv, 0, NULL).ToLocalChecked();
CHECK_EQ(3 + i * 3, break_point_hit_count);
}
@@ -2080,7 +2080,7 @@ TEST(ScriptBreakPointLineTopLevel) {
->Get(context, v8_str(env->GetIsolate(), "f"))
.ToLocalChecked());
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 3, -1);
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index e4506aee50..d7d71c2383 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -29,6 +29,7 @@
#include "src/v8.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "test/cctest/cctest.h"
@@ -144,7 +145,7 @@ void DeclarationContext::Check(const char* source, int get, int set, int query,
InitializeIfNeeded();
// A retry after a GC may pollute the counts, so perform gc now
// to avoid that.
- CcTest::heap()->CollectGarbage(v8::internal::NEW_SPACE);
+ CcTest::CollectGarbage(v8::internal::NEW_SPACE);
HandleScope scope(CcTest::isolate());
TryCatch catcher(CcTest::isolate());
catcher.SetVerbose(true);
@@ -175,7 +176,7 @@ void DeclarationContext::Check(const char* source, int get, int set, int query,
}
}
// Clean slate for the next test.
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
}
@@ -248,9 +249,7 @@ TEST(Unknown) {
{ DeclarationContext context;
context.Check("function x() { }; x",
1, // access
- 0,
- 0,
- EXPECT_RESULT);
+ 1, 1, EXPECT_RESULT);
}
}
@@ -284,9 +283,7 @@ TEST(Absent) {
{ AbsentPropertyContext context;
context.Check("function x() { }; x",
1, // access
- 0,
- 0,
- EXPECT_RESULT);
+ 1, 1, EXPECT_RESULT);
}
{ AbsentPropertyContext context;
@@ -354,9 +351,7 @@ TEST(Appearing) {
{ AppearingPropertyContext context;
context.Check("function x() { }; x",
1, // access
- 0,
- 0,
- EXPECT_RESULT);
+ 1, 1, EXPECT_RESULT);
}
}
@@ -485,11 +480,7 @@ TEST(ExistsInHiddenPrototype) {
}
{ ExistsInHiddenPrototypeContext context;
- context.Check("function x() { }; x",
- 0,
- 0,
- 0,
- EXPECT_RESULT);
+ context.Check("function x() { }; x", 0, 1, 1, EXPECT_RESULT);
}
}
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index 7ba16b59f8..0d435c5e10 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -109,7 +109,8 @@ class AllowNativesSyntaxNoInlining {
// Abort any ongoing incremental marking to make sure that all weak global
// handle callbacks are processed.
static void NonIncrementalGC(i::Isolate* isolate) {
- isolate->heap()->CollectAllGarbage();
+ isolate->heap()->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
+ i::GarbageCollectionReason::kTesting);
}
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index 0756de6c1d..c1184fa2b4 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -56,7 +56,7 @@ static void TestHashMap(Handle<HashMap> table) {
CHECK_EQ(table->Lookup(b), CcTest::heap()->the_hole_value());
// Keys still have to be valid after objects were moved.
- CcTest::heap()->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK_EQ(table->NumberOfElements(), 1);
CHECK_EQ(table->Lookup(a), *b);
CHECK_EQ(table->Lookup(b), CcTest::heap()->the_hole_value());
@@ -126,7 +126,7 @@ static void TestHashSet(Handle<HashSet> table) {
CHECK(!table->Has(isolate, b));
// Keys still have to be valid after objects were moved.
- CcTest::heap()->CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
CHECK_EQ(table->NumberOfElements(), 1);
CHECK(table->Has(isolate, a));
CHECK(!table->Has(isolate, b));
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index d5f594962c..1c6d360d7f 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -505,8 +505,8 @@ TEST(msr_mrs_disasm) {
TEST(Vfp) {
SET_UP();
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatureScope scope(&assm, VFP3);
+ if (CpuFeatures::IsSupported(VFPv3)) {
+ CpuFeatureScope scope(&assm, VFPv3);
COMPARE(vmov(d0, r2, r3),
"ec432b10 vmov d0, r2, r3");
COMPARE(vmov(r2, r3, d0),
@@ -737,6 +737,7 @@ TEST(Vfp) {
"eeba9bcf vcvt.f64.s32 d9, d9, #2");
if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ CpuFeatureScope scope(&assm, VFP32DREGS);
COMPARE(vmov(d3, d27),
"eeb03b6b vmov.f64 d3, d27");
COMPARE(vmov(d18, d7),
@@ -840,6 +841,7 @@ TEST(ARMv8_vrintX_disasm) {
SET_UP();
if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(&assm, ARMv8);
COMPARE(vrinta(d0, d0), "feb80b40 vrinta.f64.f64 d0, d0");
COMPARE(vrinta(d2, d3), "feb82b43 vrinta.f64.f64 d2, d3");
@@ -860,10 +862,26 @@ TEST(ARMv8_vrintX_disasm) {
}
+TEST(ARMv8_vminmax_disasm) {
+ SET_UP();
+
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(&assm, ARMv8);
+ COMPARE(vmaxnm(d0, d1, d2), "fe810b02 vmaxnm.f64 d0, d1, d2");
+ COMPARE(vminnm(d3, d4, d5), "fe843b45 vminnm.f64 d3, d4, d5");
+ COMPARE(vmaxnm(s6, s7, s8), "fe833a84 vmaxnm.f32 s6, s7, s8");
+ COMPARE(vminnm(s9, s10, s11), "fec54a65 vminnm.f32 s9, s10, s11");
+ }
+
+ VERIFY_RUN();
+}
+
+
TEST(ARMv8_vselX_disasm) {
SET_UP();
if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(&assm, ARMv8);
// Native instructions.
COMPARE(vsel(eq, d0, d1, d2),
"fe010b02 vseleq.f64 d0, d1, d2");
@@ -918,6 +936,10 @@ TEST(Neon) {
"f3886a11 vmovl.u8 q3, d1");
COMPARE(vmovl(NeonU8, q4, d2),
"f3888a12 vmovl.u8 q4, d2");
+ COMPARE(vswp(d0, d31),
+ "f3b2002f vswp d0, d31");
+ COMPARE(vswp(d16, d14),
+ "f3f2000e vswp d16, d14");
}
VERIFY_RUN();
@@ -1165,10 +1187,33 @@ TEST(Barrier) {
COMPARE(dsb(ISH),
"f57ff04b dsb ish");
- COMPARE(isb(ISH),
- "f57ff06b isb ish");
+ COMPARE(isb(SY),
+ "f57ff06f isb sy");
+ } else {
+ // ARMv6 uses CP15 to implement barriers. The BarrierOption argument is
+ // ignored.
+ COMPARE(dmb(ISH),
+ "ee070fba mcr (CP15DMB)");
+ COMPARE(dsb(OSH),
+ "ee070f9a mcr (CP15DSB)");
+ COMPARE(isb(SY),
+ "ee070f95 mcr (CP15ISB)");
}
+ // ARMv6 barriers.
+ // Details available in ARM DDI 0406C.b, B3-1750.
+ COMPARE(mcr(p15, 0, r0, cr7, cr10, 5), "ee070fba mcr (CP15DMB)");
+ COMPARE(mcr(p15, 0, r0, cr7, cr10, 4), "ee070f9a mcr (CP15DSB)");
+ COMPARE(mcr(p15, 0, r0, cr7, cr5, 4), "ee070f95 mcr (CP15ISB)");
+ // Rt is ignored.
+ COMPARE(mcr(p15, 0, lr, cr7, cr10, 5), "ee07efba mcr (CP15DMB)");
+ COMPARE(mcr(p15, 0, lr, cr7, cr10, 4), "ee07ef9a mcr (CP15DSB)");
+ COMPARE(mcr(p15, 0, lr, cr7, cr5, 4), "ee07ef95 mcr (CP15ISB)");
+ // The mcr instruction can be conditional.
+ COMPARE(mcr(p15, 0, r0, cr7, cr10, 5, eq), "0e070fba mcreq (CP15DMB)");
+ COMPARE(mcr(p15, 0, r0, cr7, cr10, 4, ne), "1e070f9a mcrne (CP15DSB)");
+ COMPARE(mcr(p15, 0, r0, cr7, cr5, 4, mi), "4e070f95 mcrmi (CP15ISB)");
+
VERIFY_RUN();
}
diff --git a/deps/v8/test/cctest/test-disasm-mips.cc b/deps/v8/test/cctest/test-disasm-mips.cc
index b4f81ec3b1..010657d468 100644
--- a/deps/v8/test/cctest/test-disasm-mips.cc
+++ b/deps/v8/test/cctest/test-disasm-mips.cc
@@ -1090,3 +1090,20 @@ TEST(ctc1_cfc1_disasm) {
COMPARE(cfc1(a0, FCSR), "4444f800 cfc1 a0, FCSR");
VERIFY_RUN();
}
+
+TEST(madd_msub_maddf_msubf) {
+ SET_UP();
+ if (IsMipsArchVariant(kMips32r2)) {
+ COMPARE(madd_s(f4, f6, f8, f10), "4cca4120 madd.s f4, f6, f8, f10");
+ COMPARE(madd_d(f4, f6, f8, f10), "4cca4121 madd.d f4, f6, f8, f10");
+ COMPARE(msub_s(f4, f6, f8, f10), "4cca4128 msub.s f4, f6, f8, f10");
+ COMPARE(msub_d(f4, f6, f8, f10), "4cca4129 msub.d f4, f6, f8, f10");
+ }
+ if (IsMipsArchVariant(kMips32r6)) {
+ COMPARE(maddf_s(f4, f8, f10), "460a4118 maddf.s f4, f8, f10");
+ COMPARE(maddf_d(f4, f8, f10), "462a4118 maddf.d f4, f8, f10");
+ COMPARE(msubf_s(f4, f8, f10), "460a4119 msubf.s f4, f8, f10");
+ COMPARE(msubf_d(f4, f8, f10), "462a4119 msubf.d f4, f8, f10");
+ }
+ VERIFY_RUN();
+}
diff --git a/deps/v8/test/cctest/test-disasm-mips64.cc b/deps/v8/test/cctest/test-disasm-mips64.cc
index dc6f34e462..fbc370daa1 100644
--- a/deps/v8/test/cctest/test-disasm-mips64.cc
+++ b/deps/v8/test/cctest/test-disasm-mips64.cc
@@ -699,6 +699,10 @@ TEST(Type0) {
COMPARE(dsbh(s6, s7), "7c17b0a4 dsbh s6, s7");
COMPARE(dsbh(v0, v1), "7c0310a4 dsbh v0, v1");
+ COMPARE(dins_(a0, a1, 31, 1), "7ca4ffc7 dins a0, a1, 31, 1");
+ COMPARE(dins_(s6, s7, 30, 2), "7ef6ff87 dins s6, s7, 30, 2");
+ COMPARE(dins_(v0, v1, 0, 32), "7c62f807 dins v0, v1, 0, 32");
+
COMPARE(dshd(a0, a1), "7c052164 dshd a0, a1");
COMPARE(dshd(s6, s7), "7c17b164 dshd s6, s7");
COMPARE(dshd(v0, v1), "7c031164 dshd v0, v1");
@@ -1272,3 +1276,20 @@ TEST(ctc1_cfc1_disasm) {
COMPARE(cfc1(a0, FCSR), "4444f800 cfc1 a0, FCSR");
VERIFY_RUN();
}
+
+TEST(madd_msub_maddf_msubf) {
+ SET_UP();
+ if (kArchVariant == kMips64r2) {
+ COMPARE(madd_s(f4, f6, f8, f10), "4cca4120 madd.s f4, f6, f8, f10");
+ COMPARE(madd_d(f4, f6, f8, f10), "4cca4121 madd.d f4, f6, f8, f10");
+ COMPARE(msub_s(f4, f6, f8, f10), "4cca4128 msub.s f4, f6, f8, f10");
+ COMPARE(msub_d(f4, f6, f8, f10), "4cca4129 msub.d f4, f6, f8, f10");
+ }
+ if (kArchVariant == kMips64r6) {
+ COMPARE(maddf_s(f4, f8, f10), "460a4118 maddf.s f4, f8, f10");
+ COMPARE(maddf_d(f4, f8, f10), "462a4118 maddf.d f4, f8, f10");
+ COMPARE(msubf_s(f4, f8, f10), "460a4119 msubf.s f4, f8, f10");
+ COMPARE(msubf_d(f4, f8, f10), "462a4119 msubf.d f4, f8, f10");
+ }
+ VERIFY_RUN();
+}
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 284ca859be..af8beaaa83 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -50,7 +50,7 @@ TEST(DisasmX64) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- v8::internal::byte buffer[4096];
+ v8::internal::byte buffer[8192];
Assembler assm(isolate, buffer, sizeof buffer);
DummyStaticFunction(NULL); // just bloody use it (DELETE; debugging)
@@ -420,7 +420,8 @@ TEST(DisasmX64) {
__ ucomiss(xmm0, xmm1);
__ ucomiss(xmm0, Operand(rbx, rcx, times_4, 10000));
}
- // SSE 2 instructions
+
+ // SSE2 instructions
{
__ cvttsd2si(rdx, Operand(rbx, rcx, times_4, 10000));
__ cvttsd2si(rdx, xmm1);
@@ -467,6 +468,13 @@ TEST(DisasmX64) {
__ punpckldq(xmm1, xmm11);
__ punpckldq(xmm5, Operand(rdx, 4));
__ punpckhdq(xmm8, xmm15);
+
+#define EMIT_SSE2_INSTR(instruction, notUsed1, notUsed2, notUsed3) \
+ __ instruction(xmm5, xmm1); \
+ __ instruction(xmm5, Operand(rdx, 4));
+
+ SSE2_INSTRUCTION_LIST(EMIT_SSE2_INSTR)
+#undef EMIT_SSE2_INSTR
}
// cmov.
@@ -490,6 +498,24 @@ TEST(DisasmX64) {
}
{
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatureScope scope(&assm, SSE3);
+ __ lddqu(xmm1, Operand(rdx, 4));
+ }
+ }
+
+#define EMIT_SSE34_INSTR(instruction, notUsed1, notUsed2, notUsed3, notUsed4) \
+ __ instruction(xmm5, xmm1); \
+ __ instruction(xmm5, Operand(rdx, 4));
+
+ {
+ if (CpuFeatures::IsSupported(SSSE3)) {
+ CpuFeatureScope scope(&assm, SSSE3);
+ SSSE3_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
+ }
+ }
+
+ {
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope scope(&assm, SSE4_1);
__ insertps(xmm5, xmm1, 123);
@@ -539,12 +565,10 @@ TEST(DisasmX64) {
__ movups(xmm5, xmm1);
__ movups(xmm5, Operand(rdx, 4));
__ movups(Operand(rdx, 4), xmm5);
- __ paddd(xmm5, xmm1);
- __ paddd(xmm5, Operand(rdx, 4));
- __ psubd(xmm5, xmm1);
- __ psubd(xmm5, Operand(rdx, 4));
__ pmulld(xmm5, xmm1);
__ pmulld(xmm5, Operand(rdx, 4));
+ __ pmullw(xmm5, xmm1);
+ __ pmullw(xmm5, Operand(rdx, 4));
__ pmuludq(xmm5, xmm1);
__ pmuludq(xmm5, Operand(rdx, 4));
__ psrldq(xmm5, 123);
@@ -553,8 +577,11 @@ TEST(DisasmX64) {
__ cvtps2dq(xmm5, Operand(rdx, 4));
__ cvtdq2ps(xmm5, xmm1);
__ cvtdq2ps(xmm5, Operand(rdx, 4));
+
+ SSE4_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
}
}
+#undef EMIT_SSE34_INSTR
// AVX instruction
{
@@ -678,6 +705,41 @@ TEST(DisasmX64) {
__ vcmpnltpd(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
__ vcmpnlepd(xmm5, xmm4, xmm1);
__ vcmpnlepd(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
+
+#define EMIT_SSE2_AVXINSTR(instruction, notUsed1, notUsed2, notUsed3) \
+ __ v##instruction(xmm10, xmm5, xmm1); \
+ __ v##instruction(xmm10, xmm5, Operand(rdx, 4));
+
+#define EMIT_SSE34_AVXINSTR(instruction, notUsed1, notUsed2, notUsed3, \
+ notUsed4) \
+ __ v##instruction(xmm10, xmm5, xmm1); \
+ __ v##instruction(xmm10, xmm5, Operand(rdx, 4));
+
+ SSE2_INSTRUCTION_LIST(EMIT_SSE2_AVXINSTR)
+ SSSE3_INSTRUCTION_LIST(EMIT_SSE34_AVXINSTR)
+ SSE4_INSTRUCTION_LIST(EMIT_SSE34_AVXINSTR)
+#undef EMIT_SSE2_AVXINSTR
+#undef EMIT_SSE34_AVXINSTR
+
+ __ vlddqu(xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ vpsllw(xmm0, xmm15, 21);
+ __ vpsrlw(xmm0, xmm15, 21);
+ __ vpsraw(xmm0, xmm15, 21);
+ __ vpsrad(xmm0, xmm15, 21);
+ __ vpextrb(rax, xmm2, 12);
+ __ vpextrb(Operand(rbx, rcx, times_4, 10000), xmm2, 12);
+ __ vpextrw(rax, xmm2, 5);
+ __ vpextrw(Operand(rbx, rcx, times_4, 10000), xmm2, 5);
+ __ vpextrd(rax, xmm2, 2);
+ __ vpextrd(Operand(rbx, rcx, times_4, 10000), xmm2, 2);
+
+ __ vpinsrb(xmm1, xmm2, rax, 12);
+ __ vpinsrb(xmm1, xmm2, Operand(rbx, rcx, times_4, 10000), 12);
+ __ vpinsrw(xmm1, xmm2, rax, 5);
+ __ vpinsrw(xmm1, xmm2, Operand(rbx, rcx, times_4, 10000), 5);
+ __ vpinsrd(xmm1, xmm2, rax, 2);
+ __ vpinsrd(xmm1, xmm2, Operand(rbx, rcx, times_4, 10000), 2);
+ __ vpshufd(xmm1, xmm2, 85);
}
}
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index 4322e746e2..af9c6feef0 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -41,8 +41,8 @@ TEST(VectorStructure) {
// Empty vectors are the empty fixed array.
StaticFeedbackVectorSpec empty;
Handle<TypeFeedbackVector> vector = NewTypeFeedbackVector(isolate, &empty);
- CHECK(Handle<FixedArray>::cast(vector)
- .is_identical_to(factory->empty_fixed_array()));
+ CHECK(Handle<FixedArray>::cast(vector).is_identical_to(
+ factory->empty_type_feedback_vector()));
// Which can nonetheless be queried.
CHECK(vector->is_empty());
@@ -199,8 +199,6 @@ TEST(VectorCallICStates) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
-
// Make sure function f has a call that uses a type feedback slot.
CompileRun(
"function foo() { return 17; }"
@@ -219,7 +217,7 @@ TEST(VectorCallICStates) {
CHECK_EQ(GENERIC, nexus.StateFromFeedback());
// After a collection, state should remain GENERIC.
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(GENERIC, nexus.StateFromFeedback());
}
@@ -229,8 +227,6 @@ TEST(VectorCallFeedbackForArray) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
-
// Make sure function f has a call that uses a type feedback slot.
CompileRun(
"function foo() { return 17; }"
@@ -246,7 +242,7 @@ TEST(VectorCallFeedbackForArray) {
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
CHECK(nexus.GetFeedback()->IsAllocationSite());
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// It should stay monomorphic even after a GC.
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
}
@@ -273,6 +269,11 @@ TEST(VectorCallCounts) {
CompileRun("f(foo); f(foo);");
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
CHECK_EQ(3, nexus.ExtractCallCount());
+
+ // Send the IC megamorphic, but we should still have incrementing counts.
+ CompileRun("f(function() { return 12; });");
+ CHECK_EQ(GENERIC, nexus.StateFromFeedback());
+ CHECK_EQ(4, nexus.ExtractCallCount());
}
TEST(VectorConstructCounts) {
@@ -289,13 +290,21 @@ TEST(VectorConstructCounts) {
Handle<JSFunction> f = GetFunction("f");
Handle<TypeFeedbackVector> feedback_vector =
Handle<TypeFeedbackVector>(f->feedback_vector(), isolate);
+
FeedbackVectorSlot slot(0);
+ CallICNexus nexus(feedback_vector, slot);
+ CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+
CHECK(feedback_vector->Get(slot)->IsWeakCell());
CompileRun("f(Foo); f(Foo);");
- FeedbackVectorSlot cslot(1);
- CHECK(feedback_vector->Get(cslot)->IsSmi());
- CHECK_EQ(3, Smi::cast(feedback_vector->Get(cslot))->value());
+ CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(3, nexus.ExtractCallCount());
+
+ // Send the IC megamorphic, but we should still have incrementing counts.
+ CompileRun("f(function() {});");
+ CHECK_EQ(GENERIC, nexus.StateFromFeedback());
+ CHECK_EQ(4, nexus.ExtractCallCount());
}
TEST(VectorLoadICStates) {
@@ -304,7 +313,6 @@ TEST(VectorLoadICStates) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
// Make sure function f has a call that uses a type feedback slot.
CompileRun(
@@ -348,7 +356,7 @@ TEST(VectorLoadICStates) {
CHECK(!nexus.FindFirstMap());
// After a collection, state should not be reset to PREMONOMORPHIC.
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
CHECK_EQ(MEGAMORPHIC, nexus.StateFromFeedback());
}
@@ -551,8 +559,7 @@ TEST(ReferenceContextAllocatesNoSlots) {
CHECK_SLOT_KIND(helper, 3, FeedbackVectorSlotKind::STORE_IC);
CHECK_SLOT_KIND(helper, 4, FeedbackVectorSlotKind::LOAD_IC);
CHECK_SLOT_KIND(helper, 5, FeedbackVectorSlotKind::LOAD_IC);
- // Binary operation feedback is a general slot.
- CHECK_SLOT_KIND(helper, 6, FeedbackVectorSlotKind::GENERAL);
+ CHECK_SLOT_KIND(helper, 6, FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC);
}
}
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index d2f44ce1be..771d3f116d 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -10,14 +10,14 @@
#include "src/v8.h"
#include "src/compilation-cache.h"
-#include "src/compiler.h"
+#include "src/compilation-dependencies.h"
+#include "src/compilation-info.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/field-type.h"
#include "src/global-handles.h"
#include "src/ic/stub-cache.h"
#include "src/macro-assembler.h"
-#include "src/types.h"
using namespace v8::internal;
@@ -604,10 +604,10 @@ static void TestGeneralizeRepresentation(
// Create new maps by generalizing representation of propX field.
Handle<Map> field_owner(map->FindFieldOwner(property_index), isolate);
- CompilationInfo info(ArrayVector("testing"), isolate, &zone);
- CHECK(!info.dependencies()->HasAborted());
+ CompilationDependencies dependencies(isolate, &zone);
+ CHECK(!dependencies.HasAborted());
- info.dependencies()->AssumeFieldType(field_owner);
+ dependencies.AssumeFieldType(field_owner);
Handle<Map> new_map =
Map::ReconfigureProperty(map, property_index, kData, NONE,
@@ -624,21 +624,21 @@ static void TestGeneralizeRepresentation(
CHECK(map->is_deprecated());
CHECK_NE(*map, *new_map);
CHECK_EQ(expected_field_type_dependency && !field_owner->is_deprecated(),
- info.dependencies()->HasAborted());
+ dependencies.HasAborted());
} else if (expected_deprecation) {
CHECK(!map->is_stable());
CHECK(map->is_deprecated());
CHECK(field_owner->is_deprecated());
CHECK_NE(*map, *new_map);
- CHECK(!info.dependencies()->HasAborted());
+ CHECK(!dependencies.HasAborted());
} else {
CHECK(!field_owner->is_deprecated());
CHECK(map->is_stable()); // Map did not change, must be left stable.
CHECK_EQ(*map, *new_map);
- CHECK_EQ(expected_field_type_dependency, info.dependencies()->HasAborted());
+ CHECK_EQ(expected_field_type_dependency, dependencies.HasAborted());
}
{
@@ -652,7 +652,7 @@ static void TestGeneralizeRepresentation(
}
}
- info.dependencies()->Rollback(); // Properly cleanup compilation info.
+ dependencies.Rollback(); // Properly cleanup compilation info.
// Update all deprecated maps and check that they are now the same.
Handle<Map> updated_map = Map::Update(map);
@@ -983,9 +983,9 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
Zone zone(isolate->allocator());
Handle<Map> field_owner(map->FindFieldOwner(kSplitProp), isolate);
- CompilationInfo info(ArrayVector("testing"), isolate, &zone);
- CHECK(!info.dependencies()->HasAborted());
- info.dependencies()->AssumeFieldType(field_owner);
+ CompilationDependencies dependencies(isolate, &zone);
+ CHECK(!dependencies.HasAborted());
+ dependencies.AssumeFieldType(field_owner);
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
@@ -1003,8 +1003,8 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
expectations.SetDataField(i, expected_representation, expected_type);
}
CHECK(map->is_deprecated());
- CHECK(!info.dependencies()->HasAborted());
- info.dependencies()->Rollback(); // Properly cleanup compilation info.
+ CHECK(!dependencies.HasAborted());
+ dependencies.Rollback(); // Properly cleanup compilation info.
CHECK_NE(*map, *new_map);
CHECK(!new_map->is_deprecated());
@@ -1068,9 +1068,9 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
Zone zone(isolate->allocator());
Handle<Map> field_owner(map->FindFieldOwner(kSplitProp), isolate);
- CompilationInfo info(ArrayVector("testing"), isolate, &zone);
- CHECK(!info.dependencies()->HasAborted());
- info.dependencies()->AssumeFieldType(field_owner);
+ CompilationDependencies dependencies(isolate, &zone);
+ CHECK(!dependencies.HasAborted());
+ dependencies.AssumeFieldType(field_owner);
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
@@ -1092,8 +1092,8 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
}
CHECK(!map->is_deprecated());
CHECK_EQ(*map, *new_map);
- CHECK_EQ(expected_field_type_dependency, info.dependencies()->HasAborted());
- info.dependencies()->Rollback(); // Properly cleanup compilation info.
+ CHECK_EQ(expected_field_type_dependency, dependencies.HasAborted());
+ dependencies.Rollback(); // Properly cleanup compilation info.
CHECK(!new_map->is_deprecated());
CHECK(expectations.Check(*new_map));
@@ -1599,9 +1599,9 @@ static void TestReconfigureElementsKind_GeneralizeRepresentation(
Zone zone(isolate->allocator());
Handle<Map> field_owner(map->FindFieldOwner(kDiffProp), isolate);
- CompilationInfo info(ArrayVector("testing"), isolate, &zone);
- CHECK(!info.dependencies()->HasAborted());
- info.dependencies()->AssumeFieldType(field_owner);
+ CompilationDependencies dependencies(isolate, &zone);
+ CHECK(!dependencies.HasAborted());
+ dependencies.AssumeFieldType(field_owner);
// Reconfigure elements kinds of |map2|, which should generalize
// representations in |map|.
@@ -1617,8 +1617,8 @@ static void TestReconfigureElementsKind_GeneralizeRepresentation(
expectations.SetDataField(kDiffProp, expected_representation, expected_type);
CHECK(map->is_deprecated());
- CHECK(!info.dependencies()->HasAborted());
- info.dependencies()->Rollback(); // Properly cleanup compilation info.
+ CHECK(!dependencies.HasAborted());
+ dependencies.Rollback(); // Properly cleanup compilation info.
CHECK_NE(*map, *new_map);
CHECK(!new_map->is_deprecated());
@@ -1692,9 +1692,9 @@ static void TestReconfigureElementsKind_GeneralizeRepresentationTrivial(
Zone zone(isolate->allocator());
Handle<Map> field_owner(map->FindFieldOwner(kDiffProp), isolate);
- CompilationInfo info(ArrayVector("testing"), isolate, &zone);
- CHECK(!info.dependencies()->HasAborted());
- info.dependencies()->AssumeFieldType(field_owner);
+ CompilationDependencies dependencies(isolate, &zone);
+ CHECK(!dependencies.HasAborted());
+ dependencies.AssumeFieldType(field_owner);
// Reconfigure elements kinds of |map2|, which should generalize
// representations in |map|.
@@ -1713,8 +1713,8 @@ static void TestReconfigureElementsKind_GeneralizeRepresentationTrivial(
expectations.SetDataField(kDiffProp, expected_representation, expected_type);
CHECK(!map->is_deprecated());
CHECK_EQ(*map, *new_map);
- CHECK_EQ(expected_field_type_dependency, info.dependencies()->HasAborted());
- info.dependencies()->Rollback(); // Properly cleanup compilation info.
+ CHECK_EQ(expected_field_type_dependency, dependencies.HasAborted());
+ dependencies.Rollback(); // Properly cleanup compilation info.
CHECK(!new_map->is_deprecated());
CHECK(expectations.Check(*new_map));
@@ -2418,8 +2418,8 @@ TEST(FieldTypeConvertSimple) {
Zone zone(isolate->allocator());
- CHECK_EQ(FieldType::Any()->Convert(&zone), Type::NonInternal());
- CHECK_EQ(FieldType::None()->Convert(&zone), Type::None());
+ CHECK_EQ(FieldType::Any()->Convert(&zone), AstType::NonInternal());
+ CHECK_EQ(FieldType::None()->Convert(&zone), AstType::None());
}
// TODO(ishell): add this test once IS_ACCESSOR_FIELD_SUPPORTED is supported.
diff --git a/deps/v8/test/cctest/test-flags.cc b/deps/v8/test/cctest/test-flags.cc
index e423fdc7e0..fd49dae1e9 100644
--- a/deps/v8/test/cctest/test-flags.cc
+++ b/deps/v8/test/cctest/test-flags.cc
@@ -27,6 +27,7 @@
#include <stdlib.h>
+#include "src/flags.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index 06e7466dc6..d777432f2f 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -25,8 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "src/api.h"
+#include "src/factory.h"
#include "src/global-handles.h"
-
+#include "src/isolate.h"
+#include "src/objects.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -349,7 +352,7 @@ TEST(EternalHandles) {
CHECK(!eternals[i].IsEmpty());
}
- isolate->heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
for (int i = 0; i < kArrayLength; i++) {
for (int j = 0; j < 2; j++) {
@@ -442,7 +445,7 @@ TEST(FinalizerWeakness) {
g.SetWeak(&g, finalizer, v8::WeakCallbackType::kFinalizer);
}
- CcTest::i_isolate()->heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK(!g.IsEmpty());
v8::HandleScope scope(isolate);
@@ -465,7 +468,7 @@ TEST(PhatomHandlesWithoutCallbacks) {
}
CHECK_EQ(0, isolate->NumberOfPhantomHandleResetsSinceLastCall());
- CcTest::i_isolate()->heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
CHECK_EQ(2, isolate->NumberOfPhantomHandleResetsSinceLastCall());
CHECK_EQ(0, isolate->NumberOfPhantomHandleResetsSinceLastCall());
}
diff --git a/deps/v8/test/cctest/test-hashmap.cc b/deps/v8/test/cctest/test-hashmap.cc
index 2d423b4543..b1adc7b005 100644
--- a/deps/v8/test/cctest/test-hashmap.cc
+++ b/deps/v8/test/cctest/test-hashmap.cc
@@ -34,17 +34,13 @@
using namespace v8::internal;
-static bool DefaultMatchFun(void* a, void* b) {
- return a == b;
-}
-
typedef uint32_t (*IntKeyHash)(uint32_t key);
class IntSet {
public:
- explicit IntSet(IntKeyHash hash) : hash_(hash), map_(DefaultMatchFun) {}
+ explicit IntSet(IntKeyHash hash) : hash_(hash) {}
void Insert(int x) {
CHECK_NE(0, x); // 0 corresponds to (void*)NULL - illegal key value
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index b0a2e00202..17893b3b48 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -62,12 +62,8 @@ class NamedEntriesDetector {
if (strcmp(entry->name(), "C2") == 0) has_C2 = true;
}
- static bool AddressesMatch(void* key1, void* key2) {
- return key1 == key2;
- }
-
void CheckAllReachables(i::HeapEntry* root) {
- v8::base::HashMap visited(AddressesMatch);
+ v8::base::HashMap visited;
i::List<i::HeapEntry*> list(10);
list.Add(root);
CheckEntry(root);
@@ -135,17 +131,12 @@ static bool HasString(const v8::HeapGraphNode* node, const char* contents) {
}
-static bool AddressesMatch(void* key1, void* key2) {
- return key1 == key2;
-}
-
-
// Check that snapshot has no unretained entries except root.
static bool ValidateSnapshot(const v8::HeapSnapshot* snapshot, int depth = 3) {
i::HeapSnapshot* heap_snapshot = const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot));
- v8::base::HashMap visited(AddressesMatch);
+ v8::base::HashMap visited;
i::List<i::HeapGraphEdge>& edges = heap_snapshot->edges();
for (int i = 0; i < edges.length(); ++i) {
v8::base::HashMap::Entry* entry = visited.LookupOrInsert(
@@ -501,7 +492,7 @@ void CheckSimdSnapshot(const char* program, const char* var_name) {
// 28 @ 13523 entry with no retainer: /hidden/ system / AllocationSite
// 44 @ 767 $map: /hidden/ system / Map
// 44 @ 59 $map: /hidden/ system / Map
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
@@ -738,7 +729,7 @@ TEST(HeapSnapshotAddressReuse) {
CompileRun(
"for (var i = 0; i < 10000; ++i)\n"
" a[i] = new A();\n");
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
const v8::HeapSnapshot* snapshot2 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot2));
@@ -780,7 +771,7 @@ TEST(HeapEntryIdsAndArrayShift) {
"for (var i = 0; i < 1; ++i)\n"
" a.shift();\n");
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
const v8::HeapSnapshot* snapshot2 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot2));
@@ -821,7 +812,7 @@ TEST(HeapEntryIdsAndGC) {
const v8::HeapSnapshot* snapshot1 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot1));
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
const v8::HeapSnapshot* snapshot2 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot2));
@@ -1150,7 +1141,7 @@ TEST(HeapSnapshotObjectsStats) {
// We have to call GC 6 times. In other case the garbage will be
// the reason of flakiness.
for (int i = 0; i < 6; ++i) {
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
v8::SnapshotObjectId initial_id;
@@ -1305,7 +1296,7 @@ TEST(HeapObjectIds) {
}
heap_profiler->StopTrackingHeapObjects();
- CcTest::heap()->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
for (int i = 0; i < kLength; i++) {
v8::SnapshotObjectId id = heap_profiler->GetObjectId(objects[i]);
@@ -2557,8 +2548,34 @@ TEST(ArrayGrowLeftTrim) {
heap_profiler->StopTrackingHeapObjects();
}
+TEST(TrackHeapAllocationsWithInlining) {
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ LocalContext env;
+
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ heap_profiler->StartTrackingHeapObjects(true);
+
+ CompileRun(record_trace_tree_source);
+
+ AllocationTracker* tracker =
+ reinterpret_cast<i::HeapProfiler*>(heap_profiler)->allocation_tracker();
+ CHECK(tracker);
+ // Resolve all function locations.
+ tracker->PrepareForSerialization();
+ // Print for better diagnostics in case of failure.
+ tracker->trace_tree()->Print(tracker);
+
+ const char* names[] = {"", "start", "f_0_0"};
+ AllocationTraceNode* node = FindNode(tracker, ArrayVector(names));
+ CHECK(node);
+ CHECK_GE(node->allocation_count(), 12u);
+ CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
+ heap_profiler->StopTrackingHeapObjects();
+}
-TEST(TrackHeapAllocations) {
+TEST(TrackHeapAllocationsWithoutInlining) {
+ i::FLAG_turbo_inlining = false;
+ i::FLAG_max_inlined_source_size = 0; // Disable inlining
v8::HandleScope scope(v8::Isolate::GetCurrent());
LocalContext env;
@@ -3033,7 +3050,7 @@ TEST(SamplingHeapProfiler) {
" eval(\"new Array(100)\");\n"
"}\n");
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
std::unique_ptr<v8::AllocationProfile> profile(
heap_profiler->GetAllocationProfile());
@@ -3087,7 +3104,7 @@ TEST(SamplingHeapProfilerLeftTrimming) {
" a.shift();\n"
"}\n");
- CcTest::heap()->CollectGarbage(v8::internal::NEW_SPACE);
+ CcTest::CollectGarbage(v8::internal::NEW_SPACE);
// Should not crash.
heap_profiler->StopSamplingHeapProfiler();
diff --git a/deps/v8/test/cctest/test-identity-map.cc b/deps/v8/test/cctest/test-identity-map.cc
index 648508cbd1..bf07b033db 100644
--- a/deps/v8/test/cctest/test-identity-map.cc
+++ b/deps/v8/test/cctest/test-identity-map.cc
@@ -2,11 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
+#include "src/factory.h"
#include "src/identity-map.h"
-#include "src/zone.h"
-
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/zone/zone.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
+#include "src/v8.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -327,7 +335,7 @@ TEST(ExplicitGC) {
}
// Do an explicit, real GC.
- t.heap()->CollectGarbage(i::NEW_SPACE);
+ t.heap()->CollectGarbage(i::NEW_SPACE, i::GarbageCollectionReason::kTesting);
// Check that searching for the numbers finds the same values.
for (size_t i = 0; i < arraysize(num_keys); i++) {
@@ -379,7 +387,7 @@ TEST(CanonicalHandleScope) {
Handle<String> string2(*string1);
CHECK_EQ(number1.location(), number2.location());
CHECK_EQ(string1.location(), string2.location());
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
Handle<HeapNumber> number3(*number2);
Handle<String> string3(*string2);
CHECK_EQ(number1.location(), number3.location());
diff --git a/deps/v8/test/cctest/test-javascript-arm64.cc b/deps/v8/test/cctest/test-javascript-arm64.cc
index 3f7d9d17c3..aa4988ef06 100644
--- a/deps/v8/test/cctest/test-javascript-arm64.cc
+++ b/deps/v8/test/cctest/test-javascript-arm64.cc
@@ -34,7 +34,6 @@
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/isolate.h"
-#include "src/parsing/parser.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-js-arm64-variables.cc b/deps/v8/test/cctest/test-js-arm64-variables.cc
index 38b22f9b1b..f6958fd422 100644
--- a/deps/v8/test/cctest/test-js-arm64-variables.cc
+++ b/deps/v8/test/cctest/test-js-arm64-variables.cc
@@ -36,7 +36,6 @@
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/isolate.h"
-#include "src/parsing/parser.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-list.cc b/deps/v8/test/cctest/test-list.cc
index 20c13f6e65..1ffff19ef8 100644
--- a/deps/v8/test/cctest/test-list.cc
+++ b/deps/v8/test/cctest/test-list.cc
@@ -27,6 +27,8 @@
#include <stdlib.h>
#include <string.h>
+#include "src/list-inl.h"
+#include "src/list.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index bae3ed5ac4..6cc6c70214 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -95,7 +95,7 @@ void CompareStringsOneWay(const char* s1, const char* s2,
int expected_diff_parameter = -1) {
StringCompareInput input(s1, s2);
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
DiffChunkStruct* first_chunk;
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index 09546e94f3..4d4f3e2177 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -36,7 +36,6 @@
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/isolate.h"
-#include "src/parsing/parser.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index b6bb2569dd..91029286f7 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -34,13 +34,13 @@
#include <cmath>
#endif // __linux__
-#include "src/v8.h"
-
-#include "src/log.h"
+#include "src/api.h"
#include "src/log-utils.h"
+#include "src/log.h"
#include "src/profiler/cpu-profiler.h"
#include "src/snapshot/natives.h"
#include "src/utils.h"
+#include "src/v8.h"
#include "src/v8threads.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
@@ -486,7 +486,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
"})(this);");
logger->StopProfiler();
reinterpret_cast<i::Isolate*>(isolate)->heap()->CollectAllGarbage(
- i::Heap::kMakeHeapIterableMask);
+ i::Heap::kMakeHeapIterableMask, i::GarbageCollectionReason::kTesting);
logger->StringEvent("test-logging-done", "");
// Iterate heap to find compiled functions, will write to log.
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 057c370304..9561db691e 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -1306,4 +1306,57 @@ TEST(Uldc1) {
}
}
+static const std::vector<uint32_t> sltu_test_values() {
+ static const uint32_t kValues[] = {
+ 0, 1, 0x7ffe, 0x7fff, 0x8000,
+ 0x8001, 0xfffe, 0xffff, 0xffff7ffe, 0xffff7fff,
+ 0xffff8000, 0xffff8001, 0xfffffffe, 0xffffffff,
+ };
+ return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+template <typename Func>
+bool run_Sltu(uint32_t rs, uint32_t rd, Func GenerateSltuInstructionFunc) {
+ typedef int32_t (*F_CVT)(uint32_t x0, uint32_t x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+
+ GenerateSltuInstructionFunc(masm, rd);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+ int32_t res = reinterpret_cast<int32_t>(
+ CALL_GENERATED_CODE(isolate, f, rs, rd, 0, 0, 0));
+ return res == 1;
+}
+
+TEST(Sltu) {
+ CcTest::InitializeVM();
+
+ FOR_UINT32_INPUTS(i, sltu_test_values) {
+ FOR_UINT32_INPUTS(j, sltu_test_values) {
+ uint32_t rs = *i;
+ uint32_t rd = *j;
+
+ CHECK_EQ(rs < rd, run_Sltu(rs, rd,
+ [](MacroAssembler* masm, uint32_t imm) {
+ __ Sltu(v0, a0, Operand(imm));
+ }));
+ CHECK_EQ(rs < rd,
+ run_Sltu(rs, rd, [](MacroAssembler* masm,
+ uint32_t imm) { __ Sltu(v0, a0, a1); }));
+ }
+ }
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index 5f9451027c..1dc260ff01 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -1901,4 +1901,68 @@ TEST(Uldc1) {
}
}
+static const std::vector<uint64_t> sltu_test_values() {
+ static const uint64_t kValues[] = {
+ 0,
+ 1,
+ 0x7ffe,
+ 0x7fff,
+ 0x8000,
+ 0x8001,
+ 0xfffe,
+ 0xffff,
+ 0xffffffffffff7ffe,
+ 0xffffffffffff7fff,
+ 0xffffffffffff8000,
+ 0xffffffffffff8001,
+ 0xfffffffffffffffe,
+ 0xffffffffffffffff,
+ };
+ return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+template <typename Func>
+bool run_Sltu(uint64_t rs, uint64_t rd, Func GenerateSltuInstructionFunc) {
+ typedef int64_t (*F_CVT)(uint64_t x0, uint64_t x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+
+ GenerateSltuInstructionFunc(masm, rd);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+ int64_t res = reinterpret_cast<int64_t>(
+ CALL_GENERATED_CODE(isolate, f, rs, rd, 0, 0, 0));
+ return res == 1;
+}
+
+TEST(Sltu) {
+ CcTest::InitializeVM();
+
+ FOR_UINT64_INPUTS(i, sltu_test_values) {
+ FOR_UINT64_INPUTS(j, sltu_test_values) {
+ uint64_t rs = *i;
+ uint64_t rd = *j;
+
+ CHECK_EQ(rs < rd, run_Sltu(rs, rd,
+ [](MacroAssembler* masm, uint64_t imm) {
+ __ Sltu(v0, a0, Operand(imm));
+ }));
+ CHECK_EQ(rs < rd,
+ run_Sltu(rs, rd, [](MacroAssembler* masm,
+ uint64_t imm) { __ Sltu(v0, a0, a1); }));
+ }
+ }
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
index b26aad03a5..da5ce8ce69 100644
--- a/deps/v8/test/cctest/test-mementos.cc
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -25,6 +25,16 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "src/factory.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -36,7 +46,7 @@ static void SetUpNewSpaceWithPoisonedMementoAtTop() {
NewSpace* new_space = heap->new_space();
// Make sure we can allocate some objects without causing a GC later.
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// Allocate a string, the GC may suspect a memento behind the string.
Handle<SeqOneByteString> string =
@@ -62,8 +72,7 @@ TEST(Regress340063) {
// Call GC to see if we can handle a poisonous memento right after the
// current new space top pointer.
- CcTest::i_isolate()->heap()->CollectAllGarbage(
- Heap::kAbortIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
}
@@ -80,8 +89,7 @@ TEST(Regress470390) {
// Call GC to see if we can handle a poisonous memento right after the
// current new space top pointer.
- CcTest::i_isolate()->heap()->CollectAllGarbage(
- Heap::kAbortIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
}
@@ -93,5 +101,5 @@ TEST(BadMementoAfterTopForceScavenge) {
SetUpNewSpaceWithPoisonedMementoAtTop();
// Force GC to test the poisoned memento handling
- CcTest::i_isolate()->heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
}
diff --git a/deps/v8/test/cctest/test-modules.cc b/deps/v8/test/cctest/test-modules.cc
new file mode 100644
index 0000000000..c33a5a124b
--- /dev/null
+++ b/deps/v8/test/cctest/test-modules.cc
@@ -0,0 +1,111 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/flags.h"
+
+#include "test/cctest/cctest.h"
+
+namespace {
+
+using v8::Context;
+using v8::HandleScope;
+using v8::Isolate;
+using v8::Local;
+using v8::MaybeLocal;
+using v8::Module;
+using v8::ScriptCompiler;
+using v8::ScriptOrigin;
+using v8::String;
+using v8::Value;
+
+MaybeLocal<Module> AlwaysEmptyResolveCallback(Local<Context> context,
+ Local<String> specifier,
+ Local<Module> referrer,
+ Local<Value> data) {
+ return MaybeLocal<Module>();
+}
+
+static int g_count = 0;
+MaybeLocal<Module> FailOnSecondCallResolveCallback(Local<Context> context,
+ Local<String> specifier,
+ Local<Module> referrer,
+ Local<Value> data) {
+ if (g_count++ > 0) return MaybeLocal<Module>();
+ Local<String> source_text = v8_str("");
+ ScriptOrigin origin(v8_str("module.js"));
+ ScriptCompiler::Source source(source_text, origin);
+ return ScriptCompiler::CompileModule(CcTest::isolate(), &source)
+ .ToLocalChecked();
+}
+
+TEST(ModuleInstantiationFailures) {
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ LocalContext env;
+
+ Local<String> source_text = v8_str(
+ "import './foo.js';"
+ "export {} from './bar.js';");
+ ScriptOrigin origin(v8_str("file.js"));
+ ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK_EQ(2, module->GetModuleRequestsLength());
+ CHECK(v8_str("./foo.js")->StrictEquals(module->GetModuleRequest(0)));
+ CHECK(v8_str("./bar.js")->StrictEquals(module->GetModuleRequest(1)));
+
+ // Instantiation should fail.
+ CHECK(!module->Instantiate(env.local(), AlwaysEmptyResolveCallback));
+
+ // Start over again...
+ module = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+
+ // Instantiation should fail if a sub-module fails to resolve.
+ g_count = 0;
+ CHECK(!module->Instantiate(env.local(), FailOnSecondCallResolveCallback));
+}
+
+static MaybeLocal<Module> CompileSpecifierAsModuleResolveCallback(
+ Local<Context> context, Local<String> specifier, Local<Module> referrer,
+ Local<Value> data) {
+ ScriptOrigin origin(v8_str("module.js"));
+ ScriptCompiler::Source source(specifier, origin);
+ return ScriptCompiler::CompileModule(CcTest::isolate(), &source)
+ .ToLocalChecked();
+}
+
+TEST(ModuleEvaluation) {
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ LocalContext env;
+
+ Local<String> source_text = v8_str(
+ "import 'Object.expando = 5';"
+ "import 'Object.expando *= 2';");
+ ScriptOrigin origin(v8_str("file.js"));
+ ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK(module->Instantiate(env.local(),
+ CompileSpecifierAsModuleResolveCallback));
+ CHECK(!module->Evaluate(env.local()).IsEmpty());
+ ExpectInt32("Object.expando", 10);
+}
+
+TEST(EmbedderData) {
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ LocalContext env;
+
+ Local<String> source_text = v8_str("");
+ ScriptOrigin origin(v8_str("file.js"));
+ ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK(module->GetEmbedderData()->IsUndefined());
+ module->SetEmbedderData(v8_num(42));
+ CHECK_EQ(42, Local<v8::Int32>::Cast(module->GetEmbedderData())->Value());
+}
+
+} // anonymous namespace
diff --git a/deps/v8/test/cctest/test-object.cc b/deps/v8/test/cctest/test-object.cc
index e078bfcdb0..396318121a 100644
--- a/deps/v8/test/cctest/test-object.cc
+++ b/deps/v8/test/cctest/test-object.cc
@@ -2,8 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/factory.h"
+#include "src/handles-inl.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/objects.h"
#include "src/v8.h"
-
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 658e73ca84..921cebcad6 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -38,6 +38,7 @@
#include "src/ast/ast.h"
#include "src/compiler.h"
#include "src/execution.h"
+#include "src/flags.h"
#include "src/isolate.h"
#include "src/objects.h"
#include "src/parsing/parse-info.h"
@@ -71,17 +72,17 @@ TEST(ScanKeywords) {
size_t length = strlen(key_token.keyword);
CHECK(static_cast<int>(sizeof(buffer)) >= length);
{
- i::ExternalOneByteStringUtf16CharacterStream stream(keyword, length);
+ auto stream = i::ScannerStream::ForTesting(keyword, length);
i::Scanner scanner(&unicode_cache);
- scanner.Initialize(&stream);
+ scanner.Initialize(stream.get());
CHECK_EQ(key_token.token, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
}
// Removing characters will make keyword matching fail.
{
- i::ExternalOneByteStringUtf16CharacterStream stream(keyword, length - 1);
+ auto stream = i::ScannerStream::ForTesting(keyword, length - 1);
i::Scanner scanner(&unicode_cache);
- scanner.Initialize(&stream);
+ scanner.Initialize(stream.get());
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
}
@@ -90,9 +91,9 @@ TEST(ScanKeywords) {
for (int j = 0; j < static_cast<int>(arraysize(chars_to_append)); ++j) {
i::MemMove(buffer, keyword, length);
buffer[length] = chars_to_append[j];
- i::ExternalOneByteStringUtf16CharacterStream stream(buffer, length + 1);
+ auto stream = i::ScannerStream::ForTesting(buffer, length + 1);
i::Scanner scanner(&unicode_cache);
- scanner.Initialize(&stream);
+ scanner.Initialize(stream.get());
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
}
@@ -100,9 +101,9 @@ TEST(ScanKeywords) {
{
i::MemMove(buffer, keyword, length);
buffer[length - 1] = '_';
- i::ExternalOneByteStringUtf16CharacterStream stream(buffer, length);
+ auto stream = i::ScannerStream::ForTesting(buffer, length);
i::Scanner scanner(&unicode_cache);
- scanner.Initialize(&stream);
+ scanner.Initialize(stream.get());
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
}
@@ -166,10 +167,10 @@ TEST(ScanHTMLEndComments) {
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
for (int i = 0; tests[i]; i++) {
const char* source = tests[i];
- i::ExternalOneByteStringUtf16CharacterStream stream(source);
+ auto stream = i::ScannerStream::ForTesting(source);
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(&stream);
+ scanner.Initialize(stream.get());
i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->heap()->HashSeed());
@@ -183,10 +184,10 @@ TEST(ScanHTMLEndComments) {
for (int i = 0; fail_tests[i]; i++) {
const char* source = fail_tests[i];
- i::ExternalOneByteStringUtf16CharacterStream stream(source);
+ auto stream = i::ScannerStream::ForTesting(source);
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(&stream);
+ scanner.Initialize(stream.get());
i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->heap()->HashSeed());
@@ -340,11 +341,10 @@ TEST(StandAlonePreParser) {
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
- const char* program = programs[i];
- i::ExternalOneByteStringUtf16CharacterStream stream(program);
+ auto stream = i::ScannerStream::ForTesting(programs[i]);
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(&stream);
+ scanner.Initialize(stream.get());
i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(
@@ -374,11 +374,10 @@ TEST(StandAlonePreParserNoNatives) {
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
- const char* program = programs[i];
- i::ExternalOneByteStringUtf16CharacterStream stream(program);
+ auto stream = i::ScannerStream::ForTesting(programs[i]);
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(&stream);
+ scanner.Initialize(stream.get());
// Preparser defaults to disallowing natives syntax.
i::Zone zone(CcTest::i_isolate()->allocator());
@@ -444,10 +443,10 @@ TEST(RegressChromium62639) {
// and then used the invalid currently scanned literal. This always
// failed in debug mode, and sometimes crashed in release mode.
- i::ExternalOneByteStringUtf16CharacterStream stream(program);
+ auto stream = i::ScannerStream::ForTesting(program);
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(&stream);
+ scanner.Initialize(stream.get());
i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(&zone,
CcTest::i_isolate()->heap()->HashSeed());
@@ -464,7 +463,6 @@ TEST(RegressChromium62639) {
TEST(Regress928) {
v8::V8::Initialize();
i::Isolate* isolate = CcTest::i_isolate();
- i::Factory* factory = isolate->factory();
// Preparsing didn't consider the catch clause of a try statement
// as with-content, which made it assume that a function inside
@@ -478,11 +476,10 @@ TEST(Regress928) {
"var bar = function () { /* second */ }";
v8::HandleScope handles(CcTest::isolate());
- i::Handle<i::String> source = factory->NewStringFromAsciiChecked(program);
- i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
+ auto stream = i::ScannerStream::ForTesting(program);
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(&stream);
+ scanner.Initialize(stream.get());
i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(&zone,
CcTest::i_isolate()->heap()->HashSeed());
@@ -528,11 +525,10 @@ TEST(PreParseOverflow) {
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
- i::ExternalOneByteStringUtf16CharacterStream stream(program.get(),
- kProgramSize);
+ auto stream = i::ScannerStream::ForTesting(program.get(), kProgramSize);
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(&stream);
+ scanner.Initialize(stream.get());
i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(&zone,
@@ -545,189 +541,6 @@ TEST(PreParseOverflow) {
}
-class TestExternalResource: public v8::String::ExternalStringResource {
- public:
- explicit TestExternalResource(uint16_t* data, int length)
- : data_(data), length_(static_cast<size_t>(length)) { }
-
- ~TestExternalResource() { }
-
- const uint16_t* data() const {
- return data_;
- }
-
- size_t length() const {
- return length_;
- }
- private:
- uint16_t* data_;
- size_t length_;
-};
-
-
-#define CHECK_EQU(v1, v2) CHECK_EQ(static_cast<int>(v1), static_cast<int>(v2))
-
-void TestCharacterStream(const char* one_byte_source, unsigned length,
- unsigned start = 0, unsigned end = 0) {
- if (end == 0) end = length;
- unsigned sub_length = end - start;
- i::Isolate* isolate = CcTest::i_isolate();
- i::Factory* factory = isolate->factory();
- i::HandleScope test_scope(isolate);
- std::unique_ptr<i::uc16[]> uc16_buffer(new i::uc16[length]);
- for (unsigned i = 0; i < length; i++) {
- uc16_buffer[i] = static_cast<i::uc16>(one_byte_source[i]);
- }
- i::Vector<const char> one_byte_vector(one_byte_source,
- static_cast<int>(length));
- i::Handle<i::String> one_byte_string =
- factory->NewStringFromAscii(one_byte_vector).ToHandleChecked();
- TestExternalResource resource(uc16_buffer.get(), length);
- i::Handle<i::String> uc16_string(
- factory->NewExternalStringFromTwoByte(&resource).ToHandleChecked());
- ScriptResource one_byte_resource(one_byte_source, length);
- i::Handle<i::String> ext_one_byte_string(
- factory->NewExternalStringFromOneByte(&one_byte_resource)
- .ToHandleChecked());
-
- i::ExternalTwoByteStringUtf16CharacterStream uc16_stream(
- i::Handle<i::ExternalTwoByteString>::cast(uc16_string), start, end);
- i::ExternalOneByteStringUtf16CharacterStream one_byte_stream(
- i::Handle<i::ExternalOneByteString>::cast(ext_one_byte_string), start,
- end);
- i::GenericStringUtf16CharacterStream string_stream(one_byte_string, start,
- end);
- i::ExternalOneByteStringUtf16CharacterStream utf8_stream(one_byte_source,
- end);
- utf8_stream.SeekForward(start);
-
- unsigned i = start;
- while (i < end) {
- // Read streams one char at a time
- CHECK_EQU(i, uc16_stream.pos());
- CHECK_EQU(i, string_stream.pos());
- CHECK_EQU(i, utf8_stream.pos());
- CHECK_EQU(i, one_byte_stream.pos());
- int32_t c0 = one_byte_source[i];
- int32_t c1 = uc16_stream.Advance();
- int32_t c2 = string_stream.Advance();
- int32_t c3 = utf8_stream.Advance();
- int32_t c4 = one_byte_stream.Advance();
- i++;
- CHECK_EQ(c0, c1);
- CHECK_EQ(c0, c2);
- CHECK_EQ(c0, c3);
- CHECK_EQ(c0, c4);
- CHECK_EQU(i, uc16_stream.pos());
- CHECK_EQU(i, string_stream.pos());
- CHECK_EQU(i, utf8_stream.pos());
- CHECK_EQU(i, one_byte_stream.pos());
- }
- while (i > start + sub_length / 4) {
- // Pushback, re-read, pushback again.
- int32_t c0 = one_byte_source[i - 1];
- CHECK_EQU(i, uc16_stream.pos());
- CHECK_EQU(i, string_stream.pos());
- CHECK_EQU(i, utf8_stream.pos());
- CHECK_EQU(i, one_byte_stream.pos());
- uc16_stream.PushBack(c0);
- string_stream.PushBack(c0);
- utf8_stream.PushBack(c0);
- one_byte_stream.PushBack(c0);
- i--;
- CHECK_EQU(i, uc16_stream.pos());
- CHECK_EQU(i, string_stream.pos());
- CHECK_EQU(i, utf8_stream.pos());
- CHECK_EQU(i, one_byte_stream.pos());
- int32_t c1 = uc16_stream.Advance();
- int32_t c2 = string_stream.Advance();
- int32_t c3 = utf8_stream.Advance();
- int32_t c4 = one_byte_stream.Advance();
- i++;
- CHECK_EQU(i, uc16_stream.pos());
- CHECK_EQU(i, string_stream.pos());
- CHECK_EQU(i, utf8_stream.pos());
- CHECK_EQU(i, one_byte_stream.pos());
- CHECK_EQ(c0, c1);
- CHECK_EQ(c0, c2);
- CHECK_EQ(c0, c3);
- CHECK_EQ(c0, c4);
- uc16_stream.PushBack(c0);
- string_stream.PushBack(c0);
- utf8_stream.PushBack(c0);
- one_byte_stream.PushBack(c0);
- i--;
- CHECK_EQU(i, uc16_stream.pos());
- CHECK_EQU(i, string_stream.pos());
- CHECK_EQU(i, utf8_stream.pos());
- CHECK_EQU(i, one_byte_stream.pos());
- }
- unsigned halfway = start + sub_length / 2;
- uc16_stream.SeekForward(halfway - i);
- string_stream.SeekForward(halfway - i);
- utf8_stream.SeekForward(halfway - i);
- one_byte_stream.SeekForward(halfway - i);
- i = halfway;
- CHECK_EQU(i, uc16_stream.pos());
- CHECK_EQU(i, string_stream.pos());
- CHECK_EQU(i, utf8_stream.pos());
- CHECK_EQU(i, one_byte_stream.pos());
-
- while (i < end) {
- // Read streams one char at a time
- CHECK_EQU(i, uc16_stream.pos());
- CHECK_EQU(i, string_stream.pos());
- CHECK_EQU(i, utf8_stream.pos());
- CHECK_EQU(i, one_byte_stream.pos());
- int32_t c0 = one_byte_source[i];
- int32_t c1 = uc16_stream.Advance();
- int32_t c2 = string_stream.Advance();
- int32_t c3 = utf8_stream.Advance();
- int32_t c4 = one_byte_stream.Advance();
- i++;
- CHECK_EQ(c0, c1);
- CHECK_EQ(c0, c2);
- CHECK_EQ(c0, c3);
- CHECK_EQ(c0, c4);
- CHECK_EQU(i, uc16_stream.pos());
- CHECK_EQU(i, string_stream.pos());
- CHECK_EQU(i, utf8_stream.pos());
- CHECK_EQU(i, one_byte_stream.pos());
- }
-
- int32_t c1 = uc16_stream.Advance();
- int32_t c2 = string_stream.Advance();
- int32_t c3 = utf8_stream.Advance();
- int32_t c4 = one_byte_stream.Advance();
- CHECK_LT(c1, 0);
- CHECK_LT(c2, 0);
- CHECK_LT(c3, 0);
- CHECK_LT(c4, 0);
-}
-
-#undef CHECK_EQU
-
-TEST(CharacterStreams) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handles(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
-
- TestCharacterStream("abc\0\n\r\x7f", 7);
- static const unsigned kBigStringSize = 4096;
- char buffer[kBigStringSize + 1];
- for (unsigned i = 0; i < kBigStringSize; i++) {
- buffer[i] = static_cast<char>(i & 0x7f);
- }
- TestCharacterStream(buffer, kBigStringSize);
-
- TestCharacterStream(buffer, kBigStringSize, 576, 3298);
-
- TestCharacterStream("\0", 1);
- TestCharacterStream("", 0);
-}
-
-
void TestStreamScanner(i::Utf16CharacterStream* stream,
i::Token::Value* expected_tokens,
int skip_pos = 0, // Zero means not skipping.
@@ -750,9 +563,9 @@ void TestStreamScanner(i::Utf16CharacterStream* stream,
TEST(StreamScanner) {
v8::V8::Initialize();
-
const char* str1 = "{ foo get for : */ <- \n\n /*foo*/ bib";
- i::ExternalOneByteStringUtf16CharacterStream stream1(str1);
+ std::unique_ptr<i::Utf16CharacterStream> stream1(
+ i::ScannerStream::ForTesting(str1));
i::Token::Value expectations1[] = {
i::Token::LBRACE,
i::Token::IDENTIFIER,
@@ -767,10 +580,11 @@ TEST(StreamScanner) {
i::Token::EOS,
i::Token::ILLEGAL
};
- TestStreamScanner(&stream1, expectations1, 0, 0);
+ TestStreamScanner(stream1.get(), expectations1, 0, 0);
const char* str2 = "case default const {THIS\nPART\nSKIPPED} do";
- i::ExternalOneByteStringUtf16CharacterStream stream2(str2);
+ std::unique_ptr<i::Utf16CharacterStream> stream2(
+ i::ScannerStream::ForTesting(str2));
i::Token::Value expectations2[] = {
i::Token::CASE,
i::Token::DEFAULT,
@@ -784,7 +598,7 @@ TEST(StreamScanner) {
};
CHECK_EQ('{', str2[19]);
CHECK_EQ('}', str2[37]);
- TestStreamScanner(&stream2, expectations2, 20, 37);
+ TestStreamScanner(stream2.get(), expectations2, 20, 37);
const char* str3 = "{}}}}";
i::Token::Value expectations3[] = {
@@ -800,17 +614,17 @@ TEST(StreamScanner) {
for (int i = 0; i <= 4; i++) {
expectations3[6 - i] = i::Token::ILLEGAL;
expectations3[5 - i] = i::Token::EOS;
- i::ExternalOneByteStringUtf16CharacterStream stream3(str3);
- TestStreamScanner(&stream3, expectations3, 1, 1 + i);
+ std::unique_ptr<i::Utf16CharacterStream> stream3(
+ i::ScannerStream::ForTesting(str3));
+ TestStreamScanner(stream3.get(), expectations3, 1, 1 + i);
}
}
-
void TestScanRegExp(const char* re_source, const char* expected) {
- i::ExternalOneByteStringUtf16CharacterStream stream(re_source);
+ auto stream = i::ScannerStream::ForTesting(re_source);
i::HandleScope scope(CcTest::i_isolate());
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(&stream);
+ scanner.Initialize(stream.get());
i::Token::Value start = scanner.peek();
CHECK(start == i::Token::DIV || start == i::Token::ASSIGN_DIV);
@@ -819,9 +633,10 @@ void TestScanRegExp(const char* re_source, const char* expected) {
i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(&zone,
CcTest::i_isolate()->heap()->HashSeed());
+ const i::AstRawString* current_symbol =
+ scanner.CurrentSymbol(&ast_value_factory);
ast_value_factory.Internalize(CcTest::i_isolate());
- i::Handle<i::String> val =
- scanner.CurrentSymbol(&ast_value_factory)->string();
+ i::Handle<i::String> val = current_symbol->string();
i::DisallowHeapAllocation no_alloc;
i::String::FlatContent content = val->GetFlatContent();
CHECK(content.IsOneByte());
@@ -869,74 +684,26 @@ TEST(RegExpScanning) {
TestScanRegExp("/=?/", "=?");
}
+static int Ucs2CharLength(unibrow::uchar c) {
+ if (c == unibrow::Utf8::kIncomplete || c == unibrow::Utf8::kBufferEmpty) {
+ return 0;
+ } else if (c < 0xffff) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
static int Utf8LengthHelper(const char* s) {
- int len = i::StrLength(s);
- int character_length = len;
- for (int i = 0; i < len; i++) {
- unsigned char c = s[i];
- int input_offset = 0;
- int output_adjust = 0;
- if (c > 0x7f) {
- if (c < 0xc0) continue;
- if (c >= 0xf0) {
- if (c >= 0xf8) {
- // 5 and 6 byte UTF-8 sequences turn into a kBadChar for each UTF-8
- // byte.
- continue; // Handle first UTF-8 byte.
- }
- if ((c & 7) == 0 && ((s[i + 1] & 0x30) == 0)) {
- // This 4 byte sequence could have been coded as a 3 byte sequence.
- // Record a single kBadChar for the first byte and continue.
- continue;
- }
- input_offset = 3;
- // 4 bytes of UTF-8 turn into 2 UTF-16 code units.
- character_length -= 2;
- } else if (c >= 0xe0) {
- if ((c & 0xf) == 0 && ((s[i + 1] & 0x20) == 0)) {
- // This 3 byte sequence could have been coded as a 2 byte sequence.
- // Record a single kBadChar for the first byte and continue.
- continue;
- }
- if (c == 0xed) {
- unsigned char d = s[i + 1];
- if ((d < 0x80) || (d > 0x9f)) {
- // This 3 byte sequence is part of a surrogate pair which is not
- // supported by UTF-8. Record a single kBadChar for the first byte
- // and continue.
- continue;
- }
- }
- input_offset = 2;
- // 3 bytes of UTF-8 turn into 1 UTF-16 code unit.
- output_adjust = 2;
- } else {
- if ((c & 0x1e) == 0) {
- // This 2 byte sequence could have been coded as a 1 byte sequence.
- // Record a single kBadChar for the first byte and continue.
- continue;
- }
- input_offset = 1;
- // 2 bytes of UTF-8 turn into 1 UTF-16 code unit.
- output_adjust = 1;
- }
- bool bad = false;
- for (int j = 1; j <= input_offset; j++) {
- if ((s[i + j] & 0xc0) != 0x80) {
- // Bad UTF-8 sequence turns the first in the sequence into kBadChar,
- // which is a single UTF-16 code unit.
- bad = true;
- break;
- }
- }
- if (!bad) {
- i += input_offset;
- character_length -= output_adjust;
- }
- }
+ unibrow::Utf8::Utf8IncrementalBuffer buffer(unibrow::Utf8::kBufferEmpty);
+ int length = 0;
+ for (; *s != '\0'; s++) {
+ unibrow::uchar tmp = unibrow::Utf8::ValueOfIncremental(*s, &buffer);
+ length += Ucs2CharLength(tmp);
}
- return character_length;
+ unibrow::uchar tmp = unibrow::Utf8::ValueOfIncrementalFinish(&buffer);
+ length += Ucs2CharLength(tmp);
+ return length;
}
@@ -1046,7 +813,7 @@ TEST(ScopeUsesArgumentsSuperThis) {
info.set_global();
CHECK(parser.Parse(&info));
CHECK(i::Rewriter::Rewrite(&info));
- i::Scope::Analyze(&info);
+ i::DeclarationScope::Analyze(&info, i::AnalyzeMode::kRegular);
CHECK(info.literal() != NULL);
i::DeclarationScope* script_scope = info.literal()->scope();
@@ -1166,169 +933,206 @@ TEST(ScopePositions) {
};
const SourceData source_data[] = {
- { " with ({}) ", "{ block; }", " more;", i::WITH_SCOPE, i::SLOPPY },
- { " with ({}) ", "{ block; }", "; more;", i::WITH_SCOPE, i::SLOPPY },
- { " with ({}) ", "{\n"
- " block;\n"
- " }", "\n"
- " more;", i::WITH_SCOPE, i::SLOPPY },
- { " with ({}) ", "statement;", " more;", i::WITH_SCOPE, i::SLOPPY },
- { " with ({}) ", "statement", "\n"
- " more;", i::WITH_SCOPE, i::SLOPPY },
- { " with ({})\n"
- " ", "statement;", "\n"
- " more;", i::WITH_SCOPE, i::SLOPPY },
- { " try {} catch ", "(e) { block; }", " more;",
- i::CATCH_SCOPE, i::SLOPPY },
- { " try {} catch ", "(e) { block; }", "; more;",
- i::CATCH_SCOPE, i::SLOPPY },
- { " try {} catch ", "(e) {\n"
- " block;\n"
- " }", "\n"
- " more;", i::CATCH_SCOPE, i::SLOPPY },
- { " try {} catch ", "(e) { block; }", " finally { block; } more;",
- i::CATCH_SCOPE, i::SLOPPY },
- { " start;\n"
- " ", "{ let block; }", " more;", i::BLOCK_SCOPE, i::STRICT },
- { " start;\n"
- " ", "{ let block; }", "; more;", i::BLOCK_SCOPE, i::STRICT },
- { " start;\n"
- " ", "{\n"
- " let block;\n"
- " }", "\n"
- " more;", i::BLOCK_SCOPE, i::STRICT },
- { " start;\n"
- " function fun", "(a,b) { infunction; }", " more;",
- i::FUNCTION_SCOPE, i::SLOPPY },
- { " start;\n"
- " function fun", "(a,b) {\n"
- " infunction;\n"
- " }", "\n"
- " more;", i::FUNCTION_SCOPE, i::SLOPPY },
- { " start;\n", "(a,b) => a + b", "; more;",
- i::FUNCTION_SCOPE, i::SLOPPY },
- { " start;\n", "(a,b) => { return a+b; }", "\nmore;",
- i::FUNCTION_SCOPE, i::SLOPPY },
- { " start;\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- { " for ", "(let x = 1 ; x < 10; ++ x) { block; }", " more;",
- i::BLOCK_SCOPE, i::STRICT },
- { " for ", "(let x = 1 ; x < 10; ++ x) { block; }", "; more;",
- i::BLOCK_SCOPE, i::STRICT },
- { " for ", "(let x = 1 ; x < 10; ++ x) {\n"
- " block;\n"
- " }", "\n"
- " more;", i::BLOCK_SCOPE, i::STRICT },
- { " for ", "(let x = 1 ; x < 10; ++ x) statement;", " more;",
- i::BLOCK_SCOPE, i::STRICT },
- { " for ", "(let x = 1 ; x < 10; ++ x) statement", "\n"
- " more;", i::BLOCK_SCOPE, i::STRICT },
- { " for ", "(let x = 1 ; x < 10; ++ x)\n"
- " statement;", "\n"
- " more;", i::BLOCK_SCOPE, i::STRICT },
- { " for ", "(let x in {}) { block; }", " more;",
- i::BLOCK_SCOPE, i::STRICT },
- { " for ", "(let x in {}) { block; }", "; more;",
- i::BLOCK_SCOPE, i::STRICT },
- { " for ", "(let x in {}) {\n"
- " block;\n"
- " }", "\n"
- " more;", i::BLOCK_SCOPE, i::STRICT },
- { " for ", "(let x in {}) statement;", " more;",
- i::BLOCK_SCOPE, i::STRICT },
- { " for ", "(let x in {}) statement", "\n"
- " more;", i::BLOCK_SCOPE, i::STRICT },
- { " for ", "(let x in {})\n"
- " statement;", "\n"
- " more;", i::BLOCK_SCOPE, i::STRICT },
- // Check that 6-byte and 4-byte encodings of UTF-8 strings do not throw
- // the preparser off in terms of byte offsets.
- // 6 byte encoding.
- { " 'foo\355\240\201\355\260\211';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // 4 byte encoding.
- { " 'foo\360\220\220\212';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // 3 byte encoding of \u0fff.
- { " 'foo\340\277\277';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Broken 6 byte encoding with missing last byte.
- { " 'foo\355\240\201\355\211';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Broken 3 byte encoding of \u0fff with missing last byte.
- { " 'foo\340\277';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Broken 3 byte encoding of \u0fff with missing 2 last bytes.
- { " 'foo\340';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Broken 3 byte encoding of \u00ff should be a 2 byte encoding.
- { " 'foo\340\203\277';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Broken 3 byte encoding of \u007f should be a 2 byte encoding.
- { " 'foo\340\201\277';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Unpaired lead surrogate.
- { " 'foo\355\240\201';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Unpaired lead surrogate where following code point is a 3 byte sequence.
- { " 'foo\355\240\201\340\277\277';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Unpaired lead surrogate where following code point is a 4 byte encoding
- // of a trail surrogate.
- { " 'foo\355\240\201\360\215\260\211';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Unpaired trail surrogate.
- { " 'foo\355\260\211';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // 2 byte encoding of \u00ff.
- { " 'foo\303\277';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Broken 2 byte encoding of \u00ff with missing last byte.
- { " 'foo\303';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Broken 2 byte encoding of \u007f should be a 1 byte encoding.
- { " 'foo\301\277';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Illegal 5 byte encoding.
- { " 'foo\370\277\277\277\277';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Illegal 6 byte encoding.
- { " 'foo\374\277\277\277\277\277';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Illegal 0xfe byte
- { " 'foo\376\277\277\277\277\277\277';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- // Illegal 0xff byte
- { " 'foo\377\277\277\277\277\277\277\277';\n"
- " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- { " 'foo';\n"
- " (function fun", "(a,b) { 'bar\355\240\201\355\260\213'; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- { " 'foo';\n"
- " (function fun", "(a,b) { 'bar\360\220\220\214'; }", ")();",
- i::FUNCTION_SCOPE, i::SLOPPY },
- { NULL, NULL, NULL, i::EVAL_SCOPE, i::SLOPPY }
- };
+ {" with ({}) ", "{ block; }", " more;", i::WITH_SCOPE, i::SLOPPY},
+ {" with ({}) ", "{ block; }", "; more;", i::WITH_SCOPE, i::SLOPPY},
+ {" with ({}) ",
+ "{\n"
+ " block;\n"
+ " }",
+ "\n"
+ " more;",
+ i::WITH_SCOPE, i::SLOPPY},
+ {" with ({}) ", "statement;", " more;", i::WITH_SCOPE, i::SLOPPY},
+ {" with ({}) ", "statement",
+ "\n"
+ " more;",
+ i::WITH_SCOPE, i::SLOPPY},
+ {" with ({})\n"
+ " ",
+ "statement;",
+ "\n"
+ " more;",
+ i::WITH_SCOPE, i::SLOPPY},
+ {" try {} catch ", "(e) { block; }", " more;", i::CATCH_SCOPE,
+ i::SLOPPY},
+ {" try {} catch ", "(e) { block; }", "; more;", i::CATCH_SCOPE,
+ i::SLOPPY},
+ {" try {} catch ",
+ "(e) {\n"
+ " block;\n"
+ " }",
+ "\n"
+ " more;",
+ i::CATCH_SCOPE, i::SLOPPY},
+ {" try {} catch ", "(e) { block; }", " finally { block; } more;",
+ i::CATCH_SCOPE, i::SLOPPY},
+ {" start;\n"
+ " ",
+ "{ let block; }", " more;", i::BLOCK_SCOPE, i::STRICT},
+ {" start;\n"
+ " ",
+ "{ let block; }", "; more;", i::BLOCK_SCOPE, i::STRICT},
+ {" start;\n"
+ " ",
+ "{\n"
+ " let block;\n"
+ " }",
+ "\n"
+ " more;",
+ i::BLOCK_SCOPE, i::STRICT},
+ {" start;\n"
+ " function fun",
+ "(a,b) { infunction; }", " more;", i::FUNCTION_SCOPE, i::SLOPPY},
+ {" start;\n"
+ " function fun",
+ "(a,b) {\n"
+ " infunction;\n"
+ " }",
+ "\n"
+ " more;",
+ i::FUNCTION_SCOPE, i::SLOPPY},
+ {" start;\n", "(a,b) => a + b", "; more;", i::FUNCTION_SCOPE, i::SLOPPY},
+ {" start;\n", "(a,b) => { return a+b; }", "\nmore;", i::FUNCTION_SCOPE,
+ i::SLOPPY},
+ {" start;\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ {" for ", "(let x = 1 ; x < 10; ++ x) { block; }", " more;",
+ i::BLOCK_SCOPE, i::STRICT},
+ {" for ", "(let x = 1 ; x < 10; ++ x) { block; }", "; more;",
+ i::BLOCK_SCOPE, i::STRICT},
+ {" for ",
+ "(let x = 1 ; x < 10; ++ x) {\n"
+ " block;\n"
+ " }",
+ "\n"
+ " more;",
+ i::BLOCK_SCOPE, i::STRICT},
+ {" for ", "(let x = 1 ; x < 10; ++ x) statement;", " more;",
+ i::BLOCK_SCOPE, i::STRICT},
+ {" for ", "(let x = 1 ; x < 10; ++ x) statement",
+ "\n"
+ " more;",
+ i::BLOCK_SCOPE, i::STRICT},
+ {" for ",
+ "(let x = 1 ; x < 10; ++ x)\n"
+ " statement;",
+ "\n"
+ " more;",
+ i::BLOCK_SCOPE, i::STRICT},
+ {" for ", "(let x in {}) { block; }", " more;", i::BLOCK_SCOPE,
+ i::STRICT},
+ {" for ", "(let x in {}) { block; }", "; more;", i::BLOCK_SCOPE,
+ i::STRICT},
+ {" for ",
+ "(let x in {}) {\n"
+ " block;\n"
+ " }",
+ "\n"
+ " more;",
+ i::BLOCK_SCOPE, i::STRICT},
+ {" for ", "(let x in {}) statement;", " more;", i::BLOCK_SCOPE,
+ i::STRICT},
+ {" for ", "(let x in {}) statement",
+ "\n"
+ " more;",
+ i::BLOCK_SCOPE, i::STRICT},
+ {" for ",
+ "(let x in {})\n"
+ " statement;",
+ "\n"
+ " more;",
+ i::BLOCK_SCOPE, i::STRICT},
+ // Check that 6-byte and 4-byte encodings of UTF-8 strings do not throw
+ // the preparser off in terms of byte offsets.
+ // 2 surrogates, encode a character that doesn't need a surrogate.
+ {" 'foo\355\240\201\355\260\211';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // 4 byte encoding.
+ {" 'foo\360\220\220\212';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // 3 byte encoding of \u0fff.
+ {" 'foo\340\277\277';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // 3 byte surrogate, followed by broken 2-byte surrogate w/ impossible 2nd
+ // byte and last byte missing.
+ {" 'foo\355\240\201\355\211';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Broken 3 byte encoding of \u0fff with missing last byte.
+ {" 'foo\340\277';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Broken 3 byte encoding of \u0fff with missing 2 last bytes.
+ {" 'foo\340';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Broken 3 byte encoding of \u00ff should be a 2 byte encoding.
+ {" 'foo\340\203\277';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Broken 3 byte encoding of \u007f should be a 2 byte encoding.
+ {" 'foo\340\201\277';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Unpaired lead surrogate.
+ {" 'foo\355\240\201';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Unpaired lead surrogate where following code point is a 3 byte
+ // sequence.
+ {" 'foo\355\240\201\340\277\277';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Unpaired lead surrogate where following code point is a 4 byte encoding
+ // of a trail surrogate.
+ {" 'foo\355\240\201\360\215\260\211';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Unpaired trail surrogate.
+ {" 'foo\355\260\211';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // 2 byte encoding of \u00ff.
+ {" 'foo\303\277';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Broken 2 byte encoding of \u00ff with missing last byte.
+ {" 'foo\303';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Broken 2 byte encoding of \u007f should be a 1 byte encoding.
+ {" 'foo\301\277';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Illegal 5 byte encoding.
+ {" 'foo\370\277\277\277\277';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Illegal 6 byte encoding.
+ {" 'foo\374\277\277\277\277\277';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Illegal 0xfe byte
+ {" 'foo\376\277\277\277\277\277\277';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ // Illegal 0xff byte
+ {" 'foo\377\277\277\277\277\277\277\277';\n"
+ " (function fun",
+ "(a,b) { infunction; }", ")();", i::FUNCTION_SCOPE, i::SLOPPY},
+ {" 'foo';\n"
+ " (function fun",
+ "(a,b) { 'bar\355\240\201\355\260\213'; }", ")();", i::FUNCTION_SCOPE,
+ i::SLOPPY},
+ {" 'foo';\n"
+ " (function fun",
+ "(a,b) { 'bar\360\220\220\214'; }", ")();", i::FUNCTION_SCOPE,
+ i::SLOPPY},
+ {NULL, NULL, NULL, i::EVAL_SCOPE, i::SLOPPY}};
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
@@ -1490,6 +1294,7 @@ enum ParserFlag {
kAllowHarmonyAsyncAwait,
kAllowHarmonyRestrictiveGenerators,
kAllowHarmonyTrailingCommas,
+ kAllowHarmonyClassFields,
};
enum ParserSyncTestResult {
@@ -1514,6 +1319,8 @@ void SetParserFlags(i::ParserBase<Traits>* parser,
flags.Contains(kAllowHarmonyRestrictiveGenerators));
parser->set_allow_harmony_trailing_commas(
flags.Contains(kAllowHarmonyTrailingCommas));
+ parser->set_allow_harmony_class_fields(
+ flags.Contains(kAllowHarmonyClassFields));
}
@@ -1533,14 +1340,15 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
i::CompleteParserRecorder log;
if (test_preparser) {
i::Scanner scanner(isolate->unicode_cache());
- i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
+ std::unique_ptr<i::Utf16CharacterStream> stream(
+ i::ScannerStream::For(source));
i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->heap()->HashSeed());
i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
stack_limit);
SetParserFlags(&preparser, flags);
- scanner.Initialize(&stream);
+ scanner.Initialize(stream.get());
i::PreParser::PreParseResult result =
preparser.PreParseProgram(&preparser_materialized_literals, is_module);
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
@@ -2896,6 +2704,7 @@ TEST(StrictObjectLiteralChecking) {
TEST(ErrorsObjectLiteralChecking) {
+ // clang-format off
const char* context_data[][2] = {
{"\"use strict\"; var myobject = {", "};"},
{"var myobject = {", "};"},
@@ -2912,14 +2721,60 @@ TEST(ErrorsObjectLiteralChecking) {
// Parsing FunctionLiteral for getter or setter fails
"get foo( +",
"get foo() \"error\"",
+ // Various forbidden forms
+ "static x: 0",
+ "static x(){}",
+ "static async x(){}",
+ "static get x(){}",
+ "static get x : 0",
+ "static x",
+ "static 0",
+ "*x: 0",
+ "*x",
+ "*get x(){}",
+ "*set x(y){}",
+ "get *x(){}",
+ "set *x(y){}",
+ "get x*(){}",
+ "set x*(y){}",
+ "x = 0",
+ "* *x(){}",
+ "x*(){}",
+ // This should fail without --harmony-async-await
+ "async x(){}",
NULL
};
+ // clang-format on
RunParserSyncTest(context_data, statement_data, kError);
+
+ // clang-format off
+ const char* async_data[] = {
+ "static async x(){}",
+ "static async x : 0",
+ "static async get x : 0",
+ "async static x(){}",
+ "*async x(){}",
+ "async *x(){}",
+ "async x*(){}",
+ "async x : 0",
+ "async 0 : 0",
+ "async get x(){}",
+ "async get *x(){}",
+ "async set x(y){}",
+ "async get : 0",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyAsyncAwait};
+ RunParserSyncTest(context_data, async_data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
}
TEST(NoErrorsObjectLiteralChecking) {
+ // clang-format off
const char* context_data[][2] = {
{"var myobject = {", "};"},
{"var myobject = {", ",};"},
@@ -2968,6 +2823,19 @@ TEST(NoErrorsObjectLiteralChecking) {
"1: 1, set 2(v) {}",
"get: 1, get foo() {}",
"set: 1, set foo(_) {}",
+ // Potentially confusing cases
+ "get(){}",
+ "set(){}",
+ "static(){}",
+ "async(){}",
+ "*get() {}",
+ "*set() {}",
+ "*static() {}",
+ "*async(){}",
+ "get : 0",
+ "set : 0",
+ "static : 0",
+ "async : 0",
// Keywords, future reserved and strict future reserved are also allowed as
// property names.
"if: 4",
@@ -2977,8 +2845,28 @@ TEST(NoErrorsObjectLiteralChecking) {
"arguments: 8",
NULL
};
+ // clang-format on
RunParserSyncTest(context_data, statement_data, kSuccess);
+
+ // clang-format off
+ const char* async_data[] = {
+ "async x(){}",
+ "async 0(){}",
+ "async get(){}",
+ "async set(){}",
+ "async static(){}",
+ "async async(){}",
+ "async : 0",
+ "async(){}",
+ "*async(){}",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyAsyncAwait};
+ RunParserSyncTest(context_data, async_data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
}
@@ -3317,14 +3205,15 @@ TEST(SerializationOfMaybeAssignmentFlag) {
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
i::Context* context = f->context();
i::AstValueFactory avf(&zone, isolate->heap()->HashSeed());
- avf.Internalize(isolate);
const i::AstRawString* name = avf.GetOneByteString("result");
+ avf.Internalize(isolate);
i::Handle<i::String> str = name->string();
CHECK(str->IsInternalizedString());
- i::DeclarationScope* script_scope = new (&zone) i::DeclarationScope(&zone);
+ i::DeclarationScope* script_scope =
+ new (&zone) i::DeclarationScope(&zone, &avf);
i::Scope* s = i::Scope::DeserializeScopeChain(
- isolate, &zone, context, script_scope, &avf,
- i::Scope::DeserializationMode::kKeepScopeInfo);
+ isolate, &zone, context->scope_info(), script_scope, &avf,
+ i::Scope::DeserializationMode::kIncludingVariables);
CHECK(s != script_scope);
CHECK(name != NULL);
@@ -3366,14 +3255,15 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
i::Context* context = f->context();
i::AstValueFactory avf(&zone, isolate->heap()->HashSeed());
+ const i::AstRawString* name_x = avf.GetOneByteString("x");
avf.Internalize(isolate);
- i::DeclarationScope* script_scope = new (&zone) i::DeclarationScope(&zone);
+ i::DeclarationScope* script_scope =
+ new (&zone) i::DeclarationScope(&zone, &avf);
i::Scope* s = i::Scope::DeserializeScopeChain(
- isolate, &zone, context, script_scope, &avf,
- i::Scope::DeserializationMode::kKeepScopeInfo);
+ isolate, &zone, context->scope_info(), script_scope, &avf,
+ i::Scope::DeserializationMode::kIncludingVariables);
CHECK(s != script_scope);
- const i::AstRawString* name_x = avf.GetOneByteString("x");
// Get result from f's function context (that is g's outer context)
i::Variable* var_x = s->Lookup(name_x);
@@ -3470,7 +3360,7 @@ TEST(InnerAssignment) {
{ "(function(x) { eval(''); })", true, false },
};
- // Used to trigger lazy compilation of function
+ // Used to trigger lazy parsing of the outer function.
int comment_len = 2048;
i::ScopedVector<char> comment(comment_len + 1);
i::SNPrintF(comment, "/*%0*d*/", comment_len - 4, 0);
@@ -3481,46 +3371,48 @@ TEST(InnerAssignment) {
const char* outer = outers[i].source;
int outer_len = Utf8LengthHelper(outer);
for (unsigned j = 0; j < arraysize(inners); ++j) {
- for (unsigned outer_lazy = 0; outer_lazy < 2; ++outer_lazy) {
- for (unsigned inner_lazy = 0; inner_lazy < 2; ++inner_lazy) {
- if (outers[i].strict && inners[j].with) continue;
- const char* inner = inners[j].source;
- int inner_len = Utf8LengthHelper(inner);
-
- int outer_comment_len = outer_lazy ? comment_len : 0;
- int inner_comment_len = inner_lazy ? comment_len : 0;
- const char* outer_comment = outer_lazy ? comment.start() : "";
- const char* inner_comment = inner_lazy ? comment.start() : "";
- int len = prefix_len + outer_comment_len + outer_len + midfix_len +
- inner_comment_len + inner_len + suffix_len;
- i::ScopedVector<char> program(len + 1);
-
- i::SNPrintF(program, "%s%s%s%s%s%s%s", prefix, outer_comment, outer,
- midfix, inner_comment, inner, suffix);
- i::Handle<i::String> source =
- factory->InternalizeUtf8String(program.start());
- source->PrintOn(stdout);
- printf("\n");
-
- i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone(CcTest::i_isolate()->allocator());
- i::ParseInfo info(&zone, script);
- i::Parser parser(&info);
- CHECK(parser.Parse(&info));
- CHECK(i::Compiler::Analyze(&info));
- CHECK(info.literal() != NULL);
-
- i::Scope* scope = info.literal()->scope();
- i::Scope* inner_scope = scope->inner_scope();
- DCHECK_NOT_NULL(inner_scope);
- DCHECK_NULL(inner_scope->sibling());
- const i::AstRawString* var_name =
- info.ast_value_factory()->GetOneByteString("x");
- i::Variable* var = inner_scope->Lookup(var_name);
- bool expected = outers[i].assigned || inners[j].assigned;
- CHECK(var != NULL);
- CHECK(var->is_used() || !expected);
- CHECK((var->maybe_assigned() == i::kMaybeAssigned) == expected);
+ for (unsigned lazy = 0; lazy < 2; ++lazy) {
+ if (outers[i].strict && inners[j].with) continue;
+ const char* inner = inners[j].source;
+ int inner_len = Utf8LengthHelper(inner);
+
+ const char* comment_chars = lazy ? comment.start() : "";
+ int len = prefix_len + (lazy ? comment_len : 0) + outer_len +
+ midfix_len + inner_len + suffix_len;
+ i::ScopedVector<char> program(len + 1);
+
+ i::SNPrintF(program, "%s%s%s%s%s%s", comment_chars, prefix, outer,
+ midfix, inner, suffix);
+ i::Handle<i::String> source =
+ factory->InternalizeUtf8String(program.start());
+ source->PrintOn(stdout);
+ printf("\n");
+
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::Zone zone(CcTest::i_isolate()->allocator());
+ i::ParseInfo info(&zone, script);
+ i::Parser parser(&info);
+ CHECK(parser.Parse(&info));
+ CHECK(i::Compiler::Analyze(&info));
+ CHECK(info.literal() != NULL);
+
+ i::Scope* scope = info.literal()->scope();
+ i::Scope* inner_scope = scope->inner_scope();
+ DCHECK_NOT_NULL(inner_scope);
+ DCHECK_NULL(inner_scope->sibling());
+ const i::AstRawString* var_name =
+ info.ast_value_factory()->GetOneByteString("x");
+ i::Variable* var = inner_scope->Lookup(var_name);
+ bool expected = outers[i].assigned || inners[j].assigned;
+ CHECK(var != NULL);
+ CHECK(var->is_used() || !expected);
+ bool is_maybe_assigned = var->maybe_assigned() == i::kMaybeAssigned;
+ if (i::FLAG_lazy_inner_functions) {
+ // If we parse inner functions lazily, allow being pessimistic about
+ // maybe_assigned.
+ CHECK(is_maybe_assigned || (is_maybe_assigned == expected));
+ } else {
+ CHECK(is_maybe_assigned == expected);
}
}
}
@@ -4392,6 +4284,10 @@ TEST(ClassBodyNoErrors) {
"*get() {}",
"*set() {}",
"static *g() {}",
+ "async(){}",
+ "*async(){}",
+ "static async(){}",
+ "static *async(){}",
// Escaped 'static' should be allowed anywhere
// static-as-PropertyName is.
@@ -4407,6 +4303,27 @@ TEST(ClassBodyNoErrors) {
// clang-format on
RunParserSyncTest(context_data, class_body_data, kSuccess);
+
+ // clang-format off
+ const char* async_data[] = {
+ "static async x(){}",
+ "static async(){}",
+ "static *async(){}",
+ "async x(){}",
+ "async 0(){}",
+ "async get(){}",
+ "async set(){}",
+ "async static(){}",
+ "async async(){}",
+ "async(){}",
+ "*async(){}",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyAsyncAwait};
+ RunParserSyncTest(context_data, async_data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
}
@@ -4462,6 +4379,160 @@ TEST(ClassPropertyNameNoErrors) {
RunParserSyncTest(context_data, name_data, kSuccess);
}
+TEST(ClassFieldsNoErrors) {
+ // clang-format off
+ // Tests proposed class fields syntax.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {NULL, NULL}};
+ const char* class_body_data[] = {
+ // Basic syntax
+ "a = 0;",
+ "a = 0; b",
+ "a = 0; b(){}",
+ "a = 0; *b(){}",
+ "a = 0; ['b'](){}",
+ "a;",
+ "a; b;",
+ "a; b(){}",
+ "a; *b(){}",
+ "a; ['b'](){}",
+ "['a'] = 0;",
+ "['a'] = 0; b",
+ "['a'] = 0; b(){}",
+ "['a'] = 0; *b(){}",
+ "['a'] = 0; ['b'](){}",
+ "['a'];",
+ "['a']; b;",
+ "['a']; b(){}",
+ "['a']; *b(){}",
+ "['a']; ['b'](){}",
+
+ "0 = 0;",
+ "0;",
+ "'a' = 0;",
+ "'a';",
+
+ "static a = 0;",
+ "static a;",
+ "static ['a'] = 0",
+ "static ['a']",
+ "static 0 = 0;",
+ "static 0;",
+ "static 'a' = 0;",
+ "static 'a';",
+
+ // ASI
+ "a = 0\n",
+ "a = 0\n b",
+ "a = 0\n b(){}",
+ "a\n",
+ "a\n b\n",
+ "a\n b(){}",
+ "a\n *b(){}",
+ "a\n ['b'](){}",
+ "['a'] = 0\n",
+ "['a'] = 0\n b",
+ "['a'] = 0\n b(){}",
+ "['a']\n",
+ "['a']\n b\n",
+ "['a']\n b(){}",
+ "['a']\n *b(){}",
+ "['a']\n ['b'](){}",
+
+ // ASI edge cases
+ "a\n get",
+ "get\n *a(){}",
+ "a\n static",
+
+ // Misc edge cases
+ "yield",
+ "yield = 0",
+ "yield\n a",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag without_async[] = {kAllowHarmonyClassFields};
+ RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
+ without_async, arraysize(without_async));
+
+ // clang-format off
+ const char* async_data[] = {
+ "async;",
+ "async = 0;",
+ "static async;"
+ "async",
+ "async = 0",
+ "static async",
+ "async\n a(){}", // a field named async, and a method named a.
+ "async\n a",
+ "await;",
+ "await = 0;",
+ "await\n a",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag with_async[] = {kAllowHarmonyClassFields,
+ kAllowHarmonyAsyncAwait};
+ RunParserSyncTest(context_data, async_data, kSuccess, NULL, 0, with_async,
+ arraysize(with_async));
+}
+
+TEST(ClassFieldsErrors) {
+ // clang-format off
+ // Tests proposed class fields syntax.
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {NULL, NULL}};
+ const char* class_body_data[] = {
+ "a : 0",
+ "a =",
+ "*a = 0",
+ "*a",
+ "get a",
+ "yield a",
+ "a : 0;",
+ "a =;",
+ "*a = 0;",
+ "*a;",
+ "get a;",
+ "yield a;",
+
+ // ASI requires a linebreak
+ "a b",
+ "a = 0 b",
+
+ // ASI requires that the next token is not part of any legal production
+ "a = 0\n *b(){}",
+ "a = 0\n ['b'](){}",
+ "get\n a",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag without_async[] = {kAllowHarmonyClassFields};
+ RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
+ without_async, arraysize(without_async));
+
+ // clang-format off
+ const char* async_data[] = {
+ "async a = 0",
+ "async a",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag with_async[] = {kAllowHarmonyClassFields,
+ kAllowHarmonyAsyncAwait};
+ RunParserSyncTest(context_data, async_data, kError, NULL, 0, with_async,
+ arraysize(with_async));
+}
TEST(ClassExpressionErrors) {
const char* context_data[][2] = {{"(", ");"},
@@ -4520,6 +4591,43 @@ TEST(ClassDeclarationErrors) {
RunParserSyncTest(context_data, class_data, kError);
}
+TEST(ClassAsyncErrors) {
+ // clang-format off
+ const char* context_data[][2] = {{"(class {", "});"},
+ {"(class extends Base {", "});"},
+ {"class C {", "}"},
+ {"class C extends Base {", "}"},
+ {NULL, NULL}};
+ const char* async_data[] = {
+ "*async x(){}",
+ "async *(){}",
+ "async *x(){}",
+ "async get x(){}",
+ "async set x(y){}",
+ "async x : 0",
+ "async : 0",
+
+ "async static x(){}",
+
+ "static *async x(){}",
+ "static async *(){}",
+ "static async *x(){}",
+ "static async get x(){}",
+ "static async set x(y){}",
+ "static async x : 0",
+ "static async : 0",
+ NULL
+ };
+ // clang-format on
+
+ // All of these are illegal whether or not async functions are permitted,
+ // although for different reasons.
+ RunParserSyncTest(context_data, async_data, kError);
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyAsyncAwait};
+ RunParserSyncTest(context_data, async_data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
TEST(ClassNameErrors) {
const char* context_data[][2] = {{"class ", "{}"},
@@ -5845,30 +5953,26 @@ TEST(EnumReserved) {
RunModuleParserSyncTest(context_data, kErrorSources, kError);
}
-static void CheckModuleEntry(const i::ModuleDescriptor::ModuleEntry* entry,
- const char* export_name, const char* local_name, const char* import_name,
- const char* module_request) {
+static void CheckEntry(const i::ModuleDescriptor::Entry* entry,
+ const char* export_name, const char* local_name,
+ const char* import_name, int module_request) {
CHECK_NOT_NULL(entry);
if (export_name == nullptr) {
CHECK_NULL(entry->export_name);
} else {
- entry->export_name->IsOneByteEqualTo(export_name);
+ CHECK(entry->export_name->IsOneByteEqualTo(export_name));
}
if (local_name == nullptr) {
CHECK_NULL(entry->local_name);
} else {
- entry->local_name->IsOneByteEqualTo(local_name);
+ CHECK(entry->local_name->IsOneByteEqualTo(local_name));
}
if (import_name == nullptr) {
CHECK_NULL(entry->import_name);
} else {
- entry->import_name->IsOneByteEqualTo(import_name);
- }
- if (module_request == nullptr) {
- CHECK_NULL(entry->module_request);
- } else {
- entry->module_request->IsOneByteEqualTo(module_request);
+ CHECK(entry->import_name->IsOneByteEqualTo(import_name));
}
+ CHECK_EQ(entry->module_request, module_request);
}
TEST(ModuleParsingInternals) {
@@ -5914,6 +6018,7 @@ TEST(ModuleParsingInternals) {
CHECK(outer_scope->is_script_scope());
CHECK_NULL(outer_scope->outer_scope());
CHECK(module_scope->is_module_scope());
+ const i::ModuleDescriptor::Entry* entry;
i::ZoneList<i::Declaration*>* declarations = module_scope->declarations();
CHECK_EQ(13, declarations->length());
@@ -5999,52 +6104,86 @@ TEST(ModuleParsingInternals) {
i::ModuleDescriptor* descriptor = module_scope->module();
CHECK_NOT_NULL(descriptor);
- CHECK_EQ(11, descriptor->exports().length());
- CheckModuleEntry(
- descriptor->exports().at(0), "y", "x", nullptr, nullptr);
- CheckModuleEntry(
- descriptor->exports().at(1), "b", nullptr, "a", "m.js");
- CheckModuleEntry(
- descriptor->exports().at(2), nullptr, nullptr, nullptr, "p.js");
- CheckModuleEntry(
- descriptor->exports().at(3), "foo", "foo", nullptr, nullptr);
- CheckModuleEntry(
- descriptor->exports().at(4), "goo", "goo", nullptr, nullptr);
- CheckModuleEntry(
- descriptor->exports().at(5), "hoo", "hoo", nullptr, nullptr);
- CheckModuleEntry(
- descriptor->exports().at(6), "joo", "joo", nullptr, nullptr);
- CheckModuleEntry(
- descriptor->exports().at(7), "default", "*default*", nullptr, nullptr);
- CheckModuleEntry(
- descriptor->exports().at(8), "bb", nullptr, "aa", "m.js"); // !!!
- CheckModuleEntry(
- descriptor->exports().at(9), "x", "x", nullptr, nullptr);
- CheckModuleEntry(
- descriptor->exports().at(10), "foob", "foob", nullptr, nullptr);
-
- CHECK_EQ(3, descriptor->special_imports().length());
- CheckModuleEntry(
- descriptor->special_imports().at(0), nullptr, nullptr, nullptr, "q.js");
- CheckModuleEntry(
- descriptor->special_imports().at(1), nullptr, "loo", nullptr, "bar.js");
- CheckModuleEntry(
- descriptor->special_imports().at(2), nullptr, "foob", nullptr, "bar.js");
+ CHECK_EQ(5, descriptor->module_requests().size());
+ for (const auto& elem : descriptor->module_requests()) {
+ if (elem.first->IsOneByteEqualTo("m.js"))
+ CHECK_EQ(elem.second, 0);
+ else if (elem.first->IsOneByteEqualTo("n.js"))
+ CHECK_EQ(elem.second, 1);
+ else if (elem.first->IsOneByteEqualTo("p.js"))
+ CHECK_EQ(elem.second, 2);
+ else if (elem.first->IsOneByteEqualTo("q.js"))
+ CHECK_EQ(elem.second, 3);
+ else if (elem.first->IsOneByteEqualTo("bar.js"))
+ CHECK_EQ(elem.second, 4);
+ else
+ CHECK(false);
+ }
+
+ CHECK_EQ(3, descriptor->special_exports().length());
+ CheckEntry(descriptor->special_exports().at(0), "b", nullptr, "a", 0);
+ CheckEntry(descriptor->special_exports().at(1), nullptr, nullptr, nullptr, 2);
+ CheckEntry(descriptor->special_exports().at(2), "bb", nullptr, "aa",
+ 0); // !!!
+
+ CHECK_EQ(8, descriptor->regular_exports().size());
+ entry = descriptor->regular_exports()
+ .find(declarations->at(3)->proxy()->raw_name())
+ ->second;
+ CheckEntry(entry, "foo", "foo", nullptr, -1);
+ entry = descriptor->regular_exports()
+ .find(declarations->at(4)->proxy()->raw_name())
+ ->second;
+ CheckEntry(entry, "goo", "goo", nullptr, -1);
+ entry = descriptor->regular_exports()
+ .find(declarations->at(5)->proxy()->raw_name())
+ ->second;
+ CheckEntry(entry, "hoo", "hoo", nullptr, -1);
+ entry = descriptor->regular_exports()
+ .find(declarations->at(6)->proxy()->raw_name())
+ ->second;
+ CheckEntry(entry, "joo", "joo", nullptr, -1);
+ entry = descriptor->regular_exports()
+ .find(declarations->at(7)->proxy()->raw_name())
+ ->second;
+ CheckEntry(entry, "default", "*default*", nullptr, -1);
+ entry = descriptor->regular_exports()
+ .find(declarations->at(12)->proxy()->raw_name())
+ ->second;
+ CheckEntry(entry, "foob", "foob", nullptr, -1);
+ // TODO(neis): The next lines are terrible. Find a better way.
+ auto name_x = declarations->at(0)->proxy()->raw_name();
+ CHECK_EQ(2, descriptor->regular_exports().count(name_x));
+ auto it = descriptor->regular_exports().equal_range(name_x).first;
+ entry = it->second;
+ if (entry->export_name->IsOneByteEqualTo("y")) {
+ CheckEntry(entry, "y", "x", nullptr, -1);
+ entry = (++it)->second;
+ CheckEntry(entry, "x", "x", nullptr, -1);
+ } else {
+ CheckEntry(entry, "x", "x", nullptr, -1);
+ entry = (++it)->second;
+ CheckEntry(entry, "y", "x", nullptr, -1);
+ }
+
+ CHECK_EQ(2, descriptor->namespace_imports().length());
+ CheckEntry(descriptor->namespace_imports().at(0), nullptr, "loo", nullptr, 4);
+ CheckEntry(descriptor->namespace_imports().at(1), nullptr, "foob", nullptr,
+ 4);
CHECK_EQ(4, descriptor->regular_imports().size());
- const i::ModuleDescriptor::ModuleEntry* entry;
entry = descriptor->regular_imports().find(
declarations->at(1)->proxy()->raw_name())->second;
- CheckModuleEntry(entry, nullptr, "z", "q", "m.js");
+ CheckEntry(entry, nullptr, "z", "q", 0);
entry = descriptor->regular_imports().find(
declarations->at(2)->proxy()->raw_name())->second;
- CheckModuleEntry(entry, nullptr, "n", "default", "n.js");
+ CheckEntry(entry, nullptr, "n", "default", 1);
entry = descriptor->regular_imports().find(
declarations->at(9)->proxy()->raw_name())->second;
- CheckModuleEntry(entry, nullptr, "mm", "m", "m.js");
+ CheckEntry(entry, nullptr, "mm", "m", 0);
entry = descriptor->regular_imports().find(
declarations->at(10)->proxy()->raw_name())->second;
- CheckModuleEntry(entry, nullptr, "aa", "aa", "m.js");
+ CheckEntry(entry, nullptr, "aa", "aa", 0);
}
@@ -6277,10 +6416,9 @@ TEST(DestructuringPositiveTests) {
RunParserSyncTest(context_data, data, kSuccess);
// v8:5201
- // TODO(lpy): The two test sets below should be merged once
- // we fix https://bugs.chromium.org/p/v8/issues/detail?id=4577
{
- const char* sloppy_context_data1[][2] = {
+ // clang-format off
+ const char* sloppy_context_data[][2] = {
{"var ", " = {};"},
{"function f(", ") {}"},
{"function f(argument1, ", ") {}"},
@@ -6289,26 +6427,17 @@ TEST(DestructuringPositiveTests) {
{"try {} catch(", ") {}"},
{NULL, NULL}
};
- const char* data1[] = {
+ const char* data[] = {
+ "{arguments}",
"{eval}",
+ "{x: arguments}",
"{x: eval}",
+ "{arguments = false}",
"{eval = false}",
NULL
};
- RunParserSyncTest(sloppy_context_data1, data1, kSuccess);
-
- const char* sloppy_context_data2[][2] = {
- {"var ", " = {};"},
- {"try {} catch(", ") {}"},
- {NULL, NULL}
- };
- const char* data2[] = {
- "{arguments}",
- "{x: arguments}",
- "{arguments = false}",
- NULL,
- };
- RunParserSyncTest(sloppy_context_data2, data2, kSuccess);
+ // clang-format on
+ RunParserSyncTest(sloppy_context_data, data, kSuccess);
}
}
@@ -7685,6 +7814,9 @@ TEST(AsyncAwait) {
"var asyncFn = async({ foo = 1 }) => foo;",
"var asyncFn = async({ foo = 1 } = {}) => foo;",
+
+ "function* g() { var f = async(yield); }",
+ "function* g() { var f = async(x = yield); }",
NULL
};
// clang-format on
@@ -7790,7 +7922,10 @@ TEST(AsyncAwaitErrors) {
"var f = async(x = await 1) => x;",
"var O = { async method(x = await 1) { return x; } };",
- "var f = async(x = await) => 1;",
+ "function* g() { var f = async yield => 1; }",
+ "function* g() { var f = async(yield) => 1; }",
+ "function* g() { var f = async(x = yield) => 1; }",
+ "function* g() { var f = async({x = yield}) => 1; }",
"class C { async constructor() {} }",
"class C {}; class C2 extends C { async constructor() {} }",
@@ -7863,6 +7998,7 @@ TEST(AsyncAwaitErrors) {
"var f = async(await) => 1;",
"var f = async(await = 1) => 1;",
"var f = async(...[await]) => 1;",
+ "var f = async(x = await) => 1;",
// v8:5190
"var f = async(1) => 1",
@@ -7871,6 +8007,12 @@ TEST(AsyncAwaitErrors) {
"var f = async({ foo = async(1) => 1 }) => 1",
"var f = async({ foo = async(a) => 1 })",
+ "var f = async(x = async(await)) => 1;",
+ "var f = async(x = { [await]: 1 }) => 1;",
+ "var f = async(x = class extends (await) { }) => 1;",
+ "var f = async(x = class { static [await]() {} }) => 1;",
+ "var f = async({ x = await }) => 1;",
+
NULL
};
// clang-format on
@@ -8150,3 +8292,38 @@ TEST(TrailingCommasInParametersErrors) {
RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
arraysize(always_flags));
}
+
+TEST(ArgumentsRedeclaration) {
+ {
+ // clang-format off
+ const char* context_data[][2] = {
+ { "function f(", ") {}" },
+ { NULL, NULL }
+ };
+ const char* success_data[] = {
+ "{arguments}",
+ "{arguments = false}",
+ "arg1, arguments",
+ "arg1, ...arguments",
+ NULL
+ };
+ // clang-format on
+ RunParserSyncTest(context_data, success_data, kSuccess);
+ }
+
+ {
+ // clang-format off
+ const char* context_data[][2] = {
+ { "function f() {", "}" },
+ { NULL, NULL }
+ };
+ const char* data[] = {
+ "const arguments = 1",
+ "let arguments",
+ "var arguments",
+ NULL
+ };
+ // clang-format on
+ RunParserSyncTest(context_data, data, kSuccess);
+ }
+}
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 2a133bf1f8..272dec39b4 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -27,11 +27,11 @@
//
// Tests of profiles generator and utilities.
-#include "src/v8.h"
-
#include "include/v8-profiler.h"
+#include "src/api.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
+#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/profiler-extension.h"
diff --git a/deps/v8/test/cctest/test-random-number-generator.cc b/deps/v8/test/cctest/test-random-number-generator.cc
index 8a855fe5d5..0615f15ab9 100644
--- a/deps/v8/test/cctest/test-random-number-generator.cc
+++ b/deps/v8/test/cctest/test-random-number-generator.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "src/flags.h"
+#include "src/isolate.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-representation.cc b/deps/v8/test/cctest/test-representation.cc
index fc1f531331..496547f0d7 100644
--- a/deps/v8/test/cctest/test-representation.cc
+++ b/deps/v8/test/cctest/test-representation.cc
@@ -28,7 +28,6 @@
#include "test/cctest/cctest.h"
#include "src/property-details.h"
-#include "src/types.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-sampler-api.cc b/deps/v8/test/cctest/test-sampler-api.cc
index e2c1c25638..9d17e8bed1 100644
--- a/deps/v8/test/cctest/test-sampler-api.cc
+++ b/deps/v8/test/cctest/test-sampler-api.cc
@@ -7,6 +7,7 @@
#include <map>
#include <string>
#include "include/v8.h"
+#include "src/flags.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index b2bf51b1c6..37992fa7e7 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -38,7 +38,6 @@
#include "src/heap/spaces.h"
#include "src/macro-assembler.h"
#include "src/objects.h"
-#include "src/parsing/parser.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/deserializer.h"
@@ -91,7 +90,8 @@ static Vector<const byte> Serialize(v8::Isolate* isolate) {
}
Isolate* internal_isolate = reinterpret_cast<Isolate*>(isolate);
- internal_isolate->heap()->CollectAllAvailableGarbage("serialize");
+ internal_isolate->heap()->CollectAllAvailableGarbage(
+ i::GarbageCollectionReason::kTesting);
StartupSerializer ser(internal_isolate,
v8::SnapshotCreator::FunctionCodeHandling::kClear);
ser.SerializeStrongReferences();
@@ -264,8 +264,10 @@ static void PartiallySerializeObject(Vector<const byte>* startup_blob_out,
isolate->bootstrapper()->SourceLookup<Natives>(i);
}
}
- heap->CollectAllGarbage();
- heap->CollectAllGarbage();
+ heap->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
+ i::GarbageCollectionReason::kTesting);
+ heap->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
+ i::GarbageCollectionReason::kTesting);
Object* raw_foo;
{
@@ -367,7 +369,8 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of env.
- heap->CollectAllGarbage();
+ heap->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
+ i::GarbageCollectionReason::kTesting);
{
v8::HandleScope handle_scope(v8_isolate);
@@ -485,7 +488,8 @@ static void PartiallySerializeCustomContext(
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of env.
- isolate->heap()->CollectAllAvailableGarbage("snapshotting");
+ isolate->heap()->CollectAllAvailableGarbage(
+ i::GarbageCollectionReason::kTesting);
{
v8::HandleScope handle_scope(v8_isolate);
@@ -811,7 +815,7 @@ TEST(SnapshotDataBlobWithWarmup) {
// Running the warmup script has effect on whether functions are
// pre-compiled, but does not pollute the context.
CHECK(IsCompiled("Math.abs"));
- CHECK(!IsCompiled("Number.isFinite"));
+ CHECK(!IsCompiled("Number.parseInt"));
CHECK(CompileRun("Math.random")->IsFunction());
}
isolate->Dispose();
@@ -821,8 +825,8 @@ TEST(CustomSnapshotDataBlobWithWarmup) {
DisableTurbofan();
const char* source =
"function f() { return Math.abs(1); }\n"
- "function g() { return Number.isFinite(1); }\n"
- "Number.isNaN(1);"
+ "function g() { return Number.parseInt(1); }\n"
+ "Number.parseFloat(1);"
"var a = 5";
const char* warmup = "a = f()";
@@ -846,8 +850,8 @@ TEST(CustomSnapshotDataBlobWithWarmup) {
CHECK(IsCompiled("f"));
CHECK(IsCompiled("Math.abs"));
CHECK(!IsCompiled("g"));
- CHECK(!IsCompiled("Number.isFinite"));
- CHECK(!IsCompiled("Number.isNaN"));
+ CHECK(!IsCompiled("Number.parseInt"));
+ CHECK(!IsCompiled("Number.parseFloat"));
CHECK_EQ(5, CompileRun("a")->Int32Value(context).FromJust());
}
isolate->Dispose();
@@ -1178,13 +1182,13 @@ TEST(CodeSerializerThreeBigStrings) {
Vector<const uint8_t> source_b =
ConstructSource(STATIC_CHAR_VECTOR("var b = \""), STATIC_CHAR_VECTOR("b"),
- STATIC_CHAR_VECTOR("\";"), 600000);
+ STATIC_CHAR_VECTOR("\";"), 400000);
Handle<String> source_b_str =
f->NewStringFromOneByte(source_b).ToHandleChecked();
Vector<const uint8_t> source_c =
ConstructSource(STATIC_CHAR_VECTOR("var c = \""), STATIC_CHAR_VECTOR("c"),
- STATIC_CHAR_VECTOR("\";"), 500000);
+ STATIC_CHAR_VECTOR("\";"), 400000);
Handle<String> source_c_str =
f->NewStringFromOneByte(source_c).ToHandleChecked();
@@ -1217,10 +1221,10 @@ TEST(CodeSerializerThreeBigStrings) {
v8::Maybe<int32_t> result =
CompileRun("(a + b).length")
->Int32Value(v8::Isolate::GetCurrent()->GetCurrentContext());
- CHECK_EQ(600000 + 700000, result.FromJust());
+ CHECK_EQ(400000 + 700000, result.FromJust());
result = CompileRun("(b + c).length")
->Int32Value(v8::Isolate::GetCurrent()->GetCurrentContext());
- CHECK_EQ(500000 + 600000, result.FromJust());
+ CHECK_EQ(400000 + 400000, result.FromJust());
Heap* heap = isolate->heap();
v8::Local<v8::String> result_str =
CompileRun("a")
@@ -1895,7 +1899,6 @@ TEST(CodeSerializerEmbeddedObject) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
- Heap* heap = isolate->heap();
v8::HandleScope scope(CcTest::isolate());
size_t actual_size;
@@ -1935,7 +1938,7 @@ TEST(CodeSerializerEmbeddedObject) {
CHECK(rit2.rinfo()->target_object()->IsHeapNumber());
CHECK_EQ(0.3, HeapNumber::cast(rit2.rinfo()->target_object())->value());
- heap->CollectAllAvailableGarbage();
+ CcTest::CollectAllAvailableGarbage();
RelocIterator rit3(copy->code(),
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
diff --git a/deps/v8/test/cctest/test-simd.cc b/deps/v8/test/cctest/test-simd.cc
index 1f412affba..9f0195de1e 100644
--- a/deps/v8/test/cctest/test-simd.cc
+++ b/deps/v8/test/cctest/test-simd.cc
@@ -4,8 +4,17 @@
#include "src/v8.h"
+#include "src/factory.h"
+#include "src/isolate.h"
#include "src/objects.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
#include "src/ostreams.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-slots-buffer.cc b/deps/v8/test/cctest/test-slots-buffer.cc
deleted file mode 100644
index 4b8aeb7931..0000000000
--- a/deps/v8/test/cctest/test-slots-buffer.cc
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/slots-buffer.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/heap/heap-utils.h"
-
-namespace v8 {
-namespace internal {
-
-TEST(SlotsBufferObjectSlotsRemoval) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- Factory* factory = isolate->factory();
-
- SlotsBuffer* buffer = new SlotsBuffer(NULL);
- void* fake_object[1];
-
- Handle<FixedArray> array = factory->NewFixedArray(2, TENURED);
- CHECK(heap->old_space()->Contains(*array));
- array->set(0, reinterpret_cast<Object*>(fake_object), SKIP_WRITE_BARRIER);
-
- // Firstly, let's test the regular slots buffer entry.
- buffer->Add(HeapObject::RawField(*array, FixedArray::kHeaderSize));
- CHECK(reinterpret_cast<void*>(buffer->Get(0)) ==
- HeapObject::RawField(*array, FixedArray::kHeaderSize));
- SlotsBuffer::RemoveObjectSlots(CcTest::i_isolate()->heap(), buffer,
- array->address(),
- array->address() + array->Size());
- CHECK(reinterpret_cast<void*>(buffer->Get(0)) ==
- HeapObject::RawField(heap->empty_fixed_array(),
- FixedArrayBase::kLengthOffset));
-
- // Secondly, let's test the typed slots buffer entry.
- SlotsBuffer::AddTo(NULL, &buffer, SlotsBuffer::EMBEDDED_OBJECT_SLOT,
- array->address() + FixedArray::kHeaderSize,
- SlotsBuffer::FAIL_ON_OVERFLOW);
- CHECK(reinterpret_cast<void*>(buffer->Get(1)) ==
- reinterpret_cast<Object**>(SlotsBuffer::EMBEDDED_OBJECT_SLOT));
- CHECK(reinterpret_cast<void*>(buffer->Get(2)) ==
- HeapObject::RawField(*array, FixedArray::kHeaderSize));
- SlotsBuffer::RemoveObjectSlots(CcTest::i_isolate()->heap(), buffer,
- array->address(),
- array->address() + array->Size());
- CHECK(reinterpret_cast<void*>(buffer->Get(1)) ==
- HeapObject::RawField(heap->empty_fixed_array(),
- FixedArrayBase::kLengthOffset));
- CHECK(reinterpret_cast<void*>(buffer->Get(2)) ==
- HeapObject::RawField(heap->empty_fixed_array(),
- FixedArrayBase::kLengthOffset));
- delete buffer;
-}
-
-
-TEST(FilterInvalidSlotsBufferEntries) {
- FLAG_manual_evacuation_candidates_selection = true;
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- Factory* factory = isolate->factory();
- SlotsBuffer* buffer = new SlotsBuffer(NULL);
-
- // Set up a fake black object that will contain a recorded SMI, a recorded
- // pointer to a new space object, and a recorded pointer to a non-evacuation
- // candidate object. These object should be filtered out. Additionally,
- // we point to an evacuation candidate object which should not be filtered
- // out.
-
- // Create fake object and mark it black.
- Handle<FixedArray> fake_object = factory->NewFixedArray(23, TENURED);
- MarkBit mark_bit = Marking::MarkBitFrom(*fake_object);
- Marking::MarkBlack(mark_bit);
-
- // Write a SMI into field one and record its address;
- Object** field_smi = fake_object->RawFieldOfElementAt(0);
- *field_smi = Smi::FromInt(100);
- buffer->Add(field_smi);
-
- // Write a new space reference into field 2 and record its address;
- Handle<FixedArray> new_space_object = factory->NewFixedArray(23);
- mark_bit = Marking::MarkBitFrom(*new_space_object);
- Marking::MarkBlack(mark_bit);
- Object** field_new_space = fake_object->RawFieldOfElementAt(1);
- *field_new_space = *new_space_object;
- buffer->Add(field_new_space);
-
- // Write an old space reference into field 3 which points to an object not on
- // an evacuation candidate.
- Handle<FixedArray> old_space_object_non_evacuation =
- factory->NewFixedArray(23, TENURED);
- mark_bit = Marking::MarkBitFrom(*old_space_object_non_evacuation);
- Marking::MarkBlack(mark_bit);
- Object** field_old_space_object_non_evacuation =
- fake_object->RawFieldOfElementAt(2);
- *field_old_space_object_non_evacuation = *old_space_object_non_evacuation;
- buffer->Add(field_old_space_object_non_evacuation);
-
- // Write an old space reference into field 4 which points to an object on an
- // evacuation candidate.
- heap::SimulateFullSpace(heap->old_space());
- Handle<FixedArray> valid_object =
- isolate->factory()->NewFixedArray(23, TENURED);
- Page* page = Page::FromAddress(valid_object->address());
- page->SetFlag(MemoryChunk::EVACUATION_CANDIDATE);
- Object** valid_field = fake_object->RawFieldOfElementAt(3);
- *valid_field = *valid_object;
- buffer->Add(valid_field);
-
- SlotsBuffer::RemoveInvalidSlots(heap, buffer);
- Object** kRemovedEntry = HeapObject::RawField(heap->empty_fixed_array(),
- FixedArrayBase::kLengthOffset);
- CHECK_EQ(buffer->Get(0), kRemovedEntry);
- CHECK_EQ(buffer->Get(1), kRemovedEntry);
- CHECK_EQ(buffer->Get(2), kRemovedEntry);
- CHECK_EQ(buffer->Get(3), valid_field);
-
- // Clean-up to make verify heap happy.
- mark_bit = Marking::MarkBitFrom(*fake_object);
- Marking::MarkWhite(mark_bit);
- mark_bit = Marking::MarkBitFrom(*new_space_object);
- Marking::MarkWhite(mark_bit);
- mark_bit = Marking::MarkBitFrom(*old_space_object_non_evacuation);
- Marking::MarkWhite(mark_bit);
-
- delete buffer;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index afa8ecb7ec..9793ae7f18 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -40,6 +40,7 @@
#include "src/objects.h"
#include "src/unicode-decoder.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-utils.h"
// Adapted from http://en.wikipedia.org/wiki/Multiply-with-carry
class MyRandomNumberGenerator {
@@ -1239,8 +1240,8 @@ TEST(SliceFromSlice) {
UNINITIALIZED_TEST(OneByteArrayJoin) {
v8::Isolate::CreateParams create_params;
// Set heap limits.
- create_params.constraints.set_max_semi_space_size(1 * Page::kPageSize / MB);
- create_params.constraints.set_max_old_space_size(6 * Page::kPageSize / MB);
+ create_params.constraints.set_max_semi_space_size(1);
+ create_params.constraints.set_max_old_space_size(6);
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
isolate->Enter();
@@ -1319,6 +1320,46 @@ TEST(RobustSubStringStub) {
CheckException("%_SubString(slice, 0, 17);");
}
+TEST(RobustSubStringStubExternalStrings) {
+ // Ensure that the specific combination of calling the SubStringStub on an
+ // external string and triggering a GC on string allocation does not crash.
+ // See crbug.com/649967.
+
+ FLAG_allow_natives_syntax = true;
+#ifdef VERIFY_HEAP
+ FLAG_verify_heap = true;
+#endif
+
+ CcTest::InitializeVM();
+ v8::HandleScope handle_scope(CcTest::isolate());
+
+ v8::Local<v8::String> underlying =
+ CompileRun(
+ "var str = 'abcdefghijklmnopqrstuvwxyz';"
+ "str")
+ ->ToString(CcTest::isolate()->GetCurrentContext())
+ .ToLocalChecked();
+ CHECK(v8::Utils::OpenHandle(*underlying)->IsSeqOneByteString());
+
+ const int length = underlying->Length();
+ uc16* two_byte = NewArray<uc16>(length + 1);
+ underlying->Write(two_byte);
+
+ Resource* resource = new Resource(two_byte, length);
+ CHECK(underlying->MakeExternal(resource));
+ CHECK(v8::Utils::OpenHandle(*underlying)->IsExternalTwoByteString());
+
+ v8::Local<v8::Script> script = v8_compile(v8_str("%_SubString(str, 5, 8)"));
+
+ // Trigger a GC on string allocation.
+ i::heap::SimulateFullSpace(CcTest::heap()->new_space());
+
+ v8::Local<v8::Value> result;
+ CHECK(script->Run(v8::Isolate::GetCurrent()->GetCurrentContext())
+ .ToLocal(&result));
+ Handle<String> string = v8::Utils::OpenHandle(v8::String::Cast(*result));
+ CHECK_EQ(0, strcmp("fgh", string->ToCString().get()));
+}
namespace {
diff --git a/deps/v8/test/cctest/test-symbols.cc b/deps/v8/test/cctest/test-symbols.cc
index 1024a27edf..220c52bd65 100644
--- a/deps/v8/test/cctest/test-symbols.cc
+++ b/deps/v8/test/cctest/test-symbols.cc
@@ -30,10 +30,18 @@
// of ConsStrings. These operations may not be very fast, but they
// should be possible without getting errors due to too deep recursion.
-#include "src/v8.h"
-
+#include "src/factory.h"
+#include "src/isolate.h"
#include "src/objects.h"
#include "src/ostreams.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
+#include "src/v8.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -63,8 +71,8 @@ TEST(Create) {
#endif
}
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// All symbols should be distinct.
for (int i = 0; i < kNumSymbols; ++i) {
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 06e6fb00cd..523704ba69 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "src/api.h"
+#include "src/isolate.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index a9058a523a..afeb212253 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -30,7 +30,7 @@
#include "src/base/platform/platform.h"
#include "src/isolate.h"
-
+#include "src/list-inl.h"
class ThreadIdValidationThread : public v8::base::Thread {
public:
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
index 190cb40782..88f295f301 100644
--- a/deps/v8/test/cctest/test-trace-event.cc
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -71,11 +71,14 @@ class MockTracingPlatform : public v8::Platform {
void PerformDelayedTask() {}
- uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
- const char* name, const char* scope, uint64_t id,
- uint64_t bind_id, int num_args, const char** arg_names,
- const uint8_t* arg_types, const uint64_t* arg_values,
- unsigned int flags) override {
+ using Platform::AddTraceEvent;
+ uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, int num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values,
+ std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
+ unsigned int flags) override {
MockTraceObject* to = new MockTraceObject(phase, std::string(name), id,
bind_id, num_args, flags);
trace_object_list_.Add(to);
diff --git a/deps/v8/test/cctest/test-transitions.cc b/deps/v8/test/cctest/test-transitions.cc
index b7eb50f1c9..2f00900057 100644
--- a/deps/v8/test/cctest/test-transitions.cc
+++ b/deps/v8/test/cctest/test-transitions.cc
@@ -12,6 +12,10 @@
#include "src/factory.h"
#include "src/field-type.h"
#include "src/global-handles.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/field-type.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+#include "src/transitions.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index 7b7706febf..dd1b3e3703 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -4,13 +4,25 @@
#include <vector>
+#include "src/compiler/types.h"
#include "src/crankshaft/hydrogen-types.h"
-#include "src/types.h"
+#include "src/factory.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/types-fuzz.h"
using namespace v8::internal;
+using namespace v8::internal::compiler;
+namespace {
// Testing auxiliaries (breaking the Type abstraction).
@@ -29,7 +41,6 @@ typedef uint32_t bitset;
struct Tests {
typedef Types::TypeVector::iterator TypeIterator;
- typedef Types::MapVector::iterator MapIterator;
typedef Types::ValueVector::iterator ValueIterator;
Isolate* isolate;
@@ -52,7 +63,6 @@ struct Tests {
return type1->Equals(type2) &&
this->IsBitset(type1) == this->IsBitset(type2) &&
this->IsUnion(type1) == this->IsUnion(type2) &&
- type1->NumClasses() == type2->NumClasses() &&
type1->NumConstants() == type2->NumConstants() &&
(!this->IsBitset(type1) ||
this->AsBitset(type1) == this->AsBitset(type2)) &&
@@ -103,8 +113,8 @@ struct Tests {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Type* t = *it;
CHECK(1 ==
- this->IsBitset(t) + t->IsClass() + t->IsConstant() + t->IsRange() +
- this->IsUnion(t) + t->IsArray() + t->IsFunction() + t->IsContext());
+ this->IsBitset(t) + t->IsConstant() + t->IsRange() +
+ this->IsUnion(t));
}
}
@@ -177,101 +187,6 @@ struct Tests {
}
}
- void PointwiseRepresentation() {
- // Check we can decompose type into semantics and representation and
- // then compose it back to get an equivalent type.
- int counter = 0;
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- counter++;
- printf("Counter: %i\n", counter);
- fflush(stdout);
- Type* type1 = *it1;
- Type* representation = T.Representation(type1);
- Type* semantic = T.Semantic(type1);
- Type* composed = T.Union(representation, semantic);
- CHECK(type1->Equals(composed));
- }
-
- // Pointwiseness of Union.
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- Type* type1 = *it1;
- Type* type2 = *it2;
- Type* representation1 = T.Representation(type1);
- Type* semantic1 = T.Semantic(type1);
- Type* representation2 = T.Representation(type2);
- Type* semantic2 = T.Semantic(type2);
- Type* direct_union = T.Union(type1, type2);
- Type* representation_union = T.Union(representation1, representation2);
- Type* semantic_union = T.Union(semantic1, semantic2);
- Type* composed_union = T.Union(representation_union, semantic_union);
- CHECK(direct_union->Equals(composed_union));
- }
- }
-
- // Pointwiseness of Intersect.
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- Type* type1 = *it1;
- Type* type2 = *it2;
- Type* representation1 = T.Representation(type1);
- Type* semantic1 = T.Semantic(type1);
- Type* representation2 = T.Representation(type2);
- Type* semantic2 = T.Semantic(type2);
- Type* direct_intersection = T.Intersect(type1, type2);
- Type* representation_intersection =
- T.Intersect(representation1, representation2);
- Type* semantic_intersection = T.Intersect(semantic1, semantic2);
- Type* composed_intersection =
- T.Union(representation_intersection, semantic_intersection);
- CHECK(direct_intersection->Equals(composed_intersection));
- }
- }
-
- // Pointwiseness of Is.
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- Type* type1 = *it1;
- Type* type2 = *it2;
- Type* representation1 = T.Representation(type1);
- Type* semantic1 = T.Semantic(type1);
- Type* representation2 = T.Representation(type2);
- Type* semantic2 = T.Semantic(type2);
- bool representation_is = representation1->Is(representation2);
- bool semantic_is = semantic1->Is(semantic2);
- bool direct_is = type1->Is(type2);
- CHECK(direct_is == (semantic_is && representation_is));
- }
- }
- }
-
- void Class() {
- // Constructor
- for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
- Handle<i::Map> map = *mt;
- Type* type = T.Class(map);
- CHECK(type->IsClass());
- }
-
- // Map attribute
- for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
- Handle<i::Map> map = *mt;
- Type* type = T.Class(map);
- CHECK(*map == *type->AsClass()->Map());
- }
-
- // Functionality & Injectivity: Class(M1) = Class(M2) iff M1 = M2
- for (MapIterator mt1 = T.maps.begin(); mt1 != T.maps.end(); ++mt1) {
- for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
- Handle<i::Map> map1 = *mt1;
- Handle<i::Map> map2 = *mt2;
- Type* type1 = T.Class(map1);
- Type* type2 = T.Class(map2);
- CHECK(Equal(type1, type2) == (*map1 == *map2));
- }
- }
- }
-
void Constant() {
// Constructor
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
@@ -398,129 +313,6 @@ struct Tests {
}
}
- void Context() {
- // Constructor
- for (int i = 0; i < 20; ++i) {
- Type* type = T.Random();
- Type* context = T.Context(type);
- CHECK(context->IsContext());
- }
-
- // Attributes
- for (int i = 0; i < 20; ++i) {
- Type* type = T.Random();
- Type* context = T.Context(type);
- CheckEqual(type, context->AsContext()->Outer());
- }
-
- // Functionality & Injectivity: Context(T1) = Context(T2) iff T1 = T2
- for (int i = 0; i < 20; ++i) {
- for (int j = 0; j < 20; ++j) {
- Type* type1 = T.Random();
- Type* type2 = T.Random();
- Type* context1 = T.Context(type1);
- Type* context2 = T.Context(type2);
- CHECK(Equal(context1, context2) == Equal(type1, type2));
- }
- }
- }
-
- void Array() {
- // Constructor
- for (int i = 0; i < 20; ++i) {
- Type* type = T.Random();
- Type* array = T.Array1(type);
- CHECK(array->IsArray());
- }
-
- // Attributes
- for (int i = 0; i < 20; ++i) {
- Type* type = T.Random();
- Type* array = T.Array1(type);
- CheckEqual(type, array->AsArray()->Element());
- }
-
- // Functionality & Injectivity: Array(T1) = Array(T2) iff T1 = T2
- for (int i = 0; i < 20; ++i) {
- for (int j = 0; j < 20; ++j) {
- Type* type1 = T.Random();
- Type* type2 = T.Random();
- Type* array1 = T.Array1(type1);
- Type* array2 = T.Array1(type2);
- CHECK(Equal(array1, array2) == Equal(type1, type2));
- }
- }
- }
-
- void Function() {
- // Constructors
- for (int i = 0; i < 20; ++i) {
- for (int j = 0; j < 20; ++j) {
- for (int k = 0; k < 20; ++k) {
- Type* type1 = T.Random();
- Type* type2 = T.Random();
- Type* type3 = T.Random();
- Type* function0 = T.Function0(type1, type2);
- Type* function1 = T.Function1(type1, type2, type3);
- Type* function2 = T.Function2(type1, type2, type3);
- CHECK(function0->IsFunction());
- CHECK(function1->IsFunction());
- CHECK(function2->IsFunction());
- }
- }
- }
-
- // Attributes
- for (int i = 0; i < 20; ++i) {
- for (int j = 0; j < 20; ++j) {
- for (int k = 0; k < 20; ++k) {
- Type* type1 = T.Random();
- Type* type2 = T.Random();
- Type* type3 = T.Random();
- Type* function0 = T.Function0(type1, type2);
- Type* function1 = T.Function1(type1, type2, type3);
- Type* function2 = T.Function2(type1, type2, type3);
- CHECK_EQ(0, function0->AsFunction()->Arity());
- CHECK_EQ(1, function1->AsFunction()->Arity());
- CHECK_EQ(2, function2->AsFunction()->Arity());
- CheckEqual(type1, function0->AsFunction()->Result());
- CheckEqual(type1, function1->AsFunction()->Result());
- CheckEqual(type1, function2->AsFunction()->Result());
- CheckEqual(type2, function0->AsFunction()->Receiver());
- CheckEqual(type2, function1->AsFunction()->Receiver());
- CheckEqual(T.Any, function2->AsFunction()->Receiver());
- CheckEqual(type3, function1->AsFunction()->Parameter(0));
- CheckEqual(type2, function2->AsFunction()->Parameter(0));
- CheckEqual(type3, function2->AsFunction()->Parameter(1));
- }
- }
- }
-
- // Functionality & Injectivity: Function(Ts1) = Function(Ts2) iff Ts1 = Ts2
- for (int i = 0; i < 20; ++i) {
- for (int j = 0; j < 20; ++j) {
- for (int k = 0; k < 20; ++k) {
- Type* type1 = T.Random();
- Type* type2 = T.Random();
- Type* type3 = T.Random();
- Type* function01 = T.Function0(type1, type2);
- Type* function02 = T.Function0(type1, type3);
- Type* function03 = T.Function0(type3, type2);
- Type* function11 = T.Function1(type1, type2, type2);
- Type* function12 = T.Function1(type1, type2, type3);
- Type* function21 = T.Function2(type1, type2, type2);
- Type* function22 = T.Function2(type1, type2, type3);
- Type* function23 = T.Function2(type1, type3, type2);
- CHECK(Equal(function01, function02) == Equal(type2, type3));
- CHECK(Equal(function01, function03) == Equal(type1, type3));
- CHECK(Equal(function11, function12) == Equal(type2, type3));
- CHECK(Equal(function21, function22) == Equal(type2, type3));
- CHECK(Equal(function21, function23) == Equal(type2, type3));
- }
- }
- }
- }
-
void Of() {
// Constant(V)->Is(Of(V))
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
@@ -554,61 +346,6 @@ struct Tests {
}
}
- void NowOf() {
- // Constant(V)->NowIs(NowOf(V))
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- Handle<i::Object> value = *vt;
- Type* const_type = T.Constant(value);
- Type* nowof_type = T.NowOf(value);
- CHECK(const_type->NowIs(nowof_type));
- }
-
- // NowOf(V)->Is(Of(V))
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- Handle<i::Object> value = *vt;
- Type* nowof_type = T.NowOf(value);
- Type* of_type = T.Of(value);
- CHECK(nowof_type->Is(of_type));
- }
-
- // If NowOf(V)->NowIs(T), then Constant(V)->NowIs(T)
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- Handle<i::Object> value = *vt;
- Type* type = *it;
- Type* const_type = T.Constant(value);
- Type* nowof_type = T.NowOf(value);
- CHECK(!nowof_type->NowIs(type) || const_type->NowIs(type));
- }
- }
-
- // If Constant(V)->NowIs(T),
- // then NowOf(V)->NowIs(T) or T->Maybe(Constant(V))
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- Handle<i::Object> value = *vt;
- Type* type = *it;
- Type* const_type = T.Constant(value);
- Type* nowof_type = T.NowOf(value);
- CHECK(!const_type->NowIs(type) ||
- nowof_type->NowIs(type) || type->Maybe(const_type));
- }
- }
-
- // If Constant(V)->Is(T),
- // then NowOf(V)->Is(T) or T->Maybe(Constant(V))
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- Handle<i::Object> value = *vt;
- Type* type = *it;
- Type* const_type = T.Constant(value);
- Type* nowof_type = T.NowOf(value);
- CHECK(!const_type->Is(type) ||
- nowof_type->Is(type) || type->Maybe(const_type));
- }
- }
- }
-
void MinMax() {
// If b is regular numeric bitset, then Range(b->Min(), b->Max())->Is(b).
// TODO(neis): Need to ignore representation for this to be true.
@@ -784,31 +521,16 @@ struct Tests {
Type* type2 = *j;
CHECK(!type1->Is(type2) || this->IsBitset(type2) ||
this->IsUnion(type2) || this->IsUnion(type1) ||
- (type1->IsClass() && type2->IsClass()) ||
(type1->IsConstant() && type2->IsConstant()) ||
(type1->IsConstant() && type2->IsRange()) ||
(this->IsBitset(type1) && type2->IsRange()) ||
(type1->IsRange() && type2->IsRange()) ||
- (type1->IsContext() && type2->IsContext()) ||
- (type1->IsArray() && type2->IsArray()) ||
- (type1->IsFunction() && type2->IsFunction()) ||
!type1->IsInhabited());
}
}
}
void Is2() {
- // Class(M1)->Is(Class(M2)) iff M1 = M2
- for (MapIterator mt1 = T.maps.begin(); mt1 != T.maps.end(); ++mt1) {
- for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
- Handle<i::Map> map1 = *mt1;
- Handle<i::Map> map2 = *mt2;
- Type* class_type1 = T.Class(map1);
- Type* class_type2 = T.Class(map2);
- CHECK(class_type1->Is(class_type2) == (*map1 == *map2));
- }
- }
-
// Range(X1, Y1)->Is(Range(X2, Y2)) iff X1 >= X2 /\ Y1 <= Y2
for (ValueIterator i1 = T.integers.begin();
i1 != T.integers.end(); ++i1) {
@@ -843,43 +565,6 @@ struct Tests {
}
}
- // Context(T1)->Is(Context(T2)) iff T1 = T2
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- Type* outer1 = *it1;
- Type* outer2 = *it2;
- Type* type1 = T.Context(outer1);
- Type* type2 = T.Context(outer2);
- CHECK(type1->Is(type2) == outer1->Equals(outer2));
- }
- }
-
- // Array(T1)->Is(Array(T2)) iff T1 = T2
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- Type* element1 = *it1;
- Type* element2 = *it2;
- Type* type1 = T.Array1(element1);
- Type* type2 = T.Array1(element2);
- CHECK(type1->Is(type2) == element1->Equals(element2));
- }
- }
-
- // Function0(S1, T1)->Is(Function0(S2, T2)) iff S1 = S2 and T1 = T2
- for (TypeIterator i = T.types.begin(); i != T.types.end(); ++i) {
- for (TypeIterator j = T.types.begin(); j != T.types.end(); ++j) {
- Type* result1 = *i;
- Type* receiver1 = *j;
- Type* type1 = T.Function0(result1, receiver1);
- Type* result2 = T.Random();
- Type* receiver2 = T.Random();
- Type* type2 = T.Function0(result2, receiver2);
- CHECK(type1->Is(type2) ==
- (result1->Equals(result2) && receiver1->Equals(receiver2)));
- }
- }
-
-
// Range-specific subtyping
// If IsInteger(v) then Constant(v)->Is(Range(v, v)).
@@ -949,13 +634,6 @@ struct Tests {
// Subtyping between concrete structural types
- CheckSub(T.ObjectClass, T.Object);
- CheckSub(T.ArrayClass, T.OtherObject);
- CheckSub(T.UninitializedClass, T.Internal);
- CheckUnordered(T.ObjectClass, T.ArrayClass);
- CheckUnordered(T.UninitializedClass, T.Null);
- CheckUnordered(T.UninitializedClass, T.Undefined);
-
CheckSub(T.SmiConstant, T.SignedSmall);
CheckSub(T.SmiConstant, T.Signed32);
CheckSub(T.SmiConstant, T.Number);
@@ -969,175 +647,6 @@ struct Tests {
CheckUnordered(T.ObjectConstant1, T.ArrayConstant);
CheckUnordered(T.UninitializedConstant, T.Null);
CheckUnordered(T.UninitializedConstant, T.Undefined);
-
- CheckUnordered(T.ObjectConstant1, T.ObjectClass);
- CheckUnordered(T.ObjectConstant2, T.ObjectClass);
- CheckUnordered(T.ObjectConstant1, T.ArrayClass);
- CheckUnordered(T.ObjectConstant2, T.ArrayClass);
- CheckUnordered(T.ArrayConstant, T.ObjectClass);
-
- CheckSub(T.NumberArray, T.OtherObject);
- CheckSub(T.NumberArray, T.Receiver);
- CheckSub(T.NumberArray, T.Object);
- CheckUnordered(T.StringArray, T.AnyArray);
-
- CheckSub(T.MethodFunction, T.Object);
- CheckSub(T.NumberFunction1, T.Object);
- CheckUnordered(T.SignedFunction1, T.NumberFunction1);
- CheckUnordered(T.NumberFunction1, T.NumberFunction2);
- }
-
- void NowIs() {
- // Least Element (Bottom): None->NowIs(T)
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- Type* type = *it;
- CHECK(T.None->NowIs(type));
- }
-
- // Greatest Element (Top): T->NowIs(Any)
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- Type* type = *it;
- CHECK(type->NowIs(T.Any));
- }
-
- // Bottom Uniqueness: T->NowIs(None) implies T = None
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- Type* type = *it;
- if (type->NowIs(T.None)) CheckEqual(type, T.None);
- }
-
- // Top Uniqueness: Any->NowIs(T) implies T = Any
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- Type* type = *it;
- if (T.Any->NowIs(type)) CheckEqual(type, T.Any);
- }
-
- // Reflexivity: T->NowIs(T)
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- Type* type = *it;
- CHECK(type->NowIs(type));
- }
-
- // Transitivity: T1->NowIs(T2) and T2->NowIs(T3) implies T1->NowIs(T3)
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- Type* type1 = *it1;
- Type* type2 = *it2;
- Type* type3 = *it3;
- CHECK(!(type1->NowIs(type2) && type2->NowIs(type3)) ||
- type1->NowIs(type3));
- }
- }
- }
-
- // Antisymmetry: T1->NowIs(T2) and T2->NowIs(T1) iff T1 = T2
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- Type* type1 = *it1;
- Type* type2 = *it2;
- CHECK((type1->NowIs(type2) && type2->NowIs(type1)) ==
- Equal(type1, type2));
- }
- }
-
- // T1->Is(T2) implies T1->NowIs(T2)
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- Type* type1 = *it1;
- Type* type2 = *it2;
- CHECK(!type1->Is(type2) || type1->NowIs(type2));
- }
- }
-
- // Constant(V1)->NowIs(Constant(V2)) iff V1 = V2
- for (ValueIterator vt1 = T.values.begin(); vt1 != T.values.end(); ++vt1) {
- for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
- Handle<i::Object> value1 = *vt1;
- Handle<i::Object> value2 = *vt2;
- Type* const_type1 = T.Constant(value1);
- Type* const_type2 = T.Constant(value2);
- CHECK(const_type1->NowIs(const_type2) == (*value1 == *value2));
- }
- }
-
- // Class(M1)->NowIs(Class(M2)) iff M1 = M2
- for (MapIterator mt1 = T.maps.begin(); mt1 != T.maps.end(); ++mt1) {
- for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
- Handle<i::Map> map1 = *mt1;
- Handle<i::Map> map2 = *mt2;
- Type* class_type1 = T.Class(map1);
- Type* class_type2 = T.Class(map2);
- CHECK(class_type1->NowIs(class_type2) == (*map1 == *map2));
- }
- }
-
- // Constant(V)->NowIs(Class(M)) iff V has map M
- for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- Handle<i::Map> map = *mt;
- Handle<i::Object> value = *vt;
- Type* const_type = T.Constant(value);
- Type* class_type = T.Class(map);
- CHECK((value->IsHeapObject() &&
- i::HeapObject::cast(*value)->map() == *map)
- == const_type->NowIs(class_type));
- }
- }
-
- // Class(M)->NowIs(Constant(V)) never
- for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- Handle<i::Map> map = *mt;
- Handle<i::Object> value = *vt;
- Type* const_type = T.Constant(value);
- Type* class_type = T.Class(map);
- CHECK(!class_type->NowIs(const_type));
- }
- }
- }
-
- void Contains() {
- // T->Contains(V) iff Constant(V)->Is(T)
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- Type* type = *it;
- Handle<i::Object> value = *vt;
- Type* const_type = T.Constant(value);
- CHECK(type->Contains(value) == const_type->Is(type));
- }
- }
- }
-
- void NowContains() {
- // T->NowContains(V) iff Constant(V)->NowIs(T)
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- Type* type = *it;
- Handle<i::Object> value = *vt;
- Type* const_type = T.Constant(value);
- CHECK(type->NowContains(value) == const_type->NowIs(type));
- }
- }
-
- // T->Contains(V) implies T->NowContains(V)
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- Type* type = *it;
- Handle<i::Object> value = *vt;
- CHECK(!type->Contains(value) || type->NowContains(value));
- }
- }
-
- // NowOf(V)->Is(T) implies T->NowContains(V)
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- Type* type = *it;
- Handle<i::Object> value = *vt;
- Type* nowof_type = T.Of(value);
- CHECK(!nowof_type->NowIs(type) || type->NowContains(value));
- }
- }
}
void Maybe() {
@@ -1209,45 +718,6 @@ struct Tests {
}
}
- // Class(M1)->Maybe(Class(M2)) iff M1 = M2
- for (MapIterator mt1 = T.maps.begin(); mt1 != T.maps.end(); ++mt1) {
- for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
- Handle<i::Map> map1 = *mt1;
- Handle<i::Map> map2 = *mt2;
- Type* class_type1 = T.Class(map1);
- Type* class_type2 = T.Class(map2);
- CHECK(class_type1->Maybe(class_type2) == (*map1 == *map2));
- }
- }
-
- // Constant(V)->Maybe(Class(M)) never
- // This does NOT hold!
- /*
- for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- Handle<i::Map> map = *mt;
- Handle<i::Object> value = *vt;
- Type* const_type = T.Constant(value);
- Type* class_type = T.Class(map);
- CHECK(!const_type->Maybe(class_type));
- }
- }
- */
-
- // Class(M)->Maybe(Constant(V)) never
- // This does NOT hold!
- /*
- for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
- for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- Handle<i::Map> map = *mt;
- Handle<i::Object> value = *vt;
- Type* const_type = T.Constant(value);
- Type* class_type = T.Class(map);
- CHECK(!class_type->Maybe(const_type));
- }
- }
- */
-
// Basic types
CheckDisjoint(T.Boolean, T.Null);
CheckDisjoint(T.Undefined, T.Null);
@@ -1271,11 +741,6 @@ struct Tests {
CheckDisjoint(T.Object, T.Proxy);
// Structural types
- CheckOverlap(T.ObjectClass, T.Object);
- CheckOverlap(T.ArrayClass, T.Object);
- CheckOverlap(T.ObjectClass, T.ObjectClass);
- CheckOverlap(T.ArrayClass, T.ArrayClass);
- CheckDisjoint(T.ObjectClass, T.ArrayClass);
CheckOverlap(T.SmiConstant, T.SignedSmall);
CheckOverlap(T.SmiConstant, T.Signed32);
CheckOverlap(T.SmiConstant, T.Number);
@@ -1286,20 +751,6 @@ struct Tests {
CheckOverlap(T.ObjectConstant1, T.ObjectConstant1);
CheckDisjoint(T.ObjectConstant1, T.ObjectConstant2);
CheckDisjoint(T.ObjectConstant1, T.ArrayConstant);
- CheckOverlap(T.ObjectConstant1, T.ArrayClass);
- CheckOverlap(T.ObjectConstant2, T.ArrayClass);
- CheckOverlap(T.ArrayConstant, T.ObjectClass);
- CheckOverlap(T.NumberArray, T.Receiver);
- CheckDisjoint(T.NumberArray, T.AnyArray);
- CheckDisjoint(T.NumberArray, T.StringArray);
- CheckOverlap(T.MethodFunction, T.Object);
- CheckDisjoint(T.SignedFunction1, T.NumberFunction1);
- CheckDisjoint(T.SignedFunction1, T.NumberFunction2);
- CheckDisjoint(T.NumberFunction1, T.NumberFunction2);
- CheckDisjoint(T.SignedFunction1, T.MethodFunction);
- CheckOverlap(T.ObjectConstant1, T.ObjectClass); // !!!
- CheckOverlap(T.ObjectConstant2, T.ObjectClass); // !!!
- CheckOverlap(T.NumberClass, T.Intersect(T.Number, T.Tagged)); // !!!
}
void Union1() {
@@ -1435,49 +886,12 @@ struct Tests {
}
void Union4() {
- // Class-class
- CheckSub(T.Union(T.ObjectClass, T.ArrayClass), T.Object);
- CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.OtherObject);
- CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.Receiver);
- CheckDisjoint(T.Union(T.ObjectClass, T.ArrayClass), T.Number);
-
// Constant-constant
CheckSub(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.Object);
CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayConstant), T.OtherObject);
- CheckUnordered(
- T.Union(T.ObjectConstant1, T.ObjectConstant2), T.ObjectClass);
CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayConstant), T.OtherObject);
CheckDisjoint(
T.Union(T.ObjectConstant1, T.ArrayConstant), T.Number);
- CheckOverlap(
- T.Union(T.ObjectConstant1, T.ArrayConstant), T.ObjectClass); // !!!
-
- // Bitset-array
- CHECK(this->IsBitset(T.Union(T.AnyArray, T.Receiver)));
- CHECK(this->IsUnion(T.Union(T.NumberArray, T.Number)));
-
- CheckEqual(T.Union(T.AnyArray, T.Receiver), T.Receiver);
- CheckEqual(T.Union(T.AnyArray, T.OtherObject), T.OtherObject);
- CheckUnordered(T.Union(T.AnyArray, T.String), T.Receiver);
- CheckOverlap(T.Union(T.NumberArray, T.String), T.Object);
- CheckDisjoint(T.Union(T.NumberArray, T.String), T.Number);
-
- // Bitset-function
- CHECK(this->IsBitset(T.Union(T.MethodFunction, T.Object)));
- CHECK(this->IsUnion(T.Union(T.NumberFunction1, T.Number)));
-
- CheckEqual(T.Union(T.MethodFunction, T.Object), T.Object);
- CheckUnordered(T.Union(T.NumberFunction1, T.String), T.Object);
- CheckOverlap(T.Union(T.NumberFunction2, T.String), T.Object);
- CheckDisjoint(T.Union(T.NumberFunction1, T.String), T.Number);
-
- // Bitset-class
- CheckSub(T.Union(T.ObjectClass, T.SignedSmall),
- T.Union(T.Object, T.Number));
- CheckSub(T.Union(T.ObjectClass, T.OtherObject), T.Object);
- CheckUnordered(T.Union(T.ObjectClass, T.String), T.OtherObject);
- CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object);
- CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number);
// Bitset-constant
CheckSub(
@@ -1487,32 +901,6 @@ struct Tests {
CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object);
CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number);
- // Class-constant
- CheckSub(T.Union(T.ObjectConstant1, T.ArrayClass), T.Object);
- CheckUnordered(T.ObjectClass, T.Union(T.ObjectConstant1, T.ArrayClass));
- CheckSub(T.Union(T.ObjectConstant1, T.ArrayClass),
- T.Union(T.Receiver, T.Object));
- CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayClass), T.ArrayConstant);
- CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectConstant2);
- CheckOverlap(
- T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectClass); // !!!
-
- // Bitset-union
- CheckSub(
- T.NaN,
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number));
- CheckSub(
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Signed32),
- T.Union(T.ObjectConstant1, T.Union(T.Number, T.ArrayClass)));
-
- // Class-union
- CheckSub(
- T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)),
- T.Object);
- CheckEqual(
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant2), T.ArrayClass),
- T.Union(T.ArrayClass, T.ObjectConstant2));
-
// Constant-union
CheckEqual(
T.Union(
@@ -1524,27 +912,12 @@ struct Tests {
T.Union(
T.ObjectConstant2, T.Union(T.ArrayConstant, T.ObjectConstant1)));
- // Array-union
- CheckEqual(
- T.Union(T.AnyArray, T.Union(T.NumberArray, T.AnyArray)),
- T.Union(T.AnyArray, T.NumberArray));
- CheckSub(T.Union(T.AnyArray, T.NumberArray), T.OtherObject);
-
- // Function-union
- CheckEqual(
- T.Union(T.NumberFunction1, T.NumberFunction2),
- T.Union(T.NumberFunction2, T.NumberFunction1));
- CheckSub(T.Union(T.SignedFunction1, T.MethodFunction), T.Object);
-
// Union-union
CheckEqual(
T.Union(
T.Union(T.ObjectConstant2, T.ObjectConstant1),
T.Union(T.ObjectConstant1, T.ObjectConstant2)),
T.Union(T.ObjectConstant2, T.ObjectConstant1));
- CheckEqual(T.Union(T.Union(T.Number, T.ArrayClass),
- T.Union(T.SignedSmall, T.Receiver)),
- T.Union(T.Number, T.Receiver));
}
void Intersect() {
@@ -1580,48 +953,6 @@ struct Tests {
}
}
- // Associativity:
- // Intersect(T1, Intersect(T2, T3)) = Intersect(Intersect(T1, T2), T3)
- // This does NOT hold. For example:
- // (Class(..stringy1..) /\ Class(..stringy2..)) /\ Constant(..string..) =
- // None
- // Class(..stringy1..) /\ (Class(..stringy2..) /\ Constant(..string..)) =
- // Constant(..string..)
- /*
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- Type* type1 = *it1;
- Type* type2 = *it2;
- Type* type3 = *it3;
- Type* intersect12 = T.Intersect(type1, type2);
- Type* intersect23 = T.Intersect(type2, type3);
- Type* intersect1_23 = T.Intersect(type1, intersect23);
- Type* intersect12_3 = T.Intersect(intersect12, type3);
- CheckEqual(intersect1_23, intersect12_3);
- }
- }
- }
- */
-
- // Join: Intersect(T1, T2)->Is(T1) and Intersect(T1, T2)->Is(T2)
- // This does NOT hold. For example:
- // Class(..stringy..) /\ Constant(..string..) = Constant(..string..)
- // Currently, not even the disjunction holds:
- // Class(Internal/TaggedPtr) /\ (Any/Untagged \/ Context(..)) =
- // Class(Internal/TaggedPtr) \/ Context(..)
- /*
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- Type* type1 = *it1;
- Type* type2 = *it2;
- Type* intersect12 = T.Intersect(type1, type2);
- CHECK(intersect12->Is(type1));
- CHECK(intersect12->Is(type2));
- }
- }
- */
-
// Lower Boundedness: T1->Is(T2) implies Intersect(T1, T2) = T1
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
@@ -1632,46 +963,6 @@ struct Tests {
}
}
- // Monotonicity: T1->Is(T2) implies Intersect(T1, T3)->Is(Intersect(T2, T3))
- // This does NOT hold. For example:
- // Class(OtherObject/TaggedPtr) <= Any/TaggedPtr
- // Class(OtherObject/TaggedPtr) /\ Any/UntaggedInt1 = Class(..)
- // Any/TaggedPtr /\ Any/UntaggedInt1 = None
- /*
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- Type* type1 = *it1;
- Type* type2 = *it2;
- Type* type3 = *it3;
- Type* intersect13 = T.Intersect(type1, type3);
- Type* intersect23 = T.Intersect(type2, type3);
- CHECK(!type1->Is(type2) || intersect13->Is(intersect23));
- }
- }
- }
- */
-
- // Monotonicity: T1->Is(T3) or T2->Is(T3) implies Intersect(T1, T2)->Is(T3)
- // This does NOT hold. For example:
- // Class(..stringy..) <= Class(..stringy..)
- // Class(..stringy..) /\ Constant(..string..) = Constant(..string..)
- // Constant(..string..) </= Class(..stringy..)
- /*
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- Type* type1 = *it1;
- Type* type2 = *it2;
- Type* type3 = *it3;
- Type* intersect12 = T.Intersect(type1, type2);
- CHECK(!(type1->Is(type3) || type2->Is(type3)) ||
- intersect12->Is(type3));
- }
- }
- }
- */
-
// Monotonicity: T1->Is(T2) and T1->Is(T3) implies T1->Is(Intersect(T2, T3))
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
HandleScope scope(isolate);
@@ -1687,63 +978,6 @@ struct Tests {
}
}
- // Bitset-class
- CheckEqual(T.Intersect(T.ObjectClass, T.Object), T.ObjectClass);
- CheckEqual(T.Semantic(T.Intersect(T.ObjectClass, T.Number)), T.None);
-
- // Bitset-array
- CheckEqual(T.Intersect(T.NumberArray, T.Object), T.NumberArray);
- CheckEqual(T.Semantic(T.Intersect(T.AnyArray, T.Proxy)), T.None);
-
- // Bitset-function
- CheckEqual(T.Intersect(T.MethodFunction, T.Object), T.MethodFunction);
- CheckEqual(T.Semantic(T.Intersect(T.NumberFunction1, T.Proxy)), T.None);
-
- // Bitset-union
- CheckEqual(
- T.Intersect(T.Object, T.Union(T.ObjectConstant1, T.ObjectClass)),
- T.Union(T.ObjectConstant1, T.ObjectClass));
- CheckEqual(T.Semantic(T.Intersect(T.Union(T.ArrayClass, T.ObjectConstant1),
- T.Number)),
- T.None);
-
- // Class-constant
- CHECK(T.Intersect(T.ObjectConstant1, T.ObjectClass)->IsInhabited()); // !!!
- CHECK(T.Intersect(T.ArrayClass, T.ObjectConstant2)->IsInhabited());
-
- // Array-union
- CheckEqual(
- T.Intersect(T.NumberArray, T.Union(T.NumberArray, T.ArrayClass)),
- T.NumberArray);
- CheckEqual(
- T.Intersect(T.AnyArray, T.Union(T.Object, T.SmiConstant)),
- T.AnyArray);
- CHECK(
- !T.Intersect(T.Union(T.AnyArray, T.ArrayConstant), T.NumberArray)
- ->IsInhabited());
-
- // Function-union
- CheckEqual(
- T.Intersect(T.MethodFunction, T.Union(T.String, T.MethodFunction)),
- T.MethodFunction);
- CheckEqual(
- T.Intersect(T.NumberFunction1, T.Union(T.Object, T.SmiConstant)),
- T.NumberFunction1);
- CHECK(
- !T.Intersect(T.Union(T.MethodFunction, T.Name), T.NumberFunction2)
- ->IsInhabited());
-
- // Class-union
- CheckEqual(
- T.Intersect(T.ArrayClass, T.Union(T.ObjectConstant2, T.ArrayClass)),
- T.ArrayClass);
- CheckEqual(
- T.Intersect(T.ArrayClass, T.Union(T.Object, T.SmiConstant)),
- T.ArrayClass);
- CHECK(
- T.Intersect(T.Union(T.ObjectClass, T.ArrayConstant), T.ArrayClass)
- ->IsInhabited()); // !!!
-
// Constant-union
CheckEqual(
T.Intersect(
@@ -1752,34 +986,13 @@ struct Tests {
CheckEqual(
T.Intersect(T.SmiConstant, T.Union(T.Number, T.ObjectConstant2)),
T.SmiConstant);
- CHECK(
- T.Intersect(
- T.Union(T.ArrayConstant, T.ObjectClass), T.ObjectConstant1)
- ->IsInhabited()); // !!!
// Union-union
- CheckEqual(T.Intersect(T.Union(T.Number, T.ArrayClass),
- T.Union(T.SignedSmall, T.Receiver)),
- T.Union(T.SignedSmall, T.ArrayClass));
- CheckEqual(T.Intersect(T.Union(T.Number, T.ObjectClass),
- T.Union(T.Signed32, T.OtherObject)),
- T.Union(T.Signed32, T.ObjectClass));
CheckEqual(
T.Intersect(
T.Union(T.ObjectConstant2, T.ObjectConstant1),
T.Union(T.ObjectConstant1, T.ObjectConstant2)),
T.Union(T.ObjectConstant2, T.ObjectConstant1));
- CheckEqual(
- T.Intersect(
- T.Union(
- T.ArrayClass,
- T.Union(T.ObjectConstant2, T.ObjectConstant1)),
- T.Union(
- T.ObjectConstant1,
- T.Union(T.ArrayConstant, T.ObjectConstant2))),
- T.Union(
- T.ArrayConstant,
- T.Union(T.ObjectConstant2, T.ObjectConstant1))); // !!!
}
void Distributivity() {
@@ -1856,70 +1069,42 @@ struct Tests {
}
}
}
-
- void HTypeFromType() {
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- Type* type1 = *it1;
- Type* type2 = *it2;
- HType htype1 = HType::FromType(type1);
- HType htype2 = HType::FromType(type2);
- CHECK(!type1->Is(type2) || htype1.IsSubtypeOf(htype2));
- }
- }
- }
};
-TEST(IsSomeType_zone) { Tests().IsSomeType(); }
-
-TEST(PointwiseRepresentation_zone) { Tests().PointwiseRepresentation(); }
-
-TEST(BitsetType_zone) { Tests().Bitset(); }
-
-TEST(ClassType_zone) { Tests().Class(); }
-
-TEST(ConstantType_zone) { Tests().Constant(); }
-
-TEST(RangeType_zone) { Tests().Range(); }
-
-TEST(ArrayType_zone) { Tests().Array(); }
-
-TEST(FunctionType_zone) { Tests().Function(); }
-
-TEST(Of_zone) { Tests().Of(); }
+} // namespace
-TEST(NowOf_zone) { Tests().NowOf(); }
+TEST(IsSomeType) { Tests().IsSomeType(); }
-TEST(MinMax_zone) { Tests().MinMax(); }
+TEST(BitsetType) { Tests().Bitset(); }
-TEST(BitsetGlb_zone) { Tests().BitsetGlb(); }
+TEST(ConstantType) { Tests().Constant(); }
-TEST(BitsetLub_zone) { Tests().BitsetLub(); }
+TEST(RangeType) { Tests().Range(); }
-TEST(Is1_zone) { Tests().Is1(); }
+TEST(Of) { Tests().Of(); }
-TEST(Is2_zone) { Tests().Is2(); }
+TEST(MinMax) { Tests().MinMax(); }
-TEST(NowIs_zone) { Tests().NowIs(); }
+TEST(BitsetGlb) { Tests().BitsetGlb(); }
-TEST(Contains_zone) { Tests().Contains(); }
+TEST(BitsetLub) { Tests().BitsetLub(); }
-TEST(NowContains_zone) { Tests().NowContains(); }
+TEST(Is1) { Tests().Is1(); }
-TEST(Maybe_zone) { Tests().Maybe(); }
+TEST(Is2) { Tests().Is2(); }
-TEST(Union1_zone) { Tests().Union1(); }
+TEST(Maybe) { Tests().Maybe(); }
-TEST(Union2_zone) { Tests().Union2(); }
+TEST(Union1) { Tests().Union1(); }
-TEST(Union3_zone) { Tests().Union3(); }
+TEST(Union2) { Tests().Union2(); }
-TEST(Union4_zone) { Tests().Union4(); }
+TEST(Union3) { Tests().Union3(); }
-TEST(Intersect_zone) { Tests().Intersect(); }
+TEST(Union4) { Tests().Union4(); }
-TEST(Distributivity_zone) { Tests().Distributivity(); }
+TEST(Intersect) { Tests().Intersect(); }
-TEST(GetRange_zone) { Tests().GetRange(); }
+TEST(Distributivity) { Tests().Distributivity(); }
-TEST(HTypeFromType_zone) { Tests().HTypeFromType(); }
+TEST(GetRange) { Tests().GetRange(); }
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 6a1d87015b..dde26d2676 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -934,7 +934,7 @@ TEST(Regress436816) {
CHECK(object->map()->HasFastPointerLayout());
// Trigger GCs and heap verification.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
@@ -991,7 +991,7 @@ TEST(DescriptorArrayTrimming) {
// Call GC that should trim both |map|'s descriptor array and layout
// descriptor.
- CcTest::heap()->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// The unused tail of the layout descriptor is now "clean" again.
CHECK(map->layout_descriptor()->IsConsistentWithMap(*map, true));
@@ -1057,7 +1057,7 @@ TEST(DoScavenge) {
CHECK(isolate->heap()->new_space()->Contains(*obj));
// Do scavenge so that |obj| is moved to survivor space.
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::NEW_SPACE);
// Create temp object in the new space.
Handle<JSArray> temp = factory->NewJSArray(0, FAST_ELEMENTS);
@@ -1074,7 +1074,7 @@ TEST(DoScavenge) {
// Now |obj| moves to old gen and it has a double field that looks like
// a pointer to a from semi-space.
- CcTest::heap()->CollectGarbage(i::NEW_SPACE, "boom");
+ CcTest::CollectGarbage(i::NEW_SPACE);
CHECK(isolate->heap()->old_space()->Contains(*obj));
@@ -1155,14 +1155,14 @@ TEST(DoScavengeWithIncrementalWriteBarrier) {
CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
// Trigger GCs so that |obj| moves to old gen.
- heap->CollectGarbage(i::NEW_SPACE); // in survivor space now
- heap->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
CHECK(isolate->heap()->old_space()->Contains(*obj));
CHECK(isolate->heap()->old_space()->Contains(*obj_value));
CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
- heap->CollectGarbage(i::OLD_SPACE, "boom");
+ CcTest::CollectGarbage(i::OLD_SPACE);
// |obj_value| must be evacuated.
CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
@@ -1412,7 +1412,7 @@ static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
obj->RawFastDoublePropertyAtPut(double_field_index, boom_value);
// Trigger GC to evacuate all candidates.
- CcTest::heap()->CollectGarbage(NEW_SPACE, "boom");
+ CcTest::CollectGarbage(NEW_SPACE);
if (check_tagged_value) {
FieldIndex tagged_field_index =
@@ -1491,7 +1491,7 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
obj->RawFastDoublePropertyAtPut(double_field_index, boom_value);
// Trigger GC to evacuate all candidates.
- CcTest::heap()->CollectGarbage(OLD_SPACE, "boom");
+ CcTest::CollectGarbage(OLD_SPACE);
// Ensure that the values are still there and correct.
CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
diff --git a/deps/v8/test/cctest/test-unique.cc b/deps/v8/test/cctest/test-unique.cc
index d84279475d..980f0b6538 100644
--- a/deps/v8/test/cctest/test-unique.cc
+++ b/deps/v8/test/cctest/test-unique.cc
@@ -32,6 +32,13 @@
#include "src/crankshaft/unique.h"
#include "src/factory.h"
#include "src/global-handles.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index 00702a5e19..463672ccc8 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -33,6 +33,7 @@
#include "src/base/platform/platform.h"
#include "src/collector.h"
+#include "src/conversions.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 2d0e620d7d..eb5333f6c0 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -29,7 +29,16 @@
#include "src/v8.h"
+#include "src/factory.h"
#include "src/global-handles.h"
+#include "src/isolate.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
@@ -67,7 +76,6 @@ TEST(Weakness) {
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
HandleScope scope(isolate);
Handle<JSWeakMap> weakmap = AllocateJSWeakMap(isolate);
GlobalHandles* global_handles = isolate->global_handles();
@@ -96,7 +104,7 @@ TEST(Weakness) {
CHECK_EQ(2, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
// Force a full GC.
- heap->CollectAllGarbage(false);
+ CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(0, NumberOfWeakCalls);
CHECK_EQ(2, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
CHECK_EQ(
@@ -112,7 +120,7 @@ TEST(Weakness) {
}
CHECK(global_handles->IsWeak(key.location()));
- heap->CollectAllGarbage(false);
+ CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(1, NumberOfWeakCalls);
CHECK_EQ(0, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
CHECK_EQ(2,
@@ -124,7 +132,6 @@ TEST(Shrinking) {
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
HandleScope scope(isolate);
Handle<JSWeakMap> weakmap = AllocateJSWeakMap(isolate);
@@ -150,7 +157,7 @@ TEST(Shrinking) {
CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
CHECK_EQ(
0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
- heap->CollectAllGarbage(false);
+ CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(0, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
CHECK_EQ(
32, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
@@ -193,7 +200,7 @@ TEST(Regress2060a) {
// Force compacting garbage collection.
CHECK(FLAG_always_compact);
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
@@ -235,9 +242,9 @@ TEST(Regress2060b) {
// Force compacting garbage collection. The subsequent collections are used
// to verify that key references were actually updated.
CHECK(FLAG_always_compact);
- heap->CollectAllGarbage();
- heap->CollectAllGarbage();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
@@ -254,5 +261,5 @@ TEST(Regress399527) {
// The weak map is marked black here but leaving the handle scope will make
// the object unreachable. Aborting incremental marking will clear all the
// marking bits which makes the weak map garbage.
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index ec6945aec7..c51c70a421 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -29,7 +29,16 @@
#include "src/v8.h"
+#include "src/factory.h"
#include "src/global-handles.h"
+#include "src/isolate.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/factory.h -> src/objects-inl.h
+#include "src/objects-inl.h"
+// FIXME(mstarzinger, marja): This is weird, but required because of the missing
+// (disallowed) include: src/type-feedback-vector.h ->
+// src/type-feedback-vector-inl.h
+#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
@@ -70,7 +79,6 @@ TEST(WeakSet_Weakness) {
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
HandleScope scope(isolate);
Handle<JSWeakSet> weakset = AllocateJSWeakSet(isolate);
GlobalHandles* global_handles = isolate->global_handles();
@@ -95,7 +103,7 @@ TEST(WeakSet_Weakness) {
CHECK_EQ(1, ObjectHashTable::cast(weakset->table())->NumberOfElements());
// Force a full GC.
- heap->CollectAllGarbage(false);
+ CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(0, NumberOfWeakCalls);
CHECK_EQ(1, ObjectHashTable::cast(weakset->table())->NumberOfElements());
CHECK_EQ(
@@ -111,7 +119,7 @@ TEST(WeakSet_Weakness) {
}
CHECK(global_handles->IsWeak(key.location()));
- heap->CollectAllGarbage(false);
+ CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(1, NumberOfWeakCalls);
CHECK_EQ(0, ObjectHashTable::cast(weakset->table())->NumberOfElements());
CHECK_EQ(
@@ -123,7 +131,6 @@ TEST(WeakSet_Shrinking) {
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
HandleScope scope(isolate);
Handle<JSWeakSet> weakset = AllocateJSWeakSet(isolate);
@@ -149,7 +156,7 @@ TEST(WeakSet_Shrinking) {
CHECK_EQ(32, ObjectHashTable::cast(weakset->table())->NumberOfElements());
CHECK_EQ(
0, ObjectHashTable::cast(weakset->table())->NumberOfDeletedElements());
- heap->CollectAllGarbage(false);
+ CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(0, ObjectHashTable::cast(weakset->table())->NumberOfElements());
CHECK_EQ(
32, ObjectHashTable::cast(weakset->table())->NumberOfDeletedElements());
@@ -192,7 +199,7 @@ TEST(WeakSet_Regress2060a) {
// Force compacting garbage collection.
CHECK(FLAG_always_compact);
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
@@ -234,7 +241,7 @@ TEST(WeakSet_Regress2060b) {
// Force compacting garbage collection. The subsequent collections are used
// to verify that key references were actually updated.
CHECK(FLAG_always_compact);
- heap->CollectAllGarbage();
- heap->CollectAllGarbage();
- heap->CollectAllGarbage();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
diff --git a/deps/v8/test/cctest/types-fuzz.h b/deps/v8/test/cctest/types-fuzz.h
index 7bf9700b40..16bfd737ea 100644
--- a/deps/v8/test/cctest/types-fuzz.h
+++ b/deps/v8/test/cctest/types-fuzz.h
@@ -29,16 +29,18 @@
#define V8_TEST_CCTEST_TYPES_H_
#include "src/base/utils/random-number-generator.h"
+#include "src/factory.h"
+#include "src/isolate.h"
#include "src/v8.h"
namespace v8 {
namespace internal {
-
+namespace compiler {
class Types {
public:
Types(Zone* zone, Isolate* isolate, v8::base::RandomNumberGenerator* rng)
- : zone_(zone), isolate_(isolate), rng_(rng) {
+ : zone_(zone), rng_(rng) {
#define DECLARE_TYPE(name, value) \
name = Type::name(); \
types.push_back(name);
@@ -50,22 +52,6 @@ class Types {
object_map = isolate->factory()->NewMap(
JS_OBJECT_TYPE, JSObject::kHeaderSize);
- array_map = isolate->factory()->NewMap(
- JS_ARRAY_TYPE, JSArray::kSize);
- number_map = isolate->factory()->NewMap(
- HEAP_NUMBER_TYPE, HeapNumber::kSize);
- uninitialized_map = isolate->factory()->uninitialized_map();
- ObjectClass = Type::Class(object_map, zone);
- ArrayClass = Type::Class(array_map, zone);
- NumberClass = Type::Class(number_map, zone);
- UninitializedClass = Type::Class(uninitialized_map, zone);
-
- maps.push_back(object_map);
- maps.push_back(array_map);
- maps.push_back(uninitialized_map);
- for (MapVector::iterator it = maps.begin(); it != maps.end(); ++it) {
- types.push_back(Type::Class(*it, zone));
- }
smi = handle(Smi::FromInt(666), isolate);
signed32 = isolate->factory()->NewHeapNumber(0x40000000);
@@ -104,24 +90,12 @@ class Types {
Integer = Type::Range(-V8_INFINITY, +V8_INFINITY, zone);
- NumberArray = Type::Array(Number, zone);
- StringArray = Type::Array(String, zone);
- AnyArray = Type::Array(Any, zone);
-
- SignedFunction1 = Type::Function(SignedSmall, SignedSmall, zone);
- NumberFunction1 = Type::Function(Number, Number, zone);
- NumberFunction2 = Type::Function(Number, Number, Number, zone);
- MethodFunction = Type::Function(String, Object, 0, zone);
-
for (int i = 0; i < 30; ++i) {
types.push_back(Fuzz());
}
}
Handle<i::Map> object_map;
- Handle<i::Map> array_map;
- Handle<i::Map> number_map;
- Handle<i::Map> uninitialized_map;
Handle<i::Smi> smi;
Handle<i::HeapNumber> signed32;
@@ -134,17 +108,9 @@ class Types {
PROPER_BITSET_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
-#define DECLARE_TYPE(name, value) Type* Mask##name##ForTesting;
- MASK_BITSET_TYPE_LIST(DECLARE_TYPE)
-#undef DECLARE_TYPE
Type* SignedSmall;
Type* UnsignedSmall;
- Type* ObjectClass;
- Type* ArrayClass;
- Type* NumberClass;
- Type* UninitializedClass;
-
Type* SmiConstant;
Type* Signed32Constant;
Type* ObjectConstant1;
@@ -154,62 +120,25 @@ class Types {
Type* Integer;
- Type* NumberArray;
- Type* StringArray;
- Type* AnyArray;
-
- Type* SignedFunction1;
- Type* NumberFunction1;
- Type* NumberFunction2;
- Type* MethodFunction;
-
typedef std::vector<Type*> TypeVector;
- typedef std::vector<Handle<i::Map> > MapVector;
typedef std::vector<Handle<i::Object> > ValueVector;
TypeVector types;
- MapVector maps;
ValueVector values;
ValueVector integers; // "Integer" values used for range limits.
Type* Of(Handle<i::Object> value) { return Type::Of(value, zone_); }
- Type* NowOf(Handle<i::Object> value) { return Type::NowOf(value, zone_); }
-
- Type* Class(Handle<i::Map> map) { return Type::Class(map, zone_); }
-
Type* Constant(Handle<i::Object> value) {
return Type::Constant(value, zone_);
}
Type* Range(double min, double max) { return Type::Range(min, max, zone_); }
- Type* Context(Type* outer) { return Type::Context(outer, zone_); }
-
- Type* Array1(Type* element) { return Type::Array(element, zone_); }
-
- Type* Function0(Type* result, Type* receiver) {
- return Type::Function(result, receiver, 0, zone_);
- }
-
- Type* Function1(Type* result, Type* receiver, Type* arg) {
- Type* type = Type::Function(result, receiver, 1, zone_);
- type->AsFunction()->InitParameter(0, arg);
- return type;
- }
-
- Type* Function2(Type* result, Type* arg1, Type* arg2) {
- return Type::Function(result, arg1, arg2, zone_);
- }
-
Type* Union(Type* t1, Type* t2) { return Type::Union(t1, t2, zone_); }
Type* Intersect(Type* t1, Type* t2) { return Type::Intersect(t1, t2, zone_); }
- Type* Representation(Type* t) { return Type::Representation(t, zone_); }
-
- Type* Semantic(Type* t) { return Type::Semantic(t, zone_); }
-
Type* Random() {
return types[rng_->NextInt(static_cast<int>(types.size()))];
}
@@ -239,15 +168,11 @@ class Types {
}
return result;
}
- case 1: { // class
- int i = rng_->NextInt(static_cast<int>(maps.size()));
- return Type::Class(maps[i], zone_);
- }
- case 2: { // constant
+ case 1: { // constant
int i = rng_->NextInt(static_cast<int>(values.size()));
return Type::Constant(values[i], zone_);
}
- case 3: { // range
+ case 2: { // range
int i = rng_->NextInt(static_cast<int>(integers.size()));
int j = rng_->NextInt(static_cast<int>(integers.size()));
double min = integers[i]->Number();
@@ -255,42 +180,6 @@ class Types {
if (min > max) std::swap(min, max);
return Type::Range(min, max, zone_);
}
- case 4: { // context
- int depth = rng_->NextInt(3);
- Type* type = Type::Internal();
- for (int i = 0; i < depth; ++i) type = Type::Context(type, zone_);
- return type;
- }
- case 5: { // array
- Type* element = Fuzz(depth / 2);
- return Type::Array(element, zone_);
- }
- case 6:
- case 7: { // function
- Type* result = Fuzz(depth / 2);
- Type* receiver = Fuzz(depth / 2);
- int arity = rng_->NextInt(3);
- Type* type = Type::Function(result, receiver, arity, zone_);
- for (int i = 0; i < type->AsFunction()->Arity(); ++i) {
- Type* parameter = Fuzz(depth / 2);
- type->AsFunction()->InitParameter(i, parameter);
- }
- return type;
- }
- case 8: { // simd
- static const int num_simd_types =
- #define COUNT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) +1
- SIMD128_TYPES(COUNT_SIMD_TYPE);
- #undef COUNT_SIMD_TYPE
- Type* (*simd_constructors[num_simd_types])(Isolate*, Zone*) = {
- #define COUNT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
- &Type::Name,
- SIMD128_TYPES(COUNT_SIMD_TYPE)
- #undef COUNT_SIMD_TYPE
- };
- return simd_constructors[rng_->NextInt(num_simd_types)](isolate_,
- zone_);
- }
default: { // union
int n = rng_->NextInt(10);
Type* type = None;
@@ -308,11 +197,10 @@ class Types {
private:
Zone* zone_;
- Isolate* isolate_;
v8::base::RandomNumberGenerator* rng_;
};
-
+} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index a978bdf1f7..3d8d484295 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -11,8 +11,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
-#include "test/cctest/wasm/test-signatures.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
// If the target architecture is 64-bit, enable all tests.
#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
@@ -32,7 +32,6 @@
#define asu64(x) static_cast<uint64_t>(x)
#define B2(a, b) kExprBlock, a, b, kExprEnd
-#define B1(a) kExprBlock, a, kExprEnd
// Can't bridge macro land with nested macros.
#if V8_TARGET_ARCH_MIPS
@@ -835,8 +834,8 @@ WASM_EXEC_TEST(CallI64Parameter) {
WasmRunner<int32_t> r(&module);
BUILD(
r,
- WASM_I32_CONVERT_I64(WASM_CALL_FUNCTIONN(
- 19, index, WASM_I64V_9(0xbcd12340000000b),
+ WASM_I32_CONVERT_I64(WASM_CALL_FUNCTION(
+ index, WASM_I64V_9(0xbcd12340000000b),
WASM_I64V_9(0xbcd12340000000c), WASM_I32V_1(0xd),
WASM_I32_CONVERT_I64(WASM_I64V_9(0xbcd12340000000e)),
WASM_I64V_9(0xbcd12340000000f), WASM_I64V_10(0xbcd1234000000010),
@@ -1119,7 +1118,7 @@ WASM_EXEC_TEST(Call_Int64Sub) {
// Build the caller function.
WasmRunner<int64_t> r(&module, MachineType::Int64(), MachineType::Int64());
- BUILD(r, WASM_CALL_FUNCTION2(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
@@ -1154,7 +1153,11 @@ WASM_EXEC_TEST(LoadStoreI64_sx) {
ZERO_OFFSET, // --
kExprI64StoreMem, // --
ZERO_ALIGNMENT, // --
- ZERO_OFFSET // --
+ ZERO_OFFSET, // --
+ kExprI8Const, 0, // --
+ loads[m], // --
+ ZERO_ALIGNMENT, // --
+ ZERO_OFFSET, // --
};
r.Build(code, code + arraysize(code));
@@ -1256,10 +1259,9 @@ WASM_EXEC_TEST(F64ReinterpretI64) {
int64_t* memory = module.AddMemoryElems<int64_t>(8);
WasmRunner<int64_t> r(&module, MachineType::Int64());
- BUILD(r,
- WASM_BLOCK(WASM_STORE_MEM(MachineType::Float64(), WASM_ZERO,
- WASM_F64_REINTERPRET_I64(WASM_GET_LOCAL(0))),
- WASM_GET_LOCAL(0)));
+ BUILD(r, WASM_STORE_MEM(MachineType::Float64(), WASM_ZERO,
+ WASM_F64_REINTERPRET_I64(WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) {
int64_t expected = static_cast<int64_t>(*i) * 0x300010001;
@@ -1320,18 +1322,17 @@ WASM_EXEC_TEST(MemI64_Sum) {
WasmRunner<uint64_t> r(&module, MachineType::Int32());
const byte kSum = r.AllocateLocal(kAstI64);
- BUILD(r,
- WASM_BLOCK(
- WASM_WHILE(
- WASM_GET_LOCAL(0),
- WASM_BLOCK(
- WASM_SET_LOCAL(
- kSum, WASM_I64_ADD(WASM_GET_LOCAL(kSum),
- WASM_LOAD_MEM(MachineType::Int64(),
- WASM_GET_LOCAL(0)))),
- WASM_SET_LOCAL(
- 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(8))))),
- WASM_GET_LOCAL(1)));
+ BUILD(
+ r,
+ WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_BLOCK(
+ WASM_SET_LOCAL(kSum,
+ WASM_I64_ADD(WASM_GET_LOCAL(kSum),
+ WASM_LOAD_MEM(MachineType::Int64(),
+ WASM_GET_LOCAL(0)))),
+ WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(8))))),
+ WASM_GET_LOCAL(1));
// Run 4 trials.
for (int i = 0; i < 3; i++) {
@@ -1353,7 +1354,8 @@ WASM_EXEC_TEST(StoreMemI64_alignment) {
for (byte i = 0; i <= 3; i++) {
WasmRunner<int64_t> r(&module, MachineType::Int64());
BUILD(r, WASM_STORE_MEM_ALIGNMENT(MachineType::Int64(), WASM_ZERO, i,
- WASM_GET_LOCAL(0)));
+ WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0));
module.RandomizeMemory(1111);
module.WriteMemory<int64_t>(&memory[0], 0);
@@ -1371,10 +1373,10 @@ WASM_EXEC_TEST(I64Global) {
int64_t* global = module.AddGlobal<int64_t>(kAstI64);
WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global + p0
- BUILD(r, B2(WASM_SET_GLOBAL(
- 0, WASM_I64_AND(WASM_GET_GLOBAL(0),
- WASM_I64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
- WASM_ZERO));
+ BUILD(r, WASM_SET_GLOBAL(
+ 0, WASM_I64_AND(WASM_GET_GLOBAL(0),
+ WASM_I64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
+ WASM_ZERO);
module.WriteMemory<int64_t>(global, 0xFFFFFFFFFFFFFFFFLL);
for (int i = 9; i < 444444; i += 111111) {
@@ -1464,7 +1466,7 @@ static void CompileCallIndirectMany(LocalType param) {
// with many many parameters.
TestSignatures sigs;
for (byte num_params = 0; num_params < 40; num_params++) {
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
HandleScope scope(CcTest::InitIsolateOnce());
TestingModule module(kExecuteCompiled);
@@ -1477,11 +1479,11 @@ static void CompileCallIndirectMany(LocalType param) {
WasmFunctionCompiler t(sig, &module);
std::vector<byte> code;
- ADD_CODE(code, kExprI8Const, 0);
for (byte p = 0; p < num_params; p++) {
ADD_CODE(code, kExprGetLocal, p);
}
- ADD_CODE(code, kExprCallIndirect, static_cast<byte>(num_params), 1);
+ ADD_CODE(code, kExprI8Const, 0);
+ ADD_CODE(code, kExprCallIndirect, 1);
t.Build(&code[0], &code[0] + code.size());
t.Compile();
@@ -1504,7 +1506,7 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
int num_params = static_cast<int>(arraysize(mixed)) - start;
for (int which = 0; which < num_params; which++) {
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
TestingModule module(execution_mode);
module.AddMemory(1024);
@@ -1540,8 +1542,7 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
}
// Call the selector function.
- ADD_CODE(code, kExprCallFunction, static_cast<byte>(num_params),
- static_cast<byte>(index));
+ ADD_CODE(code, kExprCallFunction, static_cast<byte>(index));
// Store the result in memory.
ADD_CODE(code,
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
index 4d39dd6ff7..007fc7a864 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
@@ -12,8 +12,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
-#include "test/cctest/wasm/test-signatures.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
using namespace v8::base;
using namespace v8::internal;
@@ -38,8 +38,9 @@ uint32_t GetMatchingRelocInfoCount(Handle<Code> code, RelocInfo::Mode rmode) {
}
WASM_EXEC_TEST(Int32AsmjsDivS) {
- WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
- MachineType::Int32());
+ TestingModule module(execution_mode);
+ module.origin = kAsmJsOrigin;
+ WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
BUILD(r, WASM_BINOP(kExprI32AsmjsDivS, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(0, 100));
@@ -50,8 +51,9 @@ WASM_EXEC_TEST(Int32AsmjsDivS) {
}
WASM_EXEC_TEST(Int32AsmjsRemS) {
- WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
- MachineType::Int32());
+ TestingModule module(execution_mode);
+ module.origin = kAsmJsOrigin;
+ WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
BUILD(r, WASM_BINOP(kExprI32AsmjsRemS, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(33, r.Call(133, 100));
@@ -62,8 +64,9 @@ WASM_EXEC_TEST(Int32AsmjsRemS) {
}
WASM_EXEC_TEST(Int32AsmjsDivU) {
- WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
- MachineType::Int32());
+ TestingModule module(execution_mode);
+ module.origin = kAsmJsOrigin;
+ WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
BUILD(r, WASM_BINOP(kExprI32AsmjsDivU, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(0, 100));
@@ -74,8 +77,9 @@ WASM_EXEC_TEST(Int32AsmjsDivU) {
}
WASM_EXEC_TEST(Int32AsmjsRemU) {
- WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
- MachineType::Int32());
+ TestingModule module(execution_mode);
+ module.origin = kAsmJsOrigin;
+ WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
BUILD(r, WASM_BINOP(kExprI32AsmjsRemU, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(17, r.Call(217, 100));
@@ -86,7 +90,9 @@ WASM_EXEC_TEST(Int32AsmjsRemU) {
}
WASM_EXEC_TEST(I32AsmjsSConvertF32) {
- WasmRunner<int32_t> r(execution_mode, MachineType::Float32());
+ TestingModule module(execution_mode);
+ module.origin = kAsmJsOrigin;
+ WasmRunner<int32_t> r(&module, MachineType::Float32());
BUILD(r, WASM_UNOP(kExprI32AsmjsSConvertF32, WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
@@ -96,7 +102,9 @@ WASM_EXEC_TEST(I32AsmjsSConvertF32) {
}
WASM_EXEC_TEST(I32AsmjsSConvertF64) {
- WasmRunner<int32_t> r(execution_mode, MachineType::Float64());
+ TestingModule module(execution_mode);
+ module.origin = kAsmJsOrigin;
+ WasmRunner<int32_t> r(&module, MachineType::Float64());
BUILD(r, WASM_UNOP(kExprI32AsmjsSConvertF64, WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
@@ -106,7 +114,9 @@ WASM_EXEC_TEST(I32AsmjsSConvertF64) {
}
WASM_EXEC_TEST(I32AsmjsUConvertF32) {
- WasmRunner<uint32_t> r(execution_mode, MachineType::Float32());
+ TestingModule module(execution_mode);
+ module.origin = kAsmJsOrigin;
+ WasmRunner<uint32_t> r(&module, MachineType::Float32());
BUILD(r, WASM_UNOP(kExprI32AsmjsUConvertF32, WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
@@ -116,7 +126,9 @@ WASM_EXEC_TEST(I32AsmjsUConvertF32) {
}
WASM_EXEC_TEST(I32AsmjsUConvertF64) {
- WasmRunner<uint32_t> r(execution_mode, MachineType::Float64());
+ TestingModule module(execution_mode);
+ module.origin = kAsmJsOrigin;
+ WasmRunner<uint32_t> r(&module, MachineType::Float64());
BUILD(r, WASM_UNOP(kExprI32AsmjsUConvertF64, WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
@@ -127,6 +139,7 @@ WASM_EXEC_TEST(I32AsmjsUConvertF64) {
WASM_EXEC_TEST(LoadMemI32_oob_asm) {
TestingModule module(execution_mode);
+ module.origin = kAsmJsOrigin;
int32_t* memory = module.AddMemoryElems<int32_t>(8);
WasmRunner<int32_t> r(&module, MachineType::Uint32());
module.RandomizeMemory(1112);
@@ -147,6 +160,7 @@ WASM_EXEC_TEST(LoadMemI32_oob_asm) {
WASM_EXEC_TEST(LoadMemF32_oob_asm) {
TestingModule module(execution_mode);
+ module.origin = kAsmJsOrigin;
float* memory = module.AddMemoryElems<float>(8);
WasmRunner<float> r(&module, MachineType::Uint32());
module.RandomizeMemory(1112);
@@ -167,6 +181,7 @@ WASM_EXEC_TEST(LoadMemF32_oob_asm) {
WASM_EXEC_TEST(LoadMemF64_oob_asm) {
TestingModule module(execution_mode);
+ module.origin = kAsmJsOrigin;
double* memory = module.AddMemoryElems<double>(8);
WasmRunner<double> r(&module, MachineType::Uint32());
module.RandomizeMemory(1112);
@@ -189,6 +204,7 @@ WASM_EXEC_TEST(LoadMemF64_oob_asm) {
WASM_EXEC_TEST(StoreMemI32_oob_asm) {
TestingModule module(execution_mode);
+ module.origin = kAsmJsOrigin;
int32_t* memory = module.AddMemoryElems<int32_t>(8);
WasmRunner<int32_t> r(&module, MachineType::Uint32(), MachineType::Uint32());
module.RandomizeMemory(1112);
@@ -224,6 +240,7 @@ WASM_EXEC_TEST(StoreMemI32_oob_asm) {
#define INT_LOAD_TEST(OP_TYPE) \
TEST(RunWasm_AsmCheckedRelocInfo##OP_TYPE) { \
TestingModule module(kExecuteCompiled); \
+ module.origin = kAsmJsOrigin; \
WasmRunner<int32_t> r(&module, MachineType::Uint32()); \
BUILD(r, WASM_UNOP(OP_TYPE, WASM_GET_LOCAL(0))); \
CHECK_EQ(1, GetMatchingRelocInfoCount(module.instance->function_code[0], \
@@ -238,6 +255,7 @@ FOREACH_INT_CHECKED_LOAD_OP(INT_LOAD_TEST)
#define INT_STORE_TEST(OP_TYPE) \
TEST(RunWasm_AsmCheckedRelocInfo##OP_TYPE) { \
TestingModule module(kExecuteCompiled); \
+ module.origin = kAsmJsOrigin; \
WasmRunner<int32_t> r(&module, MachineType::Uint32(), \
MachineType::Uint32()); \
BUILD(r, WASM_BINOP(OP_TYPE, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))); \
@@ -252,6 +270,7 @@ FOREACH_INT_CHECKED_STORE_OP(INT_STORE_TEST)
TEST(RunWasm_AsmCheckedLoadFloat32RelocInfo) {
TestingModule module(kExecuteCompiled);
+ module.origin = kAsmJsOrigin;
WasmRunner<float> r(&module, MachineType::Uint32());
BUILD(r, WASM_UNOP(kExprF32AsmjsLoadMem, WASM_GET_LOCAL(0)));
@@ -263,6 +282,7 @@ TEST(RunWasm_AsmCheckedLoadFloat32RelocInfo) {
TEST(RunWasm_AsmCheckedStoreFloat32RelocInfo) {
TestingModule module(kExecuteCompiled);
+ module.origin = kAsmJsOrigin;
WasmRunner<float> r(&module, MachineType::Uint32(), MachineType::Float32());
BUILD(r, WASM_BINOP(kExprF32AsmjsStoreMem, WASM_GET_LOCAL(0),
WASM_GET_LOCAL(1)));
@@ -275,6 +295,7 @@ TEST(RunWasm_AsmCheckedStoreFloat32RelocInfo) {
TEST(RunWasm_AsmCheckedLoadFloat64RelocInfo) {
TestingModule module(kExecuteCompiled);
+ module.origin = kAsmJsOrigin;
WasmRunner<double> r(&module, MachineType::Uint32());
BUILD(r, WASM_UNOP(kExprF64AsmjsLoadMem, WASM_GET_LOCAL(0)));
@@ -286,6 +307,7 @@ TEST(RunWasm_AsmCheckedLoadFloat64RelocInfo) {
TEST(RunWasm_AsmCheckedStoreFloat64RelocInfo) {
TestingModule module(kExecuteCompiled);
+ module.origin = kAsmJsOrigin;
WasmRunner<double> r(&module, MachineType::Uint32(), MachineType::Float64());
BUILD(r, WASM_BINOP(kExprF64AsmjsStoreMem, WASM_GET_LOCAL(0),
WASM_GET_LOCAL(1)));
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index c4e03b50d6..0489d016d7 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -14,8 +14,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
-#include "test/cctest/wasm/test-signatures.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
using namespace v8::base;
using namespace v8::internal;
@@ -36,7 +36,7 @@ TEST(Run_WasmInt8Const_i) {
TEST(Run_WasmIfElse) {
WasmRunner<int32_t> r(kExecuteInterpreted, MachineType::Int32());
- BUILD(r, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_I8(9), WASM_I8(10)));
+ BUILD(r, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_I8(9), WASM_I8(10)));
CHECK_EQ(10, r.Call(0));
CHECK_EQ(9, r.Call(1));
}
@@ -65,31 +65,39 @@ TEST(Run_WasmNopsN) {
TEST(Run_WasmConstsN) {
const int kMaxConsts = 10;
- byte code[kMaxConsts * 2];
+ byte code[kMaxConsts * 3];
+ int32_t expected = 0;
for (int count = 1; count < kMaxConsts; count++) {
for (int i = 0; i < count; i++) {
- code[i * 2] = kExprI8Const;
- code[i * 2 + 1] = static_cast<byte>(count * 10 + i);
+ byte val = static_cast<byte>(count * 10 + i);
+ code[i * 3] = kExprI8Const;
+ code[i * 3 + 1] = val;
+ if (i == (count - 1)) {
+ code[i * 3 + 2] = kExprNop;
+ expected = val;
+ } else {
+ code[i * 3 + 2] = kExprDrop;
+ }
}
- byte expected = static_cast<byte>(count * 11 - 1);
WasmRunner<int32_t> r(kExecuteInterpreted);
- r.Build(code, code + (count * 2));
+ r.Build(code, code + (count * 3));
CHECK_EQ(expected, r.Call());
}
}
TEST(Run_WasmBlocksN) {
const int kMaxNops = 10;
- const int kExtra = 4;
+ const int kExtra = 5;
byte code[kMaxNops + kExtra];
for (int nops = 0; nops < kMaxNops; nops++) {
byte expected = static_cast<byte>(30 + nops);
memset(code, kExprNop, sizeof(code));
code[0] = kExprBlock;
- code[1 + nops] = kExprI8Const;
- code[1 + nops + 1] = expected;
- code[1 + nops + 2] = kExprEnd;
+ code[1] = kLocalI32;
+ code[2 + nops] = kExprI8Const;
+ code[2 + nops + 1] = expected;
+ code[2 + nops + 2] = kExprEnd;
WasmRunner<int32_t> r(kExecuteInterpreted);
r.Build(code, code + nops + kExtra);
@@ -106,14 +114,14 @@ TEST(Run_WasmBlockBreakN) {
for (int index = 0; index < nops; index++) {
memset(code, kExprNop, sizeof(code));
code[0] = kExprBlock;
+ code[1] = kLocalI32;
code[sizeof(code) - 1] = kExprEnd;
int expected = nops * 11 + index;
- code[1 + index + 0] = kExprI8Const;
- code[1 + index + 1] = static_cast<byte>(expected);
- code[1 + index + 2] = kExprBr;
- code[1 + index + 3] = ARITY_1;
- code[1 + index + 4] = 0;
+ code[2 + index + 0] = kExprI8Const;
+ code[2 + index + 1] = static_cast<byte>(expected);
+ code[2 + index + 2] = kExprBr;
+ code[2 + index + 3] = 0;
WasmRunner<int32_t> r(kExecuteInterpreted);
r.Build(code, code + kMaxNops + kExtra);
@@ -126,10 +134,10 @@ TEST(Run_Wasm_nested_ifs_i) {
WasmRunner<int32_t> r(kExecuteInterpreted, MachineType::Int32(),
MachineType::Int32());
- BUILD(r, WASM_IF_ELSE(
+ BUILD(r, WASM_IF_ELSE_I(
WASM_GET_LOCAL(0),
- WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_I8(11), WASM_I8(12)),
- WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_I8(13), WASM_I8(14))));
+ WASM_IF_ELSE_I(WASM_GET_LOCAL(1), WASM_I8(11), WASM_I8(12)),
+ WASM_IF_ELSE_I(WASM_GET_LOCAL(1), WASM_I8(13), WASM_I8(14))));
CHECK_EQ(11, r.Call(1, 1));
CHECK_EQ(12, r.Call(1, 0));
@@ -286,6 +294,45 @@ TEST(Breakpoint_I32And_disable) {
}
}
+TEST(GrowMemory) {
+ TestingModule module(kExecuteInterpreted);
+ WasmRunner<int32_t> r(&module, MachineType::Uint32());
+ module.AddMemory(WasmModule::kPageSize);
+ BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
+ CHECK_EQ(1, r.Call(1));
+}
+
+TEST(GrowMemoryPreservesData) {
+ int32_t index = 16;
+ int32_t value = 2335;
+ TestingModule module(kExecuteInterpreted);
+ WasmRunner<int32_t> r(&module, MachineType::Uint32());
+ module.AddMemory(WasmModule::kPageSize);
+ BUILD(r, WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index),
+ WASM_I32V(value)),
+ WASM_GROW_MEMORY(WASM_GET_LOCAL(0)), WASM_DROP,
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V(index)));
+ CHECK_EQ(value, r.Call(1));
+}
+
+TEST(GrowMemoryInvalidSize) {
+ {
+ // Grow memory by an invalid amount without initial memory.
+ TestingModule module(kExecuteInterpreted);
+ WasmRunner<int32_t> r(&module, MachineType::Uint32());
+ BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
+ CHECK_EQ(-1, r.Call(1048575));
+ }
+ {
+ // Grow memory by an invalid amount without initial memory.
+ TestingModule module(kExecuteInterpreted);
+ WasmRunner<int32_t> r(&module, MachineType::Uint32());
+ module.AddMemory(WasmModule::kPageSize);
+ BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
+ CHECK_EQ(-1, r.Call(1048575));
+ }
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index 9dfba74ecc..c0307e0511 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -11,8 +11,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
-#include "test/cctest/wasm/test-signatures.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
using namespace v8::base;
using namespace v8::internal;
@@ -152,7 +152,7 @@ TEST(Run_CallJS_Add_jswrapped) {
WasmFunctionCompiler t(sigs.i_i(), &module);
uint32_t js_index =
module.AddJsFunction(sigs.i_i(), "(function(a) { return a + 99; })");
- BUILD(t, WASM_CALL_FUNCTION1(js_index, WASM_GET_LOCAL(0)));
+ BUILD(t, WASM_CALL_FUNCTION(js_index, WASM_GET_LOCAL(0)));
Handle<JSFunction> jsfunc = module.WrapCode(t.CompileAndAdd());
@@ -182,8 +182,7 @@ void RunJSSelectTest(int which) {
ADD_CODE(code, WASM_F64(inputs.arg_d(i)));
}
- ADD_CODE(code, kExprCallFunction, static_cast<byte>(num_params),
- static_cast<byte>(js_index));
+ ADD_CODE(code, kExprCallFunction, static_cast<byte>(js_index));
size_t end = code.size();
code.push_back(0);
@@ -420,7 +419,7 @@ void RunJSSelectAlignTest(int num_args, int num_params) {
ADD_CODE(code, WASM_GET_LOCAL(i));
}
- ADD_CODE(code, kExprCallFunction, static_cast<byte>(num_params), 0);
+ ADD_CODE(code, kExprCallFunction, 0);
size_t end = code.size();
code.push_back(0);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 8449a52ff3..b358208bc3 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -5,15 +5,15 @@
#include <stdlib.h>
#include <string.h>
-#include "src/wasm/encoder.h"
#include "src/wasm/module-decoder.h"
-#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-macro-gen.h"
+#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "test/cctest/cctest.h"
-#include "test/cctest/wasm/test-signatures.h"
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-module-runner.h"
using namespace v8::base;
using namespace v8::internal;
@@ -28,12 +28,26 @@ void TestModule(Zone* zone, WasmModuleBuilder* builder,
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
- WasmJs::InstallWasmFunctionMap(isolate, isolate->native_context());
- int32_t result =
- testing::CompileAndRunWasmModule(isolate, buffer.begin(), buffer.end());
+ testing::SetupIsolateForWasmModule(isolate);
+ int32_t result = testing::CompileAndRunWasmModule(
+ isolate, buffer.begin(), buffer.end(), ModuleOrigin::kWasmOrigin);
CHECK_EQ(expected_result, result);
}
+void TestModuleException(Zone* zone, WasmModuleBuilder* builder) {
+ ZoneBuffer buffer(zone);
+ builder->WriteTo(buffer);
+
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+ v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ testing::CompileAndRunWasmModule(isolate, buffer.begin(), buffer.end(),
+ ModuleOrigin::kWasmOrigin);
+ CHECK(try_catch.HasCaught());
+ isolate->clear_pending_exception();
+}
+
void ExportAs(WasmFunctionBuilder* f, const char* name) {
f->SetExported();
f->SetName(name, static_cast<int>(strlen(name)));
@@ -49,13 +63,11 @@ void ExportAsMain(WasmFunctionBuilder* f) {
TEST(Run_WasmModule_Return114) {
static const int32_t kReturnValue = 114;
TestSignatures sigs;
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* f = builder->FunctionAt(f_index);
- f->SetSignature(sigs.i_v());
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
byte code[] = {WASM_I8(kReturnValue)};
f->EmitCode(code, sizeof(code));
@@ -63,136 +75,119 @@ TEST(Run_WasmModule_Return114) {
}
TEST(Run_WasmModule_CallAdd) {
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
TestSignatures sigs;
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f1_index = builder->AddFunction();
- WasmFunctionBuilder* f = builder->FunctionAt(f1_index);
- f->SetSignature(sigs.i_ii());
+ WasmFunctionBuilder* f1 = builder->AddFunction(sigs.i_ii());
uint16_t param1 = 0;
uint16_t param2 = 1;
byte code1[] = {WASM_I32_ADD(WASM_GET_LOCAL(param1), WASM_GET_LOCAL(param2))};
- f->EmitCode(code1, sizeof(code1));
+ f1->EmitCode(code1, sizeof(code1));
- uint16_t f2_index = builder->AddFunction();
- f = builder->FunctionAt(f2_index);
- f->SetSignature(sigs.i_v());
+ WasmFunctionBuilder* f2 = builder->AddFunction(sigs.i_v());
- ExportAsMain(f);
- byte code2[] = {WASM_CALL_FUNCTION2(f1_index, WASM_I8(77), WASM_I8(22))};
- f->EmitCode(code2, sizeof(code2));
+ ExportAsMain(f2);
+ byte code2[] = {
+ WASM_CALL_FUNCTION(f1->func_index(), WASM_I8(77), WASM_I8(22))};
+ f2->EmitCode(code2, sizeof(code2));
TestModule(&zone, builder, 99);
}
TEST(Run_WasmModule_ReadLoadedDataSegment) {
static const byte kDataSegmentDest0 = 12;
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
TestSignatures sigs;
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* f = builder->FunctionAt(f_index);
- f->SetSignature(sigs.i_v());
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
byte code[] = {
WASM_LOAD_MEM(MachineType::Int32(), WASM_I8(kDataSegmentDest0))};
f->EmitCode(code, sizeof(code));
byte data[] = {0xaa, 0xbb, 0xcc, 0xdd};
- builder->AddDataSegment(new (&zone) WasmDataSegmentEncoder(
- &zone, data, sizeof(data), kDataSegmentDest0));
+ builder->AddDataSegment(data, sizeof(data), kDataSegmentDest0);
TestModule(&zone, builder, 0xddccbbaa);
}
TEST(Run_WasmModule_CheckMemoryIsZero) {
static const int kCheckSize = 16 * 1024;
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
TestSignatures sigs;
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* f = builder->FunctionAt(f_index);
- f->SetSignature(sigs.i_v());
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
uint16_t localIndex = f->AddLocal(kAstI32);
ExportAsMain(f);
- byte code[] = {WASM_BLOCK(
+ byte code[] = {WASM_BLOCK_I(
WASM_WHILE(
WASM_I32_LTS(WASM_GET_LOCAL(localIndex), WASM_I32V_3(kCheckSize)),
WASM_IF_ELSE(
WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(localIndex)),
- WASM_BRV(2, WASM_I8(-1)), WASM_INC_LOCAL_BY(localIndex, 4))),
+ WASM_BRV(3, WASM_I8(-1)), WASM_INC_LOCAL_BY(localIndex, 4))),
WASM_I8(11))};
f->EmitCode(code, sizeof(code));
TestModule(&zone, builder, 11);
}
TEST(Run_WasmModule_CallMain_recursive) {
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
TestSignatures sigs;
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* f = builder->FunctionAt(f_index);
- f->SetSignature(sigs.i_v());
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
uint16_t localIndex = f->AddLocal(kAstI32);
ExportAsMain(f);
- byte code[] = {WASM_BLOCK(
+ byte code[] = {
WASM_SET_LOCAL(localIndex,
WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
- WASM_IF_ELSE(WASM_I32_LTS(WASM_GET_LOCAL(localIndex), WASM_I8(5)),
- WASM_BLOCK(WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
+ WASM_IF_ELSE_I(WASM_I32_LTS(WASM_GET_LOCAL(localIndex), WASM_I8(5)),
+ WASM_SEQ(WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
WASM_INC_LOCAL(localIndex)),
- WASM_BRV(1, WASM_CALL_FUNCTION0(0))),
- WASM_BRV(0, WASM_I8(55))))};
+ WASM_CALL_FUNCTION0(0)),
+ WASM_I8(55))};
f->EmitCode(code, sizeof(code));
TestModule(&zone, builder, 55);
}
TEST(Run_WasmModule_Global) {
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
TestSignatures sigs;
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint32_t global1 = builder->AddGlobal(kAstI32, 0);
uint32_t global2 = builder->AddGlobal(kAstI32, 0);
- uint16_t f1_index = builder->AddFunction();
- WasmFunctionBuilder* f = builder->FunctionAt(f1_index);
- f->SetSignature(sigs.i_v());
+ WasmFunctionBuilder* f1 = builder->AddFunction(sigs.i_v());
byte code1[] = {
WASM_I32_ADD(WASM_GET_GLOBAL(global1), WASM_GET_GLOBAL(global2))};
- f->EmitCode(code1, sizeof(code1));
- uint16_t f2_index = builder->AddFunction();
- f = builder->FunctionAt(f2_index);
- f->SetSignature(sigs.i_v());
- ExportAsMain(f);
+ f1->EmitCode(code1, sizeof(code1));
+ WasmFunctionBuilder* f2 = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f2);
byte code2[] = {WASM_SET_GLOBAL(global1, WASM_I32V_1(56)),
WASM_SET_GLOBAL(global2, WASM_I32V_1(41)),
- WASM_RETURN1(WASM_CALL_FUNCTION0(f1_index))};
- f->EmitCode(code2, sizeof(code2));
+ WASM_RETURN1(WASM_CALL_FUNCTION0(f1->func_index()))};
+ f2->EmitCode(code2, sizeof(code2));
TestModule(&zone, builder, 97);
}
TEST(Run_WasmModule_Serialization) {
- FLAG_expose_wasm = true;
static const char* kFunctionName = "increment";
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
TestSignatures sigs;
- WasmFunctionBuilder* f = builder->FunctionAt(f_index);
- f->SetSignature(sigs.i_i());
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
byte code[] = {WASM_GET_LOCAL(0), kExprI32Const, 1, kExprI32Add};
f->EmitCode(code, sizeof(code));
ExportAs(f, kFunctionName);
@@ -202,10 +197,10 @@ TEST(Run_WasmModule_Serialization) {
Isolate* isolate = CcTest::InitIsolateOnce();
ErrorThrower thrower(isolate, "");
-
v8::WasmCompiledModule::SerializedModule data;
{
HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
ModuleResult decoding_result = DecodeWasmModule(
isolate, &zone, buffer.begin(), buffer.end(), false, kWasmOrigin);
@@ -215,8 +210,8 @@ TEST(Run_WasmModule_Serialization) {
MaybeHandle<FixedArray> compiled_module =
module->CompileFunctions(isolate, &thrower);
CHECK(!compiled_module.is_null());
- Handle<JSObject> module_obj =
- CreateCompiledModuleObject(isolate, compiled_module.ToHandleChecked());
+ Handle<JSObject> module_obj = CreateCompiledModuleObject(
+ isolate, compiled_module.ToHandleChecked(), ModuleOrigin::kWasmOrigin);
v8::Local<v8::Object> v8_module_obj = v8::Utils::ToLocal(module_obj);
CHECK(v8_module_obj->IsWebAssemblyCompiledModule());
@@ -226,15 +221,17 @@ TEST(Run_WasmModule_Serialization) {
}
v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = isolate->array_buffer_allocator();
+ create_params.array_buffer_allocator =
+ CcTest::InitIsolateOnce()->array_buffer_allocator();
v8::Isolate* v8_isolate = v8::Isolate::New(create_params);
- isolate = reinterpret_cast<Isolate*>(v8_isolate);
{
v8::Isolate::Scope isolate_scope(v8_isolate);
v8::HandleScope new_scope(v8_isolate);
v8::Local<v8::Context> new_ctx = v8::Context::New(v8_isolate);
new_ctx->Enter();
+ isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ testing::SetupIsolateForWasmModule(isolate);
v8::MaybeLocal<v8::WasmCompiledModule> deserialized =
v8::WasmCompiledModule::Deserialize(v8_isolate, data);
@@ -242,17 +239,178 @@ TEST(Run_WasmModule_Serialization) {
CHECK(deserialized.ToLocal(&compiled_module));
Handle<JSObject> module_object =
Handle<JSObject>::cast(v8::Utils::OpenHandle(*compiled_module));
- Handle<FixedArray> compiled_part =
- handle(FixedArray::cast(module_object->GetInternalField(0)));
Handle<JSObject> instance =
- WasmModule::Instantiate(isolate, compiled_part,
+ WasmModule::Instantiate(isolate, &thrower, module_object,
Handle<JSReceiver>::null(),
Handle<JSArrayBuffer>::null())
.ToHandleChecked();
Handle<Object> params[1] = {Handle<Object>(Smi::FromInt(41), isolate)};
- int32_t result = testing::CallFunction(isolate, instance, &thrower,
- kFunctionName, 1, params);
+ int32_t result = testing::CallWasmFunctionForTesting(
+ isolate, instance, &thrower, kFunctionName, 1, params,
+ ModuleOrigin::kWasmOrigin);
CHECK(result == 42);
new_ctx->Exit();
}
}
+
+TEST(MemorySize) {
+ // Initial memory size is 16, see wasm-module-builder.cc
+ static const int kExpectedValue = 16;
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_MEMORY_SIZE};
+ f->EmitCode(code, sizeof(code));
+ TestModule(&zone, builder, kExpectedValue);
+}
+
+TEST(Run_WasmModule_MemSize_GrowMem) {
+ // Initial memory size = 16 + GrowMemory(10)
+ static const int kExpectedValue = 26;
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_GROW_MEMORY(WASM_I8(10)), WASM_DROP, WASM_MEMORY_SIZE};
+ f->EmitCode(code, sizeof(code));
+ TestModule(&zone, builder, kExpectedValue);
+}
+
+TEST(Run_WasmModule_GrowMemoryInIf) {
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator);
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_IF_ELSE_I(WASM_I32V(0), WASM_GROW_MEMORY(WASM_I32V(1)),
+ WASM_I32V(12))};
+ f->EmitCode(code, sizeof(code));
+ TestModule(&zone, builder, 12);
+}
+
+TEST(Run_WasmModule_GrowMemOobOffset) {
+ static const int kPageSize = 0x10000;
+ // Initial memory size = 16 + GrowMemory(10)
+ static const int index = kPageSize * 17 + 4;
+ int value = 0xaced;
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {
+ WASM_GROW_MEMORY(WASM_I8(1)),
+ WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index), WASM_I32V(value))};
+ f->EmitCode(code, sizeof(code));
+ TestModuleException(&zone, builder);
+}
+
+TEST(Run_WasmModule_GrowMemOobFixedIndex) {
+ static const int kPageSize = 0x10000;
+ // Initial memory size = 16 + GrowMemory(10)
+ static const int index = kPageSize * 26 + 4;
+ int value = 0xaced;
+ TestSignatures sigs;
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ Zone zone(isolate->allocator());
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
+ ExportAsMain(f);
+ byte code[] = {
+ WASM_GROW_MEMORY(WASM_GET_LOCAL(0)), WASM_DROP,
+ WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index), WASM_I32V(value)),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V(index))};
+ f->EmitCode(code, sizeof(code));
+
+ HandleScope scope(isolate);
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(buffer);
+ testing::SetupIsolateForWasmModule(isolate);
+
+ Handle<JSObject> instance = testing::CompileInstantiateWasmModuleForTesting(
+ isolate, &zone, buffer.begin(), buffer.end(), ModuleOrigin::kWasmOrigin);
+ CHECK(!instance.is_null());
+
+ // Initial memory size is 16 pages, should trap till index > MemSize on
+ // consecutive GrowMem calls
+ for (uint32_t i = 1; i < 5; i++) {
+ Handle<Object> params[1] = {Handle<Object>(Smi::FromInt(i), isolate)};
+ v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ testing::RunWasmModuleForTesting(isolate, instance, 1, params,
+ ModuleOrigin::kWasmOrigin);
+ CHECK(try_catch.HasCaught());
+ isolate->clear_pending_exception();
+ }
+
+ Handle<Object> params[1] = {Handle<Object>(Smi::FromInt(1), isolate)};
+ int32_t result = testing::RunWasmModuleForTesting(
+ isolate, instance, 1, params, ModuleOrigin::kWasmOrigin);
+ CHECK(result == 0xaced);
+}
+
+TEST(Run_WasmModule_GrowMemOobVariableIndex) {
+ static const int kPageSize = 0x10000;
+ int value = 0xaced;
+ TestSignatures sigs;
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
+ ExportAsMain(f);
+ byte code[] = {
+ WASM_GROW_MEMORY(WASM_I8(1)), WASM_DROP,
+ WASM_STORE_MEM(MachineType::Int32(), WASM_GET_LOCAL(0), WASM_I32V(value)),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0))};
+ f->EmitCode(code, sizeof(code));
+
+ HandleScope scope(isolate);
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(buffer);
+ testing::SetupIsolateForWasmModule(isolate);
+
+ Handle<JSObject> instance = testing::CompileInstantiateWasmModuleForTesting(
+ isolate, &zone, buffer.begin(), buffer.end(), ModuleOrigin::kWasmOrigin);
+
+ CHECK(!instance.is_null());
+
+ // Initial memory size is 16 pages, should trap till index > MemSize on
+ // consecutive GrowMem calls
+ for (int i = 1; i < 5; i++) {
+ Handle<Object> params[1] = {
+ Handle<Object>(Smi::FromInt((16 + i) * kPageSize - 3), isolate)};
+ v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ testing::RunWasmModuleForTesting(isolate, instance, 1, params,
+ ModuleOrigin::kWasmOrigin);
+ CHECK(try_catch.HasCaught());
+ isolate->clear_pending_exception();
+ }
+
+ for (int i = 1; i < 5; i++) {
+ Handle<Object> params[1] = {
+ Handle<Object>(Smi::FromInt((20 + i) * kPageSize - 4), isolate)};
+ int32_t result = testing::RunWasmModuleForTesting(
+ isolate, instance, 1, params, ModuleOrigin::kWasmOrigin);
+ CHECK(result == 0xaced);
+ }
+
+ v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ Handle<Object> params[1] = {
+ Handle<Object>(Smi::FromInt(25 * kPageSize), isolate)};
+ testing::RunWasmModuleForTesting(isolate, instance, 1, params,
+ ModuleOrigin::kWasmOrigin);
+ CHECK(try_catch.HasCaught());
+ isolate->clear_pending_exception();
+}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
index 65b1d57bc1..e3a28f611b 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
@@ -19,42 +19,43 @@ using namespace v8::internal::compiler;
TEST_BODY(float, F32, WASM_F32_ADD) \
TEST_BODY(double, F64, WASM_F64_ADD)
-#define LOAD_SET_GLOBAL_TEST_BODY(C_TYPE, MACHINE_TYPE, ADD) \
- TEST(WasmRelocateGlobal##MACHINE_TYPE) { \
- TestingModule module(kExecuteCompiled); \
- module.AddGlobal<C_TYPE>(kAst##MACHINE_TYPE); \
- module.AddGlobal<C_TYPE>(kAst##MACHINE_TYPE); \
- \
- WasmRunner<C_TYPE> r(&module, \
- WasmOpcodes::MachineTypeFor(kAst##MACHINE_TYPE)); \
- \
- /* global = global + p0 */ \
- BUILD(r, WASM_SET_GLOBAL(1, ADD(WASM_GET_GLOBAL(0), WASM_GET_LOCAL(0)))); \
- CHECK_EQ(1, module.instance->function_code.size()); \
- \
- int filter = 1 << RelocInfo::WASM_GLOBAL_REFERENCE; \
- \
- Handle<Code> code = module.instance->function_code[0]; \
- \
- Address old_start = module.instance->globals_start; \
- Address new_start = old_start + 1; \
- \
- Address old_addresses[2]; \
- uint32_t address_index = 0U; \
- for (RelocIterator it(*code, filter); !it.done(); it.next()) { \
- old_addresses[address_index] = it.rinfo()->wasm_global_reference(); \
- it.rinfo()->update_wasm_global_reference(old_start, new_start); \
- ++address_index; \
- } \
- CHECK_EQ(2U, address_index); \
- \
- address_index = 0U; \
- for (RelocIterator it(*code, filter); !it.done(); it.next()) { \
- CHECK_EQ(old_addresses[address_index] + 1, \
- it.rinfo()->wasm_global_reference()); \
- ++address_index; \
- } \
- CHECK_EQ(2U, address_index); \
+#define LOAD_SET_GLOBAL_TEST_BODY(C_TYPE, MACHINE_TYPE, ADD) \
+ TEST(WasmRelocateGlobal##MACHINE_TYPE) { \
+ TestingModule module(kExecuteCompiled); \
+ module.AddGlobal<C_TYPE>(kAst##MACHINE_TYPE); \
+ module.AddGlobal<C_TYPE>(kAst##MACHINE_TYPE); \
+ \
+ WasmRunner<C_TYPE> r(&module, \
+ WasmOpcodes::MachineTypeFor(kAst##MACHINE_TYPE)); \
+ \
+ /* global = global + p0 */ \
+ BUILD(r, WASM_SET_GLOBAL(1, ADD(WASM_GET_GLOBAL(0), WASM_GET_LOCAL(0))), \
+ WASM_GET_GLOBAL(0)); \
+ CHECK_EQ(1, module.instance->function_code.size()); \
+ \
+ int filter = 1 << RelocInfo::WASM_GLOBAL_REFERENCE; \
+ \
+ Handle<Code> code = module.instance->function_code[0]; \
+ \
+ Address old_start = module.instance->globals_start; \
+ Address new_start = old_start + 1; \
+ \
+ Address old_addresses[4]; \
+ uint32_t address_index = 0U; \
+ for (RelocIterator it(*code, filter); !it.done(); it.next()) { \
+ old_addresses[address_index] = it.rinfo()->wasm_global_reference(); \
+ it.rinfo()->update_wasm_global_reference(old_start, new_start); \
+ ++address_index; \
+ } \
+ CHECK_LE(address_index, 4U); \
+ \
+ address_index = 0U; \
+ for (RelocIterator it(*code, filter); !it.done(); it.next()) { \
+ CHECK_EQ(old_addresses[address_index] + 1, \
+ it.rinfo()->wasm_global_reference()); \
+ ++address_index; \
+ } \
+ CHECK_LE(address_index, 4U); \
}
FOREACH_TYPE(LOAD_SET_GLOBAL_TEST_BODY)
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
new file mode 100644
index 0000000000..76eac5e793
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -0,0 +1,49 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-macro-gen.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+using namespace v8::internal::wasm;
+
+WASM_EXEC_TEST(Splat) {
+ FLAG_wasm_simd_prototype = true;
+
+ // Store SIMD value in a local variable, use extract lane to check lane values
+ // This test is not a test for ExtractLane as Splat does not create
+ // interesting SIMD values.
+ //
+ // SetLocal(1, I32x4Splat(Local(0)));
+ // For each lane index
+ // if(Local(0) != I32x4ExtractLane(Local(1), index)
+ // return 0
+ //
+ // return 1
+ WasmRunner<int32_t> r(kExecuteCompiled, MachineType::Int32());
+ r.AllocateLocal(kAstS128);
+ BUILD(r,
+ WASM_BLOCK(
+ WASM_SET_LOCAL(1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(0))),
+ WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(0), WASM_SIMD_I32x4_EXTRACT_LANE(
+ 0, WASM_GET_LOCAL(1))),
+ WASM_RETURN1(WASM_ZERO)),
+ WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(0), WASM_SIMD_I32x4_EXTRACT_LANE(
+ 1, WASM_GET_LOCAL(1))),
+ WASM_RETURN1(WASM_ZERO)),
+ WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(0), WASM_SIMD_I32x4_EXTRACT_LANE(
+ 2, WASM_GET_LOCAL(1))),
+ WASM_RETURN1(WASM_ZERO)),
+ WASM_IF(WASM_I32_NE(WASM_GET_LOCAL(0), WASM_SIMD_I32x4_EXTRACT_LANE(
+ 3, WASM_GET_LOCAL(1))),
+ WASM_RETURN1(WASM_ZERO)),
+ WASM_RETURN1(WASM_ONE)));
+
+ FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(*i)); }
+}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 42ca816655..d9d9db80e1 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -12,8 +12,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
-#include "test/cctest/wasm/test-signatures.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
using namespace v8::base;
using namespace v8::internal;
@@ -21,10 +21,11 @@ using namespace v8::internal::compiler;
using namespace v8::internal::wasm;
// for even shorter tests.
-#define B2(a, b) kExprBlock, a, b, kExprEnd
-#define B1(a) kExprBlock, a, kExprEnd
-#define RET(x) x, kExprReturn, 1
-#define RET_I8(x) kExprI8Const, x, kExprReturn, 1
+#define B1(a) WASM_BLOCK(a)
+#define B2(a, b) WASM_BLOCK(a, b)
+#define B3(a, b, c) WASM_BLOCK(a, b, c)
+#define RET(x) x, kExprReturn
+#define RET_I8(x) kExprI8Const, x, kExprReturn
WASM_EXEC_TEST(Int8Const) {
WasmRunner<int32_t> r(execution_mode);
@@ -34,11 +35,11 @@ WASM_EXEC_TEST(Int8Const) {
CHECK_EQ(kExpectedValue, r.Call());
}
-WASM_EXEC_TEST(Int8Const_fallthru1) {
+WASM_EXEC_TEST(Int8Const_end) {
WasmRunner<int32_t> r(execution_mode);
- const byte kExpectedValue = 122;
- // kExpectedValue
- BUILD(r, WASM_I8(kExpectedValue));
+ const byte kExpectedValue = 121;
+ // return(kExpectedValue)
+ BUILD(r, WASM_I8(kExpectedValue), kExprEnd);
CHECK_EQ(kExpectedValue, r.Call());
}
@@ -46,7 +47,7 @@ WASM_EXEC_TEST(Int8Const_fallthru2) {
WasmRunner<int32_t> r(execution_mode);
const byte kExpectedValue = 123;
// -99 kExpectedValue
- BUILD(r, WASM_I8(-99), WASM_I8(kExpectedValue));
+ BUILD(r, WASM_I8(-99), WASM_DROP, WASM_I8(kExpectedValue));
CHECK_EQ(kExpectedValue, r.Call());
}
@@ -78,14 +79,6 @@ WASM_EXEC_TEST(Int32Const_many) {
}
}
-WASM_EXEC_TEST(MemorySize) {
- TestingModule module(execution_mode);
- WasmRunner<int32_t> r(&module);
- module.AddMemory(1024);
- BUILD(r, kExprMemorySize);
- CHECK_EQ(1024, r.Call());
-}
-
WASM_EXEC_TEST(Int32Param0) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// return(local[0])
@@ -129,11 +122,11 @@ WASM_EXEC_TEST(Int32Add_P_fallthru) {
FOR_INT32_INPUTS(i) { CHECK_EQ(*i + 13, r.Call(*i)); }
}
-WASM_EXEC_TEST(Int32Add_P2) {
+static void RunInt32AddTest(WasmExecutionMode execution_mode, const byte* code,
+ size_t size) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
MachineType::Int32());
- // p0 + p1
- BUILD(r, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ r.Build(code, code + size);
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) +
@@ -143,6 +136,40 @@ WASM_EXEC_TEST(Int32Add_P2) {
}
}
+WASM_EXEC_TEST(Int32Add_P2) {
+ FLAG_wasm_mv_prototype = true;
+ static const byte code[] = {
+ WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
+ RunInt32AddTest(execution_mode, code, sizeof(code));
+}
+
+WASM_EXEC_TEST(Int32Add_block1) {
+ FLAG_wasm_mv_prototype = true;
+ static const byte code[] = {
+ WASM_BLOCK_TT(kAstI32, kAstI32, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ kExprI32Add};
+ RunInt32AddTest(execution_mode, code, sizeof(code));
+}
+
+WASM_EXEC_TEST(Int32Add_block2) {
+ FLAG_wasm_mv_prototype = true;
+ static const byte code[] = {
+ WASM_BLOCK_TT(kAstI32, kAstI32, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ kExprBr, DEPTH_0),
+ kExprI32Add};
+ RunInt32AddTest(execution_mode, code, sizeof(code));
+}
+
+WASM_EXEC_TEST(Int32Add_multi_if) {
+ FLAG_wasm_mv_prototype = true;
+ static const byte code[] = {
+ WASM_IF_ELSE_TT(kAstI32, kAstI32, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ kExprI32Add};
+ RunInt32AddTest(execution_mode, code, sizeof(code));
+}
+
WASM_EXEC_TEST(Float32Add) {
WasmRunner<int32_t> r(execution_mode);
// int(11.5f + 44.5f)
@@ -433,14 +460,18 @@ WASM_EXEC_TEST(Int32DivS_trap_effect) {
module.AddMemoryElems<int32_t>(8);
WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
- BUILD(r,
- WASM_IF_ELSE(WASM_GET_LOCAL(0),
- WASM_I32_DIVS(WASM_STORE_MEM(MachineType::Int8(),
- WASM_ZERO, WASM_GET_LOCAL(0)),
- WASM_GET_LOCAL(1)),
- WASM_I32_DIVS(WASM_STORE_MEM(MachineType::Int8(),
- WASM_ZERO, WASM_GET_LOCAL(0)),
- WASM_GET_LOCAL(1))));
+ BUILD(r, WASM_IF_ELSE_I(
+ WASM_GET_LOCAL(0),
+ WASM_I32_DIVS(
+ WASM_BLOCK_I(WASM_STORE_MEM(MachineType::Int8(), WASM_ZERO,
+ WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(1)),
+ WASM_I32_DIVS(
+ WASM_BLOCK_I(WASM_STORE_MEM(MachineType::Int8(), WASM_ZERO,
+ WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(1))));
CHECK_EQ(0, r.Call(0, 100));
CHECK_TRAP(r.Call(8, 0));
CHECK_TRAP(r.Call(4, 0));
@@ -605,46 +636,6 @@ WASM_EXEC_TEST(Float32Neg) {
}
}
-WASM_EXEC_TEST(Float32SubMinusZero) {
- WasmRunner<float> r(execution_mode, MachineType::Float32());
- BUILD(r, WASM_F32_SUB(WASM_F32(-0.0), WASM_GET_LOCAL(0)));
-
- uint32_t sNanValue =
- bit_cast<uint32_t>(std::numeric_limits<float>::signaling_NaN());
- uint32_t qNanValue =
- bit_cast<uint32_t>(std::numeric_limits<float>::quiet_NaN());
- uint32_t payload = 0x00200000;
-
- uint32_t expected = (qNanValue & 0xffc00000) | payload;
- uint32_t operand = (sNanValue & 0xffc00000) | payload;
- CHECK_EQ(expected, bit_cast<uint32_t>(r.Call(bit_cast<float>(operand))));
-
- // Change the sign of the NaN.
- expected |= 0x80000000;
- operand |= 0x80000000;
- CHECK_EQ(expected, bit_cast<uint32_t>(r.Call(bit_cast<float>(operand))));
-}
-
-WASM_EXEC_TEST(Float64SubMinusZero) {
- WasmRunner<double> r(execution_mode, MachineType::Float64());
- BUILD(r, WASM_F64_SUB(WASM_F64(-0.0), WASM_GET_LOCAL(0)));
-
- uint64_t sNanValue =
- bit_cast<uint64_t>(std::numeric_limits<double>::signaling_NaN());
- uint64_t qNanValue =
- bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN());
- uint64_t payload = 0x0000123456789abc;
-
- uint64_t expected = (qNanValue & 0xfff8000000000000) | payload;
- uint64_t operand = (sNanValue & 0xfff8000000000000) | payload;
- CHECK_EQ(expected, bit_cast<uint64_t>(r.Call(bit_cast<double>(operand))));
-
- // Change the sign of the NaN.
- expected |= 0x8000000000000000;
- operand |= 0x8000000000000000;
- CHECK_EQ(expected, bit_cast<uint64_t>(r.Call(bit_cast<double>(operand))));
-}
-
WASM_EXEC_TEST(Float64Neg) {
WasmRunner<double> r(execution_mode, MachineType::Float64());
BUILD(r, WASM_F64_NEG(WASM_GET_LOCAL(0)));
@@ -658,50 +649,52 @@ WASM_EXEC_TEST(Float64Neg) {
WASM_EXEC_TEST(IfElse_P) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// if (p0) return 11; else return 22;
- BUILD(r, WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
- WASM_I8(11), // --
- WASM_I8(22))); // --
+ BUILD(r, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), // --
+ WASM_I8(11), // --
+ WASM_I8(22))); // --
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 11 : 22;
CHECK_EQ(expected, r.Call(*i));
}
}
+#define EMPTY
WASM_EXEC_TEST(If_empty1) {
WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
MachineType::Uint32());
- BUILD(r, WASM_GET_LOCAL(0), kExprIf, kExprEnd, WASM_GET_LOCAL(1));
+ BUILD(r, WASM_GET_LOCAL(0), kExprIf, kLocalVoid, kExprEnd, WASM_GET_LOCAL(1));
FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 9, *i)); }
}
WASM_EXEC_TEST(IfElse_empty1) {
WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
MachineType::Uint32());
- BUILD(r, WASM_GET_LOCAL(0), kExprIf, kExprElse, kExprEnd, WASM_GET_LOCAL(1));
+ BUILD(r, WASM_GET_LOCAL(0), kExprIf, kLocalVoid, kExprElse, kExprEnd,
+ WASM_GET_LOCAL(1));
FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 8, *i)); }
}
WASM_EXEC_TEST(IfElse_empty2) {
WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
MachineType::Uint32());
- BUILD(r, WASM_GET_LOCAL(0), kExprIf, WASM_ZERO, kExprElse, kExprEnd,
- WASM_GET_LOCAL(1));
+ BUILD(r, WASM_GET_LOCAL(0), kExprIf, kLocalVoid, WASM_NOP, kExprElse,
+ kExprEnd, WASM_GET_LOCAL(1));
FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 7, *i)); }
}
WASM_EXEC_TEST(IfElse_empty3) {
WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
MachineType::Uint32());
- BUILD(r, WASM_GET_LOCAL(0), kExprIf, kExprElse, WASM_ZERO, kExprEnd,
- WASM_GET_LOCAL(1));
+ BUILD(r, WASM_GET_LOCAL(0), kExprIf, kLocalVoid, kExprElse, WASM_NOP,
+ kExprEnd, WASM_GET_LOCAL(1));
FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 6, *i)); }
}
-WASM_EXEC_TEST(If_chain) {
+WASM_EXEC_TEST(If_chain1) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// if (p0) 13; if (p0) 14; 15
- BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_I8(13)),
- WASM_IF(WASM_GET_LOCAL(0), WASM_I8(14)), WASM_I8(15));
+ BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_NOP),
+ WASM_IF(WASM_GET_LOCAL(0), WASM_NOP), WASM_I8(15));
FOR_INT32_INPUTS(i) { CHECK_EQ(15, r.Call(*i)); }
}
@@ -720,13 +713,22 @@ WASM_EXEC_TEST(If_chain_set) {
WASM_EXEC_TEST(IfElse_Unreachable1) {
WasmRunner<int32_t> r(execution_mode);
- // if (0) unreachable; else return 22;
- BUILD(r, WASM_IF_ELSE(WASM_ZERO, // --
- WASM_UNREACHABLE, // --
- WASM_I8(27))); // --
+ // 0 ? unreachable : 27
+ BUILD(r, WASM_IF_ELSE_I(WASM_ZERO, // --
+ WASM_UNREACHABLE, // --
+ WASM_I8(27))); // --
CHECK_EQ(27, r.Call());
}
+WASM_EXEC_TEST(IfElse_Unreachable2) {
+ WasmRunner<int32_t> r(execution_mode);
+ // 1 ? 28 : unreachable
+ BUILD(r, WASM_IF_ELSE_I(WASM_I8(1), // --
+ WASM_I8(28), // --
+ WASM_UNREACHABLE)); // --
+ CHECK_EQ(28, r.Call());
+}
+
WASM_EXEC_TEST(Return12) {
WasmRunner<int32_t> r(execution_mode);
@@ -737,7 +739,7 @@ WASM_EXEC_TEST(Return12) {
WASM_EXEC_TEST(Return17) {
WasmRunner<int32_t> r(execution_mode);
- BUILD(r, B1(RET_I8(17)));
+ BUILD(r, WASM_BLOCK(RET_I8(17)));
CHECK_EQ(17, r.Call());
}
@@ -794,10 +796,10 @@ WASM_EXEC_TEST(Select) {
WASM_EXEC_TEST(Select_strict1) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// select(a=0, a=1, a=2); return a
- BUILD(r, B2(WASM_SELECT(WASM_SET_LOCAL(0, WASM_I8(0)),
- WASM_SET_LOCAL(0, WASM_I8(1)),
- WASM_SET_LOCAL(0, WASM_I8(2))),
- WASM_GET_LOCAL(0)));
+ BUILD(r, WASM_SELECT(WASM_TEE_LOCAL(0, WASM_I8(0)),
+ WASM_TEE_LOCAL(0, WASM_I8(1)),
+ WASM_TEE_LOCAL(0, WASM_I8(2))),
+ WASM_DROP, WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(2, r.Call(*i)); }
}
@@ -806,8 +808,8 @@ WASM_EXEC_TEST(Select_strict2) {
r.AllocateLocal(kAstI32);
r.AllocateLocal(kAstI32);
// select(b=5, c=6, a)
- BUILD(r, WASM_SELECT(WASM_SET_LOCAL(1, WASM_I8(5)),
- WASM_SET_LOCAL(2, WASM_I8(6)), WASM_GET_LOCAL(0)));
+ BUILD(r, WASM_SELECT(WASM_TEE_LOCAL(1, WASM_I8(5)),
+ WASM_TEE_LOCAL(2, WASM_I8(6)), WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 5 : 6;
CHECK_EQ(expected, r.Call(*i));
@@ -819,9 +821,9 @@ WASM_EXEC_TEST(Select_strict3) {
r.AllocateLocal(kAstI32);
r.AllocateLocal(kAstI32);
// select(b=5, c=6, a=b)
- BUILD(r, WASM_SELECT(WASM_SET_LOCAL(1, WASM_I8(5)),
- WASM_SET_LOCAL(2, WASM_I8(6)),
- WASM_SET_LOCAL(0, WASM_GET_LOCAL(1))));
+ BUILD(r, WASM_SELECT(WASM_TEE_LOCAL(1, WASM_I8(5)),
+ WASM_TEE_LOCAL(2, WASM_I8(6)),
+ WASM_TEE_LOCAL(0, WASM_GET_LOCAL(1))));
FOR_INT32_INPUTS(i) {
int32_t expected = 5;
CHECK_EQ(expected, r.Call(*i));
@@ -830,26 +832,38 @@ WASM_EXEC_TEST(Select_strict3) {
WASM_EXEC_TEST(BrIf_strict) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(
- r,
- B2(B1(WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_I8(99)))),
- WASM_GET_LOCAL(0)));
+ BUILD(r, WASM_BLOCK_I(WASM_BRV_IF(0, WASM_GET_LOCAL(0),
+ WASM_TEE_LOCAL(0, WASM_I8(99)))));
- FOR_INT32_INPUTS(i) { CHECK_EQ(99, r.Call(*i)); }
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
-WASM_EXEC_TEST(BrTable0a) {
+WASM_EXEC_TEST(Br_height) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r,
- B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0))), WASM_I8(91)));
+ WASM_BLOCK_I(
+ WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)),
+ WASM_RETURN1(WASM_I8(9)), WASM_I8(7), WASM_I8(7)),
+ WASM_BRV(0, WASM_I8(8))));
+
+ for (int32_t i = 0; i < 5; i++) {
+ int32_t expected = i != 0 ? 8 : 9;
+ CHECK_EQ(expected, r.Call(i));
+ }
+}
+
+WASM_EXEC_TEST(BrTable0a) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, B1(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0)))),
+ WASM_I8(91));
FOR_INT32_INPUTS(i) { CHECK_EQ(91, r.Call(*i)); }
}
WASM_EXEC_TEST(BrTable0b) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r,
- B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(0), BR_TARGET(0))),
- WASM_I8(92)));
+ B1(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(0), BR_TARGET(0)))),
+ WASM_I8(92));
FOR_INT32_INPUTS(i) { CHECK_EQ(92, r.Call(*i)); }
}
@@ -857,9 +871,9 @@ WASM_EXEC_TEST(BrTable0c) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(
r,
- B2(B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(0), BR_TARGET(1))),
- RET_I8(76)),
- WASM_I8(77)));
+ B1(B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(0), BR_TARGET(1))),
+ RET_I8(76))),
+ WASM_I8(77));
FOR_INT32_INPUTS(i) {
int32_t expected = *i == 0 ? 76 : 77;
CHECK_EQ(expected, r.Call(*i));
@@ -874,9 +888,10 @@ WASM_EXEC_TEST(BrTable1) {
WASM_EXEC_TEST(BrTable_loop) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B2(WASM_LOOP(WASM_BR_TABLE(WASM_INC_LOCAL_BY(0, 1), 2, BR_TARGET(2),
- BR_TARGET(1), BR_TARGET(0))),
- RET_I8(99)),
+ BUILD(r,
+ B2(B1(WASM_LOOP(WASM_BR_TABLE(WASM_INC_LOCAL_BYV(0, 1), 2, BR_TARGET(2),
+ BR_TARGET(1), BR_TARGET(0)))),
+ RET_I8(99)),
WASM_I8(98));
CHECK_EQ(99, r.Call(0));
CHECK_EQ(98, r.Call(-1));
@@ -1021,10 +1036,9 @@ WASM_EXEC_TEST(I32ReinterpretF32) {
int32_t* memory = module.AddMemoryElems<int32_t>(8);
WasmRunner<int32_t> r(&module, MachineType::Int32());
- BUILD(r,
- WASM_BLOCK(WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO,
- WASM_F32_REINTERPRET_I32(WASM_GET_LOCAL(0))),
- WASM_I8(107)));
+ BUILD(r, WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO,
+ WASM_F32_REINTERPRET_I32(WASM_GET_LOCAL(0))),
+ WASM_I8(107));
FOR_INT32_INPUTS(i) {
int32_t expected = *i;
@@ -1033,13 +1047,14 @@ WASM_EXEC_TEST(I32ReinterpretF32) {
}
}
-WASM_EXEC_TEST(ReturnStore) {
+WASM_EXEC_TEST(LoadStoreLoad) {
TestingModule module(execution_mode);
int32_t* memory = module.AddMemoryElems<int32_t>(8);
WasmRunner<int32_t> r(&module);
BUILD(r, WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)));
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO));
FOR_INT32_INPUTS(i) {
int32_t expected = *i;
@@ -1061,7 +1076,7 @@ WASM_EXEC_TEST(VoidReturn1) {
const int32_t kExpected = -414444;
// Build the calling function.
WasmRunner<int32_t> r(&module);
- BUILD(r, B2(WASM_CALL_FUNCTION0(index), WASM_I32V_3(kExpected)));
+ BUILD(r, WASM_CALL_FUNCTION0(index), WASM_I32V_3(kExpected));
int32_t result = r.Call();
CHECK_EQ(kExpected, result);
@@ -1079,15 +1094,27 @@ WASM_EXEC_TEST(VoidReturn2) {
const int32_t kExpected = -414444;
// Build the calling function.
WasmRunner<int32_t> r(&module);
- BUILD(r, B2(WASM_CALL_FUNCTION0(index), WASM_I32V_3(kExpected)));
+ BUILD(r, B1(WASM_CALL_FUNCTION0(index)), WASM_I32V_3(kExpected));
int32_t result = r.Call();
CHECK_EQ(kExpected, result);
}
+WASM_EXEC_TEST(BrEmpty) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, WASM_BRV(0, WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
+WASM_EXEC_TEST(BrIfEmpty) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
WASM_EXEC_TEST(Block_empty) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, kExprBlock, kExprEnd, WASM_GET_LOCAL(0));
+ BUILD(r, kExprBlock, kLocalVoid, kExprEnd, WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
@@ -1099,27 +1126,45 @@ WASM_EXEC_TEST(Block_empty_br1) {
WASM_EXEC_TEST(Block_empty_brif1) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B1(WASM_BR_IF(0, WASM_ZERO)), WASM_GET_LOCAL(0));
+ BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_ZERO)), WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Block_empty_brif2) {
WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
MachineType::Uint32());
- BUILD(r, B1(WASM_BR_IF(0, WASM_GET_LOCAL(1))), WASM_GET_LOCAL(0));
+ BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_GET_LOCAL(1))), WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i, *i + 1)); }
}
+WASM_EXEC_TEST(Block_i) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, WASM_BLOCK_I(WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
+WASM_EXEC_TEST(Block_f) {
+ WasmRunner<float> r(execution_mode, MachineType::Float32());
+ BUILD(r, WASM_BLOCK_F(WASM_GET_LOCAL(0)));
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(*i, r.Call(*i)); }
+}
+
+WASM_EXEC_TEST(Block_d) {
+ WasmRunner<double> r(execution_mode, MachineType::Float64());
+ BUILD(r, WASM_BLOCK_D(WASM_GET_LOCAL(0)));
+ FOR_FLOAT64_INPUTS(i) { CHECK_FLOAT_EQ(*i, r.Call(*i)); }
+}
+
WASM_EXEC_TEST(Block_br2) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B1(WASM_BRV(0, WASM_GET_LOCAL(0))));
+ BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0))));
FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Block_If_P) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- // { if (p0) return 51; return 52; }
- BUILD(r, B2( // --
+ // block { if (p0) break 51; 52; }
+ BUILD(r, WASM_BLOCK_I( // --
WASM_IF(WASM_GET_LOCAL(0), // --
WASM_BRV(1, WASM_I8(51))), // --
WASM_I8(52))); // --
@@ -1131,32 +1176,64 @@ WASM_EXEC_TEST(Block_If_P) {
WASM_EXEC_TEST(Loop_empty) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, kExprLoop, kExprEnd, WASM_GET_LOCAL(0));
+ BUILD(r, kExprLoop, kLocalVoid, kExprEnd, WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
+WASM_EXEC_TEST(Loop_i) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, WASM_LOOP_I(WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
+WASM_EXEC_TEST(Loop_f) {
+ WasmRunner<float> r(execution_mode, MachineType::Float32());
+ BUILD(r, WASM_LOOP_F(WASM_GET_LOCAL(0)));
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(*i, r.Call(*i)); }
+}
+
+WASM_EXEC_TEST(Loop_d) {
+ WasmRunner<double> r(execution_mode, MachineType::Float64());
+ BUILD(r, WASM_LOOP_D(WASM_GET_LOCAL(0)));
+ FOR_FLOAT64_INPUTS(i) { CHECK_FLOAT_EQ(*i, r.Call(*i)); }
+}
+
WASM_EXEC_TEST(Loop_empty_br1) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, WASM_LOOP(WASM_BR(1)), WASM_GET_LOCAL(0));
+ BUILD(r, B1(WASM_LOOP(WASM_BR(1))), WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Loop_empty_brif1) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, WASM_LOOP(WASM_BR_IF(1, WASM_ZERO)), WASM_GET_LOCAL(0));
+ BUILD(r, B1(WASM_LOOP(WASM_BR_IF(1, WASM_ZERO))), WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Loop_empty_brif2) {
WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
MachineType::Uint32());
- BUILD(r, WASM_LOOP(WASM_BR_IF(1, WASM_GET_LOCAL(1))), WASM_GET_LOCAL(0));
+ BUILD(r, WASM_LOOP_I(WASM_BRV_IF(1, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i, *i + 1)); }
}
+WASM_EXEC_TEST(Loop_empty_brif3) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
+ MachineType::Uint32(), MachineType::Uint32());
+ BUILD(r, WASM_LOOP(WASM_BRV_IFD(1, WASM_GET_LOCAL(2), WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(1));
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
+ CHECK_EQ(*i, r.Call(0, *i, *j));
+ CHECK_EQ(*j, r.Call(1, *i, *j));
+ }
+ }
+}
+
WASM_EXEC_TEST(Block_BrIf_P) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B2(WASM_BRV_IF(0, WASM_I8(51), WASM_GET_LOCAL(0)), WASM_I8(52)));
+ BUILD(r, WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I8(51), WASM_GET_LOCAL(0)),
+ WASM_I8(52)));
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 51 : 52;
CHECK_EQ(expected, r.Call(*i));
@@ -1166,11 +1243,11 @@ WASM_EXEC_TEST(Block_BrIf_P) {
WASM_EXEC_TEST(Block_IfElse_P_assign) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// { if (p0) p0 = 71; else p0 = 72; return p0; }
- BUILD(r, B2( // --
- WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
- WASM_SET_LOCAL(0, WASM_I8(71)), // --
- WASM_SET_LOCAL(0, WASM_I8(72))), // --
- WASM_GET_LOCAL(0)));
+ BUILD(r, // --
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
+ WASM_SET_LOCAL(0, WASM_I8(71)), // --
+ WASM_SET_LOCAL(0, WASM_I8(72))), // --
+ WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 71 : 72;
CHECK_EQ(expected, r.Call(*i));
@@ -1193,9 +1270,8 @@ WASM_EXEC_TEST(Block_IfElse_P_return) {
WASM_EXEC_TEST(Block_If_P_assign) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// { if (p0) p0 = 61; p0; }
- BUILD(r,
- WASM_BLOCK(WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_I8(61))),
- WASM_GET_LOCAL(0)));
+ BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_I8(61))),
+ WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 61 : *i;
CHECK_EQ(expected, r.Call(*i));
@@ -1212,21 +1288,9 @@ WASM_EXEC_TEST(DanglingAssign) {
WASM_EXEC_TEST(ExprIf_P) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// p0 ? 11 : 22;
- BUILD(r, WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
- WASM_I8(11), // --
- WASM_I8(22))); // --
- FOR_INT32_INPUTS(i) {
- int32_t expected = *i ? 11 : 22;
- CHECK_EQ(expected, r.Call(*i));
- }
-}
-
-WASM_EXEC_TEST(ExprIf_P_fallthru) {
- WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- // p0 ? 11 : 22;
- BUILD(r, WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
- WASM_I8(11), // --
- WASM_I8(22))); // --
+ BUILD(r, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), // --
+ WASM_I8(11), // --
+ WASM_I8(22))); // --
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 11 : 22;
CHECK_EQ(expected, r.Call(*i));
@@ -1235,12 +1299,11 @@ WASM_EXEC_TEST(ExprIf_P_fallthru) {
WASM_EXEC_TEST(CountDown) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, WASM_BLOCK(
- WASM_LOOP(WASM_IF(
- WASM_GET_LOCAL(0),
- WASM_BRV(1, WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0),
- WASM_I8(1)))))),
- WASM_GET_LOCAL(0)));
+ BUILD(r, WASM_LOOP(WASM_IFB(
+ WASM_GET_LOCAL(0),
+ WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(1))),
+ WASM_BR(1))),
+ WASM_GET_LOCAL(0));
CHECK_EQ(0, r.Call(1));
CHECK_EQ(0, r.Call(10));
CHECK_EQ(0, r.Call(100));
@@ -1248,12 +1311,12 @@ WASM_EXEC_TEST(CountDown) {
WASM_EXEC_TEST(CountDown_fallthru) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, WASM_BLOCK(
- WASM_LOOP(WASM_IF(WASM_NOT(WASM_GET_LOCAL(0)), WASM_BREAK(1)),
- WASM_SET_LOCAL(
- 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(1))),
- WASM_CONTINUE(0)),
- WASM_GET_LOCAL(0)));
+ BUILD(r, WASM_LOOP(
+ WASM_IF(WASM_NOT(WASM_GET_LOCAL(0)),
+ WASM_BRV(2, WASM_GET_LOCAL(0))),
+ WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(1))),
+ WASM_CONTINUE(0)),
+ WASM_GET_LOCAL(0));
CHECK_EQ(0, r.Call(1));
CHECK_EQ(0, r.Call(10));
CHECK_EQ(0, r.Call(100));
@@ -1261,41 +1324,42 @@ WASM_EXEC_TEST(CountDown_fallthru) {
WASM_EXEC_TEST(WhileCountDown) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r,
- WASM_BLOCK(WASM_WHILE(WASM_GET_LOCAL(0),
- WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0),
- WASM_I8(1)))),
- WASM_GET_LOCAL(0)));
+ BUILD(r, WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(1)))),
+ WASM_GET_LOCAL(0));
CHECK_EQ(0, r.Call(1));
CHECK_EQ(0, r.Call(10));
CHECK_EQ(0, r.Call(100));
}
WASM_EXEC_TEST(Loop_if_break1) {
- WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B2(WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(1)),
- WASM_SET_LOCAL(0, WASM_I8(99))),
- WASM_GET_LOCAL(0)));
- CHECK_EQ(99, r.Call(0));
- CHECK_EQ(3, r.Call(3));
- CHECK_EQ(10000, r.Call(10000));
- CHECK_EQ(-29, r.Call(-29));
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
+ BUILD(r, WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(2, WASM_GET_LOCAL(1))),
+ WASM_SET_LOCAL(0, WASM_I8(99))),
+ WASM_GET_LOCAL(0));
+ CHECK_EQ(99, r.Call(0, 11));
+ CHECK_EQ(65, r.Call(3, 65));
+ CHECK_EQ(10001, r.Call(10000, 10001));
+ CHECK_EQ(-29, r.Call(-28, -29));
}
WASM_EXEC_TEST(Loop_if_break2) {
- WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B2(WASM_LOOP(WASM_BR_IF(1, WASM_GET_LOCAL(0)),
- WASM_SET_LOCAL(0, WASM_I8(99))),
- WASM_GET_LOCAL(0)));
- CHECK_EQ(99, r.Call(0));
- CHECK_EQ(3, r.Call(3));
- CHECK_EQ(10000, r.Call(10000));
- CHECK_EQ(-29, r.Call(-29));
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
+ BUILD(r, WASM_LOOP(WASM_BRV_IF(1, WASM_GET_LOCAL(1), WASM_GET_LOCAL(0)),
+ WASM_DROP, WASM_SET_LOCAL(0, WASM_I8(99))),
+ WASM_GET_LOCAL(0));
+ CHECK_EQ(99, r.Call(0, 33));
+ CHECK_EQ(3, r.Call(1, 3));
+ CHECK_EQ(10000, r.Call(99, 10000));
+ CHECK_EQ(-29, r.Call(-11, -29));
}
WASM_EXEC_TEST(Loop_if_break_fallthru) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B1(WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(1)),
+ BUILD(r, B1(WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BR(2)),
WASM_SET_LOCAL(0, WASM_I8(93)))),
WASM_GET_LOCAL(0));
CHECK_EQ(93, r.Call(0));
@@ -1304,6 +1368,17 @@ WASM_EXEC_TEST(Loop_if_break_fallthru) {
CHECK_EQ(-22, r.Call(-22));
}
+WASM_EXEC_TEST(Loop_if_break_fallthru2) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, B1(B1(WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BR(2)),
+ WASM_SET_LOCAL(0, WASM_I8(93))))),
+ WASM_GET_LOCAL(0));
+ CHECK_EQ(93, r.Call(0));
+ CHECK_EQ(3, r.Call(3));
+ CHECK_EQ(10001, r.Call(10001));
+ CHECK_EQ(-22, r.Call(-22));
+}
+
WASM_EXEC_TEST(IfBreak1) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_SEQ(WASM_BR(0), WASM_UNREACHABLE)),
@@ -1396,7 +1471,7 @@ WASM_EXEC_TEST(LoadMem_offset_oob) {
uint32_t boundary = 24 - WasmOpcodes::MemSize(machineTypes[m]);
BUILD(r, WASM_LOAD_MEM_OFFSET(machineTypes[m], 8, WASM_GET_LOCAL(0)),
- WASM_ZERO);
+ WASM_DROP, WASM_ZERO);
CHECK_EQ(0, r.Call(boundary)); // in bounds.
@@ -1484,7 +1559,8 @@ WASM_EXEC_TEST(StoreMemI32_alignment) {
for (byte i = 0; i <= 2; ++i) {
WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_STORE_MEM_ALIGNMENT(MachineType::Int32(), WASM_ZERO, i,
- WASM_GET_LOCAL(0)));
+ WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0));
module.RandomizeMemory(1111);
memory[0] = 0;
@@ -1500,7 +1576,8 @@ WASM_EXEC_TEST(StoreMemI32_offset) {
const int32_t kWritten = 0xaabbccdd;
BUILD(r, WASM_STORE_MEM_OFFSET(MachineType::Int32(), 4, WASM_GET_LOCAL(0),
- WASM_I32V_5(kWritten)));
+ WASM_I32V_5(kWritten)),
+ WASM_I32V_5(kWritten));
for (int i = 0; i < 2; ++i) {
module.RandomizeMemory(1111);
@@ -1566,18 +1643,17 @@ WASM_EXEC_TEST(MemI32_Sum) {
WasmRunner<uint32_t> r(&module, MachineType::Int32());
const byte kSum = r.AllocateLocal(kAstI32);
- BUILD(r,
- WASM_BLOCK(
- WASM_WHILE(
- WASM_GET_LOCAL(0),
- WASM_BLOCK(
- WASM_SET_LOCAL(
- kSum, WASM_I32_ADD(WASM_GET_LOCAL(kSum),
- WASM_LOAD_MEM(MachineType::Int32(),
- WASM_GET_LOCAL(0)))),
- WASM_SET_LOCAL(
- 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(4))))),
- WASM_GET_LOCAL(1)));
+ BUILD(
+ r,
+ WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_BLOCK(
+ WASM_SET_LOCAL(kSum,
+ WASM_I32_ADD(WASM_GET_LOCAL(kSum),
+ WASM_LOAD_MEM(MachineType::Int32(),
+ WASM_GET_LOCAL(0)))),
+ WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(4))))),
+ WASM_GET_LOCAL(1));
// Run 4 trials.
for (int i = 0; i < 3; ++i) {
@@ -1597,10 +1673,24 @@ WASM_EXEC_TEST(CheckMachIntsZero) {
module.AddMemoryElems<uint32_t>(kNumElems);
WasmRunner<uint32_t> r(&module, MachineType::Int32());
- BUILD(r, kExprLoop, kExprGetLocal, 0, kExprIf, kExprGetLocal, 0,
- kExprI32LoadMem, 0, 0, kExprIf, kExprI8Const, 255, kExprReturn, ARITY_1,
- kExprEnd, kExprGetLocal, 0, kExprI8Const, 4, kExprI32Sub, kExprSetLocal,
- 0, kExprBr, ARITY_1, DEPTH_0, kExprEnd, kExprEnd, kExprI8Const, 0);
+ BUILD(r, // --
+ /**/ kExprLoop, kLocalVoid, // --
+ /* */ kExprGetLocal, 0, // --
+ /* */ kExprIf, kLocalVoid, // --
+ /* */ kExprGetLocal, 0, // --
+ /* */ kExprI32LoadMem, 0, 0, // --
+ /* */ kExprIf, kLocalVoid, // --
+ /* */ kExprI8Const, 255, // --
+ /* */ kExprReturn, // --
+ /* */ kExprEnd, // --
+ /* */ kExprGetLocal, 0, // --
+ /* */ kExprI8Const, 4, // --
+ /* */ kExprI32Sub, // --
+ /* */ kExprTeeLocal, 0, // --
+ /* */ kExprBr, DEPTH_0, // --
+ /* */ kExprEnd, // --
+ /**/ kExprEnd, // --
+ /**/ kExprI8Const, 0); // --
module.BlankMemory();
CHECK_EQ(0, r.Call((kNumElems - 1) * 4));
@@ -1619,20 +1709,18 @@ WASM_EXEC_TEST(MemF32_Sum) {
WasmRunner<int32_t> r(&module, MachineType::Int32());
const byte kSum = r.AllocateLocal(kAstF32);
- BUILD(r,
- WASM_BLOCK(
- WASM_WHILE(
- WASM_GET_LOCAL(0),
- WASM_BLOCK(
- WASM_SET_LOCAL(
- kSum, WASM_F32_ADD(WASM_GET_LOCAL(kSum),
- WASM_LOAD_MEM(MachineType::Float32(),
- WASM_GET_LOCAL(0)))),
- WASM_SET_LOCAL(
- 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(4))))),
- WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO,
- WASM_GET_LOCAL(kSum)),
- WASM_GET_LOCAL(0)));
+ BUILD(
+ r,
+ WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_BLOCK(
+ WASM_SET_LOCAL(kSum,
+ WASM_F32_ADD(WASM_GET_LOCAL(kSum),
+ WASM_LOAD_MEM(MachineType::Float32(),
+ WASM_GET_LOCAL(0)))),
+ WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(4))))),
+ WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO, WASM_GET_LOCAL(kSum)),
+ WASM_GET_LOCAL(0));
CHECK_EQ(0, r.Call(4 * (kSize - 1)));
CHECK_NE(-99.25f, module.ReadMemory(&buffer[0]));
@@ -1651,19 +1739,17 @@ T GenerateAndRunFold(WasmExecutionMode execution_mode, WasmOpcode binop,
WasmRunner<int32_t> r(&module, MachineType::Int32());
const byte kAccum = r.AllocateLocal(astType);
- BUILD(r, WASM_BLOCK(
- WASM_SET_LOCAL(kAccum, WASM_LOAD_MEM(memType, WASM_ZERO)),
- WASM_WHILE(
- WASM_GET_LOCAL(0),
- WASM_BLOCK(
- WASM_SET_LOCAL(
+ BUILD(r, WASM_SET_LOCAL(kAccum, WASM_LOAD_MEM(memType, WASM_ZERO)),
+ WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_BLOCK(WASM_SET_LOCAL(
kAccum, WASM_BINOP(binop, WASM_GET_LOCAL(kAccum),
WASM_LOAD_MEM(
memType, WASM_GET_LOCAL(0)))),
WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0),
WASM_I8(sizeof(T)))))),
- WASM_STORE_MEM(memType, WASM_ZERO, WASM_GET_LOCAL(kAccum)),
- WASM_GET_LOCAL(0)));
+ WASM_STORE_MEM(memType, WASM_ZERO, WASM_GET_LOCAL(kAccum)),
+ WASM_GET_LOCAL(0));
r.Call(static_cast<int>(sizeof(T) * (size - 1)));
return module.ReadMemory(&memory[0]);
}
@@ -1689,19 +1775,20 @@ WASM_EXEC_TEST(Build_Wasm_Infinite_Loop_effect) {
WasmRunner<int32_t> r(&module, MachineType::Int32());
// Only build the graph and compile, don't run.
- BUILD(r, WASM_LOOP(WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)));
+ BUILD(r, WASM_LOOP(WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO), WASM_DROP),
+ WASM_ZERO);
}
WASM_EXEC_TEST(Unreachable0a) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B2(WASM_BRV(0, WASM_I8(9)), RET(WASM_GET_LOCAL(0))));
+ BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_I8(9)), RET(WASM_GET_LOCAL(0))));
CHECK_EQ(9, r.Call(0));
CHECK_EQ(9, r.Call(1));
}
WASM_EXEC_TEST(Unreachable0b) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B2(WASM_BRV(0, WASM_I8(7)), WASM_UNREACHABLE));
+ BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_I8(7)), WASM_UNREACHABLE));
CHECK_EQ(7, r.Call(0));
CHECK_EQ(7, r.Call(1));
}
@@ -1734,31 +1821,32 @@ TEST(Build_Wasm_UnreachableIf2) {
WASM_EXEC_TEST(Unreachable_Load) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B2(WASM_BRV(0, WASM_GET_LOCAL(0)),
- WASM_LOAD_MEM(MachineType::Int8(), WASM_GET_LOCAL(0))));
+ BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0)),
+ WASM_LOAD_MEM(MachineType::Int8(), WASM_GET_LOCAL(0))));
CHECK_EQ(11, r.Call(11));
CHECK_EQ(21, r.Call(21));
}
WASM_EXEC_TEST(Infinite_Loop_not_taken1) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B2(WASM_IF(WASM_GET_LOCAL(0), WASM_INFINITE_LOOP), WASM_I8(45)));
+ BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_INFINITE_LOOP), WASM_I8(45));
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(0));
}
WASM_EXEC_TEST(Infinite_Loop_not_taken2) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B1(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_I8(45)),
- WASM_INFINITE_LOOP)));
+ BUILD(r,
+ WASM_BLOCK_I(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_I8(45)),
+ WASM_INFINITE_LOOP)));
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(1));
}
WASM_EXEC_TEST(Infinite_Loop_not_taken2_brif) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r,
- B2(WASM_BRV_IF(0, WASM_I8(45), WASM_GET_LOCAL(0)), WASM_INFINITE_LOOP));
+ BUILD(r, WASM_BLOCK_I(WASM_BRV_IF(0, WASM_I8(45), WASM_GET_LOCAL(0)),
+ WASM_INFINITE_LOOP));
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(1));
}
@@ -1861,8 +1949,9 @@ WASM_EXEC_TEST(Int32Global) {
int32_t* global = module.AddGlobal<int32_t>(kAstI32);
WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global + p0
- BUILD(r, WASM_SET_GLOBAL(
- 0, WASM_I32_ADD(WASM_GET_GLOBAL(0), WASM_GET_LOCAL(0))));
+ BUILD(r,
+ WASM_SET_GLOBAL(0, WASM_I32_ADD(WASM_GET_GLOBAL(0), WASM_GET_LOCAL(0))),
+ WASM_ZERO);
*global = 116;
for (int i = 9; i < 444444; i += 111111) {
@@ -1883,7 +1972,8 @@ WASM_EXEC_TEST(Int32Globals_DontAlias) {
// global = global + p0
WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_SET_GLOBAL(
- g, WASM_I32_ADD(WASM_GET_GLOBAL(g), WASM_GET_LOCAL(0))));
+ g, WASM_I32_ADD(WASM_GET_GLOBAL(g), WASM_GET_LOCAL(0))),
+ WASM_GET_GLOBAL(g));
// Check that reading/writing global number {g} doesn't alter the others.
*globals[g] = 116 * g;
@@ -1891,7 +1981,8 @@ WASM_EXEC_TEST(Int32Globals_DontAlias) {
for (int i = 9; i < 444444; i += 111113) {
int32_t sum = *globals[g] + i;
for (int j = 0; j < kNumGlobals; ++j) before[j] = *globals[j];
- r.Call(i);
+ int32_t result = r.Call(i);
+ CHECK_EQ(sum, result);
for (int j = 0; j < kNumGlobals; ++j) {
int32_t expected = j == g ? sum : before[j];
CHECK_EQ(expected, *globals[j]);
@@ -1905,10 +1996,10 @@ WASM_EXEC_TEST(Float32Global) {
float* global = module.AddGlobal<float>(kAstF32);
WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global + p0
- BUILD(r, B2(WASM_SET_GLOBAL(
- 0, WASM_F32_ADD(WASM_GET_GLOBAL(0),
- WASM_F32_SCONVERT_I32(WASM_GET_LOCAL(0)))),
- WASM_ZERO));
+ BUILD(r, WASM_SET_GLOBAL(
+ 0, WASM_F32_ADD(WASM_GET_GLOBAL(0),
+ WASM_F32_SCONVERT_I32(WASM_GET_LOCAL(0)))),
+ WASM_ZERO);
*global = 1.25;
for (int i = 9; i < 4444; i += 1111) {
@@ -1923,10 +2014,10 @@ WASM_EXEC_TEST(Float64Global) {
double* global = module.AddGlobal<double>(kAstF64);
WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global + p0
- BUILD(r, B2(WASM_SET_GLOBAL(
- 0, WASM_F64_ADD(WASM_GET_GLOBAL(0),
- WASM_F64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
- WASM_ZERO));
+ BUILD(r, WASM_SET_GLOBAL(
+ 0, WASM_F64_ADD(WASM_GET_GLOBAL(0),
+ WASM_F64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
+ WASM_ZERO);
*global = 1.25;
for (int i = 9; i < 4444; i += 1111) {
@@ -1948,14 +2039,11 @@ WASM_EXEC_TEST(MixedGlobals) {
WasmRunner<int32_t> r(&module, MachineType::Int32());
- BUILD(
- r,
- WASM_BLOCK(
- WASM_SET_GLOBAL(1, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
- WASM_SET_GLOBAL(2, WASM_LOAD_MEM(MachineType::Uint32(), WASM_ZERO)),
- WASM_SET_GLOBAL(3, WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO)),
- WASM_SET_GLOBAL(4, WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)),
- WASM_ZERO));
+ BUILD(r, WASM_SET_GLOBAL(1, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
+ WASM_SET_GLOBAL(2, WASM_LOAD_MEM(MachineType::Uint32(), WASM_ZERO)),
+ WASM_SET_GLOBAL(3, WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO)),
+ WASM_SET_GLOBAL(4, WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)),
+ WASM_ZERO);
memory[0] = 0xaa;
memory[1] = 0xcc;
@@ -2004,8 +2092,8 @@ WASM_EXEC_TEST(CallF32StackParameter) {
// Build the calling function.
WasmRunner<float> r(&module);
- BUILD(r, WASM_CALL_FUNCTIONN(
- 19, index, WASM_F32(1.0f), WASM_F32(2.0f), WASM_F32(4.0f),
+ BUILD(r, WASM_CALL_FUNCTION(
+ index, WASM_F32(1.0f), WASM_F32(2.0f), WASM_F32(4.0f),
WASM_F32(8.0f), WASM_F32(16.0f), WASM_F32(32.0f),
WASM_F32(64.0f), WASM_F32(128.0f), WASM_F32(256.0f),
WASM_F32(1.5f), WASM_F32(2.5f), WASM_F32(4.5f), WASM_F32(8.5f),
@@ -2028,13 +2116,13 @@ WASM_EXEC_TEST(CallF64StackParameter) {
// Build the calling function.
WasmRunner<double> r(&module);
- BUILD(r, WASM_CALL_FUNCTIONN(19, index, WASM_F64(1.0), WASM_F64(2.0),
- WASM_F64(4.0), WASM_F64(8.0), WASM_F64(16.0),
- WASM_F64(32.0), WASM_F64(64.0), WASM_F64(128.0),
- WASM_F64(256.0), WASM_F64(1.5), WASM_F64(2.5),
- WASM_F64(4.5), WASM_F64(8.5), WASM_F64(16.5),
- WASM_F64(32.5), WASM_F64(64.5), WASM_F64(128.5),
- WASM_F64(256.5), WASM_F64(512.5)));
+ BUILD(r, WASM_CALL_FUNCTION(index, WASM_F64(1.0), WASM_F64(2.0),
+ WASM_F64(4.0), WASM_F64(8.0), WASM_F64(16.0),
+ WASM_F64(32.0), WASM_F64(64.0), WASM_F64(128.0),
+ WASM_F64(256.0), WASM_F64(1.5), WASM_F64(2.5),
+ WASM_F64(4.5), WASM_F64(8.5), WASM_F64(16.5),
+ WASM_F64(32.5), WASM_F64(64.5), WASM_F64(128.5),
+ WASM_F64(256.5), WASM_F64(512.5)));
float result = r.Call();
CHECK_EQ(256.5, result);
@@ -2075,7 +2163,7 @@ WASM_EXEC_TEST(Call_Int32Add) {
// Build the caller function.
WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
- BUILD(r, WASM_CALL_FUNCTION2(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
@@ -2097,7 +2185,7 @@ WASM_EXEC_TEST(Call_Float32Sub) {
// Builder the caller function.
WasmRunner<float> r(&module, MachineType::Float32(), MachineType::Float32());
- BUILD(r, WASM_CALL_FUNCTION2(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i - *j, r.Call(*i, *j)); }
@@ -2109,13 +2197,11 @@ WASM_EXEC_TEST(Call_Float64Sub) {
double* memory = module.AddMemoryElems<double>(16);
WasmRunner<int32_t> r(&module);
- BUILD(r,
- WASM_BLOCK(WASM_STORE_MEM(
- MachineType::Float64(), WASM_ZERO,
- WASM_F64_SUB(
- WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO),
- WASM_LOAD_MEM(MachineType::Float64(), WASM_I8(8)))),
- WASM_I8(107)));
+ BUILD(r, WASM_STORE_MEM(
+ MachineType::Float64(), WASM_ZERO,
+ WASM_F64_SUB(WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO),
+ WASM_LOAD_MEM(MachineType::Float64(), WASM_I8(8)))),
+ WASM_I8(107));
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
@@ -2153,7 +2239,7 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
int num_params = static_cast<int>(arraysize(mixed)) - start;
for (int which = 0; which < num_params; ++which) {
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
TestingModule module(execution_mode);
module.AddMemory(1024);
@@ -2189,8 +2275,7 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
}
// Call the selector function.
- ADD_CODE(code, kExprCallFunction, static_cast<byte>(num_params),
- static_cast<byte>(index));
+ ADD_CODE(code, kExprCallFunction, static_cast<byte>(index));
// Store the result in memory.
ADD_CODE(code,
@@ -2232,71 +2317,149 @@ WASM_EXEC_TEST(AddCall) {
WasmRunner<int32_t> r(&module, MachineType::Int32());
byte local = r.AllocateLocal(kAstI32);
- BUILD(r, B2(WASM_SET_LOCAL(local, WASM_I8(99)),
- WASM_I32_ADD(
- WASM_CALL_FUNCTION2(t1.function_index(), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0)),
- WASM_CALL_FUNCTION2(t1.function_index(), WASM_GET_LOCAL(1),
- WASM_GET_LOCAL(local)))));
+ BUILD(r, WASM_SET_LOCAL(local, WASM_I8(99)),
+ WASM_I32_ADD(WASM_CALL_FUNCTION(t1.function_index(), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0)),
+ WASM_CALL_FUNCTION(t1.function_index(), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(local))));
CHECK_EQ(198, r.Call(0));
CHECK_EQ(200, r.Call(1));
CHECK_EQ(100, r.Call(-49));
}
-WASM_EXEC_TEST(CountDown_expr) {
- WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, WASM_LOOP(
- WASM_IF(WASM_NOT(WASM_GET_LOCAL(0)),
- WASM_BREAKV(1, WASM_GET_LOCAL(0))),
- WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(1))),
- WASM_CONTINUE(0)));
- CHECK_EQ(0, r.Call(1));
- CHECK_EQ(0, r.Call(10));
- CHECK_EQ(0, r.Call(100));
+WASM_EXEC_TEST(MultiReturnSub) {
+ FLAG_wasm_mv_prototype = true;
+ LocalType storage[] = {kAstI32, kAstI32, kAstI32, kAstI32};
+ FunctionSig sig_ii_ii(2, 2, storage);
+ TestingModule module(execution_mode);
+ WasmFunctionCompiler t1(&sig_ii_ii, &module);
+ BUILD(t1, WASM_GET_LOCAL(1), WASM_GET_LOCAL(0));
+ t1.CompileAndAdd();
+
+ WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
+ BUILD(r, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), kExprCallFunction, 0,
+ kExprI32Sub);
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*j) -
+ static_cast<uint32_t>(*i));
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
+ }
+}
+
+template <typename T>
+void RunMultiReturnSelect(WasmExecutionMode execution_mode, LocalType type,
+ const T* inputs) {
+ FLAG_wasm_mv_prototype = true;
+ LocalType storage[] = {type, type, type, type, type, type};
+ const size_t kNumReturns = 2;
+ const size_t kNumParams = arraysize(storage) - kNumReturns;
+ FunctionSig sig(kNumReturns, kNumParams, storage);
+
+ for (size_t i = 0; i < kNumParams; i++) {
+ for (size_t j = 0; j < kNumParams; j++) {
+ for (int k = 0; k < 2; k++) {
+ TestingModule module(execution_mode);
+ WasmFunctionCompiler r1(&sig, &module);
+
+ BUILD(r1, WASM_GET_LOCAL(i), WASM_GET_LOCAL(j));
+ r1.CompileAndAdd();
+
+ MachineType machine_type = WasmOpcodes::MachineTypeFor(type);
+ WasmRunner<T> r2(&module, machine_type, machine_type, machine_type,
+ machine_type);
+
+ if (k == 0) {
+ BUILD(r2, WASM_CALL_FUNCTION(r1.function_index(), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), WASM_GET_LOCAL(2),
+ WASM_GET_LOCAL(3)),
+ WASM_DROP);
+ } else {
+ BUILD(r2, WASM_CALL_FUNCTION(r1.function_index(), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), WASM_GET_LOCAL(2),
+ WASM_GET_LOCAL(3)),
+ kExprSetLocal, 0, WASM_DROP, WASM_GET_LOCAL(0));
+ }
+
+ T expected = inputs[k == 0 ? i : j];
+ CHECK_EQ(expected, r2.Call(inputs[0], inputs[1], inputs[2], inputs[3]));
+ }
+ }
+ }
+}
+
+WASM_EXEC_TEST(MultiReturnSelect_i32) {
+ static const int32_t inputs[] = {3333333, 4444444, -55555555, -7777777};
+ RunMultiReturnSelect<int32_t>(execution_mode, kAstI32, inputs);
+}
+
+WASM_EXEC_TEST(MultiReturnSelect_f32) {
+ static const float inputs[] = {33.33333f, 444.4444f, -55555.555f, -77777.77f};
+ RunMultiReturnSelect<float>(execution_mode, kAstF32, inputs);
+}
+
+WASM_EXEC_TEST(MultiReturnSelect_i64) {
+#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
+ // TODO(titzer): implement int64-lowering for multiple return values
+ static const int64_t inputs[] = {33333338888, 44444446666, -555555553333,
+ -77777771111};
+ RunMultiReturnSelect<int64_t>(execution_mode, kAstI64, inputs);
+#endif
+}
+
+WASM_EXEC_TEST(MultiReturnSelect_f64) {
+ static const double inputs[] = {3.333333, 44444.44, -55.555555, -7777.777};
+ RunMultiReturnSelect<double>(execution_mode, kAstF64, inputs);
}
WASM_EXEC_TEST(ExprBlock2a) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B2(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_I8(1))), WASM_I8(1)));
+ BUILD(r, WASM_BLOCK_I(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_I8(1))),
+ WASM_I8(1)));
CHECK_EQ(1, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
WASM_EXEC_TEST(ExprBlock2b) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B2(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_I8(1))), WASM_I8(2)));
+ BUILD(r, WASM_BLOCK_I(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_I8(1))),
+ WASM_I8(2)));
CHECK_EQ(2, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
WASM_EXEC_TEST(ExprBlock2c) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B2(WASM_BRV_IF(0, WASM_I8(1), WASM_GET_LOCAL(0)), WASM_I8(1)));
+ BUILD(r, WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I8(1), WASM_GET_LOCAL(0)),
+ WASM_I8(1)));
CHECK_EQ(1, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
WASM_EXEC_TEST(ExprBlock2d) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B2(WASM_BRV_IF(0, WASM_I8(1), WASM_GET_LOCAL(0)), WASM_I8(2)));
+ BUILD(r, WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I8(1), WASM_GET_LOCAL(0)),
+ WASM_I8(2)));
CHECK_EQ(2, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
WASM_EXEC_TEST(ExprBlock_ManualSwitch) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, WASM_BLOCK(WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(1)),
- WASM_BRV(1, WASM_I8(11))),
- WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(2)),
- WASM_BRV(1, WASM_I8(12))),
- WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(3)),
- WASM_BRV(1, WASM_I8(13))),
- WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(4)),
- WASM_BRV(1, WASM_I8(14))),
- WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(5)),
- WASM_BRV(1, WASM_I8(15))),
- WASM_I8(99)));
+ BUILD(r, WASM_BLOCK_I(WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(1)),
+ WASM_BRV(1, WASM_I8(11))),
+ WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(2)),
+ WASM_BRV(1, WASM_I8(12))),
+ WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(3)),
+ WASM_BRV(1, WASM_I8(13))),
+ WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(4)),
+ WASM_BRV(1, WASM_I8(14))),
+ WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(5)),
+ WASM_BRV(1, WASM_I8(15))),
+ WASM_I8(99)));
CHECK_EQ(99, r.Call(0));
CHECK_EQ(11, r.Call(1));
CHECK_EQ(12, r.Call(2));
@@ -2308,17 +2471,18 @@ WASM_EXEC_TEST(ExprBlock_ManualSwitch) {
WASM_EXEC_TEST(ExprBlock_ManualSwitch_brif) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, WASM_BLOCK(WASM_BRV_IF(0, WASM_I8(11),
+ BUILD(r,
+ WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I8(11),
WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(1))),
- WASM_BRV_IF(0, WASM_I8(12),
+ WASM_BRV_IFD(0, WASM_I8(12),
WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(2))),
- WASM_BRV_IF(0, WASM_I8(13),
+ WASM_BRV_IFD(0, WASM_I8(13),
WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(3))),
- WASM_BRV_IF(0, WASM_I8(14),
+ WASM_BRV_IFD(0, WASM_I8(14),
WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(4))),
- WASM_BRV_IF(0, WASM_I8(15),
+ WASM_BRV_IFD(0, WASM_I8(15),
WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(5))),
- WASM_I8(99)));
+ WASM_I8(99)));
CHECK_EQ(99, r.Call(0));
CHECK_EQ(11, r.Call(1));
CHECK_EQ(12, r.Call(2));
@@ -2328,14 +2492,14 @@ WASM_EXEC_TEST(ExprBlock_ManualSwitch_brif) {
CHECK_EQ(99, r.Call(6));
}
-WASM_EXEC_TEST(nested_ifs) {
+WASM_EXEC_TEST(If_nested) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
MachineType::Int32());
- BUILD(r, WASM_IF_ELSE(
+ BUILD(r, WASM_IF_ELSE_I(
WASM_GET_LOCAL(0),
- WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_I8(11), WASM_I8(12)),
- WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_I8(13), WASM_I8(14))));
+ WASM_IF_ELSE_I(WASM_GET_LOCAL(1), WASM_I8(11), WASM_I8(12)),
+ WASM_IF_ELSE_I(WASM_GET_LOCAL(1), WASM_I8(13), WASM_I8(14))));
CHECK_EQ(11, r.Call(1, 1));
CHECK_EQ(12, r.Call(1, 0));
@@ -2346,8 +2510,9 @@ WASM_EXEC_TEST(nested_ifs) {
WASM_EXEC_TEST(ExprBlock_if) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
- BUILD(r, B1(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(11)),
- WASM_BRV(1, WASM_I8(14)))));
+ BUILD(r,
+ WASM_BLOCK_I(WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(11)),
+ WASM_BRV(1, WASM_I8(14)))));
CHECK_EQ(11, r.Call(1));
CHECK_EQ(14, r.Call(0));
@@ -2357,29 +2522,12 @@ WASM_EXEC_TEST(ExprBlock_nested_ifs) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
MachineType::Int32());
- BUILD(r, WASM_BLOCK(WASM_IF_ELSE(
- WASM_GET_LOCAL(0),
- WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(0, WASM_I8(11)),
- WASM_BRV(1, WASM_I8(12))),
- WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(0, WASM_I8(13)),
- WASM_BRV(1, WASM_I8(14))))));
-
- CHECK_EQ(11, r.Call(1, 1));
- CHECK_EQ(12, r.Call(1, 0));
- CHECK_EQ(13, r.Call(0, 1));
- CHECK_EQ(14, r.Call(0, 0));
-}
-
-WASM_EXEC_TEST(ExprLoop_nested_ifs) {
- WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
- MachineType::Int32());
-
- BUILD(r, WASM_LOOP(WASM_IF_ELSE(
+ BUILD(r, WASM_BLOCK_I(WASM_IF_ELSE_I(
WASM_GET_LOCAL(0),
- WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(1, WASM_I8(11)),
- WASM_BRV(3, WASM_I8(12))),
- WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(1, WASM_I8(13)),
- WASM_BRV(3, WASM_I8(14))))));
+ WASM_IF_ELSE_I(WASM_GET_LOCAL(1), WASM_BRV(0, WASM_I8(11)),
+ WASM_BRV(1, WASM_I8(12))),
+ WASM_IF_ELSE_I(WASM_GET_LOCAL(1), WASM_BRV(0, WASM_I8(13)),
+ WASM_BRV(1, WASM_I8(14))))));
CHECK_EQ(11, r.Call(1, 1));
CHECK_EQ(12, r.Call(1, 0));
@@ -2681,7 +2829,7 @@ static void CompileCallIndirectMany(LocalType param) {
// with many many parameters.
TestSignatures sigs;
for (byte num_params = 0; num_params < 40; ++num_params) {
- v8::base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
HandleScope scope(CcTest::InitIsolateOnce());
TestingModule module(kExecuteCompiled);
@@ -2694,11 +2842,11 @@ static void CompileCallIndirectMany(LocalType param) {
WasmFunctionCompiler t(sig, &module);
std::vector<byte> code;
- ADD_CODE(code, kExprI8Const, 0);
for (byte p = 0; p < num_params; ++p) {
ADD_CODE(code, kExprGetLocal, p);
}
- ADD_CODE(code, kExprCallIndirect, static_cast<byte>(num_params), 1);
+ ADD_CODE(code, kExprI8Const, 0);
+ ADD_CODE(code, kExprCallIndirect, 1);
t.Build(&code[0], &code[0] + code.size());
t.Compile();
@@ -2714,7 +2862,8 @@ TEST(Compile_Wasm_CallIndirect_Many_f64) { CompileCallIndirectMany(kAstF64); }
WASM_EXEC_TEST(Int32RemS_dead) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
MachineType::Int32());
- BUILD(r, WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)), WASM_ZERO);
+ BUILD(r, WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)), WASM_DROP,
+ WASM_ZERO);
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(133, 100));
CHECK_EQ(0, r.Call(kMin, -1));
diff --git a/deps/v8/test/cctest/wasm/test-wasm-function-name-table.cc b/deps/v8/test/cctest/wasm/test-wasm-function-name-table.cc
index 1ae78dcb4e..9a4394204b 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-function-name-table.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-function-name-table.cc
@@ -41,9 +41,9 @@ void testFunctionNameTable(Vector<Vector<const char>> names) {
name.start() + name.length());
// Make every second function name null-terminated.
if (func_index % 2) all_names.push_back('\0');
- module.functions.push_back({nullptr, 0, 0,
- static_cast<uint32_t>(name_offset),
- static_cast<uint32_t>(name.length()), 0, 0});
+ module.functions.push_back(
+ {nullptr, 0, 0, static_cast<uint32_t>(name_offset),
+ static_cast<uint32_t>(name.length()), 0, 0, false, false});
++func_index;
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index f2a848161b..2b51287e87 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -6,8 +6,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
-#include "test/cctest/wasm/test-signatures.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
using namespace v8::base;
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
index 30f5d48a07..d4a2b4fe0b 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
@@ -6,8 +6,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
-#include "test/cctest/wasm/test-signatures.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
using namespace v8::base;
using namespace v8::internal;
@@ -104,8 +104,9 @@ TEST(IllegalLoad) {
WasmFunctionCompiler comp1(sigs.v_v(), &module, ArrayVector("mem_oob"));
// Set the execution context, such that a runtime error can be thrown.
comp1.SetModuleContext();
- BUILD(comp1, WASM_IF(WASM_ONE,
- WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V_1(-3))));
+ BUILD(comp1, WASM_IF(WASM_ONE, WASM_SEQ(WASM_LOAD_MEM(MachineType::Int32(),
+ WASM_I32V_1(-3)),
+ WASM_DROP)));
uint32_t wasm_index = comp1.CompileAndAdd();
WasmFunctionCompiler comp2(sigs.v_v(), &module, ArrayVector("call_mem_oob"));
@@ -131,7 +132,7 @@ TEST(IllegalLoad) {
// The column is 1-based, so add 1 to the actual byte offset.
ExceptionInfo expected_exceptions[] = {
- {"<WASM UNNAMED>", static_cast<int>(wasm_index), 7}, // --
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index), 8}, // --
{"<WASM UNNAMED>", static_cast<int>(wasm_index_2), 3}, // --
{"callFn", 1, 24} // --
};
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index 0a11fedfd1..93fcb89dba 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -11,8 +11,8 @@
#include <memory>
-#include "src/base/accounting-allocator.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/zone/accounting-allocator.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/int64-lowering.h"
@@ -29,7 +29,7 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
-#include "src/zone.h"
+#include "src/zone/zone.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/call-tester.h"
@@ -181,7 +181,7 @@ class TestingModule : public ModuleEnv {
module_.functions.reserve(kMaxFunctions);
}
uint32_t index = static_cast<uint32_t>(module->functions.size());
- module_.functions.push_back({sig, index, 0, 0, 0, 0, 0});
+ module_.functions.push_back({sig, index, 0, 0, 0, 0, 0, false, false});
instance->function_code.push_back(code);
if (interpreter_) {
const WasmFunction* function = &module->functions.back();
@@ -208,7 +208,7 @@ class TestingModule : public ModuleEnv {
Handle<String> name = isolate_->factory()->NewStringFromStaticChars("main");
Handle<JSObject> module_object = Handle<JSObject>(0, isolate_);
Handle<Code> code = instance->function_code[index];
- WasmJs::InstallWasmFunctionMap(isolate_, isolate_->native_context());
+ WasmJs::InstallWasmMapsIfNeeded(isolate_, isolate_->native_context());
Handle<Code> ret_code =
compiler::CompileJSToWasmWrapper(isolate_, this, code, index);
FunctionSig* funcSig = this->module->functions[index].sig;
@@ -230,7 +230,7 @@ class TestingModule : public ModuleEnv {
void AddIndirectFunctionTable(uint16_t* functions, uint32_t table_size) {
module_.function_tables.push_back(
- {table_size, table_size, std::vector<uint16_t>()});
+ {table_size, table_size, std::vector<int32_t>(), false, false});
for (uint32_t i = 0; i < table_size; ++i) {
module_.function_tables.back().values.push_back(functions[i]);
}
@@ -259,7 +259,7 @@ class TestingModule : public ModuleEnv {
WasmModule module_;
WasmModuleInstance instance_;
Isolate* isolate_;
- v8::base::AccountingAllocator allocator_;
+ v8::internal::AccountingAllocator allocator_;
uint32_t global_offset;
V8_ALIGNED(8) byte global_data[kMaxGlobalsSize]; // preallocated global data.
WasmInterpreter* interpreter_;
@@ -267,7 +267,8 @@ class TestingModule : public ModuleEnv {
const WasmGlobal* AddGlobal(LocalType type) {
byte size = WasmOpcodes::MemSize(WasmOpcodes::MachineTypeFor(type));
global_offset = (global_offset + size - 1) & ~(size - 1); // align
- module_.globals.push_back({0, 0, type, global_offset, false});
+ module_.globals.push_back(
+ {type, true, NO_INIT, global_offset, false, false});
global_offset += size;
// limit number of globals.
CHECK_LT(global_offset, kMaxGlobalsSize);
@@ -283,6 +284,13 @@ inline void TestBuildingGraph(Zone* zone, JSGraph* jsgraph, ModuleEnv* module,
DecodeResult result =
BuildTFGraph(zone->allocator(), &builder, module, sig, start, end);
if (result.failed()) {
+ if (!FLAG_trace_wasm_decoder) {
+ // Retry the compilation with the tracing flag on, to help in debugging.
+ FLAG_trace_wasm_decoder = true;
+ result =
+ BuildTFGraph(zone->allocator(), &builder, module, sig, start, end);
+ }
+
ptrdiff_t pc = result.error_pc - result.start;
ptrdiff_t pt = result.error_pt - result.start;
std::ostringstream str;
@@ -415,7 +423,8 @@ class WasmFunctionWrapper : public HandleAndZoneScope,
r.LowerGraph();
}
- CompilationInfo info(ArrayVector("testing"), isolate, graph()->zone());
+ CompilationInfo info(ArrayVector("testing"), isolate, graph()->zone(),
+ Code::ComputeFlags(Code::STUB));
code_ =
Pipeline::GenerateCodeForTesting(&info, descriptor, graph(), nullptr);
CHECK(!code_.is_null());
@@ -748,7 +757,7 @@ class WasmRunner {
WasmInterpreter* interpreter() { return compiler_.interpreter_; }
protected:
- v8::base::AccountingAllocator allocator_;
+ v8::internal::AccountingAllocator allocator_;
Zone zone;
bool compiled_;
LocalType storage_[WASM_RUNNER_MAX_NUM_PARAMETERS];
diff --git a/deps/v8/test/common/DEPS b/deps/v8/test/common/DEPS
new file mode 100644
index 0000000000..3e73aa244f
--- /dev/null
+++ b/deps/v8/test/common/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+src",
+]
diff --git a/deps/v8/test/cctest/wasm/test-signatures.h b/deps/v8/test/common/wasm/test-signatures.h
index 95f24cbb93..3bf53f6867 100644
--- a/deps/v8/test/cctest/wasm/test-signatures.h
+++ b/deps/v8/test/common/wasm/test-signatures.h
@@ -30,7 +30,9 @@ class TestSignatures {
sig_l_l(1, 1, kLongTypes4),
sig_l_ll(1, 2, kLongTypes4),
sig_i_ll(1, 2, kIntLongTypes4),
+ sig_f_f(1, 1, kFloatTypes4),
sig_f_ff(1, 2, kFloatTypes4),
+ sig_d_d(1, 1, kDoubleTypes4),
sig_d_dd(1, 2, kDoubleTypes4),
sig_v_v(0, 0, kIntTypes4),
sig_v_i(0, 1, kIntTypes4),
@@ -67,7 +69,9 @@ class TestSignatures {
FunctionSig* l_ll() { return &sig_l_ll; }
FunctionSig* i_ll() { return &sig_i_ll; }
+ FunctionSig* f_f() { return &sig_f_f; }
FunctionSig* f_ff() { return &sig_f_ff; }
+ FunctionSig* d_d() { return &sig_d_d; }
FunctionSig* d_dd() { return &sig_d_dd; }
FunctionSig* v_v() { return &sig_v_v; }
@@ -110,7 +114,9 @@ class TestSignatures {
FunctionSig sig_l_ll;
FunctionSig sig_i_ll;
+ FunctionSig sig_f_f;
FunctionSig sig_f_ff;
+ FunctionSig sig_d_d;
FunctionSig sig_d_dd;
FunctionSig sig_v_v;
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
new file mode 100644
index 0000000000..15c3ef433f
--- /dev/null
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -0,0 +1,231 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/common/wasm/wasm-module-runner.h"
+
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/property-descriptor.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-result.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace testing {
+
+uint32_t GetMinModuleMemSize(const WasmModule* module) {
+ return WasmModule::kPageSize * module->min_mem_pages;
+}
+
+const WasmModule* DecodeWasmModuleForTesting(Isolate* isolate, Zone* zone,
+ ErrorThrower* thrower,
+ const byte* module_start,
+ const byte* module_end,
+ ModuleOrigin origin) {
+ // Decode the module, but don't verify function bodies, since we'll
+ // be compiling them anyway.
+ ModuleResult decoding_result =
+ DecodeWasmModule(isolate, zone, module_start, module_end, false, origin);
+
+ std::unique_ptr<const WasmModule> module(decoding_result.val);
+ if (decoding_result.failed()) {
+ // Module verification failed. throw.
+ thrower->Error("WASM.compileRun() failed: %s",
+ decoding_result.error_msg.get());
+ return nullptr;
+ }
+
+ if (thrower->error()) return nullptr;
+ return module.release();
+}
+
+const Handle<JSObject> InstantiateModuleForTesting(Isolate* isolate,
+ ErrorThrower* thrower,
+ const WasmModule* module) {
+ CHECK(module != nullptr);
+
+ if (module->import_table.size() > 0) {
+ thrower->Error("Not supported: module has imports.");
+ }
+ if (module->export_table.size() == 0) {
+ thrower->Error("Not supported: module has no exports.");
+ }
+ if (thrower->error()) return Handle<JSObject>::null();
+
+ // Although we decoded the module for some pre-validation, run the bytes
+ // again through the normal pipeline.
+ MaybeHandle<JSObject> module_object = CreateModuleObjectFromBytes(
+ isolate, module->module_start, module->module_end, thrower,
+ ModuleOrigin::kWasmOrigin);
+ if (module_object.is_null()) {
+ thrower->Error("Module pre-validation failed.");
+ return Handle<JSObject>::null();
+ }
+ MaybeHandle<JSObject> maybe_instance = WasmModule::Instantiate(
+ isolate, thrower, module_object.ToHandleChecked(),
+ Handle<JSReceiver>::null(), Handle<JSArrayBuffer>::null());
+ Handle<JSObject> instance;
+ if (!maybe_instance.ToHandle(&instance)) {
+ return Handle<JSObject>::null();
+ }
+ return instance;
+}
+
+const Handle<JSObject> CompileInstantiateWasmModuleForTesting(
+ Isolate* isolate, Zone* zone, const byte* module_start,
+ const byte* module_end, ModuleOrigin origin) {
+ ErrorThrower thrower(isolate, "CompileInstantiateWasmModule");
+ std::unique_ptr<const WasmModule> module(DecodeWasmModuleForTesting(
+ isolate, zone, &thrower, module_start, module_end, origin));
+
+ if (module == nullptr) {
+ thrower.Error("Wasm module decode failed");
+ return Handle<JSObject>::null();
+ }
+ return InstantiateModuleForTesting(isolate, &thrower, module.get());
+}
+
+int32_t RunWasmModuleForTesting(Isolate* isolate, Handle<JSObject> instance,
+ int argc, Handle<Object> argv[],
+ ModuleOrigin origin) {
+ ErrorThrower thrower(isolate, "RunWasmModule");
+ const char* f_name = origin == ModuleOrigin::kAsmJsOrigin ? "caller" : "main";
+ return CallWasmFunctionForTesting(isolate, instance, &thrower, f_name, argc,
+ argv, origin);
+}
+
+int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
+ const byte* module_end, ModuleOrigin origin) {
+ HandleScope scope(isolate);
+ Zone zone(isolate->allocator());
+
+ Handle<JSObject> instance = CompileInstantiateWasmModuleForTesting(
+ isolate, &zone, module_start, module_end, origin);
+ if (instance.is_null()) {
+ return -1;
+ }
+ return RunWasmModuleForTesting(isolate, instance, 0, nullptr, origin);
+}
+
+int32_t InterpretWasmModule(Isolate* isolate, ErrorThrower* thrower,
+ const WasmModule* module, int function_index,
+ WasmVal* args) {
+ CHECK(module != nullptr);
+
+ Zone zone(isolate->allocator());
+ v8::internal::HandleScope scope(isolate);
+
+ if (module->import_table.size() > 0) {
+ thrower->Error("Not supported: module has imports.");
+ }
+ if (module->export_table.size() == 0) {
+ thrower->Error("Not supported: module has no exports.");
+ }
+
+ if (thrower->error()) return -1;
+
+ ModuleEnv module_env;
+ module_env.module = module;
+ module_env.origin = module->origin;
+
+ for (size_t i = 0; i < module->functions.size(); i++) {
+ FunctionBody body = {
+ &module_env, module->functions[i].sig, module->module_start,
+ module->module_start + module->functions[i].code_start_offset,
+ module->module_start + module->functions[i].code_end_offset};
+ DecodeResult result = VerifyWasmCode(isolate->allocator(), body);
+ if (result.failed()) {
+ thrower->Error("Function did not verify");
+ return -1;
+ }
+ }
+
+ // The code verifies, we create an instance to run it in the interpreter.
+ WasmModuleInstance instance(module);
+ instance.context = isolate->native_context();
+ instance.mem_size = GetMinModuleMemSize(module);
+ // TODO(ahaas): Move memory allocation to wasm-module.cc for better
+ // encapsulation.
+ instance.mem_start =
+ static_cast<byte*>(calloc(GetMinModuleMemSize(module), 1));
+ instance.globals_start = nullptr;
+ module_env.instance = &instance;
+
+ WasmInterpreter interpreter(&instance, isolate->allocator());
+
+ WasmInterpreter::Thread* thread = interpreter.GetThread(0);
+ thread->Reset();
+ thread->PushFrame(&(module->functions[function_index]), args);
+ WasmInterpreter::State interpreter_result = thread->Run();
+ if (instance.mem_start) {
+ free(instance.mem_start);
+ }
+ if (interpreter_result == WasmInterpreter::FINISHED) {
+ WasmVal val = thread->GetReturnValue();
+ return val.to<int32_t>();
+ } else if (thread->state() == WasmInterpreter::TRAPPED) {
+ return 0xdeadbeef;
+ } else {
+ thrower->Error(
+ "Interpreter did not finish execution within its step bound");
+ return -1;
+ }
+}
+
+int32_t CallWasmFunctionForTesting(Isolate* isolate, Handle<JSObject> instance,
+ ErrorThrower* thrower, const char* name,
+ int argc, Handle<Object> argv[],
+ ModuleOrigin origin) {
+ Handle<JSObject> exports_object;
+ if (origin == ModuleOrigin::kAsmJsOrigin) {
+ exports_object = instance;
+ } else {
+ Handle<Name> exports = isolate->factory()->InternalizeUtf8String("exports");
+ exports_object = Handle<JSObject>::cast(
+ JSObject::GetProperty(instance, exports).ToHandleChecked());
+ }
+ Handle<Name> main_name = isolate->factory()->NewStringFromAsciiChecked(name);
+ PropertyDescriptor desc;
+ Maybe<bool> property_found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, exports_object, main_name, &desc);
+ if (!property_found.FromMaybe(false)) return -1;
+
+ Handle<JSFunction> main_export = Handle<JSFunction>::cast(desc.value());
+
+ // Call the JS function.
+ Handle<Object> undefined = isolate->factory()->undefined_value();
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate, main_export, undefined, argc, argv);
+
+ // The result should be a number.
+ if (retval.is_null()) {
+ thrower->Error("WASM.compileRun() failed: Invocation was null");
+ return -1;
+ }
+ Handle<Object> result = retval.ToHandleChecked();
+ if (result->IsSmi()) {
+ return Smi::cast(*result)->value();
+ }
+ if (result->IsHeapNumber()) {
+ return static_cast<int32_t>(HeapNumber::cast(*result)->value());
+ }
+ thrower->Error("WASM.compileRun() failed: Return value should be number");
+ return -1;
+}
+
+void SetupIsolateForWasmModule(Isolate* isolate) {
+ WasmJs::InstallWasmMapsIfNeeded(isolate, isolate->native_context());
+ WasmJs::InstallWasmModuleSymbolIfNeeded(isolate, isolate->global_object(),
+ isolate->native_context());
+}
+
+} // namespace testing
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.h b/deps/v8/test/common/wasm/wasm-module-runner.h
new file mode 100644
index 0000000000..780d23e06f
--- /dev/null
+++ b/deps/v8/test/common/wasm/wasm-module-runner.h
@@ -0,0 +1,66 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MODULE_RUNNER_H_
+#define V8_WASM_MODULE_RUNNER_H_
+
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-result.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace testing {
+
+// Decodes the given encoded module.
+const WasmModule* DecodeWasmModuleForTesting(Isolate* isolate, Zone* zone,
+ ErrorThrower* thrower,
+ const byte* module_start,
+ const byte* module_end,
+ ModuleOrigin origin);
+
+// Instantiates a module without any imports and exports.
+const Handle<JSObject> InstantiateModuleForTesting(Isolate* isolate,
+ ErrorThrower* thrower,
+ const WasmModule* module);
+
+int32_t CallWasmFunctionForTesting(Isolate* isolate, Handle<JSObject> instance,
+ ErrorThrower* thrower, const char* name,
+ int argc, Handle<Object> argv[],
+ ModuleOrigin origin);
+
+// Decode, verify, and run the function labeled "main" in the
+// given encoded module. The module should have no imports.
+int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
+ const byte* module_end, ModuleOrigin origin);
+
+// Interprets the given module, starting at the function specified by
+// {function_index}. The return type of the function has to be int32. The module
+// should not have any imports or exports
+int32_t InterpretWasmModule(Isolate* isolate, ErrorThrower* thrower,
+ const WasmModule* module, int function_index,
+ WasmVal* args);
+
+// Compiles WasmModule bytes and return an instance of the compiled module.
+const Handle<JSObject> CompileInstantiateWasmModuleForTesting(
+ Isolate* isolate, Zone* zone, const byte* module_start,
+ const byte* module_end, ModuleOrigin origin);
+
+// Runs the module instance with arguments.
+int32_t RunWasmModuleForTesting(Isolate* isolate, Handle<JSObject> instance,
+ int argc, Handle<Object> argv[],
+ ModuleOrigin origin);
+// Install function map, module symbol for testing
+void SetupIsolateForWasmModule(Isolate* isolate);
+} // namespace testing
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MODULE_RUNNER_H_
diff --git a/deps/v8/test/fuzzer/fuzzer.gyp b/deps/v8/test/fuzzer/fuzzer.gyp
index c7c4cb47ba..f8629f6030 100644
--- a/deps/v8/test/fuzzer/fuzzer.gyp
+++ b/deps/v8/test/fuzzer/fuzzer.gyp
@@ -51,7 +51,7 @@
'target_name': 'parser_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- 'fuzzer_support',
+ 'fuzzer_support_nocomponent',
],
'include_dirs': [
'../..',
@@ -110,6 +110,8 @@
],
'sources': [ ### gcmole(all) ###
'wasm.cc',
+ '../common/wasm/wasm-module-runner.cc',
+ '../common/wasm/wasm-module-runner.h',
],
},
{
@@ -136,12 +138,268 @@
],
'sources': [ ### gcmole(all) ###
'wasm-asmjs.cc',
+ '../common/wasm/wasm-module-runner.cc',
+ '../common/wasm/wasm-module-runner.h',
+ ],
+ },
+ {
+ 'target_name': 'v8_simple_wasm_code_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'wasm_code_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'wasm_code_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'wasm-code.cc',
+ '../common/wasm/test-signatures.h',
+ '../common/wasm/wasm-module-runner.cc',
+ '../common/wasm/wasm-module-runner.h',
+ ],
+ },
+ {
+ 'target_name': 'v8_simple_wasm_data_section_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'wasm_data_section_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'wasm_data_section_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'wasm-data-section.cc',
+ '../common/wasm/wasm-module-runner.cc',
+ '../common/wasm/wasm-module-runner.h',
+ 'wasm-section-fuzzers.cc',
+ 'wasm-section-fuzzers.h',
+ ],
+ },
+ {
+ 'target_name': 'v8_simple_wasm_function_sigs_section_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'wasm_function_sigs_section_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'wasm_function_sigs_section_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'wasm-function-sigs-section.cc',
+ '../common/wasm/wasm-module-runner.cc',
+ '../common/wasm/wasm-module-runner.h',
+ 'wasm-section-fuzzers.cc',
+ 'wasm-section-fuzzers.h',
+ ],
+ },
+ {
+ 'target_name': 'v8_simple_wasm_globals_section_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'wasm_globals_section_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'wasm_globals_section_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'wasm-globals-section.cc',
+ '../common/wasm/wasm-module-runner.cc',
+ '../common/wasm/wasm-module-runner.h',
+ 'wasm-section-fuzzers.cc',
+ 'wasm-section-fuzzers.h',
+ ],
+ },
+ {
+ 'target_name': 'v8_simple_wasm_imports_section_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'wasm_imports_section_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'wasm_imports_section_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'wasm-imports-section.cc',
+ '../common/wasm/wasm-module-runner.cc',
+ '../common/wasm/wasm-module-runner.h',
+ 'wasm-section-fuzzers.cc',
+ 'wasm-section-fuzzers.h',
+ ],
+ },
+ {
+ 'target_name': 'v8_simple_wasm_memory_section_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'wasm_memory_section_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'wasm_memory_section_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'wasm-memory-section.cc',
+ '../common/wasm/wasm-module-runner.cc',
+ '../common/wasm/wasm-module-runner.h',
+ 'wasm-section-fuzzers.cc',
+ 'wasm-section-fuzzers.h',
+ ],
+ },
+ {
+ 'target_name': 'v8_simple_wasm_names_section_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'wasm_names_section_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'wasm_names_section_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'wasm-names-section.cc',
+ '../common/wasm/wasm-module-runner.cc',
+ '../common/wasm/wasm-module-runner.h',
+ 'wasm-section-fuzzers.cc',
+ 'wasm-section-fuzzers.h',
+ ],
+ },
+ {
+ 'target_name': 'v8_simple_wasm_types_section_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'wasm_types_section_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'wasm_types_section_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'wasm-types-section.cc',
+ '../common/wasm/wasm-module-runner.cc',
+ '../common/wasm/wasm-module-runner.h',
+ 'wasm-section-fuzzers.cc',
+ 'wasm-section-fuzzers.h',
],
},
{
'target_name': 'fuzzer_support',
'type': 'static_library',
'dependencies': [
+ '../../src/v8.gyp:v8',
+ '../../src/v8.gyp:v8_libplatform',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'fuzzer-support.cc',
+ 'fuzzer-support.h',
+ ],
+ },
+ {
+ 'target_name': 'fuzzer_support_nocomponent',
+ 'type': 'static_library',
+ 'dependencies': [
'../../src/v8.gyp:v8_libplatform',
],
'include_dirs': [
diff --git a/deps/v8/test/fuzzer/fuzzer.isolate b/deps/v8/test/fuzzer/fuzzer.isolate
index 2611c72104..6b93c53481 100644
--- a/deps/v8/test/fuzzer/fuzzer.isolate
+++ b/deps/v8/test/fuzzer/fuzzer.isolate
@@ -10,6 +10,14 @@
'<(PRODUCT_DIR)/v8_simple_regexp_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_asmjs_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_wasm_code_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_wasm_data_section_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_wasm_function_sigs_section_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_wasm_globals_section_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_wasm_imports_section_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_wasm_memory_section_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_wasm_names_section_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_wasm_types_section_fuzzer<(EXECUTABLE_SUFFIX)',
'./fuzzer.status',
'./testcfg.py',
'./json/',
@@ -17,6 +25,14 @@
'./regexp/',
'./wasm/',
'./wasm_asmjs/',
+ './wasm_code/',
+ './wasm_data_section/',
+ './wasm_function_sigs_section/',
+ './wasm_globals_section/',
+ './wasm_imports_section/',
+ './wasm_memory_section/',
+ './wasm_names_section/',
+ './wasm_types_section/',
],
},
'includes': [
diff --git a/deps/v8/test/fuzzer/testcfg.py b/deps/v8/test/fuzzer/testcfg.py
index 85a38eda08..830b459742 100644
--- a/deps/v8/test/fuzzer/testcfg.py
+++ b/deps/v8/test/fuzzer/testcfg.py
@@ -18,7 +18,10 @@ class FuzzerVariantGenerator(testsuite.VariantGenerator):
class FuzzerTestSuite(testsuite.TestSuite):
- SUB_TESTS = ( 'json', 'parser', 'regexp', 'wasm', 'wasm_asmjs', )
+ SUB_TESTS = ( 'json', 'parser', 'regexp', 'wasm', 'wasm_asmjs', 'wasm_code',
+ 'wasm_data_section', 'wasm_function_sigs_section',
+ 'wasm_globals_section', 'wasm_imports_section', 'wasm_memory_section',
+ 'wasm_names_section', 'wasm_types_section' )
def __init__(self, name, root):
super(FuzzerTestSuite, self).__init__(name, root)
diff --git a/deps/v8/test/fuzzer/wasm-asmjs.cc b/deps/v8/test/fuzzer/wasm-asmjs.cc
index cb8b86bad4..d3341fa5b3 100644
--- a/deps/v8/test/fuzzer/wasm-asmjs.cc
+++ b/deps/v8/test/fuzzer/wasm-asmjs.cc
@@ -12,8 +12,8 @@
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
-#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
+#include "test/common/wasm/wasm-module-runner.h"
#include "test/fuzzer/fuzzer-support.h"
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
@@ -31,9 +31,9 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(support->GetContext());
v8::TryCatch try_catch(isolate);
- v8::internal::WasmJs::InstallWasmFunctionMap(i_isolate,
- i_isolate->native_context());
- v8::internal::wasm::testing::CompileAndRunWasmModule(i_isolate, data,
- data + size, true);
+ v8::internal::wasm::testing::SetupIsolateForWasmModule(i_isolate);
+ v8::internal::wasm::testing::CompileAndRunWasmModule(
+ i_isolate, data, data + size,
+ v8::internal::wasm::ModuleOrigin::kAsmJsOrigin);
return 0;
}
diff --git a/deps/v8/test/fuzzer/wasm-code.cc b/deps/v8/test/fuzzer/wasm-code.cc
new file mode 100644
index 0000000000..13b665137d
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-code.cc
@@ -0,0 +1,104 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "include/v8.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "src/wasm/wasm-module.h"
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-module-runner.h"
+#include "test/fuzzer/fuzzer-support.h"
+
+#define WASM_CODE_FUZZER_HASH_SEED 83
+
+using namespace v8::internal::wasm;
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
+ v8::internal::Isolate* i_isolate =
+ reinterpret_cast<v8::internal::Isolate*>(isolate);
+
+ // Clear any pending exceptions from a prior run.
+ if (i_isolate->has_pending_exception()) {
+ i_isolate->clear_pending_exception();
+ }
+
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(support->GetContext());
+ v8::TryCatch try_catch(isolate);
+
+ v8::internal::AccountingAllocator allocator;
+ v8::internal::Zone zone(&allocator);
+
+ TestSignatures sigs;
+
+ WasmModuleBuilder builder(&zone);
+
+ v8::internal::wasm::WasmFunctionBuilder* f =
+ builder.AddFunction(sigs.i_iii());
+ f->EmitCode(data, static_cast<uint32_t>(size));
+ f->SetExported();
+ f->SetName("main", 4);
+
+ ZoneBuffer buffer(&zone);
+ builder.WriteTo(buffer);
+
+ v8::internal::wasm::testing::SetupIsolateForWasmModule(i_isolate);
+
+ v8::internal::HandleScope scope(i_isolate);
+
+ ErrorThrower interpreter_thrower(i_isolate, "Interpreter");
+ std::unique_ptr<const WasmModule> module(testing::DecodeWasmModuleForTesting(
+ i_isolate, &zone, &interpreter_thrower, buffer.begin(), buffer.end(),
+ v8::internal::wasm::ModuleOrigin::kWasmOrigin));
+
+ if (module == nullptr) {
+ return 0;
+ }
+ int32_t result_interpreted;
+ {
+ WasmVal args[] = {WasmVal(1), WasmVal(2), WasmVal(3)};
+ result_interpreted = testing::InterpretWasmModule(
+ i_isolate, &interpreter_thrower, module.get(), 0, args);
+ }
+
+ ErrorThrower compiler_thrower(i_isolate, "Compiler");
+ v8::internal::Handle<v8::internal::JSObject> instance =
+ testing::InstantiateModuleForTesting(i_isolate, &compiler_thrower,
+ module.get());
+
+ if (!interpreter_thrower.error()) {
+ CHECK(!instance.is_null());
+ } else {
+ return 0;
+ }
+ int32_t result_compiled;
+ {
+ v8::internal::Handle<v8::internal::Object> arguments[] = {
+ v8::internal::handle(v8::internal::Smi::FromInt(1), i_isolate),
+ v8::internal::handle(v8::internal::Smi::FromInt(2), i_isolate),
+ v8::internal::handle(v8::internal::Smi::FromInt(3), i_isolate)};
+ result_compiled = testing::CallWasmFunctionForTesting(
+ i_isolate, instance, &compiler_thrower, "main", arraysize(arguments),
+ arguments, v8::internal::wasm::ModuleOrigin::kWasmOrigin);
+ }
+ if (result_interpreted == 0xdeadbeef) {
+ CHECK(i_isolate->has_pending_exception());
+ i_isolate->clear_pending_exception();
+ } else {
+ if (result_interpreted != result_compiled) {
+ V8_Fatal(__FILE__, __LINE__, "WasmCodeFuzzerHash=%x",
+ v8::internal::StringHasher::HashSequentialString(
+ data, static_cast<int>(size), WASM_CODE_FUZZER_HASH_SEED));
+ }
+ }
+ return 0;
+}
diff --git a/deps/v8/test/fuzzer/wasm-data-section.cc b/deps/v8/test/fuzzer/wasm-data-section.cc
new file mode 100644
index 0000000000..2fbdbcd906
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-data-section.cc
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/fuzzer/wasm-section-fuzzers.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ return fuzz_wasm_section(v8::internal::wasm::kDataSectionCode, data, size);
+}
diff --git a/deps/v8/test/fuzzer/wasm-function-sigs-section.cc b/deps/v8/test/fuzzer/wasm-function-sigs-section.cc
new file mode 100644
index 0000000000..a0b66e12d0
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-function-sigs-section.cc
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/fuzzer/wasm-section-fuzzers.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ return fuzz_wasm_section(v8::internal::wasm::kFunctionSectionCode, data,
+ size);
+}
diff --git a/deps/v8/test/fuzzer/wasm-globals-section.cc b/deps/v8/test/fuzzer/wasm-globals-section.cc
new file mode 100644
index 0000000000..a58ef0f2b5
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-globals-section.cc
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/fuzzer/wasm-section-fuzzers.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ return fuzz_wasm_section(v8::internal::wasm::kGlobalSectionCode, data, size);
+}
diff --git a/deps/v8/test/fuzzer/wasm-imports-section.cc b/deps/v8/test/fuzzer/wasm-imports-section.cc
new file mode 100644
index 0000000000..d6513e59dc
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-imports-section.cc
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/fuzzer/wasm-section-fuzzers.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ return fuzz_wasm_section(v8::internal::wasm::kImportSectionCode, data, size);
+}
diff --git a/deps/v8/test/fuzzer/wasm-memory-section.cc b/deps/v8/test/fuzzer/wasm-memory-section.cc
new file mode 100644
index 0000000000..77065f1729
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-memory-section.cc
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/fuzzer/wasm-section-fuzzers.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ return fuzz_wasm_section(v8::internal::wasm::kMemorySectionCode, data, size);
+}
diff --git a/deps/v8/test/fuzzer/wasm-names-section.cc b/deps/v8/test/fuzzer/wasm-names-section.cc
new file mode 100644
index 0000000000..01846823ff
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-names-section.cc
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/fuzzer/wasm-section-fuzzers.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ // TODO(titzer): Names section requires a preceding function section.
+ return fuzz_wasm_section(v8::internal::wasm::kNameSectionCode, data, size);
+}
diff --git a/deps/v8/test/fuzzer/wasm-section-fuzzers.cc b/deps/v8/test/fuzzer/wasm-section-fuzzers.cc
new file mode 100644
index 0000000000..e95beba0fd
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-section-fuzzers.cc
@@ -0,0 +1,63 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/fuzzer/wasm-section-fuzzers.h"
+
+#include "include/v8.h"
+#include "src/isolate.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/zone/accounting-allocator.h"
+#include "src/zone/zone.h"
+#include "test/common/wasm/wasm-module-runner.h"
+#include "test/fuzzer/fuzzer-support.h"
+
+using namespace v8::internal::wasm;
+
+static const char* kNameString = "name";
+static const size_t kNameStringLength = 4;
+
+int fuzz_wasm_section(WasmSectionCode section, const uint8_t* data,
+ size_t size) {
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
+ v8::internal::Isolate* i_isolate =
+ reinterpret_cast<v8::internal::Isolate*>(isolate);
+
+ // Clear any pending exceptions from a prior run.
+ if (i_isolate->has_pending_exception()) {
+ i_isolate->clear_pending_exception();
+ }
+
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(support->GetContext());
+ v8::TryCatch try_catch(isolate);
+
+ v8::internal::AccountingAllocator allocator;
+ v8::internal::Zone zone(&allocator);
+
+ ZoneBuffer buffer(&zone);
+ buffer.write_u32(kWasmMagic);
+ buffer.write_u32(kWasmVersion);
+ if (section == kNameSectionCode) {
+ buffer.write_u8(kUnknownSectionCode);
+ buffer.write_size(size + kNameStringLength + 1);
+ buffer.write_u8(kNameStringLength);
+ buffer.write(reinterpret_cast<const uint8_t*>(kNameString),
+ kNameStringLength);
+ buffer.write(data, size);
+ } else {
+ buffer.write_u8(section);
+ buffer.write_size(size);
+ buffer.write(data, size);
+ }
+
+ ErrorThrower thrower(i_isolate, "decoder");
+
+ std::unique_ptr<const WasmModule> module(testing::DecodeWasmModuleForTesting(
+ i_isolate, &zone, &thrower, buffer.begin(), buffer.end(), kWasmOrigin));
+
+ return 0;
+}
diff --git a/deps/v8/test/fuzzer/wasm-section-fuzzers.h b/deps/v8/test/fuzzer/wasm-section-fuzzers.h
new file mode 100644
index 0000000000..a28ada134e
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-section-fuzzers.h
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef WASM_SECTION_FUZZERS_H_
+#define WASM_SECTION_FUZZERS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "src/wasm/wasm-module.h"
+
+int fuzz_wasm_section(v8::internal::wasm::WasmSectionCode section,
+ const uint8_t* data, size_t size);
+
+#endif // WASM_SECTION_FUZZERS_H_
diff --git a/deps/v8/test/fuzzer/wasm-types-section.cc b/deps/v8/test/fuzzer/wasm-types-section.cc
new file mode 100644
index 0000000000..7d5fe65277
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-types-section.cc
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/fuzzer/wasm-section-fuzzers.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ return fuzz_wasm_section(v8::internal::wasm::kTypeSectionCode, data, size);
+}
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
index 27259c6417..933be71344 100644
--- a/deps/v8/test/fuzzer/wasm.cc
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -12,8 +12,8 @@
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
-#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
+#include "test/common/wasm/wasm-module-runner.h"
#include "test/fuzzer/fuzzer-support.h"
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
@@ -31,9 +31,8 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(support->GetContext());
v8::TryCatch try_catch(isolate);
- v8::internal::WasmJs::InstallWasmFunctionMap(i_isolate,
- i_isolate->native_context());
- v8::internal::wasm::testing::CompileAndRunWasmModule(i_isolate, data,
- data + size, false);
+ v8::internal::wasm::testing::SetupIsolateForWasmModule(i_isolate);
+ v8::internal::wasm::testing::CompileAndRunWasmModule(
+ i_isolate, data, data + size, v8::internal::wasm::kWasmOrigin);
return 0;
}
diff --git a/deps/v8/test/fuzzer/wasm.tar.gz.sha1 b/deps/v8/test/fuzzer/wasm.tar.gz.sha1
new file mode 100644
index 0000000000..9fc4cf50d2
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm.tar.gz.sha1
@@ -0,0 +1 @@
+43dbe4810e9b08a5add1dd4076e26410e18c828c \ No newline at end of file
diff --git a/deps/v8/test/fuzzer/wasm/foo.wasm b/deps/v8/test/fuzzer/wasm/foo.wasm
deleted file mode 100644
index 79cd64b50c..0000000000
--- a/deps/v8/test/fuzzer/wasm/foo.wasm
+++ /dev/null
Binary files differ
diff --git a/deps/v8/test/fuzzer/wasm_asmjs.tar.gz.sha1 b/deps/v8/test/fuzzer/wasm_asmjs.tar.gz.sha1
new file mode 100644
index 0000000000..b8cf779dee
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_asmjs.tar.gz.sha1
@@ -0,0 +1 @@
+3a2c9658f3f644c7b8c309201b964fedc2766f9c \ No newline at end of file
diff --git a/deps/v8/test/fuzzer/wasm_asmjs/foo.wasm b/deps/v8/test/fuzzer/wasm_asmjs/foo.wasm
deleted file mode 100644
index 79cd64b50c..0000000000
--- a/deps/v8/test/fuzzer/wasm_asmjs/foo.wasm
+++ /dev/null
Binary files differ
diff --git a/deps/v8/test/fuzzer/wasm_code/foo b/deps/v8/test/fuzzer/wasm_code/foo
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_code/foo
diff --git a/deps/v8/test/fuzzer/wasm_data_section/foo b/deps/v8/test/fuzzer/wasm_data_section/foo
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_data_section/foo
diff --git a/deps/v8/test/fuzzer/wasm_function_sigs_section/foo b/deps/v8/test/fuzzer/wasm_function_sigs_section/foo
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_function_sigs_section/foo
diff --git a/deps/v8/test/fuzzer/wasm_globals_section/foo b/deps/v8/test/fuzzer/wasm_globals_section/foo
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_globals_section/foo
diff --git a/deps/v8/test/fuzzer/wasm_imports_section/foo b/deps/v8/test/fuzzer/wasm_imports_section/foo
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_imports_section/foo
diff --git a/deps/v8/test/fuzzer/wasm_memory_section/foo b/deps/v8/test/fuzzer/wasm_memory_section/foo
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_memory_section/foo
diff --git a/deps/v8/test/fuzzer/wasm_names_section/foo b/deps/v8/test/fuzzer/wasm_names_section/foo
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_names_section/foo
diff --git a/deps/v8/test/fuzzer/wasm_types_section/foo b/deps/v8/test/fuzzer/wasm_types_section/foo
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_types_section/foo
diff --git a/deps/v8/test/inspector/BUILD.gn b/deps/v8/test/inspector/BUILD.gn
new file mode 100644
index 0000000000..cb96bf423c
--- /dev/null
+++ b/deps/v8/test/inspector/BUILD.gn
@@ -0,0 +1,39 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/v8.gni")
+
+v8_executable("inspector-test") {
+ testonly = true
+
+ sources = [
+ "inspector-impl.cc",
+ "inspector-impl.h",
+ "inspector-test.cc",
+ "task-runner.cc",
+ "task-runner.h",
+ ]
+
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
+
+ deps = [
+ "../..:v8_libplatform",
+ "//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
+ ]
+
+ if (is_component_build) {
+ # inspector-test can't be built against a shared library, so we
+ # need to depend on the underlying static target in that case.
+ deps += [ "../..:v8_maybe_snapshot" ]
+ } else {
+ deps += [ "../..:v8" ]
+ }
+
+ cflags = []
+ ldflags = []
+}
diff --git a/deps/v8/test/inspector/DEPS b/deps/v8/test/inspector/DEPS
new file mode 100644
index 0000000000..af99e05595
--- /dev/null
+++ b/deps/v8/test/inspector/DEPS
@@ -0,0 +1,10 @@
+include_rules = [
+ "-src",
+ "+src/base/macros.h",
+ "+src/base/platform/platform.h",
+ "+src/flags.h",
+ "+src/inspector/string-16.h",
+ "+src/locked-queue-inl.h",
+ "+src/utils.h",
+ "+src/vector.h",
+] \ No newline at end of file
diff --git a/deps/v8/test/inspector/OWNERS b/deps/v8/test/inspector/OWNERS
new file mode 100644
index 0000000000..9edb707b78
--- /dev/null
+++ b/deps/v8/test/inspector/OWNERS
@@ -0,0 +1,2 @@
+dgozman@chromium.org
+kozyatinskiy@chromium.org
diff --git a/deps/v8/test/inspector/console/let-const-with-api-expected.txt b/deps/v8/test/inspector/console/let-const-with-api-expected.txt
new file mode 100644
index 0000000000..a5b889632d
--- /dev/null
+++ b/deps/v8/test/inspector/console/let-const-with-api-expected.txt
@@ -0,0 +1,19 @@
+first "let a = 1;" result: wasThrown = false
+second "let a = 1;" result: wasThrown = true
+exception message: Uncaught SyntaxError: Identifier 'a' has already been declared
+ at <anonymous>:1:1
+{"result":{"type":"number","value":42,"description":"42"}}
+function dir(value) { [Command Line API] }
+function dirxml(value) { [Command Line API] }
+function keys(object) { [Command Line API] }
+function values(object) { [Command Line API] }
+function profile(title) { [Command Line API] }
+function profileEnd(title) { [Command Line API] }
+function inspect(object) { [Command Line API] }
+function copy(value) { [Command Line API] }
+function clear() { [Command Line API] }
+function debug(function) { [Command Line API] }
+function undebug(function) { [Command Line API] }
+function monitor(function) { [Command Line API] }
+function unmonitor(function) { [Command Line API] }
+function table(data, [columns]) { [Command Line API] } \ No newline at end of file
diff --git a/deps/v8/test/inspector/console/let-const-with-api.js b/deps/v8/test/inspector/console/let-const-with-api.js
new file mode 100644
index 0000000000..0280fe1174
--- /dev/null
+++ b/deps/v8/test/inspector/console/let-const-with-api.js
@@ -0,0 +1,52 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Protocol.Runtime.evaluate({ expression: "let a = 42;" }).then(step2);
+
+function step2(response)
+{
+ failIfError(response);
+ InspectorTest.log("first \"let a = 1;\" result: wasThrown = " + !!response.result.exceptionDetails);
+ Protocol.Runtime.evaluate({ expression: "let a = 239;" }).then(step3);
+}
+
+function step3(response)
+{
+ failIfError(response);
+ InspectorTest.log("second \"let a = 1;\" result: wasThrown = " + !!response.result.exceptionDetails);
+ if (response.result.exceptionDetails)
+ InspectorTest.log("exception message: " + response.result.exceptionDetails.text + " " + response.result.exceptionDetails.exception.description);
+ Protocol.Runtime.evaluate({ expression: "a" }).then(step4);
+}
+
+function step4(response)
+{
+ failIfError(response);
+ InspectorTest.log(JSON.stringify(response.result));
+ checkMethod(null);
+}
+
+var methods = [ "dir", "dirxml", "keys", "values", "profile", "profileEnd",
+ "inspect", "copy", "clear",
+ "debug", "undebug", "monitor", "unmonitor", "table" ];
+
+function checkMethod(response)
+{
+ failIfError(response);
+
+ if (response)
+ InspectorTest.log(response.result.result.description);
+
+ var method = methods.shift();
+ if (!method)
+ InspectorTest.completeTest();
+
+ Protocol.Runtime.evaluate({ expression: method, includeCommandLineAPI: true }).then(checkMethod);
+}
+
+function failIfError(response)
+{
+ if (response && response.error)
+ InspectorTest.log("FAIL: " + JSON.stringify(response.error));
+}
diff --git a/deps/v8/test/inspector/console/memory-setter-in-strict-mode-expected.txt b/deps/v8/test/inspector/console/memory-setter-in-strict-mode-expected.txt
new file mode 100644
index 0000000000..04b60d8c14
--- /dev/null
+++ b/deps/v8/test/inspector/console/memory-setter-in-strict-mode-expected.txt
@@ -0,0 +1,9 @@
+Tests checks that console.memory property can be set in strict mode (crbug.com/468611).
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : undefined
+ }
+ }
+} \ No newline at end of file
diff --git a/deps/v8/test/inspector/console/memory-setter-in-strict-mode.js b/deps/v8/test/inspector/console/memory-setter-in-strict-mode.js
new file mode 100644
index 0000000000..11db993bee
--- /dev/null
+++ b/deps/v8/test/inspector/console/memory-setter-in-strict-mode.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Tests checks that console.memory property can be set in strict mode (crbug.com/468611).")
+
+Protocol.Runtime.evaluate({ expression: "\"use strict\"\nconsole.memory = {};undefined" }).then(dumpResult);
+
+function dumpResult(result)
+{
+ InspectorTest.logMessage(result);
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash-expected.txt b/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash-expected.txt
new file mode 100644
index 0000000000..a28765a100
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash-expected.txt
@@ -0,0 +1,3 @@
+Tests that "console.profileEnd()" does not cause crash. (webkit:105759)
+SUCCESS: found 2 profile headers
+SUCCESS: titled profile found \ No newline at end of file
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash.js b/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash.js
new file mode 100644
index 0000000000..d266bb0fdc
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash.js
@@ -0,0 +1,46 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Tests that \"console.profileEnd()\" does not cause crash. (webkit:105759)");
+
+InspectorTest.addScript(`
+function collectProfiles()
+{
+ console.profile();
+ console.profile("titled");
+ console.profileEnd();
+ console.profileEnd();
+}`);
+
+InspectorTest.fail = function(message)
+{
+ InspectorTest.log("FAIL: " + message);
+ InspectorTest.completeTest();
+}
+
+Protocol.Profiler.enable();
+Protocol.Runtime.evaluate({ expression: "collectProfiles()"}).then(didCollectProfiles);
+
+var headers = [];
+Protocol.Profiler.onConsoleProfileFinished(function(messageObject)
+{
+ headers.push({
+ title: messageObject["params"]["title"]
+ });
+});
+
+function didCollectProfiles(messageObject)
+{
+ if (headers.length !== 2)
+ return InspectorTest.fail("Cannot retrive headers: " + JSON.stringify(messageObject, null, 4));
+ InspectorTest.log("SUCCESS: found 2 profile headers");
+ for (var i = 0; i < headers.length; i++) {
+ if (headers[i].title === "titled") {
+ InspectorTest.log("SUCCESS: titled profile found");
+ InspectorTest.completeTest();
+ return;
+ }
+ }
+ InspectorTest.fail("Cannot find titled profile");
+}
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-expected.txt b/deps/v8/test/inspector/cpu-profiler/console-profile-expected.txt
new file mode 100644
index 0000000000..b3da7ba0c1
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-expected.txt
@@ -0,0 +1,3 @@
+Tests that console.profile/profileEnd will record CPU profile when inspector front-end is connected.
+SUCCESS: retrieved '42' profile
+SUCCESS: found 'collectProfiles' function in the profile \ No newline at end of file
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile.js b/deps/v8/test/inspector/cpu-profiler/console-profile.js
new file mode 100644
index 0000000000..84873cd169
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile.js
@@ -0,0 +1,59 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Tests that console.profile/profileEnd will record CPU profile when inspector front-end is connected.");
+
+InspectorTest.addScript(`
+function collectProfiles()
+{
+ console.profile("outer");
+ console.profile(42);
+ console.profileEnd("outer");
+ console.profileEnd(42);
+}`);
+
+InspectorTest.fail = function(message)
+{
+ InspectorTest.log("FAIL: " + message);
+ InspectorTest.completeTest();
+}
+
+Protocol.Profiler.enable();
+Protocol.Runtime.evaluate({ expression: "collectProfiles()"}).then(didCollectProfiles);
+
+var headers = [];
+Protocol.Profiler.onConsoleProfileFinished(function(messageObject)
+{
+ headers.push({
+ profile: messageObject["params"]["profile"],
+ title: messageObject["params"]["title"]
+ });
+});
+
+function didCollectProfiles(messageObject)
+{
+ if (headers.length !== 2)
+ return InspectorTest.fail("Cannot retrive headers: " + JSON.stringify(messageObject, null, 4));
+ for (var i = 0; i < headers.length; i++) {
+ if (headers[i].title === "42") {
+ checkInnerProfile(headers[i].profile);
+ return;
+ }
+ }
+ InspectorTest.fail("Cannot find '42' profile header");
+}
+
+function checkInnerProfile(profile)
+{
+ InspectorTest.log("SUCCESS: retrieved '42' profile");
+ if (!findFunctionInProfile(profile.nodes, "collectProfiles"))
+ return InspectorTest.fail("collectProfiles function not found in the profile: " + JSON.stringify(profile, null, 4));
+ InspectorTest.log("SUCCESS: found 'collectProfiles' function in the profile");
+ InspectorTest.completeTest();
+}
+
+function findFunctionInProfile(nodes, functionName)
+{
+ return nodes.some(n => n.callFrame.functionName === functionName);
+}
diff --git a/deps/v8/test/inspector/cpu-profiler/enable-disable-expected.txt b/deps/v8/test/inspector/cpu-profiler/enable-disable-expected.txt
new file mode 100644
index 0000000000..05d3fd3d5f
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/enable-disable-expected.txt
@@ -0,0 +1,8 @@
+Test that profiling can only be started when Profiler was enabled and that Profiler.disable command will stop recording all profiles.
+PASS: didFailToStartWhenDisabled
+PASS: didStartFrontendProfile
+PASS: console initiated profile started
+PASS: didStartConsoleProfile
+PASS: didDisableProfiler
+PASS: no front-end initiated profiles found
+PASS: didStopConsoleProfile \ No newline at end of file
diff --git a/deps/v8/test/inspector/cpu-profiler/enable-disable.js b/deps/v8/test/inspector/cpu-profiler/enable-disable.js
new file mode 100644
index 0000000000..3ed1b74b36
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/enable-disable.js
@@ -0,0 +1,75 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Test that profiling can only be started when Profiler was enabled and that Profiler.disable command will stop recording all profiles.");
+
+Protocol.Profiler.start().then(didFailToStartWhenDisabled);
+disallowConsoleProfiles();
+
+function disallowConsoleProfiles()
+{
+ Protocol.Profiler.onConsoleProfileStarted(function(messageObject)
+ {
+ InspectorTest.log("FAIL: console profile started " + JSON.stringify(messageObject, null, 4));
+ });
+ Protocol.Profiler.onConsoleProfileFinished(function(messageObject)
+ {
+ InspectorTest.log("FAIL: unexpected profile received " + JSON.stringify(messageObject, null, 4));
+ });
+}
+function allowConsoleProfiles()
+{
+ Protocol.Profiler.onConsoleProfileStarted(function(messageObject)
+ {
+ InspectorTest.log("PASS: console initiated profile started");
+ });
+ Protocol.Profiler.onConsoleProfileFinished(function(messageObject)
+ {
+ InspectorTest.log("PASS: console initiated profile received");
+ });
+}
+function didFailToStartWhenDisabled(messageObject)
+{
+ if (!InspectorTest.expectedError("didFailToStartWhenDisabled", messageObject))
+ return;
+ allowConsoleProfiles();
+ Protocol.Profiler.enable();
+ Protocol.Profiler.start().then(didStartFrontendProfile);
+}
+function didStartFrontendProfile(messageObject)
+{
+ if (!InspectorTest.expectedSuccess("didStartFrontendProfile", messageObject))
+ return;
+ Protocol.Runtime.evaluate({expression: "console.profile('p1');"}).then(didStartConsoleProfile);
+}
+
+function didStartConsoleProfile(messageObject)
+{
+ if (!InspectorTest.expectedSuccess("didStartConsoleProfile", messageObject))
+ return;
+ Protocol.Profiler.disable().then(didDisableProfiler);
+}
+
+function didDisableProfiler(messageObject)
+{
+ if (!InspectorTest.expectedSuccess("didDisableProfiler", messageObject))
+ return;
+ Protocol.Profiler.enable();
+ Protocol.Profiler.stop().then(didStopFrontendProfile);
+}
+
+function didStopFrontendProfile(messageObject)
+{
+ if (!InspectorTest.expectedError("no front-end initiated profiles found", messageObject))
+ return;
+ disallowConsoleProfiles();
+ Protocol.Runtime.evaluate({expression: "console.profileEnd();"}).then(didStopConsoleProfile);
+}
+
+function didStopConsoleProfile(messageObject)
+{
+ if (!InspectorTest.expectedSuccess("didStopConsoleProfile", messageObject))
+ return;
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/cpu-profiler/record-cpu-profile-expected.txt b/deps/v8/test/inspector/cpu-profiler/record-cpu-profile-expected.txt
new file mode 100644
index 0000000000..d810093968
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/record-cpu-profile-expected.txt
@@ -0,0 +1,7 @@
+Test that profiler is able to record a profile. Also it tests that profiler returns an error when it unable to find the profile.
+PASS: startFrontendProfile
+PASS: startConsoleProfile
+PASS: stopConsoleProfile
+PASS: stoppedFrontendProfile
+PASS: startFrontendProfileSecondTime
+PASS: stopFrontendProfileSecondTime \ No newline at end of file
diff --git a/deps/v8/test/inspector/cpu-profiler/record-cpu-profile.js b/deps/v8/test/inspector/cpu-profiler/record-cpu-profile.js
new file mode 100644
index 0000000000..3ce16fcfb5
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/record-cpu-profile.js
@@ -0,0 +1,48 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Test that profiler is able to record a profile. Also it tests that profiler returns an error when it unable to find the profile.");
+
+Protocol.Profiler.enable();
+Protocol.Profiler.start().then(didStartFrontendProfile);
+function didStartFrontendProfile(messageObject)
+{
+ if (!InspectorTest.expectedSuccess("startFrontendProfile", messageObject))
+ return;
+ Protocol.Runtime.evaluate({expression: "console.profile('Profile 1');"}).then(didStartConsoleProfile);
+}
+
+function didStartConsoleProfile(messageObject)
+{
+ if (!InspectorTest.expectedSuccess("startConsoleProfile", messageObject))
+ return;
+ Protocol.Runtime.evaluate({expression: "console.profileEnd('Profile 1');"}).then(didStopConsoleProfile);
+}
+
+function didStopConsoleProfile(messageObject)
+{
+ if (!InspectorTest.expectedSuccess("stopConsoleProfile", messageObject))
+ return;
+ Protocol.Profiler.stop().then(didStopFrontendProfile);
+}
+
+function didStopFrontendProfile(messageObject)
+{
+ if (!InspectorTest.expectedSuccess("stoppedFrontendProfile", messageObject))
+ return;
+ Protocol.Profiler.start().then(didStartFrontendProfile2);
+}
+
+function didStartFrontendProfile2(messageObject)
+{
+ if (!InspectorTest.expectedSuccess("startFrontendProfileSecondTime", messageObject))
+ return;
+ Protocol.Profiler.stop().then(didStopFrontendProfile2);
+}
+
+function didStopFrontendProfile2(messageObject)
+{
+ InspectorTest.expectedSuccess("stopFrontendProfileSecondTime", messageObject)
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start-expected.txt b/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start-expected.txt
new file mode 100644
index 0000000000..91b5c9e6e2
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start-expected.txt
@@ -0,0 +1,2 @@
+Test that profiler doesn't crash when we call stop without preceeding start.
+PASS: ProfileAgent.stop \ No newline at end of file
diff --git a/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start.js b/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start.js
new file mode 100644
index 0000000000..3e4717a19c
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Test that profiler doesn't crash when we call stop without preceeding start.");
+
+Protocol.Profiler.stop().then(didStopProfile);
+function didStopProfile(messageObject)
+{
+ InspectorTest.expectedError("ProfileAgent.stop", messageObject);
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/debugger/access-obsolete-frame-expected.txt b/deps/v8/test/inspector/debugger/access-obsolete-frame-expected.txt
new file mode 100644
index 0000000000..643d382f24
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/access-obsolete-frame-expected.txt
@@ -0,0 +1,8 @@
+Paused on 'debugger;'
+resume
+restartFrame
+PASS, error message as expected
+evaluateOnFrame
+PASS, error message as expected
+setVariableValue
+PASS, error message as expected \ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/access-obsolete-frame.js b/deps/v8/test/inspector/debugger/access-obsolete-frame.js
new file mode 100644
index 0000000000..b5a96e1c3c
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/access-obsolete-frame.js
@@ -0,0 +1,67 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.addScript(`
+function testFunction()
+{
+ debugger;
+}
+//# sourceURL=foo.js`);
+
+Protocol.Debugger.enable();
+
+Protocol.Debugger.oncePaused().then(handleDebuggerPausedOne);
+
+Protocol.Runtime.evaluate({ "expression": "setTimeout(testFunction, 0)" });
+
+var obsoleteTopFrameId;
+
+function handleDebuggerPausedOne(messageObject)
+{
+ InspectorTest.log("Paused on 'debugger;'");
+
+ var topFrame = messageObject.params.callFrames[0];
+ obsoleteTopFrameId = topFrame.callFrameId;
+
+ Protocol.Debugger.resume().then(callbackResume);
+}
+
+function callbackResume(response)
+{
+ InspectorTest.log("resume");
+ InspectorTest.log("restartFrame");
+ Protocol.Debugger.restartFrame({ callFrameId: obsoleteTopFrameId }).then(callbackRestartFrame);
+}
+
+function callbackRestartFrame(response)
+{
+ logErrorResponse(response);
+ InspectorTest.log("evaluateOnFrame");
+ Protocol.Debugger.evaluateOnCallFrame({ callFrameId: obsoleteTopFrameId, expression: "0"}).then(callbackEvaluate);
+}
+
+function callbackEvaluate(response)
+{
+ logErrorResponse(response);
+ InspectorTest.log("setVariableValue");
+ Protocol.Debugger.setVariableValue({ callFrameId: obsoleteTopFrameId, scopeNumber: 0, variableName: "a", newValue: { value: 0 } }).then(callbackSetVariableValue);
+}
+
+function callbackSetVariableValue(response)
+{
+ logErrorResponse(response);
+ InspectorTest.completeTest();
+}
+
+function logErrorResponse(response)
+{
+ if (response.error) {
+ if (response.error.message.indexOf("Can only perform operation while paused.") !== -1) {
+ InspectorTest.log("PASS, error message as expected");
+ return;
+ }
+ }
+ InspectorTest.log("FAIL, unexpected error message");
+ InspectorTest.log(JSON.stringify(response));
+}
diff --git a/deps/v8/test/inspector/debugger/async-console-count-doesnt-crash-expected.txt b/deps/v8/test/inspector/debugger/async-console-count-doesnt-crash-expected.txt
new file mode 100644
index 0000000000..aaaf9ebf96
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/async-console-count-doesnt-crash-expected.txt
@@ -0,0 +1 @@
+setTimeout(console.count, 0) doesn't crash with enabled async stacks.
diff --git a/deps/v8/test/inspector/debugger/async-console-count-doesnt-crash.js b/deps/v8/test/inspector/debugger/async-console-count-doesnt-crash.js
new file mode 100644
index 0000000000..d4941950b1
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/async-console-count-doesnt-crash.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("setTimeout(console.count, 0) doesn't crash with enabled async stacks.")
+
+Protocol.Debugger.enable();
+Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 1 });
+Protocol.Runtime.evaluate({ expression: "setTimeout(console.count, 0)" });
+InspectorTest.completeTestAfterPendingTimeouts();
diff --git a/deps/v8/test/inspector/debugger/call-frame-function-location-expected.txt b/deps/v8/test/inspector/debugger/call-frame-function-location-expected.txt
new file mode 100644
index 0000000000..8a34f45272
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/call-frame-function-location-expected.txt
@@ -0,0 +1,3 @@
+Paused on 'debugger;'
+Top frame location: {"scriptId":"42","lineNumber":3,"columnNumber":4}
+Top frame functionLocation: {"scriptId":"42","lineNumber":0,"columnNumber":21}
diff --git a/deps/v8/test/inspector/debugger/call-frame-function-location.js b/deps/v8/test/inspector/debugger/call-frame-function-location.js
new file mode 100644
index 0000000000..c91164a037
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/call-frame-function-location.js
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.addScript(
+`function testFunction()
+{
+ var a = 2;
+ debugger;
+}`);
+
+Protocol.Debugger.enable();
+Protocol.Debugger.oncePaused().then(handleDebuggerPaused);
+Protocol.Runtime.evaluate({ "expression": "setTimeout(testFunction, 0)" });
+
+function handleDebuggerPaused(messageObject)
+{
+ InspectorTest.log("Paused on 'debugger;'");
+ var topFrame = messageObject.params.callFrames[0];
+ topFrame.location.scriptId = "42";
+ topFrame.functionLocation.scriptId = "42";
+ InspectorTest.log("Top frame location: " + JSON.stringify(topFrame.location));
+ InspectorTest.log("Top frame functionLocation: " + JSON.stringify(topFrame.functionLocation));
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt b/deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt
new file mode 100644
index 0000000000..4148ef860a
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt
@@ -0,0 +1,23 @@
+Check that debug and monitor methods from Command Line API works with bound function.
+debug foo and bar
+call foo and bar
+paused in foo
+paused in boo
+undebug foo and bar
+call foo and bar
+monitor foo and bar
+call foo and bar
+function foo called
+function boo called
+unmonitor foo and bar
+call foo and bar
+monitor and debug bar
+call bar
+function boo called
+paused in boo
+undebug bar
+call bar
+function boo called
+debug and unmonitor bar
+call bar
+paused in boo \ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/command-line-api-with-bound-function.js b/deps/v8/test/inspector/debugger/command-line-api-with-bound-function.js
new file mode 100644
index 0000000000..0f1ae21ebe
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/command-line-api-with-bound-function.js
@@ -0,0 +1,64 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Check that debug and monitor methods from Command Line API works with bound function.");
+
+InspectorTest.addScript(`
+function foo() {}
+function boo() {}
+var bar = boo.bind(null);
+
+function testFunction() {
+ console.log("> debug foo and bar");
+ debug(foo);
+ debug(bar);
+ console.log("> call foo and bar");
+ foo();
+ bar();
+ console.log("> undebug foo and bar");
+ undebug(foo);
+ undebug(bar);
+ console.log("> call foo and bar");
+ foo();
+ bar();
+
+ console.log("> monitor foo and bar");
+ monitor(foo);
+ monitor(bar);
+ console.log("> call foo and bar");
+ foo();
+ bar();
+ console.log("> unmonitor foo and bar");
+ unmonitor(foo);
+ unmonitor(bar);
+ console.log("> call foo and bar");
+ foo();
+ bar();
+
+ console.log("> monitor and debug bar");
+ monitor(bar);
+ debug(bar);
+ console.log("> call bar");
+ bar();
+ console.log("> undebug bar");
+ undebug(bar);
+ console.log("> call bar");
+ bar();
+ console.log("> debug and unmonitor bar");
+ debug(bar);
+ unmonitor(bar);
+ console.log("> call bar");
+ bar();
+}`);
+
+Protocol.Runtime.enable();
+Protocol.Debugger.enable();
+Protocol.Debugger.onPaused(message => {
+ var functionName = message.params.callFrames[0].functionName;
+ InspectorTest.log(`paused in ${functionName}`);
+ Protocol.Debugger.resume();
+});
+Protocol.Runtime.onConsoleAPICalled(message => InspectorTest.log(message.params.args[0].value));
+Protocol.Runtime.evaluate({ expression: "testFunction()", includeCommandLineAPI: true })
+ .then(InspectorTest.completeTest);
diff --git a/deps/v8/test/inspector/debugger/continue-to-location-expected.txt b/deps/v8/test/inspector/debugger/continue-to-location-expected.txt
new file mode 100644
index 0000000000..d0c6ce715a
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/continue-to-location-expected.txt
@@ -0,0 +1,31 @@
+Paused on debugger statement
+Paused after continueToLocation
+Stopped on line 8, expected 8, requested 8, (0-based numbers).
+Control parameter 'step' calculation result: 1, expected: 1
+SUCCESS
+Paused on debugger statement
+Paused after continueToLocation
+Stopped on line 8, expected 8, requested 8, (0-based numbers).
+Control parameter 'step' calculation result: 1, expected: 1
+SUCCESS
+Paused on debugger statement
+Paused after continueToLocation
+Stopped on line 17, expected 17, requested 12, (0-based numbers).
+Control parameter 'step' calculation result: 6, expected: 6
+SUCCESS
+Paused on debugger statement
+Paused after continueToLocation
+Stopped on line 17, expected 17, requested 13, (0-based numbers).
+Control parameter 'step' calculation result: 6, expected: 6
+SUCCESS
+Paused on debugger statement
+Paused after continueToLocation
+Stopped on line 17, expected 17, requested 17, (0-based numbers).
+Control parameter 'step' calculation result: 6, expected: 6
+SUCCESS
+Paused on debugger statement
+Paused after continueToLocation
+Stopped on line 17, expected 17, requested 17, (0-based numbers).
+Control parameter 'step' calculation result: 6, expected: 6
+SUCCESS
+
diff --git a/deps/v8/test/inspector/debugger/continue-to-location.js b/deps/v8/test/inspector/debugger/continue-to-location.js
new file mode 100644
index 0000000000..b72c8585e6
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/continue-to-location.js
@@ -0,0 +1,114 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.addScript(
+`function statementsExample()
+{
+ var self = arguments.callee;
+
+ debugger;
+
+ self.step = 1;
+
+ self.step = 2;
+
+ void [
+ self.step = 3,
+ self.step = 4,
+ self.step = 5,
+ self.step = 6
+ ];
+
+ self.step = 7;
+}`);
+
+var scenario = [
+ // requested line number, expected control parameter 'step', expected line number
+ [ 8, 1, 8 ],
+ [ 8, 1, 8 ],
+ [ 12, 6, 17 ],
+ [ 13, 6, 17 ],
+ [ 17, 6, 17 ],
+ [ 17, 6, 17 ],
+];
+
+Protocol.Debugger.enable();
+
+Protocol.Runtime.evaluate({ "expression": "statementsExample" }).then(callbackEvalFunctionObject);
+
+function callbackEvalFunctionObject(response)
+{
+ var functionObjectId = response.result.result.objectId;
+ Protocol.Runtime.getProperties({ objectId: functionObjectId }).then(callbackFunctionDetails);
+}
+
+function callbackFunctionDetails(response)
+{
+ var result = response.result;
+ var scriptId;
+ for (var prop of result.internalProperties) {
+ if (prop.name === "[[FunctionLocation]]")
+ scriptId = prop.value.value.scriptId;
+ }
+
+ nextScenarioStep(0);
+
+ function nextScenarioStep(pos)
+ {
+ if (pos < scenario.length)
+ gotoSinglePassChain(scriptId, scenario[pos][0], scenario[pos][1], scenario[pos][2], nextScenarioStep.bind(this, pos + 1));
+ else
+ InspectorTest.completeTest();
+ }
+}
+
+function gotoSinglePassChain(scriptId, lineNumber, expectedResult, expectedLineNumber, next)
+{
+ Protocol.Debugger.oncePaused().then(handleDebuggerPausedOne);
+
+ Protocol.Runtime.evaluate({ "expression": "setTimeout(statementsExample, 0)" });
+
+ function handleDebuggerPausedOne(messageObject)
+ {
+ InspectorTest.log("Paused on debugger statement");
+
+ Protocol.Debugger.oncePaused().then(handleDebuggerPausedTwo);
+
+ Protocol.Debugger.continueToLocation({ location: { scriptId: scriptId, lineNumber: lineNumber, columnNumber: 0} }).then(logContinueToLocation);
+
+ function logContinueToLocation(response)
+ {
+ if (response.error) {
+ InspectorTest.log("Failed to execute continueToLocation " + JSON.stringify(response.error));
+ InspectorTest.completeTest();
+ }
+ }
+ }
+ function handleDebuggerPausedTwo(messageObject)
+ {
+ InspectorTest.log("Paused after continueToLocation");
+ var actualLineNumber = messageObject.params.callFrames[0].location.lineNumber;
+
+ InspectorTest.log("Stopped on line " + actualLineNumber + ", expected " + expectedLineNumber + ", requested " + lineNumber + ", (0-based numbers).");
+
+ Protocol.Debugger.oncePaused(handleDebuggerPausedUnexpected);
+
+ Protocol.Runtime.evaluate({ "expression": "statementsExample.step" }).then(callbackStepEvaluate);
+ }
+
+ function callbackStepEvaluate(response)
+ {
+ var resultValue = response.result.result.value;
+ InspectorTest.log("Control parameter 'step' calculation result: " + resultValue + ", expected: " + expectedResult);
+ InspectorTest.log(resultValue === expectedResult ? "SUCCESS" : "FAIL");
+ Protocol.Debugger.resume();
+ next();
+ }
+
+ function handleDebuggerPausedUnexpected(messageObject)
+ {
+ InspectorTest.log("Unexpected debugger pause");
+ InspectorTest.completeTest();
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/doesnt-step-into-injected-script-expected.txt b/deps/v8/test/inspector/debugger/doesnt-step-into-injected-script-expected.txt
new file mode 100644
index 0000000000..7fd52a0ba9
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/doesnt-step-into-injected-script-expected.txt
@@ -0,0 +1,17 @@
+Check that stepInto at then end of the script go to next user script instead InjectedScriptSource.js.
+Stack trace:
+boo:0:38
+:0:50
+
+Perform stepInto
+Stack trace:
+boo:0:48
+:0:50
+
+Perform stepInto
+Stack trace:
+:0:51
+
+Perform stepInto
+Stack trace:
+foo:0:12 \ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/doesnt-step-into-injected-script.js b/deps/v8/test/inspector/debugger/doesnt-step-into-injected-script.js
new file mode 100644
index 0000000000..4d0d1d1f19
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/doesnt-step-into-injected-script.js
@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Check that stepInto at then end of the script go to next user script instead InjectedScriptSource.js.");
+
+InspectorTest.addScript(
+`function foo()
+{
+ return 239;
+}`);
+
+Protocol.Debugger.enable();
+Protocol.Debugger.onPaused(debuggerPaused);
+Protocol.Runtime.evaluate({ "expression": "(function boo() { setTimeout(foo, 0); debugger; })()" });
+
+var actions = [ "stepInto", "stepInto", "stepInto" ];
+function debuggerPaused(result)
+{
+ InspectorTest.log("Stack trace:");
+ for (var callFrame of result.params.callFrames)
+ InspectorTest.log(callFrame.functionName + ":" + callFrame.location.lineNumber + ":" + callFrame.location.columnNumber);
+ InspectorTest.log("");
+
+ var action = actions.shift();
+ if (!action) {
+ Protocol.Debugger.resume().then(InspectorTest.completeTest);
+ return;
+ }
+ InspectorTest.log("Perform " + action);
+ Protocol.Debugger[action]();
+}
diff --git a/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt b/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt
new file mode 100644
index 0000000000..b3dce305d8
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt
@@ -0,0 +1,19 @@
+{
+ id : <messageId>
+ result : {
+ result : [
+ [0] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
+ name : a
+ value : {
+ description : 2
+ type : number
+ value : 2
+ }
+ writable : true
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name.js b/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name.js
new file mode 100644
index 0000000000..e2b38d8ec9
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name.js
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.addScript(
+`function testFunction()
+{
+ for (var a of [1]) {
+ ++a;
+ debugger;
+ }
+}`);
+
+Protocol.Debugger.enable();
+Protocol.Debugger.oncePaused().then(dumpScopeOnPause);
+Protocol.Runtime.evaluate({ "expression": "testFunction()" });
+
+var waitScopeObjects = 0;
+function dumpScopeOnPause(message)
+{
+ var scopeChain = message.params.callFrames[0].scopeChain;
+ var localScopeObjectIds = [];
+ for (var scope of scopeChain) {
+ if (scope.type === "local")
+ localScopeObjectIds.push(scope.object.objectId);
+ }
+ waitScopeObjects = localScopeObjectIds.length;
+ if (!waitScopeObjects) {
+ InspectorTest.completeTest();
+ } else {
+ for (var objectId of localScopeObjectIds)
+ Protocol.Runtime.getProperties({ "objectId" : objectId }).then(dumpProperties);
+ }
+}
+
+function dumpProperties(message)
+{
+ InspectorTest.logMessage(message);
+ --waitScopeObjects;
+ if (!waitScopeObjects)
+ Protocol.Debugger.resume().then(InspectorTest.completeTest);
+}
diff --git a/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt b/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt
new file mode 100644
index 0000000000..20fdb859fd
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt
@@ -0,0 +1,3 @@
+Hash received: 1C6D2E82E4E4F1BA4CB5762843D429DC872EBA18
+Hash received: EBF1ECD351E7A3294CB5762843D429DC872EBA18
+Hash received: 86A31E7131896CF01BA837945C2894385F369F24 \ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/script-parsed-hash.js b/deps/v8/test/inspector/debugger/script-parsed-hash.js
new file mode 100644
index 0000000000..5dd1dfacee
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/script-parsed-hash.js
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var hashes = new Set(["1C6D2E82E4E4F1BA4CB5762843D429DC872EBA18",
+ "EBF1ECD351E7A3294CB5762843D429DC872EBA18",
+ "86A31E7131896CF01BA837945C2894385F369F24"]);
+Protocol.Debugger.enable();
+Protocol.Debugger.onScriptParsed(function(messageObject)
+{
+ if (hashes.has(messageObject.params.hash))
+ InspectorTest.log(`Hash received: ${messageObject.params.hash}`);
+ else
+ InspectorTest.log(`[FAIL]: unknown hash ${messageObject.params.hash}`);
+});
+
+function longScript() {
+ var longScript = "var b = 1;";
+ for (var i = 0; i < 2024; ++i)
+ longScript += "++b;";
+}
+
+Protocol.Runtime.enable();
+Protocol.Runtime.compileScript({ expression: "1", sourceURL: "foo1.js", persistScript: true });
+Protocol.Runtime.compileScript({ expression: "239", sourceURL: "foo2.js", persistScript: true });
+Protocol.Runtime.compileScript({ expression: "(" + longScript + ")()", sourceURL: "foo3.js", persistScript: true }).then(step2);
+
+function step2()
+{
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/debugger/set-blackbox-patterns-expected.txt b/deps/v8/test/inspector/debugger/set-blackbox-patterns-expected.txt
new file mode 100644
index 0000000000..fb54163107
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-blackbox-patterns-expected.txt
@@ -0,0 +1,25 @@
+Pattern parser error: Uncaught SyntaxError: Invalid regular expression: /(foo([)/: Unterminated character class
+Paused in
+(...):1
+Paused in
+(...):1
+Paused in
+qwe:3
+baz:3
+(...):1
+Paused in
+bar:3
+foo:3
+qwe:3
+baz:3
+(...):1
+Paused in
+qwe:4
+baz:3
+(...):1
+Paused in
+qwe:5
+baz:3
+(...):1
+Paused in
+(...):1
diff --git a/deps/v8/test/inspector/debugger/set-blackbox-patterns.js b/deps/v8/test/inspector/debugger/set-blackbox-patterns.js
new file mode 100644
index 0000000000..12e9e214d3
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-blackbox-patterns.js
@@ -0,0 +1,59 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.addScript(
+`function bar()
+{
+ return 42;
+}`);
+
+InspectorTest.addScript(
+`function foo()
+{
+ var a = bar();
+ return a + 1;
+}
+//# sourceURL=foo.js`);
+
+InspectorTest.addScript(
+`function qwe()
+{
+ var a = foo();
+ return a + 1;
+}
+//# sourceURL=qwe.js`);
+
+InspectorTest.addScript(
+`function baz()
+{
+ var a = qwe();
+ return a + 1;
+}
+//# sourceURL=baz.js`);
+
+Protocol.Debugger.enable();
+Protocol.Debugger.setBlackboxPatterns({ patterns: [ "foo([" ] }).then(dumpError);
+
+function dumpError(message)
+{
+ InspectorTest.log(message.error.message);
+ Protocol.Debugger.onPaused(dumpStackAndRunNextCommand);
+ Protocol.Debugger.setBlackboxPatterns({ patterns: [ "baz\.js", "foo\.js" ] });
+ Protocol.Runtime.evaluate({ "expression": "debugger;baz()" });
+}
+
+var commands = [ "stepInto", "stepInto", "stepInto", "stepOut", "stepInto", "stepInto" ];
+function dumpStackAndRunNextCommand(message)
+{
+ InspectorTest.log("Paused in");
+ var callFrames = message.params.callFrames;
+ for (var callFrame of callFrames)
+ InspectorTest.log((callFrame.functionName || "(...)") + ":" + (callFrame.location.lineNumber + 1));
+ var command = commands.shift();
+ if (!command) {
+ InspectorTest.completeTest();
+ return;
+ }
+ Protocol.Debugger[command]();
+}
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
new file mode 100644
index 0000000000..81685a2b8b
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
@@ -0,0 +1,7 @@
+setBreakpointByUrl error: undefined
+setBreakpoint error: {
+ "code": -32602,
+ "message": "Invalid request",
+ "data": "location: object expected"
+}
+
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js
new file mode 100644
index 0000000000..8480aa6f75
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Protocol.Debugger.setBreakpointByUrl({ url: "http://example.com", lineNumber: 10 }).then(didSetBreakpointByUrlBeforeEnable);
+
+function didSetBreakpointByUrlBeforeEnable(message)
+{
+ InspectorTest.log("setBreakpointByUrl error: " + JSON.stringify(message.error, null, 2));
+ Protocol.Debugger.setBreakpoint().then(didSetBreakpointBeforeEnable);
+}
+
+function didSetBreakpointBeforeEnable(message)
+{
+ InspectorTest.log("setBreakpoint error: " + JSON.stringify(message.error, null, 2));
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/debugger/set-script-source-expected.txt b/deps/v8/test/inspector/debugger/set-script-source-expected.txt
new file mode 100644
index 0000000000..1b76ec5f95
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-script-source-expected.txt
@@ -0,0 +1,8 @@
+Function evaluate: {"type":"number","value":6,"description":"6"}
+PASS, result value: 6
+Function evaluate: {"type":"number","value":8,"description":"8"}
+PASS, result value: 8
+Has error reported: PASS
+Reported error is a compile error: PASS
+PASS, result value: 1
+
diff --git a/deps/v8/test/inspector/debugger/set-script-source.js b/deps/v8/test/inspector/debugger/set-script-source.js
new file mode 100644
index 0000000000..36944cca6c
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-script-source.js
@@ -0,0 +1,152 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.addScript(
+`function TestExpression(a, b) {
+ return a + b;
+}`);
+
+// A general-purpose engine for sending a sequence of protocol commands.
+// The clients provide requests and response handlers, while the engine catches
+// errors and makes sure that once there's nothing to do completeTest() is called.
+// @param step is an object with command, params and callback fields
+function runRequestSeries(step) {
+ processStep(step);
+
+ function processStep(currentStep) {
+ try {
+ processStepOrFail(currentStep);
+ } catch (e) {
+ InspectorTest.log(e.stack);
+ InspectorTest.completeTest();
+ }
+ }
+
+ function processStepOrFail(currentStep) {
+ if (!currentStep) {
+ InspectorTest.completeTest();
+ return;
+ }
+ if (!currentStep.command) {
+ // A simple loopback step.
+ var next = currentStep.callback();
+ processStep(next);
+ return;
+ }
+
+ var innerCallback = function(response) {
+ var next;
+ if ("error" in response) {
+ if (!("errorHandler" in currentStep)) {
+ // Error message is not logged intentionally, it may be platform-specific.
+ InspectorTest.log("Protocol command '" + currentStep.command + "' failed");
+ InspectorTest.completeTest();
+ return;
+ }
+ try {
+ next = currentStep.errorHandler(response.error);
+ } catch (e) {
+ InspectorTest.log(e.stack);
+ InspectorTest.completeTest();
+ return;
+ }
+ } else {
+ try {
+ next = currentStep.callback(response.result);
+ } catch (e) {
+ InspectorTest.log(e.stack);
+ InspectorTest.completeTest();
+ return;
+ }
+ }
+ processStep(next);
+ }
+ var command = currentStep.command.split(".");
+ Protocol[command[0]][command[1]](currentStep.params).then(innerCallback);
+ }
+}
+
+function logEqualsCheck(actual, expected)
+{
+ if (actual === expected) {
+ InspectorTest.log("PASS, result value: " + actual);
+ } else {
+ InspectorTest.log("FAIL, actual value: " + actual + ", expected: " + expected);
+ }
+}
+function logCheck(description, success)
+{
+ InspectorTest.log(description + ": " + (success ? "PASS" : "FAIL"));
+}
+
+var firstStep = { callback: enableDebugger };
+
+runRequestSeries(firstStep);
+
+function enableDebugger() {
+ return { command: "Debugger.enable", params: {}, callback: evalFunction };
+}
+
+function evalFunction(response) {
+ var expression = "TestExpression(2, 4)";
+ return { command: "Runtime.evaluate", params: { expression: expression }, callback: callbackEvalFunction };
+}
+
+function callbackEvalFunction(result) {
+ InspectorTest.log("Function evaluate: " + JSON.stringify(result.result));
+ logEqualsCheck(result.result.value, 6);
+
+ return { command: "Runtime.evaluate", params: { expression: "TestExpression" }, callback: callbackEvalFunctionObject };
+}
+
+function callbackEvalFunctionObject(result) {
+ return { command: "Runtime.getProperties", params: { objectId: result.result.objectId }, callback: callbackFunctionDetails };
+}
+
+function callbackFunctionDetails(result)
+{
+ var scriptId;
+ for (var prop of result.internalProperties) {
+ if (prop.name === "[[FunctionLocation]]")
+ scriptId = prop.value.value.scriptId;
+ }
+ return createScriptManipulationArc(scriptId, null);
+}
+
+// Several steps with scriptId in context.
+function createScriptManipulationArc(scriptId, next) {
+ return { command: "Debugger.getScriptSource", params: { scriptId: scriptId }, callback: callbackGetScriptSource };
+
+ var originalText;
+
+ function callbackGetScriptSource(result) {
+ originalText = result.scriptSource;
+ var patched = originalText.replace("a + b", "a * b");
+
+ return { command: "Debugger.setScriptSource", params: { scriptId: scriptId, scriptSource: patched }, callback: callbackSetScriptSource };
+ }
+
+ function callbackSetScriptSource(result) {
+ var expression = "TestExpression(2, 4)";
+ return { command: "Runtime.evaluate", params: { expression: expression }, callback: callbackEvalFunction2 };
+ }
+
+ function callbackEvalFunction2(result) {
+ InspectorTest.log("Function evaluate: " + JSON.stringify(result.result));
+ logEqualsCheck(result.result.value, 8);
+
+ var patched = originalText.replace("a + b", "a # b");
+
+ return { command: "Debugger.setScriptSource", params: { scriptId: scriptId, scriptSource: patched }, callback: errorCallbackSetScriptSource2 };
+ }
+
+ function errorCallbackSetScriptSource2(result) {
+ var exceptionDetails = result.exceptionDetails;
+ logCheck("Has error reported", !!exceptionDetails);
+ logCheck("Reported error is a compile error", !!exceptionDetails);
+ if (exceptionDetails)
+ logEqualsCheck(exceptionDetails.lineNumber, 1);
+ return next;
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/step-over-caught-exception-expected.txt b/deps/v8/test/inspector/debugger/step-over-caught-exception-expected.txt
new file mode 100644
index 0000000000..a18b0934cb
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/step-over-caught-exception-expected.txt
@@ -0,0 +1,4 @@
+testFunction:9
+testFunction:11
+testFunction:9
+testFunction:11 \ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/step-over-caught-exception.js b/deps/v8/test/inspector/debugger/step-over-caught-exception.js
new file mode 100644
index 0000000000..e00dcf27dc
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/step-over-caught-exception.js
@@ -0,0 +1,76 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.addScript(
+`function testFunction()
+{
+ function foo()
+ {
+ try {
+ throw new Error();
+ } catch (e) {
+ }
+ }
+ debugger;
+ foo();
+ console.log("completed");
+}`);
+
+Protocol.Debugger.enable();
+Protocol.Runtime.enable();
+step1();
+
+function step1()
+{
+ Protocol.Runtime.evaluate({ "expression": "setTimeout(testFunction, 0);"});
+ var commands = [ "Print", "stepOver", "stepOver", "Print", "resume" ];
+ Protocol.Debugger.onPaused(function(messageObject)
+ {
+ var command = commands.shift();
+ if (command === "Print") {
+ var callFrames = messageObject.params.callFrames;
+ for (var callFrame of callFrames)
+ InspectorTest.log(callFrame.functionName + ":" + callFrame.location.lineNumber);
+ command = commands.shift();
+ }
+ if (command)
+ Protocol.Debugger[command]();
+ });
+
+ Protocol.Runtime.onConsoleAPICalled(function(messageObject)
+ {
+ if (messageObject.params.args[0].value === "completed") {
+ if (commands.length)
+ InspectorTest.log("[FAIL]: execution was resumed too earlier.")
+ step2();
+ }
+ });
+}
+
+function step2()
+{
+ Protocol.Runtime.evaluate({ "expression": "setTimeout(testFunction, 0);"});
+ var commands = [ "Print", "stepOver", "stepInto", "stepOver", "stepOver", "Print", "resume" ];
+ Protocol.Debugger.onPaused(function(messageObject)
+ {
+ var command = commands.shift();
+ if (command === "Print") {
+ var callFrames = messageObject.params.callFrames;
+ for (var callFrame of callFrames)
+ InspectorTest.log(callFrame.functionName + ":" + callFrame.location.lineNumber);
+ command = commands.shift();
+ }
+ if (command)
+ Protocol.Debugger[command]();
+ });
+
+ Protocol.Runtime.onConsoleAPICalled(function(messageObject)
+ {
+ if (messageObject.params.args[0].value === "completed") {
+ if (commands.length)
+ InspectorTest.log("[FAIL]: execution was resumed too earlier.")
+ InspectorTest.completeTest();
+ }
+ });
+}
diff --git a/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges-expected.txt b/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges-expected.txt
new file mode 100644
index 0000000000..acea22fd5f
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges-expected.txt
@@ -0,0 +1,54 @@
+foo: 8:4
+blackboxedBoo: 3:12
+notBlackboxedFoo: 3:12
+blackboxedFoo: 10:12
+notBlackboxedBoo: 17:12
+testFunction: 2:4
+
+Try to set positions: [{"lineNumber":0,"columnNumber":0},{"lineNumber":0,"columnNumber":0}]
+Input positions array is not sorted or contains duplicate values.
+Try to set positions: [{"lineNumber":0,"columnNumber":1},{"lineNumber":0,"columnNumber":0}]
+Input positions array is not sorted or contains duplicate values.
+Try to set positions: [{"lineNumber":0,"columnNumber":-1}]
+Position missing 'column' or 'column' < 0.
+action: stepOut
+notBlackboxedFoo: 4:4
+blackboxedFoo: 10:12
+notBlackboxedBoo: 17:12
+testFunction: 2:4
+
+action: stepOut
+notBlackboxedBoo: 18:4
+testFunction: 2:4
+
+action: stepOut
+testFunction: 3:4
+
+action: stepInto
+notBlackboxedBoo: 16:12
+testFunction: 3:4
+
+action: stepOver
+action: stepInto
+notBlackboxedFoo: 2:12
+blackboxedFoo: 10:12
+notBlackboxedBoo: 17:12
+testFunction: 3:4
+
+action: stepOver
+action: stepInto
+foo: 8:4
+blackboxedBoo: 3:12
+notBlackboxedFoo: 3:12
+blackboxedFoo: 10:12
+notBlackboxedBoo: 17:12
+testFunction: 3:4
+
+action: stepOver
+action: stepInto
+foo: 10:0
+blackboxedBoo: 3:12
+notBlackboxedFoo: 3:12
+blackboxedFoo: 10:12
+notBlackboxedBoo: 17:12
+testFunction: 3:4
diff --git a/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js b/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js
new file mode 100644
index 0000000000..740634f68f
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js
@@ -0,0 +1,126 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.addScript(
+`function blackboxedBoo()
+{
+ var a = 42;
+ var b = foo();
+ return a + b;
+}
+//# sourceURL=blackboxed-script.js`);
+
+InspectorTest.addScript(
+`function notBlackboxedFoo()
+{
+ var a = 42;
+ var b = blackboxedBoo();
+ return a + b;
+}
+
+function blackboxedFoo()
+{
+ var a = 42;
+ var b = notBlackboxedFoo();
+ return a + b;
+}
+
+function notBlackboxedBoo()
+{
+ var a = 42;
+ var b = blackboxedFoo();
+ return a + b;
+}
+//# sourceURL=mixed-source.js`);
+
+InspectorTest.addScript(
+`function testFunction()
+{
+ notBlackboxedBoo(); // for setup ranges and stepOut
+ notBlackboxedBoo(); // for stepIn
+}
+
+function foo()
+{
+ debugger;
+ return 239;
+}`);
+
+Protocol.Debugger.oncePaused().then(setBlackboxedScriptRanges);
+Protocol.Debugger.enable().then(callTestFunction);
+
+function callTestFunction(response)
+{
+ Protocol.Runtime.evaluate({ expression: "setTimeout(testFunction, 0);"});
+}
+
+function setBlackboxedScriptRanges(response)
+{
+ var callFrames = response.params.callFrames;
+ printCallFrames(callFrames);
+ Protocol.Debugger.setBlackboxedRanges({
+ scriptId: callFrames[1].location.scriptId,
+ positions: [ { lineNumber: 0, columnNumber: 0 } ] // blackbox ranges for blackboxed.js
+ }).then(setIncorrectRanges.bind(null, callFrames[2].location.scriptId));
+}
+
+var incorrectPositions = [
+ [ { lineNumber: 0, columnNumber: 0 }, { lineNumber: 0, columnNumber: 0 } ],
+ [ { lineNumber: 0, columnNumber: 1 }, { lineNumber: 0, columnNumber: 0 } ],
+ [ { lineNumber: 0, columnNumber: -1 } ],
+];
+
+function setIncorrectRanges(scriptId, response)
+{
+ if (response.error)
+ InspectorTest.log(response.error.message);
+ var positions = incorrectPositions.shift();
+ if (!positions) {
+ setMixedSourceRanges(scriptId);
+ return;
+ }
+ InspectorTest.log("Try to set positions: " + JSON.stringify(positions));
+ Protocol.Debugger.setBlackboxedRanges({
+ scriptId: scriptId,
+ positions: positions
+ }).then(setIncorrectRanges.bind(null, scriptId));
+}
+
+function setMixedSourceRanges(scriptId)
+{
+ Protocol.Debugger.onPaused(runAction);
+ Protocol.Debugger.setBlackboxedRanges({
+ scriptId: scriptId,
+ positions: [ { lineNumber: 8, columnNumber: 0 }, { lineNumber: 15, columnNumber: 0 } ] // blackbox ranges for mixed.js
+ }).then(runAction);
+}
+
+var actions = [ "stepOut", "print", "stepOut", "print", "stepOut", "print",
+ "stepInto", "print", "stepOver", "stepInto", "print", "stepOver", "stepInto", "print",
+ "stepOver", "stepInto", "print" ];
+
+function runAction(response)
+{
+ var action = actions.shift();
+ if (!action)
+ InspectorTest.completeTest();
+
+ if (action === "print") {
+ printCallFrames(response.params.callFrames);
+ runAction({});
+ } else {
+ InspectorTest.log("action: " + action);
+ Protocol.Debugger[action]();
+ }
+}
+
+function printCallFrames(callFrames)
+{
+ var topCallFrame = callFrames[0];
+ if (topCallFrame.functionName.startsWith("blackboxed"))
+ InspectorTest.log("FAIL: blackboxed function in top call frame");
+ for (var callFrame of callFrames)
+ InspectorTest.log(callFrame.functionName + ": " + callFrame.location.lineNumber + ":" + callFrame.location.columnNumber);
+ InspectorTest.log("");
+}
diff --git a/deps/v8/test/inspector/debugger/update-call-frame-scopes-expected.txt b/deps/v8/test/inspector/debugger/update-call-frame-scopes-expected.txt
new file mode 100644
index 0000000000..ed52d231c2
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/update-call-frame-scopes-expected.txt
@@ -0,0 +1,7 @@
+Paused on 'debugger;'
+Variable value changed
+Stacktrace re-read again
+Scope variables downloaded anew
+New variable is 55, expected is 55, old was: 2
+SUCCESS
+
diff --git a/deps/v8/test/inspector/debugger/update-call-frame-scopes.js b/deps/v8/test/inspector/debugger/update-call-frame-scopes.js
new file mode 100644
index 0000000000..f4a0f12397
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/update-call-frame-scopes.js
@@ -0,0 +1,63 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.addScript(
+`function TestFunction()
+{
+ var a = 2;
+ debugger;
+ debugger;
+}`);
+
+var newVariableValue = 55;
+
+Protocol.Debugger.enable();
+
+Protocol.Debugger.oncePaused().then(handleDebuggerPaused);
+
+Protocol.Runtime.evaluate({ "expression": "setTimeout(TestFunction, 0)" });
+
+function handleDebuggerPaused(messageObject)
+{
+ InspectorTest.log("Paused on 'debugger;'");
+
+ var topFrame = messageObject.params.callFrames[0];
+ var topFrameId = topFrame.callFrameId;
+ Protocol.Debugger.evaluateOnCallFrame({ "callFrameId": topFrameId, "expression": "a = " + newVariableValue }).then(callbackChangeValue);
+}
+
+function callbackChangeValue(response)
+{
+ InspectorTest.log("Variable value changed");
+ Protocol.Debugger.oncePaused().then(callbackGetBacktrace);
+ Protocol.Debugger.resume();
+}
+
+function callbackGetBacktrace(response)
+{
+ InspectorTest.log("Stacktrace re-read again");
+ var localScope = response.params.callFrames[0].scopeChain[0];
+ Protocol.Runtime.getProperties({ "objectId": localScope.object.objectId }).then(callbackGetProperties);
+}
+
+function callbackGetProperties(response)
+{
+ InspectorTest.log("Scope variables downloaded anew");
+ var varNamedA;
+ var propertyList = response.result.result;
+ for (var i = 0; i < propertyList.length; i++) {
+ if (propertyList[i].name === "a") {
+ varNamedA = propertyList[i];
+ break;
+ }
+ }
+ if (varNamedA) {
+ var actualValue = varNamedA.value.value;
+ InspectorTest.log("New variable is " + actualValue + ", expected is " + newVariableValue + ", old was: 2");
+ InspectorTest.log(actualValue === newVariableValue ? "SUCCESS" : "FAIL");
+ } else {
+ InspectorTest.log("Failed to find variable in scope");
+ }
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/inspector-impl.cc b/deps/v8/test/inspector/inspector-impl.cc
new file mode 100644
index 0000000000..57499215b9
--- /dev/null
+++ b/deps/v8/test/inspector/inspector-impl.cc
@@ -0,0 +1,201 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/inspector/inspector-impl.h"
+
+#include "include/v8.h"
+#include "src/inspector/string-16.h"
+
+namespace {
+
+const int kInspectorClientIndex = v8::Context::kDebugIdIndex + 1;
+
+class ChannelImpl final : public v8_inspector::V8Inspector::Channel {
+ public:
+ explicit ChannelImpl(InspectorClientImpl::FrontendChannel* frontend_channel)
+ : frontend_channel_(frontend_channel) {}
+ virtual ~ChannelImpl() = default;
+
+ private:
+ void sendProtocolResponse(int callId,
+ const v8_inspector::StringView& message) override {
+ frontend_channel_->SendMessageToFrontend(message);
+ }
+ void sendProtocolNotification(
+ const v8_inspector::StringView& message) override {
+ frontend_channel_->SendMessageToFrontend(message);
+ }
+ void flushProtocolNotifications() override {}
+
+ InspectorClientImpl::FrontendChannel* frontend_channel_;
+ DISALLOW_COPY_AND_ASSIGN(ChannelImpl);
+};
+
+InspectorClientImpl* InspectorClientFromContext(
+ v8::Local<v8::Context> context) {
+ InspectorClientImpl* inspector_client = static_cast<InspectorClientImpl*>(
+ context->GetAlignedPointerFromEmbedderData(kInspectorClientIndex));
+ CHECK(inspector_client);
+ return inspector_client;
+}
+
+v8_inspector::String16 ToString16(v8::Local<v8::String> str) {
+ std::unique_ptr<uint16_t[]> buffer(new uint16_t[str->Length()]);
+ str->Write(reinterpret_cast<uint16_t*>(buffer.get()), 0, str->Length());
+ return v8_inspector::String16(buffer.get(), str->Length());
+}
+
+void MessageHandler(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> exception) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Local<v8::Context> context = isolate->GetEnteredContext();
+ if (context.IsEmpty()) return;
+ v8_inspector::V8Inspector* inspector =
+ InspectorClientImpl::InspectorFromContext(context);
+
+ v8::Local<v8::StackTrace> stack = message->GetStackTrace();
+ int script_id = message->GetScriptOrigin().ScriptID()->Value();
+ if (!stack.IsEmpty() && stack->GetFrameCount() > 0) {
+ int top_script_id = stack->GetFrame(0)->GetScriptId();
+ if (top_script_id == script_id) script_id = 0;
+ }
+ int line_number = message->GetLineNumber(context).FromMaybe(0);
+ int column_number = 0;
+ if (message->GetStartColumn(context).IsJust())
+ column_number = message->GetStartColumn(context).FromJust() + 1;
+
+ v8_inspector::StringView detailed_message;
+ v8_inspector::String16 message_text_string = ToString16(message->Get());
+ v8_inspector::StringView message_text(message_text_string.characters16(),
+ message_text_string.length());
+ v8_inspector::String16 url_string;
+ if (message->GetScriptOrigin().ResourceName()->IsString()) {
+ url_string =
+ ToString16(message->GetScriptOrigin().ResourceName().As<v8::String>());
+ }
+ v8_inspector::StringView url(url_string.characters16(), url_string.length());
+
+ inspector->exceptionThrown(context, message_text, exception, detailed_message,
+ url, line_number, column_number,
+ inspector->createStackTrace(stack), script_id);
+}
+
+} // namespace
+
+class ConnectTask : public TaskRunner::Task {
+ public:
+ ConnectTask(InspectorClientImpl* client, v8::base::Semaphore* ready_semaphore)
+ : client_(client), ready_semaphore_(ready_semaphore) {}
+ virtual ~ConnectTask() = default;
+
+ bool is_inspector_task() final { return true; }
+
+ void Run(v8::Isolate* isolate,
+ const v8::Global<v8::Context>& global_context) {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = global_context.Get(isolate);
+ client_->connect(context);
+ if (ready_semaphore_) ready_semaphore_->Signal();
+ }
+
+ private:
+ InspectorClientImpl* client_;
+ v8::base::Semaphore* ready_semaphore_;
+};
+
+InspectorClientImpl::InspectorClientImpl(TaskRunner* task_runner,
+ FrontendChannel* frontend_channel,
+ v8::base::Semaphore* ready_semaphore)
+ : isolate_(nullptr),
+ task_runner_(task_runner),
+ frontend_channel_(frontend_channel) {
+ task_runner_->Append(new ConnectTask(this, ready_semaphore));
+}
+
+InspectorClientImpl::~InspectorClientImpl() {}
+
+void InspectorClientImpl::connect(v8::Local<v8::Context> context) {
+ isolate_ = context->GetIsolate();
+ isolate_->AddMessageListener(MessageHandler);
+ channel_.reset(new ChannelImpl(frontend_channel_));
+
+ inspector_ = v8_inspector::V8Inspector::create(isolate_, this);
+ session_ = inspector_->connect(1, channel_.get(), v8_inspector::StringView());
+
+ context->SetAlignedPointerInEmbedderData(kInspectorClientIndex, this);
+ inspector_->contextCreated(
+ v8_inspector::V8ContextInfo(context, 1, v8_inspector::StringView()));
+ context_.Reset(isolate_, context);
+}
+
+v8::Local<v8::Context> InspectorClientImpl::ensureDefaultContextInGroup(int) {
+ CHECK(isolate_);
+ return context_.Get(isolate_);
+}
+
+double InspectorClientImpl::currentTimeMS() {
+ return v8::base::OS::TimeCurrentMillis();
+}
+
+void InspectorClientImpl::runMessageLoopOnPause(int) {
+ task_runner_->RunMessageLoop(true);
+}
+
+void InspectorClientImpl::quitMessageLoopOnPause() {
+ task_runner_->QuitMessageLoop();
+}
+
+v8_inspector::V8Inspector* InspectorClientImpl::InspectorFromContext(
+ v8::Local<v8::Context> context) {
+ return InspectorClientFromContext(context)->inspector_.get();
+}
+
+v8_inspector::V8InspectorSession* InspectorClientImpl::SessionFromContext(
+ v8::Local<v8::Context> context) {
+ return InspectorClientFromContext(context)->session_.get();
+}
+
+class SendMessageToBackendTask : public TaskRunner::Task {
+ public:
+ explicit SendMessageToBackendTask(const v8_inspector::String16& message)
+ : message_(message) {}
+
+ bool is_inspector_task() final { return true; }
+
+ void Run(v8::Isolate* isolate,
+ const v8::Global<v8::Context>& global_context) override {
+ v8_inspector::V8InspectorSession* session = nullptr;
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = global_context.Get(isolate);
+ session = InspectorClientImpl::SessionFromContext(context);
+ CHECK(session);
+ }
+ v8_inspector::StringView message_view(
+ reinterpret_cast<const uint16_t*>(message_.characters16()),
+ message_.length());
+ session->dispatchProtocolMessage(message_view);
+ }
+
+ private:
+ v8_inspector::String16 message_;
+};
+
+TaskRunner* SendMessageToBackendExtension::backend_task_runner_ = nullptr;
+
+v8::Local<v8::FunctionTemplate>
+SendMessageToBackendExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) {
+ return v8::FunctionTemplate::New(
+ isolate, SendMessageToBackendExtension::SendMessageToBackend);
+}
+
+void SendMessageToBackendExtension::SendMessageToBackend(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CHECK(backend_task_runner_);
+ CHECK(args.Length() == 1 && args[0]->IsString());
+ v8::Local<v8::String> message = args[0].As<v8::String>();
+ backend_task_runner_->Append(
+ new SendMessageToBackendTask(ToString16(message)));
+}
diff --git a/deps/v8/test/inspector/inspector-impl.h b/deps/v8/test/inspector/inspector-impl.h
new file mode 100644
index 0000000000..f94bef1599
--- /dev/null
+++ b/deps/v8/test/inspector/inspector-impl.h
@@ -0,0 +1,79 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_INSPECTOR_PROTOCOL_INSPECTOR_IMPL_H_
+#define V8_TEST_INSPECTOR_PROTOCOL_INSPECTOR_IMPL_H_
+
+#include "include/v8-inspector.h"
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "test/inspector/task-runner.h"
+
+class InspectorClientImpl : public v8_inspector::V8InspectorClient {
+ public:
+ class FrontendChannel {
+ public:
+ virtual ~FrontendChannel() = default;
+ virtual void SendMessageToFrontend(
+ const v8_inspector::StringView& message) = 0;
+ };
+
+ InspectorClientImpl(TaskRunner* task_runner,
+ FrontendChannel* frontend_channel,
+ v8::base::Semaphore* ready_semaphore);
+ virtual ~InspectorClientImpl();
+
+ static v8_inspector::V8Inspector* InspectorFromContext(
+ v8::Local<v8::Context> context);
+
+ private:
+ // V8InspectorClient implementation.
+ v8::Local<v8::Context> ensureDefaultContextInGroup(
+ int context_group_id) override;
+ double currentTimeMS() override;
+ void runMessageLoopOnPause(int context_group_id) override;
+ void quitMessageLoopOnPause() override;
+
+ static v8_inspector::V8InspectorSession* SessionFromContext(
+ v8::Local<v8::Context> context);
+
+ friend class SendMessageToBackendTask;
+
+ friend class ConnectTask;
+ void connect(v8::Local<v8::Context> context);
+
+ std::unique_ptr<v8_inspector::V8Inspector> inspector_;
+ std::unique_ptr<v8_inspector::V8InspectorSession> session_;
+ std::unique_ptr<v8_inspector::V8Inspector::Channel> channel_;
+
+ v8::Isolate* isolate_;
+ v8::Global<v8::Context> context_;
+
+ TaskRunner* task_runner_;
+ FrontendChannel* frontend_channel_;
+
+ DISALLOW_COPY_AND_ASSIGN(InspectorClientImpl);
+};
+
+class SendMessageToBackendExtension : public v8::Extension {
+ public:
+ SendMessageToBackendExtension()
+ : v8::Extension("v8_inspector/frontend",
+ "native function sendMessageToBackend();") {}
+ virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name);
+
+ static void set_backend_task_runner(TaskRunner* task_runner) {
+ backend_task_runner_ = task_runner;
+ }
+
+ private:
+ static void SendMessageToBackend(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ static TaskRunner* backend_task_runner_;
+};
+
+#endif // V8_TEST_INSPECTOR_PROTOCOL_INSPECTOR_IMPL_H_
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
new file mode 100644
index 0000000000..872d211c75
--- /dev/null
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -0,0 +1,254 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !defined(_WIN32) && !defined(_WIN64)
+#include <unistd.h> // NOLINT
+#endif // !defined(_WIN32) && !defined(_WIN64)
+
+#include <locale.h>
+
+#include "include/libplatform/libplatform.h"
+#include "include/v8.h"
+
+#include "src/base/platform/platform.h"
+#include "src/flags.h"
+#include "src/utils.h"
+#include "src/vector.h"
+
+#include "test/inspector/inspector-impl.h"
+#include "test/inspector/task-runner.h"
+
+namespace {
+
+void Exit() {
+ fflush(stdout);
+ fflush(stderr);
+ _exit(0);
+}
+
+class UtilsExtension : public v8::Extension {
+ public:
+ UtilsExtension()
+ : v8::Extension("v8_inspector/utils",
+ "native function print();"
+ "native function quit();"
+ "native function setlocale();") {}
+ virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) {
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ if (name->Equals(context, v8::String::NewFromUtf8(
+ isolate, "print", v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust()) {
+ return v8::FunctionTemplate::New(isolate, UtilsExtension::Print);
+ } else if (name->Equals(context,
+ v8::String::NewFromUtf8(isolate, "quit",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust()) {
+ return v8::FunctionTemplate::New(isolate, UtilsExtension::Quit);
+ } else if (name->Equals(context,
+ v8::String::NewFromUtf8(isolate, "setlocale",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust()) {
+ return v8::FunctionTemplate::New(isolate, UtilsExtension::SetLocale);
+ }
+ return v8::Local<v8::FunctionTemplate>();
+ }
+
+ private:
+ static void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ for (int i = 0; i < args.Length(); i++) {
+ v8::HandleScope handle_scope(args.GetIsolate());
+ if (i != 0) {
+ printf(" ");
+ }
+
+ // Explicitly catch potential exceptions in toString().
+ v8::TryCatch try_catch(args.GetIsolate());
+ v8::Local<v8::Value> arg = args[i];
+ v8::Local<v8::String> str_obj;
+
+ if (arg->IsSymbol()) {
+ arg = v8::Local<v8::Symbol>::Cast(arg)->Name();
+ }
+ if (!arg->ToString(args.GetIsolate()->GetCurrentContext())
+ .ToLocal(&str_obj)) {
+ try_catch.ReThrow();
+ return;
+ }
+
+ v8::String::Utf8Value str(str_obj);
+ int n =
+ static_cast<int>(fwrite(*str, sizeof(**str), str.length(), stdout));
+ if (n != str.length()) {
+ printf("Error in fwrite\n");
+ Quit(args);
+ }
+ }
+ printf("\n");
+ fflush(stdout);
+ }
+
+ static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args) { Exit(); }
+
+ static void SetLocale(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsString()) {
+ fprintf(stderr, "Internal error: setlocale get one string argument.");
+ Exit();
+ }
+ v8::String::Utf8Value str(args[0]);
+ setlocale(LC_NUMERIC, *str);
+ }
+};
+
+class SetTimeoutTask : public TaskRunner::Task {
+ public:
+ SetTimeoutTask(v8::Isolate* isolate, v8::Local<v8::Function> function)
+ : function_(isolate, function) {}
+ virtual ~SetTimeoutTask() {}
+
+ bool is_inspector_task() final { return false; }
+
+ void Run(v8::Isolate* isolate,
+ const v8::Global<v8::Context>& global_context) override {
+ v8::MicrotasksScope microtasks_scope(isolate,
+ v8::MicrotasksScope::kRunMicrotasks);
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = global_context.Get(isolate);
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::Function> function = function_.Get(isolate);
+ v8::MaybeLocal<v8::Value> result;
+ v8_inspector::V8Inspector* inspector =
+ InspectorClientImpl::InspectorFromContext(context);
+ if (inspector) inspector->willExecuteScript(context, function->ScriptId());
+ result = function->Call(context, context->Global(), 0, nullptr);
+ if (inspector) inspector->didExecuteScript(context);
+ }
+
+ private:
+ v8::Global<v8::Function> function_;
+};
+
+class SetTimeoutExtension : public v8::Extension {
+ public:
+ SetTimeoutExtension()
+ : v8::Extension("v8_inspector/setTimeout",
+ "native function setTimeout();") {}
+
+ virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) {
+ return v8::FunctionTemplate::New(isolate, SetTimeoutExtension::SetTimeout);
+ }
+
+ private:
+ static void SetTimeout(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 2 || !args[1]->IsNumber() ||
+ (!args[0]->IsFunction() && !args[0]->IsString()) ||
+ args[1].As<v8::Number>()->Value() != 0.0) {
+ fprintf(stderr,
+ "Internal error: only setTimeout(function, 0) is supported.");
+ Exit();
+ }
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ if (args[0]->IsFunction()) {
+ TaskRunner::FromContext(context)->Append(new SetTimeoutTask(
+ args.GetIsolate(), v8::Local<v8::Function>::Cast(args[0])));
+ } else {
+ v8::Local<v8::String> data = args[0].As<v8::String>();
+ std::unique_ptr<uint16_t[]> buffer(new uint16_t[data->Length()]);
+ data->Write(reinterpret_cast<uint16_t*>(buffer.get()), 0, data->Length());
+ v8_inspector::String16 source =
+ v8_inspector::String16(buffer.get(), data->Length());
+ TaskRunner::FromContext(context)->Append(new ExecuteStringTask(source));
+ }
+ }
+};
+
+v8_inspector::String16 ToString16(const v8_inspector::StringView& string) {
+ if (string.is8Bit())
+ return v8_inspector::String16(
+ reinterpret_cast<const char*>(string.characters8()), string.length());
+ return v8_inspector::String16(
+ reinterpret_cast<const uint16_t*>(string.characters16()),
+ string.length());
+}
+
+class FrontendChannelImpl : public InspectorClientImpl::FrontendChannel {
+ public:
+ explicit FrontendChannelImpl(TaskRunner* frontend_task_runner)
+ : frontend_task_runner_(frontend_task_runner) {}
+ virtual ~FrontendChannelImpl() {}
+
+ void SendMessageToFrontend(const v8_inspector::StringView& message) final {
+ v8_inspector::String16Builder script;
+ script.append("InspectorTest._dispatchMessage(");
+ script.append(ToString16(message));
+ script.append(")");
+ frontend_task_runner_->Append(new ExecuteStringTask(script.toString()));
+ }
+
+ private:
+ TaskRunner* frontend_task_runner_;
+};
+
+} // namespace
+
+int main(int argc, char* argv[]) {
+ v8::V8::InitializeICUDefaultLocation(argv[0]);
+ v8::Platform* platform = v8::platform::CreateDefaultPlatform();
+ v8::V8::InitializePlatform(platform);
+ v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
+ v8::V8::InitializeExternalStartupData(argv[0]);
+ v8::V8::Initialize();
+
+ SetTimeoutExtension set_timeout_extension;
+ v8::RegisterExtension(&set_timeout_extension);
+ UtilsExtension utils_extension;
+ v8::RegisterExtension(&utils_extension);
+ SendMessageToBackendExtension send_message_to_backend_extension;
+ v8::RegisterExtension(&send_message_to_backend_extension);
+
+ v8::base::Semaphore ready_semaphore(0);
+
+ const char* backend_extensions[] = {"v8_inspector/setTimeout"};
+ v8::ExtensionConfiguration backend_configuration(
+ arraysize(backend_extensions), backend_extensions);
+ TaskRunner backend_runner(&backend_configuration, false, &ready_semaphore);
+ ready_semaphore.Wait();
+ SendMessageToBackendExtension::set_backend_task_runner(&backend_runner);
+
+ const char* frontend_extensions[] = {"v8_inspector/utils",
+ "v8_inspector/frontend"};
+ v8::ExtensionConfiguration frontend_configuration(
+ arraysize(frontend_extensions), frontend_extensions);
+ TaskRunner frontend_runner(&frontend_configuration, true, &ready_semaphore);
+ ready_semaphore.Wait();
+
+ FrontendChannelImpl frontend_channel(&frontend_runner);
+ InspectorClientImpl inspector_client(&backend_runner, &frontend_channel,
+ &ready_semaphore);
+ ready_semaphore.Wait();
+
+ for (int i = 1; i < argc; ++i) {
+ if (argv[i][0] == '-') break;
+
+ bool exists = false;
+ v8::internal::Vector<const char> chars =
+ v8::internal::ReadFile(argv[i], &exists, true);
+ if (!exists) {
+ fprintf(stderr, "Internal error: script file doesn't exists: %s\n",
+ argv[i]);
+ Exit();
+ }
+ v8_inspector::String16 source =
+ v8_inspector::String16::fromUTF8(chars.start(), chars.length());
+ frontend_runner.Append(new ExecuteStringTask(source));
+ }
+
+ frontend_runner.Join();
+ return 0;
+}
diff --git a/deps/v8/test/inspector/inspector.gyp b/deps/v8/test/inspector/inspector.gyp
new file mode 100644
index 0000000000..21a75f9eef
--- /dev/null
+++ b/deps/v8/test/inspector/inspector.gyp
@@ -0,0 +1,41 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ 'inspector_protocol_sources': [
+ 'inspector-impl.cc',
+ 'inspector-impl.h',
+ 'inspector-test.cc',
+ 'task-runner.cc',
+ 'task-runner.h',
+ ],
+ },
+ 'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'inspector-test',
+ 'type': 'executable',
+ 'dependencies': [
+ '../../src/v8.gyp:v8_libplatform',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ '<@(inspector_protocol_sources)',
+ ],
+ 'conditions': [
+ ['component=="shared_library"', {
+ # inspector-test can't be built against a shared library, so we
+ # need to depend on the underlying static target in that case.
+ 'dependencies': ['../../src/v8.gyp:v8_maybe_snapshot'],
+ }, {
+ 'dependencies': ['../../src/v8.gyp:v8'],
+ }],
+ ],
+ },
+ ],
+}
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
new file mode 100644
index 0000000000..df922bbf4e
--- /dev/null
+++ b/deps/v8/test/inspector/inspector.status
@@ -0,0 +1,7 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+
+]
diff --git a/deps/v8/test/inspector/json-parse-expected.txt b/deps/v8/test/inspector/json-parse-expected.txt
new file mode 100644
index 0000000000..b11d6e2ee0
--- /dev/null
+++ b/deps/v8/test/inspector/json-parse-expected.txt
@@ -0,0 +1,9 @@
+{
+ id : 1
+ result : {
+ result : {
+ type : string
+ value : Привет мир
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/json-parse.js b/deps/v8/test/inspector/json-parse.js
new file mode 100644
index 0000000000..2d88fea0f0
--- /dev/null
+++ b/deps/v8/test/inspector/json-parse.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const id = ++InspectorTest._requestId;
+var command = { "method": "Runtime.evaluate", "params": { expression: "\"!!!\"" }, "id": id };
+InspectorTest.sendRawCommand(id, JSON.stringify(command).replace("!!!", "\\u041F\\u0440\\u0438\\u0432\\u0435\\u0442 \\u043C\\u0438\\u0440"), step2);
+
+function step2(msg)
+{
+ msg.id = 1;
+ InspectorTest.logObject(msg);
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/protocol-test.js b/deps/v8/test/inspector/protocol-test.js
new file mode 100644
index 0000000000..7eb822ae2e
--- /dev/null
+++ b/deps/v8/test/inspector/protocol-test.js
@@ -0,0 +1,210 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest = {};
+InspectorTest._dispatchTable = new Map();
+InspectorTest._requestId = 0;
+InspectorTest._dumpInspectorProtocolMessages = false;
+InspectorTest._eventHandler = {};
+
+Protocol = new Proxy({}, {
+ get: function(target, agentName, receiver) {
+ return new Proxy({}, {
+ get: function(target, methodName, receiver) {
+ const eventPattern = /^on(ce)?([A-Z][A-Za-z0-9]+)/;
+ var match = eventPattern.exec(methodName);
+ if (!match) {
+ return (args) => InspectorTest._sendCommandPromise(`${agentName}.${methodName}`, args || {});
+ } else {
+ var eventName = match[2];
+ eventName = eventName.charAt(0).toLowerCase() + eventName.slice(1);
+ if (match[1])
+ return (args) => InspectorTest._waitForEventPromise(`${agentName}.${eventName}`, args || {});
+ else
+ return (listener) => { InspectorTest._eventHandler[`${agentName}.${eventName}`] = listener };
+ }
+ }
+ });
+ }
+});
+
+InspectorTest.log = print.bind(null);
+
+InspectorTest.logMessage = function(message)
+{
+ if (message.id)
+ message.id = "<messageId>";
+
+ const nonStableFields = new Set(["objectId", "scriptId", "exceptionId", "timestamp", "executionContextId", "callFrameId"]);
+ var objects = [ message ];
+ while (objects.length) {
+ var object = objects.shift();
+ for (var key in object) {
+ if (nonStableFields.has(key))
+ object[key] = `<${key}>`;
+ else if (typeof object[key] === "object")
+ objects.push(object[key]);
+ }
+ }
+
+ InspectorTest.logObject(message);
+ return message;
+}
+
+InspectorTest.logObject = function(object, title)
+{
+ var lines = [];
+
+ function dumpValue(value, prefix, prefixWithName)
+ {
+ if (typeof value === "object" && value !== null) {
+ if (value instanceof Array)
+ dumpItems(value, prefix, prefixWithName);
+ else
+ dumpProperties(value, prefix, prefixWithName);
+ } else {
+ lines.push(prefixWithName + String(value).replace(/\n/g, " "));
+ }
+ }
+
+ function dumpProperties(object, prefix, firstLinePrefix)
+ {
+ prefix = prefix || "";
+ firstLinePrefix = firstLinePrefix || prefix;
+ lines.push(firstLinePrefix + "{");
+
+ var propertyNames = Object.keys(object);
+ propertyNames.sort();
+ for (var i = 0; i < propertyNames.length; ++i) {
+ var name = propertyNames[i];
+ if (!object.hasOwnProperty(name))
+ continue;
+ var prefixWithName = " " + prefix + name + " : ";
+ dumpValue(object[name], " " + prefix, prefixWithName);
+ }
+ lines.push(prefix + "}");
+ }
+
+ function dumpItems(object, prefix, firstLinePrefix)
+ {
+ prefix = prefix || "";
+ firstLinePrefix = firstLinePrefix || prefix;
+ lines.push(firstLinePrefix + "[");
+ for (var i = 0; i < object.length; ++i)
+ dumpValue(object[i], " " + prefix, " " + prefix + "[" + i + "] : ");
+ lines.push(prefix + "]");
+ }
+
+ dumpValue(object, "", title);
+ InspectorTest.log(lines.join("\n"));
+}
+
+InspectorTest.completeTest = quit.bind(null);
+
+InspectorTest.completeTestAfterPendingTimeouts = function()
+{
+ Protocol.Runtime.evaluate({
+ expression: "new Promise(resolve => setTimeout(resolve, 0))",
+ awaitPromise: true }).then(InspectorTest.completeTest);
+}
+
+InspectorTest.addScript = function(string)
+{
+ return InspectorTest._sendCommandPromise("Runtime.evaluate", { "expression": string }).then(dumpErrorIfNeeded);
+
+ function dumpErrorIfNeeded(message)
+ {
+ if (message.error) {
+ InspectorTest.log("Error while executing '" + string + "': " + message.error.message);
+ InspectorTest.completeTest();
+ }
+ }
+};
+
+InspectorTest.startDumpingProtocolMessages = function()
+{
+ InspectorTest._dumpInspectorProtocolMessages = true;
+}
+
+InspectorTest.sendRawCommand = function(requestId, command, handler)
+{
+ if (InspectorTest._dumpInspectorProtocolMessages)
+ print("frontend: " + command);
+ InspectorTest._dispatchTable.set(requestId, handler);
+ sendMessageToBackend(command);
+}
+
+InspectorTest.checkExpectation = function(fail, name, messageObject)
+{
+ if (fail === !!messageObject.error) {
+ InspectorTest.log("PASS: " + name);
+ return true;
+ }
+
+ InspectorTest.log("FAIL: " + name + ": " + JSON.stringify(messageObject));
+ InspectorTest.completeTest();
+ return false;
+}
+InspectorTest.expectedSuccess = InspectorTest.checkExpectation.bind(null, false);
+InspectorTest.expectedError = InspectorTest.checkExpectation.bind(null, true);
+
+InspectorTest.runTestSuite = function(testSuite)
+{
+ function nextTest()
+ {
+ if (!testSuite.length) {
+ InspectorTest.completeTest();
+ return;
+ }
+ var fun = testSuite.shift();
+ InspectorTest.log("\nRunning test: " + fun.name);
+ fun(nextTest);
+ }
+ nextTest();
+}
+
+InspectorTest._sendCommandPromise = function(method, params)
+{
+ var requestId = ++InspectorTest._requestId;
+ var messageObject = { "id": requestId, "method": method, "params": params };
+ var fulfillCallback;
+ var promise = new Promise(fulfill => fulfillCallback = fulfill);
+ InspectorTest.sendRawCommand(requestId, JSON.stringify(messageObject), fulfillCallback);
+ return promise;
+}
+
+InspectorTest._waitForEventPromise = function(eventName)
+{
+ return new Promise(fulfill => InspectorTest._eventHandler[eventName] = fullfillAndClearListener.bind(null, fulfill));
+
+ function fullfillAndClearListener(fulfill, result)
+ {
+ delete InspectorTest._eventHandler[eventName];
+ fulfill(result);
+ }
+}
+
+InspectorTest._dispatchMessage = function(messageObject)
+{
+ if (InspectorTest._dumpInspectorProtocolMessages)
+ print("backend: " + JSON.stringify(messageObject));
+ try {
+ var messageId = messageObject["id"];
+ if (typeof messageId === "number") {
+ var handler = InspectorTest._dispatchTable.get(messageId);
+ if (handler) {
+ handler(messageObject);
+ InspectorTest._dispatchTable.delete(messageId);
+ }
+ } else {
+ var eventName = messageObject["method"];
+ var eventHandler = InspectorTest._eventHandler[eventName];
+ if (eventHandler)
+ eventHandler(messageObject);
+ }
+ } catch (e) {
+ InspectorTest.log("Exception when dispatching message: " + e + "\n" + e.stack + "\n message = " + JSON.stringify(messageObject, null, 2));
+ InspectorTest.completeTest();
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/await-promise-expected.txt b/deps/v8/test/inspector/runtime/await-promise-expected.txt
new file mode 100644
index 0000000000..e23ead86cd
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/await-promise-expected.txt
@@ -0,0 +1,119 @@
+Tests that Runtime.awaitPromise works.
+
+Running test: testResolvedPromise
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 239
+ type : number
+ value : 239
+ }
+ }
+}
+
+Running test: testRejectedPromise
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ type : object
+ value : {
+ a : 1
+ }
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ stackTrace : {
+ callFrames : [
+ ]
+ }
+ text : Uncaught (in promise)
+ }
+ result : {
+ type : object
+ value : {
+ a : 1
+ }
+ }
+ }
+}
+
+Running test: testRejectedPromiseWithStack
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ description : 239
+ type : number
+ value : 239
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ stackTrace : {
+ callFrames : [
+ ]
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 4
+ functionName : rejectPromise
+ lineNumber : 17
+ scriptId : <scriptId>
+ url : test.js
+ }
+ [1] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.reject
+ }
+ }
+ text : Uncaught (in promise)
+ }
+ result : {
+ description : 239
+ type : number
+ value : 239
+ }
+ }
+}
+
+Running test: testPendingPromise
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 239
+ type : number
+ value : 239
+ }
+ }
+}
+
+Running test: testResolvedWithoutArgsPromise
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : undefined
+ }
+ }
+}
+
+Running test: testGarbageCollectedPromise
+{
+ error : {
+ code : -32000
+ message : Promise was collected
+ }
+ id : <messageId>
+}
diff --git a/deps/v8/test/inspector/runtime/await-promise.js b/deps/v8/test/inspector/runtime/await-promise.js
new file mode 100644
index 0000000000..dc0c132bab
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/await-promise.js
@@ -0,0 +1,116 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --expose_gc
+
+print("Tests that Runtime.awaitPromise works.");
+
+InspectorTest.addScript(
+`
+var resolveCallback;
+var rejectCallback;
+function createPromise()
+{
+ return new Promise((resolve, reject) => { resolveCallback = resolve; rejectCallback = reject });
+}
+
+function resolvePromise()
+{
+ resolveCallback(239);
+ resolveCallback = undefined;
+ rejectCallback = undefined;
+}
+
+function rejectPromise()
+{
+ rejectCallback(239);
+ resolveCallback = undefined;
+ rejectCallback = undefined;
+}
+
+//# sourceURL=test.js`);
+
+Protocol.Debugger.enable()
+ .then(() => Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 128 }))
+ .then(() => testSuite());
+
+function testSuite()
+{
+ InspectorTest.runTestSuite([
+ function testResolvedPromise(next)
+ {
+ Protocol.Runtime.evaluate({ expression: "Promise.resolve(239)"})
+ .then(result => Protocol.Runtime.awaitPromise({ promiseObjectId: result.result.result.objectId, returnByValue: false, generatePreview: true }))
+ .then(result => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testRejectedPromise(next)
+ {
+ Protocol.Runtime.evaluate({ expression: "Promise.reject({ a : 1 })"})
+ .then(result => Protocol.Runtime.awaitPromise({ promiseObjectId: result.result.result.objectId, returnByValue: true, generatePreview: false }))
+ .then(result => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testRejectedPromiseWithStack(next)
+ {
+ Protocol.Runtime.evaluate({ expression: "createPromise()"})
+ .then(result => scheduleRejectAndAwaitPromise(result))
+ .then(result => InspectorTest.logMessage(result))
+ .then(() => next());
+
+ function scheduleRejectAndAwaitPromise(result)
+ {
+ var promise = Protocol.Runtime.awaitPromise({ promiseObjectId: result.result.result.objectId });
+ Protocol.Runtime.evaluate({ expression: "rejectPromise()" });
+ return promise;
+ }
+ },
+
+ function testPendingPromise(next)
+ {
+ Protocol.Runtime.evaluate({ expression: "createPromise()"})
+ .then(result => scheduleFulfillAndAwaitPromise(result))
+ .then(result => InspectorTest.logMessage(result))
+ .then(() => next());
+
+ function scheduleFulfillAndAwaitPromise(result)
+ {
+ var promise = Protocol.Runtime.awaitPromise({ promiseObjectId: result.result.result.objectId });
+ Protocol.Runtime.evaluate({ expression: "resolvePromise()" });
+ return promise;
+ }
+ },
+
+ function testResolvedWithoutArgsPromise(next)
+ {
+ Protocol.Runtime.evaluate({ expression: "Promise.resolve()"})
+ .then(result => Protocol.Runtime.awaitPromise({ promiseObjectId: result.result.result.objectId, returnByValue: true, generatePreview: false }))
+ .then(result => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testGarbageCollectedPromise(next)
+ {
+ Protocol.Runtime.evaluate({ expression: "new Promise(() => undefined)" })
+ .then(result => scheduleGCAndawaitPromise(result))
+ .then(result => InspectorTest.logMessage(result))
+ .then(() => next());
+
+ function scheduleGCAndawaitPromise(result)
+ {
+ var objectId = result.result.result.objectId;
+ var promise = Protocol.Runtime.awaitPromise({ promiseObjectId: objectId });
+ gcPromise(objectId);
+ return promise;
+ }
+
+ function gcPromise(objectId)
+ {
+ Protocol.Runtime.releaseObject({ objectId: objectId})
+ .then(() => Protocol.Runtime.evaluate({ expression: "gc()" }));
+ }
+ }
+ ]);
+}
diff --git a/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt b/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
new file mode 100644
index 0000000000..2d558b85dd
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
@@ -0,0 +1,141 @@
+Tests that Runtime.callFunctionOn works with awaitPromise flag.
+
+Running test: testArguments
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : string
+ value : undefined|NaN|[object Object]|[object Object]
+ }
+ }
+}
+
+Running test: testSyntaxErrorInFunction
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 2
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token }
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 1
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token }
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+
+Running test: testExceptionInFunctionExpression
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 15
+ exception : {
+ className : Error
+ description : Error at <anonymous>:1:22 at <anonymous>:1:36
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : Error
+ description : Error at <anonymous>:1:22 at <anonymous>:1:36
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+
+Running test: testFunctionReturnNotPromise
+{
+ code : -32000
+ message : Result of the function call is not a promise
+}
+
+Running test: testFunctionReturnResolvedPromiseReturnByValue
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : {
+ a : 3
+ }
+ }
+ }
+}
+
+Running test: testFunctionReturnResolvedPromiseWithPreview
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ type : number
+ value : 3
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+
+Running test: testFunctionReturnRejectedPromise
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ type : object
+ value : {
+ a : 3
+ }
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ stackTrace : {
+ callFrames : [
+ ]
+ }
+ text : Uncaught (in promise)
+ }
+ result : {
+ type : object
+ value : {
+ a : 3
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/deps/v8/test/inspector/runtime/call-function-on-async.js b/deps/v8/test/inspector/runtime/call-function-on-async.js
new file mode 100644
index 0000000000..4a72bbd40f
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/call-function-on-async.js
@@ -0,0 +1,129 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Tests that Runtime.callFunctionOn works with awaitPromise flag.");
+
+InspectorTest.runTestSuite([
+ function testArguments(next)
+ {
+ callFunctionOn(
+ "({a : 1})",
+ "function(arg1, arg2, arg3, arg4) { return \"\" + arg1 + \"|\" + arg2 + \"|\" + arg3 + \"|\" + arg4; }",
+ [ "undefined", "NaN", "({a:2})", "this"],
+ /* returnByValue */ true,
+ /* generatePreview */ false,
+ /* awaitPromise */ false)
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testSyntaxErrorInFunction(next)
+ {
+ callFunctionOn(
+ "({a : 1})",
+ "\n }",
+ [],
+ /* returnByValue */ false,
+ /* generatePreview */ false,
+ /* awaitPromise */ true)
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testExceptionInFunctionExpression(next)
+ {
+ callFunctionOn(
+ "({a : 1})",
+ "(function() { throw new Error() })()",
+ [],
+ /* returnByValue */ false,
+ /* generatePreview */ false,
+ /* awaitPromise */ true)
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testFunctionReturnNotPromise(next)
+ {
+ callFunctionOn(
+ "({a : 1})",
+ "(function() { return 239; })",
+ [],
+ /* returnByValue */ false,
+ /* generatePreview */ false,
+ /* awaitPromise */ true)
+ .then((result) => InspectorTest.logMessage(result.error))
+ .then(() => next());
+ },
+
+ function testFunctionReturnResolvedPromiseReturnByValue(next)
+ {
+ callFunctionOn(
+ "({a : 1})",
+ "(function(arg) { return Promise.resolve({a : this.a + arg.a}); })",
+ [ "({a:2})" ],
+ /* returnByValue */ true,
+ /* generatePreview */ false,
+ /* awaitPromise */ true)
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testFunctionReturnResolvedPromiseWithPreview(next)
+ {
+ callFunctionOn(
+ "({a : 1})",
+ "(function(arg) { return Promise.resolve({a : this.a + arg.a}); })",
+ [ "({a:2})" ],
+ /* returnByValue */ false,
+ /* generatePreview */ true,
+ /* awaitPromise */ true)
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testFunctionReturnRejectedPromise(next)
+ {
+ callFunctionOn(
+ "({a : 1})",
+ "(function(arg) { return Promise.reject({a : this.a + arg.a}); })",
+ [ "({a:2})" ],
+ /* returnByValue */ true,
+ /* generatePreview */ false,
+ /* awaitPromise */ true)
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => next());
+ }
+]);
+
+function callFunctionOn(objectExpression, functionDeclaration, argumentExpressions, returnByValue, generatePreview, awaitPromise)
+{
+ var objectId;
+ var callArguments = [];
+ var promise = Protocol.Runtime.evaluate({ expression: objectExpression })
+ .then((result) => objectId = result.result.result.objectId)
+ for (let argumentExpression of argumentExpressions) {
+ promise = promise
+ .then(() => Protocol.Runtime.evaluate({ expression: argumentExpression }))
+ .then((result) => addArgument(result.result.result));
+ }
+ return promise.then(() => Protocol.Runtime.callFunctionOn({ objectId: objectId, functionDeclaration: functionDeclaration, arguments: callArguments, returnByValue: returnByValue, generatePreview: generatePreview, awaitPromise: awaitPromise }));
+
+ function addArgument(result)
+ {
+ if (result.objectId) {
+ callArguments.push({ objectId: result.objectId });
+ } else if (result.value) {
+ callArguments.push({ value: result.value })
+ } else if (result.unserializableValue) {
+ callArguments.push({ unserializableValue: result.unserializableValue });
+ } else if (result.type === "undefined") {
+ callArguments.push({});
+ } else {
+ InspectorTest.log("Unexpected argument object:");
+ InspectorTest.logMessage(result);
+ InspectorTest.completeTest();
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/clear-of-command-line-api-expected.txt b/deps/v8/test/inspector/runtime/clear-of-command-line-api-expected.txt
new file mode 100644
index 0000000000..142989b731
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/clear-of-command-line-api-expected.txt
@@ -0,0 +1,177 @@
+Tests that CommandLineAPI is presented only while evaluation.
+
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 15
+ type : number
+ value : 15
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 0
+ type : number
+ value : 0
+ }
+ }
+}
+setPropertyForMethod()
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 14
+ type : number
+ value : 14
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 0
+ type : number
+ value : 0
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 42
+ type : number
+ value : 42
+ }
+ }
+}
+defineValuePropertyForMethod()
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 14
+ type : number
+ value : 14
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 0
+ type : number
+ value : 0
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 42
+ type : number
+ value : 42
+ }
+ }
+}
+definePropertiesForMethod()
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 14
+ type : number
+ value : 14
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 0
+ type : number
+ value : 0
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 42
+ type : number
+ value : 42
+ }
+ }
+}
+defineAccessorPropertyForMethod()
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 14
+ type : number
+ value : 14
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 0
+ type : number
+ value : 0
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 42
+ type : number
+ value : 42
+ }
+ }
+}
+redefineGetOwnPropertyDescriptors()
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 14
+ type : number
+ value : 14
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 0
+ type : number
+ value : 0
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 42
+ type : number
+ value : 42
+ }
+ }
+} \ No newline at end of file
diff --git a/deps/v8/test/inspector/runtime/clear-of-command-line-api.js b/deps/v8/test/inspector/runtime/clear-of-command-line-api.js
new file mode 100644
index 0000000000..2af2f4917f
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/clear-of-command-line-api.js
@@ -0,0 +1,117 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Tests that CommandLineAPI is presented only while evaluation.");
+
+InspectorTest.addScript(
+`
+var methods = ["dir","dirxml","profile","profileEnd","clear","table","keys","values","debug","undebug","monitor","unmonitor","inspect","copy"];
+var window = this;
+function presentedAPIMethods()
+{
+ var methodCount = 0;
+ for (var method of methods) {
+ try {
+ if (eval("window." + method + "&&" + method + ".toString ? " + method + ".toString().indexOf(\\"[Command Line API]\\") !== -1 : false"))
+ ++methodCount;
+ } catch (e) {
+ }
+ }
+ methodCount += eval("\\"$_\\" in window ? $_ === 239 : false") ? 1 : 0;
+ return methodCount;
+}
+
+function setPropertyForMethod()
+{
+ window.dir = 42;
+}
+
+function defineValuePropertyForMethod()
+{
+ Object.defineProperty(window, "dir", { value: 42 });
+}
+
+function defineAccessorPropertyForMethod()
+{
+ Object.defineProperty(window, "dir", { set: function() {}, get: function(){ return 42 } });
+}
+
+function definePropertiesForMethod()
+{
+ Object.defineProperties(window, { "dir": { set: function() {}, get: function(){ return 42 } }});
+}
+
+var builtinGetOwnPropertyDescriptorOnObject;
+var builtinGetOwnPropertyDescriptorOnObjectPrototype;
+var builtinGetOwnPropertyDescriptorOnWindow;
+
+function redefineGetOwnPropertyDescriptors()
+{
+ builtinGetOwnPropertyDescriptorOnObject = Object.getOwnPropertyDescriptor;
+ Object.getOwnPropertyDescriptor = function() {}
+ builtinGetOwnPropertyDescriptorOnObjectPrototype = Object.prototype.getOwnPropertyDescriptor;
+ Object.prototype.getOwnPropertyDescriptor = function() {}
+ builtinGetOwnPropertyDescriptorOnWindow = window.getOwnPropertyDescriptor;
+ window.getOwnPropertyDescriptor = function() {}
+}
+
+function restoreGetOwnPropertyDescriptors()
+{
+ Object.getOwnPropertyDescriptor = builtinGetOwnPropertyDescriptorOnObject;
+ Object.prototype.getOwnPropertyDescriptor = builtinGetOwnPropertyDescriptorOnObjectPrototype;
+ window.getOwnPropertyDescriptor = builtinGetOwnPropertyDescriptorOnWindow;
+}`);
+
+runExpressionAndDumpPresentedMethods("")
+ .then(dumpLeftMethods)
+ .then(() => runExpressionAndDumpPresentedMethods("setPropertyForMethod()"))
+ .then(dumpLeftMethods)
+ .then(dumpDir)
+ .then(() => runExpressionAndDumpPresentedMethods("defineValuePropertyForMethod()"))
+ .then(dumpLeftMethods)
+ .then(dumpDir)
+ .then(() => runExpressionAndDumpPresentedMethods("definePropertiesForMethod()"))
+ .then(dumpLeftMethods)
+ .then(dumpDir)
+ .then(() => runExpressionAndDumpPresentedMethods("defineAccessorPropertyForMethod()"))
+ .then(dumpLeftMethods)
+ .then(dumpDir)
+ .then(() => runExpressionAndDumpPresentedMethods("redefineGetOwnPropertyDescriptors()"))
+ .then(dumpLeftMethods)
+ .then(dumpDir)
+ .then(() => evaluate("restoreGetOwnPropertyDescriptors()", false))
+ .then(InspectorTest.completeTest);
+
+function evaluate(expression, includeCommandLineAPI)
+{
+ return Protocol.Runtime.evaluate({ expression: expression, objectGroup: "console", includeCommandLineAPI: includeCommandLineAPI });
+}
+
+function setLastEvaluationResultTo239()
+{
+ return evaluate("239", false);
+}
+
+function runExpressionAndDumpPresentedMethods(expression)
+{
+ InspectorTest.log(expression);
+ return setLastEvaluationResultTo239()
+ .then(() => evaluate(expression + "; var a = presentedAPIMethods(); a", true))
+ .then((result) => InspectorTest.logMessage(result));
+}
+
+function dumpLeftMethods()
+{
+ // Should always be zero.
+ return setLastEvaluationResultTo239()
+ .then(() => evaluate("presentedAPIMethods()", false))
+ .then((result) => InspectorTest.logMessage(result));
+}
+
+function dumpDir()
+{
+ // Should always be presented.
+ return evaluate("dir", false)
+ .then((result) => InspectorTest.logMessage(result));
+}
diff --git a/deps/v8/test/inspector/runtime/compile-script-expected.txt b/deps/v8/test/inspector/runtime/compile-script-expected.txt
new file mode 100644
index 0000000000..3d6d580487
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/compile-script-expected.txt
@@ -0,0 +1,66 @@
+Compiling script: foo1.js
+ persist: false
+compilation result:
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 2
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected end of input
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 1
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ }
+}
+-----
+Compiling script: foo2.js
+ persist: true
+Debugger.scriptParsed: foo2.js
+compilation result:
+{
+ id : <messageId>
+ result : {
+ scriptId : <scriptId>
+ }
+}
+-----
+Compiling script: foo3.js
+ persist: false
+compilation result:
+{
+ id : <messageId>
+ result : {
+ }
+}
+-----
+Compiling script: foo4.js
+ persist: false
+compilation result:
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 13
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected identifier
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ }
+}
+----- \ No newline at end of file
diff --git a/deps/v8/test/inspector/runtime/compile-script.js b/deps/v8/test/inspector/runtime/compile-script.js
new file mode 100644
index 0000000000..4f1c6468e1
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/compile-script.js
@@ -0,0 +1,50 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var executionContextId;
+
+Protocol.Debugger.enable().then(onDebuggerEnabled);
+
+function onDebuggerEnabled()
+{
+ Protocol.Runtime.enable();
+ Protocol.Debugger.onScriptParsed(onScriptParsed);
+ Protocol.Runtime.onExecutionContextCreated(onExecutionContextCreated);
+}
+
+function onScriptParsed(messageObject)
+{
+ if (!messageObject.params.url)
+ return;
+ InspectorTest.log("Debugger.scriptParsed: " + messageObject.params.url);
+}
+
+function onExecutionContextCreated(messageObject)
+{
+ executionContextId = messageObject.params.context.id;
+ testCompileScript("\n (", false, "foo1.js")
+ .then(() => testCompileScript("239", true, "foo2.js"))
+ .then(() => testCompileScript("239", false, "foo3.js"))
+ .then(() => testCompileScript("testfunction f()\n{\n return 0;\n}\n", false, "foo4.js"))
+ .then(() => InspectorTest.completeTest());
+}
+
+function testCompileScript(expression, persistScript, sourceURL)
+{
+ InspectorTest.log("Compiling script: " + sourceURL);
+ InspectorTest.log(" persist: " + persistScript);
+ return Protocol.Runtime.compileScript({
+ expression: expression,
+ sourceURL: sourceURL,
+ persistScript: persistScript,
+ executionContextId: executionContextId
+ }).then(onCompiled);
+
+ function onCompiled(messageObject)
+ {
+ InspectorTest.log("compilation result: ");
+ InspectorTest.logMessage(messageObject);
+ InspectorTest.log("-----");
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/console-api-repeated-in-console-expected.txt b/deps/v8/test/inspector/runtime/console-api-repeated-in-console-expected.txt
new file mode 100644
index 0000000000..04d2d90265
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-api-repeated-in-console-expected.txt
@@ -0,0 +1,6 @@
+Check that console.log is reported through Console domain as well.
+api call: 42
+api call: abc
+console message: 42
+console message: abc
+
diff --git a/deps/v8/test/inspector/runtime/console-api-repeated-in-console.js b/deps/v8/test/inspector/runtime/console-api-repeated-in-console.js
new file mode 100644
index 0000000000..ec4b34d8ad
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-api-repeated-in-console.js
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Check that console.log is reported through Console domain as well.");
+
+var expectedMessages = 4;
+var messages = [];
+
+Protocol.Runtime.onConsoleAPICalled(consoleAPICalled);
+Protocol.Console.onMessageAdded(messageAdded);
+Protocol.Runtime.enable();
+Protocol.Console.enable();
+Protocol.Runtime.evaluate({ "expression": "console.log(42)" });
+Protocol.Runtime.evaluate({ "expression": "console.error('abc')" });
+
+function consoleAPICalled(result)
+{
+ messages.push("api call: " + result.params.args[0].value);
+ if (!(--expectedMessages))
+ done();
+}
+
+function messageAdded(result)
+{
+ messages.push("console message: " + result.params.message.text);
+ if (!(--expectedMessages))
+ done();
+}
+
+function done()
+{
+ messages.sort();
+ for (var message of messages)
+ InspectorTest.log(message);
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/runtime/console-deprecated-methods-expected.txt b/deps/v8/test/inspector/runtime/console-deprecated-methods-expected.txt
new file mode 100644
index 0000000000..1b8e4aa2ce
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-deprecated-methods-expected.txt
@@ -0,0 +1,5 @@
+Tests checks that deprecation messages for console.
+'console.timeline' is deprecated. Please use 'console.time' instead.
+'console.timelineEnd' is deprecated. Please use 'console.timeEnd' instead.
+'console.markTimeline' is deprecated. Please use 'console.timeStamp' instead.
+
diff --git a/deps/v8/test/inspector/runtime/console-deprecated-methods.js b/deps/v8/test/inspector/runtime/console-deprecated-methods.js
new file mode 100644
index 0000000000..2705cb083f
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-deprecated-methods.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Tests checks that deprecation messages for console.")
+
+Protocol.Runtime.onConsoleAPICalled(messageAdded);
+Protocol.Runtime.enable();
+
+var deprecatedMethods = [
+ "console.timeline(\"42\")",
+ "console.timeline(\"42\")",
+ "console.timeline(\"42\")", // three calls should produce one warning message
+ "console.timelineEnd(\"42\")",
+ "console.markTimeline(\"42\")",
+];
+Protocol.Runtime.evaluate({ expression: deprecatedMethods.join(";") });
+
+var messagesLeft = 3;
+function messageAdded(data)
+{
+ var text = data.params.args[0].value;
+ if (text.indexOf("deprecated") === -1)
+ return;
+ InspectorTest.log(text);
+ if (!--messagesLeft)
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/runtime/console-line-and-column-expected.txt b/deps/v8/test/inspector/runtime/console-line-and-column-expected.txt
new file mode 100644
index 0000000000..4eab60af0d
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-line-and-column-expected.txt
@@ -0,0 +1,52 @@
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ description : 239
+ type : number
+ value : 239
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : log
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ description : 239
+ type : number
+ value : 239
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 2
+ functionName :
+ lineNumber : 1
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : log
+ }
+} \ No newline at end of file
diff --git a/deps/v8/test/inspector/runtime/console-line-and-column.js b/deps/v8/test/inspector/runtime/console-line-and-column.js
new file mode 100644
index 0000000000..fe5c24f27c
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-line-and-column.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Protocol.Runtime.enable();
+
+addConsoleMessagePromise("console.log(239)")
+ .then(message => InspectorTest.logMessage(message))
+ .then(() => addConsoleMessagePromise("var l = console.log;\n l(239)"))
+ .then(message => InspectorTest.logMessage(message))
+ .then(() => InspectorTest.completeTest());
+
+function addConsoleMessagePromise(expression)
+{
+ var wait = Protocol.Runtime.onceConsoleAPICalled();
+ Protocol.Runtime.evaluate({ expression: expression });
+ return wait;
+}
diff --git a/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks-expected.txt b/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks-expected.txt
new file mode 100644
index 0000000000..5a234ec78c
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks-expected.txt
@@ -0,0 +1,21 @@
+Check that console.log doesn't run microtasks.
+{
+ description : 42
+ type : number
+ value : 42
+}
+{
+ description : 43
+ type : number
+ value : 43
+}
+{
+ description : 239
+ type : number
+ value : 239
+}
+{
+ type : string
+ value : finished
+}
+
diff --git a/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks.js b/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks.js
new file mode 100644
index 0000000000..b7a87391e0
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Check that console.log doesn't run microtasks.");
+
+InspectorTest.addScript(
+`
+function testFunction()
+{
+ Promise.resolve().then(function(){ console.log(239); });
+ console.log(42);
+ console.log(43);
+}`);
+
+Protocol.Runtime.enable();
+Protocol.Runtime.onConsoleAPICalled(messageAdded);
+Protocol.Runtime.evaluate({ "expression": "testFunction()" });
+Protocol.Runtime.evaluate({ "expression": "setTimeout(() => console.log(\"finished\"), 0)" });
+
+function messageAdded(result)
+{
+ InspectorTest.logObject(result.params.args[0]);
+ if (result.params.args[0].value === "finished")
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/runtime/console-timestamp-expected.txt b/deps/v8/test/inspector/runtime/console-timestamp-expected.txt
new file mode 100644
index 0000000000..5e4d7b5ada
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-timestamp-expected.txt
@@ -0,0 +1,9 @@
+Message has timestamp: true
+Message timestamp doesn't differ too much from current time (one minute interval): true
+Message 1 has non-decreasing timestamp: true
+Message has timestamp: true
+Message timestamp doesn't differ too much from current time (one minute interval): true
+Message 2 has non-decreasing timestamp: true
+Message has timestamp: true
+Message timestamp doesn't differ too much from current time (one minute interval): true
+
diff --git a/deps/v8/test/inspector/runtime/console-timestamp.js b/deps/v8/test/inspector/runtime/console-timestamp.js
new file mode 100644
index 0000000000..0dceaed23f
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-timestamp.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var messages = [];
+
+function messageAdded(data)
+{
+ var payload = data.params;
+ if (messages.length > 0)
+ InspectorTest.log("Message " + messages.length + " has non-decreasing timestamp: " + (payload.timestamp >= messages[messages.length - 1].timestamp));
+
+ messages.push(payload);
+ InspectorTest.log("Message has timestamp: " + !!payload.timestamp);
+
+ InspectorTest.log("Message timestamp doesn't differ too much from current time (one minute interval): " + (Math.abs(new Date().getTime() - payload.timestamp) < 60000));
+ if (messages.length === 3)
+ InspectorTest.completeTest();
+}
+
+Protocol.Runtime.onConsoleAPICalled(messageAdded);
+Protocol.Runtime.enable();
+Protocol.Runtime.evaluate({ expression: "console.log('testUnique'); for (var i = 0; i < 2; ++i) console.log('testDouble');" });
diff --git a/deps/v8/test/inspector/runtime/evaluate-async-expected.txt b/deps/v8/test/inspector/runtime/evaluate-async-expected.txt
new file mode 100644
index 0000000000..c03dd7a409
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-async-expected.txt
@@ -0,0 +1,95 @@
+Tests that Runtime.evaluate works with awaitPromise flag.
+
+Running test: testResolvedPromise
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 239
+ type : number
+ value : 239
+ }
+ }
+}
+
+Running test: testRejectedPromise
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ description : 239
+ type : number
+ value : 239
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ stackTrace : {
+ callFrames : [
+ ]
+ }
+ text : Uncaught (in promise)
+ }
+ result : {
+ description : 239
+ type : number
+ value : 239
+ }
+ }
+}
+
+Running test: testPrimitiveValueInsteadOfPromise
+{
+ error : {
+ code : -32000
+ message : Result of the evaluation is not a promise
+ }
+ id : <messageId>
+}
+
+Running test: testObjectInsteadOfPromise
+{
+ error : {
+ code : -32000
+ message : Result of the evaluation is not a promise
+ }
+ id : <messageId>
+}
+
+Running test: testPendingPromise
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : {
+ a : 239
+ }
+ }
+ }
+}
+
+Running test: testExceptionInEvaluate
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ description : 239
+ type : number
+ value : 239
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ description : 239
+ type : number
+ value : 239
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/evaluate-async-with-wrap-error-expected.txt b/deps/v8/test/inspector/runtime/evaluate-async-with-wrap-error-expected.txt
new file mode 100644
index 0000000000..743acdbc08
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-async-with-wrap-error-expected.txt
@@ -0,0 +1,8 @@
+Test that Runtime.evaluate correctly process errors during wrapping async result.
+{
+ error : {
+ code : -32000
+ message : Object couldn't be returned by value
+ }
+ id : <messageId>
+}
diff --git a/deps/v8/test/inspector/runtime/evaluate-async-with-wrap-error.js b/deps/v8/test/inspector/runtime/evaluate-async-with-wrap-error.js
new file mode 100644
index 0000000000..e5da89ecfc
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-async-with-wrap-error.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Test that Runtime.evaluate correctly process errors during wrapping \
+async result.");
+
+var evaluateArguments = {
+ expression: "Promise.resolve(Symbol(123))",
+ returnByValue: true,
+ awaitPromise: true
+};
+Protocol.Runtime.evaluate(evaluateArguments)
+ .then(message => InspectorTest.logMessage(message))
+ .then(() => InspectorTest.completeTest());
diff --git a/deps/v8/test/inspector/runtime/evaluate-async.js b/deps/v8/test/inspector/runtime/evaluate-async.js
new file mode 100644
index 0000000000..ed4b6e30e2
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-async.js
@@ -0,0 +1,58 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Tests that Runtime.evaluate works with awaitPromise flag.");
+
+InspectorTest.addScript(`
+function createPromiseAndScheduleResolve()
+{
+ var resolveCallback;
+ var promise = new Promise((resolve) => resolveCallback = resolve);
+ setTimeout(resolveCallback.bind(null, { a : 239 }), 0);
+ return promise;
+}`);
+
+InspectorTest.runTestSuite([
+ function testResolvedPromise(next)
+ {
+ Protocol.Runtime.evaluate({ expression: "Promise.resolve(239)", awaitPromise: true, generatePreview: true })
+ .then(result => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testRejectedPromise(next)
+ {
+ Protocol.Runtime.evaluate({ expression: "Promise.reject(239)", awaitPromise: true })
+ .then(result => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testPrimitiveValueInsteadOfPromise(next)
+ {
+ Protocol.Runtime.evaluate({ expression: "true", awaitPromise: true })
+ .then(result => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testObjectInsteadOfPromise(next)
+ {
+ Protocol.Runtime.evaluate({ expression: "({})", awaitPromise: true })
+ .then(result => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testPendingPromise(next)
+ {
+ Protocol.Runtime.evaluate({ expression: "createPromiseAndScheduleResolve()", awaitPromise: true, returnByValue: true })
+ .then(result => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testExceptionInEvaluate(next)
+ {
+ Protocol.Runtime.evaluate({ expression: "throw 239", awaitPromise: true })
+ .then(result => InspectorTest.logMessage(result))
+ .then(() => next());
+ }
+]);
diff --git a/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero-expected.txt b/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero-expected.txt
new file mode 100644
index 0000000000..9521a06c06
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero-expected.txt
@@ -0,0 +1,9 @@
+Tests that DevTools doesn't crash on Runtime.evaluate with contextId equals 0.
+{
+ error : {
+ code : -32000
+ message : Cannot find context with specified id
+ }
+ id : <messageId>
+}
+
diff --git a/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero.js b/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero.js
new file mode 100644
index 0000000000..d37a00ce37
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Tests that DevTools doesn't crash on Runtime.evaluate with contextId equals 0.");
+
+Protocol.Runtime.evaluate({ "contextId": 0, "expression": "" })
+ .then(message => InspectorTest.logMessage(message))
+ .then(() => InspectorTest.completeTest());
diff --git a/deps/v8/test/inspector/runtime/exception-thrown-expected.txt b/deps/v8/test/inspector/runtime/exception-thrown-expected.txt
new file mode 100644
index 0000000000..228c348298
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/exception-thrown-expected.txt
@@ -0,0 +1,117 @@
+Check that exceptionThrown is supported by test runner.
+{
+ method : Runtime.exceptionThrown
+ params : {
+ exceptionDetails : {
+ columnNumber : 2
+ exception : {
+ className : Error
+ description : Error at setTimeout (<anonymous>:2:9)
+ objectId : <objectId>
+ preview : {
+ description : Error at setTimeout (<anonymous>:2:9)
+ overflow : false
+ properties : [
+ [0] : {
+ name : stack
+ type : string
+ value : Error at setTimeout (<anonymous>:2:9)
+ }
+ ]
+ subtype : error
+ type : object
+ }
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ executionContextId : <executionContextId>
+ lineNumber : 1
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 8
+ functionName : setTimeout
+ lineNumber : 1
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Uncaught Error
+ }
+ timestamp : <timestamp>
+ }
+}
+{
+ method : Runtime.exceptionThrown
+ params : {
+ exceptionDetails : {
+ columnNumber : 1
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token }
+ objectId : <objectId>
+ preview : {
+ description : SyntaxError: Unexpected token }
+ overflow : false
+ properties : [
+ [0] : {
+ name : stack
+ type : string
+ value : SyntaxError: Unexpected token }
+ }
+ [1] : {
+ name : message
+ type : string
+ value : Unexpected token }
+ }
+ ]
+ subtype : error
+ type : object
+ }
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ executionContextId : <executionContextId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ ]
+ }
+ text : Uncaught SyntaxError: Unexpected token }
+ }
+ timestamp : <timestamp>
+ }
+}
+{
+ method : Runtime.exceptionThrown
+ params : {
+ exceptionDetails : {
+ columnNumber : 2
+ exception : {
+ description : 239
+ type : number
+ value : 239
+ }
+ exceptionId : <exceptionId>
+ executionContextId : <executionContextId>
+ lineNumber : 1
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 2
+ functionName : setTimeout
+ lineNumber : 1
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Uncaught 239
+ }
+ timestamp : <timestamp>
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/exception-thrown.js b/deps/v8/test/inspector/runtime/exception-thrown.js
new file mode 100644
index 0000000000..76752f9d3b
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/exception-thrown.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Check that exceptionThrown is supported by test runner.")
+
+Protocol.Runtime.enable();
+Protocol.Runtime.onExceptionThrown(message => InspectorTest.logMessage(message));
+Protocol.Runtime.evaluate({ expression: "setTimeout(() => { \n throw new Error() }, 0)" });
+Protocol.Runtime.evaluate({ expression: "setTimeout(\" }\", 0)" });
+Protocol.Runtime.evaluate({ expression: "setTimeout(() => { \n throw 239; }, 0)" });
+InspectorTest.completeTestAfterPendingTimeouts();
diff --git a/deps/v8/test/inspector/runtime/get-properties-expected.txt b/deps/v8/test/inspector/runtime/get-properties-expected.txt
new file mode 100644
index 0000000000..bb74386de8
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/get-properties-expected.txt
@@ -0,0 +1,39 @@
+Properties of Object(5)
+ __proto__ own object undefined
+ foo own string cat
+Internal properties
+ [[PrimitiveValue]] number 5
+Properties of Not own properties
+ __defineGetter__ inherited function undefined
+ __defineSetter__ inherited function undefined
+ __lookupGetter__ inherited function undefined
+ __lookupSetter__ inherited function undefined
+ __proto__ inherited no value, getter, setter
+ a own number 2
+ b own no value, getter, setter
+ c inherited number 4
+ constructor inherited function undefined
+ d inherited no value, getter
+ hasOwnProperty inherited function undefined
+ isPrototypeOf inherited function undefined
+ propertyIsEnumerable inherited function undefined
+ toLocaleString inherited function undefined
+ toString inherited function undefined
+ valueOf inherited function undefined
+Properties of Accessor only properties
+ b own no value, getter, setter
+ d own no value, setter
+Properties of array
+ 0 own string red
+ 1 own string green
+ 2 own string blue
+ __proto__ own object undefined
+ length own number 3
+Properties of Bound function
+ __proto__ own function undefined
+ length own number 0
+ name own string bound Number
+Internal properties
+ [[BoundArgs]] object undefined
+ [[BoundThis]] object undefined
+ [[TargetFunction]] function undefined
diff --git a/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt b/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
new file mode 100644
index 0000000000..b36c811771
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
@@ -0,0 +1,11 @@
+Check that while Runtime.getProperties call on proxy object no user defined trap will be executed.
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 0
+ type : number
+ value : 0
+ }
+ }
+} \ No newline at end of file
diff --git a/deps/v8/test/inspector/runtime/get-properties-on-proxy.js b/deps/v8/test/inspector/runtime/get-properties-on-proxy.js
new file mode 100644
index 0000000000..40e2a96107
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/get-properties-on-proxy.js
@@ -0,0 +1,101 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Check that while Runtime.getProperties call on proxy object no user defined trap will be executed.");
+
+InspectorTest.addScript(`
+var self = this;
+function testFunction()
+{
+ self.counter = 0;
+ var handler = {
+ get: function(target, name){
+ self.counter++;
+ return Reflect.get.apply(this, arguments);
+ },
+ set: function(target, name){
+ self.counter++;
+ return Reflect.set.apply(this, arguments);
+ },
+ getPrototypeOf: function(target) {
+ self.counter++;
+ return Reflect.getPrototypeOf.apply(this, arguments);
+ },
+ setPrototypeOf: function(target) {
+ self.counter++;
+ return Reflect.setPrototypeOf.apply(this, arguments);
+ },
+ isExtensible: function(target) {
+ self.counter++;
+ return Reflect.isExtensible.apply(this, arguments);
+ },
+ isExtensible: function(target) {
+ self.counter++;
+ return Reflect.isExtensible.apply(this, arguments);
+ },
+ isExtensible: function(target) {
+ self.counter++;
+ return Reflect.isExtensible.apply(this, arguments);
+ },
+ preventExtensions: function() {
+ self.counter++;
+ return Reflect.preventExtensions.apply(this, arguments);
+ },
+ getOwnPropertyDescriptor: function() {
+ self.counter++;
+ return Reflect.getOwnPropertyDescriptor.apply(this, arguments);
+ },
+ defineProperty: function() {
+ self.counter++;
+ return Reflect.defineProperty.apply(this, arguments);
+ },
+ has: function() {
+ self.counter++;
+ return Reflect.has.apply(this, arguments);
+ },
+ get: function() {
+ self.counter++;
+ return Reflect.get.apply(this, arguments);
+ },
+ set: function() {
+ self.counter++;
+ return Reflect.set.apply(this, arguments);
+ },
+ deleteProperty: function() {
+ self.counter++;
+ return Reflect.deleteProperty.apply(this, arguments);
+ },
+ ownKeys: function() {
+ self.counter++;
+ return Reflect.ownKeys.apply(this, arguments);
+ },
+ apply: function() {
+ self.counter++;
+ return Reflect.apply.apply(this, arguments);
+ },
+ construct: function() {
+ self.counter++;
+ return Reflect.construct.apply(this, arguments);
+ }
+ };
+ return new Proxy({ a : 1}, handler);
+}`);
+
+Protocol.Runtime.evaluate({ expression: "testFunction()"}).then(requestProperties);
+
+function requestProperties(result)
+{
+ Protocol.Runtime.getProperties({ objectId: result.result.objectId, generatePreview: true }).then(checkCounter);
+}
+
+function checkCounter(result)
+{
+ Protocol.Runtime.evaluate({ expression: "self.counter" }).then(dumpCounter);
+}
+
+function dumpCounter(result)
+{
+ InspectorTest.logMessage(result);
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/runtime/get-properties-preview-expected.txt b/deps/v8/test/inspector/runtime/get-properties-preview-expected.txt
new file mode 100644
index 0000000000..fd1f31a4c2
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/get-properties-preview-expected.txt
@@ -0,0 +1,32 @@
+p1 : Object
+p2 : Object
+p1 : {
+ "type": "object",
+ "description": "Object",
+ "overflow": false,
+ "properties": [
+ {
+ "name": "a",
+ "type": "number",
+ "value": "1"
+ }
+ ]
+}
+p2 : {
+ "type": "object",
+ "description": "Object",
+ "overflow": false,
+ "properties": [
+ {
+ "name": "b",
+ "type": "string",
+ "value": "foo"
+ },
+ {
+ "name": "bb",
+ "type": "string",
+ "value": "bar"
+ }
+ ]
+}
+
diff --git a/deps/v8/test/inspector/runtime/get-properties-preview.js b/deps/v8/test/inspector/runtime/get-properties-preview.js
new file mode 100644
index 0000000000..7cc81bc486
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/get-properties-preview.js
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Protocol.Runtime.evaluate({ "expression": "({p1: {a:1}, p2: {b:'foo', bb:'bar'}})" }).then(callbackEvaluate);
+
+function callbackEvaluate(result)
+{
+ Protocol.Runtime.getProperties({ "objectId": result.result.result.objectId, "ownProperties": true }).then(callbackGetProperties.bind(null, false));
+ Protocol.Runtime.getProperties({ "objectId": result.result.result.objectId, "ownProperties": true, "generatePreview": true }).then(callbackGetProperties.bind(null, true));
+}
+
+function callbackGetProperties(completeTest, result)
+{
+ for (var property of result.result.result) {
+ if (!property.value || property.name === "__proto__")
+ continue;
+ if (property.value.preview)
+ InspectorTest.log(property.name + " : " + JSON.stringify(property.value.preview, null, 4));
+ else
+ InspectorTest.log(property.name + " : " + property.value.description);
+ }
+ if (completeTest)
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/runtime/get-properties.js b/deps/v8/test/inspector/runtime/get-properties.js
new file mode 100644
index 0000000000..579e5422d9
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/get-properties.js
@@ -0,0 +1,221 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A general-purpose engine for sending a sequence of protocol commands.
+// The clients provide requests and response handlers, while the engine catches
+// errors and makes sure that once there's nothing to do completeTest() is called.
+// @param step is an object with command, params and callback fields
+function runRequestSeries(step)
+{
+ processStep(step);
+
+ function processStep(s)
+ {
+ try {
+ processStepOrFail(s);
+ } catch (e) {
+ InspectorTest.log(e.stack);
+ InspectorTest.completeTest();
+ }
+ }
+
+ function processStepOrFail(s)
+ {
+ if (!s) {
+ InspectorTest.completeTest();
+ return;
+ }
+ if (!s.command) {
+ // A simple loopback step.
+ var next = s.callback();
+ processStep(next);
+ return;
+ }
+
+ var innerCallback = function(response)
+ {
+ if ("error" in response) {
+ InspectorTest.log(response.error.message);
+ InspectorTest.completeTest();
+ return;
+ }
+ var next;
+ try {
+ next = s.callback(response.result);
+ } catch (e) {
+ InspectorTest.log(e.stack);
+ InspectorTest.completeTest();
+ return;
+ }
+ processStep(next);
+ }
+ var command = s.command.split(".");
+ Protocol[command[0]][command[1]](s.params).then(innerCallback);
+ }
+}
+
+var firstStep = { callback: callbackStart5 };
+
+runRequestSeries(firstStep);
+
+// 'Object5' section -- check properties of '5' wrapped as object (has an internal property).
+
+function callbackStart5()
+{
+ // Create an wrapper object with additional property.
+ var expression = "(function(){var r = Object(5); r.foo = 'cat';return r;})()";
+
+ return { command: "Runtime.evaluate", params: {expression: expression}, callback: callbackEval5 };
+}
+function callbackEval5(result)
+{
+ var id = result.result.objectId;
+ if (id === undefined)
+ throw new Error("objectId is expected");
+ return {
+ command: "Runtime.getProperties", params: {objectId: id, ownProperties: true}, callback: callbackProperties5
+ };
+}
+function callbackProperties5(result)
+{
+ logGetPropertiesResult("Object(5)", result);
+ return { callback: callbackStartNotOwn };
+}
+
+
+// 'Not own' section -- check all properties of the object, including ones from it prototype chain.
+
+function callbackStartNotOwn()
+{
+ // Create an wrapper object with additional property.
+ var expression = "({ a: 2, set b(_) {}, get b() {return 5;}, __proto__: { a: 3, c: 4, get d() {return 6;} }})";
+
+ return { command: "Runtime.evaluate", params: {expression: expression}, callback: callbackEvalNotOwn };
+}
+function callbackEvalNotOwn(result)
+{
+ var id = result.result.objectId;
+ if (id === undefined)
+ throw new Error("objectId is expected");
+ return {
+ command: "Runtime.getProperties", params: {objectId: id, ownProperties: false}, callback: callbackPropertiesNotOwn
+ };
+}
+function callbackPropertiesNotOwn(result)
+{
+ logGetPropertiesResult("Not own properties", result);
+ return { callback: callbackStartAccessorsOnly };
+}
+
+
+// 'Accessors only' section -- check only accessor properties of the object.
+
+function callbackStartAccessorsOnly()
+{
+ // Create an wrapper object with additional property.
+ var expression = "({ a: 2, set b(_) {}, get b() {return 5;}, c: 'c', set d(_){} })";
+
+ return { command: "Runtime.evaluate", params: {expression: expression}, callback: callbackEvalAccessorsOnly };
+}
+function callbackEvalAccessorsOnly(result)
+{
+ var id = result.result.objectId;
+ if (id === undefined)
+ throw new Error("objectId is expected");
+ return {
+ command: "Runtime.getProperties", params: {objectId: id, ownProperties: true, accessorPropertiesOnly: true}, callback: callbackPropertiesAccessorsOnly
+ };
+}
+function callbackPropertiesAccessorsOnly(result)
+{
+ logGetPropertiesResult("Accessor only properties", result);
+ return { callback: callbackStartArray };
+}
+
+
+// 'Array' section -- check properties of an array.
+
+function callbackStartArray()
+{
+ var expression = "['red', 'green', 'blue']";
+ return { command: "Runtime.evaluate", params: {expression: expression}, callback: callbackEvalArray };
+}
+function callbackEvalArray(result)
+{
+ var id = result.result.objectId;
+ if (id === undefined)
+ throw new Error("objectId is expected");
+ return {
+ command: "Runtime.getProperties", params: {objectId: id, ownProperties: true}, callback: callbackPropertiesArray
+ };
+}
+function callbackPropertiesArray(result)
+{
+ logGetPropertiesResult("array", result);
+ return { callback: callbackStartBound };
+}
+
+
+// 'Bound' section -- check properties of a bound function (has a bunch of internal properties).
+
+function callbackStartBound()
+{
+ var expression = "Number.bind({}, 5)";
+ return { command: "Runtime.evaluate", params: {expression: expression}, callback: callbackEvalBound };
+}
+function callbackEvalBound(result)
+{
+ var id = result.result.objectId;
+ if (id === undefined)
+ throw new Error("objectId is expected");
+ return {
+ command: "Runtime.getProperties", params: {objectId: id, ownProperties: true}, callback: callbackPropertiesBound
+ };
+}
+function callbackPropertiesBound(result)
+{
+ logGetPropertiesResult("Bound function", result);
+ return; // End of test
+}
+
+// A helper function that dumps object properties and internal properties in sorted order.
+function logGetPropertiesResult(title, protocolResult)
+{
+ function hasGetterSetter(property, fieldName)
+ {
+ var v = property[fieldName];
+ if (!v)
+ return false;
+ return v.type !== "undefined"
+ }
+
+ InspectorTest.log("Properties of " + title);
+ var propertyArray = protocolResult.result;
+ propertyArray.sort(NamedThingComparator);
+ for (var i = 0; i < propertyArray.length; i++) {
+ var p = propertyArray[i];
+ var v = p.value;
+ var own = p.isOwn ? "own" : "inherited";
+ if (v)
+ InspectorTest.log(" " + p.name + " " + own + " " + v.type + " " + v.value);
+ else
+ InspectorTest.log(" " + p.name + " " + own + " no value" +
+ (hasGetterSetter(p, "get") ? ", getter" : "") + (hasGetterSetter(p, "set") ? ", setter" : ""));
+ }
+ var internalPropertyArray = protocolResult.internalProperties;
+ if (internalPropertyArray) {
+ InspectorTest.log("Internal properties");
+ internalPropertyArray.sort(NamedThingComparator);
+ for (var i = 0; i < internalPropertyArray.length; i++) {
+ var p = internalPropertyArray[i];
+ var v = p.value;
+ InspectorTest.log(" " + p.name + " " + v.type + " " + v.value);
+ }
+ }
+
+ function NamedThingComparator(o1, o2)
+ {
+ return o1.name === o2.name ? 0 : (o1.name < o2.name ? -1 : 1);
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/property-on-console-proto-expected.txt b/deps/v8/test/inspector/runtime/property-on-console-proto-expected.txt
new file mode 100644
index 0000000000..6e75294e82
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/property-on-console-proto-expected.txt
@@ -0,0 +1,12 @@
+Tests that property defined on console.__proto__ doesn't observable on other Objects.
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 0
+ type : number
+ value : 0
+ }
+ }
+}
+
diff --git a/deps/v8/test/inspector/runtime/property-on-console-proto.js b/deps/v8/test/inspector/runtime/property-on-console-proto.js
new file mode 100644
index 0000000000..001dd00291
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/property-on-console-proto.js
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Tests that property defined on console.__proto__ doesn't observable on other Objects.");
+
+InspectorTest.addScript(`
+function testFunction()
+{
+ var amountOfProperties = 0;
+ for (var p in {})
+ ++amountOfProperties;
+ console.__proto__.debug = 239;
+ for (var p in {})
+ --amountOfProperties;
+ return amountOfProperties;
+}`);
+
+Protocol.Runtime.evaluate({ "expression": "testFunction()" }).then(dumpResult);
+
+function dumpResult(result)
+{
+ InspectorTest.logMessage(result);
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/runtime/protocol-works-with-different-locale-expected.txt b/deps/v8/test/inspector/runtime/protocol-works-with-different-locale-expected.txt
new file mode 100644
index 0000000000..d526d5d447
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/protocol-works-with-different-locale-expected.txt
@@ -0,0 +1,138 @@
+Running test: consoleLogWithDefaultLocale
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ description : 239
+ type : number
+ value : 239
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : log
+ }
+}
+
+Running test: consoleTimeWithCommaAsSeparator
+set locale to fr_CA.UTF-8 (has comma as separator)
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : a: x.xms
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 27
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : debug
+ }
+}
+
+Running test: consoleLogWithCommaAsSeparator
+set locale to fr_CA.UTF-8 (has comma as separator)
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ description : 239
+ type : number
+ value : 239
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : log
+ }
+}
+
+Running test: consoleTimeWithCommaAfterConsoleLog
+set locale to fr_CA.UTF-8 (has comma as separator)
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ description : 239
+ type : number
+ value : 239
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 8
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : log
+ }
+}
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : a: x.xms
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 27
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : debug
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/protocol-works-with-different-locale.js b/deps/v8/test/inspector/runtime/protocol-works-with-different-locale.js
new file mode 100644
index 0000000000..381dfab31e
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/protocol-works-with-different-locale.js
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Protocol.Runtime.enable();
+
+Protocol.Runtime.onConsoleAPICalled(dumpConsoleApiCalled);
+
+InspectorTest.runTestSuite([
+ function consoleLogWithDefaultLocale(next) {
+ Protocol.Runtime.evaluate({ expression: "console.log(239) "}).then(next);
+ },
+
+ function consoleTimeWithCommaAsSeparator(next) {
+ InspectorTest.log("set locale to fr_CA.UTF-8 (has comma as separator)");
+ setlocale("fr_CA.UTF-8");
+ Protocol.Runtime.evaluate({ expression: "console.time(\"a\"); console.timeEnd(\"a\")"}).then(next);
+ },
+
+ function consoleLogWithCommaAsSeparator(next) {
+ InspectorTest.log("set locale to fr_CA.UTF-8 (has comma as separator)");
+ setlocale("fr_CA.UTF-8");
+ Protocol.Runtime.evaluate({ expression: "console.log(239) "}).then(next);
+ },
+
+ function consoleTimeWithCommaAfterConsoleLog(next) {
+ InspectorTest.log("set locale to fr_CA.UTF-8 (has comma as separator)");
+ setlocale("fr_CA.UTF-8");
+ Protocol.Runtime.evaluate({ expression: "console.log(239) "})
+ .then(() => Protocol.Runtime.evaluate({ expression: "console.time(\"a\"); console.timeEnd(\"a\")"}))
+ .then(next);
+ }
+]);
+
+function dumpConsoleApiCalled(message) {
+ var firstArg = message.params.args[0];
+ if (firstArg.type === "string")
+ firstArg.value = firstArg.value.replace(/[0-9]+/g, "x");
+ InspectorTest.logMessage(message);
+}
diff --git a/deps/v8/test/inspector/runtime/run-script-async-expected.txt b/deps/v8/test/inspector/runtime/run-script-async-expected.txt
new file mode 100644
index 0000000000..c6a53caee6
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/run-script-async-expected.txt
@@ -0,0 +1,191 @@
+Tests that Runtime.compileScript and Runtime.runScript work with awaitPromise flag.
+
+Running test: testRunAndCompileWithoutAgentEnable
+{
+ error : {
+ code : -32000
+ message : Runtime agent is not enabled
+ }
+ id : <messageId>
+}
+{
+ error : {
+ code : -32000
+ message : Runtime agent is not enabled
+ }
+ id : <messageId>
+}
+
+Running test: testSyntaxErrorInScript
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 1
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token }
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 1
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ }
+}
+
+Running test: testSyntaxErrorInEvalInScript
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token } at boo.js:2:2
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 1
+ functionName :
+ lineNumber : 1
+ scriptId : <scriptId>
+ url : boo.js
+ }
+ ]
+ }
+ text : Uncaught
+ }
+ result : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token } at boo.js:2:2
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+
+Running test: testRunNotCompiledScript
+{
+ error : {
+ code : -32000
+ message : No script with given id
+ }
+ id : <messageId>
+}
+
+Running test: testRunCompiledScriptAfterAgentWasReenabled
+{
+ error : {
+ code : -32000
+ message : Runtime agent is not enabled
+ }
+ id : <messageId>
+}
+{
+ error : {
+ code : -32000
+ message : No script with given id
+ }
+ id : <messageId>
+}
+
+Running test: testRunScriptWithPreview
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ preview : {
+ description : Object
+ overflow : false
+ properties : [
+ [0] : {
+ name : a
+ type : number
+ value : 1
+ }
+ ]
+ type : object
+ }
+ type : object
+ }
+ }
+}
+
+Running test: testRunScriptReturnByValue
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : {
+ a : 1
+ }
+ }
+ }
+}
+
+Running test: testAwaitNotPromise
+{
+ error : {
+ code : -32000
+ message : Result of the script execution is not a promise
+ }
+ id : <messageId>
+}
+
+Running test: testAwaitResolvedPromise
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : object
+ value : {
+ a : 1
+ }
+ }
+ }
+}
+
+Running test: testAwaitRejectedPromise
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ type : object
+ value : {
+ a : 1
+ }
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ stackTrace : {
+ callFrames : [
+ ]
+ }
+ text : Uncaught (in promise)
+ }
+ result : {
+ type : object
+ value : {
+ a : 1
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/deps/v8/test/inspector/runtime/run-script-async.js b/deps/v8/test/inspector/runtime/run-script-async.js
new file mode 100644
index 0000000000..0aa90962a5
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/run-script-async.js
@@ -0,0 +1,110 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Tests that Runtime.compileScript and Runtime.runScript work with awaitPromise flag.");
+
+InspectorTest.runTestSuite([
+ function testRunAndCompileWithoutAgentEnable(next)
+ {
+ Protocol.Runtime.compileScript({ expression: "", sourceURL: "", persistScript: true })
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => Protocol.Runtime.runScript({ scriptId: "1" }))
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => next());
+ },
+
+ function testSyntaxErrorInScript(next)
+ {
+ Protocol.Runtime.enable()
+ .then(() => Protocol.Runtime.compileScript({ expression: "\n }", sourceURL: "boo.js", persistScript: true }))
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => Protocol.Runtime.disable())
+ .then(() => next());
+ },
+
+ function testSyntaxErrorInEvalInScript(next)
+ {
+ Protocol.Runtime.enable()
+ .then(() => Protocol.Runtime.compileScript({ expression: "{\n eval(\"\\\n}\")\n}", sourceURL: "boo.js", persistScript: true }))
+ .then((result) => Protocol.Runtime.runScript({ scriptId: result.result.scriptId }))
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => Protocol.Runtime.disable())
+ .then(() => next());
+ },
+
+ function testRunNotCompiledScript(next)
+ {
+ Protocol.Runtime.enable()
+ .then((result) => Protocol.Runtime.runScript({ scriptId: "1" }))
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => Protocol.Runtime.disable())
+ .then(() => next());
+ },
+
+ function testRunCompiledScriptAfterAgentWasReenabled(next)
+ {
+ var scriptId;
+ Protocol.Runtime.enable()
+ .then(() => Protocol.Runtime.compileScript({ expression: "{\n eval(\"\\\n}\")\n}", sourceURL: "boo.js", persistScript: true }))
+ .then((result) => scriptId = result.result.scriptId)
+ .then(() => Protocol.Runtime.disable())
+ .then((result) => Protocol.Runtime.runScript({ scriptId: scriptId }))
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => Protocol.Runtime.enable())
+ .then((result) => Protocol.Runtime.runScript({ scriptId: scriptId }))
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => Protocol.Runtime.disable())
+ .then(() => next());
+ },
+
+ function testRunScriptWithPreview(next)
+ {
+ Protocol.Runtime.enable()
+ .then(() => Protocol.Runtime.compileScript({ expression: "({a:1})", sourceURL: "boo.js", persistScript: true }))
+ .then((result) => Protocol.Runtime.runScript({ scriptId: result.result.scriptId, generatePreview: true }))
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => Protocol.Runtime.disable())
+ .then(() => next());
+ },
+
+ function testRunScriptReturnByValue(next)
+ {
+ Protocol.Runtime.enable()
+ .then(() => Protocol.Runtime.compileScript({ expression: "({a:1})", sourceURL: "boo.js", persistScript: true }))
+ .then((result) => Protocol.Runtime.runScript({ scriptId: result.result.scriptId, returnByValue: true }))
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => Protocol.Runtime.disable())
+ .then(() => next());
+ },
+
+ function testAwaitNotPromise(next)
+ {
+ Protocol.Runtime.enable()
+ .then(() => Protocol.Runtime.compileScript({ expression: "({a:1})", sourceURL: "boo.js", persistScript: true }))
+ .then((result) => Protocol.Runtime.runScript({ scriptId: result.result.scriptId, awaitPromise: true }))
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => Protocol.Runtime.disable())
+ .then(() => next());
+ },
+
+ function testAwaitResolvedPromise(next)
+ {
+ Protocol.Runtime.enable()
+ .then(() => Protocol.Runtime.compileScript({ expression: "Promise.resolve({a:1})", sourceURL: "boo.js", persistScript: true }))
+ .then((result) => Protocol.Runtime.runScript({ scriptId: result.result.scriptId, awaitPromise: true, returnByValue: true }))
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => Protocol.Runtime.disable())
+ .then(() => next());
+ },
+
+ function testAwaitRejectedPromise(next)
+ {
+ Protocol.Runtime.enable()
+ .then(() => Protocol.Runtime.compileScript({ expression: "Promise.reject({a:1})", sourceURL: "boo.js", persistScript: true }))
+ .then((result) => Protocol.Runtime.runScript({ scriptId: result.result.scriptId, awaitPromise: true, returnByValue: true }))
+ .then((result) => InspectorTest.logMessage(result))
+ .then(() => Protocol.Runtime.disable())
+ .then(() => next());
+ }
+]);
diff --git a/deps/v8/test/inspector/runtime/set-or-map-entries-expected.txt b/deps/v8/test/inspector/runtime/set-or-map-entries-expected.txt
new file mode 100644
index 0000000000..05f6d972f1
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/set-or-map-entries-expected.txt
@@ -0,0 +1,9 @@
+Test that Runtime.getProperties doesn't truncate set and map entries in internalProperties.
+Entries for "createSet(10)"
+Array[10]
+Entries for "createSet(1000)"
+Array[1000]
+Entries for "createMap(10)"
+Array[10]
+Entries for "createMap(1000)"
+Array[1000]
diff --git a/deps/v8/test/inspector/runtime/set-or-map-entries.js b/deps/v8/test/inspector/runtime/set-or-map-entries.js
new file mode 100644
index 0000000000..33ba7c0547
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/set-or-map-entries.js
@@ -0,0 +1,52 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Test that Runtime.getProperties doesn't truncate set and map entries in internalProperties.")
+
+InspectorTest.addScript(`
+ function createSet(size) {
+ var s = new Set();
+ var a = {};
+ a.a = a;
+ for (var i = 0; i < size; ++i) s.add({ wrapper: a});
+ return s;
+ }
+
+ function createMap(size) {
+ var m = new Map();
+ var a = {};
+ a.a = a;
+ for (var i = 0; i < size; ++i) m.set(i, { wrapper: a});
+ return m;
+ }
+`);
+
+Protocol.Debugger.enable();
+Protocol.Runtime.enable();
+
+testExpression("createSet(10)")
+ .then(() => testExpression("createSet(1000)"))
+ .then(() => testExpression("createMap(10)"))
+ .then(() => testExpression("createMap(1000)"))
+ .then(() => InspectorTest.completeTest());
+
+function testExpression(expression)
+{
+ return Protocol.Runtime.evaluate({ "expression": expression})
+ .then(result => Protocol.Runtime.getProperties({ ownProperties: true, objectId: result.result.result.objectId }))
+ .then(message => dumpEntriesDescription(expression, message));
+}
+
+function dumpEntriesDescription(expression, message)
+{
+ InspectorTest.log(`Entries for "${expression}"`);
+ var properties = message.result.internalProperties;
+ var property;
+ if (properties)
+ property = properties.find(property => property.name === "[[Entries]]");
+ if (!property)
+ InspectorTest.log("[[Entries]] not found");
+ else
+ InspectorTest.log(property.value.description);
+}
diff --git a/deps/v8/test/inspector/task-runner.cc b/deps/v8/test/inspector/task-runner.cc
new file mode 100644
index 0000000000..c78d23b415
--- /dev/null
+++ b/deps/v8/test/inspector/task-runner.cc
@@ -0,0 +1,145 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/inspector/task-runner.h"
+
+#if !defined(_WIN32) && !defined(_WIN64)
+#include <unistd.h> // NOLINT
+#endif // !defined(_WIN32) && !defined(_WIN64)
+
+namespace {
+
+const int kTaskRunnerIndex = 2;
+
+void ReportUncaughtException(v8::Isolate* isolate,
+ const v8::TryCatch& try_catch) {
+ CHECK(try_catch.HasCaught());
+ v8::HandleScope handle_scope(isolate);
+ std::string message = *v8::String::Utf8Value(try_catch.Message()->Get());
+ fprintf(stderr, "Unhandle exception: %s\n", message.data());
+}
+
+} // namespace
+
+TaskRunner::TaskRunner(v8::ExtensionConfiguration* extensions,
+ bool catch_exceptions,
+ v8::base::Semaphore* ready_semaphore)
+ : Thread(Options("Task Runner")),
+ extensions_(extensions),
+ catch_exceptions_(catch_exceptions),
+ ready_semaphore_(ready_semaphore),
+ isolate_(nullptr),
+ process_queue_semaphore_(0),
+ nested_loop_count_(0) {
+ Start();
+}
+
+TaskRunner::~TaskRunner() { Join(); }
+
+void TaskRunner::InitializeContext() {
+ v8::Isolate::CreateParams params;
+ params.array_buffer_allocator =
+ v8::ArrayBuffer::Allocator::NewDefaultAllocator();
+ isolate_ = v8::Isolate::New(params);
+ isolate_->SetMicrotasksPolicy(v8::MicrotasksPolicy::kScoped);
+ v8::Isolate::Scope isolate_scope(isolate_);
+ v8::HandleScope handle_scope(isolate_);
+
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate_);
+ v8::Local<v8::Context> context =
+ v8::Context::New(isolate_, extensions_, global_template);
+ context->SetAlignedPointerInEmbedderData(kTaskRunnerIndex, this);
+ context_.Reset(isolate_, context);
+
+ if (ready_semaphore_) ready_semaphore_->Signal();
+}
+
+void TaskRunner::Run() {
+ InitializeContext();
+ RunMessageLoop(false);
+}
+
+void TaskRunner::RunMessageLoop(bool only_protocol) {
+ int loop_number = ++nested_loop_count_;
+ while (nested_loop_count_ == loop_number) {
+ TaskRunner::Task* task = GetNext(only_protocol);
+ v8::Isolate::Scope isolate_scope(isolate_);
+ if (catch_exceptions_) {
+ v8::TryCatch try_catch(isolate_);
+ task->Run(isolate_, context_);
+ delete task;
+ if (try_catch.HasCaught()) {
+ ReportUncaughtException(isolate_, try_catch);
+ fflush(stdout);
+ fflush(stderr);
+ _exit(0);
+ }
+ } else {
+ task->Run(isolate_, context_);
+ delete task;
+ }
+ }
+}
+
+void TaskRunner::QuitMessageLoop() {
+ DCHECK(nested_loop_count_ > 0);
+ --nested_loop_count_;
+}
+
+void TaskRunner::Append(Task* task) {
+ queue_.Enqueue(task);
+ process_queue_semaphore_.Signal();
+}
+
+TaskRunner::Task* TaskRunner::GetNext(bool only_protocol) {
+ for (;;) {
+ if (only_protocol) {
+ Task* task = nullptr;
+ if (queue_.Dequeue(&task)) {
+ if (task->is_inspector_task()) return task;
+ deffered_queue_.Enqueue(task);
+ }
+ } else {
+ Task* task = nullptr;
+ if (deffered_queue_.Dequeue(&task)) return task;
+ if (queue_.Dequeue(&task)) return task;
+ }
+ process_queue_semaphore_.Wait();
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+TaskRunner* TaskRunner::FromContext(v8::Local<v8::Context> context) {
+ return static_cast<TaskRunner*>(
+ context->GetAlignedPointerFromEmbedderData(kTaskRunnerIndex));
+}
+
+ExecuteStringTask::ExecuteStringTask(const v8_inspector::String16& expression)
+ : expression_(expression) {}
+
+void ExecuteStringTask::Run(v8::Isolate* isolate,
+ const v8::Global<v8::Context>& context) {
+ v8::MicrotasksScope microtasks_scope(isolate,
+ v8::MicrotasksScope::kRunMicrotasks);
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> local_context = context.Get(isolate);
+ v8::Context::Scope context_scope(local_context);
+
+ v8::ScriptOrigin origin(v8::String::Empty(isolate));
+ v8::Local<v8::String> source =
+ v8::String::NewFromTwoByte(isolate, expression_.characters16(),
+ v8::NewStringType::kNormal,
+ static_cast<int>(expression_.length()))
+ .ToLocalChecked();
+
+ v8::ScriptCompiler::Source scriptSource(source, origin);
+ v8::Local<v8::Script> script;
+ if (!v8::ScriptCompiler::Compile(local_context, &scriptSource)
+ .ToLocal(&script))
+ return;
+ v8::MaybeLocal<v8::Value> result;
+ result = script->Run(local_context);
+}
diff --git a/deps/v8/test/inspector/task-runner.h b/deps/v8/test/inspector/task-runner.h
new file mode 100644
index 0000000000..88c36543d3
--- /dev/null
+++ b/deps/v8/test/inspector/task-runner.h
@@ -0,0 +1,80 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_INSPECTOR_PROTOCOL_TASK_RUNNER_H_
+#define V8_TEST_INSPECTOR_PROTOCOL_TASK_RUNNER_H_
+
+#include "include/v8-inspector.h"
+#include "include/v8-platform.h"
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/inspector/string-16.h"
+#include "src/locked-queue-inl.h"
+
+class TaskRunner : public v8::base::Thread {
+ public:
+ class Task {
+ public:
+ virtual ~Task() {}
+ virtual bool is_inspector_task() = 0;
+ virtual void Run(v8::Isolate* isolate,
+ const v8::Global<v8::Context>& context) = 0;
+ };
+
+ TaskRunner(v8::ExtensionConfiguration* extensions, bool catch_exceptions,
+ v8::base::Semaphore* ready_semaphore);
+ virtual ~TaskRunner();
+
+ // Thread implementation.
+ void Run() override;
+
+ // Should be called from the same thread and only from task.
+ void RunMessageLoop(bool only_protocol);
+ void QuitMessageLoop();
+
+ // TaskRunner takes ownership.
+ void Append(Task* task);
+
+ static TaskRunner* FromContext(v8::Local<v8::Context>);
+
+ private:
+ void InitializeContext();
+ Task* GetNext(bool only_protocol);
+
+ v8::ExtensionConfiguration* extensions_;
+ bool catch_exceptions_;
+ v8::base::Semaphore* ready_semaphore_;
+
+ v8::Isolate* isolate_;
+ v8::Global<v8::Context> context_;
+
+ // deferred_queue_ combined with queue_ (in this order) have all tasks in the
+ // correct order.
+ // Sometimes we skip non-protocol tasks by moving them from queue_ to
+ // deferred_queue_.
+ v8::internal::LockedQueue<Task*> queue_;
+ v8::internal::LockedQueue<Task*> deffered_queue_;
+ v8::base::Semaphore process_queue_semaphore_;
+
+ int nested_loop_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskRunner);
+};
+
+class ExecuteStringTask : public TaskRunner::Task {
+ public:
+ explicit ExecuteStringTask(const v8_inspector::String16& expression);
+ bool is_inspector_task() override { return false; }
+
+ void Run(v8::Isolate* isolate,
+ const v8::Global<v8::Context>& context) override;
+
+ private:
+ v8_inspector::String16 expression_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExecuteStringTask);
+};
+
+#endif // V8_TEST_INSPECTOR_PROTOCOL_TASK_RUNNER_H_
diff --git a/deps/v8/test/inspector/testcfg.py b/deps/v8/test/inspector/testcfg.py
new file mode 100644
index 0000000000..6995669a15
--- /dev/null
+++ b/deps/v8/test/inspector/testcfg.py
@@ -0,0 +1,109 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import itertools
+import os
+import re
+
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.objects import testcase
+
+FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
+PROTOCOL_TEST_JS = "protocol-test.js"
+EXPECTED_SUFFIX = "-expected.txt"
+
+class InspectorProtocolTestSuite(testsuite.TestSuite):
+
+ def __init__(self, name, root):
+ super(InspectorProtocolTestSuite, self).__init__(name, root)
+
+ def ListTests(self, context):
+ tests = []
+ for dirname, dirs, files in os.walk(os.path.join(self.root), followlinks=True):
+ for dotted in [x for x in dirs if x.startswith('.')]:
+ dirs.remove(dotted)
+ dirs.sort()
+ files.sort()
+ for filename in files:
+ if filename.endswith(".js") and filename != PROTOCOL_TEST_JS:
+ fullpath = os.path.join(dirname, filename)
+ relpath = fullpath[len(self.root) + 1 : -3]
+ testname = relpath.replace(os.path.sep, "/")
+ test = testcase.TestCase(self, testname)
+ tests.append(test)
+ return tests
+
+ def GetFlagsForTestCase(self, testcase, context):
+ source = self.GetSourceForTest(testcase)
+ flags_match = re.findall(FLAGS_PATTERN, source)
+ flags = []
+ for match in flags_match:
+ flags += match.strip().split()
+ testname = testcase.path.split(os.path.sep)[-1]
+ testfilename = os.path.join(self.root, testcase.path + self.suffix())
+ protocoltestfilename = os.path.join(self.root, PROTOCOL_TEST_JS)
+ return [ protocoltestfilename, testfilename ] + flags
+
+ def GetSourceForTest(self, testcase):
+ filename = os.path.join(self.root, testcase.path + self.suffix())
+ with open(filename) as f:
+ return f.read()
+
+ def shell(self):
+ return "inspector-test"
+
+ def _IgnoreLine(self, string):
+ """Ignore empty lines, valgrind output and Android output."""
+ if not string: return True
+ return (string.startswith("==") or string.startswith("**") or
+ string.startswith("ANDROID") or
+ # FIXME(machenbach): The test driver shouldn't try to use slow
+ # asserts if they weren't compiled. This fails in optdebug=2.
+ string == "Warning: unknown flag --enable-slow-asserts." or
+ string == "Try --help for options")
+
+ def IsFailureOutput(self, testcase):
+ file_name = os.path.join(self.root, testcase.path) + EXPECTED_SUFFIX
+ with file(file_name, "r") as expected:
+ expected_lines = expected.readlines()
+
+ def ExpIterator():
+ for line in expected_lines:
+ if line.startswith("#") or not line.strip(): continue
+ yield line.strip()
+
+ def ActIterator(lines):
+ for line in lines:
+ if self._IgnoreLine(line.strip()): continue
+ yield line.strip()
+
+ def ActBlockIterator():
+ """Iterates over blocks of actual output lines."""
+ lines = testcase.output.stdout.splitlines()
+ start_index = 0
+ found_eqeq = False
+ for index, line in enumerate(lines):
+ # If a stress test separator is found:
+ if line.startswith("=="):
+ # Iterate over all lines before a separator except the first.
+ if not found_eqeq:
+ found_eqeq = True
+ else:
+ yield ActIterator(lines[start_index:index])
+ # The next block of output lines starts after the separator.
+ start_index = index + 1
+ # Iterate over complete output if no separator was found.
+ if not found_eqeq:
+ yield ActIterator(lines)
+
+ for act_iterator in ActBlockIterator():
+ for (expected, actual) in itertools.izip_longest(
+ ExpIterator(), act_iterator, fillvalue=''):
+ if expected != actual:
+ return True
+ return False
+
+def GetSuite(name, root):
+ return InspectorProtocolTestSuite(name, root)
diff --git a/deps/v8/test/intl/assert.js b/deps/v8/test/intl/assert.js
index e17615267a..26405e8e9e 100644
--- a/deps/v8/test/intl/assert.js
+++ b/deps/v8/test/intl/assert.js
@@ -27,6 +27,14 @@
// Some methods are taken from v8/test/mjsunit/mjsunit.js
+
+function classOf(object) {
+ // Argument must not be null or undefined.
+ var string = Object.prototype.toString.call(object);
+ // String has format [object <ClassName>].
+ return string.substring(8, string.length - 1);
+}
+
/**
* Compares two objects for key/value equality.
* Returns true if they are equal, false otherwise.
diff --git a/deps/v8/test/intl/date-format/date-format-to-parts.js b/deps/v8/test/intl/date-format/date-format-to-parts.js
new file mode 100644
index 0000000000..cd954acc79
--- /dev/null
+++ b/deps/v8/test/intl/date-format/date-format-to-parts.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --datetime-format-to-parts
+
+var d = new Date(2016, 11, 15, 14, 10, 34);
+var df = Intl.DateTimeFormat("ja",
+ {hour: 'numeric', minute: 'numeric', second: 'numeric', year: 'numeric',
+ month: 'numeric', day: 'numeric', timeZoneName: 'short', era: 'short'});
+
+var formattedParts = df.formatToParts(d);
+
+var formattedReconstructedFromParts = formattedParts.map((part) => part.value)
+ .reduce((accumulated, part) => accumulated + part);
+assertEquals(df.format(d), formattedReconstructedFromParts);
+// 西暦2016年11月15日 14:10:34 GMT-7
+assertEquals(["era", "year", "literal", "month", "literal", "day", "literal",
+ "hour", "literal", "minute", "literal", "second", "literal",
+ "timeZoneName"], formattedParts.map((part) => part.type));
diff --git a/deps/v8/test/intl/date-format/parse-MMMdy.js b/deps/v8/test/intl/date-format/parse-MMMdy.js
deleted file mode 100644
index f8291f49a9..0000000000
--- a/deps/v8/test/intl/date-format/parse-MMMdy.js
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Testing v8Parse method for date and time pattern.
-// Month is represented as a short name.
-
-// Flags: --intl-extra
-
-var dtf = new Intl.DateTimeFormat(['en'],
- {year: 'numeric', month: 'short',
- day: 'numeric',
- timeZone: 'America/Los_Angeles'});
-
-// Make sure we have pattern we expect (may change in the future).
-assertEquals('MMM d, y', dtf.resolved.pattern);
-
-var date = dtf.v8Parse('Feb 4, 1974');
-assertEquals(1974, date.getUTCFullYear());
-assertEquals(1, date.getUTCMonth());
-assertEquals(4, date.getUTCDate());
-
-// Can deal with a missing ','.
-date = dtf.v8Parse('Feb 4 1974');
-assertEquals(1974, date.getUTCFullYear());
-assertEquals(1, date.getUTCMonth());
-assertEquals(4, date.getUTCDate());
-
-// Extra "th" after 4 in the pattern.
-assertEquals(undefined, dtf.v8Parse('Feb 4th, 1974'));
-
-// TODO(jshin): Make sure if this is what's supposed to be.
-date = dtf.v8Parse('2/4/1974');
-assertEquals(1974, date.getUTCFullYear());
-assertEquals(1, date.getUTCMonth());
-assertEquals(4, date.getUTCDate());
diff --git a/deps/v8/test/intl/date-format/parse-invalid-input.js b/deps/v8/test/intl/date-format/parse-invalid-input.js
deleted file mode 100644
index 47a95477eb..0000000000
--- a/deps/v8/test/intl/date-format/parse-invalid-input.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --intl-extra
-
-// Invalid input is handled properly.
-
-var dtf = new Intl.DateTimeFormat(['en']);
-
-assertEquals(undefined, dtf.v8Parse(''));
-assertEquals(undefined, dtf.v8Parse('A'));
-assertEquals(undefined, dtf.v8Parse(5));
-assertEquals(undefined, dtf.v8Parse(new Date()));
diff --git a/deps/v8/test/intl/date-format/parse-mdy.js b/deps/v8/test/intl/date-format/parse-mdy.js
deleted file mode 100644
index a248a08422..0000000000
--- a/deps/v8/test/intl/date-format/parse-mdy.js
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --intl-extra
-
-// Testing v8Parse method for date only.
-
-function checkDate(date) {
- assertEquals(1974, date.getUTCFullYear());
- assertEquals(1, date.getUTCMonth());
- assertEquals(4, date.getUTCDate());
-}
-
-var dtf = new Intl.DateTimeFormat(['en'], {timeZone: 'America/Los_Angeles'});
-
-// Make sure we have pattern we expect (may change in the future).
-assertEquals('M/d/y', dtf.resolved.pattern);
-
-checkDate(dtf.v8Parse('2/4/74'));
-checkDate(dtf.v8Parse('02/04/74'));
-checkDate(dtf.v8Parse('2/04/74'));
-checkDate(dtf.v8Parse('02/4/74'));
-checkDate(dtf.v8Parse('2/4/1974'));
-checkDate(dtf.v8Parse('02/4/1974'));
-checkDate(dtf.v8Parse('2/04/1974'));
-checkDate(dtf.v8Parse('02/04/1974'));
-
-// Month is numeric, so it fails on "Feb".
-assertEquals(undefined, dtf.v8Parse('Feb 4th 1974'));
diff --git a/deps/v8/test/intl/date-format/parse-mdyhms.js b/deps/v8/test/intl/date-format/parse-mdyhms.js
deleted file mode 100644
index 766f7192fe..0000000000
--- a/deps/v8/test/intl/date-format/parse-mdyhms.js
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Testing v8Parse method for date and time pattern.
-//
-// Flags: --intl-extra
-
-var dtf = new Intl.DateTimeFormat(['en'],
- {year: 'numeric', month: 'numeric',
- day: 'numeric', hour: 'numeric',
- minute: 'numeric', second: 'numeric',
- timeZone: 'UTC'});
-
-// Make sure we have pattern we expect (may change in the future).
-assertEquals('M/d/y, h:mm:ss a', dtf.resolved.pattern);
-
-var date = dtf.v8Parse('2/4/74 12:30:42 pm');
-assertEquals(1974, date.getUTCFullYear());
-assertEquals(1, date.getUTCMonth());
-assertEquals(4, date.getUTCDate());
-assertEquals(12, date.getUTCHours());
-assertEquals(30, date.getUTCMinutes());
-assertEquals(42, date.getUTCSeconds());
-
-// Can deal with '-' vs '/'.
-date = dtf.v8Parse('2-4-74 12:30:42 am');
-assertEquals(1974, date.getUTCFullYear());
-assertEquals(1, date.getUTCMonth());
-assertEquals(4, date.getUTCDate());
-assertEquals(0, date.getUTCHours());
-assertEquals(30, date.getUTCMinutes());
-assertEquals(42, date.getUTCSeconds());
-
-// AM/PM were not specified.
-assertEquals(undefined, dtf.v8Parse('2/4/74 12:30:42'));
-
-// Time was not specified.
-assertEquals(undefined, dtf.v8Parse('2/4/74'));
-
-// Month is numeric, so it fails on "Feb".
-assertEquals(undefined, dtf.v8Parse('Feb 4th 1974'));
diff --git a/deps/v8/test/intl/extra-flag.js b/deps/v8/test/intl/extra-flag.js
deleted file mode 100644
index 3d434a302b..0000000000
--- a/deps/v8/test/intl/extra-flag.js
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --intl-extra
-
-// Turning on the creates the non-standard properties
-
-var dtf = new Intl.DateTimeFormat(['en']);
-assertTrue('v8Parse' in dtf);
-assertTrue('resolved' in dtf);
-assertTrue(!!dtf.resolved && 'pattern' in dtf.resolved);
-
-var nf = new Intl.NumberFormat(['en']);
-assertTrue('v8Parse' in nf);
-assertTrue('resolved' in nf);
-assertTrue(!!nf.resolved && 'pattern' in nf.resolved);
-
-var col = new Intl.Collator(['en']);
-assertTrue('resolved' in col);
-
-var br = new Intl.v8BreakIterator(['en']);
-assertTrue('resolved' in br);
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index 15fbe43c12..a0722177d1 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -36,4 +36,9 @@
'date-format/timezone': [PASS, ['no_snap', SKIP]],
'number-format/check-digit-ranges': [PASS, ['no_snap', SKIP]],
}], # 'arch == arm64 and mode == debug and simulator_run == True and variant == ignition'
+
+['variant == asm_wasm', {
+ '*': [SKIP],
+}], # variant == asm_wasm
+
]
diff --git a/deps/v8/test/intl/no-extra-flag.js b/deps/v8/test/intl/no-extra-flag.js
deleted file mode 100644
index 6735f84a2e..0000000000
--- a/deps/v8/test/intl/no-extra-flag.js
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --no-intl-extra
-
-// Turning off the flag removes the non-standard properties
-
-var dtf = new Intl.DateTimeFormat(['en']);
-assertFalse('v8Parse' in dtf);
-assertFalse('resolved' in dtf);
-assertFalse(!!dtf.resolved && 'pattern' in dtf.resolved);
-
-var nf = new Intl.NumberFormat(['en']);
-assertFalse('v8Parse' in nf);
-assertFalse('resolved' in nf);
-assertFalse(!!nf.resolved && 'pattern' in nf.resolved);
-
-var col = new Intl.Collator(['en']);
-assertFalse('resolved' in col);
-
-var br = new Intl.v8BreakIterator(['en']);
-assertFalse('resolved' in br);
diff --git a/deps/v8/test/intl/number-format/parse-decimal.js b/deps/v8/test/intl/number-format/parse-decimal.js
deleted file mode 100644
index 62f4728911..0000000000
--- a/deps/v8/test/intl/number-format/parse-decimal.js
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Flags: --intl-extra
-
-var nf = new Intl.NumberFormat(['en'], {style: 'decimal'});
-
-assertEquals(123.43, nf.v8Parse('123.43'));
-assertEquals(123, nf.v8Parse('123'));
-assertEquals(NaN, nf.v8Parse(NaN));
-assertEquals(12323, nf.v8Parse('12,323'));
-assertEquals(12323, nf.v8Parse('12323'));
-assertEquals(12323.456, nf.v8Parse('12,323.456'));
-assertEquals(12323.456, nf.v8Parse('000000012323.456'));
-assertEquals(12323.456, nf.v8Parse('000,000,012,323.456'));
-assertEquals(-12323.456, nf.v8Parse('-12,323.456'));
-
-assertEquals(12323, nf.v8Parse('000000012323'));
-assertEquals(12323, nf.v8Parse('000,000,012,323'));
-assertEquals(undefined, nf.v8Parse('000000012,323.456'));
-
-// not tolerant of a misplaced thousand separator
-assertEquals(undefined, nf.v8Parse('123,23.456'));
-assertEquals(undefined, nf.v8Parse('0000000123,23.456'));
-assertEquals(undefined, nf.v8Parse('-123,23.456'));
-
-// Scientific notation is supported.
-assertEquals(0.123456, nf.v8Parse('123.456e-3'));
diff --git a/deps/v8/test/intl/number-format/parse-invalid-input.js b/deps/v8/test/intl/number-format/parse-invalid-input.js
deleted file mode 100644
index 251b52a5e7..0000000000
--- a/deps/v8/test/intl/number-format/parse-invalid-input.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --intl-extra
-
-// Invalid input is handled properly.
-
-var nf = new Intl.NumberFormat(['en']);
-
-assertEquals(undefined, nf.v8Parse(''));
-assertEquals(undefined, nf.v8Parse('A'));
-assertEquals(undefined, nf.v8Parse(new Date()));
-assertEquals(undefined, nf.v8Parse(undefined));
-assertEquals(undefined, nf.v8Parse(null));
-assertEquals(undefined, nf.v8Parse());
-assertEquals(undefined, nf.v8Parse('Text before 12345'));
diff --git a/deps/v8/test/intl/number-format/parse-percent.js b/deps/v8/test/intl/number-format/parse-percent.js
deleted file mode 100644
index 0dc36d3f0b..0000000000
--- a/deps/v8/test/intl/number-format/parse-percent.js
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --intl-extra
-
-var nf = new Intl.NumberFormat(['en'], {style: 'percent'});
-
-assertEquals(1.2343, nf.v8Parse('123.43%'));
-assertEquals(1.23, nf.v8Parse('123%'));
-assertEquals(NaN, nf.v8Parse(NaN));
-assertEquals(123.23, nf.v8Parse('12,323%'));
-assertEquals(123.23456, nf.v8Parse('12,323.456%'));
-assertEquals(123.23456, nf.v8Parse('000000012323.456%'));
-assertEquals(-123.23456, nf.v8Parse('-12,323.456%'));
-
-// Not tolerant of misplaced group separators.
-assertEquals(undefined, nf.v8Parse('123,23%'));
-assertEquals(undefined, nf.v8Parse('123,23.456%'));
-assertEquals(undefined, nf.v8Parse('0000000123,23.456%'));
-assertEquals(undefined, nf.v8Parse('-123,23.456%'));
-assertEquals(undefined, nf.v8Parse('0000000123,23.456%'));
-assertEquals(undefined, nf.v8Parse('-123,23.456%'));
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index 52bff011db..a88746b10c 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -1,8 +1,8 @@
{
"name": "JSTests",
- "run_count": 5,
- "run_count_android_arm": 3,
- "run_count_android_arm64": 3,
+ "run_count": 3,
+ "run_count_android_arm": 1,
+ "run_count_android_arm64": 1,
"timeout": 120,
"units": "score",
"total": true,
@@ -45,7 +45,6 @@
"path": ["RestParameters"],
"main": "run.js",
"resources": ["rest.js"],
- "run_count": 5,
"units": "score",
"results_regexp": "^%s\\-RestParameters\\(Score\\): (.+)$",
"tests": [
@@ -57,7 +56,6 @@
"path": ["SpreadCalls"],
"main": "run.js",
"resources": ["spreadcalls.js"],
- "run_count": 5,
"units": "score",
"results_regexp": "^%s\\-SpreadCalls\\(Score\\): (.+)$",
"tests": [
@@ -119,16 +117,35 @@
"main": "run.js",
"resources": ["harmony-string.js"],
"results_regexp": "^%s\\-Strings\\(Score\\): (.+)$",
+ "run_count": 1,
+ "timeout": 240,
"tests": [
{"name": "StringFunctions"}
]
},
{
+ "name": "StringIterators",
+ "path": ["StringIterators"],
+ "main": "run.js",
+ "resources": ["string-iterator.js"],
+ "results_regexp": "^%s\\-StringIterators\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Spread_OneByteShort"},
+ {"name": "Spread_TwoByteShort"},
+ {"name": "Spread_WithSurrogatePairsShort"},
+ {"name": "ForOf_OneByteShort"},
+ {"name": "ForOf_TwoByteShort"},
+ {"name": "ForOf_WithSurrogatePairsShort"},
+ {"name": "ForOf_OneByteLong"},
+ {"name": "ForOf_TwoByteLong"},
+ {"name": "ForOf_WithSurrogatePairsLong"}
+ ]
+ },
+ {
"name": "Templates",
"path": ["Templates"],
"main": "run.js",
"resources": ["templates.js"],
- "run_count": 5,
"units": "score",
"results_regexp": "^%s\\-Templates\\(Score\\): (.+)$",
"total": true,
@@ -183,9 +200,6 @@
"main": "run.js",
"resources": ["keys.js"],
"results_regexp": "^%s\\-Keys\\(Score\\): (.+)$",
- "run_count": 3,
- "run_count_android_arm": 2,
- "run_count_android_arm64": 2,
"tests": [
{"name": "Object.keys()"},
{"name": "for-in"},
diff --git a/deps/v8/test/js-perf-test/Object/ObjectTests.json b/deps/v8/test/js-perf-test/Object/ObjectTests.json
index 1c0e5ed2ee..47b1bf9f4c 100644
--- a/deps/v8/test/js-perf-test/Object/ObjectTests.json
+++ b/deps/v8/test/js-perf-test/Object/ObjectTests.json
@@ -15,12 +15,14 @@
"flags": ["--harmony"],
"resources": [
"assign.js",
- "values.js",
- "entries.js"
+ "create.js",
+ "entries.js",
+ "values.js"
],
"results_regexp": "^%s\\-Object\\(Score\\): (.+)$",
"tests": [
{"name": "Assign"},
+ {"name": "Create"},
{"name": "Entries"},
{"name": "EntriesMegamorphic"},
{"name": "Values"},
diff --git a/deps/v8/test/js-perf-test/Object/create.js b/deps/v8/test/js-perf-test/Object/create.js
new file mode 100644
index 0000000000..49ebe5c1bf
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Object/create.js
@@ -0,0 +1,70 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ----------------------------------------------------------------------------
+
+function shallowClone(object) {
+ return Object.create(object.__proto__,
+ Object.getOwnPropertyDescriptors(object));
+}
+
+function makeSlowCopy(object) {
+ object = shallowClone(object);
+ object.__foo__ = 1;
+ delete object.__foo__;
+ return object;
+}
+
+function convertToPropertyDescriptors(dict) {
+ for (var key in dict) {
+ var propertiesObject = dict[key];
+ dict[key] = Object.getOwnPropertyDescriptors(propertiesObject);
+ }
+ return dict;
+}
+
+var properties_5 = { a:1, b:2, c:3, d:4, e:5 };
+var TEST_PROPERTIES = convertToPropertyDescriptors({
+ empty: {},
+ array_5: [1, 2, 3, 4, 5],
+ properties_5: properties_5,
+ properties_10: { a:1, b:2, c:3, d:4, e:5, f:6, g:7, h:8, i:9, j:10 },
+ properties_dict: makeSlowCopy(properties_5)
+});
+
+var TEST_PROTOTYPES = {
+ null: null,
+ empty: {},
+ 'Object.prototype': Object.prototype,
+ 'Array.prototype': Array.prototype
+};
+
+// ----------------------------------------------------------------------------
+
+var testFunction = () => {
+ return Object.create(prototype, properties);
+}
+
+function createTestFunction(prototype, properties) {
+ // Force a new function for each test-object to avoid side-effects due to ICs.
+ var random_comment = "\n// random comment" + Math.random() + "\n";
+ return eval(random_comment + testFunction.toString());
+}
+
+// ----------------------------------------------------------------------------
+
+var benchmarks = []
+
+for (var proto_name in TEST_PROTOTYPES) {
+ var prototype = TEST_PROTOTYPES[proto_name];
+ for (var prop_name in TEST_PROPERTIES) {
+ var properties = TEST_PROPERTIES[prop_name];
+ var name = 'Create proto:' + proto_name + " properties:" + prop_name;
+ benchmarks.push(
+ new Benchmark(name, false, false, 0,
+ createTestFunction(prototype, properties)));
+ }
+}
+
+new BenchmarkSuite('Create', [1000], benchmarks);
diff --git a/deps/v8/test/js-perf-test/Object/run.js b/deps/v8/test/js-perf-test/Object/run.js
index f25bee4572..0dc65a8e57 100644
--- a/deps/v8/test/js-perf-test/Object/run.js
+++ b/deps/v8/test/js-perf-test/Object/run.js
@@ -6,8 +6,9 @@
load('../base.js');
load('assign.js');
-load('values.js');
+load('create.js');
load('entries.js');
+load('values.js');
var success = true;
diff --git a/deps/v8/test/js-perf-test/StringIterators/run.js b/deps/v8/test/js-perf-test/StringIterators/run.js
new file mode 100644
index 0000000000..4f9f2dd30d
--- /dev/null
+++ b/deps/v8/test/js-perf-test/StringIterators/run.js
@@ -0,0 +1,27 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+load('../base.js');
+load('string-iterator.js');
+
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-StringIterators(Score): ' + result);
+}
+
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/js-perf-test/StringIterators/string-iterator.js b/deps/v8/test/js-perf-test/StringIterators/string-iterator.js
new file mode 100644
index 0000000000..c55925415b
--- /dev/null
+++ b/deps/v8/test/js-perf-test/StringIterators/string-iterator.js
@@ -0,0 +1,239 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function assert(expression, message) {
+ if (typeof expression === "string" && message === void 0) {
+ message = expression;
+ expression = eval(expression);
+ }
+
+ if (!expression) {
+ var lines = ["Benchmark Error"];
+ if (message !== void 0) {
+ lines = ["Benchmark Error:", String(message)];
+ }
+ throw new Error(lines.join("\n"));
+ }
+ return true;
+}
+
+assert.same = function(expected, actual, message) {
+ var isSame =
+ expected === actual || typeof expected !== expected && actual !== actual;
+ if (!isSame) {
+ var details = `Expected: ${String(expected)}\n` +
+ `But found: ${String(actual)}`;
+ var lines = ["Benchmark Error:", details];
+ if (message !== void 0) {
+ lines = ["Benchmark Error:", details, "", String(message)];
+ }
+ throw new Error(lines.join("\n"));
+ }
+ return true;
+}
+
+new BenchmarkSuite('Spread_OneByteShort', [1000], [
+ new Benchmark('test', false, false, 0,
+ Spread_OneByteShort, Spread_OneByteShortSetup,
+ Spread_OneByteShortTearDown),
+]);
+
+var result;
+var string;
+function Spread_OneByteShortSetup() {
+ result = undefined;
+ string = "Alphabet-Soup";
+}
+
+function Spread_OneByteShort() {
+ result = [...string];
+}
+
+function Spread_OneByteShortTearDown() {
+ var expected = "A|l|p|h|a|b|e|t|-|S|o|u|p";
+ return assert("Array.isArray(result)")
+ && assert.same(expected, result.join("|"));
+}
+
+// ----------------------------------------------------------------------------
+
+new BenchmarkSuite('Spread_TwoByteShort', [1000], [
+ new Benchmark('test', false, false, 0,
+ Spread_TwoByteShort, Spread_TwoByteShortSetup,
+ Spread_TwoByteShortTearDown),
+]);
+
+function Spread_TwoByteShortSetup() {
+ result = undefined;
+ string = "\u5FCD\u8005\u306E\u653B\u6483";
+}
+
+function Spread_TwoByteShort() {
+ result = [...string];
+}
+
+function Spread_TwoByteShortTearDown() {
+ var expected = "\u5FCD|\u8005|\u306E|\u653B|\u6483";
+ return assert("Array.isArray(result)")
+ && assert.same(expected, result.join("|"));
+}
+
+// ----------------------------------------------------------------------------
+
+new BenchmarkSuite('Spread_WithSurrogatePairsShort', [1000], [
+ new Benchmark('test', false, false, 0,
+ Spread_WithSurrogatePairsShort,
+ Spread_WithSurrogatePairsShortSetup,
+ Spread_WithSurrogatePairsShortTearDown),
+]);
+
+function Spread_WithSurrogatePairsShortSetup() {
+ result = undefined;
+ string = "\uD83C\uDF1F\u5FCD\u8005\u306E\u653B\u6483\uD83C\uDF1F";
+}
+
+function Spread_WithSurrogatePairsShort() {
+ result = [...string];
+}
+
+function Spread_WithSurrogatePairsShortTearDown() {
+ var expected =
+ "\uD83C\uDF1F|\u5FCD|\u8005|\u306E|\u653B|\u6483|\uD83C\uDF1F";
+ return assert("Array.isArray(result)")
+ && assert.same(expected, result.join("|"));
+}
+
+// ----------------------------------------------------------------------------
+
+new BenchmarkSuite('ForOf_OneByteShort', [1000], [
+ new Benchmark('test', false, false, 0,
+ ForOf_OneByteShort, ForOf_OneByteShortSetup,
+ ForOf_OneByteShortTearDown),
+]);
+
+function ForOf_OneByteShortSetup() {
+ result = undefined;
+ string = "Alphabet-Soup";
+}
+
+function ForOf_OneByteShort() {
+ result = "";
+ for (var c of string) result += c;
+}
+
+function ForOf_OneByteShortTearDown() {
+ return assert.same(string, result);
+}
+
+// ----------------------------------------------------------------------------
+
+new BenchmarkSuite('ForOf_TwoByteShort', [1000], [
+ new Benchmark('test', false, false, 0,
+ ForOf_TwoByteShort, ForOf_TwoByteShortSetup,
+ ForOf_TwoByteShortTearDown),
+]);
+
+function ForOf_TwoByteShortSetup() {
+ result = undefined;
+ string = "\u5FCD\u8005\u306E\u653B\u6483";
+}
+
+function ForOf_TwoByteShort() {
+ result = "";
+ for (var c of string) result += c;
+}
+
+function ForOf_TwoByteShortTearDown() {
+ return assert.same(string, result);
+}
+
+// ----------------------------------------------------------------------------
+
+new BenchmarkSuite('ForOf_WithSurrogatePairsShort', [1000], [
+ new Benchmark('test', false, false, 0,
+ ForOf_WithSurrogatePairsShort,
+ ForOf_WithSurrogatePairsShortSetup,
+ ForOf_WithSurrogatePairsShortTearDown),
+]);
+
+function ForOf_WithSurrogatePairsShortSetup() {
+ result = undefined;
+ string = "\uD83C\uDF1F\u5FCD\u8005\u306E\u653B\u6483\uD83C\uDF1F";
+}
+
+function ForOf_WithSurrogatePairsShort() {
+ result = "";
+ for (var c of string) result += c;
+}
+
+function ForOf_WithSurrogatePairsShortTearDown() {
+ return assert.same(string, result);
+}
+
+// ----------------------------------------------------------------------------
+
+new BenchmarkSuite('ForOf_OneByteLong', [1000], [
+ new Benchmark('test', false, false, 0,
+ ForOf_OneByteLong, ForOf_OneByteLongSetup,
+ ForOf_OneByteLongTearDown),
+]);
+
+function ForOf_OneByteLongSetup() {
+ result = undefined;
+ string = "Alphabet-Soup|".repeat(128);
+}
+
+function ForOf_OneByteLong() {
+ result = "";
+ for (var c of string) result += c;
+}
+
+function ForOf_OneByteLongTearDown() {
+ return assert.same(string, result);
+}
+
+// ----------------------------------------------------------------------------
+
+new BenchmarkSuite('ForOf_TwoByteLong', [1000], [
+ new Benchmark('test', false, false, 0,
+ ForOf_OneByteLong, ForOf_OneByteLongSetup,
+ ForOf_OneByteLongTearDown),
+]);
+
+function ForOf_TwoByteLongSetup() {
+ result = undefined;
+ string = "\u5FCD\u8005\u306E\u653B\u6483".repeat(128);
+}
+
+function ForOf_TwoByteLong() {
+ result = "";
+ for (var c of string) result += c;
+}
+
+function ForOf_TwoByteLongTearDown() {
+ return assert.same(string, result);
+}
+
+// ----------------------------------------------------------------------------
+
+new BenchmarkSuite('ForOf_WithSurrogatePairsLong', [1000], [
+ new Benchmark('test', false, false, 0,
+ ForOf_WithSurrogatePairsLong, ForOf_WithSurrogatePairsLongSetup,
+ ForOf_WithSurrogatePairsLongTearDown),
+]);
+
+function ForOf_WithSurrogatePairsLongSetup() {
+ result = undefined;
+ string = "\uD83C\uDF1F\u5FCD\u8005\u306E\u653B\u6483\uD83C\uDF1F|"
+ .repeat(128);
+}
+
+function ForOf_WithSurrogatePairsLong() {
+ result = "";
+ for (var c of string) result += c;
+}
+
+function ForOf_WithSurrogatePairsLongTearDown() {
+ return assert.same(string, result);
+}
diff --git a/deps/v8/test/message/arrow-invalid-rest-2.out b/deps/v8/test/message/arrow-invalid-rest-2.out
index 0196483a66..ad6bcb034d 100644
--- a/deps/v8/test/message/arrow-invalid-rest-2.out
+++ b/deps/v8/test/message/arrow-invalid-rest-2.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Rest parameter must be an identifier or destructuring pattern
+*%(basename)s:7: SyntaxError: Unexpected token =
var f = (a, ...x = 10) => x;
- ^^^^^^^^^
-SyntaxError: Rest parameter must be an identifier or destructuring pattern
+ ^
+SyntaxError: Unexpected token =
diff --git a/deps/v8/test/message/arrow-invalid-rest.out b/deps/v8/test/message/arrow-invalid-rest.out
index 4045f14e78..99a8557f1e 100644
--- a/deps/v8/test/message/arrow-invalid-rest.out
+++ b/deps/v8/test/message/arrow-invalid-rest.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Rest parameter must be an identifier or destructuring pattern
+*%(basename)s:7: SyntaxError: Unexpected token =
var f = (...x = 10) => x;
- ^^^^^^^^^
-SyntaxError: Rest parameter must be an identifier or destructuring pattern
+ ^
+SyntaxError: Unexpected token =
diff --git a/deps/v8/test/message/export-duplicate-as.js b/deps/v8/test/message/export-duplicate-as.js
index 49b52d4b17..416180b093 100644
--- a/deps/v8/test/message/export-duplicate-as.js
+++ b/deps/v8/test/message/export-duplicate-as.js
@@ -4,6 +4,6 @@
//
// MODULE
-var a, b;
+var a, b, c;
export { a as c };
-export { a, b as c };
+export { a, b as c, c, b };
diff --git a/deps/v8/test/message/export-duplicate-as.out b/deps/v8/test/message/export-duplicate-as.out
index 1726d9491a..729de8a904 100644
--- a/deps/v8/test/message/export-duplicate-as.out
+++ b/deps/v8/test/message/export-duplicate-as.out
@@ -2,6 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
*%(basename)s:9: SyntaxError: Duplicate export of 'c'
-export { a, b as c };
- ^
+export { a, b as c, c, b };
+ ^
SyntaxError: Duplicate export of 'c'
diff --git a/deps/v8/test/message/export-duplicate-default.js b/deps/v8/test/message/export-duplicate-default.js
index 72a54a45f4..de1a8807c1 100644
--- a/deps/v8/test/message/export-duplicate-default.js
+++ b/deps/v8/test/message/export-duplicate-default.js
@@ -5,4 +5,5 @@
// MODULE
export default function f() {};
+export default 42;
export default class C {};
diff --git a/deps/v8/test/message/export-duplicate-default.out b/deps/v8/test/message/export-duplicate-default.out
index 4c6b97a7a1..685e289891 100644
--- a/deps/v8/test/message/export-duplicate-default.out
+++ b/deps/v8/test/message/export-duplicate-default.out
@@ -1,7 +1,7 @@
# Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:8: SyntaxError: Duplicate export of 'default'
+*%(basename)s:9: SyntaxError: Duplicate export of 'default'
export default class C {};
^^^^^^^
SyntaxError: Duplicate export of 'default'
diff --git a/deps/v8/test/message/export-duplicate.js b/deps/v8/test/message/export-duplicate.js
index f45aefe13f..93011f0c1c 100644
--- a/deps/v8/test/message/export-duplicate.js
+++ b/deps/v8/test/message/export-duplicate.js
@@ -4,6 +4,7 @@
//
// MODULE
-var a, b;
+var a, b, c;
export { a };
export { a, b };
+export { b, c };
diff --git a/deps/v8/test/message/export-duplicate.out b/deps/v8/test/message/export-duplicate.out
index e88779f580..9811cb122c 100644
--- a/deps/v8/test/message/export-duplicate.out
+++ b/deps/v8/test/message/export-duplicate.out
@@ -1,7 +1,7 @@
# Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:9: SyntaxError: Duplicate export of 'a'
-export { a, b };
+*%(basename)s:10: SyntaxError: Duplicate export of 'b'
+export { b, c };
^
-SyntaxError: Duplicate export of 'a'
+SyntaxError: Duplicate export of 'b'
diff --git a/deps/v8/test/message/syntactic-tail-call-in-binop-lhs.out b/deps/v8/test/message/syntactic-tail-call-in-binop-lhs.out
deleted file mode 100644
index 14670cd280..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-binop-lhs.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
- return continue f() - a ;
- ^^^^^^^^^^^^
-SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-binop-rhs.out b/deps/v8/test/message/syntactic-tail-call-in-binop-rhs.out
deleted file mode 100644
index 207c526e2f..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-binop-rhs.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
- return b + continue f() ;
- ^^^^^^^^^^^^
-SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-comma.out b/deps/v8/test/message/syntactic-tail-call-in-comma.out
deleted file mode 100644
index c4ecc28c97..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-comma.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
- return 1, 2, 3, continue f() , 4 ;
- ^^^^^^^^^^^^
-SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-extends.out b/deps/v8/test/message/syntactic-tail-call-in-extends.out
deleted file mode 100644
index f54155d2b5..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-extends.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:9: SyntaxError: Tail call expression is not allowed here
- return class A extends continue f() {};
- ^^^^^^^^^^^^
-SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-for-in.out b/deps/v8/test/message/syntactic-tail-call-in-for-in.out
deleted file mode 100644
index 1bf52c48cb..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-for-in.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:14: SyntaxError: Tail call expression in for-in/of body
- return continue f() ;
- ^^^^^^^^^^^^
-SyntaxError: Tail call expression in for-in/of body
diff --git a/deps/v8/test/message/syntactic-tail-call-in-for-of.js b/deps/v8/test/message/syntactic-tail-call-in-for-of.js
deleted file mode 100644
index 7cd761f38e..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-for-of.js
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-explicit-tailcalls
-"use strict";
-
-function f() {
- return 1;
-}
-
-function g() {
- for (var v of [1, 2, 3]) {
- return continue f() ;
- }
-}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-for-of.out b/deps/v8/test/message/syntactic-tail-call-in-for-of.out
deleted file mode 100644
index 1bf52c48cb..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-for-of.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:14: SyntaxError: Tail call expression in for-in/of body
- return continue f() ;
- ^^^^^^^^^^^^
-SyntaxError: Tail call expression in for-in/of body
diff --git a/deps/v8/test/message/syntactic-tail-call-in-logical-and.out b/deps/v8/test/message/syntactic-tail-call-in-logical-and.out
deleted file mode 100644
index c400f74628..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-logical-and.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
- return continue f() && a ;
- ^^^^^^^^^^^^
-SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-logical-or.js b/deps/v8/test/message/syntactic-tail-call-in-logical-or.js
deleted file mode 100644
index 6829bc629a..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-logical-or.js
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-explicit-tailcalls
-"use strict";
-
-function f() {
- return 1;
-}
-
-function g() {
- return continue f() || a ;
-}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-logical-or.out b/deps/v8/test/message/syntactic-tail-call-in-logical-or.out
deleted file mode 100644
index 4ced76118a..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-logical-or.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
- return continue f() || a ;
- ^^^^^^^^^^^^
-SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-subclass.out b/deps/v8/test/message/syntactic-tail-call-in-subclass.out
deleted file mode 100644
index fff26cc59b..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-subclass.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:12: SyntaxError: Tail call expression is not allowed here
- return continue f() ;
- ^^^^^^^^^^^^
-SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.out b/deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.out
deleted file mode 100644
index b488c15af2..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:16: SyntaxError: Tail call expression in catch block when finally block is also present
- return continue f() ;
- ^^^^^^^^^^^^
-SyntaxError: Tail call expression in catch block when finally block is also present
diff --git a/deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.out b/deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.out
deleted file mode 100644
index bfc2692a27..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:17: SyntaxError: Tail call expression in try block
- return continue f() ;
- ^^^^^^^^^^^^
-SyntaxError: Tail call expression in try block
diff --git a/deps/v8/test/message/syntactic-tail-call-in-try.js b/deps/v8/test/message/syntactic-tail-call-in-try.js
deleted file mode 100644
index 71662db877..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-try.js
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-explicit-tailcalls
-"use strict";
-
-function f() {
- return 1;
-}
-
-function g() {
- try {
- return continue f() ;
- } catch(e) {
- }
-}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-try.out b/deps/v8/test/message/syntactic-tail-call-in-try.out
deleted file mode 100644
index ed0b15cc61..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-in-try.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:14: SyntaxError: Tail call expression in try block
- return continue f() ;
- ^^^^^^^^^^^^
-SyntaxError: Tail call expression in try block
diff --git a/deps/v8/test/message/syntactic-tail-call-inside-member-expr.js b/deps/v8/test/message/syntactic-tail-call-inside-member-expr.js
deleted file mode 100644
index 9b85dd42bb..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-inside-member-expr.js
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-explicit-tailcalls
-"use strict";
-
-function f() {
- return 1;
-}
-
-function g() {
- return (continue f(1)) (2) ;
-}
diff --git a/deps/v8/test/message/syntactic-tail-call-inside-member-expr.out b/deps/v8/test/message/syntactic-tail-call-inside-member-expr.out
deleted file mode 100644
index 10fd54db5d..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-inside-member-expr.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
- return (continue f(1)) (2) ;
- ^^^^^^^^^^^^^^
-SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-of-eval.out b/deps/v8/test/message/syntactic-tail-call-of-eval.out
deleted file mode 100644
index 06eeb78baf..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-of-eval.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:8: SyntaxError: Tail call of a direct eval is not allowed
- return continue eval ("f()") ;
- ^^^^^^^^^^^^^
-SyntaxError: Tail call of a direct eval is not allowed
diff --git a/deps/v8/test/message/syntactic-tail-call-of-identifier.out b/deps/v8/test/message/syntactic-tail-call-of-identifier.out
deleted file mode 100644
index 393bbc657c..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-of-identifier.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:9: SyntaxError: Unexpected expression inside tail call
- return continue x ;
- ^
-SyntaxError: Unexpected expression inside tail call
diff --git a/deps/v8/test/message/syntactic-tail-call-of-new.out b/deps/v8/test/message/syntactic-tail-call-of-new.out
deleted file mode 100644
index 954e1ca0ec..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-of-new.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:12: SyntaxError: Unexpected expression inside tail call
- return continue new f() ;
- ^^^^^^^
-SyntaxError: Unexpected expression inside tail call
diff --git a/deps/v8/test/message/syntactic-tail-call-sloppy.out b/deps/v8/test/message/syntactic-tail-call-sloppy.out
deleted file mode 100644
index 74d9d53bb4..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-sloppy.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:8: SyntaxError: Tail call expressions are not allowed in non-strict mode
- return continue f() ;
- ^^^^^^^^^^^^^
-SyntaxError: Tail call expressions are not allowed in non-strict mode
diff --git a/deps/v8/test/message/syntactic-tail-call-without-return.out b/deps/v8/test/message/syntactic-tail-call-without-return.out
deleted file mode 100644
index 0508fc3378..0000000000
--- a/deps/v8/test/message/syntactic-tail-call-without-return.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
- var x = continue f() ;
- ^^^^^^^^^^^^^
-SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/mjsunit/array-indexing-receiver.js b/deps/v8/test/mjsunit/array-indexing-receiver.js
index d5f5a7692d..5d1dbf3e56 100644
--- a/deps/v8/test/mjsunit/array-indexing-receiver.js
+++ b/deps/v8/test/mjsunit/array-indexing-receiver.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --expose-gc
// Ensure `Array.prototype.indexOf` functions correctly for numerous elements
// kinds, and various exotic receiver types,
@@ -107,7 +107,7 @@ var kTests = {
DICTIONARY_ELEMENTS() {
var array = [];
- Object.defineProperty(array, 4, { get() { return NaN; } });
+ Object.defineProperty(array, 4, { get() { gc(); return NaN; } });
Object.defineProperty(array, 7, { value: Function });
assertTrue(%HasDictionaryElements(array));
@@ -226,7 +226,7 @@ var kTests = {
DICTIONARY_ELEMENTS() {
var object = { length: 8 };
- Object.defineProperty(object, 4, { get() { return NaN; } });
+ Object.defineProperty(object, 4, { get() { gc(); return NaN; } });
Object.defineProperty(object, 7, { value: Function });
assertTrue(%HasDictionaryElements(object));
@@ -244,8 +244,10 @@ var kTests = {
return {
__proto__: {},
get 0() {
+ gc();
this.__proto__.__proto__ = {
get 1() {
+ gc();
this[2] = "c";
return "b";
}
@@ -313,7 +315,7 @@ var kTests = {
SLOW_SLOPPY_ARGUMENTS_ELEMENTS() {
var args = (function(a, a) { return arguments; })("foo", NaN, "bar");
- Object.defineProperty(args, 3, { get() { return "silver"; } });
+ Object.defineProperty(args, 3, { get() { gc(); return "silver"; } });
Object.defineProperty(args, "length", { value: 4 });
assertTrue(%HasSloppyArgumentsElements(args));
diff --git a/deps/v8/test/mjsunit/compiler/dead-string-char-from-code.js b/deps/v8/test/mjsunit/compiler/dead-string-char-from-code.js
deleted file mode 100644
index 1de5d9e44b..0000000000
--- a/deps/v8/test/mjsunit/compiler/dead-string-char-from-code.js
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-function dead1(a, b) {
- var x = %_StringCharFromCode(a);
- return a; // x is dead code
-}
-
-function dead2(a, b) {
- var x = %_StringCharFromCode(a);
- var y = %_StringCharFromCode(b);
- return a; // x and y are both dead
-}
-
-function dead3(a, b) {
- a = a ? 11 : 12;
- b = b ? 13 : 14;
- var x = %_StringCharFromCode(a);
- var y = %_StringCharFromCode(b);
- return a; // x and y are both dead
-}
-
-function test() {
- assertEquals(33, dead1(33, 32));
- assertEquals(33, dead2(33, 32));
- assertEquals(11, dead3(33, 32));
-
- assertEquals(31, dead1(31, 30));
- assertEquals(31, dead2(31, 30));
- assertEquals(11, dead3(31, 32));
-
- assertEquals(0, dead1(0, 30));
- assertEquals(0, dead2(0, 30));
- assertEquals(12, dead3(0, 32));
-
- assertEquals(true, dead1(true, 0));
- assertEquals(true, dead2(true, 0));
- assertEquals(11, dead3(true, 0));
-
- assertEquals("true", dead1("true", 0));
- assertEquals("true", dead2("true", 0));
- assertEquals(11, dead3("true", 0));
-}
-
-test();
-test();
-%OptimizeFunctionOnNextCall(dead1);
-%OptimizeFunctionOnNextCall(dead2);
-%OptimizeFunctionOnNextCall(dead3);
-test();
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-materialize.js b/deps/v8/test/mjsunit/compiler/escape-analysis-materialize.js
new file mode 100644
index 0000000000..e72797d823
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-materialize.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo --turbo-escape
+
+(function TestMaterializeArray() {
+ function f() {
+ var a = [1,2,3];
+ %_DeoptimizeNow();
+ return a.length;
+ }
+ assertEquals(3, f());
+ assertEquals(3, f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(3, f());
+})();
+
+(function TestMaterializeFunction() {
+ function g() {
+ function fun(a, b) {}
+ %_DeoptimizeNow();
+ return fun.length;
+ }
+ assertEquals(2, g());
+ assertEquals(2, g());
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(2, g());
+})();
diff --git a/deps/v8/test/mjsunit/compiler/inline-exception-1.js b/deps/v8/test/mjsunit/compiler/inline-exception-1.js
new file mode 100644
index 0000000000..ac259afad7
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/inline-exception-1.js
@@ -0,0 +1,2219 @@
+// Shard 1.
+
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo --no-always-opt
+
+// This test file was generated by tools/gen-inlining-tests.py .
+
+// Global variables
+var deopt = undefined; // either true or false
+var counter = 0;
+
+function resetState() {
+ counter = 0;
+}
+
+function warmUp(f) {
+ try {
+ f();
+ } catch (ex) {
+ // ok
+ }
+ try {
+ f();
+ } catch (ex) {
+ // ok
+ }
+}
+
+function resetOptAndAssertResultEquals(expected, f) {
+ warmUp(f);
+ resetState();
+ // %DebugPrint(f);
+ eval("'dont optimize this function itself please, but do optimize f'");
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(expected, f());
+}
+
+function resetOptAndAssertThrowsWith(expected, f) {
+ warmUp(f);
+ resetState();
+ // %DebugPrint(f);
+ eval("'dont optimize this function itself please, but do optimize f'");
+ %OptimizeFunctionOnNextCall(f);
+ try {
+ var result = f();
+ fail("resetOptAndAssertThrowsWith",
+ "exception: " + expected,
+ "result: " + result);
+ } catch (ex) {
+ assertEquals(expected, ex);
+ }
+}
+
+function increaseAndReturn15() {
+ if (deopt) %DeoptimizeFunction(f);
+ counter++;
+ return 15;
+}
+
+function increaseAndThrow42() {
+ if (deopt) %DeoptimizeFunction(f);
+ counter++;
+ throw 42;
+}
+
+function increaseAndReturn15_noopt_inner() {
+ if (deopt) %DeoptimizeFunction(f);
+ counter++;
+ return 15;
+}
+
+%NeverOptimizeFunction(increaseAndReturn15_noopt_inner);
+
+function increaseAndThrow42_noopt_inner() {
+ if (deopt) %DeoptimizeFunction(f);
+ counter++;
+ throw 42;
+}
+
+%NeverOptimizeFunction(increaseAndThrow42_noopt_inner);
+
+// Alternative 1
+
+function returnOrThrow(doReturn) {
+ if (doReturn) {
+ return increaseAndReturn15();
+ } else {
+ return increaseAndThrow42();
+ }
+}
+
+// Alternative 2
+
+function increaseAndReturn15_calls_noopt() {
+ return increaseAndReturn15_noopt_inner();
+}
+
+function increaseAndThrow42_calls_noopt() {
+ return increaseAndThrow42_noopt_inner();
+}
+
+// Alternative 3.
+// When passed either {increaseAndReturn15} or {increaseAndThrow42}, it acts
+// as the other one.
+function invertFunctionCall(f) {
+ var result;
+ try {
+ result = f();
+ } catch (ex) {
+ return ex - 27;
+ }
+ throw result + 27;
+}
+
+// Alternative 4: constructor
+function increaseAndStore15Constructor() {
+ if (deopt) %DeoptimizeFunction(f);
+ ++counter;
+ this.x = 15;
+}
+
+function increaseAndThrow42Constructor() {
+ if (deopt) %DeoptimizeFunction(f);
+ ++counter;
+ this.x = 42;
+ throw this.x;
+}
+
+// Alternative 5: property
+var magic = {};
+Object.defineProperty(magic, 'prop', {
+ get: function () {
+ if (deopt) %DeoptimizeFunction(f);
+ return 15 + 0 * ++counter;
+ },
+
+ set: function(x) {
+ // argument should be 37
+ if (deopt) %DeoptimizeFunction(f);
+ counter -= 36 - x; // increments counter
+ throw 42;
+ }
+})
+
+// Generate type feedback.
+
+assertEquals(15, increaseAndReturn15_calls_noopt());
+assertThrowsEquals(function() { return increaseAndThrow42_noopt_inner() }, 42);
+
+assertEquals(15, (new increaseAndStore15Constructor()).x);
+assertThrowsEquals(function() {
+ return (new increaseAndThrow42Constructor()).x;
+ },
+ 42);
+
+function runThisShard() {
+
+ // Variant flags: [tryReturns, doFinally]
+
+ f = function f_______r______f____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryReturns, doFinally, finallyThrows]
+
+ f = function f_______r______f_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryReturns, doFinally, finallyReturns]
+
+ f = function f_______r______fr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryReturns, doCatch]
+
+ f = function f_______r__c________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [tryReturns, doCatch, deopt]
+
+ f = function f_______r__c_______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [tryReturns, doCatch, doFinally]
+
+ f = function f_______r__c___f____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryReturns, doCatch, doFinally, finallyThrows]
+
+ f = function f_______r__c___f_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryReturns, doCatch, doFinally, finallyReturns]
+
+ f = function f_______r__c___fr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryReturns, doCatch, catchThrows]
+
+ f = function f_______r__c__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [tryReturns, doCatch, catchThrows, deopt]
+
+ f = function f_______r__c__t____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [tryReturns, doCatch, catchThrows, doFinally]
+
+ f = function f_______r__c__tf____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryReturns, doCatch, catchThrows, doFinally,
+ // finallyThrows]
+
+ f = function f_______r__c__tf_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryReturns, doCatch, catchThrows, doFinally,
+ // finallyReturns]
+
+ f = function f_______r__c__tfr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryReturns, doCatch, catchReturns]
+
+ f = function f_______r__cr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [tryReturns, doCatch, catchReturns, deopt]
+
+ f = function f_______r__cr______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [tryReturns, doCatch, catchReturns, doFinally]
+
+ f = function f_______r__cr__f____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryReturns, doCatch, catchReturns, doFinally,
+ // finallyThrows]
+
+ f = function f_______r__cr__f_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryReturns, doCatch, catchReturns, doFinally,
+ // finallyReturns]
+
+ f = function f_______r__cr__fr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, doFinally]
+
+ f = function f______t_______f____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(42, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, doFinally, finallyThrows]
+
+ f = function f______t_______f_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, doFinally, finallyReturns]
+
+ f = function f______t_______fr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, doCatch]
+
+ f = function f______t___c________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [tryThrows, doCatch, deopt]
+
+ f = function f______t___c_______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [tryThrows, doCatch, doFinally]
+
+ f = function f______t___c___f____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(7, counter);
+
+ // Variant flags: [tryThrows, doCatch, doFinally, finallyThrows]
+
+ f = function f______t___c___f_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [tryThrows, doCatch, doFinally, finallyReturns]
+
+ f = function f______t___c___fr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [tryThrows, doCatch, catchThrows]
+
+ f = function f______t___c__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, doCatch, catchThrows, deopt]
+
+ f = function f______t___c__t____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, doCatch, catchThrows, doFinally]
+
+ f = function f______t___c__tf____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [tryThrows, doCatch, catchThrows, doFinally,
+ // finallyThrows]
+
+ f = function f______t___c__tf_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, doCatch, catchThrows, doFinally,
+ // finallyReturns]
+
+ f = function f______t___c__tfr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, doCatch, catchReturns]
+
+ f = function f______t___cr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, doCatch, catchReturns, deopt]
+
+ f = function f______t___cr______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, doCatch, catchReturns, doFinally]
+
+ f = function f______t___cr__f____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [tryThrows, doCatch, catchReturns, doFinally,
+ // finallyThrows]
+
+ f = function f______t___cr__f_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, doCatch, catchReturns, doFinally,
+ // finallyReturns]
+
+ f = function f______t___cr__fr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doFinally]
+
+ f = function f______tr______f____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(42, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doFinally, finallyThrows]
+
+ f = function f______tr______f_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doFinally, finallyReturns]
+
+ f = function f______tr______fr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doCatch]
+
+ f = function f______tr__c________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doCatch, doFinally]
+
+ f = function f______tr__c___f____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(7, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doCatch, doFinally,
+ // finallyThrows]
+
+ f = function f______tr__c___f_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doCatch, doFinally,
+ // finallyReturns]
+
+ f = function f______tr__c___fr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doCatch, catchThrows]
+
+ f = function f______tr__c__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doCatch, catchThrows,
+ // doFinally]
+
+ f = function f______tr__c__tf____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doCatch, catchThrows,
+ // doFinally, finallyThrows]
+
+ f = function f______tr__c__tf_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doCatch, catchThrows,
+ // doFinally, finallyReturns]
+
+ f = function f______tr__c__tfr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doCatch, catchReturns]
+
+ f = function f______tr__cr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doCatch, catchReturns,
+ // doFinally]
+
+ f = function f______tr__cr__f____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doCatch, catchReturns,
+ // doFinally, finallyThrows]
+
+ f = function f______tr__cr__f_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, tryReturns, doCatch, catchReturns,
+ // doFinally, finallyReturns]
+
+ f = function f______tr__cr__fr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42();
+ return 4 + increaseAndReturn15();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns,
+ // doFinally]
+
+ f = function f______trf_____f____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns,
+ // doFinally, finallyThrows]
+
+ f = function f______trf_____f_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns,
+ // doFinally, finallyReturns]
+
+ f = function f______trf_____fr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns, doCatch]
+
+ f = function f______trf_c________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns, doCatch,
+ // doFinally]
+
+ f = function f______trf_c___f____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns, doCatch,
+ // doFinally, finallyThrows]
+
+ f = function f______trf_c___f_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns, doCatch,
+ // doFinally, finallyReturns]
+
+ f = function f______trf_c___fr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns, doCatch,
+ // catchThrows]
+
+ f = function f______trf_c__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns, doCatch,
+ // catchThrows, doFinally]
+
+ f = function f______trf_c__tf____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns, doCatch,
+ // catchThrows, doFinally, finallyThrows]
+
+ f = function f______trf_c__tf_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns, doCatch,
+ // catchThrows, doFinally, finallyReturns]
+
+ f = function f______trf_c__tfr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns, doCatch,
+ // catchReturns]
+
+ f = function f______trf_cr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns, doCatch,
+ // catchReturns, doFinally]
+
+ f = function f______trf_cr__f____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ local += 2;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns, doCatch,
+ // catchReturns, doFinally, finallyThrows]
+
+ f = function f______trf_cr__f_t__ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ throw 25;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(25, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [tryThrows, tryReturns, tryFirstReturns, doCatch,
+ // catchReturns, doFinally, finallyReturns]
+
+ f = function f______trf_cr__fr___ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15();
+ return 4 + increaseAndThrow42();
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ } finally {
+ counter++;
+ return 3 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(891, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn1, tryReturns, doCatch]
+
+ f = function f_____1_r__c________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + returnOrThrow(true);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn1, tryReturns, doCatch, deopt]
+
+ f = function f_____1_r__c_______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + returnOrThrow(true);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn1, tryReturns, doCatch, catchThrows]
+
+ f = function f_____1_r__c__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + returnOrThrow(true);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn1, tryReturns, doCatch, catchThrows,
+ // deopt]
+
+ f = function f_____1_r__c__t____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + returnOrThrow(true);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn1, tryReturns, doCatch,
+ // catchReturns]
+
+ f = function f_____1_r__cr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + returnOrThrow(true);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn1, tryReturns, doCatch,
+ // catchReturns, deopt]
+
+ f = function f_____1_r__cr______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + returnOrThrow(true);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn1, tryThrows, doCatch]
+
+ f = function f_____1t___c________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + returnOrThrow(false);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn1, tryThrows, doCatch, deopt]
+
+ f = function f_____1t___c_______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + returnOrThrow(false);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn1, tryThrows, doCatch, catchThrows]
+
+ f = function f_____1t___c__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + returnOrThrow(false);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn1, tryThrows, doCatch, catchThrows,
+ // deopt]
+
+ f = function f_____1t___c__t____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + returnOrThrow(false);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn1, tryThrows, doCatch, catchReturns]
+
+ f = function f_____1t___cr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + returnOrThrow(false);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn1, tryThrows, doCatch, catchReturns,
+ // deopt]
+
+ f = function f_____1t___cr______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + returnOrThrow(false);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn2, tryReturns, doCatch,
+ // catchWithLocal, endReturnLocal, deopt]
+
+ f = function f____2__r__c_l____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + increaseAndReturn15_calls_noopt();
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn2, tryReturns, tryResultToLocal,
+ // doCatch, endReturnLocal, deopt]
+
+ f = function f____2__r_lc______ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + increaseAndReturn15_calls_noopt();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn2, tryReturns, tryResultToLocal,
+ // doCatch, catchWithLocal, endReturnLocal, deopt]
+
+ f = function f____2__r_lc_l____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + increaseAndReturn15_calls_noopt();
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn2, tryThrows, doCatch,
+ // catchWithLocal, endReturnLocal, deopt]
+
+ f = function f____2_t___c_l____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + increaseAndThrow42_calls_noopt();
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(935, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn2, tryThrows, tryResultToLocal,
+ // doCatch, endReturnLocal, deopt]
+
+ f = function f____2_t__lc______ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + increaseAndThrow42_calls_noopt();
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(893, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn2, tryThrows, tryResultToLocal,
+ // doCatch, catchWithLocal, endReturnLocal, deopt]
+
+ f = function f____2_t__lc_l____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + increaseAndThrow42_calls_noopt();
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(935, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch]
+
+ f = function f___3___r__c________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch, deopt]
+
+ f = function f___3___r__c_______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch, catchThrows]
+
+ f = function f___3___r__c__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch, catchThrows,
+ // deopt]
+
+ f = function f___3___r__c__t____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchWithLocal]
+
+ f = function f___3___r__c_l______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchWithLocal, deopt]
+
+ f = function f___3___r__c_l_____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchWithLocal, endReturnLocal]
+
+ f = function f___3___r__c_l____l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchWithLocal, endReturnLocal, deopt]
+
+ f = function f___3___r__c_l____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchWithLocal, catchThrows]
+
+ f = function f___3___r__c_lt_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchWithLocal, catchThrows, deopt]
+
+ f = function f___3___r__c_lt____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchWithLocal, catchThrows, endReturnLocal]
+
+ f = function f___3___r__c_lt___l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchWithLocal, catchThrows, endReturnLocal, deopt]
+
+ f = function f___3___r__c_lt___ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchReturns]
+
+ f = function f___3___r__cr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+}
+%NeverOptimizeFunction(runThisShard);
+
+// 97 tests in this shard.
+// 97 tests up to here.
+
+runThisShard();
diff --git a/deps/v8/test/mjsunit/compiler/inline-exception-2.js b/deps/v8/test/mjsunit/compiler/inline-exception-2.js
new file mode 100644
index 0000000000..89523cc6b2
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/inline-exception-2.js
@@ -0,0 +1,2063 @@
+// Shard 2.
+
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo --no-always-opt
+
+// This test file was generated by tools/gen-inlining-tests.py .
+
+// Global variables
+var deopt = undefined; // either true or false
+var counter = 0;
+
+function resetState() {
+ counter = 0;
+}
+
+function warmUp(f) {
+ try {
+ f();
+ } catch (ex) {
+ // ok
+ }
+ try {
+ f();
+ } catch (ex) {
+ // ok
+ }
+}
+
+function resetOptAndAssertResultEquals(expected, f) {
+ warmUp(f);
+ resetState();
+ // %DebugPrint(f);
+ eval("'dont optimize this function itself please, but do optimize f'");
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(expected, f());
+}
+
+function resetOptAndAssertThrowsWith(expected, f) {
+ warmUp(f);
+ resetState();
+ // %DebugPrint(f);
+ eval("'dont optimize this function itself please, but do optimize f'");
+ %OptimizeFunctionOnNextCall(f);
+ try {
+ var result = f();
+ fail("resetOptAndAssertThrowsWith",
+ "exception: " + expected,
+ "result: " + result);
+ } catch (ex) {
+ assertEquals(expected, ex);
+ }
+}
+
+function increaseAndReturn15() {
+ if (deopt) %DeoptimizeFunction(f);
+ counter++;
+ return 15;
+}
+
+function increaseAndThrow42() {
+ if (deopt) %DeoptimizeFunction(f);
+ counter++;
+ throw 42;
+}
+
+function increaseAndReturn15_noopt_inner() {
+ if (deopt) %DeoptimizeFunction(f);
+ counter++;
+ return 15;
+}
+
+%NeverOptimizeFunction(increaseAndReturn15_noopt_inner);
+
+function increaseAndThrow42_noopt_inner() {
+ if (deopt) %DeoptimizeFunction(f);
+ counter++;
+ throw 42;
+}
+
+%NeverOptimizeFunction(increaseAndThrow42_noopt_inner);
+
+// Alternative 1
+
+function returnOrThrow(doReturn) {
+ if (doReturn) {
+ return increaseAndReturn15();
+ } else {
+ return increaseAndThrow42();
+ }
+}
+
+// Alternative 2
+
+function increaseAndReturn15_calls_noopt() {
+ return increaseAndReturn15_noopt_inner();
+}
+
+function increaseAndThrow42_calls_noopt() {
+ return increaseAndThrow42_noopt_inner();
+}
+
+// Alternative 3.
+// When passed either {increaseAndReturn15} or {increaseAndThrow42}, it acts
+// as the other one.
+function invertFunctionCall(f) {
+ var result;
+ try {
+ result = f();
+ } catch (ex) {
+ return ex - 27;
+ }
+ throw result + 27;
+}
+
+// Alternative 4: constructor
+function increaseAndStore15Constructor() {
+ if (deopt) %DeoptimizeFunction(f);
+ ++counter;
+ this.x = 15;
+}
+
+function increaseAndThrow42Constructor() {
+ if (deopt) %DeoptimizeFunction(f);
+ ++counter;
+ this.x = 42;
+ throw this.x;
+}
+
+// Alternative 5: property
+var magic = {};
+Object.defineProperty(magic, 'prop', {
+ get: function () {
+ if (deopt) %DeoptimizeFunction(f);
+ return 15 + 0 * ++counter;
+ },
+
+ set: function(x) {
+ // argument should be 37
+ if (deopt) %DeoptimizeFunction(f);
+ counter -= 36 - x; // increments counter
+ throw 42;
+ }
+})
+
+// Generate type feedback.
+
+assertEquals(15, increaseAndReturn15_calls_noopt());
+assertThrowsEquals(function() { return increaseAndThrow42_noopt_inner() }, 42);
+
+assertEquals(15, (new increaseAndStore15Constructor()).x);
+assertThrowsEquals(function() {
+ return (new increaseAndThrow42Constructor()).x;
+ },
+ 42);
+
+function runThisShard() {
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchReturns, deopt]
+
+ f = function f___3___r__cr______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchReturns, catchWithLocal]
+
+ f = function f___3___r__crl______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchReturns, catchWithLocal, deopt]
+
+ f = function f___3___r__crl_____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchReturns, catchWithLocal, endReturnLocal]
+
+ f = function f___3___r__crl____l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, doCatch,
+ // catchReturns, catchWithLocal, endReturnLocal, deopt]
+
+ f = function f___3___r__crl____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch]
+
+ f = function f___3___r_lc________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, deopt]
+
+ f = function f___3___r_lc_______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, endReturnLocal]
+
+ f = function f___3___r_lc______l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, endReturnLocal, deopt]
+
+ f = function f___3___r_lc______ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchThrows]
+
+ f = function f___3___r_lc__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchThrows, deopt]
+
+ f = function f___3___r_lc__t____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchThrows, endReturnLocal]
+
+ f = function f___3___r_lc__t___l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchThrows, endReturnLocal, deopt]
+
+ f = function f___3___r_lc__t___ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchWithLocal]
+
+ f = function f___3___r_lc_l______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchWithLocal, deopt]
+
+ f = function f___3___r_lc_l_____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchWithLocal, endReturnLocal]
+
+ f = function f___3___r_lc_l____l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchWithLocal, endReturnLocal, deopt]
+
+ f = function f___3___r_lc_l____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchWithLocal, catchThrows]
+
+ f = function f___3___r_lc_lt_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchWithLocal, catchThrows, deopt]
+
+ f = function f___3___r_lc_lt____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchWithLocal, catchThrows, endReturnLocal]
+
+ f = function f___3___r_lc_lt___l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchWithLocal, catchThrows, endReturnLocal, deopt]
+
+ f = function f___3___r_lc_lt___ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchReturns]
+
+ f = function f___3___r_lcr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchReturns, deopt]
+
+ f = function f___3___r_lcr______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchReturns, endReturnLocal]
+
+ f = function f___3___r_lcr_____l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchReturns, endReturnLocal, deopt]
+
+ f = function f___3___r_lcr_____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchReturns, catchWithLocal]
+
+ f = function f___3___r_lcrl______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchReturns, catchWithLocal, deopt]
+
+ f = function f___3___r_lcrl_____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchReturns, catchWithLocal, endReturnLocal]
+
+ f = function f___3___r_lcrl____l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryReturns, tryResultToLocal,
+ // doCatch, catchReturns, catchWithLocal, endReturnLocal, deopt]
+
+ f = function f___3___r_lcrl____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndThrow42);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(912, f);
+ assertEquals(4, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch]
+
+ f = function f___3__t___c________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch, deopt]
+
+ f = function f___3__t___c_______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch, catchThrows]
+
+ f = function f___3__t___c__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch, catchThrows,
+ // deopt]
+
+ f = function f___3__t___c__t____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch,
+ // catchWithLocal]
+
+ f = function f___3__t___c_l______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch,
+ // catchWithLocal, deopt]
+
+ f = function f___3__t___c_l_____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch,
+ // catchWithLocal, endReturnLocal]
+
+ f = function f___3__t___c_l____l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(935, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch,
+ // catchWithLocal, endReturnLocal, deopt]
+
+ f = function f___3__t___c_l____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(935, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch,
+ // catchWithLocal, catchThrows]
+
+ f = function f___3__t___c_lt_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch,
+ // catchWithLocal, catchThrows, deopt]
+
+ f = function f___3__t___c_lt____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch,
+ // catchWithLocal, catchThrows, endReturnLocal]
+
+ f = function f___3__t___c_lt___l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch,
+ // catchWithLocal, catchThrows, endReturnLocal, deopt]
+
+ f = function f___3__t___c_lt___ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch, catchReturns]
+
+ f = function f___3__t___cr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch, catchReturns,
+ // deopt]
+
+ f = function f___3__t___cr______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch, catchReturns,
+ // catchWithLocal]
+
+ f = function f___3__t___crl______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(890, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch, catchReturns,
+ // catchWithLocal, deopt]
+
+ f = function f___3__t___crl_____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(890, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch, catchReturns,
+ // catchWithLocal, endReturnLocal]
+
+ f = function f___3__t___crl____l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(890, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, doCatch, catchReturns,
+ // catchWithLocal, endReturnLocal, deopt]
+
+ f = function f___3__t___crl____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(890, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch]
+
+ f = function f___3__t__lc________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, deopt]
+
+ f = function f___3__t__lc_______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, endReturnLocal]
+
+ f = function f___3__t__lc______l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(893, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, endReturnLocal, deopt]
+
+ f = function f___3__t__lc______ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(893, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchThrows]
+
+ f = function f___3__t__lc__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchThrows, deopt]
+
+ f = function f___3__t__lc__t____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchThrows, endReturnLocal]
+
+ f = function f___3__t__lc__t___l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchThrows, endReturnLocal, deopt]
+
+ f = function f___3__t__lc__t___ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchWithLocal]
+
+ f = function f___3__t__lc_l______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchWithLocal, deopt]
+
+ f = function f___3__t__lc_l_____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchWithLocal, endReturnLocal]
+
+ f = function f___3__t__lc_l____l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(935, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchWithLocal, endReturnLocal, deopt]
+
+ f = function f___3__t__lc_l____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ local += ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(935, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchWithLocal, catchThrows]
+
+ f = function f___3__t__lc_lt_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchWithLocal, catchThrows, deopt]
+
+ f = function f___3__t__lc_lt____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchWithLocal, catchThrows, endReturnLocal]
+
+ f = function f___3__t__lc_lt___l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchWithLocal, catchThrows, endReturnLocal, deopt]
+
+ f = function f___3__t__lc_lt___ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchReturns]
+
+ f = function f___3__t__lcr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchReturns, deopt]
+
+ f = function f___3__t__lcr______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchReturns, endReturnLocal]
+
+ f = function f___3__t__lcr_____l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchReturns, endReturnLocal, deopt]
+
+ f = function f___3__t__lcr_____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchReturns, catchWithLocal]
+
+ f = function f___3__t__lcrl______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(890, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchReturns, catchWithLocal, deopt]
+
+ f = function f___3__t__lcrl_____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(890, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchReturns, catchWithLocal, endReturnLocal]
+
+ f = function f___3__t__lcrl____l_ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(890, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn3, tryThrows, tryResultToLocal,
+ // doCatch, catchReturns, catchWithLocal, endReturnLocal, deopt]
+
+ f = function f___3__t__lcrl____ld () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ local += 4 + invertFunctionCall(increaseAndReturn15);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + local;
+ counter++;
+ }
+ counter++;
+ return 5 + local;
+ }
+ resetOptAndAssertResultEquals(890, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn4, tryReturns, doCatch]
+
+ f = function f__4____r__c________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + (new increaseAndStore15Constructor()).x;
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn4, tryReturns, doCatch, deopt]
+
+ f = function f__4____r__c_______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + (new increaseAndStore15Constructor()).x;
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn4, tryReturns, doCatch, catchThrows]
+
+ f = function f__4____r__c__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + (new increaseAndStore15Constructor()).x;
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn4, tryReturns, doCatch, catchThrows,
+ // deopt]
+
+ f = function f__4____r__c__t____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + (new increaseAndStore15Constructor()).x;
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn4, tryReturns, doCatch,
+ // catchReturns]
+
+ f = function f__4____r__cr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + (new increaseAndStore15Constructor()).x;
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn4, tryReturns, doCatch,
+ // catchReturns, deopt]
+
+ f = function f__4____r__cr______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + (new increaseAndStore15Constructor()).x;
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn4, tryThrows, doCatch]
+
+ f = function f__4___t___c________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + (new increaseAndThrow42Constructor()).x;
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn4, tryThrows, doCatch, deopt]
+
+ f = function f__4___t___c_______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + (new increaseAndThrow42Constructor()).x;
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn4, tryThrows, doCatch, catchThrows]
+
+ f = function f__4___t___c__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + (new increaseAndThrow42Constructor()).x;
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn4, tryThrows, doCatch, catchThrows,
+ // deopt]
+
+ f = function f__4___t___c__t____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + (new increaseAndThrow42Constructor()).x;
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn4, tryThrows, doCatch, catchReturns]
+
+ f = function f__4___t___cr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + (new increaseAndThrow42Constructor()).x;
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn4, tryThrows, doCatch, catchReturns,
+ // deopt]
+
+ f = function f__4___t___cr______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + (new increaseAndThrow42Constructor()).x;
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn5, tryReturns, doCatch]
+
+ f = function f_5_____r__c________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + magic.prop /* returns 15 */;
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn5, tryReturns, doCatch, deopt]
+
+ f = function f_5_____r__c_______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + magic.prop /* returns 15 */;
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn5, tryReturns, doCatch, catchThrows]
+
+ f = function f_5_____r__c__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + magic.prop /* returns 15 */;
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn5, tryReturns, doCatch, catchThrows,
+ // deopt]
+
+ f = function f_5_____r__c__t____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + magic.prop /* returns 15 */;
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn5, tryReturns, doCatch,
+ // catchReturns]
+
+ f = function f_5_____r__cr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + magic.prop /* returns 15 */;
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn5, tryReturns, doCatch,
+ // catchReturns, deopt]
+
+ f = function f_5_____r__cr______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + magic.prop /* returns 15 */;
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(19, f);
+ assertEquals(2, counter);
+
+ // Variant flags: [alternativeFn5, tryThrows, doCatch]
+
+ f = function f_5____t___c________ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + (magic.prop = 37 /* throws 42 */);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn5, tryThrows, doCatch, deopt]
+
+ f = function f_5____t___c_______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + (magic.prop = 37 /* throws 42 */);
+ counter++;
+ } catch (ex) {
+ counter++;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(undefined, f);
+ assertEquals(5, counter);
+
+ // Variant flags: [alternativeFn5, tryThrows, doCatch, catchThrows]
+
+ f = function f_5____t___c__t_____ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + (magic.prop = 37 /* throws 42 */);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn5, tryThrows, doCatch, catchThrows,
+ // deopt]
+
+ f = function f_5____t___c__t____d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + (magic.prop = 37 /* throws 42 */);
+ counter++;
+ } catch (ex) {
+ counter++;
+ throw 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertThrowsWith(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn5, tryThrows, doCatch, catchReturns]
+
+ f = function f_5____t___cr_______ () {
+ var local = 888;
+ deopt = false;
+ try {
+ counter++;
+ return 4 + (magic.prop = 37 /* throws 42 */);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+ // Variant flags: [alternativeFn5, tryThrows, doCatch, catchReturns,
+ // deopt]
+
+ f = function f_5____t___cr______d () {
+ var local = 888;
+ deopt = true;
+ try {
+ counter++;
+ return 4 + (magic.prop = 37 /* throws 42 */);
+ counter++;
+ } catch (ex) {
+ counter++;
+ return 2 + ex;
+ counter++;
+ }
+ counter++;
+ }
+ resetOptAndAssertResultEquals(44, f);
+ assertEquals(3, counter);
+
+}
+%NeverOptimizeFunction(runThisShard);
+
+// 95 tests in this shard.
+// 192 tests up to here.
+
+runThisShard();
diff --git a/deps/v8/test/mjsunit/compiler/instanceof.js b/deps/v8/test/mjsunit/compiler/instanceof.js
new file mode 100644
index 0000000000..cb88e7c284
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/instanceof.js
@@ -0,0 +1,133 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function A() {}
+var a = new A();
+
+var B = {
+ [Symbol.hasInstance](o) {
+ return false;
+ }
+};
+%ToFastProperties(B.__proto__);
+
+var C = Object.create({
+ [Symbol.hasInstance](o) {
+ return true;
+ }
+});
+%ToFastProperties(C.__proto__);
+
+var D = Object.create({
+ [Symbol.hasInstance](o) {
+ return o === a;
+ }
+});
+%ToFastProperties(D.__proto__);
+
+var E = Object.create({
+ [Symbol.hasInstance](o) {
+ if (o === a) throw o;
+ return true;
+ }
+});
+%ToFastProperties(E.__proto__);
+
+function F() {}
+F.__proto__ = null;
+
+(function() {
+ function foo(o) { return o instanceof A; }
+
+ assertTrue(foo(a));
+ assertTrue(foo(a));
+ assertTrue(foo(new A()));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(a));
+ assertTrue(foo(new A()));
+})();
+
+(function() {
+ function foo(o) {
+ try {
+ return o instanceof A;
+ } catch (e) {
+ return e;
+ }
+ }
+
+ assertTrue(foo(a));
+ assertTrue(foo(a));
+ assertTrue(foo(new A()));
+ assertEquals(1, foo(new Proxy({}, {getPrototypeOf() { throw 1; }})));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(a));
+ assertTrue(foo(new A()));
+ assertEquals(1, foo(new Proxy({}, {getPrototypeOf() { throw 1; }})));
+})();
+
+(function() {
+ function foo(o) { return o instanceof B; }
+
+ assertFalse(foo(a));
+ assertFalse(foo(a));
+ assertFalse(foo(new A()));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(a));
+ assertFalse(foo(new A()));
+})();
+
+(function() {
+ function foo(o) { return o instanceof C; }
+
+ assertTrue(foo(a));
+ assertTrue(foo(a));
+ assertTrue(foo(new A()));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(a));
+ assertTrue(foo(new A()));
+})();
+
+(function() {
+ function foo(o) { return o instanceof D; }
+
+ assertTrue(foo(a));
+ assertTrue(foo(a));
+ assertFalse(foo(new A()));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(a));
+ assertFalse(foo(new A()));
+})();
+
+(function() {
+ function foo(o) {
+ try {
+ return o instanceof E;
+ } catch (e) {
+ return false;
+ }
+ }
+
+ assertFalse(foo(a));
+ assertTrue(foo(new A()));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(a));
+ assertTrue(foo(new A()));
+})();
+
+(function() {
+ function foo(o) {
+ return o instanceof F;
+ }
+
+ assertFalse(foo(a));
+ assertFalse(foo(new A()));
+ assertTrue(foo(new F()));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFalse(foo(a));
+ assertFalse(foo(new A()));
+ assertTrue(foo(new F()));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-isfinite.js b/deps/v8/test/mjsunit/compiler/number-isfinite.js
new file mode 100644
index 0000000000..689e31cf37
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-isfinite.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function test(f) {
+ assertTrue(f(0));
+ assertTrue(f(Number.MIN_VALUE));
+ assertTrue(f(Number.MAX_VALUE));
+ assertTrue(f(Number.MIN_SAFE_INTEGER));
+ assertTrue(f(Number.MIN_SAFE_INTEGER - 13));
+ assertTrue(f(Number.MAX_SAFE_INTEGER));
+ assertTrue(f(Number.MAX_SAFE_INTEGER + 23));
+ assertFalse(f(Number.NaN));
+ assertFalse(f(Number.POSITIVE_INFINITY));
+ assertFalse(f(Number.NEGATIVE_INFINITY));
+ assertFalse(f(1 / 0));
+ assertFalse(f(-1 / 0));
+}
+
+function f(x) {
+ return Number.isFinite(+x);
+}
+
+test(f);
+test(f);
+%OptimizeFunctionOnNextCall(f);
+test(f);
diff --git a/deps/v8/test/mjsunit/compiler/number-isinteger.js b/deps/v8/test/mjsunit/compiler/number-isinteger.js
new file mode 100644
index 0000000000..8999569bb0
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-isinteger.js
@@ -0,0 +1,30 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function test(f) {
+ assertTrue(f(0));
+ assertFalse(f(Number.MIN_VALUE));
+ assertTrue(f(Number.MAX_VALUE));
+ assertTrue(f(Number.MIN_SAFE_INTEGER));
+ assertTrue(f(Number.MIN_SAFE_INTEGER - 13));
+ assertTrue(f(Number.MAX_SAFE_INTEGER));
+ assertTrue(f(Number.MAX_SAFE_INTEGER + 23));
+ assertFalse(f(Number.NaN));
+ assertFalse(f(Number.POSITIVE_INFINITY));
+ assertFalse(f(Number.NEGATIVE_INFINITY));
+ assertFalse(f(1 / 0));
+ assertFalse(f(-1 / 0));
+ assertFalse(f(Number.EPSILON));
+}
+
+function f(x) {
+ return Number.isInteger(+x);
+}
+
+test(f);
+test(f);
+%OptimizeFunctionOnNextCall(f);
+test(f);
diff --git a/deps/v8/test/mjsunit/compiler/number-isnan.js b/deps/v8/test/mjsunit/compiler/number-isnan.js
new file mode 100644
index 0000000000..fb6bb6d741
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-isnan.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function test(f) {
+ assertFalse(f(0));
+ assertFalse(f(Number.MIN_VALUE));
+ assertFalse(f(Number.MAX_VALUE));
+ assertFalse(f(Number.MIN_SAFE_INTEGER - 13));
+ assertFalse(f(Number.MAX_SAFE_INTEGER + 23));
+ assertTrue(f(Number.NaN));
+ assertFalse(f(Number.POSITIVE_INFINITY));
+ assertFalse(f(Number.NEGATIVE_INFINITY));
+ assertFalse(f(Number.EPSILON));
+ assertFalse(f(1 / 0));
+ assertFalse(f(-1 / 0));
+}
+
+function f(x) {
+ return Number.isNaN(+x);
+}
+
+test(f);
+test(f);
+%OptimizeFunctionOnNextCall(f);
+test(f);
diff --git a/deps/v8/test/mjsunit/compiler/number-issafeinteger.js b/deps/v8/test/mjsunit/compiler/number-issafeinteger.js
new file mode 100644
index 0000000000..192fb6c124
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-issafeinteger.js
@@ -0,0 +1,50 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function test(f) {
+ assertTrue(f(0));
+ assertTrue(f(Number.MIN_SAFE_INTEGER));
+ assertFalse(f(Number.MIN_SAFE_INTEGER - 13));
+ assertTrue(f(Number.MIN_SAFE_INTEGER + 13));
+ assertTrue(f(Number.MAX_SAFE_INTEGER));
+ assertFalse(f(Number.MAX_SAFE_INTEGER + 23));
+ assertTrue(f(Number.MAX_SAFE_INTEGER - 23));
+ assertFalse(f(Number.MIN_VALUE));
+ assertFalse(f(Number.MAX_VALUE));
+ assertFalse(f(Number.NaN));
+ assertFalse(f(Number.POSITIVE_INFINITY));
+ assertFalse(f(Number.NEGATIVE_INFINITY));
+ assertFalse(f(1 / 0));
+ assertFalse(f(-1 / 0));
+ assertFalse(f(Number.EPSILON));
+
+ var near_upper = Math.pow(2, 52);
+ assertTrue(f(near_upper));
+ assertFalse(f(2 * near_upper));
+ assertTrue(f(2 * near_upper - 1));
+ assertTrue(f(2 * near_upper - 2));
+ assertFalse(f(2 * near_upper + 1));
+ assertFalse(f(2 * near_upper + 2));
+ assertFalse(f(2 * near_upper + 7));
+
+ var near_lower = -near_upper;
+ assertTrue(f(near_lower));
+ assertFalse(f(2 * near_lower));
+ assertTrue(f(2 * near_lower + 1));
+ assertTrue(f(2 * near_lower + 2));
+ assertFalse(f(2 * near_lower - 1));
+ assertFalse(f(2 * near_lower - 2));
+ assertFalse(f(2 * near_lower - 7));
+}
+
+function f(x) {
+ return Number.isSafeInteger(+x);
+}
+
+test(f);
+test(f);
+%OptimizeFunctionOnNextCall(f);
+test(f);
diff --git a/deps/v8/test/intl/number-format/parse-currency.js b/deps/v8/test/mjsunit/compiler/regress-5320.js
index a57128ea60..2e30a7b4f5 100644
--- a/deps/v8/test/intl/number-format/parse-currency.js
+++ b/deps/v8/test/mjsunit/compiler/regress-5320.js
@@ -1,4 +1,4 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,11 +25,28 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Currency parsing is not yet supported. We need ICU49 or higher to get
-// it working.
+// Flags: --allow-natives-syntax
-// Flags: --intl-extra
+function OptimizeTruncatingBinaryOp(func) {
+ func(42, -2);
+ func(31, undefined);
+ %BaselineFunctionOnNextCall(func);
+ func(42, -2);
+ func(31, undefined);
+ %OptimizeFunctionOnNextCall(func);
+ func(-1, 2.1);
+ assertOptimized(func);
+}
-var nf = new Intl.NumberFormat(['en'], {style: 'currency', currency: 'USD'});
-
-assertEquals(undefined, nf.v8Parse('USD 123.43'));
+// SAR
+OptimizeTruncatingBinaryOp(function(a, b) { return a >> b; });
+// SHR
+OptimizeTruncatingBinaryOp(function(a, b) { return a >>> b; });
+// SHL
+OptimizeTruncatingBinaryOp(function(a, b) { return a << b; });
+// BIT_AND
+OptimizeTruncatingBinaryOp(function(a, b) { return a & b; });
+// BIT_OR
+OptimizeTruncatingBinaryOp(function(a, b) { return a | b; });
+// BIT_XOR
+OptimizeTruncatingBinaryOp(function(a, b) { return a ^ b; });
diff --git a/deps/v8/test/message/syntactic-tail-call-of-identifier.js b/deps/v8/test/mjsunit/compiler/regress-625966.js
index b3ca31df01..187596e4af 100644
--- a/deps/v8/test/message/syntactic-tail-call-of-identifier.js
+++ b/deps/v8/test/mjsunit/compiler/regress-625966.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-explicit-tailcalls
"use strict";
-
-function g(x) {
- return continue x ;
+var s = "";
+for (var i = 0; i < 65535; i++) {
+ s += ("var a" + i + ";");
}
+eval(s);
diff --git a/deps/v8/test/mjsunit/compiler/regress-626986.js b/deps/v8/test/mjsunit/compiler/regress-626986.js
new file mode 100644
index 0000000000..5e02918423
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-626986.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g() {
+ return 42;
+}
+
+var o = {};
+
+function f(o, x) {
+ o.f = x;
+}
+
+f(o, g);
+f(o, g);
+f(o, g);
+assertEquals(42, o.f());
+%OptimizeFunctionOnNextCall(f);
+f(o, function() { return 0; });
+assertEquals(0, o.f());
diff --git a/deps/v8/test/mjsunit/compiler/regress-638132.js b/deps/v8/test/mjsunit/compiler/regress-638132.js
new file mode 100644
index 0000000000..1b94feb125
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-638132.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g(x, y) {
+ return x | y;
+}
+
+function f(b) {
+ if (b) {
+ var s = g("a", "b") && true;
+ return s;
+ }
+}
+
+// Prime function g with Smi feedback.
+g(1, 2);
+g(1, 2);
+
+f(0);
+f(0);
+%OptimizeFunctionOnNextCall(f);
+// Compile inlined function g with string inputs but Smi feedback.
+f(1);
diff --git a/deps/v8/test/mjsunit/compiler/regress-639210.js b/deps/v8/test/mjsunit/compiler/regress-639210.js
new file mode 100644
index 0000000000..50303fb9d6
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-639210.js
@@ -0,0 +1,38 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var m = (function m() {
+ "use asm"
+ var i32 = new Int32Array(4);
+ var f64 = new Float64Array(4);
+
+ function init() {
+ i32[0] = 1;
+ f64[0] = 0.1;
+ }
+
+ function load(b) {
+ return (b ? 0 : i32[0]) + i32[0];
+ }
+
+ function store(b) {
+ if (b|0) {
+ } else {
+ f64[0] = 42;
+ }
+ return f64[0];
+ }
+
+ return { init : init, load : load, store : store };
+})();
+
+m.init();
+
+%OptimizeFunctionOnNextCall(m.load);
+assertEquals(2, m.load());
+
+%OptimizeFunctionOnNextCall(m.store);
+assertEquals(0.1, m.store(1));
diff --git a/deps/v8/test/mjsunit/compiler/regress-644048.js b/deps/v8/test/mjsunit/compiler/regress-644048.js
new file mode 100644
index 0000000000..ee2dd6edef
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-644048.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) {
+ (x
+ ? (!0 / 0)
+ : x) | 0
+}
+
+foo(1);
+foo(2);
+%OptimizeFunctionOnNextCall(foo);
+foo(3);
diff --git a/deps/v8/test/message/syntactic-tail-call-of-new.js b/deps/v8/test/mjsunit/compiler/regress-644633.js
index 60adec7027..5087fd77fc 100644
--- a/deps/v8/test/message/syntactic-tail-call-of-new.js
+++ b/deps/v8/test/mjsunit/compiler/regress-644633.js
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-explicit-tailcalls
+var g = -1073741824;
function f() {
- return 1;
+ var x = g*g*g*g*g*g*g;
+ for (var i = g; i < 1; ) {
+ i += i * x;
+ }
}
-function g() {
- return continue new f() ;
-}
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-645851.js b/deps/v8/test/mjsunit/compiler/regress-645851.js
new file mode 100644
index 0000000000..0ea70bd71e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-645851.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ var sum = 0;
+ while (1) {
+ for (var j = 0; j < 200; j -= j) {
+ sum = sum + 1;
+ %OptimizeOsr();
+ if (sum == 2) return;
+ }
+ }
+ return sum;
+}
+
+f();
diff --git a/deps/v8/test/message/syntactic-tail-call-in-for-in.js b/deps/v8/test/mjsunit/compiler/regress-650215.js
index 8ad7aca54a..95ae6cfed1 100644
--- a/deps/v8/test/message/syntactic-tail-call-in-for-in.js
+++ b/deps/v8/test/mjsunit/compiler/regress-650215.js
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-explicit-tailcalls
-"use strict";
+// Flags: --allow-natives-syntax
function f() {
- return 1;
-}
-
-function g() {
- for (var v in {a:0}) {
- return continue f() ;
+ var x = 0;
+ for (var i = 0; i < 10; i++) {
+ x = (2 % x) | 0;
+ if (i === 5) %OptimizeOsr();
}
+ return x;
}
+
+assertEquals(0, f());
diff --git a/deps/v8/test/mjsunit/compiler/regress-compare-negate.js b/deps/v8/test/mjsunit/compiler/regress-compare-negate.js
new file mode 100644
index 0000000000..72b210b1e4
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-compare-negate.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo
+
+function CompareNegate(a,b) {
+ a = a|0;
+ b = b|0;
+ var sub = 0 - b;
+ return a < (sub|0);
+}
+
+var x = CompareNegate(1,0x80000000);
+%OptimizeFunctionOnNextCall(CompareNegate);
+CompareNegate(1,0x80000000);
+assertOptimized(CompareNegate);
+assertEquals(x, CompareNegate(1,0x80000000));
diff --git a/deps/v8/test/mjsunit/compiler/regress-escape-analysis-indirect.js b/deps/v8/test/mjsunit/compiler/regress-escape-analysis-indirect.js
new file mode 100644
index 0000000000..6d79a93133
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-escape-analysis-indirect.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo --turbo-escape
+
+function f(apply) {
+ var value = 23;
+ apply(function bogeyman() { value = 42 });
+ return value;
+}
+function apply(fun) { fun() }
+assertEquals(42, f(apply));
+assertEquals(42, f(apply));
+%NeverOptimizeFunction(apply);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(42, f(apply));
diff --git a/deps/v8/test/message/syntactic-tail-call-in-comma.js b/deps/v8/test/mjsunit/compiler/regress-math-sign-nan-type.js
index 402a4a8f7d..e16eba8c5a 100644
--- a/deps/v8/test/message/syntactic-tail-call-in-comma.js
+++ b/deps/v8/test/mjsunit/compiler/regress-math-sign-nan-type.js
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-explicit-tailcalls
-"use strict";
+// Flags: --allow-natives-syntax
-function f() {
- return 1;
+function f(a) {
+ return Math.sign(+a) < 2;
}
-function g() {
- return 1, 2, 3, continue f() , 4 ;
-}
+f(NaN);
+f(NaN);
+%OptimizeFunctionOnNextCall(f);
+assertFalse(f(NaN));
diff --git a/deps/v8/test/mjsunit/compiler/regress-strict-equals-mixed-feedback.js b/deps/v8/test/mjsunit/compiler/regress-strict-equals-mixed-feedback.js
new file mode 100644
index 0000000000..cf41617096
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-strict-equals-mixed-feedback.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar(x, y) {
+ return x === y;
+}
+
+function foo(x) {
+ bar("0", x);
+}
+
+foo("0");
+foo("0");
+%BaselineFunctionOnNextCall(bar);
+foo("0");
+foo("0");
+bar(1, 1);
+%OptimizeFunctionOnNextCall(foo);
+foo("0");
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/string-add-try-catch.js b/deps/v8/test/mjsunit/compiler/string-add-try-catch.js
new file mode 100644
index 0000000000..e34332682c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/string-add-try-catch.js
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = "a".repeat(268435440);
+
+(function() {
+ function foo(a, b) {
+ try {
+ return a + "0123456789012";
+ } catch (e) {
+ return e;
+ }
+ }
+
+ foo("a");
+ foo("a");
+ %OptimizeFunctionOnNextCall(foo);
+ foo("a");
+ assertInstanceof(foo(a), RangeError);
+})();
+
+(function() {
+ function foo(a, b) {
+ try {
+ return "0123456789012" + a;
+ } catch (e) {
+ return e;
+ }
+ }
+
+ foo("a");
+ foo("a");
+ %OptimizeFunctionOnNextCall(foo);
+ foo("a");
+ assertInstanceof(foo(a), RangeError);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/unsigned-min-max.js b/deps/v8/test/mjsunit/compiler/unsigned-min-max.js
new file mode 100644
index 0000000000..db91188628
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/unsigned-min-max.js
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function umin(a, b) {
+ a = a >>> 0;
+ b = b >>> 0;
+ return Math.min(a, b);
+}
+
+umin(1, 1);
+umin(2, 2);
+%OptimizeFunctionOnNextCall(umin);
+assertEquals(1, umin(1, 2));
+assertEquals(1, umin(2, 1));
+assertEquals(0, umin(0, 4294967295));
+assertEquals(0, umin(4294967295, 0));
+assertEquals(4294967294, umin(-1, -2));
+assertEquals(1234, umin(-2, 1234));
+
+function umax(a, b) {
+ a = a >>> 0;
+ b = b >>> 0;
+ return Math.max(a, b);
+}
+
+umax(1, 1);
+umax(2, 2);
+%OptimizeFunctionOnNextCall(umax);
+assertEquals(2, umax(1, 2));
+assertEquals(2, umax(2, 1));
+assertEquals(4294967295, umax(0, 4294967295));
+assertEquals(4294967295, umax(4294967295, 0));
+assertEquals(4294967295, umax(-1, -2));
+assertEquals(4294967294, umax(-2, 1234));
diff --git a/deps/v8/test/mjsunit/debug-function-scopes.js b/deps/v8/test/mjsunit/debug-function-scopes.js
index ae95f9b97d..4a0809ab33 100644
--- a/deps/v8/test/mjsunit/debug-function-scopes.js
+++ b/deps/v8/test/mjsunit/debug-function-scopes.js
@@ -87,7 +87,6 @@ var f3 = (function F1(invisible_parameter) {
var invisible2 = 2;
return (function F3() {
var visible2 = 20;
- var invisible2 = 3;
return (function () {return visible1 + visible2 + visible1a;});
})();
})();
diff --git a/deps/v8/test/mjsunit/debug-print.js b/deps/v8/test/mjsunit/debug-print.js
new file mode 100644
index 0000000000..b0e141d709
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-print.js
@@ -0,0 +1,47 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Make sure printing different element kinds doesn't crash.
+
+var array;
+var obj = {};
+
+array = [];
+%DebugPrint(array);
+
+// FAST_SMI_ELEMENTS
+array = [1, 2, 3];
+%DebugPrint(array);
+
+// FAST_HOLEY_SMI_ELEMENTS
+array[10] = 100;
+array[11] = 100;
+%DebugPrint(array);
+
+// FAST_ELEMENTS
+array = [1, obj, obj];
+%DebugPrint(array);
+
+// FAST_HOLEY_ELEMENTS
+array[100] = obj;
+array[101] = obj;
+%DebugPrint(array);
+
+// FAST_DOUBLE_ELEMENTS
+array = [1.1, 2.2, 3.3, 3.3, 3.3, NaN];
+%DebugPrint(array);
+array.push(NaN);
+array.push(NaN);
+%DebugPrint(array);
+
+// FAST_HOLEY_DOUBLE_ELEMENTS
+array[100] = 1.2;
+array[101] = 1.2;
+%DebugPrint(array);
+
+// DICTIONARY_ELEMENTS
+%NormalizeElements(array);
+%DebugPrint(array);
diff --git a/deps/v8/test/mjsunit/debug-scopes.js b/deps/v8/test/mjsunit/debug-scopes.js
index 935de9cc98..0e822fce1c 100644
--- a/deps/v8/test/mjsunit/debug-scopes.js
+++ b/deps/v8/test/mjsunit/debug-scopes.js
@@ -157,20 +157,20 @@ function CheckScopeChainNames(names, exec_state) {
}
-// Check that the content of the scope is as expected. For functions just check
-// that there is a function.
-function CheckScopeContent(content, number, exec_state) {
+// Check that the scope contains at least minimum_content. For functions just
+// check that there is a function.
+function CheckScopeContent(minimum_content, number, exec_state) {
var scope = exec_state.frame().scope(number);
- var count = 0;
- for (var p in content) {
+ var minimum_count = 0;
+ for (var p in minimum_content) {
var property_mirror = scope.scopeObject().property(p);
assertFalse(property_mirror.isUndefined(), 'property ' + p + ' not found in scope');
- if (typeof(content[p]) === 'function') {
+ if (typeof(minimum_content[p]) === 'function') {
assertTrue(property_mirror.value().isFunction());
} else {
- assertEquals(content[p], property_mirror.value().value(), 'property ' + p + ' has unexpected value');
+ assertEquals(minimum_content[p], property_mirror.value().value(), 'property ' + p + ' has unexpected value');
}
- count++;
+ minimum_count++;
}
// 'arguments' and might be exposed in the local and closure scope. Just
@@ -186,14 +186,14 @@ function CheckScopeContent(content, number, exec_state) {
// Temporary variables introduced by the parser have not been materialized.
assertTrue(scope.scopeObject().property('').isUndefined());
- if (count != scope_size) {
+ if (scope_size < minimum_count) {
print('Names found in scope:');
var names = scope.scopeObject().propertyNames();
for (var i = 0; i < names.length; i++) {
print(names[i]);
}
}
- assertEquals(count, scope_size);
+ assertTrue(scope_size >= minimum_count);
// Get the debug command processor.
var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
@@ -1185,6 +1185,102 @@ listener_delegate = function(exec_state) {
eval(code3);
EndTest();
+BeginTest("Scope positions in for statement with lexical block");
+var code4 = "function for_statement() { \n" +
+ " for (let i = 0; i < 1; i++) { \n" +
+ " let j; \n" +
+ " debugger; \n" +
+ " } \n" +
+ "} \n" +
+ "for_statement(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.Block,
+ debug.ScopeType.Local,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeChainPositions([{start: 66, end: 147}, {start: 52, end: 147}, {start: 22, end: 181}, {}, {}], exec_state);
+}
+eval(code4);
+EndTest();
+
+BeginTest("Scope positions in lexical for each statement");
+var code5 = "function for_each_statement() { \n" +
+ " for (let i of [0]) { \n" +
+ " debugger; \n" +
+ " } \n" +
+ "} \n" +
+ "for_each_statement(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.Local,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeChainPositions([{start: 55, end: 111}, {start: 27, end: 145}, {}, {}], exec_state);
+}
+eval(code5);
+EndTest();
+
+BeginTest("Scope positions in lexical for each statement with lexical block");
+var code6 = "function for_each_statement() { \n" +
+ " for (let i of [0]) { \n" +
+ " let j; \n" +
+ " debugger; \n" +
+ " } \n" +
+ "} \n" +
+ "for_each_statement(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.Block,
+ debug.ScopeType.Local,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeChainPositions([{start: 57, end: 147}, {start: 55, end: 147}, {start: 27, end: 181}, {}, {}], exec_state);
+}
+eval(code6);
+EndTest();
+
+BeginTest("Scope positions in non-lexical for each statement");
+var code7 = "function for_each_statement() { \n" +
+ " var i; \n" +
+ " for (i of [0]) { \n" +
+ " debugger; \n" +
+ " } \n" +
+ "} \n" +
+ "for_each_statement(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeChainPositions([{start: 27, end: 181}, {}, {}], exec_state);
+}
+eval(code7);
+EndTest();
+
+BeginTest("Scope positions in non-lexical for each statement with lexical block");
+var code8 = "function for_each_statement() { \n" +
+ " var i; \n" +
+ " for (i of [0]) { \n" +
+ " let j; \n" +
+ " debugger; \n" +
+ " } \n" +
+ "} \n" +
+ "for_each_statement(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.Local,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeChainPositions([{start: 89, end: 183}, {start: 27, end: 217}, {}, {}], exec_state);
+}
+eval(code8);
+EndTest();
+
assertEquals(begin_test_count, break_count,
'one or more tests did not enter the debugger');
assertEquals(begin_test_count, end_test_count,
diff --git a/deps/v8/test/mjsunit/element-accessor.js b/deps/v8/test/mjsunit/element-accessor.js
index 452afc8d16..5fba359c2a 100644
--- a/deps/v8/test/mjsunit/element-accessor.js
+++ b/deps/v8/test/mjsunit/element-accessor.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
(function () {
var o = [];
o.__proto__ = {};
@@ -31,3 +33,18 @@
Object.defineProperty(o, "0", {get: function(){}});
assertEquals(undefined, Object.getOwnPropertyDescriptor(o, "0"));
})();
+
+(function() {
+ function f() {
+ var a = new Array();
+ a[1] = 1.5;
+ return a;
+ }
+
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ var a = f();
+ a[2] = 2;
+ assertEquals(3, a.length);
+})();
diff --git a/deps/v8/test/mjsunit/es6/block-sloppy-function.js b/deps/v8/test/mjsunit/es6/block-sloppy-function.js
index 8cb9a4deda..2cdcbce3d6 100644
--- a/deps/v8/test/mjsunit/es6/block-sloppy-function.js
+++ b/deps/v8/test/mjsunit/es6/block-sloppy-function.js
@@ -67,6 +67,24 @@
assertEquals(1, f);
})();
+(function shadowingLetDoesntBindGenerator() {
+ let f = function *f() {
+ while(true) {
+ yield 1;
+ }
+ };
+ assertEquals(1, f().next().value);
+ {
+ function *f() {
+ while(true) {
+ yield 2;
+ }
+ }
+ assertEquals(2, f().next().value);
+ }
+ assertEquals(1, f().next().value);
+})();
+
(function shadowingClassDoesntBind() {
class f { }
assertEquals('class f { }', f.toString());
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/promise-all-uncaught.js b/deps/v8/test/mjsunit/es6/debug-promises/promise-all-uncaught.js
index d183c5cf2d..c201d13e05 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/promise-all-uncaught.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/promise-all-uncaught.js
@@ -6,28 +6,16 @@
// Test debug events when we only listen to uncaught exceptions and a
// Promise p3 created by Promise.all has no catch handler, and is rejected
-// because one of the Promises p2 passed to Promise.all is rejected. We
-// expect two Exception debug events to be triggered, for p2 and p3 each,
-// because neither has an user-defined catch handler.
+// because one of the Promises p2 passed to Promise.all is rejected.
+// We expect one event for p2; the system recognizes the rejection of p3
+// to be redundant and based on the rejection of p2 and does not trigger
+// an additional rejection.
var Debug = debug.Debug;
-var expected_events = 2;
+var expected_events = 1;
var log = [];
-var p1 = Promise.resolve();
-p1.name = "p1";
-
-var p2 = p1.then(function() {
- log.push("throw");
- throw new Error("uncaught"); // event
-});
-
-p2.name = "p2";
-
-var p3 = Promise.all([p2]);
-p3.name = "p3";
-
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Exception) return;
try {
@@ -35,13 +23,9 @@ function listener(event, exec_state, event_data, data) {
assertTrue(expected_events >= 0);
assertEquals("uncaught", event_data.exception().message);
assertTrue(event_data.promise() instanceof Promise);
- if (expected_events === 1) {
- // Assert that the debug event is triggered at the throw site.
- assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
- assertEquals("p2", event_data.promise().name);
- } else {
- assertEquals("p3", event_data.promise().name);
- }
+ // Assert that the debug event is triggered at the throw site.
+ assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+ assertEquals("p2", event_data.promise().name);
assertTrue(event_data.uncaught());
} catch (e) {
%AbortJS(e + "\n" + e.stack);
@@ -51,6 +35,19 @@ function listener(event, exec_state, event_data, data) {
Debug.setBreakOnUncaughtException();
Debug.setListener(listener);
+var p1 = Promise.resolve();
+p1.name = "p1";
+
+var p2 = p1.then(function() {
+ log.push("throw");
+ throw new Error("uncaught"); // event
+});
+
+p2.name = "p2";
+
+var p3 = Promise.all([p2]);
+p3.name = "p3";
+
log.push("end main");
function testDone(iteration) {
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/promise-race-uncaught.js b/deps/v8/test/mjsunit/es6/debug-promises/promise-race-uncaught.js
index 57955c01ef..ed6233bc30 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/promise-race-uncaught.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/promise-race-uncaught.js
@@ -6,28 +6,16 @@
// Test debug events when we only listen to uncaught exceptions and a
// Promise p3 created by Promise.race has no catch handler, and is rejected
-// because one of the Promises p2 passed to Promise.all is rejected. We
-// expect two Exception debug events to be triggered, for p2 and p3 each,
-// because neither has an user-defined catch handler.
+// because one of the Promises p2 passed to Promise.race is rejected.
+// We expect one event for p2; the system recognizes the rejection of p3
+// to be redundant and based on the rejection of p2 and does not trigger
+// an additional rejection.
var Debug = debug.Debug;
-var expected_events = 2;
+var expected_events = 1;
var log = [];
-var p1 = Promise.resolve();
-p1.name = "p1";
-
-var p2 = p1.then(function() {
- log.push("throw");
- throw new Error("uncaught"); // event
-});
-
-p2.name = "p2";
-
-var p3 = Promise.race([p2]);
-p3.name = "p3";
-
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Exception) return;
try {
@@ -35,13 +23,9 @@ function listener(event, exec_state, event_data, data) {
assertTrue(expected_events >= 0);
assertEquals("uncaught", event_data.exception().message);
assertTrue(event_data.promise() instanceof Promise);
- if (expected_events === 1) {
- // Assert that the debug event is triggered at the throw site.
- assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
- assertEquals("p2", event_data.promise().name);
- } else {
- assertEquals("p3", event_data.promise().name);
- }
+ // Assert that the debug event is triggered at the throw site.
+ assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+ assertEquals("p2", event_data.promise().name);
assertTrue(event_data.uncaught());
} catch (e) {
%AbortJS(e + "\n" + e.stack);
@@ -51,6 +35,19 @@ function listener(event, exec_state, event_data, data) {
Debug.setBreakOnUncaughtException();
Debug.setListener(listener);
+var p1 = Promise.resolve();
+p1.name = "p1";
+
+var p2 = p1.then(function() {
+ log.push("throw");
+ throw new Error("uncaught"); // event
+});
+
+p2.name = "p2";
+
+var p3 = Promise.race([p2]);
+p3.name = "p3";
+
log.push("end main");
function testDone(iteration) {
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js
index b7c5861c1f..6cd28259e1 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js
@@ -6,13 +6,13 @@
// Test debug events when we only listen to uncaught exceptions and
// there is only a default reject handler for the to-be-rejected Promise.
-// We expect two Exception debug events:
-// - when the first Promise is rejected and only has default reject handlers.
-// - when the default reject handler passes the rejection on.
+// We expect only one debug event: when the first Promise is rejected
+// and only has default reject handlers. No event is triggered when
+// simply forwarding the rejection with .then's default handler.
Debug = debug.Debug;
-var expected_events = 2;
+var expected_events = 1;
var log = [];
var resolve, reject;
@@ -43,15 +43,9 @@ function listener(event, exec_state, event_data, data) {
assertTrue(expected_events >= 0);
assertTrue(event_data.uncaught());
assertTrue(event_data.promise() instanceof Promise);
- if (expected_events == 1) {
- // p1 is rejected, uncaught except for its default reject handler.
- assertEquals(0, exec_state.frameCount());
- assertSame(p1, event_data.promise());
- } else {
- // p2 is rejected by p1's default reject handler.
- assertEquals(0, exec_state.frameCount());
- assertSame(p2, event_data.promise());
- }
+ // p1 is rejected, uncaught, with the error from the Promise.reject line
+ assertNotNull(event_data.sourceLineText().match("Promise.reject"));
+ assertSame(p1, event_data.promise());
}
} catch (e) {
%AbortJS(e + "\n" + e.stack);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
index 0c5ecc5f3a..d4f02cddf7 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
@@ -33,8 +33,8 @@ function listener(event, exec_state, event_data, data) {
assertTrue(event_data.promise() instanceof Promise);
assertSame(q, event_data.promise());
assertTrue(event_data.uncaught());
- // All of the frames on the stack are from native Javascript.
- assertEquals(0, exec_state.frameCount());
+ // The frame comes from the Promise.reject call
+ assertNotNull(/Promise\.reject/.exec(event_data.sourceLineText()));
}
} catch (e) {
%AbortJS(e + "\n" + e.stack);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
index e5e560b3db..0a5279fbed 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
@@ -33,8 +33,8 @@ function listener(event, exec_state, event_data, data) {
assertTrue(event_data.promise() instanceof Promise);
assertSame(q, event_data.promise());
assertTrue(event_data.uncaught());
- // All of the frames on the stack are from native Javascript.
- assertEquals(0, exec_state.frameCount());
+ // The JavaScript frame is from the Promise rejection
+ assertTrue(/Promise\.reject/.test(event_data.sourceLineText()));
}
} catch (e) {
%AbortJS(e + "\n" + e.stack);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js
index 3c30ad3f7c..8b798f7af9 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js
@@ -6,13 +6,13 @@
// Test debug events when we only listen to uncaught exceptions and
// there is only a default reject handler for the to-be-rejected Promise.
-// We expect two Exception debug events:
-// - when the first Promise is rejected and only has default reject handlers.
-// - when the default reject handler passes the rejection on.
+// We expect only one debug event: when the first Promise is rejected
+// and only has default reject handlers. No event is triggered when
+// simply forwarding the rejection with .then's default handler.
Debug = debug.Debug;
-var expected_events = 2;
+var expected_events = 1;
var log = [];
var resolve, reject;
@@ -43,16 +43,10 @@ function listener(event, exec_state, event_data, data) {
assertTrue(expected_events >= 0);
assertTrue(event_data.uncaught());
assertTrue(event_data.promise() instanceof Promise);
- if (expected_events == 1) {
- // p1 is rejected, uncaught except for its default reject handler.
- assertTrue(
- exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
- assertSame(p1, event_data.promise());
- } else {
- // p2 is rejected by p1's default reject handler.
- assertEquals(0, exec_state.frameCount());
- assertSame(p2, event_data.promise());
- }
+ // p1 is rejected, uncaught except for its default reject handler.
+ assertTrue(
+ exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+ assertSame(p1, event_data.promise());
}
} catch (e) {
%AbortJS(e + "\n" + e.stack);
diff --git a/deps/v8/test/mjsunit/es6/function-name.js b/deps/v8/test/mjsunit/es6/function-name.js
index 0fcab441ed..3b0a6fcacb 100644
--- a/deps/v8/test/mjsunit/es6/function-name.js
+++ b/deps/v8/test/mjsunit/es6/function-name.js
@@ -73,6 +73,8 @@
static 43() { }
get 44() { }
set 44(val) { }
+ static get constructor() { }
+ static set constructor(val) { }
};
assertEquals('a', C.prototype.a.name);
@@ -85,6 +87,9 @@
var descriptor = Object.getOwnPropertyDescriptor(C.prototype, '44');
assertEquals('get 44', descriptor.get.name);
assertEquals('set 44', descriptor.set.name);
+ var descriptor = Object.getOwnPropertyDescriptor(C, 'constructor');
+ assertEquals('get constructor', descriptor.get.name);
+ assertEquals('set constructor', descriptor.set.name);
})();
(function testComputedProperties() {
diff --git a/deps/v8/test/mjsunit/es6/promise-thenable-proxy.js b/deps/v8/test/mjsunit/es6/promise-thenable-proxy.js
new file mode 100644
index 0000000000..4849639a5b
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/promise-thenable-proxy.js
@@ -0,0 +1,23 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function assertAsync(b, s) {
+ if (!b) {
+ %AbortJS(" FAILED!")
+ }
+}
+
+var handler = {
+ get: function(target, name) {
+ if (name === 'then') {
+ return (val) => Promise.prototype.then.call(target, val);
+ }
+ }
+};
+
+var target = new Promise(r => r(42));
+var p = new Proxy(target, handler);
+Promise.resolve(p).then((val) => assertAsync(val === 42));
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-5337.js b/deps/v8/test/mjsunit/es6/regress/regress-5337.js
new file mode 100644
index 0000000000..256b3cb554
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-5337.js
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function testNestedSpreadsInPatterns() {
+ (function () {
+ var [...[...x]] = [42, 17];
+ assertArrayEquals([42, 17], x);
+ })();
+ (function () {
+ let [...[...x]] = [42, 17];
+ assertArrayEquals([42, 17], x);
+ })();
+ (function () {
+ const [...[...x]] = [42, 17];
+ assertArrayEquals([42, 17], x);
+ })();
+ (function () {
+ var x; [...[...x]] = [42, 17];
+ assertArrayEquals([42, 17], x);
+ })();
+
+ function f1([...[...x]] = [42, 17]) { return x; }
+ assertArrayEquals([42, 17], f1());
+ assertArrayEquals([1, 2, 3], f1([1, 2, 3]));
+
+ var f2 = function ([...[...x]] = [42, 17]) { return x; }
+ assertArrayEquals([42, 17], f2());
+ assertArrayEquals([1, 2, 3], f2([1, 2, 3]));
+
+ // The following two were failing in debug mode, until v8:5337 was fixed.
+ var f3 = ([...[...x]] = [42, 17]) => { return x; };
+ assertArrayEquals([42, 17], f3());
+ assertArrayEquals([1, 2, 3], f3([1, 2, 3]));
+
+ var f4 = ([...[...x]] = [42, 17]) => x;
+ assertArrayEquals([42, 17], f4());
+ assertArrayEquals([1, 2, 3], f4([1, 2, 3]));
+})();
diff --git a/deps/v8/test/message/syntactic-tail-call-of-eval.js b/deps/v8/test/mjsunit/es6/regress/regress-650172.js
index e69aa9c351..d6b534424c 100644
--- a/deps/v8/test/message/syntactic-tail-call-of-eval.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-650172.js
@@ -2,8 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-explicit-tailcalls
-
-function g() {
- return continue eval ("f()") ;
-}
+var iterator = [].entries().__proto__.__proto__[Symbol.iterator];
+print(1/iterator(-1E-300));
diff --git a/deps/v8/test/mjsunit/es6/string-iterator.js b/deps/v8/test/mjsunit/es6/string-iterator.js
index 8eb27b199a..b63de36fc5 100644
--- a/deps/v8/test/mjsunit/es6/string-iterator.js
+++ b/deps/v8/test/mjsunit/es6/string-iterator.js
@@ -92,3 +92,11 @@ function TestNonOwnSlots() {
assertThrows(function() { object.next(); }, TypeError);
}
TestNonOwnSlots();
+
+
+function TestSlicedStringRegression() {
+ var long_string = "abcdefhijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ var sliced_string = long_string.substring(1);
+ var iterator = sliced_string[Symbol.iterator]();
+}
+TestSlicedStringRegression();
diff --git a/deps/v8/test/mjsunit/es6/super.js b/deps/v8/test/mjsunit/es6/super.js
index 4c80ce7711..a101ea896b 100644
--- a/deps/v8/test/mjsunit/es6/super.js
+++ b/deps/v8/test/mjsunit/es6/super.js
@@ -2213,3 +2213,35 @@ TestKeyedSetterCreatingOwnPropertiesNonConfigurable(42, 43, 44);
let d = new Derived(42);
assertSame(42, d.x);
})();
+
+(function TestNullSuperPropertyLoad() {
+ var obj = {
+ __proto__: null,
+ named() { return super.x },
+ keyed() { return super[5] }
+ };
+ assertThrows(obj.named, TypeError);
+ assertThrows(obj.keyed, TypeError);
+ class C extends null {
+ named() { return super.x }
+ keyed() { return super[5] }
+ }
+ assertThrows(C.prototype.named, TypeError);
+ assertThrows(C.prototype.keyed, TypeError);
+})();
+
+(function TestNullSuperPropertyStore() {
+ var obj = {
+ __proto__: null,
+ named() { super.x = 42 },
+ keyed() { super[5] = 42 }
+ };
+ assertThrows(obj.named, TypeError);
+ assertThrows(obj.keyed, TypeError);
+ class C extends null {
+ named() { super.x = 42 }
+ keyed() { super[5] = 42 }
+ }
+ assertThrows(C.prototype.named, TypeError);
+ assertThrows(C.prototype.keyed, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest.js b/deps/v8/test/mjsunit/es6/tail-call-megatest.js
index 3d2ecb8daa..a3b5cecbe0 100644
--- a/deps/v8/test/mjsunit/es6/tail-call-megatest.js
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest.js
@@ -25,10 +25,11 @@ function checkStackTrace(expected) {
var CAN_INLINE_COMMENT = "// Let it be inlined.";
var DONT_INLINE_COMMENT = (function() {
- var line = "// Don't inline. Don't inline. Don't inline. Don't inline.";
- for (var i = 0; i < 4; i++) {
- line += "\n " + line;
+ var line = "1";
+ for (var i = 0; i < 200; ++i) {
+ line += "," + i;
}
+ line += ";\n";
return line;
})();
diff --git a/deps/v8/test/mjsunit/es6/tail-call.js b/deps/v8/test/mjsunit/es6/tail-call.js
index 6ecf04f3d9..4df4836021 100644
--- a/deps/v8/test/mjsunit/es6/tail-call.js
+++ b/deps/v8/test/mjsunit/es6/tail-call.js
@@ -295,7 +295,7 @@ function f_153(expected_call_stack, a) {
function test() {
var o = new A();
- %DebugPrint(o);
+ //%DebugPrint(o);
assertEquals(153, o.x);
}
@@ -387,18 +387,57 @@ function f_153(expected_call_stack, a) {
}
}
+ function g1let() {
+ for (let v in {a:0}) {
+ return f_153([f_153, g1let, test]);
+ }
+ }
+
+ function g1nodecl() {
+ var v;
+ for (v in {a:0}) {
+ return f_153([f_153, g1nodecl, test]);
+ }
+ }
+
function g2() {
for (var v of [1, 2, 3]) {
return f_153([f_153, g2, test]);
}
}
+ function g2let() {
+ for (let v of [1, 2, 3]) {
+ return f_153([f_153, g2let, test]);
+ }
+ }
+
+ function g2nodecl() {
+ var v;
+ for (v of [1, 2, 3]) {
+ return f_153([f_153, g2nodecl, test]);
+ }
+ }
+
function g3() {
for (var i = 0; i < 10; i++) {
return f_153([f_153, test]);
}
}
+ function g3let() {
+ for (let i = 0; i < 10; i++) {
+ return f_153([f_153, test]);
+ }
+ }
+
+ function g3nodecl() {
+ var i;
+ for (i = 0; i < 10; i++) {
+ return f_153([f_153, test]);
+ }
+ }
+
function g4() {
while (true) {
return f_153([f_153, test]);
@@ -413,8 +452,14 @@ function f_153(expected_call_stack, a) {
function test() {
assertEquals(153, g1());
+ assertEquals(153, g1let());
+ assertEquals(153, g1nodecl());
assertEquals(153, g2());
+ assertEquals(153, g2let());
+ assertEquals(153, g2nodecl());
assertEquals(153, g3());
+ assertEquals(153, g3let());
+ assertEquals(153, g3nodecl());
assertEquals(153, g4());
assertEquals(153, g5());
}
diff --git a/deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing-sloppy.js b/deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing-sloppy.js
deleted file mode 100644
index d02608606d..0000000000
--- a/deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing-sloppy.js
+++ /dev/null
@@ -1,410 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --harmony-explicit-tailcalls
-// Flags: --harmony-do-expressions
-
-var SyntaxErrorTests = [
- { msg: "Unexpected expression inside tail call",
- tests: [
- { src: `()=>{ return continue foo ; }`,
- err: ` ^^^`,
- },
- { src: `()=>{ return continue 42 ; }`,
- err: ` ^^`,
- },
- { src: `()=>{ return continue new foo () ; }`,
- err: ` ^^^^^^^^^^`,
- },
- { src: `()=>{ loop: return continue loop ; }`,
- err: ` ^^^^`,
- },
- { src: `class A { foo() { return continue super.x ; } }`,
- err: ` ^^^^^^^`,
- },
- { src: `()=>{ return continue this ; }`,
- err: ` ^^^^`,
- },
- { src: `()=>{ return continue class A {} ; }`,
- err: ` ^^^^^^^^^^`,
- },
- { src: `()=>{ return continue class A extends B {} ; }`,
- err: ` ^^^^^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue function A() { } ; }`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue { a: b, c: d} ; }`,
- err: ` ^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue function* Gen() { yield 1; } ; }`,
- err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^`,
- },
- { src: `function A() { return continue new.target ; }`,
- err: ` ^^^^^^^^^^`,
- },
- { src: `()=>{ return continue () ; }`,
- err: ` ^^`,
- },
- { src: `()=>{ return continue ( 42 ) ; }`,
- err: ` ^^^^^^`,
- },
- { src: "()=>{ return continue `123 ${foo} 34lk` ; }",
- err: ` ^^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue do { x ? foo() : bar() ; } }`,
- err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^`,
- },
- ],
- },
- { msg: "Tail call expression is not allowed here",
- tests: [
- { src: `class A {}; class B extends A { constructor() { return continue foo () ; } }`,
- err: ` ^^^^^^^^^^^^^^^`,
- },
- { src: `class A extends continue f () {}; }`,
- err: ` ^^^^^^^^^^^^^`,
- },
- ],
- },
- { msg: "Tail call expressions are not allowed in non-strict mode",
- tests: [
- { src: `()=>{ return continue continue continue b() ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue ( continue b() ) ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue f() - a ; }`,
- err: ` ^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return b + continue f() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return 1, 2, 3, continue f() , 4 ; }`,
- err: ` ^^^^^^^^^^^^^`,
- },
- { src: `()=>{ var x = continue f ( ) ; }`,
- err: ` ^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue f () ? 1 : 2 ; }`,
- err: ` ^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return (1, 2, 3, continue f()), 4; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return [1, 2, continue f() ] ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return [1, 2, ... continue f() ] ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return [1, 2, continue f(), 3 ] ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: "()=>{ return `123 ${a} ${ continue foo ( ) } 34lk` ; }",
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return g( 1, 2, continue f() ); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue f() || a; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a || b || c || continue f() || d; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a && b && c && continue f() && d; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a && b || c && continue f() ? d : e; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a ? b : c && continue f() && d || e; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue foo() instanceof bar ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return bar instanceof continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue foo() in bar ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return bar in continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ function* G() { yield continue foo(); } }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ (1, 2, 3, continue f() ) => {} }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ (... continue f()) => {} }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ (a, b, c, ... continue f() ) => {} }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a <= continue f(); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return b > continue f(); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a << continue f(); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return b >> continue f(); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return c >>> continue f(); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue f() = a ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a = continue f() ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a += continue f(); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a ** continue f() ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return delete continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ typeof continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return ~ continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return void continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return !continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return -continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return +continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return ++ continue f( ) ; }`,
- err: ` ^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue f() ++; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue f() --; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return (continue foo()) () ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ for (var i = continue foo(); i < 10; i++) bar(); }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ for (var i = 0; i < continue foo(); i++) bar(); }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ for (var i = 0; i < 10; continue foo()) bar(); }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ if (continue foo()) bar(); }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ while (continue foo()) bar(); }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ do { smth; } while (continue foo()) ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ throw continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ switch (continue foo()) { case 1: break; } ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ with (continue foo()) { smth; } }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ let x = continue foo() }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ const c = continue foo() }`,
- err: ` ^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ try { return continue f ( ) ; } catch(e) {} }`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ try { try { smth; } catch(e) { return continue f( ) ; } }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ try { try { smth; } catch(e) { return continue f( ) ; } } finally { bla; } }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ try { smth; } catch(e) { return continue f ( ) ; } finally { blah; } }`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ try { smth; } catch(e) { try { smth; } catch (e) { return continue f ( ) ; } } finally { blah; } }`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ for (var v in {a:0}) { return continue foo () ; } }`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ for (var v of [1, 2, 3]) { return continue foo () ; } }`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue a.b.c.foo () ; }`,
- err: ` ^^^^^^^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue a().b.c().d.foo () ; }`,
- err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue foo (1)(2)(3, 4) ; }`,
- err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return ( continue b() ) ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: "()=>{ return continue bar`ab cd ef` ; }",
- err: ` ^^^^^^^^^^^^^^^^^^^^^^`,
- },
- { src: "()=>{ return continue bar`ab ${cd} ef` ; }",
- err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a || continue f() ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a && continue f() ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a , continue f() ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ function* G() { return continue foo(); } }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ function B() { return continue new.target() ; } }`,
- err: ` ^^^^^^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue do { x ? foo() : bar() ; }() }`,
- err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue (do { x ? foo() : bar() ; })() }`,
- err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return do { 1, continue foo() } }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return do { x ? continue foo() : y } }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a || (b && continue c()); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a && (b || continue c()); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a || (b ? c : continue d()); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return 1, 2, 3, a || (b ? c : continue d()); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=> continue (foo ()) ;`,
- err: ` ^^^^^^^^^^^^^^^^^^`,
- },
- { src: `()=> a || continue foo () ;`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=> a && continue foo () ;`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=> a ? continue foo () : b;`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- ],
- },
- { msg: "Undefined label 'foo'",
- tests: [
- { src: `()=>{ continue foo () ; }`,
- err: ` ^^^`,
- },
- ],
- },
-];
-
-
-// Should parse successfully.
-var NoErrorTests = [
- `()=>{ class A { foo() { return continue super.f() ; } } }`,
- `()=>{ class A { foo() { return continue f() ; } } }`,
- `()=>{ class A { foo() { return a || continue f() ; } } }`,
- `()=>{ class A { foo() { return b && continue f() ; } } }`,
-];
-
-
-(function() {
- for (var test_set of SyntaxErrorTests) {
- var expected_message = "SyntaxError: " + test_set.msg;
- for (var test of test_set.tests) {
- var passed = true;
- var e = null;
- try {
- Realm.eval(0, test.src);
- } catch (ee) {
- e = ee;
- }
- print("=======================================");
- print("Expected | " + expected_message);
- print("Source | " + test.src);
- print(" | " + test.err);
-
- if (e === null) {
- print("FAILED");
- throw new Error("SyntaxError was not thrown");
- }
-
- var details = %GetExceptionDetails(e);
- if (details.start_pos == undefined ||
- details.end_pos == undefined) {
- throw new Error("Bad message object returned");
- }
- var underline = " ".repeat(details.start_pos) +
- "^".repeat(details.end_pos - details.start_pos);
- var passed = expected_message === e.toString() &&
- test.err === underline;
-
- if (passed) {
- print("PASSED");
- print();
- } else {
- print("---------------------------------------");
- print("Actual | " + e);
- print("Source | " + test.src);
- print(" | " + underline);
- print("FAILED");
- throw new Error("Test failed");
- }
- }
- }
-})();
-
-
-(function() {
- for (var src of NoErrorTests) {
- print("=======================================");
- print("Source | " + src);
- Realm.eval(0, src);
- print("PASSED");
- print();
- }
-})();
diff --git a/deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing.js b/deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing.js
deleted file mode 100644
index 486c3e1da6..0000000000
--- a/deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing.js
+++ /dev/null
@@ -1,393 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --harmony-explicit-tailcalls
-// Flags: --harmony-do-expressions --harmony-async-await
-"use strict";
-
-var SyntaxErrorTests = [
- { msg: "Unexpected expression inside tail call",
- tests: [
- { src: `()=>{ return continue foo ; }`,
- err: ` ^^^`,
- },
- { src: `()=>{ return continue 42 ; }`,
- err: ` ^^`,
- },
- { src: `()=>{ return continue new foo () ; }`,
- err: ` ^^^^^^^^^^`,
- },
- { src: `()=>{ loop: return continue loop ; }`,
- err: ` ^^^^`,
- },
- { src: `class A { foo() { return continue super.x ; } }`,
- err: ` ^^^^^^^`,
- },
- { src: `()=>{ return continue this ; }`,
- err: ` ^^^^`,
- },
- { src: `()=>{ return continue class A {} ; }`,
- err: ` ^^^^^^^^^^`,
- },
- { src: `()=>{ return continue class A extends B {} ; }`,
- err: ` ^^^^^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue function A() { } ; }`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue { a: b, c: d} ; }`,
- err: ` ^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue function* Gen() { yield 1; } ; }`,
- err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^`,
- },
- { src: `function A() { return continue new.target ; }`,
- err: ` ^^^^^^^^^^`,
- },
- { src: `()=>{ return continue () ; }`,
- err: ` ^^`,
- },
- { src: `()=>{ return continue ( 42 ) ; }`,
- err: ` ^^^^^^`,
- },
- { src: "()=>{ return continue `123 ${foo} 34lk` ; }",
- err: ` ^^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue do { x ? foo() : bar() ; } }`,
- err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^`,
- },
- ],
- },
- { msg: "Tail call expression is not allowed here",
- tests: [
- { src: `()=>{ return continue continue continue b() ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue ( continue b() ) ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue f() - a ; }`,
- err: ` ^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return b + continue f() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return 1, 2, 3, continue f() , 4 ; }`,
- err: ` ^^^^^^^^^^^^^`,
- },
- { src: `()=>{ var x = continue f ( ) ; }`,
- err: ` ^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue f () ? 1 : 2 ; }`,
- err: ` ^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return (1, 2, 3, continue f()), 4; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return [1, 2, continue f() ] ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return [1, 2, ... continue f() ] ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return [1, 2, continue f(), 3 ] ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: "()=>{ return `123 ${a} ${ continue foo ( ) } 34lk` ; }",
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return g( 1, 2, continue f() ); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue f() || a; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a || b || c || continue f() || d; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a && b && c && continue f() && d; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a && b || c && continue f() ? d : e; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a ? b : c && continue f() && d || e; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue foo() instanceof bar ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return bar instanceof continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue foo() in bar ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return bar in continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ function* G() { yield continue foo(); } }`,
- err: ` ^^^^^`,
- },
- { src: `()=>{ function* G() { return continue foo(); } }`,
- err: ` ^^^^^`,
- },
- { src: `()=>{ (1, 2, 3, continue f() ) => {} }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ (... continue f()) => {} }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ (a, b, c, ... continue f() ) => {} }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a <= continue f(); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return b > continue f(); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a << continue f(); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return b >> continue f(); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return c >>> continue f(); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue f() = a ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a = continue f() ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a += continue f(); }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a ** continue f() ; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return delete continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ typeof continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return ~ continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return void continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return !continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return -continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return +continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return ++ continue f( ) ; }`,
- err: ` ^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue f() ++; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return continue f() --; }`,
- err: ` ^^^^^^^^^^^^`,
- },
- { src: `()=>{ return (continue foo()) () ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ for (var i = continue foo(); i < 10; i++) bar(); }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ for (var i = 0; i < continue foo(); i++) bar(); }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ for (var i = 0; i < 10; continue foo()) bar(); }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ if (continue foo()) bar(); }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ while (continue foo()) bar(); }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ do { smth; } while (continue foo()) ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ throw continue foo() ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ switch (continue foo()) { case 1: break; } ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ let x = continue foo() }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ const c = continue foo() }`,
- err: ` ^^^^^^^^^^^^^^^`,
- },
- { src: `class A {}; class B extends A { constructor() { return continue foo () ; } }`,
- err: ` ^^^^^^^^^^^^^^^`,
- },
- { src: `class A extends continue f () {}; }`,
- err: ` ^^^^^^^^^^^^^`,
- },
- { src: `async() => continue foo()`,
- err: ` ^^^^^`,
- },
- ],
- },
- { msg: "Tail call expression in try block",
- tests: [
- { src: `()=>{ try { return continue f ( ) ; } catch(e) {} }`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ try { try { smth; } catch(e) { return continue f( ) ; } }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ try { try { smth; } catch(e) { return continue f( ) ; } } finally { bla; } }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- ],
- },
- { msg: "Tail call expression in catch block when finally block is also present",
- tests: [
- { src: `()=>{ try { smth; } catch(e) { return continue f ( ) ; } finally { blah; } }`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ try { smth; } catch(e) { try { smth; } catch (e) { return continue f ( ) ; } } finally { blah; } }`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- ],
- },
- { msg: "Tail call expression in for-in/of body",
- tests: [
- { src: `()=>{ for (var v in {a:0}) { return continue foo () ; } }`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ for (var v of [1, 2, 3]) { return continue foo () ; } }`,
- err: ` ^^^^^^^^^^^^^^^^`,
- },
- ],
- },
- { msg: "Tail call of a direct eval is not allowed",
- tests: [
- { src: `()=>{ return continue eval(" foo () " ) ; }`,
- err: ` ^^^^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a || continue eval("", 1, 2) ; }`,
- err: ` ^^^^^^^^^^^^^^`,
- },
- { src: `()=>{ return a, continue eval ( ) ; }`,
- err: ` ^^^^^^^^^`,
- },
- { src: `()=> a, continue eval ( ) ; `,
- err: ` ^^^^^^^^^`,
- },
- { src: `()=> a || continue eval (' ' ) ; `,
- err: ` ^^^^^^^^^^^^`,
- },
- ],
- },
- { msg: "Undefined label 'foo'",
- tests: [
- { src: `()=>{ continue foo () ; }`,
- err: ` ^^^`,
- },
- ],
- },
-];
-
-
-// Should parse successfully.
-var NoErrorTests = [
- `()=>{ return continue a.b.c.foo () ; }`,
- `()=>{ return continue a().b.c().d.foo () ; }`,
- `()=>{ return continue foo (1)(2)(3, 4) ; }`,
- `()=>{ return continue (0, eval)(); }`,
- `()=>{ return ( continue b() ) ; }`,
- "()=>{ return continue bar`ab cd ef` ; }",
- "()=>{ return continue bar`ab ${cd} ef` ; }",
- `()=>{ return a || continue f() ; }`,
- `()=>{ return a && continue f() ; }`,
- `()=>{ return a , continue f() ; }`,
- `()=>{ class A { foo() { return continue super.f() ; } } }`,
- `()=>{ function B() { return continue new.target() ; } }`,
- `()=>{ return continue do { x ? foo() : bar() ; }() }`,
- `()=>{ return continue (do { x ? foo() : bar() ; })() }`,
- `()=>{ return do { 1, continue foo() } }`,
- `()=>{ return do { x ? continue foo() : y } }`,
- `()=>{ return a || (b && continue c()); }`,
- `()=>{ return a && (b || continue c()); }`,
- `()=>{ return a || (b ? c : continue d()); }`,
- `()=>{ return 1, 2, 3, a || (b ? c : continue d()); }`,
- `()=> continue (foo ()) ;`,
- `()=> a || continue foo () ;`,
- `()=> a && continue foo () ;`,
- `()=> a ? continue foo () : b;`,
-];
-
-
-(function() {
- for (var test_set of SyntaxErrorTests) {
- var expected_message = "SyntaxError: " + test_set.msg;
- for (var test of test_set.tests) {
- var passed = true;
- var e = null;
- try {
- eval(test.src);
- } catch (ee) {
- e = ee;
- }
- print("=======================================");
- print("Expected | " + expected_message);
- print("Source | " + test.src);
- print(" | " + test.err);
-
- if (e === null) {
- print("FAILED");
- throw new Error("SyntaxError was not thrown");
- }
-
- var details = %GetExceptionDetails(e);
- if (details.start_pos == undefined ||
- details.end_pos == undefined) {
- throw new Error("Bad message object returned");
- }
- var underline = " ".repeat(details.start_pos) +
- "^".repeat(details.end_pos - details.start_pos);
- var passed = expected_message === e.toString() &&
- test.err === underline;
-
- if (passed) {
- print("PASSED");
- print();
- } else {
- print("---------------------------------------");
- print("Actual | " + e);
- print("Source | " + test.src);
- print(" | " + underline);
- print("FAILED");
- throw new Error("Test failed");
- }
- }
- }
-})();
-
-
-(function() {
- for (var src of NoErrorTests) {
- print("=======================================");
- print("Source | " + src);
- src = `"use strict"; ` + src;
- Realm.eval(0, src);
- print("PASSED");
- print();
- }
-})();
diff --git a/deps/v8/test/mjsunit/es8/syntactic-tail-call-simple.js b/deps/v8/test/mjsunit/es8/syntactic-tail-call-simple.js
deleted file mode 100644
index ec7ade6673..0000000000
--- a/deps/v8/test/mjsunit/es8/syntactic-tail-call-simple.js
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --harmony-explicit-tailcalls --stack-size=100
-
-//
-// Tail calls work only in strict mode.
-//
-(function() {
- function f(n) {
- if (n <= 0) {
- return "foo";
- }
- return f(n - 1);
- }
- assertThrows(()=>{ f(1e5) });
- %OptimizeFunctionOnNextCall(f);
- assertThrows(()=>{ f(1e5) });
-})();
-
-
-//
-// Tail call normal functions.
-//
-(function() {
- "use strict";
- function f(n) {
- if (n <= 0) {
- return "foo";
- }
- return continue f(n - 1);
- }
- assertEquals("foo", f(1e5));
- %OptimizeFunctionOnNextCall(f);
- assertEquals("foo", f(1e5));
-})();
-
-
-(function() {
- "use strict";
- function f(n) {
- if (n <= 0) {
- return "foo";
- }
- return continue f(n - 1, 42); // Call with arguments adaptor.
- }
- assertEquals("foo", f(1e5));
- %OptimizeFunctionOnNextCall(f);
- assertEquals("foo", f(1e5));
-})();
-
-
-(function() {
- "use strict";
- function f(n){
- if (n <= 0) {
- return "foo";
- }
- return continue g(n - 1);
- }
- function g(n){
- if (n <= 0) {
- return "bar";
- }
- return continue f(n - 1);
- }
- assertEquals("foo", f(1e5));
- assertEquals("bar", f(1e5 + 1));
- %OptimizeFunctionOnNextCall(f);
- assertEquals("foo", f(1e5));
- assertEquals("bar", f(1e5 + 1));
-})();
-
-
-(function() {
- "use strict";
- function f(n){
- if (n <= 0) {
- return "foo";
- }
- return continue g(n - 1, 42); // Call with arguments adaptor.
- }
- function g(n){
- if (n <= 0) {
- return "bar";
- }
- return continue f(n - 1, 42); // Call with arguments adaptor.
- }
- assertEquals("foo", f(1e5));
- assertEquals("bar", f(1e5 + 1));
- %OptimizeFunctionOnNextCall(f);
- assertEquals("foo", f(1e5));
- assertEquals("bar", f(1e5 + 1));
-})();
-
-
-//
-// Tail call bound functions.
-//
-(function() {
- "use strict";
- function f0(n) {
- if (n <= 0) {
- return "foo";
- }
- return continue f_bound(n - 1);
- }
- var f_bound = f0.bind({});
- function f(n) {
- return continue f_bound(n);
- }
- assertEquals("foo", f(1e5));
- %OptimizeFunctionOnNextCall(f);
- assertEquals("foo", f(1e5));
-})();
-
-
-(function() {
- "use strict";
- function f0(n){
- if (n <= 0) {
- return "foo";
- }
- return continue g_bound(n - 1);
- }
- function g0(n){
- if (n <= 0) {
- return "bar";
- }
- return continue f_bound(n - 1);
- }
- var f_bound = f0.bind({});
- var g_bound = g0.bind({});
- function f(n) {
- return continue f_bound(n);
- }
- assertEquals("foo", f(1e5));
- assertEquals("bar", f(1e5 + 1));
- %OptimizeFunctionOnNextCall(f);
- assertEquals("foo", f(1e5));
- assertEquals("bar", f(1e5 + 1));
-})();
diff --git a/deps/v8/test/mjsunit/es8/syntactic-tail-call.js b/deps/v8/test/mjsunit/es8/syntactic-tail-call.js
deleted file mode 100644
index 44936a4b22..0000000000
--- a/deps/v8/test/mjsunit/es8/syntactic-tail-call.js
+++ /dev/null
@@ -1,604 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --harmony-explicit-tailcalls
-// Flags: --harmony-do-expressions
-
-"use strict";
-
-Error.prepareStackTrace = (error,stack) => {
- error.strace = stack;
- return error.message + "\n at " + stack.join("\n at ");
-}
-
-
-function CheckStackTrace(expected) {
- var e = new Error();
- e.stack; // prepare stack trace
- var stack = e.strace;
- assertEquals("CheckStackTrace", stack[0].getFunctionName());
- for (var i = 0; i < expected.length; i++) {
- assertEquals(expected[i].name, stack[i + 1].getFunctionName());
- }
-}
-%NeverOptimizeFunction(CheckStackTrace);
-
-
-function f(expected_call_stack, a, b) {
- CheckStackTrace(expected_call_stack);
- return a;
-}
-
-function f_153(expected_call_stack, a) {
- CheckStackTrace(expected_call_stack);
- return 153;
-}
-
-
-// Tail call when caller does not have an arguments adaptor frame.
-(function() {
- // Caller and callee have same number of arguments.
- function f1(a) {
- CheckStackTrace([f1, test]);
- return 10 + a;
- }
- function g1(a) { return continue f1(2); }
-
- // Caller has more arguments than callee.
- function f2(a) {
- CheckStackTrace([f2, test]);
- return 10 + a;
- }
- function g2(a, b, c) { return continue f2(2); }
-
- // Caller has less arguments than callee.
- function f3(a, b, c) {
- CheckStackTrace([f3, test]);
- return 10 + a + b + c;
- }
- function g3(a) { return continue f3(2, 3, 4); }
-
- // Callee has arguments adaptor frame.
- function f4(a, b, c) {
- CheckStackTrace([f4, test]);
- return 10 + a;
- }
- function g4(a) { return continue f4(2); }
-
- function test() {
- assertEquals(12, g1(1));
- assertEquals(12, g2(1, 2, 3));
- assertEquals(19, g3(1));
- assertEquals(12, g4(1));
- }
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
-
-
-// Tail call when caller has an arguments adaptor frame.
-(function() {
- // Caller and callee have same number of arguments.
- function f1(a) {
- CheckStackTrace([f1, test]);
- return 10 + a;
- }
- function g1(a) { return continue f1(2); }
-
- // Caller has more arguments than callee.
- function f2(a) {
- CheckStackTrace([f2, test]);
- return 10 + a;
- }
- function g2(a, b, c) { return continue f2(2); }
-
- // Caller has less arguments than callee.
- function f3(a, b, c) {
- CheckStackTrace([f3, test]);
- return 10 + a + b + c;
- }
- function g3(a) { return continue f3(2, 3, 4); }
-
- // Callee has arguments adaptor frame.
- function f4(a, b, c) {
- CheckStackTrace([f4, test]);
- return 10 + a;
- }
- function g4(a) { return continue f4(2); }
-
- function test() {
- assertEquals(12, g1());
- assertEquals(12, g2());
- assertEquals(19, g3());
- assertEquals(12, g4());
- }
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
-
-
-// Tail call bound function when caller does not have an arguments
-// adaptor frame.
-(function() {
- // Caller and callee have same number of arguments.
- function f1(a) {
- assertEquals(153, this.a);
- CheckStackTrace([f1, test]);
- return 10 + a;
- }
- var b1 = f1.bind({a: 153});
- function g1(a) { return continue b1(2); }
-
- // Caller has more arguments than callee.
- function f2(a) {
- assertEquals(153, this.a);
- CheckStackTrace([f2, test]);
- return 10 + a;
- }
- var b2 = f2.bind({a: 153});
- function g2(a, b, c) { return continue b2(2); }
-
- // Caller has less arguments than callee.
- function f3(a, b, c) {
- assertEquals(153, this.a);
- CheckStackTrace([f3, test]);
- return 10 + a + b + c;
- }
- var b3 = f3.bind({a: 153});
- function g3(a) { return continue b3(2, 3, 4); }
-
- // Callee has arguments adaptor frame.
- function f4(a, b, c) {
- assertEquals(153, this.a);
- CheckStackTrace([f4, test]);
- return 10 + a;
- }
- var b4 = f4.bind({a: 153});
- function g4(a) { return continue b4(2); }
-
- function test() {
- assertEquals(12, g1(1));
- assertEquals(12, g2(1, 2, 3));
- assertEquals(19, g3(1));
- assertEquals(12, g4(1));
- }
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
-
-
-// Tail call bound function when caller has an arguments adaptor frame.
-(function() {
- // Caller and callee have same number of arguments.
- function f1(a) {
- assertEquals(153, this.a);
- CheckStackTrace([f1, test]);
- return 10 + a;
- }
- var b1 = f1.bind({a: 153});
- function g1(a) { return continue b1(2); }
-
- // Caller has more arguments than callee.
- function f2(a) {
- assertEquals(153, this.a);
- CheckStackTrace([f2, test]);
- return 10 + a;
- }
- var b2 = f2.bind({a: 153});
- function g2(a, b, c) { return continue b2(2); }
-
- // Caller has less arguments than callee.
- function f3(a, b, c) {
- assertEquals(153, this.a);
- CheckStackTrace([f3, test]);
- return 10 + a + b + c;
- }
- var b3 = f3.bind({a: 153});
- function g3(a) { return continue b3(2, 3, 4); }
-
- // Callee has arguments adaptor frame.
- function f4(a, b, c) {
- assertEquals(153, this.a);
- CheckStackTrace([f4, test]);
- return 10 + a;
- }
- var b4 = f4.bind({a: 153});
- function g4(a) { return continue b4(2); }
-
- function test() {
- assertEquals(12, g1());
- assertEquals(12, g2());
- assertEquals(19, g3());
- assertEquals(12, g4());
- }
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
-
-
-// Tail calling from getter.
-(function() {
- function g(v) {
- CheckStackTrace([g, test]);
- %DeoptimizeFunction(test);
- return 153;
- }
- %NeverOptimizeFunction(g);
-
- function f(v) {
- return continue g();
- }
- %SetForceInlineFlag(f);
-
- function test() {
- var o = {};
- o.__defineGetter__('p', f);
- assertEquals(153, o.p);
- }
-
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
-
-
-// Tail calling from setter.
-(function() {
- function g() {
- CheckStackTrace([g, test]);
- %DeoptimizeFunction(test);
- return 153;
- }
- %NeverOptimizeFunction(g);
-
- function f(v) {
- return continue g();
- }
- %SetForceInlineFlag(f);
-
- function test() {
- var o = {};
- o.__defineSetter__('q', f);
- assertEquals(1, o.q = 1);
- }
-
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
-
-
-// Tail calling from constructor.
-(function() {
- function g(context) {
- CheckStackTrace([g, test]);
- %DeoptimizeFunction(test);
- return {x: 153};
- }
- %NeverOptimizeFunction(g);
-
- function A() {
- this.x = 42;
- return continue g();
- }
-
- function test() {
- var o = new A();
- %DebugPrint(o);
- assertEquals(153, o.x);
- }
-
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
-
-
-// Tail calling via various expressions.
-(function() {
- function g1(a) {
- return f([f, g1, test], false) || continue f([f, test], true);
- }
-
- function g2(a) {
- return f([f, g2, test], true) && continue f([f, test], true);
- }
-
- function g3(a) {
- return f([f, g3, test], 13), continue f([f, test], 153);
- }
-
- function g4(a) {
- return f([f, g4, test], false) ||
- (f([f, g4, test], true) && continue f([f, test], true));
- }
-
- function g5(a) {
- return f([f, g5, test], true) &&
- (f([f, g5, test], false) || continue f([f, test], true));
- }
-
- function g6(a) {
- return f([f, g6, test], 13), f([f, g6, test], 42),
- continue f([f, test], 153);
- }
-
- function g7(a) {
- return f([f, g7, test], false) ||
- (f([f, g7, test], false) ? continue f([f, test], true)
- : continue f([f, test], true));
- }
-
- function g8(a) {
- return f([f, g8, test], false) || f([f, g8, test], true) &&
- continue f([f, test], true);
- }
-
- function g9(a) {
- return f([f, g9, test], true) && f([f, g9, test], false) ||
- continue f([f, test], true);
- }
-
- function g10(a) {
- return f([f, g10, test], true) && f([f, g10, test], false) ||
- f([f, g10, test], true) ?
- f([f, g10, test], true) && f([f, g10, test], false) ||
- continue f([f, test], true) :
- f([f, g10, test], true) && f([f, g10, test], false) ||
- continue f([f, test], true);
- }
-
- function test() {
- assertEquals(true, g1());
- assertEquals(true, g2());
- assertEquals(153, g3());
- assertEquals(true, g4());
- assertEquals(true, g5());
- assertEquals(153, g6());
- assertEquals(true, g7());
- assertEquals(true, g8());
- assertEquals(true, g9());
- assertEquals(true, g10());
- }
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
-
-
-// Tail calling from various statements.
-(function() {
- function g3() {
- for (var i = 0; i < 10; i++) {
- return continue f_153([f_153, test]);
- }
- }
-
- function g4() {
- while (true) {
- return continue f_153([f_153, test]);
- }
- }
-
- function g5() {
- do {
- return continue f_153([f_153, test]);
- } while (true);
- }
-
- function test() {
- assertEquals(153, g3());
- assertEquals(153, g4());
- assertEquals(153, g5());
- }
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
-
-
-// Test tail calls from try-catch constructs.
-(function() {
- function tc1(a) {
- try {
- f_153([f_153, tc1, test]);
- return f_153([f_153, tc1, test]);
- } catch(e) {
- f_153([f_153, tc1, test]);
- }
- }
-
- function tc2(a) {
- try {
- f_153([f_153, tc2, test]);
- throw new Error("boom");
- } catch(e) {
- f_153([f_153, tc2, test]);
- return continue f_153([f_153, test]);
- }
- }
-
- function tc3(a) {
- try {
- f_153([f_153, tc3, test]);
- throw new Error("boom");
- } catch(e) {
- f_153([f_153, tc3, test]);
- }
- f_153([f_153, tc3, test]);
- return continue f_153([f_153, test]);
- }
-
- function test() {
- assertEquals(153, tc1());
- assertEquals(153, tc2());
- assertEquals(153, tc3());
- }
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
-
-
-// Test tail calls from try-finally constructs.
-(function() {
- function tf1(a) {
- try {
- f_153([f_153, tf1, test]);
- return f_153([f_153, tf1, test]);
- } finally {
- f_153([f_153, tf1, test]);
- }
- }
-
- function tf2(a) {
- try {
- f_153([f_153, tf2, test]);
- throw new Error("boom");
- } finally {
- f_153([f_153, tf2, test]);
- return continue f_153([f_153, test]);
- }
- }
-
- function tf3(a) {
- try {
- f_153([f_153, tf3, test]);
- } finally {
- f_153([f_153, tf3, test]);
- }
- return continue f_153([f_153, test]);
- }
-
- function test() {
- assertEquals(153, tf1());
- assertEquals(153, tf2());
- assertEquals(153, tf3());
- }
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
-
-
-// Test tail calls from try-catch-finally constructs.
-(function() {
- function tcf1(a) {
- try {
- f_153([f_153, tcf1, test]);
- return f_153([f_153, tcf1, test]);
- } catch(e) {
- } finally {
- f_153([f_153, tcf1, test]);
- }
- }
-
- function tcf2(a) {
- try {
- f_153([f_153, tcf2, test]);
- throw new Error("boom");
- } catch(e) {
- f_153([f_153, tcf2, test]);
- return f_153([f_153, tcf2, test]);
- } finally {
- f_153([f_153, tcf2, test]);
- }
- }
-
- function tcf3(a) {
- try {
- f_153([f_153, tcf3, test]);
- throw new Error("boom");
- } catch(e) {
- f_153([f_153, tcf3, test]);
- } finally {
- f_153([f_153, tcf3, test]);
- return continue f_153([f_153, test]);
- }
- }
-
- function tcf4(a) {
- try {
- f_153([f_153, tcf4, test]);
- throw new Error("boom");
- } catch(e) {
- f_153([f_153, tcf4, test]);
- } finally {
- f_153([f_153, tcf4, test]);
- }
- return continue f_153([f_153, test]);
- }
-
- function test() {
- assertEquals(153, tcf1());
- assertEquals(153, tcf2());
- assertEquals(153, tcf3());
- assertEquals(153, tcf4());
- }
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
-
-
-// Test tail calls from arrow functions.
-(function () {
- function g1(a) {
- return continue (() => { return continue f_153([f_153, test]); })();
- }
-
- function g2(a) {
- return continue (() => continue f_153([f_153, test]))();
- }
-
- function g3(a) {
- var closure = () => f([f, closure, test], true)
- ? continue f_153([f_153, test])
- : continue f_153([f_153, test]);
- return continue closure();
- }
-
- function test() {
- assertEquals(153, g1());
- assertEquals(153, g2());
- assertEquals(153, g3());
- }
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
-
-
-// Test tail calls from do expressions.
-(function () {
- function g1(a) {
- var a = do { return continue f_153([f_153, test]); 42; };
- return a;
- }
-
- function test() {
- assertEquals(153, g1());
- }
- test();
- test();
- %OptimizeFunctionOnNextCall(test);
- test();
-})();
diff --git a/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js b/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js
new file mode 100644
index 0000000000..bd64e3d168
--- /dev/null
+++ b/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js
@@ -0,0 +1,362 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --min-preparse-length 1 --allow-natives-syntax
+
+// Test that the information on which variables to allocate in context doesn't
+// change when recompiling.
+
+(function TestVarInInnerFunction() {
+ // Introduce variables which would potentially be context allocated, depending
+ // on whether an inner function refers to them or not.
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ var a; // This will make "a" actually not be context allocated.
+ a; b; c;
+ }
+ // Force recompilation.
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+
+// Other tests are the same, except that the shadowing variable "a" in inner
+// functions is declared differently.
+
+(function TestLetInInnerFunction() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ let a;
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionParameter() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner(a) {
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerInnerFunctionParameter() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ function innerinner(a) { a; b; c; }
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerArrowFunctionParameter() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ var f = a => a + b + c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionInnerFunction() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ function a() { }
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionSloppyBlockFunction() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ if (true) { function a() { } }
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionCatchVariable() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ try {
+ }
+ catch(a) {
+ a; b; c;
+ }
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionLoopVariable1() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ for (var a in {}) {
+ a; b; c;
+ }
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionLoopVariable2() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ for (let a in {}) {
+ a; b; c;
+ }
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionLoopVariable3() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ for (var a of []) {
+ a; b; c;
+ }
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionLoopVariable4() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ for (let a of []) {
+ a; b; c;
+ }
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionClass() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ class a {}
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+// A cluster of similar tests where the inner function only declares a variable
+// whose name clashes with an outer function variable name, but doesn't use it.
+(function TestRegress650969_1() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ var a;
+ }
+ }
+})();
+
+(function TestRegress650969_2() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ var a = 6;
+ }
+ }
+})();
+
+(function TestRegress650969_3() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ var a, b;
+ }
+ }
+})();
+
+(function TestRegress650969_4() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ var a = 6, b;
+ }
+ }
+})();
+
+(function TestRegress650969_5() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ let a;
+ }
+ }
+})();
+
+(function TestRegress650969_6() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ let a = 6;
+ }
+ }
+})();
+
+(function TestRegress650969_7() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ let a, b;
+ }
+ }
+})();
+
+(function TestRegress650969_8() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ let a = 6, b;
+ }
+ }
+})();
+
+(function TestRegress650969_9() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner(a) {
+ }
+ }
+})();
diff --git a/deps/v8/test/mjsunit/function-var.js b/deps/v8/test/mjsunit/function-var.js
new file mode 100644
index 0000000000..607cbe730e
--- /dev/null
+++ b/deps/v8/test/mjsunit/function-var.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function() {
+ function f() {
+ {
+ function f() { return 42 }
+ }
+ function g() { return f }
+ return g;
+ }
+
+ var g = f();
+ var inner_f = g();
+ assertEquals(42, inner_f());
+})();
+
+(function() {
+ var y = 100;
+ var z = (function y() { return y; });
+ assertEquals(z, z());
+})();
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases.js b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases.js
new file mode 100644
index 0000000000..76296ef7f1
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases.js
@@ -0,0 +1,216 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-async-await --expose-debug-as debug
+
+Debug = debug.Debug
+
+let events = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Exception) return;
+ events++;
+}
+
+async function thrower() {
+ throw "a"; // Exception a
+}
+
+var reject = () => Promise.reject("b"); // Exception b
+
+async function awaitReturn() { await 1; return; }
+
+async function scalar() { return 1; }
+
+function nothing() { return 1; }
+
+function rejectConstructor() {
+ return new Promise((resolve, reject) => reject("c")); // Exception c
+}
+
+async function argThrower(x = (() => { throw "d"; })()) { } // Exception d
+
+async function awaitThrow() {
+ await undefined;
+ throw "e"; // Exception e
+}
+
+function constructorThrow() {
+ return new Promise((resolve, reject) =>
+ Promise.resolve().then(() =>
+ reject("f") // Exception f
+ )
+ );
+}
+
+function suppressThrow() {
+ return thrower();
+}
+
+async function caught(producer) {
+ try {
+ await producer();
+ } catch (e) {
+ }
+}
+
+async function uncaught(producer) {
+ await producer();
+}
+
+async function indirectUncaught(producer) {
+ await uncaught(producer);
+}
+
+async function indirectCaught(producer) {
+ try {
+ await uncaught(producer);
+ } catch (e) {
+ }
+}
+
+function dotCatch(producer) {
+ Promise.resolve(producer()).catch(() => {});
+}
+
+function indirectReturnDotCatch(producer) {
+ (async() => producer())().catch(() => {});
+}
+
+function indirectAwaitDotCatch(producer) {
+ (async() => await producer())().catch(() => {});
+}
+
+function nestedDotCatch(producer) {
+ Promise.resolve(producer()).then().catch(() => {});
+}
+
+async function indirectAwaitCatch(producer) {
+ try {
+ await (() => producer())();
+ } catch (e) {
+ }
+}
+
+function switchCatch(producer) {
+ let resolve;
+ let promise = new Promise(r => resolve = r);
+ async function localCaught() {
+ try {
+ await promise; // force switching to localUncaught and back
+ await producer();
+ } catch (e) { }
+ }
+ async function localUncaught() {
+ await undefined;
+ resolve();
+ }
+ localCaught();
+ localUncaught();
+}
+
+function switchDotCatch(producer) {
+ let resolve;
+ let promise = new Promise(r => resolve = r);
+ async function localCaught() {
+ await promise; // force switching to localUncaught and back
+ await producer();
+ }
+ async function localUncaught() {
+ await undefined;
+ resolve();
+ }
+ localCaught().catch(() => {});
+ localUncaught();
+}
+
+let catches = [caught,
+ indirectCaught,
+ indirectAwaitCatch,
+ switchCatch,
+ switchDotCatch];
+let noncatches = [uncaught, indirectUncaught];
+let lateCatches = [dotCatch,
+ indirectReturnDotCatch,
+ indirectAwaitDotCatch,
+ nestedDotCatch];
+
+let throws = [thrower, reject, argThrower, suppressThrow];
+let nonthrows = [awaitReturn, scalar, nothing];
+let lateThrows = [awaitThrow, constructorThrow];
+let uncatchable = [rejectConstructor];
+
+let cases = [];
+
+for (let producer of throws.concat(lateThrows)) {
+ for (let consumer of catches) {
+ cases.push({ producer, consumer, expectedEvents: 1, caught: true });
+ cases.push({ producer, consumer, expectedEvents: 0, caught: false });
+ }
+}
+
+for (let producer of throws.concat(lateThrows)) {
+ for (let consumer of noncatches) {
+ cases.push({ producer, consumer, expectedEvents: 1, caught: true });
+ cases.push({ producer, consumer, expectedEvents: 1, caught: false });
+ }
+}
+
+for (let producer of nonthrows) {
+ for (let consumer of catches.concat(noncatches, lateCatches)) {
+ cases.push({ producer, consumer, expectedEvents: 0, caught: true });
+ cases.push({ producer, consumer, expectedEvents: 0, caught: false });
+ }
+}
+
+for (let producer of uncatchable) {
+ for (let consumer of catches.concat(noncatches, lateCatches)) {
+ cases.push({ producer, consumer, expectedEvents: 1, caught: true });
+ cases.push({ producer, consumer, expectedEvents: 1, caught: false });
+ }
+}
+
+for (let producer of lateThrows) {
+ for (let consumer of lateCatches) {
+ cases.push({ producer, consumer, expectedEvents: 1, caught: true });
+ cases.push({ producer, consumer, expectedEvents: 0, caught: false });
+ }
+}
+
+for (let producer of throws) {
+ for (let consumer of lateCatches) {
+ cases.push({ producer, consumer, expectedEvents: 1, caught: true });
+ cases.push({ producer, consumer, expectedEvents: 1, caught: false });
+ }
+}
+
+
+function runPart(n) {
+ let subcases = cases.slice(n * cases.length / 4,
+ ((n + 1) * cases.length) / 4);
+ for (let {producer, consumer, expectedEvents, caught} of subcases) {
+ Debug.setListener(listener);
+ if (caught) {
+ Debug.setBreakOnException();
+ } else {
+ Debug.setBreakOnUncaughtException();
+ }
+
+ events = 0;
+ consumer(producer);
+ %RunMicrotasks();
+
+ Debug.setListener(null);
+ if (caught) {
+ Debug.clearBreakOnException();
+ } else {
+ Debug.clearBreakOnUncaughtException();
+ }
+ if (expectedEvents != events) {
+ print(`producer ${producer} consumer ${consumer} expectedEvents ` +
+ `${expectedEvents} caught ${caught} events ${events}`);
+ quit(1);
+ }
+ }
+}
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases0.js b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases0.js
new file mode 100644
index 0000000000..7a422c542b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases0.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-async-await --expose-debug-as debug
+// Files: test/mjsunit/harmony/async-debug-caught-exception-cases.js
+
+runPart(0);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases1.js b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases1.js
new file mode 100644
index 0000000000..dfafa5af26
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases1.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-async-await --expose-debug-as debug
+// Files: test/mjsunit/harmony/async-debug-caught-exception-cases.js
+
+runPart(1);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases2.js b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases2.js
new file mode 100644
index 0000000000..0bfefae4b8
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases2.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-async-await --expose-debug-as debug
+// Files: test/mjsunit/harmony/async-debug-caught-exception-cases.js
+
+runPart(2);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases3.js b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases3.js
new file mode 100644
index 0000000000..6fc7eab0cf
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases3.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-async-await --expose-debug-as debug
+// Files: test/mjsunit/harmony/async-debug-caught-exception-cases.js
+
+runPart(3);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception.js b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception.js
index b2ae18437d..047b421d3d 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception.js
+++ b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception.js
@@ -87,3 +87,58 @@ Debug.setListener(null);
Debug.clearBreakOnUncaughtException();
assertEquals([], log);
assertNull(exception);
+
+log = [];
+Debug.setListener(listener);
+Debug.setBreakOnException();
+
+// "rethrown" uncaught exceptions in return don't cause another event
+async function propagate_inner() { return thrower(); }
+async function propagate_outer() { return propagate_inner(); }
+
+propagate_outer();
+%RunMicrotasks();
+assertEquals(["a"], log);
+assertNull(exception);
+
+// Also don't propagate if an await interceded
+log = [];
+async function propagate_await() { await 1; return thrower(); }
+async function propagate_await_outer() { return propagate_await(); }
+propagate_await_outer();
+%RunMicrotasks();
+assertEquals(["a"], log);
+assertNull(exception);
+
+Debug.clearBreakOnException();
+Debug.setBreakOnUncaughtException();
+
+log = [];
+Promise.resolve().then(() => Promise.reject()).catch(() => log.push("d")); // Exception c
+%RunMicrotasks();
+assertEquals(["d"], log);
+assertNull(exception);
+
+Debug.clearBreakOnUncaughtException();
+Debug.setListener(null);
+
+// If devtools is turned on in the middle, then catch prediction
+// could be wrong (here, it mispredicts the exception as caught),
+// but shouldn't crash.
+
+log = [];
+
+var resolve;
+var turnOnListenerPromise = new Promise(r => resolve = r);
+async function confused() {
+ await turnOnListenerPromise;
+ throw foo
+}
+confused();
+Promise.resolve().then(() => {
+ Debug.setListener(listener);
+ Debug.setBreakOnUncaughtException();
+ resolve();
+});
+
+assertEquals([], log);
diff --git a/deps/v8/test/mjsunit/harmony/debug-async-function-async-task-event.js b/deps/v8/test/mjsunit/harmony/debug-async-function-async-task-event.js
index 249f02fc8f..90e13d8659 100644
--- a/deps/v8/test/mjsunit/harmony/debug-async-function-async-task-event.js
+++ b/deps/v8/test/mjsunit/harmony/debug-async-function-async-task-event.js
@@ -4,25 +4,31 @@
// Flags: --harmony-async-await --expose-debug-as debug --allow-natives-syntax
+// The test observes the callbacks that async/await makes to the inspector
+// to make accurate stack traces. The pattern is based on saving a stack once
+// with enqueueRecurring and restoring it multiple times.
+
+// Additionally, the limited number of events is an indirect indication that
+// we are not doing extra Promise processing that could be associated with memory
+// leaks (v8:5380). In particular, no stacks are saved and restored for extra
+// Promise handling on throwaway Promises.
+
+// TODO(littledan): Write a test that demonstrates that the memory leak in
+// the exception case is fixed.
+
Debug = debug.Debug;
var base_id = -1;
var exception = null;
var expected = [
- "enqueue #1",
- "willHandle #1",
- "then #1",
- "enqueue #2",
- "enqueue #3",
- "didHandle #1",
- "willHandle #2",
- "then #2",
- "didHandle #2",
- "willHandle #3",
- "enqueue #4",
- "didHandle #3",
- "willHandle #4",
- "didHandle #4",
+ 'enqueueRecurring #1',
+ 'willHandle #1',
+ 'then #1',
+ 'didHandle #1',
+ 'willHandle #1',
+ 'then #2',
+ 'cancel #1',
+ 'didHandle #1',
];
function assertLog(msg) {
@@ -40,8 +46,7 @@ function listener(event, exec_state, event_data, data) {
if (base_id < 0)
base_id = event_data.id();
var id = event_data.id() - base_id + 1;
- assertTrue("Promise.resolve" == event_data.name() ||
- "PromiseResolveThenableJob" == event_data.name());
+ assertTrue("async function" == event_data.name());
assertLog(event_data.type() + " #" + id);
} catch (e) {
print(e + e.stack)
diff --git a/deps/v8/test/mjsunit/harmony/default-parameter-do-expression.js b/deps/v8/test/mjsunit/harmony/default-parameter-do-expression.js
new file mode 100644
index 0000000000..cb80d246bc
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/default-parameter-do-expression.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-do-expressions --nolazy
+
+function hoist_unique_do_var() {
+ var f = (y = do { var unique = 3 }) => unique;
+ assertEquals(3, f());
+ assertThrows(() => unique, ReferenceError);
+}
+hoist_unique_do_var();
+
+function hoist_duplicate_do_var() {
+ var duplicate = 100;
+ var f = (y = do { var duplicate = 3 }) => duplicate;
+ assertEquals(3, f());
+ // TODO(verwaest): The {duplicate} declarations were invalidly merged.
+ assertEquals(3, duplicate);
+}
+hoist_duplicate_do_var();
diff --git a/deps/v8/test/mjsunit/keyed-load-generic.js b/deps/v8/test/mjsunit/keyed-load-generic.js
new file mode 100644
index 0000000000..a65577d635
--- /dev/null
+++ b/deps/v8/test/mjsunit/keyed-load-generic.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function NegativeIndexAndDictionaryElements() {
+ function f(o, idx) {
+ return o[idx];
+ }
+
+ f({}, 0);
+ f({}, 0); // Make the IC megamorphic/generic.
+
+ var o = {};
+ o[1000000] = "dictionary";
+ var c = -21;
+ o[c] = "foo";
+ assertEquals("foo", f(o, c));
+})();
diff --git a/deps/v8/test/mjsunit/lazy-inner-functions.js b/deps/v8/test/mjsunit/lazy-inner-functions.js
new file mode 100644
index 0000000000..127d349b1b
--- /dev/null
+++ b/deps/v8/test/mjsunit/lazy-inner-functions.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --min-preparse-length 1
+
+(function TestLazyInnerFunctionCallsEval() {
+ var i = (function eager_outer() {
+ var a = 41; // Should be context-allocated
+ function lazy_inner() {
+ return eval("a");
+ }
+ return lazy_inner;
+ })();
+ assertEquals(41, i());
+})();
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index d610fce430..68cfcbe799 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -227,6 +227,14 @@
'unicode-test': [SKIP],
'whitespaces': [SKIP],
+ # Async function tests taking too long
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5411
+ 'harmony/async-function-debug-scopes': [SKIP],
+ 'harmony/async-debug-caught-exception-cases0': [SKIP],
+ 'harmony/async-debug-caught-exception-cases1': [SKIP],
+ 'harmony/async-debug-caught-exception-cases2': [SKIP],
+ 'harmony/async-debug-caught-exception-cases3': [SKIP],
+
# TODO(mstarzinger): Takes too long with TF.
'array-sort': [PASS, NO_VARIANTS],
'regress/regress-91008': [PASS, NO_VARIANTS],
@@ -531,6 +539,12 @@
}], # 'system == windows'
##############################################################################
+['system == macos', {
+ # BUG(v8:5333)
+ 'big-object-literal': [SKIP],
+}], # 'system == macos'
+
+##############################################################################
['arch == s390 or arch == s390x', {
# Stack manipulations in LiveEdit is not implemented for this arch.
@@ -561,6 +575,13 @@
# Too slow.
'es6/tail-call-megatest*': [SKIP],
+
+ # Ongoing implementation of modules.
+ # https://bugs.chromium.org/p/v8/issues/detail?id=1569
+ # The deopt fuzzer currently does not respect the 'variant != ignition' rule
+ # further down in this file, so we have to duplicate this here.
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5374
+ 'modules-*': [SKIP],
}], # 'deopt_fuzzer == True'
##############################################################################
@@ -632,47 +653,17 @@
# Might trigger stack overflow.
'unicode-test': [SKIP],
- # TODO(mythria, 4780): Related to type feedback for calls in interpreter.
- 'array-literal-feedback': [FAIL],
- 'regress/regress-4121': [FAIL],
-
# TODO(4680): Test doesn't know about three tier compiler pipeline.
'assert-opt-and-deopt': [SKIP],
- # BUG(rmcilroy,4989): Function is optimized without type feedback and so immediately deopts again, causing check failure in the test.
- 'compiler/deopt-inlined-from-call': [FAIL],
- 'compiler/increment-typefeedback': [FAIL],
+ # Fails because concurrent compilation is not triggered on bytecode.
+ # Check in Runtime_OptimizeFunctionOnNextCall.
'compiler/manual-concurrent-recompile': [FAIL],
- 'constant-folding-2': [FAIL],
- 'debug-is-active': [FAIL],
- 'deopt-with-fp-regs': [FAIL],
- 'deserialize-optimize-inner': [FAIL],
- 'div-mul-minus-one': [FAIL],
- 'double-intrinsics': [FAIL],
- 'elements-transition-hoisting': [FAIL],
- 'es6/block-let-crankshaft': [FAIL],
- 'es6/block-let-crankshaft-sloppy': [FAIL],
- 'getters-on-elements': [FAIL],
- 'harmony/do-expressions': [FAIL],
- 'math-floor-of-div-minus-zero': [FAIL],
- 'regress/regress-2132': [FAIL],
- 'regress/regress-2339': [FAIL],
- 'regress/regress-3176': [FAIL],
- 'regress/regress-3709': [FAIL],
- 'regress/regress-385565': [FAIL],
- 'regress/regress-crbug-594183': [FAIL],
'regress/regress-embedded-cons-string': [FAIL],
- 'regress/regress-map-invalidation-2': [FAIL],
- 'regress/regress-param-local-type': [FAIL],
'regress/regress-prepare-break-while-recompile': [FAIL],
- 'shift-for-integer-div': [FAIL],
- 'sin-cos': [FAIL],
- 'smi-mul-const': [FAIL],
- 'smi-mul': [FAIL],
- 'unary-minus-deopt': [FAIL],
- 'array-constructor-feedback': [FAIL],
- 'array-feedback': [FAIL],
- 'allocation-site-info': [FAIL],
+
+ # BUG(v8:5451): Flaky crashes.
+ 'wasm/asm-wasm': [PASS, ['gc_stress', SKIP]],
}], # variant == ignition
['variant == ignition and arch == arm64', {
@@ -706,43 +697,15 @@
##############################################################################
['variant == ignition_staging', {
- 'allocation-site-info': [FAIL],
- 'array-constructor-feedback': [FAIL],
- 'array-feedback': [FAIL],
- 'array-literal-feedback': [FAIL],
'assert-opt-and-deopt': [SKIP],
- 'compiler/deopt-inlined-from-call': [FAIL],
- 'compiler/increment-typefeedback': [FAIL],
- 'compiler/manual-concurrent-recompile': [FAIL],
- 'constant-folding-2': [FAIL],
- 'debug-is-active': [FAIL],
'debug-liveedit-double-call': [FAIL],
- 'deopt-with-fp-regs': [FAIL],
- 'deserialize-optimize-inner': [FAIL],
- 'div-mul-minus-one': [FAIL],
- 'elements-transition-hoisting': [FAIL],
- 'es6/block-let-crankshaft': [FAIL],
- 'es6/block-let-crankshaft-sloppy': [FAIL],
- 'getters-on-elements': [FAIL],
- 'harmony/do-expressions': [FAIL],
- 'math-floor-of-div-minus-zero': [FAIL],
- 'regress/regress-2132': [FAIL],
- 'regress/regress-2339': [FAIL],
- 'regress/regress-3176': [FAIL],
- 'regress/regress-3709': [FAIL],
- 'regress/regress-385565': [FAIL],
- 'regress/regress-4121': [FAIL],
- 'regress/regress-crbug-594183': [FAIL],
+ 'regress-sync-optimized-lists': [FAIL],
+
+ # Fails because concurrent compilation is not triggered on bytecode.
+ # Check in Runtime_OptimizeFunctionOnNextCall.
+ 'compiler/manual-concurrent-recompile': [FAIL],
'regress/regress-embedded-cons-string': [FAIL],
- 'regress/regress-map-invalidation-2': [FAIL],
- 'regress/regress-param-local-type': [FAIL],
'regress/regress-prepare-break-while-recompile': [FAIL],
- 'regress-sync-optimized-lists': [FAIL],
- 'shift-for-integer-div': [FAIL],
- 'sin-cos': [FAIL],
- 'smi-mul-const': [FAIL],
- 'smi-mul': [FAIL],
- 'unary-minus-deopt': [FAIL],
# Flaky.
'asm/int32div': [SKIP],
@@ -753,6 +716,8 @@
# Might trigger stack overflow.
'unicode-test': [SKIP],
+ # BUG(v8:5451): Flaky crashes.
+ 'wasm/asm-wasm': [PASS, ['gc_stress', SKIP]],
}], # variant == ignition_staging
##############################################################################
@@ -762,13 +727,6 @@
# Might trigger stack overflow.
'unicode-test': [SKIP],
- # TODO(mythria, 4780): Related to type feedback for calls in interpreter.
- 'array-literal-feedback': [FAIL],
- 'regress/regress-4121': [FAIL],
- 'array-constructor-feedback': [FAIL],
- 'array-feedback': [FAIL],
- 'allocation-site-info': [FAIL],
-
'wasm/asm-wasm-f32': [PASS, ['arch in [arm64]', SKIP]],
'wasm/asm-wasm-f64': [PASS, ['arch in [arm64]', SKIP]],
@@ -818,6 +776,24 @@
}], # variant == ignition_turbofan and msan
##############################################################################
+['variant == ignition or variant == ignition_staging or variant == ignition_turbofan', {
+ # Modules for which execution must fail (e.g. because of unresolved imports).
+ # Eventually we should test for the precise error message, but for now we only
+ # ensure that there is an error.
+ 'modules-fail*': [FAIL],
+
+ # Modules which are only meant to be imported from by other tests, not to be
+ # tested standalone.
+ 'modules-skip*': [SKIP],
+}], # variant == ignition or variant == ignition_staging or variant == ignition_turbofan
+
+['variant != ignition and variant != ignition_staging and variant != ignition_turbofan', {
+ # Ongoing implementation of modules.
+ # https://bugs.chromium.org/p/v8/issues/detail?id=1569
+ 'modules-*': [SKIP],
+}], # variant != ignition and variant != ignition_staging and variant != ignition_turbofan
+
+##############################################################################
['gcov_coverage', {
# Tests taking too long.
'array-functions-prototype-misc': [SKIP],
@@ -827,12 +803,22 @@
}], # 'gcov_coverage'
##############################################################################
-# This test allocates a 2G block of memory and if there are multiple
-# varients this leads kills by the OOM killer, crashes or messages
-# indicating the OS cannot allocate memory, exclude for Node.js runs
-# re-evalute when we move up to v8 5.1
-[ALWAYS, {
-'regress/regress-crbug-514081': [PASS, NO_VARIANTS],
-}], # ALWAYS
+['variant == asm_wasm', {
+ # Skip stuff uninteresting for asm.js
+ 'bugs/*': [SKIP],
+ 'compiler/*': [SKIP],
+ 'es6/*': [SKIP],
+ 'es7/*': [SKIP],
+ 'es8/*': [SKIP],
+ 'harmony/*': [SKIP],
+ 'ignition/*': [SKIP],
+ 'lithium/*': [SKIP],
+ 'third_party/*': [SKIP],
+ 'tools/*': [SKIP],
+ 'apply': [SKIP],
+ 'math-*': [SKIP],
+ 'unicode-test': [SKIP],
+ 'whitespaces': [SKIP],
+}], # variant == asm_wasm
]
diff --git a/deps/v8/test/mjsunit/modules-circular-valid.js b/deps/v8/test/mjsunit/modules-circular-valid.js
new file mode 100644
index 0000000000..e381eefdbc
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-circular-valid.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export {a as b} from "modules-skip-circular-valid.js";
diff --git a/deps/v8/test/mjsunit/modules-default-name1.js b/deps/v8/test/mjsunit/modules-default-name1.js
new file mode 100644
index 0000000000..54c3afeec5
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-default-name1.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {default as goo} from "modules-skip-default-name1.js";
+assertEquals(
+ {value: "gaga", configurable: true, writable: false, enumerable: false},
+ Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name2.js b/deps/v8/test/mjsunit/modules-default-name2.js
new file mode 100644
index 0000000000..51e64139ca
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-default-name2.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {default as goo} from "modules-skip-default-name2.js";
+assertEquals(
+ {value: "gaga", configurable: true, writable: false, enumerable: false},
+ Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name3.js b/deps/v8/test/mjsunit/modules-default-name3.js
new file mode 100644
index 0000000000..caab3eb32a
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-default-name3.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {default as goo} from "modules-skip-default-name3.js";
+assertEquals(
+ {value: "default", configurable: true, writable: false, enumerable: false},
+ Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name4.js b/deps/v8/test/mjsunit/modules-default-name4.js
new file mode 100644
index 0000000000..c69da9d02b
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-default-name4.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {default as goo} from "modules-skip-default-name4.js";
+assertEquals(
+ {value: "Gaga", configurable: true, writable: false, enumerable: false},
+ Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name5.js b/deps/v8/test/mjsunit/modules-default-name5.js
new file mode 100644
index 0000000000..d6e0e5c049
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-default-name5.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {default as goo} from "modules-skip-default-name5.js";
+assertEquals(
+ {value: "Gaga", configurable: true, writable: false, enumerable: false},
+ Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name6.js b/deps/v8/test/mjsunit/modules-default-name6.js
new file mode 100644
index 0000000000..1ac1bcb0c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-default-name6.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {default as goo} from "modules-skip-default-name6.js";
+assertEquals(
+ {value: "default", configurable: true, writable: false, enumerable: false},
+ Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name7.js b/deps/v8/test/mjsunit/modules-default-name7.js
new file mode 100644
index 0000000000..82904d4212
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-default-name7.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {default as goo} from "modules-skip-default-name7.js";
+let descr = Reflect.getOwnPropertyDescriptor(goo, 'name');
+assertEquals(descr,
+ {value: descr.value, configurable: true, writable: true, enumerable: false});
+assertEquals("yo", descr.value());
diff --git a/deps/v8/test/mjsunit/modules-default-name8.js b/deps/v8/test/mjsunit/modules-default-name8.js
new file mode 100644
index 0000000000..b192a2544a
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-default-name8.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {default as goo} from "modules-skip-default-name8.js";
+assertEquals(
+ {value: "default", configurable: true, writable: false, enumerable: false},
+ Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name9.js b/deps/v8/test/mjsunit/modules-default-name9.js
new file mode 100644
index 0000000000..3ba711f47e
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-default-name9.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {default as goo} from "modules-skip-default-name9.js";
+assertEquals(
+ {value: "default", configurable: true, writable: false, enumerable: false},
+ Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default.js b/deps/v8/test/mjsunit/modules-default.js
new file mode 100644
index 0000000000..304703b246
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-default.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import foo from "modules-skip-1.js";
+assertEquals(42, foo);
+
+import {default as gaga} from "modules-skip-1.js";
+assertEquals(42, gaga);
diff --git a/deps/v8/test/mjsunit/modules-empty-import1.js b/deps/v8/test/mjsunit/modules-empty-import1.js
new file mode 100644
index 0000000000..60498f187a
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-empty-import1.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import "modules-skip-empty-import.js";
+import {counter} from "modules-skip-empty-import-aux.js";
+assertEquals(1, counter);
diff --git a/deps/v8/test/mjsunit/modules-empty-import2.js b/deps/v8/test/mjsunit/modules-empty-import2.js
new file mode 100644
index 0000000000..8862c94c92
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-empty-import2.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {} from "modules-skip-empty-import.js";
+import {counter} from "modules-skip-empty-import-aux.js";
+assertEquals(1, counter);
diff --git a/deps/v8/test/mjsunit/modules-empty-import3.js b/deps/v8/test/mjsunit/modules-empty-import3.js
new file mode 100644
index 0000000000..0503891fce
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-empty-import3.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export {} from "modules-skip-empty-import.js";
+import {counter} from "modules-skip-empty-import-aux.js";
+assertEquals(1, counter);
diff --git a/deps/v8/test/mjsunit/modules-empty-import4.js b/deps/v8/test/mjsunit/modules-empty-import4.js
new file mode 100644
index 0000000000..0cea643414
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-empty-import4.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import "modules-skip-empty-import.js";
+import {} from "modules-skip-empty-import.js";
+export {} from "modules-skip-empty-import.js";
+import {counter} from "modules-skip-empty-import-aux.js";
+assertEquals(1, counter);
diff --git a/deps/v8/test/mjsunit/modules-error-trace.js b/deps/v8/test/mjsunit/modules-error-trace.js
new file mode 100644
index 0000000000..bbf83c510d
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-error-trace.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+// Make sure the generator resume function doesn't show up in the stack trace.
+const stack = (new Error).stack;
+assertEquals(2, stack.split(/\r\n|\r|\n/).length);
diff --git a/deps/v8/test/mjsunit/modules-exports1.js b/deps/v8/test/mjsunit/modules-exports1.js
new file mode 100644
index 0000000000..260f545225
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-exports1.js
@@ -0,0 +1,55 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+
+export var myvar = "VAR";
+assertEquals("VAR", myvar);
+assertEquals("VAR", eval("myvar"));
+(() => assertEquals("VAR", myvar))();
+
+export let mylet = "LET";
+assertEquals("LET", mylet);
+assertEquals("LET", eval("mylet"));
+(() => assertEquals("LET", mylet))();
+
+export const myconst = "CONST";
+assertEquals("CONST", myconst);
+assertEquals("CONST", eval("myconst"));
+(() => assertEquals("CONST", myconst))();
+
+
+myvar = 1;
+assertEquals(1, myvar);
+assertEquals(1, eval("myvar"));
+(() => assertEquals(1, myvar))();
+(() => myvar = 2)();
+assertEquals(2, myvar);
+(() => assertEquals(2, myvar))();
+{
+ let f = () => assertEquals(2, myvar);
+ f();
+}
+
+mylet = 1;
+assertEquals(1, mylet);
+assertEquals(1, eval("mylet"));
+(() => assertEquals(1, mylet))();
+(() => mylet = 2)();
+assertEquals(2, mylet);
+assertEquals(2, eval("mylet"));
+(() => assertEquals(2, mylet))();
+{
+ let f = () => assertEquals(2, mylet);
+ f();
+}
+
+assertThrows(() => myconst = 1, TypeError);
+assertEquals("CONST", myconst);
+assertEquals("CONST", eval("myconst"));
+(() => assertEquals("CONST", myconst))();
+{
+ let f = () => assertEquals("CONST", myconst);
+ f();
+}
diff --git a/deps/v8/test/mjsunit/modules-exports2.js b/deps/v8/test/mjsunit/modules-exports2.js
new file mode 100644
index 0000000000..77f6bb6ccc
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-exports2.js
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+
+export function foo() { return 42 }
+assertEquals(42, foo());
+foo = 1;
+assertEquals(1, foo);
+
+let gaga = 43;
+export {gaga as gugu};
+assertEquals(43, gaga);
+
+export default (function bar() { return 43 })
+assertThrows(() => bar(), ReferenceError);
+assertThrows("default", SyntaxError);
+assertThrows("*default*", SyntaxError);
+
+
+var bla = 44;
+var blu = 45;
+export {bla};
+export {bla as blu};
+export {bla as bli};
+assertEquals(44, bla);
+assertEquals(45, blu);
+bla = 46;
+assertEquals(46, bla);
+assertEquals(45, blu);
diff --git a/deps/v8/test/mjsunit/modules-exports3.js b/deps/v8/test/mjsunit/modules-exports3.js
new file mode 100644
index 0000000000..a792852ad9
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-exports3.js
@@ -0,0 +1,48 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+
+export { myvar, mylet, myconst };
+
+var myvar = "VAR";
+assertEquals("VAR", myvar);
+let mylet = "LET";
+assertEquals("LET", mylet);
+const myconst = "CONST";
+assertEquals("CONST", myconst);
+
+function* gaga() { yield 1 }
+assertEquals(1, gaga().next().value);
+export {gaga};
+export default gaga;
+export {gaga as gigi};
+assertEquals(1, gaga().next().value);
+
+
+export let gugu = 42;
+
+{
+ assertEquals(42, gugu);
+}
+
+try {
+ assertEquals(42, gugu);
+} catch(_) {
+ assertUnreachable();
+}
+
+try {
+ throw {};
+} catch(_) {
+ assertEquals(42, gugu);
+}
+
+try {
+ throw {};
+} catch({x=gugu}) {
+ assertEquals(42, x);
+}
+
+assertEquals(5, eval("var x = 5; x"));
diff --git a/deps/v8/test/mjsunit/modules-fail-1.js b/deps/v8/test/mjsunit/modules-fail-1.js
new file mode 100644
index 0000000000..3e28647514
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-fail-1.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a} from "modules-fail-1.js";
diff --git a/deps/v8/test/mjsunit/modules-fail-2.js b/deps/v8/test/mjsunit/modules-fail-2.js
new file mode 100644
index 0000000000..e7dd683ced
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-fail-2.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a as b} from "modules-fail-2.js";
diff --git a/deps/v8/test/mjsunit/modules-fail-3.js b/deps/v8/test/mjsunit/modules-fail-3.js
new file mode 100644
index 0000000000..d29d44476d
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-fail-3.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import foo from "modules-fail-3.js";
diff --git a/deps/v8/test/mjsunit/modules-fail-4.js b/deps/v8/test/mjsunit/modules-fail-4.js
new file mode 100644
index 0000000000..ec9edda909
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-fail-4.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a as b} from "modules-fail-4.js";
+export {c as a} from "modules-fail-4.js";
diff --git a/deps/v8/test/mjsunit/modules-fail-5.js b/deps/v8/test/mjsunit/modules-fail-5.js
new file mode 100644
index 0000000000..046275b2d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-fail-5.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a as b} from "modules-fail-5.js";
+export {c as a} from "modules-fail-5.js";
+import {c} from "modules-fail-5.js";
diff --git a/deps/v8/test/mjsunit/modules-fail-6.js b/deps/v8/test/mjsunit/modules-fail-6.js
new file mode 100644
index 0000000000..766cf43852
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-fail-6.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+// Star exports do not propagate a default export.
+import a from "modules-skip-4.js";
diff --git a/deps/v8/test/mjsunit/modules-fail-7.js b/deps/v8/test/mjsunit/modules-fail-7.js
new file mode 100644
index 0000000000..766cf43852
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-fail-7.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+// Star exports do not propagate a default export.
+import a from "modules-skip-4.js";
diff --git a/deps/v8/test/mjsunit/modules-fail-8.js b/deps/v8/test/mjsunit/modules-fail-8.js
new file mode 100644
index 0000000000..bc9c101301
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-fail-8.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a} from "modules-skip-7.js";
diff --git a/deps/v8/test/mjsunit/modules-fail-cyclic-1.js b/deps/v8/test/mjsunit/modules-fail-cyclic-1.js
new file mode 100644
index 0000000000..5156a57cd9
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-fail-cyclic-1.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a} from "modules-fail-cyclic-1.js";
+export {a};
diff --git a/deps/v8/test/mjsunit/modules-fail-cyclic-2.js b/deps/v8/test/mjsunit/modules-fail-cyclic-2.js
new file mode 100644
index 0000000000..f6a7cecec1
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-fail-cyclic-2.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a} from "modules-skip-cyclic.js";
+export {a as b};
diff --git a/deps/v8/test/mjsunit/modules-fail-cyclic-3.js b/deps/v8/test/mjsunit/modules-fail-cyclic-3.js
new file mode 100644
index 0000000000..a216569fff
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-fail-cyclic-3.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export {a as x} from "modules-skip-cyclic-3.js";
+export {b as y} from "modules-skip-cyclic-3.js";
diff --git a/deps/v8/test/mjsunit/modules-fail-star-exports-conflict.js b/deps/v8/test/mjsunit/modules-fail-star-exports-conflict.js
new file mode 100644
index 0000000000..6e2b219342
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-fail-star-exports-conflict.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export * from "modules-skip-star-exports-conflict.js";
+export * from "modules-skip-6.js";
+
+import {a} from "modules-fail-star-exports-conflict.js";
diff --git a/deps/v8/test/mjsunit/modules-imports1.js b/deps/v8/test/mjsunit/modules-imports1.js
new file mode 100644
index 0000000000..f2e33f0f6a
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-imports1.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a as x, set_a as set_x} from "modules-skip-1.js"
+
+let get_x = () => x;
+
+assertEquals(1, x);
+assertEquals(1, (() => x)());
+assertEquals(1, eval('x'));
+assertEquals(1, get_x());
+
+assertThrows(() => x = 666, TypeError);
+assertEquals(1, x);
+assertEquals(1, (() => x)());
+assertEquals(1, eval('x'));
+assertEquals(1, get_x());
+
+set_x("foo");
+assertEquals("foo", x);
+assertEquals("foo", (() => x)());
+assertEquals("foo", eval('x'));
+assertEquals("foo", get_x());
diff --git a/deps/v8/test/mjsunit/modules-imports2.js b/deps/v8/test/mjsunit/modules-imports2.js
new file mode 100644
index 0000000000..35a7f76691
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-imports2.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+let get_x = () => x;
+
+assertEquals(1, x);
+assertEquals(1, (() => x)());
+assertEquals(1, eval('x'));
+assertEquals(1, get_x());
+
+assertThrows(() => x = 666, TypeError);
+assertEquals(1, x);
+assertEquals(1, (() => x)());
+assertEquals(1, eval('x'));
+assertEquals(1, get_x());
+
+set_x("foo");
+assertEquals("foo", x);
+assertEquals("foo", (() => x)());
+assertEquals("foo", eval('x'));
+assertEquals("foo", get_x());
+
+import {a as x, set_a as set_x} from "modules-skip-1.js"
diff --git a/deps/v8/test/mjsunit/modules-imports3.js b/deps/v8/test/mjsunit/modules-imports3.js
new file mode 100644
index 0000000000..42fcdcecb7
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-imports3.js
@@ -0,0 +1,38 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a as x, a as y} from "modules-skip-1.js";
+import {b as z, get_a, set_a} from "modules-skip-1.js";
+
+assertEquals(1, get_a());
+assertEquals(1, x);
+assertEquals(1, y);
+assertEquals(1, z);
+
+set_a(2);
+assertEquals(2, get_a());
+assertEquals(2, x);
+assertEquals(2, y);
+assertEquals(2, z);
+
+assertThrows(() => x = 3, TypeError);
+assertThrows(() => y = 3, TypeError);
+assertThrows(() => z = 3, TypeError);
+
+assertEquals(2, get_a());
+assertEquals(2, x);
+assertEquals(2, y);
+assertEquals(2, z);
+
+assertEquals(2, eval('get_a()'));
+assertEquals(2, eval('x'));
+assertEquals(2, eval('y'));
+assertEquals(2, eval('z'));
+
+assertEquals(2, (() => get_a())());
+assertEquals(2, (() => x)());
+assertEquals(2, (() => y)());
+assertEquals(2, (() => z)());
diff --git a/deps/v8/test/mjsunit/modules-imports4.js b/deps/v8/test/mjsunit/modules-imports4.js
new file mode 100644
index 0000000000..4d734878aa
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-imports4.js
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {b, c} from "modules-skip-2.js";
+import {a, set_a} from "modules-skip-1.js";
+import x from "modules-skip-2.js";
+
+assertEquals(42, x);
+
+assertEquals(1, a);
+assertEquals(1, b);
+assertEquals(1, c);
+
+set_a(2);
+assertEquals(2, a);
+assertEquals(2, b);
+assertEquals(2, c);
+
+assertThrows(() => a = 3, TypeError);
+assertThrows(() => b = 3, TypeError);
+assertThrows(() => c = 3, TypeError);
+
+assertEquals(2, a);
+assertEquals(2, b);
+assertEquals(2, c);
+
+assertThrows(() => x = 43, TypeError);
+assertEquals(42, x);
diff --git a/deps/v8/test/mjsunit/modules-imports5.js b/deps/v8/test/mjsunit/modules-imports5.js
new file mode 100644
index 0000000000..b494c7e1a7
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-imports5.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a} from "modules-skip-3.js";
+export var b = 20;
+assertEquals(42, a+b);
diff --git a/deps/v8/test/mjsunit/modules-imports6.js b/deps/v8/test/mjsunit/modules-imports6.js
new file mode 100644
index 0000000000..4cb117a98d
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-imports6.js
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {b, c} from "modules-skip-4.js";
+import {a, set_a} from "modules-skip-4.js";
+
+assertEquals(1, a);
+assertEquals(1, b);
+assertEquals(1, c);
+
+set_a(2);
+assertEquals(2, a);
+assertEquals(2, b);
+assertEquals(2, c);
+
+assertThrows(() => a = 3, TypeError);
+assertThrows(() => b = 3, TypeError);
+assertThrows(() => c = 3, TypeError);
+
+assertEquals(2, a);
+assertEquals(2, b);
+assertEquals(2, c);
diff --git a/deps/v8/test/mjsunit/modules-imports7.js b/deps/v8/test/mjsunit/modules-imports7.js
new file mode 100644
index 0000000000..2501481797
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-imports7.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a} from "modules-skip-6.js";
+assertEquals(10, a);
diff --git a/deps/v8/test/mjsunit/modules-init1.js b/deps/v8/test/mjsunit/modules-init1.js
new file mode 100644
index 0000000000..fbc8df2cd1
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-init1.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import "modules-skip-init1.js";
+export function bar() { return 42 };
+bar = 5;
diff --git a/deps/v8/test/mjsunit/modules-init2.js b/deps/v8/test/mjsunit/modules-init2.js
new file mode 100644
index 0000000000..866cb2742a
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-init2.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {bar} from "modules-init1.js";
+assertEquals(5, bar);
diff --git a/deps/v8/test/mjsunit/modules-init3.js b/deps/v8/test/mjsunit/modules-init3.js
new file mode 100644
index 0000000000..36ff1e8ffa
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-init3.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {check} from "modules-skip-init3.js";
+
+assertSame(undefined, w);
+assertThrows(() => x, ReferenceError);
+assertThrows(() => y, ReferenceError);
+assertThrows(() => z, ReferenceError);
+
+export function* v() { return 40 }
+export var w = 41;
+export let x = 42;
+export class y {};
+export const z = "hello world";
+
+assertTrue(check());
diff --git a/deps/v8/test/mjsunit/modules-preparse.js b/deps/v8/test/mjsunit/modules-preparse.js
new file mode 100644
index 0000000000..6006ab2306
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-preparse.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+// Flags: --min-preparse-length=0
+
+let foo = 42;
+function testFoo(x) { assertEquals(x, foo); }
+testFoo(42);
+foo++;
+testFoo(43);
diff --git a/deps/v8/test/mjsunit/modules-skip-1.js b/deps/v8/test/mjsunit/modules-skip-1.js
new file mode 100644
index 0000000000..35731806bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-1.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default 42;
+export let a = 1;
+export {a as b};
+export function set_a(x) { a = x };
+export function get_a() { return a };
diff --git a/deps/v8/test/mjsunit/modules-skip-2.js b/deps/v8/test/mjsunit/modules-skip-2.js
new file mode 100644
index 0000000000..fdd576a988
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-2.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export {a as b, default} from "modules-skip-1.js";
+import {a as tmp} from "modules-skip-1.js";
+export {tmp as c};
diff --git a/deps/v8/test/mjsunit/modules-skip-3.js b/deps/v8/test/mjsunit/modules-skip-3.js
new file mode 100644
index 0000000000..38ead4923a
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-3.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {b} from "modules-imports5.js";
+export let a = 22;
+assertSame(undefined, b);
+assertEquals(22, a);
diff --git a/deps/v8/test/mjsunit/modules-skip-4.js b/deps/v8/test/mjsunit/modules-skip-4.js
new file mode 100644
index 0000000000..092e27b5fd
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-4.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export * from "modules-skip-1.js";
+export * from "modules-skip-2.js";
diff --git a/deps/v8/test/mjsunit/modules-skip-5.js b/deps/v8/test/mjsunit/modules-skip-5.js
new file mode 100644
index 0000000000..6a1ef495e6
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-5.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export function a() { return "ooo" }
diff --git a/deps/v8/test/mjsunit/modules-skip-6.js b/deps/v8/test/mjsunit/modules-skip-6.js
new file mode 100644
index 0000000000..4a0ef8da64
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-6.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export * from "modules-skip-1.js";
+export * from "modules-skip-5.js";
+export const a = 10;
diff --git a/deps/v8/test/mjsunit/modules-skip-7.js b/deps/v8/test/mjsunit/modules-skip-7.js
new file mode 100644
index 0000000000..9f46990373
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-7.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export * from "modules-skip-1.js"
+export * from "modules-skip-5.js"
diff --git a/deps/v8/test/mjsunit/modules-skip-circular-valid.js b/deps/v8/test/mjsunit/modules-skip-circular-valid.js
new file mode 100644
index 0000000000..c22544aed0
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-circular-valid.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export let a = {key: 'value'};
+import {b} from "modules-circular-valid.js";
+assertSame(a, b);
+assertEquals('value', a.key);
diff --git a/deps/v8/test/mjsunit/modules-skip-cyclic-3.js b/deps/v8/test/mjsunit/modules-skip-cyclic-3.js
new file mode 100644
index 0000000000..ced96270b1
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-cyclic-3.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export {y as a} from "modules-fail-cyclic-3.js";
+export {x as b} from "modules-fail-cyclic-3.js";
diff --git a/deps/v8/test/mjsunit/modules-skip-cyclic.js b/deps/v8/test/mjsunit/modules-skip-cyclic.js
new file mode 100644
index 0000000000..ad5d80608e
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-cyclic.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export {b as a} from "modules-fail-cyclic-2.js";
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name1.js b/deps/v8/test/mjsunit/modules-skip-default-name1.js
new file mode 100644
index 0000000000..30a95cd691
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-default-name1.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default function gaga() {}
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name2.js b/deps/v8/test/mjsunit/modules-skip-default-name2.js
new file mode 100644
index 0000000000..a064b0526d
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-default-name2.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default (function gaga() {})
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name3.js b/deps/v8/test/mjsunit/modules-skip-default-name3.js
new file mode 100644
index 0000000000..ed26e463bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-default-name3.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default (function () {})
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name4.js b/deps/v8/test/mjsunit/modules-skip-default-name4.js
new file mode 100644
index 0000000000..1c569bed56
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-default-name4.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default class Gaga { }
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name5.js b/deps/v8/test/mjsunit/modules-skip-default-name5.js
new file mode 100644
index 0000000000..df72336718
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-default-name5.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default (class Gaga { })
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name6.js b/deps/v8/test/mjsunit/modules-skip-default-name6.js
new file mode 100644
index 0000000000..02f562998d
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-default-name6.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default (class { })
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name7.js b/deps/v8/test/mjsunit/modules-skip-default-name7.js
new file mode 100644
index 0000000000..e4400ca409
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-default-name7.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default (class { static name() {return "yo"} })
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name8.js b/deps/v8/test/mjsunit/modules-skip-default-name8.js
new file mode 100644
index 0000000000..83e1ae43ff
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-default-name8.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default function() { }
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name9.js b/deps/v8/test/mjsunit/modules-skip-default-name9.js
new file mode 100644
index 0000000000..ac166f39f9
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-default-name9.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default class { }
diff --git a/deps/v8/test/mjsunit/modules-skip-empty-import-aux.js b/deps/v8/test/mjsunit/modules-skip-empty-import-aux.js
new file mode 100644
index 0000000000..1eb5b15159
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-empty-import-aux.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export let counter = 0;
+export function incr() { counter++ };
diff --git a/deps/v8/test/mjsunit/modules-skip-empty-import.js b/deps/v8/test/mjsunit/modules-skip-empty-import.js
new file mode 100644
index 0000000000..74d4d3ab48
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-empty-import.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {incr} from "modules-skip-empty-import-aux.js";
+incr();
diff --git a/deps/v8/test/mjsunit/modules-skip-init1.js b/deps/v8/test/mjsunit/modules-skip-init1.js
new file mode 100644
index 0000000000..4eb4a0a865
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-init1.js
@@ -0,0 +1,6 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {bar} from "modules-init1.js";
+assertEquals(42, bar());
diff --git a/deps/v8/test/mjsunit/modules-skip-init3.js b/deps/v8/test/mjsunit/modules-skip-init3.js
new file mode 100644
index 0000000000..eac1ae172b
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-init3.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {v, w, x, y, z} from "modules-init3.js";
+
+assertEquals({value: 40, done: true}, v().next());
+assertSame(undefined, w);
+assertThrows(() => x, ReferenceError);
+assertThrows(() => y, ReferenceError);
+assertThrows(() => z, ReferenceError);
+
+export function check() {
+ assertEquals({value: 40, done: true}, v().next());
+ assertEquals(41, w);
+ assertEquals(42, x);
+ assertEquals("y", y.name);
+ assertEquals("hello world", z);
+ return true;
+}
diff --git a/deps/v8/test/mjsunit/modules-skip-star-exports-conflict.js b/deps/v8/test/mjsunit/modules-skip-star-exports-conflict.js
new file mode 100644
index 0000000000..5cbcd85a33
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-star-exports-conflict.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export * from "modules-skip-1.js";
+export * from "modules-skip-5.js";
diff --git a/deps/v8/test/mjsunit/modules-skip-star-exports-cycle.js b/deps/v8/test/mjsunit/modules-skip-star-exports-cycle.js
new file mode 100644
index 0000000000..ab67ca803d
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-star-exports-cycle.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export * from "modules-skip-star-exports-cycle.js";
+export * from "modules-star-exports-cycle.js";
diff --git a/deps/v8/test/message/syntactic-tail-call-in-binop-rhs.js b/deps/v8/test/mjsunit/modules-star-exports-cycle.js
index a586cc84ee..6af3139af0 100644
--- a/deps/v8/test/message/syntactic-tail-call-in-binop-rhs.js
+++ b/deps/v8/test/mjsunit/modules-star-exports-cycle.js
@@ -1,14 +1,11 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// MODULE
-// Flags: --harmony-explicit-tailcalls
-"use strict";
+const bar = 42;
+export {bar as foo};
-function f() {
- return 1;
-}
-
-function g() {
- return b + continue f() ;
-}
+import {foo} from "modules-skip-star-exports-cycle.js";
+assertEquals(42, foo);
diff --git a/deps/v8/test/message/syntactic-tail-call-sloppy.js b/deps/v8/test/mjsunit/modules-this.js
index 3973fc6d18..2c8fc74fe7 100644
--- a/deps/v8/test/message/syntactic-tail-call-sloppy.js
+++ b/deps/v8/test/mjsunit/modules-this.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-explicit-tailcalls
+// MODULE
-function g() {
- return continue f() ;
-}
+assertEquals(undefined, this);
diff --git a/deps/v8/test/mjsunit/regexp-lastIndex.js b/deps/v8/test/mjsunit/regexp-lastIndex.js
new file mode 100644
index 0000000000..16b9f86d9b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-lastIndex.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// lastIndex is set only for global or sticky RegExps. On failure to find
+// a match, it is set to 0. If a set fails, then it acts as if in strict mode
+// and throws.
+
+var re = /x/g;
+Object.defineProperty(re, 'lastIndex', {writable: false});
+assertThrows(() => re.exec(""), TypeError);
+assertThrows(() => re.exec("x"), TypeError);
+
+var re = /x/y;
+Object.defineProperty(re, 'lastIndex', {writable: false});
+assertThrows(() => re.exec(""), TypeError);
+assertThrows(() => re.exec("x"), TypeError);
+
+var re = /x/;
+Object.defineProperty(re, 'lastIndex', {writable: false});
+assertEquals(null, re.exec(""));
+assertEquals(["x"], re.exec("x"));
diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js
index 1a5de2addf..ddaf022d19 100644
--- a/deps/v8/test/mjsunit/regexp.js
+++ b/deps/v8/test/mjsunit/regexp.js
@@ -564,21 +564,21 @@ log = [];
re.lastIndex = fakeLastIndex;
var result = re.exec(fakeString);
assertEquals(["str"], result);
-assertEquals(["ts", "li"], log);
+assertEquals(["ts"], log);
// Again, to check if caching interferes.
log = [];
re.lastIndex = fakeLastIndex;
result = re.exec(fakeString);
assertEquals(["str"], result);
-assertEquals(["ts", "li"], log);
+assertEquals(["ts"], log);
// And one more time, just to be certain.
log = [];
re.lastIndex = fakeLastIndex;
result = re.exec(fakeString);
assertEquals(["str"], result);
-assertEquals(["ts", "li"], log);
+assertEquals(["ts"], log);
// Now with a global regexp, where lastIndex is actually used.
re = /str/g;
@@ -726,3 +726,8 @@ assertEquals(["a", "", ""], /(\2).(\1)/.exec("aba"));
assertEquals(["aba", "a", "a"], /(.\2).(\1)/.exec("aba"));
assertEquals(["acbc", "c", "c"], /a(.\2)b(\1)$/.exec("acbc"));
assertEquals(["acbc", "c", "c"], /a(.\2)b(\1)/.exec("aabcacbc"));
+
+// Test surrogate pair detection in split.
+// \u{daff}\u{e000} is not a surrogate pair, while \u{daff}\u{dfff} is.
+assertEquals(["\u{daff}", "\u{e000}"], "\u{daff}\u{e000}".split(/[a-z]{0,1}/u));
+assertEquals(["\u{daff}\u{dfff}"], "\u{daff}\u{dfff}".split(/[a-z]{0,1}/u));
diff --git a/deps/v8/test/mjsunit/regress/regress-2437.js b/deps/v8/test/mjsunit/regress/regress-2437.js
index c82293ae32..66f0abfef5 100644
--- a/deps/v8/test/mjsunit/regress/regress-2437.js
+++ b/deps/v8/test/mjsunit/regress/regress-2437.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Summary of the spec: lastIndex is reset to 0 if
-// - a regexp fails to match, regardless of global or non-global.
+// - a global or sticky regexp fails to match.
// - a global regexp is used in a function that returns multiple results,
// such as String.prototype.replace or String.prototype.match, since it
// repeats the regexp until it fails to match.
@@ -37,19 +37,19 @@
r = /a/;
r.lastIndex = 1;
r.exec("zzzz");
-assertEquals(0, r.lastIndex);
+assertEquals(1, r.lastIndex);
// Test Regexp.prototype.test
r = /a/;
r.lastIndex = 1;
r.test("zzzz");
-assertEquals(0, r.lastIndex);
+assertEquals(1, r.lastIndex);
// Test String.prototype.match
r = /a/;
r.lastIndex = 1;
"zzzz".match(r);
-assertEquals(0, r.lastIndex);
+assertEquals(1, r.lastIndex);
// Test String.prototype.replace with atomic regexp and empty string.
r = /a/;
@@ -116,7 +116,7 @@ assertEquals(-1, r.lastIndex);
r.lastIndex = -1;
"01234567".match(r);
-assertEquals(0, r.lastIndex);
+assertEquals(-1, r.lastIndex);
// Also test RegExp.prototype.exec and RegExp.prototype.test
r = /a/g;
@@ -131,7 +131,7 @@ assertEquals(5, r.lastIndex);
r = /a/;
r.lastIndex = 1;
r.exec("01234567");
-assertEquals(0, r.lastIndex);
+assertEquals(1, r.lastIndex);
r.lastIndex = 1;
r.exec("0123abcd");
@@ -149,7 +149,7 @@ assertEquals(5, r.lastIndex);
r = /a/;
r.lastIndex = 1;
r.test("01234567");
-assertEquals(0, r.lastIndex);
+assertEquals(1, r.lastIndex);
r.lastIndex = 1;
r.test("0123abcd");
diff --git a/deps/v8/test/mjsunit/regress/regress-2438.js b/deps/v8/test/mjsunit/regress/regress-2438.js
index f694ff8e19..51092788ac 100644
--- a/deps/v8/test/mjsunit/regress/regress-2438.js
+++ b/deps/v8/test/mjsunit/regress/regress-2438.js
@@ -27,14 +27,20 @@
function testSideEffects(subject, re) {
var counter = 0;
+ var expected_counter = 0;
+ const accesses_lastindex = (re.global || re.sticky);
var side_effect_object = { valueOf: function() { return counter++; } };
re.lastIndex = side_effect_object;
re.exec(subject);
- assertEquals(1, counter);
+
+ if (accesses_lastindex) expected_counter++;
+ assertEquals(expected_counter, counter);
re.lastIndex = side_effect_object;
re.test(subject);
- assertEquals(2, counter);
+
+ if (accesses_lastindex) expected_counter++;
+ assertEquals(expected_counter, counter);
}
testSideEffects("zzzz", /a/);
diff --git a/deps/v8/test/mjsunit/regress/regress-353551.js b/deps/v8/test/mjsunit/regress/regress-353551.js
index ea5a234658..8897448e9c 100644
--- a/deps/v8/test/mjsunit/regress/regress-353551.js
+++ b/deps/v8/test/mjsunit/regress/regress-353551.js
@@ -30,7 +30,7 @@ function __f_3(x) {
var __v_1 = arguments;
__v_1[1000] = 123;
depth++;
- if (depth > 2500) return;
+ if (depth > 2400) return;
function __f_4() {
++__v_1[0];
__f_3(0.5);
diff --git a/deps/v8/test/mjsunit/bugs/bug-4577.js b/deps/v8/test/mjsunit/regress/regress-4577.js
index de2f843965..ea46fdd2b0 100644
--- a/deps/v8/test/mjsunit/bugs/bug-4577.js
+++ b/deps/v8/test/mjsunit/regress/regress-4577.js
@@ -11,3 +11,11 @@ function g({arguments}) {
return arguments === 42;
}
assertTrue(g({arguments: 42}));
+
+function foo() {
+ let arguments = 2;
+ return arguments;
+}
+assertEquals(2, foo());
+
+assertThrows(function(x = arguments, arguments) {}, ReferenceError);
diff --git a/deps/v8/test/mjsunit/regress/regress-5332.js b/deps/v8/test/mjsunit/regress/regress-5332.js
new file mode 100644
index 0000000000..f0a0ef9ac8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5332.js
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ function foo() {
+ var a = new Array(2);
+ a[1] = 1.5;
+ return a;
+ }
+
+ assertEquals(undefined, foo()[0]);
+ assertEquals(undefined, foo()[0]);
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo()[0]);
+})();
+
+(function() {
+ function foo() {
+ var a = Array(2);
+ a[1] = 1.5;
+ return a;
+ }
+
+ assertEquals(undefined, foo()[0]);
+ assertEquals(undefined, foo()[0]);
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo()[0]);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-5351.js b/deps/v8/test/mjsunit/regress/regress-5351.js
new file mode 100644
index 0000000000..c9e6d3153a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5351.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var re = /[bc]/;
+var str = "baba";
+
+assertEquals(["", "a", "a"], str.split(re));
+
+// Force slow path.
+re.exec = (string) => RegExp.prototype.exec.call(re, string);
+assertEquals(["", "a", "a"], str.split(re));
diff --git a/deps/v8/test/mjsunit/compiler/regress-645179.js b/deps/v8/test/mjsunit/regress/regress-5357.js
index 47a7d9bb21..11ada60708 100644
--- a/deps/v8/test/mjsunit/compiler/regress-645179.js
+++ b/deps/v8/test/mjsunit/regress/regress-5357.js
@@ -5,18 +5,13 @@
// Flags: --allow-natives-syntax
function foo(a) {
- return a.x === a.y;
+ a++;
+ a = Math.max(0, a);
+ a++;
+ return a;
}
-function A() { }
-
-var o = new A;
-
-var a = {x: o}
-o.x = 0;
-a.y = o;
-
-assertTrue(foo(a));
-assertTrue(foo(a));
+foo(0);
+foo(0);
%OptimizeFunctionOnNextCall(foo);
-assertTrue(foo(a));
+assertEquals(2147483648, foo(2147483646));
diff --git a/deps/v8/test/mjsunit/regress/regress-5380.js b/deps/v8/test/mjsunit/regress/regress-5380.js
new file mode 100644
index 0000000000..9881108379
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5380.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --ignition-staging --turbo
+
+function f(do_osr) {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1 && do_osr) %OptimizeOsr();
+ }
+}
+
+f(false);
+f(false);
+%BaselineFunctionOnNextCall(f);
+f(false);
+f(true);
diff --git a/deps/v8/test/mjsunit/regress/regress-5404.js b/deps/v8/test/mjsunit/regress/regress-5404.js
new file mode 100644
index 0000000000..652db4bdb5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5404.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(a, b) {
+ return a + "0123456789012";
+}
+
+foo("a");
+foo("a");
+%OptimizeFunctionOnNextCall(foo);
+foo("a");
+
+var a = "a".repeat(268435440);
+assertThrows(function() { foo(a); });
+
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(function() { foo(a); });
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-5405.js b/deps/v8/test/mjsunit/regress/regress-5405.js
new file mode 100644
index 0000000000..dd5c3c0674
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5405.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await --allow-natives-syntax
+
+let log = [];
+
+(async function() {
+ with ({get ['.promise']() { log.push('async') }}) {
+ return 10;
+ }
+})();
+%RunMicrotasks();
+
+(function() {
+ with ({get ['.new.target']() { log.push('new.target') }}) {
+ return new.target;
+ }
+})();
+
+(function() {
+ with ({get ['this']() { log.push('this') }}) {
+ return this;
+ }
+})();
+
+assertArrayEquals([], log);
diff --git a/deps/v8/test/mjsunit/regress/regress-5440.js b/deps/v8/test/mjsunit/regress/regress-5440.js
new file mode 100644
index 0000000000..4182146b1a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5440.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo --always-opt
+
+// The rightmost cons string is created first, resulting in an empty left part.
+eval(" " + ("" + "try {;} catch (_) {}"));
diff --git a/deps/v8/test/mjsunit/regress/regress-5559.js b/deps/v8/test/mjsunit/regress/regress-5559.js
new file mode 100644
index 0000000000..c6f32575f5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5559.js
@@ -0,0 +1,38 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+
+var exception = null;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ print(event_data.sourceLineText());
+ assertTrue(
+ event_data.sourceLineText().indexOf(`Break ${break_count++}.`) > 0);
+ exec_state.prepareStep(Debug.StepAction.StepOut);
+ } catch (e) {
+ exception = e;
+ }
+};
+
+function thrower() {
+ try {
+ debugger; // Break 0.
+ throw 'error';
+ } catch (err) {
+ }
+}
+
+
+Debug.setListener(listener);
+thrower();
+Debug.setListener(null); // Break 1.
+
+assertNull(exception);
+assertEquals(2, break_count);
diff --git a/deps/v8/test/mjsunit/regress/regress-5648.js b/deps/v8/test/mjsunit/regress/regress-5648.js
new file mode 100644
index 0000000000..3e9ea07456
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5648.js
@@ -0,0 +1,35 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var iter = {}
+iter[Symbol.iterator] = () => ({
+ next: () => ({}),
+ return: () => {throw 666}
+});
+
+
+function* foo() {
+ for (let x of iter) {throw 42}
+}
+assertThrowsEquals(() => foo().next(), 42);
+
+
+function* bar() {
+ let x;
+ { let gaga = () => {x};
+ [[x]] = iter;
+ }
+}
+assertThrows(() => bar().next(), TypeError);
+
+
+function baz() {
+ let x;
+ { let gaga = () => {x};
+ let gugu = () => {gaga};
+ [[x]] = iter;
+ }
+}
+assertThrows(baz, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-642409.js b/deps/v8/test/mjsunit/regress/regress-642409.js
new file mode 100644
index 0000000000..eceb070a7f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-642409.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+class SuperClass {
+}
+
+class SubClass extends SuperClass {
+ constructor() {
+ super();
+ this.doSomething();
+ }
+ doSomething() {
+ }
+}
+
+new SubClass();
+new SubClass();
+%OptimizeFunctionOnNextCall(SubClass);
+new SubClass();
diff --git a/deps/v8/test/mjsunit/regress/regress-645680.js b/deps/v8/test/mjsunit/regress/regress-645680.js
new file mode 100644
index 0000000000..b244d9c047
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-645680.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+//
+function getRandomProperty(v, rand) {
+ var properties = Object.getOwnPropertyNames(v);
+ if ("constructor" && v.constructor.hasOwnProperty()) {; }
+ if (properties.length == 0) { return "0"; }
+ return properties[rand % properties.length];
+}
+
+var __v_18 = (function( b) { return arguments; })("foo", NaN, "bar");
+__v_18.__p_293850326 = "foo";
+__v_18.__defineGetter__(getRandomProperty( 990787501), function() {
+ gc();
+ return __v_18.__p_293850326;
+});
+Array.prototype.indexOf.call(__v_18)
diff --git a/deps/v8/test/mjsunit/regress/regress-648373-sloppy-arguments-includesValues.js b/deps/v8/test/mjsunit/regress/regress-648373-sloppy-arguments-includesValues.js
new file mode 100644
index 0000000000..d586b80495
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-648373-sloppy-arguments-includesValues.js
@@ -0,0 +1,33 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+function getRandomProperty(v, rand) { var properties = Object.getOwnPropertyNames(v); var proto = Object.getPrototypeOf(v); if (proto) {; } if ("constructor" && v.constructor.hasOwnProperty()) {; } if (properties.length == 0) { return "0"; } return properties[rand % properties.length]; }
+var __v_4 = {};
+
+__v_2 = {
+ FAST_ELEMENTS() {
+ return {
+ get 0() {
+ } };
+ } ,
+ Arguments: {
+ FAST_SLOPPY_ARGUMENTS_ELEMENTS() {
+ var __v_11 = (function( b) { return arguments; })("foo", NaN, "bar");
+ __v_11.__p_2006760047 = __v_11[getRandomProperty( 2006760047)];
+ __v_11.__defineGetter__(getRandomProperty( 1698457573), function() { gc(); __v_4[ 1486458228] = __v_2[ 1286067691]; return __v_11.__p_2006760047; });
+;
+Array.prototype.includes.call(__v_11);
+ },
+ Detached_Float64Array() {
+ } }
+};
+function __f_3(suites) {
+ Object.keys(suites).forEach(suite => __f_4(suites[suite]));
+ function __f_4(suite) {
+ Object.keys(suite).forEach(test => suite[test]());
+ }
+}
+__f_3(__v_2);
diff --git a/deps/v8/test/mjsunit/regress/regress-649067.js b/deps/v8/test/mjsunit/regress/regress-649067.js
new file mode 100644
index 0000000000..cd75004230
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-649067.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals(1, (function arguments() { return eval("arguments"); })(1)[0]);
diff --git a/deps/v8/test/message/syntactic-tail-call-in-logical-and.js b/deps/v8/test/mjsunit/regress/regress-649078.js
index 2c62ddcb21..f4f6e1b136 100644
--- a/deps/v8/test/message/syntactic-tail-call-in-logical-and.js
+++ b/deps/v8/test/mjsunit/regress/regress-649078.js
@@ -2,13 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-explicit-tailcalls
-"use strict";
+let p = Promise.resolve();
+Object.defineProperty(p, 'then', {
+ get: () => new Proxy(function() {}, p)
+});
-function f() {
- return 1;
-}
-
-function g() {
- return continue f() && a ;
-}
+new Promise((r) => r(p));
diff --git a/deps/v8/test/mjsunit/regress/regress-651327.js b/deps/v8/test/mjsunit/regress/regress-651327.js
new file mode 100644
index 0000000000..f7ac24f95a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-651327.js
@@ -0,0 +1,217 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+
+function __f_1(a) {
+ __v_1 = a;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ gc();
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = -1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ gc();
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 0;
+ gc();
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ gc();
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ __f_3();
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = -1073741825;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = -7;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ __f_3();
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 17;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ gc();
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 0;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ gc();
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 65535;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = -13;
+ x = 1;
+ x = 1;
+ this.mapHeight * Math.round();
+}
+__f_1();
+function __f_2(initialX, initialY) {
+}
+function __f_3() {
+}
+gc();
+__f_1();
diff --git a/deps/v8/test/mjsunit/regress/regress-655573.js b/deps/v8/test/mjsunit/regress/regress-655573.js
new file mode 100644
index 0000000000..31f8ffe32d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-655573.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generate a function with a very large closure.
+source = "(function() {\n"
+for (var i = 0; i < 65000; i++) {
+ source += " var a_" + i + " = 0;\n";
+}
+source += " return function() {\n"
+for (var i = 0; i < 65000; i++) {
+ source += "a_" + i + "++;\n";
+}
+source += "}})();\n"
+
+eval(source);
diff --git a/deps/v8/test/mjsunit/regress/regress-662935.js b/deps/v8/test/mjsunit/regress/regress-662935.js
new file mode 100644
index 0000000000..b15f83a1e9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-662935.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+function overflow() {
+ return new Promise(function foo() { foo() });
+}
+
+function listener(event, exec_state, event_data, data) { }
+
+Debug.setListener(listener);
+
+assertEquals(Promise, overflow().constructor);
diff --git a/deps/v8/test/mjsunit/regress/regress-666046.js b/deps/v8/test/mjsunit/regress/regress-666046.js
new file mode 100644
index 0000000000..b4615383e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-666046.js
@@ -0,0 +1,57 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+function P() {
+ this.a0 = {};
+ this.a1 = {};
+ this.a2 = {};
+ this.a3 = {};
+ this.a4 = {};
+}
+
+function A() {
+}
+
+var proto = new P();
+A.prototype = proto;
+
+function foo(o) {
+ return o.a0;
+}
+
+// Ensure |proto| is in old space.
+gc();
+gc();
+gc();
+
+// Ensure |proto| is marked as "should be fast".
+var o = new A();
+foo(o);
+foo(o);
+foo(o);
+assertTrue(%HasFastProperties(proto));
+
+// Contruct a double value that looks like a tagged pointer.
+var buffer = new ArrayBuffer(8);
+var int32view = new Int32Array(buffer);
+var float64view = new Float64Array(buffer);
+int32view[0] = int32view[1] = 0x40000001;
+var boom = float64view[0];
+
+
+// Write new space object.
+proto.a4 = {a: 0};
+// Immediately delete the field.
+delete proto.a4;
+
+// |proto| must sill be fast.
+assertTrue(%HasFastProperties(proto));
+
+// Add a double field instead of deleted a4 that looks like a tagged pointer.
+proto.boom = boom;
+
+// Boom!
+gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-abort-context-allocate-params.js b/deps/v8/test/mjsunit/regress/regress-abort-context-allocate-params.js
new file mode 100644
index 0000000000..3089f7817a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-abort-context-allocate-params.js
@@ -0,0 +1,941 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(getter) {
+ arguments = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-abort-preparsing-params.js b/deps/v8/test/mjsunit/regress/regress-abort-preparsing-params.js
new file mode 100644
index 0000000000..d2bdc5084d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-abort-preparsing-params.js
@@ -0,0 +1,946 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var outer_a;
+
+function f(a) {
+ outer_a = a;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+ x = 1;
+}
+f(1);
+assertEquals(1, outer_a);
diff --git a/deps/v8/test/mjsunit/regress/regress-arguments-liveness-analysis.js b/deps/v8/test/mjsunit/regress/regress-arguments-liveness-analysis.js
new file mode 100644
index 0000000000..95c2c00262
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-arguments-liveness-analysis.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function r(v) { return v.f }
+function h() { }
+function y(v) {
+ var x = arguments;
+ h.apply(r(v), x);
+};
+
+y({f:3});
+y({f:3});
+y({f:3});
+
+%OptimizeFunctionOnNextCall(y);
+
+y({ f : 3, u : 4 });
diff --git a/deps/v8/test/mjsunit/regress/regress-cr-658267.js b/deps/v8/test/mjsunit/regress/regress-cr-658267.js
new file mode 100644
index 0000000000..9caada7954
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-cr-658267.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("class D extends async() =>", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631027.js b/deps/v8/test/mjsunit/regress/regress-crbug-631027.js
new file mode 100644
index 0000000000..f3d04b8efd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631027.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-escape
+
+function f() {
+ with ({ value:"foo" }) { return value; }
+}
+assertEquals("foo", f());
+%OptimizeFunctionOnNextCall(f);
+assertEquals("foo", f());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-635798.js b/deps/v8/test/mjsunit/regress/regress-crbug-635798.js
index 5456682ddc..0a89aa19f7 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-635798.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-635798.js
@@ -8,7 +8,7 @@ function foo() {
var x = [];
var y = [];
x.__proto__ = y;
- for (var i = 0; i < 200000; ++i) {
+ for (var i = 0; i < 10000; ++i) {
y[i] = 1;
}
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-635923.js b/deps/v8/test/mjsunit/regress/regress-crbug-635923.js
new file mode 100644
index 0000000000..aea5e3a97c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-635923.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --ignition --turbo-from-bytecode --turbo-filter=f
+
+function f(x) { return x + 23 }
+function g(x) { return f(x) + 42 }
+
+assertEquals(23, f(0));
+assertEquals(24, f(1));
+assertEquals(67, g(2));
+assertEquals(68, g(3));
+
+// Optimize {g} with Crankshaft, causing {f} to be inlined.
+%OptimizeFunctionOnNextCall(g);
+assertEquals(65, g(0));
+
+// Optimize {f} with Turbofan, after it has been inlined.
+%OptimizeFunctionOnNextCall(f);
+assertEquals(23, f(0));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-640497.js b/deps/v8/test/mjsunit/regress/regress-crbug-640497.js
new file mode 100644
index 0000000000..c637badb66
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-640497.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo --turbo-escape
+
+// Warm up {g} with arrays and strings.
+function g(v) { return v.length; }
+assertEquals(1, g("x"));
+assertEquals(2, g("xy"));
+assertEquals(1, g([1]));
+assertEquals(2, g([1,2]));
+
+// Inline into {f}, where we see only an array.
+function f() { assertEquals(0, g([])); }
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-643073.js b/deps/v8/test/mjsunit/regress/regress-crbug-643073.js
new file mode 100644
index 0000000000..1301ddd184
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-643073.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+for (i in [0,0]) {}
+function foo() {
+ i = 0;
+ return i < 0;
+}
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/message/syntactic-tail-call-without-return.js b/deps/v8/test/mjsunit/regress/regress-crbug-644111.js
index 130f67dafc..2f77590850 100644
--- a/deps/v8/test/message/syntactic-tail-call-without-return.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-644111.js
@@ -2,13 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-explicit-tailcalls
-"use strict";
+// Flags: --ignition-staging --turbo --validate-asm --always-opt
-function f() {
- return 1;
-}
-
-function g() {
- var x = continue f() ;
+function Module() {
+ "use asm";
+ return {};
}
+var m = Module();
diff --git a/deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.js b/deps/v8/test/mjsunit/regress/regress-crbug-644245.js
index 3aa35a12b4..7f4e00599e 100644
--- a/deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-644245.js
@@ -2,19 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-explicit-tailcalls
-"use strict";
+// Flags: --allow-natives-syntax --turbo --turbo-escape
function f() {
- return 1;
-}
-
-function g() {
try {
- f();
+ throw "boom";
} catch(e) {
- return continue f() ;
- } finally {
- f();
+ %_DeoptimizeNow();
}
}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/message/syntactic-tail-call-in-binop-lhs.js b/deps/v8/test/mjsunit/regress/regress-crbug-644631.js
index 58d4c957ac..5e649a4946 100644
--- a/deps/v8/test/message/syntactic-tail-call-in-binop-lhs.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-644631.js
@@ -2,13 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-explicit-tailcalls
-"use strict";
+// Flags: --turbo --always-opt
function f() {
- return 1;
+ new Int8Array(new ArrayBuffer(2147483648));
}
-function g() {
- return continue f() - a ;
-}
+// Should not crash
+assertThrows(f, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-645103.js b/deps/v8/test/mjsunit/regress/regress-crbug-645103.js
new file mode 100644
index 0000000000..4f5f662901
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-645103.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --ignition-staging --turbo
+
+class Base {}
+class Subclass extends Base {
+ constructor() {
+ %DeoptimizeNow();
+ super();
+ }
+}
+new Subclass();
+new Subclass();
+%OptimizeFunctionOnNextCall(Subclass);
+new Subclass();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-645888.js b/deps/v8/test/mjsunit/regress/regress-crbug-645888.js
new file mode 100644
index 0000000000..267fc2fefc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-645888.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --ignition-staging
+
+function f() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ break; // Trigger next loop.
+ }
+ }
+ while (true) {
+ throw "no loop, thank you";
+ }
+}
+assertThrows(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-647217.js b/deps/v8/test/mjsunit/regress/regress-crbug-647217.js
new file mode 100644
index 0000000000..390eccee67
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-647217.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --stack-size=100 --ignition-staging --turbo
+
+var source = "return 1" + new Array(2048).join(' + a') + "";
+eval("function g(a) {" + source + "}");
+%SetForceInlineFlag(g);
+
+function f(a) { return g(a) }
+%OptimizeFunctionOnNextCall(f);
+try { f(0) } catch(e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-648737.js b/deps/v8/test/mjsunit/regress/regress-crbug-648737.js
new file mode 100644
index 0000000000..22f8551543
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-648737.js
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo --turbo-escape
+
+function f(str) {
+ var s = "We turn {" + str + "} into a ConsString now";
+ return s.length;
+}
+assertEquals(33, f("a"));
+assertEquals(33, f("b"));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(33, f("c"));
+
+function g(str) {
+ var s = "We also try to materalize {" + str + "} when deopting";
+ %DeoptimizeNow();
+ return s.length;
+}
+assertEquals(43, g("a"));
+assertEquals(43, g("b"));
+%OptimizeFunctionOnNextCall(g);
+assertEquals(43, g("c"));
diff --git a/deps/v8/test/message/syntactic-tail-call-in-extends.js b/deps/v8/test/mjsunit/regress/regress-crbug-648740.js
index 86bf77ebbe..e52d899852 100644
--- a/deps/v8/test/message/syntactic-tail-call-in-extends.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-648740.js
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-explicit-tailcalls
-"use strict";
+// Flags: --min-preparse-length=0
-function g() {
- return class A extends continue f() {};
-}
+(function () {
+ function foo() {
+ const arguments = 42;
+ }
+})()
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-650404.js b/deps/v8/test/mjsunit/regress/regress-crbug-650404.js
new file mode 100644
index 0000000000..ebf14e69b3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-650404.js
@@ -0,0 +1,36 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function c4(w, h) {
+ var size = w * h;
+ if (size < 0) size = 0;
+ return new Uint32Array(size);
+}
+
+for (var i = 0; i < 3; i++) {
+ // Computing -0 as the result makes the "size = w * h" multiplication IC
+ // go into double mode.
+ c4(0, -1);
+}
+// Optimize Uint32ConstructFromLength.
+for (var i = 0; i < 1000; i++) c4(2, 2);
+
+// This array will have a HeapNumber as its length:
+var bomb = c4(2, 2);
+
+function reader(o, i) {
+ // Dummy try-catch, so that TurboFan is used to optimize this.
+ try {} catch(e) {}
+ return o[i];
+}
+// Optimize reader!
+for (var i = 0; i < 3; i++) reader(bomb, 0);
+%OptimizeFunctionOnNextCall(reader);
+reader(bomb, 0);
+
+for (var i = bomb.length; i < 100; i++) {
+ assertEquals(undefined, reader(bomb, i));
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-650933.js b/deps/v8/test/mjsunit/regress/regress-crbug-650933.js
new file mode 100644
index 0000000000..e579f0d0ab
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-650933.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+var o = {length: 1e40};
+try { new Uint8Array(o); } catch (e) { }
+new Float64Array(a);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-650973.js b/deps/v8/test/mjsunit/regress/regress-crbug-650973.js
new file mode 100644
index 0000000000..9c2d8c6747
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-650973.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var v = {p:0};
+// Turn the object into dictionary mode.
+v.__defineGetter__("p", function() { return 13; });
+
+function f() {
+ var boom = (v.foo = v);
+ assertEquals(v, boom.foo);
+}
+
+f();
+f();
+f();
diff --git a/deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.js b/deps/v8/test/mjsunit/regress/regress-crbug-651403-global.js
index 5b000f1181..776bdcfc87 100644
--- a/deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-651403-global.js
@@ -2,21 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-explicit-tailcalls
-"use strict";
+// Flags: --ignition-staging --turbo --always-opt
-function f() {
- return 1;
-}
+x = "";
-function g() {
- try {
+function f () {
+ function g() {
try {
- f();
+ eval('');
+ return x;
} catch(e) {
- return continue f() ;
}
- } finally {
- f();
}
+ return g();
}
+
+f();
diff --git a/deps/v8/test/message/syntactic-tail-call-in-subclass.js b/deps/v8/test/mjsunit/regress/regress-crbug-651403.js
index ab788406d2..b549c56baf 100644
--- a/deps/v8/test/message/syntactic-tail-call-in-subclass.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-651403.js
@@ -2,14 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-explicit-tailcalls
-"use strict";
+// Flags: --ignition-staging --turbo --always-opt
-function g() {
- class A {};
- class B extends A {
- constructor() {
- return continue f() ;
+function f () {
+ var x = "";
+ function g() {
+ try {
+ eval('');
+ return x;
+ } catch(e) {
}
}
+ return g();
}
+
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-652186-global.js b/deps/v8/test/mjsunit/regress/regress-crbug-652186-global.js
new file mode 100644
index 0000000000..0e869e2ff0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-652186-global.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt
+
+x = 1;
+print(eval("eval('var x = 2'); x;"));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-652186-local.js b/deps/v8/test/mjsunit/regress/regress-crbug-652186-local.js
new file mode 100644
index 0000000000..39f283432d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-652186-local.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt
+
+function f() {
+ var x = 1;
+ return eval("eval('var x = 2'); x;");
+}
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-654723.js b/deps/v8/test/mjsunit/regress/regress-crbug-654723.js
new file mode 100644
index 0000000000..fa81233522
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-654723.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var k = "0101010101010101" + "01010101";
+
+function foo(s) {
+ return k + s;
+}
+
+foo("a");
+foo("a");
+%OptimizeFunctionOnNextCall(foo);
+var x = foo("");
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-657478.js b/deps/v8/test/mjsunit/regress/regress-crbug-657478.js
new file mode 100644
index 0000000000..0827a7a1c7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-657478.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(o) { return %_ToLength(o.length); }
+
+foo(new Array(4));
+foo(new Array(Math.pow(2, 32) - 1));
+foo({length: 10});
+%OptimizeFunctionOnNextCall(foo);
+foo({length: 10});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-663402.js b/deps/v8/test/mjsunit/regress/regress-crbug-663402.js
new file mode 100644
index 0000000000..5368bd6e80
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-663402.js
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var g_eval = eval;
+function emit_f(size) {
+ var body = "function f(x) {" +
+ " if (x < 0) return x;" +
+ " var a = [1];" +
+ " if (x > 0) return [";
+ for (var i = 0; i < size; i++) {
+ body += "0.1, ";
+ }
+ body += " ];" +
+ " return a;" +
+ "}";
+ g_eval(body);
+}
+
+// Length must be big enough to make the backing store's size not fit into
+// a single instruction's immediate field (2^12).
+var kLength = 701;
+emit_f(kLength);
+f(1);
+f(1);
+%OptimizeFunctionOnNextCall(f);
+var a = f(1);
+
+// Allocating something else should not disturb |a|.
+var b = new Object();
+for (var i = 0; i < kLength; i++) {
+ assertEquals(0.1, a[i]);
+}
+
+// Allocating more should not crash.
+for (var i = 0; i < 300; i++) {
+ f(1);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-667689.js b/deps/v8/test/mjsunit/regress/regress-crbug-667689.js
new file mode 100644
index 0000000000..e83c40eeda
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-667689.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {}
+foo.__defineGetter__(undefined, function() {})
+
+function bar() {}
+function baz(x) { return x instanceof bar };
+%OptimizeFunctionOnNextCall(baz);
+baz();
+Object.setPrototypeOf(bar, null);
+bar[Symbol.hasInstance] = function() { return true };
+assertTrue(baz());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-673008.js b/deps/v8/test/mjsunit/regress/regress-crbug-673008.js
new file mode 100644
index 0000000000..4e232fa99c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-673008.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = {
+ "33": true,
+ "-1": true
+};
+
+var strkeys = Object.keys(a).map(function(k) { return "" + k });
+var numkeys = Object.keys(a).map(function(k) { return +k });
+var keys = strkeys.concat(numkeys);
+
+keys.forEach(function(k) {
+ assertTrue(a.hasOwnProperty(k),
+ "property not found: " + k + "(" + (typeof k) + ")");
+});
+
+var b = {};
+b.__proto__ = a;
+keys.forEach(function(k) {
+ assertTrue(k in b, "property not found: " + k + "(" + (typeof k) + ")");
+});
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-02256.js b/deps/v8/test/mjsunit/regress/wasm/regression-02256.js
new file mode 100644
index 0000000000..27764a22ce
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-02256.js
@@ -0,0 +1,967 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --random-seed=891196975 --expose-gc --allow-natives-syntax
+// Flags: --gc-interval=207 --stress-compaction --validate-asm
+//
+// /v8/test/mjsunit/wasm/grow-memory.js
+// /v8/test/mjsunit/regress/regress-540.js
+// /v8/test/mjsunit/regress/wasm/regression-02862.js
+// /v8/test/mjsunit/regress/regress-2813.js
+// /v8/test/mjsunit/regress/regress-323845.js
+// Begin stripped down and modified version of mjsunit.js for easy minimization in CF.
+
+function MjsUnitAssertionError(message) {}
+MjsUnitAssertionError.prototype.toString = function() {
+ return this.message;
+};
+var assertSame;
+var assertEquals;
+var assertEqualsDelta;
+var assertArrayEquals;
+var assertPropertiesEqual;
+var assertToStringEquals;
+var assertTrue;
+var assertFalse;
+var triggerAssertFalse;
+var assertNull;
+var assertNotNull;
+var assertThrows;
+var assertDoesNotThrow;
+var assertInstanceof;
+var assertUnreachable;
+var assertOptimized;
+var assertUnoptimized;
+
+function classOf(object) {
+ var string = Object.prototype.toString.call(object);
+ return string.substring(8, string.length - 1);
+}
+
+function PrettyPrint(value) {
+ return "";
+}
+
+function PrettyPrintArrayElement(value, index, array) {
+ return "";
+}
+
+function fail(expectedText, found, name_opt) {}
+
+function deepObjectEquals(a, b) {
+ var aProps = Object.keys(a);
+ aProps.sort();
+ var bProps = Object.keys(b);
+ bProps.sort();
+ if (!deepEquals(aProps, bProps)) {
+ return false;
+ }
+ for (var i = 0; i < aProps.length; i++) {
+ if (!deepEquals(a[aProps[i]], b[aProps[i]])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+function deepEquals(a, b) {
+ if (a === b) {
+ if (a === 0) return (1 / a) === (1 / b);
+ return true;
+ }
+ if (typeof a != typeof b) return false;
+ if (typeof a == "number") return isNaN(a) && isNaN(b);
+ if (typeof a !== "object" && typeof a !== "function") return false;
+ var objectClass = classOf(a);
+ if (objectClass !== classOf(b)) return false;
+ if (objectClass === "RegExp") {
+ return (a.toString() === b.toString());
+ }
+ if (objectClass === "Function") return false;
+ if (objectClass === "Array") {
+ var elementCount = 0;
+ if (a.length != b.length) {
+ return false;
+ }
+ for (var i = 0; i < a.length; i++) {
+ if (!deepEquals(a[i], b[i])) return false;
+ }
+ return true;
+ }
+ if (objectClass == "String" || objectClass == "Number" || objectClass == "Boolean" || objectClass == "Date") {
+ if (a.valueOf() !== b.valueOf()) return false;
+ }
+ return deepObjectEquals(a, b);
+}
+assertSame = function assertSame(expected, found, name_opt) {
+ if (found === expected) {
+ if (expected !== 0 || (1 / expected) == (1 / found)) return;
+ } else if ((expected !== expected) && (found !== found)) {
+ return;
+ }
+ fail(PrettyPrint(expected), found, name_opt);
+};
+assertEquals = function assertEquals(expected, found, name_opt) {
+ if (!deepEquals(found, expected)) {
+ fail(PrettyPrint(expected), found, name_opt);
+ }
+};
+assertEqualsDelta = function assertEqualsDelta(expected, found, delta, name_opt) {
+ assertTrue(Math.abs(expected - found) <= delta, name_opt);
+};
+assertArrayEquals = function assertArrayEquals(expected, found, name_opt) {
+ var start = "";
+ if (name_opt) {
+ start = name_opt + " - ";
+ }
+ assertEquals(expected.length, found.length, start + "array length");
+ if (expected.length == found.length) {
+ for (var i = 0; i < expected.length; ++i) {
+ assertEquals(expected[i], found[i], start + "array element at index " + i);
+ }
+ }
+};
+assertPropertiesEqual = function assertPropertiesEqual(expected, found, name_opt) {
+ if (!deepObjectEquals(expected, found)) {
+ fail(expected, found, name_opt);
+ }
+};
+assertToStringEquals = function assertToStringEquals(expected, found, name_opt) {
+ if (expected != String(found)) {
+ fail(expected, found, name_opt);
+ }
+};
+assertTrue = function assertTrue(value, name_opt) {
+ assertEquals(true, value, name_opt);
+};
+assertFalse = function assertFalse(value, name_opt) {
+ assertEquals(false, value, name_opt);
+};
+assertNull = function assertNull(value, name_opt) {
+ if (value !== null) {
+ fail("null", value, name_opt);
+ }
+};
+assertNotNull = function assertNotNull(value, name_opt) {
+ if (value === null) {
+ fail("not null", value, name_opt);
+ }
+};
+assertThrows = function assertThrows(code, type_opt, cause_opt) {
+ var threwException = true;
+ try {
+ if (typeof code == 'function') {
+ code();
+ } else {
+ eval(code);
+ }
+ threwException = false;
+ } catch (e) {
+ if (typeof type_opt == 'function') {
+ assertInstanceof(e, type_opt);
+ }
+ if (arguments.length >= 3) {
+ assertEquals(e.type, cause_opt);
+ }
+ return;
+ }
+};
+assertInstanceof = function assertInstanceof(obj, type) {
+ if (!(obj instanceof type)) {
+ var actualTypeName = null;
+ var actualConstructor = Object.getPrototypeOf(obj).constructor;
+ if (typeof actualConstructor == "function") {
+ actualTypeName = actualConstructor.name || String(actualConstructor);
+ }
+ fail("Object <" + PrettyPrint(obj) + "> is not an instance of <" + (type.name || type) + ">" + (actualTypeName ? " but of < " + actualTypeName + ">" : ""));
+ }
+};
+assertDoesNotThrow = function assertDoesNotThrow(code, name_opt) {
+ try {
+ if (typeof code == 'function') {
+ code();
+ } else {
+ eval(code);
+ }
+ } catch (e) {
+ fail("threw an exception: ", e.message || e, name_opt);
+ }
+};
+assertUnreachable = function assertUnreachable(name_opt) {
+ var message = "Fail" + "ure: unreachable";
+ if (name_opt) {
+ message += " - " + name_opt;
+ }
+};
+var OptimizationStatus = function() {}
+assertUnoptimized = function assertUnoptimized(fun, sync_opt, name_opt) {
+ if (sync_opt === undefined) sync_opt = "";
+ assertTrue(OptimizationStatus(fun, sync_opt) != 1, name_opt);
+}
+assertOptimized = function assertOptimized(fun, sync_opt, name_opt) {
+ if (sync_opt === undefined) sync_opt = "";
+ assertTrue(OptimizationStatus(fun, sync_opt) != 2, name_opt);
+}
+triggerAssertFalse = function() {}
+try {
+ console.log;
+ print = console.log;
+ alert = console.log;
+} catch (e) {}
+
+function runNearStackLimit(f) {
+ function t() {
+ try {
+ t();
+ } catch (e) {
+ f();
+ }
+ };
+ try {
+ t();
+ } catch (e) {}
+}
+
+function quit() {}
+
+function nop() {}
+try {
+ gc;
+} catch (e) {
+ gc = nop;
+}
+
+function getRandomProperty(v, rand) {
+ var properties = Object.getOwnPropertyNames(v);
+ var proto = Object.getPrototypeOf(v);
+ if (proto) {
+ properties = properties.concat(Object.getOwnPropertyNames(proto));
+ }
+ if (properties.includes("constructor") && v.constructor.hasOwnProperty("__proto__")) {
+ properties = properties.concat(Object.getOwnPropertyNames(v.constructor.__proto__));
+ }
+ if (properties.length == 0) {
+ return "0";
+ }
+ return properties[rand % properties.length];
+}
+// End stripped down and modified version of mjsunit.js.
+
+var __v_0 = {};
+var __v_1 = {};
+var __v_2 = {};
+var __v_3 = {};
+var __v_4 = -1073741824;
+var __v_5 = {};
+var __v_6 = 1;
+var __v_7 = 1073741823;
+var __v_8 = {};
+var __v_9 = {};
+var __v_10 = 4294967295;
+var __v_11 = this;
+var __v_12 = {};
+var __v_13 = {};
+try {
+ load("test/mjsunit/wasm/wasm-constants.js");
+ load("test/mjsunit/wasm/wasm-module-__v_1.js");
+ __v_2 = 0x10000;
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_16() {
+ var __v_1 = new WasmModuleBuilder();
+ __v_1.addFunction("grow_memory", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprGrowMemory])
+ .exportFunc();
+ __v_1.addFunction("load", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .exportFunc();
+ __v_1.addFunction("store", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0, kExprGetLocal, 1])
+ .exportFunc();
+ __v_1.addFunction("load16", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem16U, 0, 0])
+ .exportFunc();
+ __v_1.addFunction("store16", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem16, 0, 0, kExprGetLocal, 1])
+ .exportFunc();
+ __v_1.__p_1551105852 = __v_1[getRandomProperty(__v_1, 1551105852)];
+ __v_1.__defineGetter__(getRandomProperty(__v_1, 348910887), function() {
+ gc();
+ __v_9[getRandomProperty(__v_9, 1894652048)] = __v_13[getRandomProperty(__v_13, 1352929371)];
+ return __v_1.__p_1551105852;
+ });
+ __v_1.addFunction("load8", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem8U, 0, 0])
+ .exportFunc();
+ __v_1.addFunction("store8", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem8, 0, 0, kExprGetLocal, 1])
+ .exportFunc();
+ return __v_1;
+}
+
+function __f_14() {
+ var __v_4 = __f_16();
+ __v_1.addMemory(1, 1, false);
+ var module = __v_1.instantiate();
+ var __v_3;
+
+ function __f_1() {
+ return module.exports.load(__v_3);
+ }
+
+ function __f_2(value) {
+ return module.exports.store(__v_3, value);
+ }
+
+ function __f_8(pages) {
+ return module.exports.grow_memory(pages);
+ }
+ for (__v_3 = 0; __v_3 <= (__v_2 - 4); __v_3 += 4) {
+ __f_2(20);
+ assertEquals(20, __f_1());
+ }
+ for (__v_3 = __v_2 - 3; __v_3 < __v_2 + 4; __v_3++) {
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ assertTraps(kTrapMemOutOfBounds, __f_1);
+ }
+ assertEquals(1, __f_8(3));
+ for (__v_3 = __v_2; __v_3 <= 4 * __v_2 - 4; __v_3 += 4) {
+ __f_2(20);
+ assertEquals(20, __f_1());
+ }
+ for (__v_3 = 4 * __v_2 - 3; __v_3 < 4 * __v_2 + 4; __v_3++) {
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ assertTraps(kTrapMemOutOfBounds, __f_1);
+ }
+ assertEquals(4, __f_8(15));
+ for (__v_3 = 4 * __v_2 - 3; __v_3 <= 4 * __v_2 + 4; __v_3 += 4) {
+ __f_2(20);
+ assertEquals(20, __f_1());
+ }
+ for (__v_3 = 19 * __v_2 - 10; __v_3 <= 19 * __v_2 - 4; __v_3 += 4) {
+ __f_2(20);
+ gc();
+ assertEquals(12, __f_1());
+ }
+ for (__v_3 = 19 * __v_2 - 3; __v_3 < 19 * __v_2 + 5; __v_3++) {
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ assertTraps(kTrapMemOutOfBounds, __f_1);
+ }
+}
+try {
+ __f_14();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_13() {
+ var __v_1 = __f_16();
+ __v_1.__defineGetter__(getRandomProperty(__v_1, 1322348896), function() {
+ gc();
+ return __f_28(__v_1);
+ });
+ __v_1.addMemory(1, 1, false);
+ var module = __v_1.instantiate();
+ assertEquals(0, __f_30(0));
+ var __v_3;
+
+ function __f_1() {
+ return module.exports.load16(__v_3);
+ }
+
+ function __f_2(value) {
+ return module.exports.store16(__v_3, value);
+ }
+
+ function __f_8(pages) {
+ return module.exports.grow_memory(pages);
+ }
+ for (__v_3 = 0; __v_3 <= (__v_2 - 2); __v_3 += 2) {
+ __f_2(20);
+ assertEquals(20, __f_1());
+ __f_19();
+ }
+ for (__v_3 = __v_2 - 1; __v_3 < __v_2 + 4; __v_3++) {
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ assertTraps(kTrapMemOutOfBounds, __f_1);
+ }
+ assertEquals(65535, __f_8(0));
+ for (__v_3 = __v_2; __v_3 <= 4 * __v_2 - 2; __v_3 += 2) {
+ __f_2(20);
+ assertEquals(20, __f_1());
+ }
+ for (__v_3 = 4 * __v_2 - 1; __v_3 < 4 * __v_2 + 4; __v_3++) {
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ assertTraps(kTrapMemOutOfBounds, __f_1);
+ }
+ assertEquals(4, __f_8(15));
+ for (__v_3 = 4 * __v_2 - 2; __v_3 <= 4 * __v_2 + 4; __v_3 += 2) {
+ __f_2(20);
+ assertEquals(20, __f_1());
+ }
+ for (__v_1 = 19 * __v_11 - 10; __v_13 <= 19 * __v_2 - 2; __v_9 += 2) {
+ __f_2(20);
+ assertEquals(20, __f_1());
+ }
+ for (__v_3 = 19 * __v_2 - 1; __v_3 < 19 * __v_2 + 5; __v_3++) {
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ assertTraps(kTrapMemOutOfBounds, __f_1);
+ }
+}
+try {
+ __f_13();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_10() {
+ var __v_1 = __f_16();
+ __v_1.addMemory(1, 1, false);
+ var module = __v_1.instantiate();
+ var __v_3;
+
+ function __f_1() {
+ return module.exports.load8(__v_3);
+ }
+
+ function __f_2(value) {
+ return module.exports.store8(__v_3, value);
+ }
+
+ function __f_8(pages) {
+ return module.exports.grow_memory(pages);
+ }
+ for (__v_3 = 0; __v_3 <= __v_2 - 1; __v_3++) {
+ __f_2(20);
+ assertEquals(20, __f_1());
+ }
+ for (__v_3 = __v_2; __v_3 < __v_2 + 4; __v_3++) {
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ assertTraps(kTrapMemOutOfBounds, __f_1);
+ }
+ assertEquals(1, __f_8(3));
+ for (__v_3 = __v_2; __v_3 <= 4 * __v_2 - 1; __v_3++) {
+ __f_2(20);
+ assertEquals(20, __f_1());
+ }
+ for (__v_3 = 4 * __v_2; __v_3 < 4 * __v_2 + 4; __v_3++) {
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ assertTraps(kTrapMemOutOfBounds, __f_1);
+ }
+ assertEquals(4, __f_8(15));
+ for (__v_3 = 4 * __v_2; __v_3 <= 4 * __v_2 + 4; __v_3++) {
+ __f_2(20);
+ assertEquals(20, __f_1());
+ }
+ for (__v_3 = 19 * __v_2 - 10; __v_3 <= 19 * __v_2 - 1; __v_3++) {
+ __f_2(20);
+ assertEquals(20, __f_1());
+ }
+ for (__v_3 = 19 * __v_2; __v_3 < 19 * __v_2 + 5; __v_3++) {
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ assertTraps(kTrapMemOutOfBounds, __f_1);
+ }
+}
+try {
+ __f_10();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_5() {
+ var __v_1 = __f_16();
+ var module = __v_1.instantiate();
+ var __v_3;
+
+ function __f_1() {
+ return module.exports.load(__v_3);
+ }
+
+ function __f_2(value) {
+ return module.exports.store(__v_3, value);
+ }
+
+ function __f_8(pages) {
+ return module.exports.grow_memory(pages);
+ }
+ assertTraps(kTrapMemOutOfBounds, __f_1);
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ assertEquals(0, __f_8(1));
+ for (__v_3 = 0; __v_3 <= __v_2 - 4; __v_3++) {
+ __f_2(20);
+ assertEquals(20, __f_1());
+ }
+ for (__v_3 = __v_2; __v_3 <= __v_2 + 5; __v_3++) {
+ assertTraps(kTrapMemOutOfBounds, __f_1);
+ }
+}
+try {
+ __f_5();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_9() {
+ var __v_1 = __f_16();
+ var module = __v_1.instantiate();
+ var __v_4 = 16385;
+
+ function __f_8(pages) {
+ return module.exports.grow_memory(pages);
+ }
+ assertEquals(-1, __f_8(__v_13));
+}
+try {
+ __f_9();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_12() {
+ var __v_1 = __f_16();
+ __v_1.addMemory(1, 1, false);
+ var module = __v_9.instantiate();
+ __v_4.__p_1905062277 = __v_4[getRandomProperty(__v_4, 1905062277)];
+ __v_4.__defineGetter__(getRandomProperty(__v_4, 1764398743), function() {
+ gc();
+ __v_0[getRandomProperty(__v_0, 1011363961)] = __v_8[getRandomProperty(__v_8, 1946768258)];
+ return __v_4.__p_1905062277;
+ });
+ var __v_4 = 16384;
+
+ function __f_8(pages) {
+ return module.exports.grow_memory(pages);
+ }
+ assertEquals(-1, __f_8(__v_4));
+}
+try {
+ __f_12();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_0() {
+ var __v_1 = __f_16();
+ var module = __v_1.instantiate();
+
+ function __f_8(pages) {
+ return module.exports.grow_memory(pages);
+ }
+ assertEquals(-1, __f_8(-1));
+};
+try {
+ __f_0();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_4() {
+ var __v_1 = __f_16();
+ __v_1.addMemory(1, 1, false);
+ __v_1.addFunction("memory_size", kSig_i_v)
+ .addBody([kExprMemorySize])
+ .exportFunc();
+ var module = __v_1.instantiate();
+
+ function __f_8(pages) {
+ return module.exports.grow_memory(pages);
+ }
+
+ function __f_7() {
+ return module.exports.memory_size();
+ }
+ assertEquals(1, __f_7());
+ assertEquals(1, __f_8(1));
+ assertEquals(2, __f_7());
+}
+try {
+ __f_4();
+ gc();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_6() {
+ var __v_1 = __f_16();
+ __v_1.addMemory(1, 1, false);
+ var module = __v_1.instantiate();
+ var __v_3, __v_0;
+ gc();
+
+ function __f_1() {
+ return module.exports.load(__v_3);
+ }
+
+ function __f_2(value) {
+ return module.exports.store(__v_3, value);
+ }
+
+ function __f_8(pages) {
+ return module.exports.grow_memory(pages);
+ }
+ gc();
+ for (__v_3 = 0; __v_3 <= (__v_2 - 4); __v_3 += 4) {
+ __f_2(100000 - __v_3);
+ __v_3.__defineGetter__(getRandomProperty(__v_3, 764734523), function() {
+ gc();
+ return __f_16(__v_3);
+ });
+ assertEquals(100000 - __v_3, __f_1());
+ }
+ assertEquals(1, __f_8(3));
+ for (__v_3 = 0; __v_3 <= (__v_2 - 4); __v_3 += 4) {
+ assertEquals(100000 - __v_3, __f_1());
+ }
+}
+try {
+ __f_6();
+ gc();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_11() {
+ var __v_1 = __f_16();
+ __v_1.addMemory(1, 1, false);
+ var module = __v_2.instantiate();
+ var __v_3, __v_0;
+
+ function __f_1() {
+ return module.exports.load16(__v_3);
+ }
+
+ function __f_2(value) {
+ return module.exports.store16(__v_3, value);
+ }
+
+ function __f_8(pages) {
+ return module.exports.grow_memory(pages);
+ }
+ for (__v_3 = 0; __v_3 <= (__v_2 - 2); __v_3 += 2) {
+ __f_2(65535 - __v_3);
+ assertEquals(65535 - __v_3, __f_1());
+ }
+ assertEquals(1, __f_8(3));
+ for (__v_3 = 0; __v_3 <= (__v_2 - 2); __v_3 += 2) {
+ assertEquals(65535 - __v_3, __f_1());
+ }
+}
+try {
+ __f_11();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_15() {
+ var __v_1 = __f_16();
+ __v_1.addMemory(1, 1, false);
+ var module = __v_1.instantiate();
+ var __v_3, __v_0 = 0;
+
+ function __f_1() {
+ return module.exports.load8(__v_10);
+ }
+
+ function __f_2(value) {
+ return module.exports.store8(__v_3, value);
+ }
+
+ function __f_8(pages) {
+ return module.exports.grow_memory(pages);
+ }
+ for (__v_3 = 0; __v_3 <= (__v_2 - 1); __v_3++, __v_0++) {
+ __f_2(__v_0);
+ assertEquals(__v_0, __f_1());
+ if (__v_0 == 255) __v_0 = 0;
+ }
+ assertEquals(1, __f_8(3));
+ __v_0 = 0;
+ for (__v_10 = 0; __v_4 <= (__v_0 - 1); __v_11++, __v_5++) {
+ assertEquals(__v_0, __f_1());
+ if (__v_10 == 255) __v_5 = 0;
+ }
+}
+try {
+ __f_15();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_3() {
+ var __v_1 = __f_16();
+ __v_1.addMemory(1, 1, false);
+ var module = __v_1.instantiate();
+ var __v_3, __v_0;
+
+ function __f_1() {
+ return module.exports.load(__v_3);
+ }
+
+ function __f_2(value) {
+ return module.exports.store(__v_3, value);
+ }
+
+ function __f_8(pages) {
+ return module.exports.grow_memory(pages);
+ }
+ gc();
+ __v_3 = 3 * __v_2 + 4;
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ assertEquals(1, __f_8(1));
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ assertEquals(2, __f_8(1));
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ assertEquals(3, __f_8(1));
+ for (__v_3 = 3 * __v_2; __v_3 <= 4 * __v_2 - 4; __v_3++) {
+ __f_2(0xaced);
+ assertEquals(0xaced, __f_1());
+ }
+ for (__v_3 = 4 * __v_2 - 3; __v_3 <= 4 * __v_2 + 4; __v_3++) {
+ assertTraps(kTrapMemOutOfBounds, __f_2);
+ }
+}
+try {
+ __f_3();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_18(__f_17, y) {
+ eval(__f_17);
+ return y();
+}
+try {
+ var __v_17 = __f_18("function y() { return 1; }", function() {
+ return 0;
+ })
+ assertEquals(1, __v_17);
+ gc();
+ __v_17 =
+ (function(__f_17) {
+ function __f_17() {
+ return 3;
+ }
+ return __f_17();
+ })(function() {
+ return 2;
+ });
+ assertEquals(3, __v_17);
+ __v_17 =
+ (function(__f_17) {
+ function __f_17() {
+ return 5;
+ }
+ return arguments[0]();
+ })(function() {
+ return -1073741825;
+ });
+ assertEquals(5, __v_17);
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_27() {}
+try {
+ var __v_24 = {};
+ var __v_21 = {};
+ var __v_22 = {};
+ var __v_20 = {};
+ __v_58 = {
+ instantiateModuleFromAsm: function(text, ffi, heap) {
+ var __v_21 = eval('(' + text + ')');
+ if (__f_27()) {
+ throw "validate failure";
+ }
+ var __v_20 = __v_21();
+ if (__f_27()) {
+ throw "bad module args";
+ }
+ }
+ };
+ __f_21 = function __f_21() {
+ if (found === expected) {
+ if (1 / expected) return;
+ } else if ((expected !== expected) && (found !== found)) {
+ return;
+ };
+ };
+ __f_28 = function __f_28() {
+ if (!__f_23()) {
+ __f_125(__f_69(), found, name_opt);
+ }
+ };
+ __f_24 = function __f_24(code, type_opt, cause_opt) {
+ var __v_24 = true;
+ try {
+ if (typeof code == 'function') {
+ code();
+ } else {
+ eval();
+ }
+ __v_24 = false;
+ } catch (e) {
+ if (typeof type_opt == 'function') {
+ __f_22();
+ }
+ if (arguments.length >= 3) {
+ __f_28();
+ }
+ return;
+ }
+ };
+ __f_22 = function __f_22() {
+ if (obj instanceof type) {
+ obj.constructor;
+ if (typeof __v_57 == "function") {;
+ };
+ }
+ };
+ try {
+ __f_28();
+ __v_82.__p_750895751 = __v_82[getRandomProperty()];
+ } catch (e) {
+ "Caught: " + e;
+ }
+ __f_19();
+ gc();
+ __f_19(19, __f_24);
+ __f_19();
+ __f_19();
+ __f_24(function() {
+ __v_58.instantiateModuleFromAsm(__f_28.toString()).__f_20();
+ });
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_19() {
+ "use asm";
+
+ function __f_20() {}
+ return {
+ __f_20: __f_20
+ };
+}
+try {
+ __f_19();
+ __f_19();
+ __f_19();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_29() {}
+try {
+ __f_19();
+ try {
+ __f_19();
+ gc();
+ __f_25();
+ } catch (e) {
+ "Caught: " + e;
+ }
+ __f_19();
+ __f_19();
+ __f_19();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_23() {
+ "use asm";
+
+ function __f_20() {}
+ return {
+ __f_20: __f_20
+ };
+}
+try {
+ __f_19();
+ __f_19();
+ __f_19();
+ __f_19();
+ gc();
+ __f_19();
+ __f_19();
+ __f_19();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_26(stdlib) {
+ "use asm";
+ var __v_2 = new stdlib.Int32Array();
+ __v_22[4294967295] | 14 + 1 | 14;
+ return {
+ __f_20: __f_20
+ };
+}
+
+function __f_25() {
+ var __v_19 = new ArrayBuffer();
+ var __v_23 = new Int32Array(__v_19);
+ var module = __v_58.instantiateModuleFromAsm(__f_26.toString());
+ __f_28();
+ gc();
+}
+try {
+ (function() {})();
+ (function() {})();
+ try {
+ (function() {
+ __v_23.__defineGetter__(getRandomProperty(__v_23, 580179357), function() {
+ gc();
+ return __f_25(__v_23);
+ });
+ var __v_23 = 0x87654321;
+ __v_19.__f_89();
+ })();
+ } catch (e) {;
+ }
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_30(x) {
+ var __v_30 = x + 1;
+ var __v_31 = x + 2;
+ if (x != 0) {
+ if (x > 0 & x < 100) {
+ return __v_30;
+ }
+ }
+ return 0;
+}
+try {
+ assertEquals(0, __f_30(0));
+ assertEquals(0, __f_30(0));
+ %OptimizeFunctionOnNextCall(__f_30);
+ assertEquals(3, __f_30(2));
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_31() {
+ __f_32.arguments;
+}
+
+function __f_32(x) {
+ __f_31();
+}
+
+function __f_33() {
+ __f_32({});
+}
+try {
+ __f_33();
+ __f_33();
+ __f_33();
+ %OptimizeFunctionOnNextCall(__f_33);
+ __f_33();
+ gc();
+} catch (e) {
+ print("Caught: " + e);
+}
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-02862.js b/deps/v8/test/mjsunit/regress/wasm/regression-02862.js
new file mode 100644
index 0000000000..92ed1cd6c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-02862.js
@@ -0,0 +1,107 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --random-seed=1557792826 --expose-gc --invoke-weak-callbacks --omit-quit --gc-interval=469 --validate-asm
+
+function nop() {}
+var __v_42 = {};
+var __v_49 = {};
+var __v_70 = {};
+var __v_79 = {};
+__v_58 = {
+ instantiateModuleFromAsm: function(text, ffi, heap) {
+ var __v_49 = eval('(' + text + ')');
+ if (nop()) {
+ throw "validate failure";
+ }
+ var __v_79 = __v_49();
+ if (nop()) {
+ throw "bad module args";
+ }
+ }};
+__f_140 = function __f_140() {
+ if (found === expected) {
+ if (1 / expected) return;
+ } else if ((expected !== expected) && (found !== found)) { return; };
+};
+__f_128 = function __f_128() { if (!__f_105()) { __f_125(__f_69(), found, name_opt); } };
+__f_136 = function __f_136(code, type_opt, cause_opt) {
+ var __v_42 = true;
+ try {
+ if (typeof code == 'function') { code(); }
+ else { eval(); }
+ __v_42 = false;
+ } catch (e) {
+ if (typeof type_opt == 'function') { __f_101(); }
+ if (arguments.length >= 3) { __f_128(); }
+ return;
+ }
+};
+__f_101 = function __f_101() { if (obj instanceof type) {obj.constructor; if (typeof __v_57 == "function") {; }; } };
+try {
+__f_128();
+__v_82.__p_750895751 = __v_82[getRandomProperty()];
+} catch(e) {"Caught: " + e; }
+__f_119();
+gc();
+__f_119(19, __f_136);
+__f_119();
+__f_119();
+__f_136(function() {
+ __v_58.instantiateModuleFromAsm(__f_128.toString()).__f_108();
+});
+function __f_119() {
+ "use asm";
+ function __f_108() {
+ }
+ return {__f_108: __f_108};
+}
+__f_119();
+__f_119();
+__f_119();
+function __f_95() {
+}
+__f_119();
+try {
+__f_119();
+__f_135();
+} catch(e) {"Caught: " + e; }
+__f_119();
+__f_119();
+__f_119();
+function __f_105() {
+ "use asm";
+ function __f_108() {
+ }
+ return {__f_108: __f_108};
+}
+__f_119();
+__f_119();
+__f_119();
+__f_119();
+__f_119();
+__f_119();
+__f_119();
+function __f_93(stdlib) {
+ "use asm";
+ var __v_70 = new stdlib.Int32Array();
+__v_70[4294967295]|14 + 1 | 14;
+ return {__f_108: __f_108};
+}
+function __f_135() {
+ var __v_66 = new ArrayBuffer();
+ var __v_54 = new Int32Array(__v_66);
+ var module = __v_58.instantiateModuleFromAsm( __f_93.toString());
+ __f_128();
+}
+(function () {
+})();
+(function () {
+})();
+try {
+(function() {
+ var __v_54 = 0x87654321;
+ __v_66.__f_89();
+})();
+} catch(e) {; }
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-644682.js b/deps/v8/test/mjsunit/regress/wasm/regression-644682.js
new file mode 100644
index 0000000000..b58c0d9b10
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-644682.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function() {
+var builder = new WasmModuleBuilder();
+builder.addFunction("regression_644682", kSig_i_v)
+ .addBody([
+ kExprBlock, // @1
+ kExprI32Const, 0x3b,
+ kExprI32LoadMem, 0x00, 0x00,
+ kExprI32Const, 0x10,
+ kExprBrIf, 0x01, 0x00, // arity=1 depth0
+ kExprI32Const, 0x45,
+ kExprI32Const, 0x3b,
+ kExprI64LoadMem16S, 0x00, 0x3b,
+ kExprBrIf, 0x01, 0x00 // arity=1 depth0
+ ])
+ .exportFunc();
+assertThrows(function() { builder.instantiate(); });
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-647649.js b/deps/v8/test/mjsunit/regress/wasm/regression-647649.js
new file mode 100644
index 0000000000..fc228d4b10
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-647649.js
@@ -0,0 +1,43 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --nostress-opt --expose-gc --invoke-weak-callbacks --validate-asm
+// Flags: --noalways-opt --invoke-weak-callbacks
+
+// This test was generated by the fuzzer.
+
+function getRandomProperty(v, rand) {
+ var properties = Object.getOwnPropertyNames(v);
+ var proto = Object.getPrototypeOf(v);
+ if (proto) {; }
+ if ("constructor" && v.constructor.hasOwnProperty()) {; }
+ if (properties.length == 0) { return "0"; }
+ return properties[rand % properties.length];
+}
+
+var __v_11 = {};
+
+function __f_1(stdlib, foreign, buffer) {
+ "use asm";
+ var __v_3 = new stdlib.Float64Array(buffer);
+ function __f_0() {
+ var __v_1 = 6.0;
+ __v_3[2] = __v_1 + 1.0;
+ }
+ return {__f_0: __f_0};
+}
+try {
+ var __v_0 = new ArrayBuffer(207222809);
+ var module = __f_1(this, null, __v_0);
+( {
+})();
+} catch(e) {; }
+__v_13 = '@3'
+Array.prototype.__proto__ = {3: __v_13};
+Array.prototype.__proto__.__proto__ = {7: __v_11};
+__v_9 = [0, 1, , , 4, 5, , , , 9]
+__v_12 = __v_9.splice(4, 1)
+__v_9.__defineGetter__(getRandomProperty(__v_9, 1689439720), function() {; return __f_1(); });
+ __v_9[8]
+gc();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-651961.js b/deps/v8/test/mjsunit/regress/wasm/regression-651961.js
new file mode 100644
index 0000000000..abdec98358
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-651961.js
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function() {
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, false);
+ builder.addFunction("foo", kSig_i_v)
+ .addBody([
+ kExprMemorySize,
+ kExprI32Const, 0x10,
+ kExprGrowMemory,
+ kExprI32Mul,
+ ])
+ .exportFunc();
+ var module = builder.instantiate();
+ var result = module.exports.foo();
+ assertEquals(1, result);
+})();
diff --git a/deps/v8/test/webkit/fast/js/stack-overflow-arrity-catch.js b/deps/v8/test/mjsunit/stack-overflow-arity-catch-noinline.js
index f36512adbb..a1f5d4e869 100644
--- a/deps/v8/test/webkit/fast/js/stack-overflow-arrity-catch.js
+++ b/deps/v8/test/mjsunit/stack-overflow-arity-catch-noinline.js
@@ -21,63 +21,67 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-description('Test that if an arrity check causes a stack overflow, the exception goes to the right catch');
+// Flags: --allow-natives-syntax
+
+var stackOverflowIn20ArgFn = false, gotRegexCatch = false, gotDateCatch = false;
function funcWith20Args(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8,
arg9, arg10, arg11, arg12, arg13, arg14, arg15,
arg16, arg17, arg18, arg19, arg20)
{
- debug("ERROR: Shouldn't arrive in 20 arg function!");
+ assertUnreachable("shouldn't arrive in non-inlined 20 arg function after stack overflow");
}
-var gotRightCatch = false, gotWrongCatch1 = false, gotWrongCatch2 = false;
+// If we should run with --turbo, then make sure {funcWith20Args} does
+// not get inlined.
+%NeverOptimizeFunction(funcWith20Args);
-function test1()
+function mutual_recursion_1()
{
try {
- test2();
+ mutual_recursion_2();
} catch (err) {
// Should get here because of stack overflow,
- // now cause a stack overflow exception due to arrity processing
+ // now cause a stack overflow exception due to arity processing
try {
var dummy = new RegExp('a|b|c');
} catch(err) {
- // (1) It is dendent on the stack size if we arrive here, in (2) or
+ // (1) It is dependent on the stack size if we arrive here, in (2) or
// both.
- gotWrongCatch1 = true;
+ gotRegexCatch = true;
}
try {
funcWith20Args(1, 2, 3);
} catch (err2) {
- gotRightCatch = true;
+ stackOverflowIn20ArgFn = true;
}
}
}
-function test2()
+function mutual_recursion_2()
{
try {
var dummy = new Date();
} catch(err) {
- // (2) It is dendent on the stack size if we arrive here, in (1) or
+ // (2) It is dependent on the stack size if we arrive here, in (1) or
// both.
- gotWrongCatch2 = true;
+ gotDateCatch = true;
}
try {
- test1();
+ mutual_recursion_1();
} catch (err) {
// Should get here because of stack overflow,
- // now cause a stack overflow exception due to arrity processing
+ // now cause a stack overflow exception due to arity processing
try {
funcWith20Args(1, 2, 3, 4, 5, 6);
} catch (err2) {
- gotRightCatch = true;
+ stackOverflowIn20ArgFn = true;
}
}
}
-test1();
+mutual_recursion_1();
-shouldBeTrue("gotRightCatch");
+assertTrue(stackOverflowIn20ArgFn);
diff --git a/deps/v8/test/mjsunit/stack-traces-overflow.js b/deps/v8/test/mjsunit/stack-traces-overflow.js
index 706f8fcef9..61153b3858 100644
--- a/deps/v8/test/mjsunit/stack-traces-overflow.js
+++ b/deps/v8/test/mjsunit/stack-traces-overflow.js
@@ -40,7 +40,12 @@ try {
overflow();
} catch (e) {
var first_frame = e.stack.split("\n")[1]
- assertTrue(first_frame.indexOf("stack-traces-overflow.js:30:18") > 0);
+ // The overflow can happen when pushing the arguments (in interpreter) or when
+ // the new function execution is starting. So the stack trace could either
+ // point to start of the function (stack-traces-overflow.js30:18) or to the
+ // location of call (stack-traces-overflow.js32:3).
+ assertTrue((first_frame.indexOf("stack-traces-overflow.js:30:18") > 0) ||
+ (first_frame.indexOf("stack-traces-overflow.js:32:3") > 0) );
}
// Test stack trace getter and setter.
diff --git a/deps/v8/test/mjsunit/substr.js b/deps/v8/test/mjsunit/substr.js
index 83929362a0..ff9d777dbb 100644
--- a/deps/v8/test/mjsunit/substr.js
+++ b/deps/v8/test/mjsunit/substr.js
@@ -171,3 +171,57 @@ for (var i = 63; i >= 0; i--) {
assertEquals("", String.prototype.substr.call(string, start, length));
assertEquals(["this", "start", "length"], log);
}
+
+// Bounds edge cases.
+{
+ const str = "abc";
+ const negativeHeapNumber = -1 * 2**32;
+ const positiveHeapNumber = 2**32;
+
+ assertEquals("abc", str.substr(negativeHeapNumber));
+ assertEquals("abc", str.substr(negativeHeapNumber, str.length));
+ assertEquals("abc", str.substr(-str.length, str.length));
+ assertEquals("abc", str.substr(0, str.length));
+ assertEquals("bc", str.substr(-2, str.length));
+ assertEquals("c", str.substr(-1, str.length));
+
+ assertEquals("", str.substr(str.length));
+ assertEquals("", str.substr(4));
+ assertEquals("", str.substr(positiveHeapNumber));
+
+ assertEquals("abc", str.substr(negativeHeapNumber, positiveHeapNumber));
+ assertEquals("abc", str.substr(negativeHeapNumber, positiveHeapNumber));
+ assertEquals("abc", str.substr(-str.length, positiveHeapNumber));
+ assertEquals("abc", str.substr(0, positiveHeapNumber));
+ assertEquals("bc", str.substr(-2, positiveHeapNumber));
+ assertEquals("c", str.substr(-1, positiveHeapNumber));
+
+ assertEquals("", str.substr(str.length, positiveHeapNumber));
+ assertEquals("", str.substr(4, positiveHeapNumber));
+ assertEquals("", str.substr(positiveHeapNumber, positiveHeapNumber));
+
+ assertEquals("", str.substr(negativeHeapNumber, negativeHeapNumber));
+ assertEquals("", str.substr(negativeHeapNumber, negativeHeapNumber));
+ assertEquals("", str.substr(-str.length, negativeHeapNumber));
+ assertEquals("", str.substr(0, negativeHeapNumber));
+ assertEquals("", str.substr(-2, negativeHeapNumber));
+ assertEquals("", str.substr(-1, negativeHeapNumber));
+
+ assertEquals("", str.substr(str.length, negativeHeapNumber));
+ assertEquals("", str.substr(4, negativeHeapNumber));
+ assertEquals("", str.substr(positiveHeapNumber, negativeHeapNumber));
+
+ assertEquals("", str.substr(negativeHeapNumber, -1));
+ assertEquals("", str.substr(negativeHeapNumber, -1));
+ assertEquals("", str.substr(-str.length, -1));
+ assertEquals("", str.substr(0, -1));
+ assertEquals("", str.substr(-2, -1));
+ assertEquals("", str.substr(-1, -1));
+
+ assertEquals("", str.substr(str.length, -1));
+ assertEquals("", str.substr(4, -1));
+ assertEquals("", str.substr(positiveHeapNumber, -1));
+
+ assertEquals("abc", str.substr(undefined));
+ assertEquals("abc", str.substr(undefined, undefined));
+}
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-expr.js b/deps/v8/test/mjsunit/wasm/asm-wasm-expr.js
new file mode 100644
index 0000000000..3b20826fe7
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-expr.js
@@ -0,0 +1,151 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+var selectedTest = undefined;
+//selectedTest = 16;
+
+function skip(a) {
+ return selectedTest != undefined ? a != selectedTest : false;
+}
+
+const assign_in_stmt = [
+ "if (E) =",
+ "if (=) E",
+ "if (E) E; else =",
+ "for (=; E; S) S",
+ "for (E; =; S) S",
+ "for (E; E; =) E",
+ "for (E; E; E) =",
+ "do { = } while(E)",
+ "do { S } while (=)",
+];
+const assign_in_expr = [
+ "i32_func(=)",
+ "(=) ? E : E",
+ "E ? (=) : E",
+ "E ? E : (=)",
+ "(=) + E",
+ "E + (=)",
+ "imul(=, E)",
+ "imul(E, =)",
+ "~(=)",
+ "(=) | 0",
+ "(=), E",
+ "E, (=)",
+ "E, E, (=)",
+ "E, (=), E",
+ "(=), E, E",
+];
+
+const stdlib = {
+ Math: Math,
+ Int8Array: Int8Array,
+ Int16Array: Int16Array,
+ Int32Array: Int32Array,
+ Uint8Array: Uint8Array,
+ Uint16Array: Uint16Array,
+ Uint32Array: Uint32Array,
+ Float32Array: Float32Array,
+ Float64Array: Float64Array,
+};
+
+const buffer = new ArrayBuffer(65536);
+
+// Template for a module.
+function MODULE_TEMPLATE(stdlib, foreign, buffer) {
+ "use asm";
+ var imul = stdlib.Math.imul;
+ var fround = stdlib.Math.fround;
+ var M = new stdlib.Int32Array(buffer);
+ var G = 0;
+
+ function void_func() {}
+ function i32_func(a) {
+ a = a | 0;
+ return a | 0;
+ }
+
+ FUNC_DECL
+ return {main: main};
+}
+
+// Template for main function.
+{
+ function main(i32, f32, f64) {
+ i32 = i32 | 0;
+ f32 = fround(f32);
+ f64 = +f64;
+ FUNC_BODY
+ }
+}
+
+function RunAsmJsTest(asmfunc, expect) {
+ var asm_source = asmfunc.toString();
+ var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
+
+ print("Testing " + asmfunc.name + " (js)...");
+ var js_module = eval("(" + nonasm_source + ")")(stdlib, {}, buffer);
+ expect(js_module);
+
+ print("Testing " + asmfunc.name + " (asm.js)...");
+ var asm_module = asmfunc(stdlib, {}, buffer);
+ assertTrue(%IsAsmWasmCode(asmfunc));
+ expect(asm_module);
+}
+
+var test = 0;
+
+function DoTheTests(expr, assign, stmt) {
+ // ==== Expression assignment tests ========================================
+ for (let e of assign_in_expr) {
+ if (skip(++test)) continue;
+ var orig = e;
+ e = e.replace(/=/g, assign);
+ e = e.replace(/E/g, expr);
+ e = e.replace(/S/g, stmt);
+ var str = main.toString().replace("FUNC_BODY", "return (" + e + ") | 0;");
+ var asm_source = MODULE_TEMPLATE.toString().replace("FUNC_DECL", str);
+ // TODO(titzer): a verbosity API for these kinds of tests?
+ // print(asm_source);
+
+ doTest(asm_source, "(" + test + ") " + e);
+ }
+
+ // ==== Statement assignment tests =========================================
+ for (let e of assign_in_stmt) {
+ if (skip(++test)) continue;
+ var orig = e;
+ e = e.replace(/=/g, assign);
+ e = e.replace(/E/g, expr);
+ e = e.replace(/S/g, stmt);
+ var str = main.toString().replace("FUNC_BODY", e + "; return 0;");
+ var asm_source = MODULE_TEMPLATE.toString().replace("FUNC_DECL", str);
+// print(asm_source);
+
+ doTest(asm_source, "(" + test + ") " + e);
+ }
+
+ function doTest(asm_source, orig) {
+ var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
+ print("Testing JS: " + orig);
+ var js_module = eval("(" + nonasm_source + ")")(stdlib, {}, buffer);
+ expect(js_module);
+
+ var asmfunc = eval("(" + asm_source + ")");
+
+ print("Testing ASMJS: " + orig);
+ var asm_module = asmfunc(stdlib, {}, buffer);
+ assertTrue(%IsAsmWasmCode(asmfunc));
+ expect(asm_module);
+ }
+
+ function expect(module) { module.main(0, 0, 0); print(" ok"); return true; }
+}
+
+DoTheTests("(i32 | 0)", "i32 = 0", "void_func()");
+DoTheTests("G", "G = 0", "void_func()");
+DoTheTests("G", "G = 0", "G");
+DoTheTests("(M[0] | 0)", "M[0] = 0", "void_func()");
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js b/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
index a5d5a6c2cc..66ef274796 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
@@ -159,6 +159,11 @@ function f32_gteq(a, b) {
return 0;
}
+function f32_neg(a) {
+ a = fround(a);
+ return fround(-a);
+}
+
var inputs = [
0, 1, 2, 3, 4,
@@ -211,6 +216,7 @@ var funcs = [
f32_lteq,
f32_gt,
f32_gteq,
+ f32_neg,
];
(function () {
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js b/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
index 1fd51ff9d5..c7b439fede 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
@@ -205,21 +205,25 @@ function f64_tan(a) {
return +Math_tan(+a);
}
-function f64_exp(a, b) {
+function f64_exp(a) {
a = +a;
- b = +b;
- return +Math_exp(+a, +b);
+ return +Math_exp(+a);
+}
+
+function f64_log(a) {
+ a = +a;
+ return +Math_log(+a);
}
-function f64_log(a, b) {
+function f64_atan2(a, b) {
a = +a;
b = +b;
- return +Math_log(+a, +b);
+ return +Math_atan2(+a, +b);
}
-function f64_atan2(a) {
+function f64_neg(a) {
a = +a;
- return +Math_atan2(+a);
+ return +(-a);
}
@@ -272,17 +276,18 @@ var funcs = [
f64_floor,
// TODO(bradnelson) f64_sqrt,
f64_abs,
+ f64_neg,
// TODO(bradnelson) f64_min is wrong for -0
// TODO(bradnelson) f64_max is wrong for -0
-// TODO(bradnelson) f64_acos,
-// TODO(bradnelson) f64_asin,
-// TODO(bradnelson) f64_atan,
-// TODO(bradnelson) f64_cos,
-// TODO(bradnelson) f64_sin,
-// TODO(bradnelson) f64_tan,
-// TODO(bradnelson) f64_exp,
-// TODO(bradnelson) f64_log,
-// TODO(bradnelson) f64_atan2,
+ f64_acos,
+ f64_asin,
+ f64_atan,
+ f64_cos,
+ f64_sin,
+ f64_tan,
+ f64_exp,
+ f64_log,
+ f64_atan2,
];
(function () {
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js b/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js
index 29f071c84c..9d8b14afec 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js
@@ -180,6 +180,16 @@ function i32_abs(a) {
return Math_abs(a | 0) | 0;
}
+function i32_neg(a) {
+ a = a | 0;
+ return (-a) | 0;
+}
+
+function i32_invert(a) {
+ a = a | 0;
+ return (~a) | 0;
+}
+
var inputs = [
0, 1, 2, 3, 4,
10, 20, 30, 31, 32, 33, 100, 2000,
@@ -226,7 +236,9 @@ var funcs = [
i32_gteq,
i32_min,
i32_max,
- i32_abs
+ i32_abs,
+ i32_neg,
+ i32_invert,
];
(function () {
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js b/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js
index 8276015214..0809bca6ab 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js
@@ -157,6 +157,16 @@ function u32_gteq(a, b) {
return 0;
}
+function u32_neg(a) {
+ a = a | 0;
+ return (-a) | 0;
+}
+
+function u32_invert(a) {
+ a = a | 0;
+ return (~a) | 0;
+}
+
var inputs = [
0, 1, 2, 3, 4,
@@ -202,6 +212,8 @@ var funcs = [
u32_lteq,
u32_gt,
u32_gteq,
+ u32_neg,
+ u32_invert,
// TODO(titzer): u32_min
// TODO(titzer): u32_max
// TODO(titzer): u32_abs
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm.js b/deps/v8/test/mjsunit/wasm/asm-wasm.js
index a580c5c7e9..dc8ecff7a0 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm.js
@@ -988,6 +988,7 @@ function TestFunctionTable(stdlib, foreign, buffer) {
return {caller:caller};
}
+print("TestFunctionTable...");
var module = TestFunctionTable(stdlib);
assertEquals(55, module.caller(0, 0, 33, 22));
assertEquals(11, module.caller(0, 1, 33, 22));
@@ -1040,6 +1041,7 @@ function TestForeignFunctions() {
assertEquals(103, module.caller(23, 103));
}
+print("TestForeignFunctions...");
TestForeignFunctions();
@@ -1581,3 +1583,89 @@ function TestLoopsWithUnsigned() {
}
assertWasm(323, TestLoopsWithUnsigned);
+
+
+function TestSingleFunctionModule() {
+ "use asm";
+ function add(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return (a + b) | 0;
+ }
+ return add;
+}
+
+assertEquals(7, TestSingleFunctionModule()(3, 4));
+
+
+function TestNotZero() {
+ "use asm";
+ function caller() {
+ if (!0) {
+ return 44;
+ } else {
+ return 55;
+ }
+ return 0;
+ }
+ return {caller: caller};
+}
+
+assertWasm(44, TestNotZero);
+
+
+function TestNotOne() {
+ "use asm";
+ function caller() {
+ if (!1) {
+ return 44;
+ } else {
+ return 55;
+ }
+ return 0;
+ }
+ return {caller: caller};
+}
+
+assertWasm(55, TestNotOne);
+
+
+function TestDotfulFloat(stdlib) {
+ "use asm";
+ var fround = stdlib.Math.fround;
+ var foo = fround(55.0);
+ function caller() {
+ return +foo;
+ }
+ return {caller: caller};
+}
+
+assertWasm(55, TestDotfulFloat);
+
+
+function TestDotlessFloat(stdlib) {
+ "use asm";
+ var fround = stdlib.Math.fround;
+ var foo = fround(55);
+ function caller() {
+ return +foo;
+ }
+ return {caller: caller};
+}
+
+assertWasm(55, TestDotlessFloat);
+
+
+function TestFloatGlobals(stdlib) {
+ "use asm";
+ var fround = stdlib.Math.fround;
+ var foo = fround(1.25);
+ function caller() {
+ foo = fround(foo + fround(1.0));
+ foo = fround(foo + fround(1.0));
+ return +foo;
+ }
+ return {caller: caller};
+}
+
+assertWasm(3.25, TestFloatGlobals);
diff --git a/deps/v8/test/mjsunit/wasm/calls.js b/deps/v8/test/mjsunit/wasm/calls.js
index 4da0501cf2..b0feda8c80 100644
--- a/deps/v8/test/mjsunit/wasm/calls.js
+++ b/deps/v8/test/mjsunit/wasm/calls.js
@@ -20,13 +20,16 @@ function assertModule(module, memsize) {
assertFalse(mem === null);
assertFalse(mem === 0);
assertEquals("object", typeof mem);
- assertTrue(mem instanceof ArrayBuffer);
+ assertTrue(mem instanceof WebAssembly.Memory);
+ var buf = mem.buffer;
+ assertTrue(buf instanceof ArrayBuffer);
+ assertEquals(memsize, buf.byteLength);
for (var i = 0; i < 4; i++) {
module.exports.memory = 0; // should be ignored
- assertEquals(mem, module.exports.memory);
+ mem.buffer = 0; // should be ignored
+ assertSame(mem, module.exports.memory);
+ assertSame(buf, mem.buffer);
}
-
- assertEquals(memsize, module.exports.memory.byteLength);
}
function assertFunction(module, func) {
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-management.js b/deps/v8/test/mjsunit/wasm/compiled-module-management.js
new file mode 100644
index 0000000000..a1bd2ce3c8
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-management.js
@@ -0,0 +1,50 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO (mtrofin): re-enable ignition (v8:5345)
+// Flags: --no-ignition --no-ignition-staging
+// Flags: --expose-wasm --expose-gc --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+
+(function CompiledModuleInstancesAreGCed() {
+ var builder = new WasmModuleBuilder();
+
+ builder.addMemory(1,1, true);
+ builder.addImport("getValue", kSig_i);
+ builder.addFunction("f", kSig_i)
+ .addBody([
+ kExprCallFunction, 0
+ ]).exportFunc();
+
+ var module = new WebAssembly.Module(builder.toBuffer());
+ %ValidateWasmModuleState(module);
+ %ValidateWasmInstancesChain(module, 0);
+ var i1 = new WebAssembly.Instance(module, {getValue: () => 1});
+ %ValidateWasmInstancesChain(module, 1);
+ var i2 = new WebAssembly.Instance(module, {getValue: () => 2});
+ %ValidateWasmInstancesChain(module, 2);
+ var i3 = new WebAssembly.Instance(module, {getValue: () => 3});
+ %ValidateWasmInstancesChain(module, 3);
+
+ assertEquals(1, i1.exports.f());
+ i1 = null;
+ gc();
+ %ValidateWasmInstancesChain(module, 2);
+ assertEquals(3, i3.exports.f());
+ i3 = null;
+ gc();
+ %ValidateWasmInstancesChain(module, 1);
+ assertEquals(2, i2.exports.f());
+ i2 = null;
+ gc();
+ %ValidateWasmModuleState(module);
+ var i4 = new WebAssembly.Instance(module, {getValue: () => 4});
+ assertEquals(4, i4.exports.f());
+ module = null;
+ gc();
+ %ValidateWasmOrphanedInstance(i4);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
index 94cc894275..aa36b71882 100644
--- a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
@@ -17,13 +17,13 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprI32Const, 1,
kExprGetLocal, 0,
kExprI32LoadMem, 0, 0,
- kExprCallIndirect, kArity1, signature,
+ kExprI32Const, 1,
+ kExprCallIndirect, signature,
kExprGetLocal,0,
kExprI32LoadMem,0, 0,
- kExprCallImport, kArity0, 0,
+ kExprCallFunction, 0,
kExprI32Add
]).exportFunc();
@@ -32,8 +32,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("_wrap_writer", signature)
.addBody([
kExprGetLocal, 0,
- kExprCallImport, kArity1, 1]);
- builder.appendToTable([0, 1]);
+ kExprCallFunction, 1]);
+ builder.appendToTable([2, 3]);
var module = new WebAssembly.Module(builder.toBuffer());
@@ -77,4 +77,24 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertFalse(clone == undefined);
assertFalse(clone == compiled_module);
assertEquals(clone.constructor, compiled_module.constructor);
-})()
+})();
+
+(function SerializeAfterInstantiation() {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("main", kSig_i)
+ .addBody([kExprI8Const, 42])
+ .exportFunc();
+
+ var compiled_module = new WebAssembly.Module(builder.toBuffer());
+ var instance1 = new WebAssembly.Instance(compiled_module);
+ var instance2 = new WebAssembly.Instance(compiled_module);
+ var serialized = %SerializeWasmModule(compiled_module);
+ var clone = %DeserializeWasmModule(serialized);
+
+ assertNotNull(clone);
+ assertFalse(clone == undefined);
+ assertFalse(clone == compiled_module);
+ assertEquals(clone.constructor, compiled_module.constructor);
+ var instance3 = new WebAssembly.Instance(clone);
+ assertFalse(instance3 == undefined);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/debug-disassembly.js b/deps/v8/test/mjsunit/wasm/debug-disassembly.js
index 976098a853..ac09d4af96 100644
--- a/deps/v8/test/mjsunit/wasm/debug-disassembly.js
+++ b/deps/v8/test/mjsunit/wasm/debug-disassembly.js
@@ -45,10 +45,10 @@ function listener(event, exec_state, event_data, data) {
assertTrue(!!line, "line number must occur in disassembly");
assertTrue(line.length > columnNr, "column number must be valid");
var expected_string;
- if (name.endsWith("/0")) {
+ if (name.endsWith("/1")) {
// Function 0 calls the imported function.
- expected_string = "kExprCallImport,";
- } else if (name.endsWith("/1")) {
+ expected_string = "kExprCallFunction,";
+ } else if (name.endsWith("/2")) {
// Function 1 calls function 0.
expected_string = "kExprCallFunction,";
} else {
@@ -76,7 +76,7 @@ var builder = new WasmModuleBuilder();
builder.addImport("func", kSig_v_v);
builder.addFunction("call_import", kSig_v_v)
- .addBody([kExprCallImport, kArity0, 0])
+ .addBody([kExprCallFunction, 0])
.exportFunc();
// Add a bit of unneccessary code to increase the byte offset.
@@ -87,8 +87,8 @@ builder.addFunction("call_call_import", kSig_v_v)
kExprI32Const, (-7 & 0x7f), kExprSetLocal, 1,
kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add, kExprI64UConvertI32,
kExprI64Const, 0,
- kExprI64Ne, kExprIf,
- kExprCallFunction, kArity0, 0,
+ kExprI64Ne, kExprIf, kAstStmt,
+ kExprCallFunction, 1,
kExprEnd
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js b/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js
index 3fab8c65b1..26bdf8ece8 100644
--- a/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js
+++ b/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js
@@ -7,6 +7,27 @@
// Ensure checked in wasm binaries used by integration tests from v8 hosts
// (such as chromium) are up to date.
+(function print_incrementer() {
+ if (true) return; // remove to regenerate the module
+
+ load('test/mjsunit/wasm/wasm-constants.js');
+ load('test/mjsunit/wasm/wasm-module-builder.js');
+
+ var module = new WasmModuleBuilder();
+ module.addFunction(undefined, kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32Const, 1, kExprI32Add])
+ .exportAs("increment");
+
+ var buffer = module.toBuffer(true);
+ var view = new Uint8Array(buffer);
+
+ print("const unsigned char module[] = {");
+ for (var i = 0; i < buffer.byteLength; i++) {
+ print(" " + view[i] + ",");
+ }
+ print("};");
+})();
+
(function ensure_incrementer() {
var buff = readbuffer("test/mjsunit/wasm/incrementer.wasm");
var mod = new WebAssembly.Module(buff);
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
new file mode 100644
index 0000000000..71bd5f18ed
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -0,0 +1,383 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --wasm-eh-prototype
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+// The following methods do not attempt to catch the exception they raise.
+var test_throw = (function () {
+ var builder = new WasmModuleBuilder();
+
+ builder.addFunction("throw_param_if_not_zero", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprI32Const, 0,
+ kExprI32Ne,
+ kExprIf, kAstStmt,
+ kExprGetLocal, 0,
+ kExprThrow,
+ kExprEnd,
+ kExprI32Const, 1
+ ])
+ .exportFunc()
+
+ builder.addFunction("throw_20", kSig_v_v)
+ .addBody([
+ kExprI32Const, 20,
+ kExprThrow,
+ ])
+ .exportFunc()
+
+ builder.addFunction("throw_expr_with_params", kSig_v_ddi)
+ .addBody([
+ // p2 * (p0 + min(p0, p1))|0 - 20
+ kExprGetLocal, 2,
+ kExprGetLocal, 0,
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprF64Min,
+ kExprF64Add,
+ kExprI32SConvertF64,
+ kExprI32Mul,
+ kExprI32Const, 20,
+ kExprI32Sub,
+ kExprThrow,
+ ])
+ .exportFunc()
+
+ return builder.instantiate();
+})();
+
+// Check the test_throw exists.
+assertFalse(test_throw === undefined);
+assertFalse(test_throw === null);
+assertFalse(test_throw === 0);
+assertEquals("object", typeof test_throw.exports);
+assertEquals("function", typeof test_throw.exports.throw_param_if_not_zero);
+assertEquals("function", typeof test_throw.exports.throw_20);
+assertEquals("function", typeof test_throw.exports.throw_expr_with_params);
+
+assertEquals(1, test_throw.exports.throw_param_if_not_zero(0));
+assertWasmThrows(10, function() { test_throw.exports.throw_param_if_not_zero(10) });
+assertWasmThrows(-1, function() { test_throw.exports.throw_param_if_not_zero(-1) });
+assertWasmThrows(20, test_throw.exports.throw_20);
+assertWasmThrows(
+ -8, function() { test_throw.exports.throw_expr_with_params(1.5, 2.5, 4); });
+assertWasmThrows(
+ 12, function() { test_throw.exports.throw_expr_with_params(5.7, 2.5, 4); });
+
+// Now that we know throwing works, we test catching the exceptions we raise.
+var test_catch = (function () {
+ var builder = new WasmModuleBuilder();
+
+ // Helper function for throwing from js. It is imported by the Wasm module
+ // as throw_i.
+ function throw_value(value) {
+ throw value;
+ }
+ var sig_index = builder.addType(kSig_v_i);
+ var kJSThrowI = builder.addImport("throw_i", sig_index);
+
+ // Helper function that throws a string. Wasm should not catch it.
+ function throw_string() {
+ throw "use wasm;";
+ }
+ sig_index = builder.addType(kSig_v_v);
+ var kJSThrowString = builder.addImport("throw_string", sig_index);
+
+ // Helper function that throws undefined. Wasm should not catch it.
+ function throw_undefined() {
+ throw undefined;
+ }
+ var kJSThrowUndefined = builder.addImport("throw_undefined", sig_index);
+
+ // Helper function that throws an fp. Wasm should not catch it.
+ function throw_fp() {
+ throw 10.5;
+ }
+ var kJSThrowFP = builder.addImport("throw_fp", sig_index);
+
+ // Helper function that throws a large number. Wasm should not catch it.
+ function throw_large() {
+ throw 1e+28;
+ }
+ var kJSThrowLarge = builder.addImport("throw_large", sig_index);
+
+ // Helper function for throwing from Wasm.
+ var kWasmThrowFunction =
+ builder.addFunction("throw", kSig_v_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprThrow
+ ])
+ .index;
+
+ // Scenario 1: Throw and catch appear on the same function. This should
+ // happen in case of inlining, for example.
+ builder.addFunction("same_scope", kSig_i_i)
+ .addBody([
+ kExprTry, kAstI32,
+ kExprGetLocal, 0,
+ kExprI32Const, 0,
+ kExprI32Ne,
+ kExprIf, kAstStmt,
+ kExprGetLocal, 0,
+ kExprThrow,
+ kExprUnreachable,
+ kExprEnd,
+ kExprI32Const, 63,
+ kExprCatch, 1,
+ kExprGetLocal, 1,
+ kExprEnd
+ ])
+ .addLocals({i32_count: 1})
+ .exportFunc()
+ .index;
+
+ builder.addFunction("same_scope_ignore", kSig_i_i)
+ .addBody([
+ kExprTry, kAstI32,
+ kExprGetLocal, 0,
+ kExprThrow,
+ kExprUnreachable,
+ kExprCatch, 1,
+ kExprGetLocal, 0,
+ kExprEnd,
+ ])
+ .addLocals({i32_count: 1})
+ .exportFunc();
+
+ builder.addFunction("same_scope_multiple", kSig_i_i)
+ // path = 0;
+ //
+ // try {
+ // try {
+ // try {
+ // if (p == 1)
+ // throw 1;
+ // path |= 2
+ // } catch (v) {
+ // path |= v | 4;
+ // throw path;
+ // }
+ // if (p == 2)
+ // throw path|8;
+ // path |= 16;
+ // } catch (v) {
+ // path |= v | 32;
+ // throw path;
+ // }
+ // if (p == 3)
+ // throw path|64;
+ // path |= 128
+ // } catch (v) {
+ // path |= v | 256;
+ // }
+ //
+ // return path;
+ //
+ // p == 1 -> path == 293
+ // p == 2 -> path == 298
+ // p == 3 -> path == 338
+ // else -> path == 146
+ .addBody([
+ kExprTry, kAstI32,
+ kExprTry, kAstI32,
+ kExprTry, kAstI32,
+ kExprGetLocal, 0,
+ kExprI32Const, 1,
+ kExprI32Eq,
+ kExprIf, kAstStmt,
+ kExprI32Const, 1,
+ kExprThrow,
+ kExprUnreachable,
+ kExprEnd,
+ kExprI32Const, 2,
+ kExprCatch, 1,
+ kExprGetLocal, 1,
+ kExprI32Const, 4,
+ kExprI32Ior,
+ kExprThrow,
+ kExprUnreachable,
+ kExprEnd,
+ kExprTeeLocal, 2,
+ kExprGetLocal, 0,
+ kExprI32Const, 2,
+ kExprI32Eq,
+ kExprIf, kAstStmt,
+ kExprGetLocal, 2,
+ kExprI32Const, 8,
+ kExprI32Ior,
+ kExprThrow,
+ kExprUnreachable,
+ kExprEnd,
+ kExprI32Const, 16,
+ kExprI32Ior,
+ kExprCatch, 1,
+ kExprGetLocal, 1,
+ kExprI32Const, 32,
+ kExprI32Ior,
+ kExprThrow,
+ kExprUnreachable,
+ kExprEnd,
+ kExprTeeLocal, 2,
+ kExprGetLocal, 0,
+ kExprI32Const, 3,
+ kExprI32Eq,
+ kExprIf, kAstStmt,
+ kExprGetLocal, 2,
+ kExprI32Const, /*64=*/ 192, 0,
+ kExprI32Ior,
+ kExprThrow,
+ kExprUnreachable,
+ kExprEnd,
+ kExprI32Const, /*128=*/ 128, 1,
+ kExprI32Ior,
+ kExprCatch, 1,
+ kExprGetLocal, 1,
+ kExprI32Const, /*256=*/ 128, 2,
+ kExprI32Ior,
+ kExprEnd,
+ ])
+ .addLocals({i32_count: 2})
+ .exportFunc();
+
+ // Scenario 2: Catches an exception raised from the direct callee.
+ var kFromDirectCallee =
+ builder.addFunction("from_direct_callee", kSig_i_i)
+ .addBody([
+ kExprTry, kAstI32,
+ kExprGetLocal, 0,
+ kExprCallFunction, kWasmThrowFunction,
+ kExprI32Const, /*-1=*/ 127,
+ kExprCatch, 1,
+ kExprGetLocal, 1,
+ kExprEnd
+ ])
+ .addLocals({i32_count: 1})
+ .exportFunc()
+ .index;
+
+ // Scenario 3: Catches an exception raised from an indirect callee.
+ var kFromIndirectCalleeHelper = kFromDirectCallee + 1;
+ builder.addFunction("from_indirect_callee_helper", kSig_v_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprI32Const, 0,
+ kExprI32GtS,
+ kExprIf, kAstStmt,
+ kExprGetLocal, 0,
+ kExprI32Const, 1,
+ kExprI32Sub,
+ kExprGetLocal, 1,
+ kExprI32Const, 1,
+ kExprI32Sub,
+ kExprCallFunction, kFromIndirectCalleeHelper,
+ kExprEnd,
+ kExprGetLocal, 1,
+ kExprCallFunction, kWasmThrowFunction,
+ ]);
+
+ builder.addFunction("from_indirect_callee", kSig_i_i)
+ .addBody([
+ kExprTry, kAstI32,
+ kExprGetLocal, 0,
+ kExprI32Const, 0,
+ kExprCallFunction, kFromIndirectCalleeHelper,
+ kExprI32Const, /*-1=*/ 127,
+ kExprCatch, 1,
+ kExprGetLocal, 1,
+ kExprEnd
+ ])
+ .addLocals({i32_count: 1})
+ .exportFunc();
+
+ // Scenario 4: Catches an exception raised in JS.
+ builder.addFunction("from_js", kSig_i_i)
+ .addBody([
+ kExprTry, kAstI32,
+ kExprGetLocal, 0,
+ kExprCallFunction, kJSThrowI,
+ kExprI32Const, /*-1=*/ 127,
+ kExprCatch, 1,
+ kExprGetLocal, 1,
+ kExprEnd,
+ ])
+ .addLocals({i32_count: 1})
+ .exportFunc();
+
+ // Scenario 5: Does not catch an exception raised in JS if it is not a
+ // number.
+ builder.addFunction("string_from_js", kSig_v_v)
+ .addBody([
+ kExprCallFunction, kJSThrowString
+ ])
+ .exportFunc();
+
+ builder.addFunction("fp_from_js", kSig_v_v)
+ .addBody([
+ kExprCallFunction, kJSThrowFP
+ ])
+ .exportFunc();
+
+ builder.addFunction("large_from_js", kSig_v_v)
+ .addBody([
+ kExprCallFunction, kJSThrowLarge
+ ])
+ .exportFunc();
+
+ builder.addFunction("undefined_from_js", kSig_v_v)
+ .addBody([
+ kExprCallFunction, kJSThrowUndefined
+ ])
+ .exportFunc();
+
+ return builder.instantiate({
+ throw_i: throw_value,
+ throw_string: throw_string,
+ throw_fp: throw_fp,
+ throw_large, throw_large,
+ throw_undefined: throw_undefined
+ });
+})();
+
+// Check the test_catch exists.
+assertFalse(test_catch === undefined);
+assertFalse(test_catch === null);
+assertFalse(test_catch === 0);
+assertEquals("object", typeof test_catch.exports);
+assertEquals("function", typeof test_catch.exports.same_scope);
+assertEquals("function", typeof test_catch.exports.same_scope_ignore);
+assertEquals("function", typeof test_catch.exports.same_scope_multiple);
+assertEquals("function", typeof test_catch.exports.from_direct_callee);
+assertEquals("function", typeof test_catch.exports.from_indirect_callee);
+assertEquals("function", typeof test_catch.exports.from_js);
+assertEquals("function", typeof test_catch.exports.string_from_js);
+
+assertEquals(63, test_catch.exports.same_scope(0));
+assertEquals(1024, test_catch.exports.same_scope(1024));
+assertEquals(-3, test_catch.exports.same_scope(-3));
+assertEquals(-1, test_catch.exports.same_scope_ignore(-1));
+assertEquals(1, test_catch.exports.same_scope_ignore(1));
+assertEquals(0x7FFFFFFF, test_catch.exports.same_scope_ignore(0x7FFFFFFF));
+assertEquals(1024, test_catch.exports.same_scope_ignore(1024));
+assertEquals(-1, test_catch.exports.same_scope_ignore(-1));
+assertEquals(293, test_catch.exports.same_scope_multiple(1));
+assertEquals(298, test_catch.exports.same_scope_multiple(2));
+assertEquals(338, test_catch.exports.same_scope_multiple(3));
+assertEquals(146, test_catch.exports.same_scope_multiple(0));
+assertEquals(-10024, test_catch.exports.from_direct_callee(-10024));
+assertEquals(3334333, test_catch.exports.from_direct_callee(3334333));
+assertEquals(-1, test_catch.exports.from_direct_callee(0xFFFFFFFF));
+assertEquals(0x7FFFFFFF, test_catch.exports.from_direct_callee(0x7FFFFFFF));
+assertEquals(-10, test_catch.exports.from_indirect_callee(10));
+assertEquals(-77, test_catch.exports.from_indirect_callee(77));
+assertEquals(10, test_catch.exports.from_js(10));
+assertEquals(-10, test_catch.exports.from_js(-10));
+
+assertThrowsEquals(test_catch.exports.string_from_js, "use wasm;");
+assertThrowsEquals(test_catch.exports.large_from_js, 1e+28);
+assertThrowsEquals(test_catch.exports.undefined_from_js, undefined);
diff --git a/deps/v8/test/mjsunit/wasm/export-table.js b/deps/v8/test/mjsunit/wasm/export-table.js
index 2084ddfc0a..6d21cf5790 100644
--- a/deps/v8/test/mjsunit/wasm/export-table.js
+++ b/deps/v8/test/mjsunit/wasm/export-table.js
@@ -15,7 +15,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([
kExprI8Const,
kReturnValue,
- kExprReturn, kArity1
+ kExprReturn
])
.exportFunc();
@@ -36,7 +36,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([
kExprI8Const,
kReturnValue,
- kExprReturn, kArity1
+ kExprReturn
])
.exportAs("blah")
.exportAs("foo");
@@ -61,7 +61,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([
kExprI8Const,
kReturnValue,
- kExprReturn, kArity1
+ kExprReturn
])
.exportAs("0");
diff --git a/deps/v8/test/mjsunit/wasm/ffi-error.js b/deps/v8/test/mjsunit/wasm/ffi-error.js
index 81dc47806e..6d4787e70a 100644
--- a/deps/v8/test/mjsunit/wasm/ffi-error.js
+++ b/deps/v8/test/mjsunit/wasm/ffi-error.js
@@ -16,7 +16,7 @@ function testCallFFI(ffi) {
.addBody([
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
- kExprCallFunction, kArity2, 0, // --
+ kExprCallFunction, 0, // --
]) // --
.exportFunc();
@@ -78,3 +78,40 @@ assertThrows(function() {
module.exports.function_with_invalid_signature(33, 88);
}, TypeError);
})();
+
+(function I64ParamsInSignatureThrows() {
+ var builder = new WasmModuleBuilder();
+
+ builder.addMemory(1, 1, true);
+ builder.addFunction("function_with_invalid_signature", kSig_i_l)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprI32ConvertI64
+ ])
+ .exportFunc()
+
+ var module = builder.instantiate();
+
+ assertThrows(function() {
+ module.exports.function_with_invalid_signature(33);
+ }, TypeError);
+})();
+
+(function I64JSImportThrows() {
+ var builder = new WasmModuleBuilder();
+ var sig_index = builder.addType(kSig_i_i);
+ var sig_i64_index = builder.addType(kSig_i_l);
+ var index = builder.addImport("func", sig_i64_index);
+ builder.addFunction("main", sig_index)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprI64SConvertI32,
+ kExprCallFunction, index // --
+ ]) // --
+ .exportFunc();
+ var func = function() {return {};};
+ var main = builder.instantiate({func: func}).exports.main;
+ assertThrows(function() {
+ main(13);
+ }, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/ffi.js b/deps/v8/test/mjsunit/wasm/ffi.js
index e84f038e68..5966ec8c5e 100644
--- a/deps/v8/test/mjsunit/wasm/ffi.js
+++ b/deps/v8/test/mjsunit/wasm/ffi.js
@@ -16,7 +16,7 @@ function testCallFFI(func, check) {
.addBody([
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
- kExprCallImport, kArity2, 0 // --
+ kExprCallFunction, 0 // --
]) // --
.exportFunc();
@@ -80,7 +80,7 @@ print("Constructor");
.addBody([
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
- kExprCallImport, kArity2, 0 // --
+ kExprCallFunction, 0 // --
]) // --
.exportFunc();
@@ -98,7 +98,7 @@ print("Native function");
builder.addImport("func", sig_index);
builder.addFunction("main", sig_index)
.addBody([
- kExprCallImport, kArity0, 0 // --
+ kExprCallFunction, 0 // --
]) // --
.exportFunc();
@@ -247,7 +247,7 @@ function testCallBinopVoid(type, func, check) {
.addBody([
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
- kExprCallImport, kArity2, 0, // --
+ kExprCallFunction, 0, // --
kExprI8Const, 99 // --
]) // --
.exportFunc()
@@ -302,11 +302,11 @@ function testCallPrint() {
builder.addImport("print", makeSig_v_x(kAstF64));
builder.addFunction("main", makeSig_v_x(kAstF64))
.addBody([
- kExprI8Const, 97, // --
- kExprCallImport, kArity1, 0, // --
- kExprGetLocal, 0, // --
- kExprCallImport, kArity1, 1 // --
- ]) // --
+ kExprI8Const, 97, // --
+ kExprCallFunction, 0, // --
+ kExprGetLocal, 0, // --
+ kExprCallFunction, 1 // --
+ ]) // --
.exportFunc()
var main = builder.instantiate({print: print}).exports.main;
diff --git a/deps/v8/test/mjsunit/wasm/frame-inspection.js b/deps/v8/test/mjsunit/wasm/frame-inspection.js
index 4d342e6cae..9d45239e4a 100644
--- a/deps/v8/test/mjsunit/wasm/frame-inspection.js
+++ b/deps/v8/test/mjsunit/wasm/frame-inspection.js
@@ -52,11 +52,11 @@ var builder = new WasmModuleBuilder();
builder.addImport("func", kSig_v_v);
builder.addFunction("wasm_1", kSig_v_v)
- .addBody([kExprNop, kExprCallFunction, kArity0, 1])
+ .addBody([kExprNop, kExprCallFunction, 2])
.exportAs("main");
builder.addFunction("wasm_2", kSig_v_v)
- .addBody([kExprCallImport, kArity0, 0]);
+ .addBody([kExprCallFunction, 0]);
function call_debugger() {
debugger;
diff --git a/deps/v8/test/mjsunit/wasm/function-names.js b/deps/v8/test/mjsunit/wasm/function-names.js
index 15771d8470..94919b5e6c 100644
--- a/deps/v8/test/mjsunit/wasm/function-names.js
+++ b/deps/v8/test/mjsunit/wasm/function-names.js
@@ -19,11 +19,11 @@ var expected_names = ["exec_unreachable", "☠", null,
for (var func_name of func_names) {
last_func_index = builder.addFunction(func_name, kSig_v_v)
- .addBody([kExprCallFunction, kArity0, last_func_index]).index;
+ .addBody([kExprCallFunction, last_func_index]).index;
}
builder.addFunction("main", kSig_v_v)
- .addBody([kExprCallFunction, kArity0, last_func_index])
+ .addBody([kExprCallFunction, last_func_index])
.exportFunc();
var module = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/wasm/gc-frame.js b/deps/v8/test/mjsunit/wasm/gc-frame.js
index 9c37fe485f..9e9aa98999 100644
--- a/deps/v8/test/mjsunit/wasm/gc-frame.js
+++ b/deps/v8/test/mjsunit/wasm/gc-frame.js
@@ -27,7 +27,8 @@ function makeFFI(func, t) {
kExprGetLocal, 7, // --
kExprGetLocal, 8, // --
kExprGetLocal, 9, // --
- kExprCallImport, 10, 0, // --
+ kExprCallFunction, 0, // --
+ kExprDrop, // --
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
kExprGetLocal, 2, // --
@@ -38,7 +39,7 @@ function makeFFI(func, t) {
kExprGetLocal, 7, // --
kExprGetLocal, 8, // --
kExprGetLocal, 9, // --
- kExprCallImport, 10, 0 // --
+ kExprCallFunction, 0, // --
]) // --
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory.js b/deps/v8/test/mjsunit/wasm/grow-memory.js
index 27aca22d1a..ecc105ee0d 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory.js
@@ -18,12 +18,27 @@ function genGrowMemoryBuilder() {
.addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
builder.addFunction("store", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0])
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0,
+ kExprGetLocal, 1])
+ .exportFunc();
+ builder.addFunction("load16", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem16U, 0, 0])
+ .exportFunc();
+ builder.addFunction("store16", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem16, 0, 0,
+ kExprGetLocal, 1])
+ .exportFunc();
+ builder.addFunction("load8", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem8U, 0, 0])
+ .exportFunc();
+ builder.addFunction("store8", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem8, 0, 0,
+ kExprGetLocal, 1])
.exportFunc();
return builder;
}
-function testGrowMemoryReadWrite() {
+function testGrowMemoryReadWrite32() {
var builder = genGrowMemoryBuilder();
builder.addMemory(1, 1, false);
var module = builder.instantiate();
@@ -32,7 +47,7 @@ function testGrowMemoryReadWrite() {
function poke(value) { return module.exports.store(offset, value); }
function growMem(pages) { return module.exports.grow_memory(pages); }
- for(offset = 0; offset <= (kPageSize - 4); offset++) {
+ for(offset = 0; offset <= (kPageSize - 4); offset+=4) {
poke(20);
assertEquals(20, peek());
}
@@ -43,7 +58,7 @@ function testGrowMemoryReadWrite() {
assertEquals(1, growMem(3));
- for (offset = kPageSize; offset <= 4*kPageSize -4; offset++) {
+ for (offset = kPageSize; offset <= 4*kPageSize -4; offset+=4) {
poke(20);
assertEquals(20, peek());
}
@@ -54,11 +69,11 @@ function testGrowMemoryReadWrite() {
assertEquals(4, growMem(15));
- for (offset = 4*kPageSize - 3; offset <= 4*kPageSize + 4; offset++) {
+ for (offset = 4*kPageSize - 3; offset <= 4*kPageSize + 4; offset+=4) {
poke(20);
assertEquals(20, peek());
}
- for (offset = 19*kPageSize - 10; offset <= 19*kPageSize - 4; offset++) {
+ for (offset = 19*kPageSize - 10; offset <= 19*kPageSize - 4; offset+=4) {
poke(20);
assertEquals(20, peek());
}
@@ -68,7 +83,101 @@ function testGrowMemoryReadWrite() {
}
}
-testGrowMemoryReadWrite();
+testGrowMemoryReadWrite32();
+
+function testGrowMemoryReadWrite16() {
+ var builder = genGrowMemoryBuilder();
+ builder.addMemory(1, 1, false);
+ var module = builder.instantiate();
+ var offset;
+ function peek() { return module.exports.load16(offset); }
+ function poke(value) { return module.exports.store16(offset, value); }
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+
+ for(offset = 0; offset <= (kPageSize - 2); offset+=2) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+ for (offset = kPageSize - 1; offset < kPageSize + 4; offset++) {
+ assertTraps(kTrapMemOutOfBounds, poke);
+ assertTraps(kTrapMemOutOfBounds, peek);
+ }
+
+ assertEquals(1, growMem(3));
+
+ for (offset = kPageSize; offset <= 4*kPageSize -2; offset+=2) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+ for (offset = 4*kPageSize - 1; offset < 4*kPageSize + 4; offset++) {
+ assertTraps(kTrapMemOutOfBounds, poke);
+ assertTraps(kTrapMemOutOfBounds, peek);
+ }
+
+ assertEquals(4, growMem(15));
+
+ for (offset = 4*kPageSize - 2; offset <= 4*kPageSize + 4; offset+=2) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+ for (offset = 19*kPageSize - 10; offset <= 19*kPageSize - 2; offset+=2) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+ for (offset = 19*kPageSize - 1; offset < 19*kPageSize + 5; offset++) {
+ assertTraps(kTrapMemOutOfBounds, poke);
+ assertTraps(kTrapMemOutOfBounds, peek);
+ }
+}
+
+testGrowMemoryReadWrite16();
+
+function testGrowMemoryReadWrite8() {
+ var builder = genGrowMemoryBuilder();
+ builder.addMemory(1, 1, false);
+ var module = builder.instantiate();
+ var offset;
+ function peek() { return module.exports.load8(offset); }
+ function poke(value) { return module.exports.store8(offset, value); }
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+
+ for(offset = 0; offset <= kPageSize - 1; offset++) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+ for (offset = kPageSize; offset < kPageSize + 4; offset++) {
+ assertTraps(kTrapMemOutOfBounds, poke);
+ assertTraps(kTrapMemOutOfBounds, peek);
+ }
+
+ assertEquals(1, growMem(3));
+
+ for (offset = kPageSize; offset <= 4*kPageSize -1; offset++) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+ for (offset = 4*kPageSize; offset < 4*kPageSize + 4; offset++) {
+ assertTraps(kTrapMemOutOfBounds, poke);
+ assertTraps(kTrapMemOutOfBounds, peek);
+ }
+
+ assertEquals(4, growMem(15));
+
+ for (offset = 4*kPageSize; offset <= 4*kPageSize + 4; offset++) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+ for (offset = 19*kPageSize - 10; offset <= 19*kPageSize - 1; offset++) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+ for (offset = 19*kPageSize; offset < 19*kPageSize + 5; offset++) {
+ assertTraps(kTrapMemOutOfBounds, poke);
+ assertTraps(kTrapMemOutOfBounds, peek);
+ }
+}
+
+testGrowMemoryReadWrite8();
function testGrowMemoryZeroInitialSize() {
var builder = genGrowMemoryBuilder();
@@ -117,3 +226,135 @@ function testGrowMemoryTrapMaxPages() {
}
testGrowMemoryTrapMaxPages();
+
+function testGrowMemoryTrapsWithNonSmiInput() {
+ var builder = genGrowMemoryBuilder();
+ var module = builder.instantiate();
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+ // The parameter of grow_memory is unsigned. Therefore -1 stands for
+ // UINT32_MIN, which cannot be represented as SMI.
+ assertEquals(-1, growMem(-1));
+};
+
+testGrowMemoryTrapsWithNonSmiInput();
+
+function testGrowMemoryCurrentMemory() {
+ var builder = genGrowMemoryBuilder();
+ builder.addMemory(1, 1, false);
+ builder.addFunction("memory_size", kSig_i_v)
+ .addBody([kExprMemorySize])
+ .exportFunc();
+ var module = builder.instantiate();
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+ function MemSize() { return module.exports.memory_size(); }
+ assertEquals(1, MemSize());
+ assertEquals(1, growMem(1));
+ assertEquals(2, MemSize());
+}
+
+testGrowMemoryCurrentMemory();
+
+function testGrowMemoryPreservesDataMemOp32() {
+ var builder = genGrowMemoryBuilder();
+ builder.addMemory(1, 1, false);
+ var module = builder.instantiate();
+ var offset, val;
+ function peek() { return module.exports.load(offset); }
+ function poke(value) { return module.exports.store(offset, value); }
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+
+ for(offset = 0; offset <= (kPageSize - 4); offset+=4) {
+ poke(100000 - offset);
+ assertEquals(100000 - offset, peek());
+ }
+
+ assertEquals(1, growMem(3));
+
+ for(offset = 0; offset <= (kPageSize - 4); offset+=4) {
+ assertEquals(100000 - offset, peek());
+ }
+}
+
+testGrowMemoryPreservesDataMemOp32();
+
+function testGrowMemoryPreservesDataMemOp16() {
+ var builder = genGrowMemoryBuilder();
+ builder.addMemory(1, 1, false);
+ var module = builder.instantiate();
+ var offset, val;
+ function peek() { return module.exports.load16(offset); }
+ function poke(value) { return module.exports.store16(offset, value); }
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+
+ for(offset = 0; offset <= (kPageSize - 2); offset+=2) {
+ poke(65535 - offset);
+ assertEquals(65535 - offset, peek());
+ }
+
+ assertEquals(1, growMem(3));
+
+ for(offset = 0; offset <= (kPageSize - 2); offset+=2) {
+ assertEquals(65535 - offset, peek());
+ }
+}
+
+testGrowMemoryPreservesDataMemOp16();
+
+function testGrowMemoryPreservesDataMemOp8() {
+ var builder = genGrowMemoryBuilder();
+ builder.addMemory(1, 1, false);
+ var module = builder.instantiate();
+ var offset, val = 0;
+ function peek() { return module.exports.load8(offset); }
+ function poke(value) { return module.exports.store8(offset, value); }
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+
+ for(offset = 0; offset <= (kPageSize - 1); offset++, val++) {
+ poke(val);
+ assertEquals(val, peek());
+ if (val == 255) val = 0;
+ }
+
+ assertEquals(1, growMem(3));
+
+ val = 0;
+
+ for(offset = 0; offset <= (kPageSize - 1); offset++, val++) {
+ assertEquals(val, peek());
+ if (val == 255) val = 0;
+ }
+}
+
+testGrowMemoryPreservesDataMemOp8();
+
+function testGrowMemoryOutOfBoundsOffset() {
+ var builder = genGrowMemoryBuilder();
+ builder.addMemory(1, 1, false);
+ var module = builder.instantiate();
+ var offset, val;
+ function peek() { return module.exports.load(offset); }
+ function poke(value) { return module.exports.store(offset, value); }
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+
+ offset = 3*kPageSize + 4;
+ assertTraps(kTrapMemOutOfBounds, poke);
+
+ assertEquals(1, growMem(1));
+ assertTraps(kTrapMemOutOfBounds, poke);
+
+ assertEquals(2, growMem(1));
+ assertTraps(kTrapMemOutOfBounds, poke);
+
+ assertEquals(3, growMem(1));
+
+ for (offset = 3*kPageSize; offset <= 4*kPageSize - 4; offset++) {
+ poke(0xaced);
+ assertEquals(0xaced, peek());
+ }
+
+ for (offset = 4*kPageSize - 3; offset <= 4*kPageSize + 4; offset++) {
+ assertTraps(kTrapMemOutOfBounds, poke);
+ }
+}
+
+testGrowMemoryOutOfBoundsOffset();
diff --git a/deps/v8/test/mjsunit/wasm/import-table.js b/deps/v8/test/mjsunit/wasm/import-table.js
index 8680addf61..aa836d6eac 100644
--- a/deps/v8/test/mjsunit/wasm/import-table.js
+++ b/deps/v8/test/mjsunit/wasm/import-table.js
@@ -16,7 +16,7 @@ function testCallImport(func, check) {
.addBody([
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
- kExprCallImport, 2, 0]) // --
+ kExprCallFunction, 0]) // --
.exportAs("main");
var main = builder.instantiate({func: func}).exports.main;
@@ -191,7 +191,7 @@ function testCallBinopVoid(type, func, check) {
.addBody([
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
- kExprCallImport, 2, 0, // --
+ kExprCallFunction, 0, // --
kExprI8Const, 99, // --
])
.exportFunc("main");
@@ -246,9 +246,9 @@ function testCallPrint() {
builder.addFunction("main", makeSig_r_x(kAstF64, kAstF64))
.addBody([
kExprI8Const, 97, // --
- kExprCallImport, kArity1, 0, // --
+ kExprCallFunction, 0, // --
kExprGetLocal, 0, // --
- kExprCallImport, kArity1, 1 // --
+ kExprCallFunction, 1 // --
])
.exportFunc();
@@ -270,8 +270,8 @@ function testCallImport2(foo, bar, expected) {
builder.addImport("bar", kSig_i);
builder.addFunction("main", kSig_i)
.addBody([
- kExprCallImport, kArity0, 0, // --
- kExprCallImport, kArity0, 1, // --
+ kExprCallFunction, 0, // --
+ kExprCallFunction, 1, // --
kExprI32Add, // --
]) // --
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/incrementer.wasm b/deps/v8/test/mjsunit/wasm/incrementer.wasm
index f80f7ad597..30b51c2e1b 100644
--- a/deps/v8/test/mjsunit/wasm/incrementer.wasm
+++ b/deps/v8/test/mjsunit/wasm/incrementer.wasm
Binary files differ
diff --git a/deps/v8/test/mjsunit/wasm/indirect-calls.js b/deps/v8/test/mjsunit/wasm/indirect-calls.js
index 1e87c6f823..26021bb74d 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-calls.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-calls.js
@@ -14,7 +14,7 @@ var module = (function () {
builder.addImport("add", sig_index);
builder.addFunction("add", sig_index)
.addBody([
- kExprGetLocal, 0, kExprGetLocal, 1, kExprCallImport, kArity2, 0
+ kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, 0
]);
builder.addFunction("sub", sig_index)
.addBody([
@@ -24,13 +24,13 @@ var module = (function () {
]);
builder.addFunction("main", kSig_i_iii)
.addBody([
- kExprGetLocal, 0,
kExprGetLocal, 1,
kExprGetLocal, 2,
- kExprCallIndirect, kArity2, sig_index
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_index
])
.exportFunc()
- builder.appendToTable([0, 1, 2]);
+ builder.appendToTable([1, 2, 3]);
return builder.instantiate({add: function(a, b) { return a + b | 0; }});
})();
@@ -47,3 +47,40 @@ assertEquals(19, module.exports.main(0, 12, 7));
assertTraps(kTrapFuncSigMismatch, "module.exports.main(2, 12, 33)");
assertTraps(kTrapFuncInvalid, "module.exports.main(3, 12, 33)");
+
+
+module = (function () {
+ var builder = new WasmModuleBuilder();
+
+ var sig_i_ii = builder.addType(kSig_i_ii);
+ var sig_i_i = builder.addType(kSig_i_i);
+ builder.addImport("mul", sig_i_ii);
+ builder.addFunction("add", sig_i_ii)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI32Add // --
+ ]);
+ builder.addFunction("popcnt", sig_i_i)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprI32Popcnt // --
+ ]);
+ builder.addFunction("main", kSig_i_iii)
+ .addBody([
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_i_ii
+ ])
+ .exportFunc()
+ builder.appendToTable([0, 1, 2, 3]);
+
+ return builder.instantiate({mul: function(a, b) { return a * b | 0; }});
+})();
+
+assertEquals(-6, module.exports.main(0, -2, 3));
+assertEquals(99, module.exports.main(1, 22, 77));
+assertTraps(kTrapFuncSigMismatch, "module.exports.main(2, 12, 33)");
+assertTraps(kTrapFuncSigMismatch, "module.exports.main(3, 12, 33)");
+assertTraps(kTrapFuncInvalid, "module.exports.main(4, 12, 33)");
diff --git a/deps/v8/test/mjsunit/wasm/instance-gc.js b/deps/v8/test/mjsunit/wasm/instance-gc.js
new file mode 100644
index 0000000000..1713f27b99
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/instance-gc.js
@@ -0,0 +1,122 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-gc
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+let nogc = () => {};
+
+function newModule() {
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, true);
+ builder.addFunction("main", kSig_i)
+ .addBody([kExprI32Const, 0, kExprI32LoadMem, 0, 0])
+ .exportFunc();
+
+ return new WebAssembly.Module(builder.toBuffer());
+}
+
+function newInstance(module, val) {
+ var instance = new WebAssembly.Instance(module);
+ var view = new Int32Array(instance.exports.memory.buffer);
+ view[0] = val;
+ return instance;
+}
+
+function TestSingleLiveInstance(gc) {
+ let module = newModule();
+
+ print("TestSingleLiveInstance...");
+ for (var i = 0; i < 5; i++) {
+ (() => { // don't leak references between iterations.
+ print(" [" + i + "]...");
+ gc();
+ var instance = newInstance(module, i + 99);
+ assertEquals(i + 99, instance.exports.main());
+ })();
+ }
+}
+
+TestSingleLiveInstance(nogc);
+TestSingleLiveInstance(gc);
+
+function TestMultiInstance(gc) {
+ let module = newModule();
+
+ print("TestMultiInstance...");
+ // Note: compute the root instances in another function to be
+ // sure that {roots} really is the only set of roots to the instances.
+ let roots = (() => { return [
+ newInstance(module, 33),
+ newInstance(module, 4444),
+ newInstance(module, 555555)
+ ];})();
+
+ (() => { // don't leak references!
+ print(" [0]...");
+ gc();
+ assertEquals(33, roots[0].exports.main());
+ roots[0] = null;
+ })();
+
+ (() => { // don't leak references!
+ print(" [1]...");
+ gc();
+ assertEquals(4444, roots[1].exports.main());
+ roots[1] = null;
+ })();
+
+ (() => { // don't leak references!
+ print(" [2]...");
+ gc();
+ assertEquals(555555, roots[2].exports.main());
+ roots[2] = null;
+ })();
+}
+
+TestMultiInstance(nogc);
+TestMultiInstance(gc);
+
+function TestReclaimingCompiledModule() {
+ let module = newModule();
+
+ print("TestReclaimingCompiledModule...");
+ let roots = (() => { return [
+ newInstance(module, 7777),
+ newInstance(module, 8888),
+ ];})();
+
+ (() => { // don't leak references!
+ print(" [0]...");
+ assertEquals(7777, roots[0].exports.main());
+ assertEquals(8888, roots[1].exports.main());
+ roots[1] = null;
+ })();
+
+ (() => { // don't leak references!
+ print(" [1]...");
+ gc();
+ roots[1] = newInstance(module, 9999);
+ assertEquals(7777, roots[0].exports.main());
+ assertEquals(9999, roots[1].exports.main());
+ roots[0] = null;
+ roots[1] = null;
+ })();
+
+ (() => { // don't leak references!
+ print(" [2]...");
+ gc();
+ roots[0] = newInstance(module, 11111);
+ roots[1] = newInstance(module, 22222);
+ assertEquals(11111, roots[0].exports.main());
+ assertEquals(22222, roots[1].exports.main());
+ roots[0] = null;
+ roots[1] = null;
+ })();
+}
+
+TestReclaimingCompiledModule(nogc);
+TestReclaimingCompiledModule(gc);
diff --git a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
index 92cdc14ff9..a0c11bdadd 100644
--- a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
+++ b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
@@ -31,14 +31,17 @@ function CheckInstance(instance) {
assertFalse(mem === null);
assertFalse(mem === 0);
assertEquals("object", typeof mem);
- assertTrue(mem instanceof ArrayBuffer);
- for (let i = 0; i < 4; i++) {
+ assertTrue(mem instanceof WebAssembly.Memory);
+ var buf = mem.buffer;
+ assertTrue(buf instanceof ArrayBuffer);
+ assertEquals(65536, buf.byteLength);
+ for (var i = 0; i < 4; i++) {
instance.exports.memory = 0; // should be ignored
+ mem.buffer = 0; // should be ignored
assertSame(mem, instance.exports.memory);
+ assertSame(buf, mem.buffer);
}
- assertEquals(65536, instance.exports.memory.byteLength);
-
// Check the properties of the main function.
let main = instance.exports.main;
assertFalse(main === undefined);
@@ -59,13 +62,18 @@ CheckInstance(new WebAssembly.Instance(module));
let promise = WebAssembly.compile(buffer);
promise.then(module => CheckInstance(new WebAssembly.Instance(module)));
+// Check that validate works correctly for a module.
+assertTrue(WebAssembly.validate(buffer));
+assertFalse(WebAssembly.validate(bytes(88, 88, 88, 88, 88, 88, 88, 88)));
+
// Negative tests.
(function InvalidModules() {
+ print("InvalidModules...");
let invalid_cases = [undefined, 1, "", "a", {some:1, obj: "b"}];
let len = invalid_cases.length;
for (var i = 0; i < len; ++i) {
try {
- let instance = new WebAssembly.Instance(1);
+ let instance = new WebAssembly.Instance(invalid_cases[i]);
assertUnreachable("should not be able to instantiate invalid modules.");
} catch (e) {
assertContains("Argument 0", e.toString());
@@ -75,9 +83,10 @@ promise.then(module => CheckInstance(new WebAssembly.Instance(module)));
// Compile async an invalid blob.
(function InvalidBinaryAsyncCompilation() {
+ print("InvalidBinaryAsyncCompilation...");
let builder = new WasmModuleBuilder();
builder.addFunction("f", kSig_i_i)
- .addBody([kExprCallImport, kArity0, 0]);
+ .addBody([kExprCallFunction, 0]);
let promise = WebAssembly.compile(builder.toBuffer());
promise
.then(compiled =>
@@ -87,6 +96,7 @@ promise.then(module => CheckInstance(new WebAssembly.Instance(module)));
// Multiple instances tests.
(function ManyInstances() {
+ print("ManyInstances...");
let compiled_module = new WebAssembly.Module(buffer);
let instance_1 = new WebAssembly.Instance(compiled_module);
let instance_2 = new WebAssembly.Instance(compiled_module);
@@ -94,6 +104,7 @@ promise.then(module => CheckInstance(new WebAssembly.Instance(module)));
})();
(function ManyInstancesAsync() {
+ print("ManyInstancesAsync...");
let promise = WebAssembly.compile(buffer);
promise.then(compiled_module => {
let instance_1 = new WebAssembly.Instance(compiled_module);
@@ -103,6 +114,7 @@ promise.then(module => CheckInstance(new WebAssembly.Instance(module)));
})();
(function InstancesAreIsolatedFromEachother() {
+ print("InstancesAreIsolatedFromEachother...");
var builder = new WasmModuleBuilder();
builder.addMemory(1,1, true);
var kSig_v_i = makeSig([kAstI32], []);
@@ -112,13 +124,13 @@ promise.then(module => CheckInstance(new WebAssembly.Instance(module)));
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprI32Const, 1,
kExprGetLocal, 0,
kExprI32LoadMem, 0, 0,
- kExprCallIndirect, kArity1, signature,
+ kExprI32Const, 1,
+ kExprCallIndirect, signature,
kExprGetLocal,0,
kExprI32LoadMem,0, 0,
- kExprCallImport, kArity0, 0,
+ kExprCallFunction, 0,
kExprI32Add
]).exportFunc();
@@ -127,8 +139,8 @@ promise.then(module => CheckInstance(new WebAssembly.Instance(module)));
builder.addFunction("_wrap_writer", signature)
.addBody([
kExprGetLocal, 0,
- kExprCallImport, kArity1, 1]);
- builder.appendToTable([0, 1]);
+ kExprCallFunction, 1]);
+ builder.appendToTable([2, 3]);
var module = new WebAssembly.Module(builder.toBuffer());
@@ -153,3 +165,54 @@ promise.then(module => CheckInstance(new WebAssembly.Instance(module)));
assertEquals(42, outval_1);
assertEquals(1000, outval_2);
})();
+
+(function GlobalsArePrivateToTheInstance() {
+ print("GlobalsArePrivateToTheInstance...");
+ var builder = new WasmModuleBuilder();
+ builder.addGlobal(kAstI32);
+ builder.addFunction("read", kSig_i_v)
+ .addBody([
+ kExprGetGlobal, 0])
+ .exportFunc();
+
+ builder.addFunction("write", kSig_v_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprSetGlobal, 0])
+ .exportFunc();
+
+ var module = new WebAssembly.Module(builder.toBuffer());
+ var i1 = new WebAssembly.Instance(module);
+ var i2 = new WebAssembly.Instance(module);
+ i1.exports.write(1);
+ i2.exports.write(2);
+ assertEquals(1, i1.exports.read());
+ assertEquals(2, i2.exports.read());
+})();
+
+
+(function InstanceMemoryIsIsolated() {
+ print("InstanceMemoryIsIsolated...");
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(1,1, true);
+
+ builder.addFunction("f", kSig_i)
+ .addBody([
+ kExprI32Const, 0,
+ kExprI32LoadMem, 0, 0
+ ]).exportFunc();
+
+ var mem_1 = new ArrayBuffer(65536);
+ var mem_2 = new ArrayBuffer(65536);
+ var view_1 = new Int32Array(mem_1);
+ var view_2 = new Int32Array(mem_2);
+ view_1[0] = 1;
+ view_2[0] = 1000;
+
+ var module = new WebAssembly.Module(builder.toBuffer());
+ var i1 = new WebAssembly.Instance(module, null, mem_1);
+ var i2 = new WebAssembly.Instance(module, null, mem_2);
+
+ assertEquals(1, i1.exports.f());
+ assertEquals(1000, i2.exports.f());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/memory-size.js b/deps/v8/test/mjsunit/wasm/memory-size.js
new file mode 100644
index 0000000000..197059eb49
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/memory-size.js
@@ -0,0 +1,30 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function testMemorySizeZero() {
+ print("testMemorySizeZero()");
+ var builder = new WasmModuleBuilder();
+ builder.addFunction("memory_size", kSig_i_v)
+ .addBody([kExprMemorySize])
+ .exportFunc();
+ var module = builder.instantiate();
+ assertEquals(0, module.exports.memory_size());
+})();
+
+(function testMemorySizeNonZero() {
+ print("testMemorySizeNonZero()");
+ var builder = new WasmModuleBuilder();
+ var size = 11;
+ builder.addMemory(size, size, false);
+ builder.addFunction("memory_size", kSig_i_v)
+ .addBody([kExprMemorySize])
+ .exportFunc();
+ var module = builder.instantiate();
+ assertEquals(size, module.exports.memory_size());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/memory.js b/deps/v8/test/mjsunit/wasm/memory.js
new file mode 100644
index 0000000000..e86825bd27
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/memory.js
@@ -0,0 +1,93 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+// Basic tests.
+
+var outOfUint32RangeValue = 1e12;
+
+function assertMemoryIsValid(memory) {
+ assertSame(WebAssembly.Memory.prototype, memory.__proto__);
+ assertSame(WebAssembly.Memory, memory.constructor);
+ assertTrue(memory instanceof Object);
+ assertTrue(memory instanceof WebAssembly.Memory);
+}
+
+(function TestConstructor() {
+ assertTrue(WebAssembly.Memory instanceof Function);
+ assertSame(WebAssembly.Memory, WebAssembly.Memory.prototype.constructor);
+ assertTrue(WebAssembly.Memory.prototype.grow instanceof Function);
+ let desc = Object.getOwnPropertyDescriptor(WebAssembly.Memory.prototype, 'buffer');
+ assertTrue(desc.get instanceof Function);
+ assertSame(undefined, desc.set);
+
+ assertThrows(() => new WebAssembly.Memory(), TypeError);
+ assertThrows(() => new WebAssembly.Memory(1), TypeError);
+ assertThrows(() => new WebAssembly.Memory(""), TypeError);
+
+ assertThrows(() => new WebAssembly.Memory({initial: -1}), RangeError);
+ assertThrows(() => new WebAssembly.Memory({initial: outOfUint32RangeValue}), RangeError);
+
+ assertThrows(() => new WebAssembly.Memory({initial: 10, maximum: -1}), RangeError);
+ assertThrows(() => new WebAssembly.Memory({initial: 10, maximum: outOfUint32RangeValue}), RangeError);
+ assertThrows(() => new WebAssembly.Memory({initial: 10, maximum: 9}), RangeError);
+
+ let memory = new WebAssembly.Memory({initial: 1});
+ assertMemoryIsValid(memory);
+})();
+
+(function TestConstructorWithMaximum() {
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 10});
+ assertMemoryIsValid(memory);
+})();
+
+(function TestInitialIsUndefined() {
+ // New memory with initial = undefined, which means initial = 0.
+ let memory = new WebAssembly.Memory({initial: undefined});
+ assertMemoryIsValid(memory);
+})();
+
+(function TestMaximumIsUndefined() {
+ // New memory with maximum = undefined, which means maximum = 0.
+ let memory = new WebAssembly.Memory({initial: 0, maximum: undefined});
+ assertMemoryIsValid(memory);
+})();
+
+(function TestMaximumIsReadOnce() {
+ var a = true;
+ var desc = {initial: 10};
+ Object.defineProperty(desc, 'maximum', {get: function() {
+ if (a) {
+ a = false;
+ return 16;
+ }
+ else {
+ // Change the return value on the second call so it throws.
+ return -1;
+ }
+ }});
+ let memory = new WebAssembly.Memory(desc);
+ assertMemoryIsValid(memory);
+})();
+
+(function TestMaximumDoesHasProperty() {
+ var hasPropertyWasCalled = false;
+ var desc = {initial: 10};
+ var proxy = new Proxy({maximum: 16}, {
+ has: function(target, name) { hasPropertyWasCalled = true; }
+ });
+ Object.setPrototypeOf(desc, proxy);
+ let memory = new WebAssembly.Memory(desc);
+ assertMemoryIsValid(memory);
+ assertTrue(hasPropertyWasCalled);
+})();
+
+(function TestBuffer() {
+ let memory = new WebAssembly.Memory({initial: 1});
+ assertTrue(memory.buffer instanceof Object);
+ assertTrue(memory.buffer instanceof ArrayBuffer);
+ assertThrows(() => {'use strict'; memory.buffer = memory.buffer}, TypeError)
+ assertThrows(() => ({__proto__: memory}).buffer, TypeError)
+})();
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
index a5e5f42488..6707f08164 100644
--- a/deps/v8/test/mjsunit/wasm/module-memory.js
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -15,33 +15,36 @@ function genModule(memory) {
builder.addMemory(1, 1, true);
builder.addFunction("main", kSig_i_i)
.addBody([
- // main body: while(i) { if(mem[i]) return -1; i -= 4; } return 0;
- kExprLoop,
- kExprGetLocal,0,
- kExprIf,
- kExprGetLocal,0,
- kExprI32LoadMem,0,0,
- kExprIf,
- kExprI8Const,255,
- kExprReturn, kArity1,
- kExprEnd,
- kExprGetLocal,0,
- kExprI8Const,4,
- kExprI32Sub,
- kExprSetLocal,0,
- kExprBr, kArity1, 1,
- kExprEnd,
- kExprEnd,
- kExprI8Const,0
+ // main body: while(i) { if(mem[i]) return -1; i -= 4; } return 0;
+ // TODO(titzer): this manual bytecode has a copy of test-run-wasm.cc
+ /**/ kExprLoop, kAstStmt, // --
+ /* */ kExprGetLocal, 0, // --
+ /* */ kExprIf, kAstStmt, // --
+ /* */ kExprGetLocal, 0, // --
+ /* */ kExprI32LoadMem, 0, 0, // --
+ /* */ kExprIf, kAstStmt, // --
+ /* */ kExprI8Const, 255, // --
+ /* */ kExprReturn, // --
+ /* */ kExprEnd, // --
+ /* */ kExprGetLocal, 0, // --
+ /* */ kExprI8Const, 4, // --
+ /* */ kExprI32Sub, // --
+ /* */ kExprSetLocal, 0, // --
+ /* */ kExprBr, 1, // --
+ /* */ kExprEnd, // --
+ /* */ kExprEnd, // --
+ /**/ kExprI8Const, 0 // --
])
.exportFunc();
-
- return builder.instantiate(null, memory);
+ var module = builder.instantiate(null, memory);
+ assertTrue(module.exports.memory instanceof WebAssembly.Memory);
+ if (memory != null) assertEquals(memory, module.exports.memory.buffer);
+ return module;
}
function testPokeMemory() {
var module = genModule(null);
- var buffer = module.exports.memory;
+ var buffer = module.exports.memory.buffer;
var main = module.exports.main;
assertEquals(kMemSize, buffer.byteLength);
@@ -66,9 +69,13 @@ function testPokeMemory() {
testPokeMemory();
+function genAndGetMain(buffer) {
+ return genModule(buffer).exports.main; // to prevent intermediates living
+}
+
function testSurvivalAcrossGc() {
- var checker = genModule(null).exports.main;
- for (var i = 0; i < 5; i++) {
+ var checker = genAndGetMain(null);
+ for (var i = 0; i < 3; i++) {
print("gc run ", i);
assertEquals(0, checker(kMemSize - 4));
gc();
@@ -110,8 +117,8 @@ testPokeOuterMemory();
function testOuterMemorySurvivalAcrossGc() {
var buffer = new ArrayBuffer(kMemSize);
- var checker = genModule(buffer).exports.main;
- for (var i = 0; i < 5; i++) {
+ var checker = genAndGetMain(buffer);
+ for (var i = 0; i < 3; i++) {
print("gc run ", i);
assertEquals(0, checker(kMemSize - 4));
gc();
@@ -133,7 +140,9 @@ function testOOBThrows() {
kExprGetLocal, 0,
kExprGetLocal, 1,
kExprI32LoadMem, 0, 0,
- kExprI32StoreMem, 0, 0
+ kExprI32StoreMem, 0, 0,
+ kExprGetLocal, 1,
+ kExprI32LoadMem, 0, 0,
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/parallel_compilation.js b/deps/v8/test/mjsunit/wasm/parallel_compilation.js
index 23c5658dcd..208232cfd4 100644
--- a/deps/v8/test/mjsunit/wasm/parallel_compilation.js
+++ b/deps/v8/test/mjsunit/wasm/parallel_compilation.js
@@ -20,13 +20,16 @@ function assertModule(module, memsize) {
assertFalse(mem === null);
assertFalse(mem === 0);
assertEquals("object", typeof mem);
- assertTrue(mem instanceof ArrayBuffer);
+ assertTrue(mem instanceof WebAssembly.Memory);
+ var buf = mem.buffer;
+ assertTrue(buf instanceof ArrayBuffer);
+ assertEquals(memsize, buf.byteLength);
for (var i = 0; i < 4; i++) {
module.exports.memory = 0; // should be ignored
- assertEquals(mem, module.exports.memory);
+ mem.buffer = 0; // should be ignored
+ assertSame(mem, module.exports.memory);
+ assertSame(buf, mem.buffer);
}
-
- assertEquals(memsize, module.exports.memory.byteLength);
}
function assertFunction(module, func) {
@@ -84,7 +87,7 @@ function assertFunction(module, func) {
.addBody([ // --
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
- kExprCallFunction, kArity2, f[i >>> 1].index]) // --
+ kExprCallFunction, f[i >>> 1].index]) // --
.exportFunc()
}
var module = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/wasm/receiver.js b/deps/v8/test/mjsunit/wasm/receiver.js
index c0070f8b91..97a6d94c9b 100644
--- a/deps/v8/test/mjsunit/wasm/receiver.js
+++ b/deps/v8/test/mjsunit/wasm/receiver.js
@@ -16,7 +16,7 @@ function testCallImport(func, expected, a, b) {
.addBody([
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
- kExprCallImport, 2, 0]) // --
+ kExprCallFunction, 0]) // --
.exportAs("main");
var main = builder.instantiate({func: func}).exports.main;
diff --git a/deps/v8/test/mjsunit/wasm/stack.js b/deps/v8/test/mjsunit/wasm/stack.js
index 0197b77caf..71038507db 100644
--- a/deps/v8/test/mjsunit/wasm/stack.js
+++ b/deps/v8/test/mjsunit/wasm/stack.js
@@ -45,7 +45,7 @@ var builder = new WasmModuleBuilder();
builder.addImport("func", kSig_v_v);
builder.addFunction("main", kSig_v_v)
- .addBody([kExprCallImport, kArity0, 0])
+ .addBody([kExprCallFunction, 0])
.exportAs("main");
builder.addFunction("exec_unreachable", kSig_v_v)
@@ -53,14 +53,14 @@ builder.addFunction("exec_unreachable", kSig_v_v)
.exportAs("exec_unreachable");
// Make this function unnamed, just to test also this case.
-var mem_oob_func = builder.addFunction(undefined, kSig_v_v)
+var mem_oob_func = builder.addFunction(undefined, kSig_i_v)
// Access the memory at offset -1, to provoke a trap.
.addBody([kExprI32Const, 0x7f, kExprI32LoadMem8S, 0, 0])
.exportAs("mem_out_of_bounds");
// Call the mem_out_of_bounds function, in order to have two WASM stack frames.
-builder.addFunction("call_mem_out_of_bounds", kSig_v_v)
- .addBody([kExprCallFunction, kArity0, mem_oob_func.index])
+builder.addFunction("call_mem_out_of_bounds", kSig_i_v)
+ .addBody([kExprCallFunction, mem_oob_func.index])
.exportAs("call_mem_out_of_bounds");
var module = builder.instantiate({func: STACK});
@@ -69,7 +69,7 @@ var module = builder.instantiate({func: STACK});
var expected_string = "Error\n" +
// The line numbers below will change as this test gains / loses lines..
" at STACK (stack.js:39:11)\n" + // --
- " at main (<WASM>[0]+1)\n" + // --
+ " at main (<WASM>[1]+1)\n" + // --
" at testSimpleStack (stack.js:76:18)\n" + // --
" at stack.js:78:3"; // --
@@ -89,7 +89,7 @@ Error.prepareStackTrace = function(error, frames) {
verifyStack(stack, [
// isWasm function line pos file
[ false, "STACK", 39, 0, "stack.js"],
- [ true, "main", 0, 1, null],
+ [ true, "main", 1, 1, null],
[ false, "testStackFrames", 87, 0, "stack.js"],
[ false, null, 96, 0, "stack.js"]
]);
@@ -103,7 +103,7 @@ Error.prepareStackTrace = function(error, frames) {
assertContains("unreachable", e.message);
verifyStack(e.stack, [
// isWasm function line pos file
- [ true, "exec_unreachable", 1, 1, null],
+ [ true, "exec_unreachable", 2, 1, null],
[ false, "testWasmUnreachable", 100, 0, "stack.js"],
[ false, null, 111, 0, "stack.js"]
]);
@@ -118,8 +118,8 @@ Error.prepareStackTrace = function(error, frames) {
assertContains("out of bounds", e.message);
verifyStack(e.stack, [
// isWasm function line pos file
- [ true, "", 2, 3, null],
- [ true, "call_mem_out_of_bounds", 3, 1, null],
+ [ true, "", 3, 3, null],
+ [ true, "call_mem_out_of_bounds", 4, 1, null],
[ false, "testWasmMemOutOfBounds", 115, 0, "stack.js"],
[ false, null, 127, 0, "stack.js"]
]);
@@ -135,7 +135,7 @@ Error.prepareStackTrace = function(error, frames) {
builder.addFunction("recursion", sig_index)
.addBody([
kExprI32Const, 0,
- kExprCallIndirect, kArity0, sig_index
+ kExprCallIndirect, sig_index
])
.exportFunc()
builder.appendToTable([0]);
diff --git a/deps/v8/test/mjsunit/wasm/stackwalk.js b/deps/v8/test/mjsunit/wasm/stackwalk.js
index 913269fdf4..cd560ec62b 100644
--- a/deps/v8/test/mjsunit/wasm/stackwalk.js
+++ b/deps/v8/test/mjsunit/wasm/stackwalk.js
@@ -16,7 +16,7 @@ function makeFFI(func) {
.addBody([
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
- kExprCallImport, kArity2, 0, // --
+ kExprCallFunction, 0, // --
])
.exportFunc()
diff --git a/deps/v8/test/mjsunit/wasm/start-function.js b/deps/v8/test/mjsunit/wasm/start-function.js
index c4d299e871..f0fbd081ac 100644
--- a/deps/v8/test/mjsunit/wasm/start-function.js
+++ b/deps/v8/test/mjsunit/wasm/start-function.js
@@ -65,8 +65,8 @@ assertFails(kSig_i_dd, [kExprGetLocal, 0]);
var func = builder.addFunction("", kSig_v_v)
.addBody([kExprNop]);
- builder.addExplicitSection([kDeclStart, 0]);
- builder.addExplicitSection([kDeclStart, 0]);
+ builder.addExplicitSection([kStartSectionCode, 0]);
+ builder.addExplicitSection([kStartSectionCode, 0]);
assertThrows(builder.instantiate);
})();
@@ -84,7 +84,7 @@ assertFails(kSig_i_dd, [kExprGetLocal, 0]);
builder.addStart(func.index);
var module = builder.instantiate();
- var memory = module.exports.memory;
+ var memory = module.exports.memory.buffer;
var view = new Int8Array(memory);
assertEquals(77, view[0]);
})();
@@ -102,7 +102,7 @@ assertFails(kSig_i_dd, [kExprGetLocal, 0]);
builder.addImport("foo", sig_index);
var func = builder.addFunction("", sig_index)
- .addBody([kExprCallImport, kArity0, 0]);
+ .addBody([kExprCallFunction, 0]);
builder.addStart(func.index);
diff --git a/deps/v8/test/mjsunit/wasm/table.js b/deps/v8/test/mjsunit/wasm/table.js
new file mode 100644
index 0000000000..0275bc0522
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/table.js
@@ -0,0 +1,95 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+// Basic tests.
+
+var outOfUint32RangeValue = 1e12;
+var int32ButOob = 1073741824;
+
+function assertTableIsValid(table) {
+ assertSame(WebAssembly.Table.prototype, table.__proto__);
+ assertSame(WebAssembly.Table, table.constructor);
+ assertTrue(table instanceof Object);
+ assertTrue(table instanceof WebAssembly.Table);
+}
+
+(function TestConstructor() {
+ assertTrue(WebAssembly.Table instanceof Function);
+ assertSame(WebAssembly.Table, WebAssembly.Table.prototype.constructor);
+ assertTrue(WebAssembly.Table.prototype.grow instanceof Function);
+ assertTrue(WebAssembly.Table.prototype.get instanceof Function);
+ assertTrue(WebAssembly.Table.prototype.set instanceof Function);
+ let desc = Object.getOwnPropertyDescriptor(WebAssembly.Table.prototype, 'length');
+ assertTrue(desc.get instanceof Function);
+ assertSame(undefined, desc.set);
+
+ assertThrows(() => new WebAssembly.Table(), TypeError);
+ assertThrows(() => new WebAssembly.Table(1), TypeError);
+ assertThrows(() => new WebAssembly.Table(""), TypeError);
+
+ assertThrows(() => new WebAssembly.Table({}), TypeError);
+ assertThrows(() => new WebAssembly.Table({initial: 10}), TypeError);
+
+ assertThrows(() => new WebAssembly.Table({element: 0, initial: 10}), TypeError);
+ assertThrows(() => new WebAssembly.Table({element: "any", initial: 10}), TypeError);
+
+ assertThrows(() => new WebAssembly.Table({element: "anyfunc", initial: -1}), RangeError);
+ assertThrows(() => new WebAssembly.Table({element: "anyfunc", initial: outOfUint32RangeValue}), RangeError);
+
+ assertThrows(() => new WebAssembly.Table({element: "anyfunc", initial: 10, maximum: -1}), RangeError);
+ assertThrows(() => new WebAssembly.Table({element: "anyfunc", initial: 10, maximum: outOfUint32RangeValue}), RangeError);
+ assertThrows(() => new WebAssembly.Table({element: "anyfunc", initial: 10, maximum: 9}), RangeError);
+
+ assertThrows(() => new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: int32ButOob}));
+
+ let table = new WebAssembly.Table({element: "anyfunc", initial: 1});
+ assertTableIsValid(table);
+})();
+
+(function TestConstructorWithMaximum() {
+ let table = new WebAssembly.Table({element: "anyfunc", maximum: 10});
+ assertTableIsValid(table);
+})();
+
+(function TestInitialIsUndefined() {
+ // New memory with initial = undefined, which means initial = 0.
+ let table = new WebAssembly.Table({element: "anyfunc", initial: undefined});
+ assertTableIsValid(table);
+})();
+
+(function TestMaximumIsUndefined() {
+ // New memory with maximum = undefined, which means maximum = 0.
+ let table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: undefined});
+ assertTableIsValid(table);
+})();
+
+(function TestMaximumIsReadOnce() {
+ var a = true;
+ var desc = {element: "anyfunc", initial: 10};
+ Object.defineProperty(desc, 'maximum', {get: function() {
+ if (a) {
+ a = false;
+ return 16;
+ }
+ else {
+ // Change the return value on the second call so it throws.
+ return -1;
+ }
+ }});
+ let table = new WebAssembly.Table(desc);
+ assertTableIsValid(table);
+})();
+
+(function TestMaximumDoesHasProperty() {
+ var hasPropertyWasCalled = false;
+ var desc = {element: "anyfunc", initial: 10};
+ var proxy = new Proxy({maximum: 16}, {
+ has: function(target, name) { hasPropertyWasCalled = true; }
+ });
+ Object.setPrototypeOf(desc, proxy);
+ let table = new WebAssembly.Table(desc);
+ assertTableIsValid(table);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js b/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js
index e180611818..df03aec9f5 100644
--- a/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js
+++ b/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js
@@ -26,8 +26,8 @@ var expect_no_elison = 1;
.addFunction("second_export", sig_index)
.addBody([
kExprGetLocal, 0,
- kExprCallImport, kArity1, 0,
- kExprReturn, kArity1
+ kExprCallFunction, 0,
+ kExprReturn
])
.exportFunc();
@@ -39,8 +39,8 @@ var expect_no_elison = 1;
.addFunction("first_export", sig_index)
.addBody([
kExprGetLocal, 0,
- kExprCallFunction, kArity1, 1,
- kExprReturn, kArity1
+ kExprCallFunction, 2,
+ kExprReturn
])
.exportFunc();
first_module
@@ -49,8 +49,8 @@ var expect_no_elison = 1;
kExprI32Const, 1,
kExprGetLocal, 0,
kExprI32Add,
- kExprCallImport, kArity1, 0,
- kExprReturn, kArity1
+ kExprCallFunction, 0,
+ kExprReturn
]);
var f = second_module
@@ -83,8 +83,8 @@ var expect_no_elison = 1;
.addFunction("second_export", sig_index_1)
.addBody([
kExprGetLocal, 0,
- kExprCallImport, kArity1, 0,
- kExprReturn, kArity1
+ kExprCallFunction, 0,
+ kExprReturn
])
.exportFunc();
@@ -97,8 +97,8 @@ var expect_no_elison = 1;
.addBody([
kExprGetLocal, 0,
kExprGetLocal, 1,
- kExprCallFunction, kArity2, 1,
- kExprReturn, kArity1
+ kExprCallFunction, 2,
+ kExprReturn
])
.exportFunc();
first_module
@@ -106,8 +106,8 @@ var expect_no_elison = 1;
.addBody([
kExprGetLocal, 0,
kExprGetLocal, 1,
- kExprCallImport, kArity2, 0,
- kExprReturn, kArity1
+ kExprCallFunction, 0,
+ kExprReturn
]);
var f = second_module
@@ -142,8 +142,8 @@ var expect_no_elison = 1;
kExprGetLocal, 0,
kExprGetLocal, 1,
kExprGetLocal, 2,
- kExprCallImport, kArity3, 0,
- kExprReturn, kArity1
+ kExprCallFunction, 0,
+ kExprReturn
])
.exportFunc();
@@ -156,8 +156,8 @@ var expect_no_elison = 1;
.addBody([
kExprGetLocal, 0,
kExprGetLocal, 1,
- kExprCallFunction, kArity2, 1,
- kExprReturn, kArity1
+ kExprCallFunction, 2,
+ kExprReturn
])
.exportFunc();
first_module
@@ -165,8 +165,8 @@ var expect_no_elison = 1;
.addBody([
kExprGetLocal, 0,
kExprGetLocal, 1,
- kExprCallImport, kArity2, 0,
- kExprReturn, kArity1
+ kExprCallFunction, 0,
+ kExprReturn
]);
var f = second_module
@@ -200,8 +200,8 @@ var expect_no_elison = 1;
.addBody([
kExprGetLocal, 0,
kExprGetLocal, 1,
- kExprCallImport, kArity2, 0,
- kExprReturn, kArity1
+ kExprCallFunction, 0,
+ kExprReturn
])
.exportFunc();
@@ -214,8 +214,8 @@ var expect_no_elison = 1;
.addBody([
kExprGetLocal, 0,
kExprGetLocal, 1,
- kExprCallFunction, kArity2, 1,
- kExprReturn, kArity1
+ kExprCallFunction, 2,
+ kExprReturn
])
.exportFunc();
first_module
@@ -223,8 +223,8 @@ var expect_no_elison = 1;
.addBody([
kExprGetLocal, 0,
kExprGetLocal, 1,
- kExprCallImport, kArity2, 0,
- kExprReturn, kArity1
+ kExprCallFunction, 0,
+ kExprReturn
]);
var f = second_module
diff --git a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
index 72d5a7aaa4..b1a2309770 100644
--- a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
@@ -7,7 +7,7 @@
load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
-var debug = false;
+var debug = true;
(function BasicTest() {
var module = new WasmModuleBuilder();
@@ -25,7 +25,7 @@ var debug = false;
var module = new WasmModuleBuilder();
var index = module.addImport("print", makeSig_v_x(kAstI32));
module.addFunction("foo", kSig_v_v)
- .addBody([kExprI8Const, 13, kExprCallImport, kArity1, index])
+ .addBody([kExprI8Const, 13, kExprCallFunction, index])
.exportAs("main");
var buffer = module.toBuffer(debug);
@@ -38,7 +38,7 @@ var debug = false;
var module = new WasmModuleBuilder();
module.addFunction(undefined, kSig_i_i)
.addLocals({i32_count: 1})
- .addBody([kExprGetLocal, 0, kExprSetLocal, 1])
+ .addBody([kExprGetLocal, 0, kExprSetLocal, 1, kExprGetLocal, 1])
.exportAs("main");
var buffer = module.toBuffer(debug);
@@ -60,7 +60,7 @@ var debug = false;
var module = new WasmModuleBuilder();
module.addFunction(undefined, makeSig_r_x(p.type, p.type))
.addLocals(p.locals)
- .addBody([kExprGetLocal, 0, kExprSetLocal, 1])
+ .addBody([kExprGetLocal, 0, kExprSetLocal, 1, kExprGetLocal, 1])
.exportAs("main");
var buffer = module.toBuffer(debug);
@@ -75,7 +75,7 @@ var debug = false;
module.addFunction("add", kSig_i_ii)
.addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add]);
module.addFunction("main", kSig_i_ii)
- .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, kArity2, 0])
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, 0])
.exportAs("main");
var instance = module.instantiate();
@@ -89,7 +89,7 @@ var debug = false;
.addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add]);
module.addFunction("main", kSig_i_iii)
.addBody([kExprGetLocal,
- 0, kExprGetLocal, 1, kExprGetLocal, 2, kExprCallIndirect, kArity2, 0])
+ 1, kExprGetLocal, 2, kExprGetLocal, 0, kExprCallIndirect, 0])
.exportAs("main");
module.appendToTable([0]);
@@ -143,7 +143,7 @@ var debug = false;
var module = new WasmModuleBuilder();
var index = module.addImportWithModule("mod", "print", makeSig_v_x(kAstI32));
module.addFunction("foo", kSig_v_v)
- .addBody([kExprI8Const, 19, kExprCallImport, kArity1, index])
+ .addBody([kExprI8Const, 19, kExprCallFunction, index])
.exportAs("main");
var buffer = module.toBuffer(debug);
diff --git a/deps/v8/test/mjsunit/wasm/trap-location.js b/deps/v8/test/mjsunit/wasm/trap-location.js
index 0440af9ccc..bc8214f9b2 100644
--- a/deps/v8/test/mjsunit/wasm/trap-location.js
+++ b/deps/v8/test/mjsunit/wasm/trap-location.js
@@ -30,29 +30,29 @@ var sig_index = builder.addType(kSig_i_v)
builder.addFunction("main", kSig_i_i)
.addBody([
// offset 1
- kExprBlock,
+ kExprBlock, kAstI32,
kExprGetLocal, 0,
kExprI32Const, 2,
kExprI32LtU,
- kExprIf,
- // offset 8
+ kExprIf, kAstStmt,
+ // offset 9
kExprI32Const, 0x7e /* -2 */,
kExprGetLocal, 0,
kExprI32DivU,
- // offset 13
+ // offset 15
kExprI32LoadMem, 0, 0,
- kExprBr, 1, 1,
+ kExprBr, 1,
kExprEnd,
- // offset 20
+ // offset 21
kExprGetLocal, 0,
kExprI32Const, 2,
kExprI32Eq,
- kExprIf,
+ kExprIf, kAstStmt,
kExprUnreachable,
kExprEnd,
- // offset 28
- kExprGetLocal, 0,
- kExprCallIndirect, kArity0, sig_index,
+ // offset 30
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_index,
kExprEnd,
])
.exportAs("main");
@@ -72,7 +72,7 @@ function testWasmTrap(value, reason, position) {
}
// The actual tests:
-testWasmTrap(0, kTrapDivByZero, 12);
-testWasmTrap(1, kTrapMemOutOfBounds, 13);
-testWasmTrap(2, kTrapUnreachable, 26);
-testWasmTrap(3, kTrapFuncInvalid, 30);
+testWasmTrap(0, kTrapDivByZero, 14);
+testWasmTrap(1, kTrapMemOutOfBounds, 15);
+testWasmTrap(2, kTrapUnreachable, 28);
+testWasmTrap(3, kTrapFuncInvalid, 32);
diff --git a/deps/v8/test/mjsunit/wasm/unicode-validation.js b/deps/v8/test/mjsunit/wasm/unicode-validation.js
index b2e4603087..29d1f73d94 100644
--- a/deps/v8/test/mjsunit/wasm/unicode-validation.js
+++ b/deps/v8/test/mjsunit/wasm/unicode-validation.js
@@ -49,7 +49,7 @@ function checkImportsAndExports(imported_module_name, imported_function_name,
kSig_v_v);
builder.addFunction(internal_function_name, kSig_v_v)
- .addBody([kExprCallImport, kArity0, 0])
+ .addBody([kExprCallFunction, 0])
.exportAs(exported_function_name);
// sanity check: does javascript agree with out shouldThrow annotation?
@@ -79,6 +79,7 @@ function checkImportsAndExports(imported_module_name, imported_function_name,
} catch (err) {
if (!shouldThrow) print(err);
assertTrue(shouldThrow, "Should not throw error on valid names");
+ assertTrue(err instanceof Error, "exception should be an Error");
assertContains("UTF-8", err.toString());
}
assertEquals(shouldThrow, hasThrown,
diff --git a/deps/v8/test/mjsunit/wasm/verify-function-simple.js b/deps/v8/test/mjsunit/wasm/verify-function-simple.js
index 31c23a6b69..1ac25143d7 100644
--- a/deps/v8/test/mjsunit/wasm/verify-function-simple.js
+++ b/deps/v8/test/mjsunit/wasm/verify-function-simple.js
@@ -25,7 +25,7 @@ try {
var data = bytes(
kWasmFunctionTypeForm, 0, 1, kAstI32, // signature
kDeclNoLocals, // --
- kExprBlock, kExprNop, kExprNop, kExprEnd // body
+ kExprBlock, kAstStmt, kExprNop, kExprNop, kExprEnd // body
);
Wasm.verifyFunction(data);
diff --git a/deps/v8/test/mjsunit/wasm/wasm-constants.js b/deps/v8/test/mjsunit/wasm/wasm-constants.js
index 04ac0c9592..388e5f5015 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-constants.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-constants.js
@@ -21,7 +21,7 @@ var kWasmH1 = 0x61;
var kWasmH2 = 0x73;
var kWasmH3 = 0x6d;
-var kWasmV0 = 11;
+var kWasmV0 = 0xC;
var kWasmV1 = 0;
var kWasmV2 = 0;
var kWasmV3 = 0;
@@ -51,30 +51,24 @@ function bytesWithHeader() {
var kDeclNoLocals = 0;
// Section declaration constants
-var kDeclMemory = 0x00;
-var kDeclTypes = 0x01;
-var kDeclFunctions = 0x02;
-var kDeclGlobals = 0x03;
-var kDeclData = 0x04;
-var kDeclTable = 0x05;
-var kDeclEnd = 0x06;
-var kDeclStart = 0x07;
-var kDeclImports = 0x08;
-var kDeclExports = 0x09;
-var kDeclFunctions = 0x0a;
-var kDeclCode = 0x0b;
-var kDeclNames = 0x0c;
+var kUnknownSectionCode = 0;
+var kTypeSectionCode = 1; // Function signature declarations
+var kImportSectionCode = 2; // Import declarations
+var kFunctionSectionCode = 3; // Function declarations
+var kTableSectionCode = 4; // Indirect function table and other tables
+var kMemorySectionCode = 5; // Memory attributes
+var kGlobalSectionCode = 6; // Global declarations
+var kExportSectionCode = 7; // Exports
+var kStartSectionCode = 8; // Start function declaration
+var kElementSectionCode = 9; // Elements section
+var kCodeSectionCode = 10; // Function code
+var kDataSectionCode = 11; // Data segments
+var kNameSectionCode = 12; // Name section (encoded as string)
-var kArity0 = 0;
-var kArity1 = 1;
-var kArity2 = 2;
-var kArity3 = 3;
var kWasmFunctionTypeForm = 0x40;
+var kWasmAnyFunctionTypeForm = 0x20;
-var section_names = [
- "memory", "type", "old_function", "global", "data",
- "table", "end", "start", "import", "export",
- "function", "code", "name"];
+var kResizableMaximumFlag = 1;
// Function declaration flags
var kDeclFunctionName = 0x01;
@@ -89,10 +83,16 @@ var kAstI64 = 2;
var kAstF32 = 3;
var kAstF64 = 4;
+var kExternalFunction = 0;
+var kExternalTable = 1;
+var kExternalMemory = 2;
+var kExternalGlobal = 3;
+
// Useful signatures
var kSig_i = makeSig([], [kAstI32]);
var kSig_d = makeSig([], [kAstF64]);
var kSig_i_i = makeSig([kAstI32], [kAstI32]);
+var kSig_i_l = makeSig([kAstI64], [kAstI32]);
var kSig_i_ii = makeSig([kAstI32, kAstI32], [kAstI32]);
var kSig_i_iii = makeSig([kAstI32, kAstI32, kAstI32], [kAstI32]);
var kSig_d_dd = makeSig([kAstF64, kAstF64], [kAstF64]);
@@ -105,6 +105,7 @@ var kSig_v_ii = makeSig([kAstI32, kAstI32], []);
var kSig_v_iii = makeSig([kAstI32, kAstI32, kAstI32], []);
var kSig_v_d = makeSig([kAstF64], []);
var kSig_v_dd = makeSig([kAstF64, kAstF64], []);
+var kSig_v_ddi = makeSig([kAstF64, kAstF64, kAstI32], []);
function makeSig(params, results) {
return {params: params, results: results};
@@ -131,7 +132,8 @@ function makeSig_r_xx(r, x) {
}
// Opcodes
-var kExprNop = 0x00;
+var kExprUnreachable = 0x00;
+var kExprNop = 0x0a;
var kExprBlock = 0x01;
var kExprLoop = 0x02;
var kExprIf = 0x03;
@@ -141,8 +143,12 @@ var kExprBr = 0x06;
var kExprBrIf = 0x07;
var kExprBrTable = 0x08;
var kExprReturn = 0x09;
-var kExprUnreachable = 0x0a;
+var kExprThrow = 0xfa;
+var kExprTry = 0xfb;
+var kExprCatch = 0xfe;
var kExprEnd = 0x0f;
+var kExprTeeLocal = 0x19;
+var kExprDrop = 0x0b;
var kExprI32Const = 0x10;
var kExprI64Const = 0x11;
@@ -152,7 +158,6 @@ var kExprGetLocal = 0x14;
var kExprSetLocal = 0x15;
var kExprCallFunction = 0x16;
var kExprCallIndirect = 0x17;
-var kExprCallImport = 0x18;
var kExprI8Const = 0xcb;
var kExprGetGlobal = 0xbb;
var kExprSetGlobal = 0xbc;
@@ -347,3 +352,20 @@ function assertTraps(trap, code) {
}
throw new MjsUnitAssertionError("Did not trap, expected: " + kTrapMsgs[trap]);
}
+
+function assertWasmThrows(value, code) {
+ assertEquals("number", typeof(value));
+ try {
+ if (typeof code === 'function') {
+ code();
+ } else {
+ eval(code);
+ }
+ } catch (e) {
+ assertEquals("number", typeof e);
+ assertEquals(value, e);
+ // Success.
+ return;
+ }
+ throw new MjsUnitAssertionError("Did not throw at all, expected: " + value);
+}
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index fecd164b56..7b77a8c9b1 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -61,7 +61,7 @@ class Binary extends Array {
emit_section(section_code, content_generator) {
// Emit section name.
- this.emit_string(section_names[section_code]);
+ this.emit_u8(section_code);
// Emit the section to a temporary buffer: its full length isn't know yet.
let section = new Binary;
content_generator(section);
@@ -104,6 +104,7 @@ class WasmModuleBuilder {
constructor() {
this.types = [];
this.imports = [];
+ this.globals = [];
this.functions = [];
this.exports = [];
this.table = [];
@@ -138,10 +139,15 @@ class WasmModuleBuilder {
return this.types.length - 1;
}
+ addGlobal(local_type) {
+ this.globals.push(local_type);
+ return this.globals.length - 1;
+ }
+
addFunction(name, type) {
let type_index = (typeof type) == "number" ? type : this.addType(type);
let func = new WasmFunctionBuilder(name, type_index);
- func.index = this.functions.length;
+ func.index = this.functions.length + this.imports.length;
this.functions.push(func);
return func;
}
@@ -176,7 +182,7 @@ class WasmModuleBuilder {
// Add type section
if (wasm.types.length > 0) {
if (debug) print("emitting types @ " + binary.length);
- binary.emit_section(kDeclTypes, section => {
+ binary.emit_section(kTypeSectionCode, section => {
section.emit_varint(wasm.types.length);
for (let type of wasm.types) {
section.emit_u8(kWasmFunctionTypeForm);
@@ -195,12 +201,13 @@ class WasmModuleBuilder {
// Add imports section
if (wasm.imports.length > 0) {
if (debug) print("emitting imports @ " + binary.length);
- binary.emit_section(kDeclImports, section => {
+ binary.emit_section(kImportSectionCode, section => {
section.emit_varint(wasm.imports.length);
for (let imp of wasm.imports) {
- section.emit_varint(imp.type);
section.emit_string(imp.module);
section.emit_string(imp.name || '');
+ section.emit_u8(kExternalFunction);
+ section.emit_varint(imp.type);
}
});
}
@@ -211,7 +218,7 @@ class WasmModuleBuilder {
let exports = 0;
if (wasm.functions.length > 0) {
if (debug) print("emitting function decls @ " + binary.length);
- binary.emit_section(kDeclFunctions, section => {
+ binary.emit_section(kFunctionSectionCode, section => {
section.emit_varint(wasm.functions.length);
for (let func of wasm.functions) {
has_names = has_names || (func.name != undefined &&
@@ -225,56 +232,108 @@ class WasmModuleBuilder {
// Add table.
if (wasm.table.length > 0) {
if (debug) print("emitting table @ " + binary.length);
- binary.emit_section(kDeclTable, section => {
+ binary.emit_section(kTableSectionCode, section => {
+ section.emit_u8(1); // one table entry
+ section.emit_u8(kWasmAnyFunctionTypeForm);
+ section.emit_u8(1);
+ section.emit_varint(wasm.table.length);
section.emit_varint(wasm.table.length);
- if (wasm.pad !== null) {
- if (debug) print("emitting table padding @ " + binary.length);
- section.emit_varint(wasm.pad);
- }
- for (let index of wasm.table) {
- section.emit_varint(index);
- }
});
}
// Add memory section
if (wasm.memory != undefined) {
if (debug) print("emitting memory @ " + binary.length);
- binary.emit_section(kDeclMemory, section => {
+ binary.emit_section(kMemorySectionCode, section => {
+ section.emit_u8(1); // one memory entry
+ section.emit_varint(kResizableMaximumFlag);
section.emit_varint(wasm.memory.min);
section.emit_varint(wasm.memory.max);
- section.emit_u8(wasm.memory.exp ? 1 : 0);
});
}
+ // Add global section.
+ if (wasm.globals.length > 0) {
+ if (debug) print ("emitting globals @ " + binary.length);
+ binary.emit_section(kGlobalSectionCode, section => {
+ section.emit_varint(wasm.globals.length);
+ for (let global_type of wasm.globals) {
+ section.emit_u8(global_type);
+ section.emit_u8(true); // mutable
+ switch (global_type) {
+ case kAstI32:
+ section.emit_u8(kExprI32Const);
+ section.emit_u8(0);
+ break;
+ case kAstI64:
+ section.emit_u8(kExprI64Const);
+ section.emit_u8(0);
+ break;
+ case kAstF32:
+ section.emit_u8(kExprF32Const);
+ section.emit_u32(0);
+ break;
+ case kAstF64:
+ section.emit_u8(kExprI32Const);
+ section.emit_u32(0);
+ section.emit_u32(0);
+ break;
+ }
+ section.emit_u8(kExprEnd); // end of init expression
+ }
+ });
+ }
// Add export table.
- if (exports > 0) {
+ var mem_export = (wasm.memory != undefined && wasm.memory.exp);
+ if (exports > 0 || mem_export) {
if (debug) print("emitting exports @ " + binary.length);
- binary.emit_section(kDeclExports, section => {
- section.emit_varint(exports);
+ binary.emit_section(kExportSectionCode, section => {
+ section.emit_varint(exports + (mem_export ? 1 : 0));
for (let func of wasm.functions) {
for (let exp of func.exports) {
- section.emit_varint(func.index);
section.emit_string(exp);
+ section.emit_u8(kExternalFunction);
+ section.emit_varint(func.index);
}
}
+ if (mem_export) {
+ section.emit_string("memory");
+ section.emit_u8(kExternalMemory);
+ section.emit_u8(0);
+ }
});
}
// Add start function section.
if (wasm.start_index != undefined) {
if (debug) print("emitting start function @ " + binary.length);
- binary.emit_section(kDeclStart, section => {
+ binary.emit_section(kStartSectionCode, section => {
section.emit_varint(wasm.start_index);
});
}
+ // Add table elements.
+ if (wasm.table.length > 0) {
+ if (debug) print("emitting table @ " + binary.length);
+ binary.emit_section(kElementSectionCode, section => {
+ section.emit_u8(1);
+ section.emit_u8(0); // table index
+ section.emit_u8(kExprI32Const);
+ section.emit_u8(0);
+ section.emit_u8(kExprEnd);
+ section.emit_varint(wasm.table.length);
+ for (let index of wasm.table) {
+ section.emit_varint(index);
+ }
+ });
+ }
+
// Add function bodies.
if (wasm.functions.length > 0) {
// emit function bodies
if (debug) print("emitting code @ " + binary.length);
- binary.emit_section(kDeclCode, section => {
+ binary.emit_section(kCodeSectionCode, section => {
section.emit_varint(wasm.functions.length);
for (let func of wasm.functions) {
// Function body length will be patched later.
@@ -313,10 +372,13 @@ class WasmModuleBuilder {
// Add data segments.
if (wasm.segments.length > 0) {
if (debug) print("emitting data segments @ " + binary.length);
- binary.emit_section(kDeclData, section => {
+ binary.emit_section(kDataSectionCode, section => {
section.emit_varint(wasm.segments.length);
for (let seg of wasm.segments) {
+ section.emit_u8(0); // linear memory index 0
+ section.emit_u8(kExprI32Const);
section.emit_varint(seg.addr);
+ section.emit_u8(kExprEnd);
section.emit_varint(seg.data.length);
section.emit_bytes(seg.data);
}
@@ -332,7 +394,8 @@ class WasmModuleBuilder {
// Add function names.
if (has_names) {
if (debug) print("emitting names @ " + binary.length);
- binary.emit_section(kDeclNames, section => {
+ binary.emit_section(kUnknownSectionCode, section => {
+ section.emit_string("name");
section.emit_varint(wasm.functions.length);
for (let func of wasm.functions) {
var name = func.name == undefined ? "" : func.name;
diff --git a/deps/v8/test/mjsunit/wasm/wasm-object-api.js b/deps/v8/test/mjsunit/wasm/wasm-object-api.js
index 4e1df8cf14..b8663b3b29 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-object-api.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-object-api.js
@@ -15,3 +15,4 @@ assertEquals('object', typeof WebAssembly);
assertEquals('function', typeof WebAssembly.Module);
assertEquals('function', typeof WebAssembly.Instance);
assertEquals('function', typeof WebAssembly.compile);
+assertEquals('function', typeof WebAssembly.validate);
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index d4eebeec54..26503bfa6e 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -191,6 +191,9 @@
'js1_5/GC/regress-278725': [PASS, ['mode == debug', FAIL]],
# http://b/issue?id=1206983
'js1_5/Regress/regress-367561-03': [PASS, ['mode == debug', FAIL], NO_VARIANTS],
+ 'ecma/FunctionObjects/15.3.1.1-3': [PASS, FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
+ 'ecma/FunctionObjects/15.3.2.1-3': [PASS, FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
+ 'ecma/FunctionObjects/15.3.5-1': [PASS, FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
'ecma/Date/15.9.5.10-2': [PASS, FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
# These tests create two Date objects just after each other and
@@ -979,4 +982,9 @@
#BUG(3152): Avoid C stack overflow.
'js1_5/extensions/regress-355497': [FAIL_OK, 'Flags: --sim-stack-size=512'],
}], # 'arch == arm64 and simulator_run == True'
+
+['variant == asm_wasm', {
+ '*': [SKIP],
+}], # variant == asm_wasm
+
]
diff --git a/deps/v8/test/test262/detachArrayBuffer.js b/deps/v8/test/test262/detachArrayBuffer.js
index adfece7edb..c34aa67a47 100644
--- a/deps/v8/test/test262/detachArrayBuffer.js
+++ b/deps/v8/test/test262/detachArrayBuffer.js
@@ -5,3 +5,5 @@
function $DETACHBUFFER(buffer) {
%ArrayBufferNeuter(buffer);
}
+
+$.detachArrayBuffer = $DETACHBUFFER;
diff --git a/deps/v8/test/test262/harness-adapt.js b/deps/v8/test/test262/harness-adapt.js
index 60c0858f02..d93d7e1610 100644
--- a/deps/v8/test/test262/harness-adapt.js
+++ b/deps/v8/test/test262/harness-adapt.js
@@ -89,3 +89,7 @@ function $DONE(arg){
quit(0);
};
+
+var $ = {
+ evalScript(script) { return Realm.eval(Realm.current(), script); }
+};
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 80bb9d109e..479e2cb198 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -26,6 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
[
+
[ALWAYS, {
###################### NEEDS INVESTIGATION #######################
@@ -34,6 +35,11 @@
# https://code.google.com/p/v8/issues/detail?id=705
'language/statements/for-in/12.6.4-2': [PASS, FAIL_OK],
+ # Date tests that fail in CE(S)T timezone.
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5449
+ 'built-ins/Date/prototype/setFullYear/new-value-time-clip': [PASS, FAIL],
+ 'built-ins/Date/prototype/setMonth/new-value-time-clip': [PASS, FAIL],
+
###################### MISSING ES6 FEATURES #######################
# The order of adding the name property is wrong
@@ -94,37 +100,19 @@
'language/expressions/postfix-decrement/S11.3.2_A5_*': [FAIL],
'language/expressions/prefix-decrement/S11.4.5_A5_*': [FAIL],
'language/expressions/prefix-increment/S11.4.4_A5_*': [FAIL],
+ 'language/statements/variable/binding-resolution': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=4253
'language/asi/S7.9_A5.7_T1': [PASS, FAIL_OK],
###### BEGIN REGEXP SUBCLASSING SECTION ######
- # Times out
- 'built-ins/RegExp/prototype/Symbol.match/coerce-global': [SKIP],
-
- # Sticky support busted
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5361
'built-ins/RegExp/prototype/Symbol.replace/y-init-lastindex': [FAIL],
'built-ins/RegExp/prototype/Symbol.replace/y-set-lastindex': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=4504
- # https://bugs.chromium.org/p/chromium/issues/detail?id=624318
- 'built-ins/RegExp/prototype/Symbol.match/builtin-failure-set-lastindex-err': [PASS, FAIL],
- 'built-ins/RegExp/prototype/Symbol.match/builtin-failure-y-set-lastindex-err': [PASS, FAIL],
- 'built-ins/RegExp/prototype/Symbol.match/builtin-success-g-set-lastindex-err': [SKIP],
- 'built-ins/RegExp/prototype/Symbol.match/builtin-success-y-set-lastindex-err': [PASS, FAIL],
- 'built-ins/RegExp/prototype/Symbol.match/g-init-lastindex-err': [PASS, FAIL],
- 'built-ins/RegExp/prototype/Symbol.match/g-match-empty-set-lastindex-err': [PASS, FAIL],
- 'built-ins/RegExp/prototype/Symbol.match/y-fail-lastindex-no-write': [PASS, FAIL],
- 'built-ins/RegExp/prototype/Symbol.replace/g-init-lastindex-err': [PASS, FAIL],
- 'built-ins/RegExp/prototype/Symbol.replace/y-fail-lastindex-no-write': [PASS, FAIL],
- 'built-ins/RegExp/prototype/Symbol.search/set-lastindex-init-err': [PASS, FAIL],
- 'built-ins/RegExp/prototype/Symbol.search/set-lastindex-restore-err': [PASS, FAIL],
- 'built-ins/RegExp/prototype/exec/y-fail-lastindex-no-write': [PASS, FAIL],
- 'built-ins/RegExp/prototype/test/y-fail-lastindex-no-write': [PASS, FAIL],
-
- # SKIP rather than FAIL, as the test checks for an exception which
- # happens to be thrown for some other reason.
- 'built-ins/RegExp/prototype/Symbol.split/str-result-get-length-err': [SKIP],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5360
+ 'built-ins/RegExp/prototype/Symbol.match/builtin-failure-set-lastindex-err': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.search/set-lastindex-restore-err': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=5123
'built-ins/RegExp/prototype/Symbol.replace/coerce-global': [FAIL],
@@ -132,26 +120,16 @@
###### END REGEXP SUBCLASSING SECTION ######
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5360
+ 'built-ins/RegExp/prototype/Symbol.match/builtin-coerce-lastindex-err': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.match/builtin-failure-set-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.search/set-lastindex-restore': [FAIL],
+
# https://code.google.com/p/v8/issues/detail?id=4360
'intl402/Collator/10.1.1_1': [FAIL],
'intl402/DateTimeFormat/12.1.1_1': [FAIL],
'intl402/NumberFormat/11.1.1_1': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=4784
- 'built-ins/TypedArray/prototype/set/typedarray-arg-negative-integer-offset-throws': [FAIL],
- 'built-ins/TypedArray/prototype/set/array-arg-negative-integer-offset-throws': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=4901
- 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-greater-than-last-index': [FAIL],
- 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-lower-than-zero': [FAIL],
- 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-minus-zero': [FAIL],
- 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-not-integer': [FAIL],
- 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-numericindex-desc-not-writable': [FAIL],
- 'built-ins/TypedArrays/internals/Set/key-is-minus-zero': [FAIL],
- 'built-ins/TypedArrays/internals/Set/key-is-not-integer': [FAIL],
- 'built-ins/TypedArrays/internals/Set/key-is-out-of-bounds': [FAIL],
- 'built-ins/TypedArrays/internals/Set/tonumber-value-throws': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=4895
'built-ins/TypedArrays/internals/HasProperty/detached-buffer': [FAIL],
'built-ins/TypedArrays/internals/Set/detached-buffer': [FAIL],
@@ -215,6 +193,8 @@
'built-ins/DataView/prototype/byteOffset/detached-buffer': [FAIL],
'built-ins/DataView/detached-buffer': [FAIL],
'built-ins/ArrayBuffer/prototype/byteLength/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/setFloat64/detached-buffer-after-toindex-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/setInt16/detached-buffer-after-toindex-byteoffset': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=4648
'built-ins/TypedArray/prototype/copyWithin/detached-buffer': [FAIL],
@@ -298,11 +278,17 @@
'language/eval-code/direct/non-definable-function-with-variable': [FAIL],
'language/eval-code/indirect/non-definable-function-with-function': [FAIL],
'language/eval-code/indirect/non-definable-function-with-variable': [FAIL],
+ 'language/global-code/script-decl-func-err-non-configurable': [FAIL],
+ 'language/global-code/script-decl-var-collision': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=4124
'built-ins/Simd/*': [SKIP],
# https://bugs.chromium.org/p/v8/issues/detail?id=4958
+ 'built-ins/Function/prototype/toString/AsyncFunction': [FAIL],
+ 'built-ins/Function/prototype/toString/async-function-declaration': [FAIL],
+ 'built-ins/Function/prototype/toString/async-function-expression': [FAIL],
+ 'built-ins/Function/prototype/toString/async-method': [FAIL],
'built-ins/Function/prototype/toString/Function': [FAIL],
'built-ins/Function/prototype/toString/GeneratorFunction': [FAIL],
'built-ins/Function/prototype/toString/function-declaration': [FAIL],
@@ -326,13 +312,6 @@
'built-ins/Function/prototype/toString/setter-object': [FAIL],
'built-ins/Function/prototype/toString/unicode': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=1569
- 'language/eval-code/direct/export': [SKIP],
- 'language/eval-code/direct/import': [SKIP],
- 'language/eval-code/indirect/export': [SKIP],
- 'language/eval-code/indirect/import': [SKIP],
- 'language/module-code/*': [SKIP],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=5012
# http://bugs.icu-project.org/trac/ticket/12671
'intl402/Intl/getCanonicalLocales/weird-cases': [FAIL],
@@ -420,8 +399,89 @@
'annexB/built-ins/Date/prototype/setYear/time-clip': [FAIL],
'annexB/built-ins/Date/prototype/setYear/year-number-relative': [FAIL],
- # Fixed by https://github.com/tc39/test262/pull/662.
- 'built-ins/Object/getOwnPropertyDescriptors/duplicate-keys': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5155
+ 'language/white-space/mongolian-vowel-separator': [FAIL],
+ 'language/white-space/mongolian-vowel-separator-eval': [FAIL],
+ 'built-ins/RegExp/S15.10.2.12_A2_T1': [FAIL],
+ 'built-ins/RegExp/S15.10.2.12_A1_T1': [FAIL],
+ 'built-ins/parseFloat/S15.1.2.3_A2_T10_U180E': [FAIL],
+ 'built-ins/parseInt/S15.1.2.2_A2_T10_U180E': [FAIL],
+ 'built-ins/String/prototype/trim/u180e': [FAIL],
+ 'built-ins/Number/S9.3.1_A3_T2_U180E': [FAIL],
+ 'built-ins/Number/S9.3.1_A3_T1_U180E': [FAIL],
+ 'built-ins/Number/S9.3.1_A2_U180E': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5051
+ 'language/expressions/arrow-function/params-trailing-comma': [FAIL],
+ 'language/expressions/arrow-function/params-trailing-comma-length': [FAIL],
+ 'language/expressions/function/params-trailing-comma': [FAIL],
+ 'language/expressions/function/params-trailing-comma-arguments': [FAIL],
+ 'language/expressions/function/params-trailing-comma-length': [FAIL],
+ 'language/expressions/generators/params-trailing-comma': [FAIL],
+ 'language/expressions/generators/params-trailing-comma-arguments': [FAIL],
+ 'language/expressions/generators/params-trailing-comma-length': [FAIL],
+ 'language/expressions/object/method-definition/params-trailing-comma': [FAIL],
+ 'language/expressions/object/method-definition/params-trailing-comma-arguments': [FAIL],
+ 'language/expressions/object/method-definition/params-trailing-comma-length': [FAIL],
+ 'language/statements/class/definition/params-trailing-comma': [FAIL],
+ 'language/statements/class/definition/params-trailing-comma-arguments': [FAIL],
+ 'language/statements/class/definition/params-trailing-comma-length': [FAIL],
+ 'language/statements/function/params-trailing-comma': [FAIL],
+ 'language/statements/function/params-trailing-comma-arguments': [FAIL],
+ 'language/statements/function/params-trailing-comma-length': [FAIL],
+ 'language/statements/generators/params-trailing-comma': [FAIL],
+ 'language/statements/generators/params-trailing-comma-arguments': [FAIL],
+ 'language/statements/generators/params-trailing-comma-length': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5326
+ 'language/expressions/super/call-new-target-undef': [FAIL],
+ 'language/eval-code/direct/super-call-fn': [FAIL],
+ 'language/expressions/super/prop-dot-fn-no-super-bndng': [FAIL],
+ 'language/expressions/super/prop-expr-fn-no-super-bndng': [FAIL],
+ 'language/expressions/super/prop-expr-fn-eval-before-has-super': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5064
+ 'language/expressions/arrow-function/params-dflt-duplicates': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5244
+ 'intl402/NumberFormat/prototype/formatToParts/*': [SKIP],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5327
+ 'built-ins/TypedArray/prototype/set/array-arg-negative-integer-offset-throws': [FAIL],
+ 'built-ins/TypedArray/prototype/set/typedarray-arg-negative-integer-offset-throws': [FAIL],
+ 'built-ins/TypedArrays/internals/Set/key-is-minus-zero': [FAIL],
+ 'built-ins/TypedArrays/internals/Set/key-is-not-integer': [FAIL],
+ 'built-ins/TypedArrays/internals/Set/key-is-out-of-bounds': [FAIL],
+ 'built-ins/TypedArrays/internals/Set/tonumber-value-throws': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5328
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-numericindex-desc-not-writable': [FAIL],
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-not-integer': [FAIL],
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-minus-zero': [FAIL],
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-lower-than-zero': [FAIL],
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-greater-than-last-index': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5329
+ 'built-ins/RegExp/prototype/source/value-line-terminator': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5112
+ 'annexB/language/eval-code/direct/func-block-decl-eval-func-no-skip-try': [FAIL],
+ 'annexB/language/eval-code/direct/func-if-decl-else-decl-a-eval-func-no-skip-try': [FAIL],
+ 'annexB/language/eval-code/direct/func-if-decl-else-decl-b-eval-func-no-skip-try': [FAIL],
+ 'annexB/language/eval-code/direct/func-if-decl-else-stmt-eval-func-no-skip-try': [FAIL],
+ 'annexB/language/eval-code/direct/func-if-decl-no-else-eval-func-no-skip-try': [FAIL],
+ 'annexB/language/eval-code/direct/func-if-stmt-else-decl-eval-func-no-skip-try': [FAIL],
+ 'annexB/language/eval-code/direct/func-switch-case-eval-func-no-skip-try': [FAIL],
+ 'annexB/language/eval-code/direct/func-switch-dflt-eval-func-no-skip-try': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5334
+ 'built-ins/Proxy/setPrototypeOf/internals-call-order': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5336
+ 'language/expressions/super/call-proto-not-ctor': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5337
+ 'language/expressions/arrow-function/dstr-dflt-ary-ptrn-rest-ary-rest': [SKIP],
######################## NEEDS INVESTIGATION ###########################
@@ -483,32 +543,6 @@
# Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=596
'built-ins/Array/prototype/sort/bug_596_1': [PASS, FAIL_OK],
- # https://github.com/tc39/test262/issues/677
- 'built-ins/RegExp/prototype/source/15.10.7.1-1': [FAIL],
- 'built-ins/RegExp/prototype/global/15.10.7.2-1': [FAIL],
- 'built-ins/RegExp/prototype/ignoreCase/15.10.7.3-1': [FAIL],
- 'built-ins/RegExp/prototype/multiline/15.10.7.4-1': [FAIL],
-
- # https://github.com/tc39/test262/issues/694
- 'built-ins/TypedArrays/length-arg-toindex-length': [FAIL],
-
- # https://github.com/tc39/test262/issues/696
- 'language/statements/class/subclass/builtin-objects/ArrayBuffer/regular-subclassing': [FAIL],
-
- # https://github.com/tc39/test262/issues/685
- 'built-ins/DataView/prototype/setUint8/range-check-after-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setUint16/range-check-after-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setUint32/range-check-after-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setInt8/range-check-after-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setInt16/range-check-after-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setInt32/range-check-after-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setFloat32/range-check-after-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setFloat64/range-check-after-value-conversion': [FAIL],
-
- # https://github.com/tc39/test262/issues/686
- 'built-ins/DataView/prototype/setFloat32/toindex-byteoffset': [FAIL],
- 'built-ins/DataView/prototype/setFloat64/toindex-byteoffset': [FAIL],
-
############################ SKIPPED TESTS #############################
# These tests take a looong time to run.
@@ -544,8 +578,10 @@
'built-ins/String/prototype/normalize/return-normalized-string-using-default-parameter': [SKIP],
# Case-conversion is not fully compliant to the Unicode spec with i18n off.
+ 'built-ins/String/prototype/toLocaleLowerCase/Final_Sigma_U180E': [FAIL],
'built-ins/String/prototype/toLocaleLowerCase/special_casing_conditional': [FAIL],
'built-ins/String/prototype/toLocaleLowerCase/supplementary_plane': [FAIL],
+ 'built-ins/String/prototype/toLowerCase/Final_Sigma_U180E': [FAIL],
'built-ins/String/prototype/toLowerCase/special_casing_conditional': [FAIL],
'built-ins/String/prototype/toLowerCase/supplementary_plane': [FAIL],
'built-ins/String/prototype/toLocaleUpperCase/supplementary_plane': [FAIL],
@@ -588,4 +624,24 @@
'built-ins/ArrayBuffer/length-is-too-large-throws': [SKIP],
}], # asan == True or msan == True or tsan == True
+['variant == asm_wasm', {
+ '*': [SKIP],
+}], # variant == asm_wasm
+
+# Module-related tests
+# https://bugs.chromium.org/p/v8/issues/detail?id=1569
+['variant != ignition and variant != ignition_staging and variant != ignition_turbofan', {
+ 'language/eval-code/direct/export': [SKIP],
+ 'language/eval-code/direct/import': [SKIP],
+ 'language/eval-code/indirect/export': [SKIP],
+ 'language/eval-code/indirect/import': [SKIP],
+ 'language/module-code/*': [SKIP],
+}], # variant != ignition and variant != ignition_staging and variant != ignition_turbofan
+['variant == ignition or variant == ignition_staging or variant == ignition_turbofan', {
+ 'language/module-code/comment-*': [SKIP],
+ 'language/module-code/eval-*': [SKIP],
+ 'language/module-code/instn-*': [SKIP],
+ 'language/module-code/namespace/*': [SKIP],
+}], # variant == ignition or variant == ignition_staging or variant == ignition_turbofan
+
]
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 7193afb966..ebee73db75 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -2,36 +2,151 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# The sources are kept automatically in sync with unittests.gyp.
-
import("../../gni/v8.gni")
-gypi_values = exec_script("//build/gypi_to_gn.py",
- [ rebase_path("unittests.gyp") ],
- "scope",
- [ "unittests.gyp" ])
-
v8_executable("unittests") {
testonly = true
- sources = gypi_values.unittests_sources
+ sources = [
+ "../../testing/gmock-support.h",
+ "../../testing/gtest-support.h",
+ "base/atomic-utils-unittest.cc",
+ "base/bits-unittest.cc",
+ "base/cpu-unittest.cc",
+ "base/division-by-constant-unittest.cc",
+ "base/flags-unittest.cc",
+ "base/functional-unittest.cc",
+ "base/ieee754-unittest.cc",
+ "base/iterator-unittest.cc",
+ "base/logging-unittest.cc",
+ "base/platform/condition-variable-unittest.cc",
+ "base/platform/mutex-unittest.cc",
+ "base/platform/platform-unittest.cc",
+ "base/platform/semaphore-unittest.cc",
+ "base/platform/time-unittest.cc",
+ "base/sys-info-unittest.cc",
+ "base/utils/random-number-generator-unittest.cc",
+ "cancelable-tasks-unittest.cc",
+ "char-predicates-unittest.cc",
+ "compiler-dispatcher/compiler-dispatcher-job-unittest.cc",
+ "compiler/branch-elimination-unittest.cc",
+ "compiler/checkpoint-elimination-unittest.cc",
+ "compiler/common-operator-reducer-unittest.cc",
+ "compiler/common-operator-unittest.cc",
+ "compiler/compiler-test-utils.h",
+ "compiler/control-equivalence-unittest.cc",
+ "compiler/control-flow-optimizer-unittest.cc",
+ "compiler/dead-code-elimination-unittest.cc",
+ "compiler/diamond-unittest.cc",
+ "compiler/effect-control-linearizer-unittest.cc",
+ "compiler/escape-analysis-unittest.cc",
+ "compiler/graph-reducer-unittest.cc",
+ "compiler/graph-reducer-unittest.h",
+ "compiler/graph-trimmer-unittest.cc",
+ "compiler/graph-unittest.cc",
+ "compiler/graph-unittest.h",
+ "compiler/instruction-selector-unittest.cc",
+ "compiler/instruction-selector-unittest.h",
+ "compiler/instruction-sequence-unittest.cc",
+ "compiler/instruction-sequence-unittest.h",
+ "compiler/int64-lowering-unittest.cc",
+ "compiler/js-builtin-reducer-unittest.cc",
+ "compiler/js-create-lowering-unittest.cc",
+ "compiler/js-intrinsic-lowering-unittest.cc",
+ "compiler/js-operator-unittest.cc",
+ "compiler/js-typed-lowering-unittest.cc",
+ "compiler/linkage-tail-call-unittest.cc",
+ "compiler/live-range-builder.h",
+ "compiler/live-range-unittest.cc",
+ "compiler/liveness-analyzer-unittest.cc",
+ "compiler/load-elimination-unittest.cc",
+ "compiler/loop-peeling-unittest.cc",
+ "compiler/machine-operator-reducer-unittest.cc",
+ "compiler/machine-operator-unittest.cc",
+ "compiler/move-optimizer-unittest.cc",
+ "compiler/node-cache-unittest.cc",
+ "compiler/node-matchers-unittest.cc",
+ "compiler/node-properties-unittest.cc",
+ "compiler/node-test-utils.cc",
+ "compiler/node-test-utils.h",
+ "compiler/node-unittest.cc",
+ "compiler/opcodes-unittest.cc",
+ "compiler/register-allocator-unittest.cc",
+ "compiler/schedule-unittest.cc",
+ "compiler/scheduler-rpo-unittest.cc",
+ "compiler/scheduler-unittest.cc",
+ "compiler/simplified-operator-reducer-unittest.cc",
+ "compiler/simplified-operator-unittest.cc",
+ "compiler/state-values-utils-unittest.cc",
+ "compiler/tail-call-optimization-unittest.cc",
+ "compiler/typed-optimization-unittest.cc",
+ "compiler/typer-unittest.cc",
+ "compiler/value-numbering-reducer-unittest.cc",
+ "compiler/zone-pool-unittest.cc",
+ "counters-unittest.cc",
+ "eh-frame-iterator-unittest.cc",
+ "eh-frame-writer-unittest.cc",
+ "heap/bitmap-unittest.cc",
+ "heap/gc-idle-time-handler-unittest.cc",
+ "heap/gc-tracer-unittest.cc",
+ "heap/heap-unittest.cc",
+ "heap/marking-unittest.cc",
+ "heap/memory-reducer-unittest.cc",
+ "heap/scavenge-job-unittest.cc",
+ "heap/slot-set-unittest.cc",
+ "interpreter/bytecode-array-builder-unittest.cc",
+ "interpreter/bytecode-array-iterator-unittest.cc",
+ "interpreter/bytecode-array-writer-unittest.cc",
+ "interpreter/bytecode-dead-code-optimizer-unittest.cc",
+ "interpreter/bytecode-decoder-unittest.cc",
+ "interpreter/bytecode-peephole-optimizer-unittest.cc",
+ "interpreter/bytecode-pipeline-unittest.cc",
+ "interpreter/bytecode-register-allocator-unittest.cc",
+ "interpreter/bytecode-register-optimizer-unittest.cc",
+ "interpreter/bytecode-utils.h",
+ "interpreter/bytecodes-unittest.cc",
+ "interpreter/constant-array-builder-unittest.cc",
+ "interpreter/interpreter-assembler-unittest.cc",
+ "interpreter/interpreter-assembler-unittest.h",
+ "libplatform/default-platform-unittest.cc",
+ "libplatform/task-queue-unittest.cc",
+ "libplatform/worker-thread-unittest.cc",
+ "locked-queue-unittest.cc",
+ "register-configuration-unittest.cc",
+ "run-all-unittests.cc",
+ "source-position-table-unittest.cc",
+ "test-utils.cc",
+ "test-utils.h",
+ "unicode-unittest.cc",
+ "value-serializer-unittest.cc",
+ "wasm/asm-types-unittest.cc",
+ "wasm/ast-decoder-unittest.cc",
+ "wasm/control-transfer-unittest.cc",
+ "wasm/decoder-unittest.cc",
+ "wasm/leb-helper-unittest.cc",
+ "wasm/loop-assignment-analysis-unittest.cc",
+ "wasm/module-decoder-unittest.cc",
+ "wasm/switch-logic-unittest.cc",
+ "wasm/wasm-macro-gen-unittest.cc",
+ "wasm/wasm-module-builder-unittest.cc",
+ ]
if (v8_current_cpu == "arm") {
- sources += gypi_values.unittests_sources_arm
+ sources += [ "compiler/arm/instruction-selector-arm-unittest.cc" ]
} else if (v8_current_cpu == "arm64") {
- sources += gypi_values.unittests_sources_arm64
+ sources += [ "compiler/arm64/instruction-selector-arm64-unittest.cc" ]
} else if (v8_current_cpu == "x86") {
- sources += gypi_values.unittests_sources_ia32
+ sources += [ "compiler/ia32/instruction-selector-ia32-unittest.cc" ]
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
- sources += gypi_values.unittests_sources_mips
+ sources += [ "compiler/mips/instruction-selector-mips-unittest.cc" ]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
- sources += gypi_values.unittests_sources_mips64
+ sources += [ "compiler/mips64/instruction-selector-mips64-unittest.cc" ]
} else if (v8_current_cpu == "x64") {
- sources += gypi_values.unittests_sources_x64
+ sources += [ "compiler/x64/instruction-selector-x64-unittest.cc" ]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
- sources += gypi_values.unittests_sources_ppc
+ sources += [ "compiler/ppc/instruction-selector-ppc-unittest.cc" ]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
- sources += gypi_values.unittests_sources_s390
+ sources += [ "compiler/s390/instruction-selector-s390-unittest.cc" ]
}
configs = [
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc
index 922ed2f44e..d4c54247e2 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc
@@ -6,11 +6,14 @@
#include "include/v8.h"
#include "src/api.h"
+#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/base/platform/semaphore.h"
#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
#include "src/flags.h"
#include "src/isolate-inl.h"
#include "src/parsing/parse-info.h"
+#include "src/v8.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -19,6 +22,30 @@ namespace internal {
typedef TestWithContext CompilerDispatcherJobTest;
+class IgnitionCompilerDispatcherJobTest : public TestWithContext {
+ public:
+ IgnitionCompilerDispatcherJobTest() {}
+ ~IgnitionCompilerDispatcherJobTest() override {}
+
+ static void SetUpTestCase() {
+ old_flag_ = i::FLAG_ignition;
+ i::FLAG_ignition = true;
+ i::FLAG_never_compact = true;
+ TestWithContext::SetUpTestCase();
+ }
+
+ static void TearDownTestCase() {
+ TestWithContext::TearDownTestCase();
+ i::FLAG_ignition = old_flag_;
+ }
+
+ private:
+ static bool old_flag_;
+ DISALLOW_COPY_AND_ASSIGN(IgnitionCompilerDispatcherJobTest);
+};
+
+bool IgnitionCompilerDispatcherJobTest::old_flag_;
+
namespace {
const char test_script[] = "(x) { x*x; }";
@@ -39,7 +66,7 @@ class ScriptResource : public v8::String::ExternalOneByteStringResource {
DISALLOW_COPY_AND_ASSIGN(ScriptResource);
};
-Handle<JSFunction> CreateFunction(
+Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
Isolate* isolate, ExternalOneByteString::Resource* maybe_resource) {
HandleScope scope(isolate);
Handle<String> source;
@@ -52,40 +79,53 @@ Handle<JSFunction> CreateFunction(
}
Handle<Script> script = isolate->factory()->NewScript(source);
Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
- isolate->factory()->NewStringFromAsciiChecked("f"), MaybeHandle<Code>(),
- false);
+ isolate->factory()->NewStringFromAsciiChecked("f"),
+ isolate->builtins()->CompileLazy(), false);
SharedFunctionInfo::SetScript(shared, script);
shared->set_end_position(source->length());
- Handle<JSFunction> function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, handle(isolate->context(), isolate));
- return scope.CloseAndEscape(function);
+ shared->set_outer_scope_info(ScopeInfo::Empty(isolate));
+ return scope.CloseAndEscape(shared);
+}
+
+Handle<Object> RunJS(v8::Isolate* isolate, const char* script) {
+ return Utils::OpenHandle(
+ *v8::Script::Compile(
+ isolate->GetCurrentContext(),
+ v8::String::NewFromUtf8(isolate, script, v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .ToLocalChecked()
+ ->Run(isolate->GetCurrentContext())
+ .ToLocalChecked());
}
} // namespace
TEST_F(CompilerDispatcherJobTest, Construct) {
std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
- i_isolate(), CreateFunction(i_isolate(), nullptr), FLAG_stack_size));
+ i_isolate(), CreateSharedFunctionInfo(i_isolate(), nullptr),
+ FLAG_stack_size));
}
TEST_F(CompilerDispatcherJobTest, CanParseOnBackgroundThread) {
{
std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
- i_isolate(), CreateFunction(i_isolate(), nullptr), FLAG_stack_size));
+ i_isolate(), CreateSharedFunctionInfo(i_isolate(), nullptr),
+ FLAG_stack_size));
ASSERT_FALSE(job->can_parse_on_background_thread());
}
{
ScriptResource script(test_script, strlen(test_script));
std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
- i_isolate(), CreateFunction(i_isolate(), &script), FLAG_stack_size));
+ i_isolate(), CreateSharedFunctionInfo(i_isolate(), &script),
+ FLAG_stack_size));
ASSERT_TRUE(job->can_parse_on_background_thread());
}
}
TEST_F(CompilerDispatcherJobTest, StateTransitions) {
std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
- i_isolate(), CreateFunction(i_isolate(), nullptr), FLAG_stack_size));
+ i_isolate(), CreateSharedFunctionInfo(i_isolate(), nullptr),
+ FLAG_stack_size));
ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
job->PrepareToParseOnMainThread();
@@ -93,7 +133,13 @@ TEST_F(CompilerDispatcherJobTest, StateTransitions) {
job->Parse();
ASSERT_TRUE(job->status() == CompileJobStatus::kParsed);
ASSERT_TRUE(job->FinalizeParsingOnMainThread());
+ ASSERT_TRUE(job->status() == CompileJobStatus::kReadyToAnalyse);
+ ASSERT_TRUE(job->PrepareToCompileOnMainThread());
ASSERT_TRUE(job->status() == CompileJobStatus::kReadyToCompile);
+ job->Compile();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kCompiled);
+ ASSERT_TRUE(job->FinalizeCompilingOnMainThread());
+ ASSERT_TRUE(job->status() == CompileJobStatus::kDone);
job->ResetOnMainThread();
ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
}
@@ -101,7 +147,8 @@ TEST_F(CompilerDispatcherJobTest, StateTransitions) {
TEST_F(CompilerDispatcherJobTest, SyntaxError) {
ScriptResource script("^^^", strlen("^^^"));
std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
- i_isolate(), CreateFunction(i_isolate(), &script), FLAG_stack_size));
+ i_isolate(), CreateSharedFunctionInfo(i_isolate(), &script),
+ FLAG_stack_size));
job->PrepareToParseOnMainThread();
job->Parse();
@@ -117,34 +164,28 @@ TEST_F(CompilerDispatcherJobTest, SyntaxError) {
TEST_F(CompilerDispatcherJobTest, ScopeChain) {
const char script[] =
- "function g() { var g = 1; function f(x) { return x * g }; return f; } "
+ "function g() { var y = 1; function f(x) { return x * y }; return f; } "
"g();";
- Handle<JSFunction> f = Handle<JSFunction>::cast(Utils::OpenHandle(
- *v8::Script::Compile(isolate()->GetCurrentContext(),
- v8::String::NewFromUtf8(isolate(), script,
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .ToLocalChecked()
- ->Run(isolate()->GetCurrentContext())
- .ToLocalChecked()));
+ Handle<JSFunction> f = Handle<JSFunction>::cast(RunJS(isolate(), script));
- std::unique_ptr<CompilerDispatcherJob> job(
- new CompilerDispatcherJob(i_isolate(), f, FLAG_stack_size));
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), handle(f->shared()), FLAG_stack_size));
job->PrepareToParseOnMainThread();
job->Parse();
ASSERT_TRUE(job->FinalizeParsingOnMainThread());
+ ASSERT_TRUE(job->PrepareToCompileOnMainThread());
ASSERT_TRUE(job->status() == CompileJobStatus::kReadyToCompile);
const AstRawString* var_x =
job->parse_info_->ast_value_factory()->GetOneByteString("x");
Variable* var = job->parse_info_->literal()->scope()->Lookup(var_x);
ASSERT_TRUE(var);
- ASSERT_TRUE(var->IsUnallocated());
+ ASSERT_TRUE(var->IsParameter());
- const AstRawString* var_g =
- job->parse_info_->ast_value_factory()->GetOneByteString("g");
- var = job->parse_info_->literal()->scope()->Lookup(var_g);
+ const AstRawString* var_y =
+ job->parse_info_->ast_value_factory()->GetOneByteString("y");
+ var = job->parse_info_->literal()->scope()->Lookup(var_y);
ASSERT_TRUE(var);
ASSERT_TRUE(var->IsContextSlot());
@@ -152,5 +193,127 @@ TEST_F(CompilerDispatcherJobTest, ScopeChain) {
ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
}
+TEST_F(CompilerDispatcherJobTest, CompileAndRun) {
+ const char script[] =
+ "function g() {\n"
+ " f = function(a) {\n"
+ " for (var i = 0; i < 3; i++) { a += 20; }\n"
+ " return a;\n"
+ " }\n"
+ " return f;\n"
+ "}\n"
+ "g();";
+ Handle<JSFunction> f = Handle<JSFunction>::cast(RunJS(isolate(), script));
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), handle(f->shared()), FLAG_stack_size));
+
+ job->PrepareToParseOnMainThread();
+ job->Parse();
+ job->FinalizeParsingOnMainThread();
+ job->PrepareToCompileOnMainThread();
+ job->Compile();
+ ASSERT_TRUE(job->FinalizeCompilingOnMainThread());
+ ASSERT_TRUE(job->status() == CompileJobStatus::kDone);
+
+ Smi* value = Smi::cast(*RunJS(isolate(), "f(100);"));
+ ASSERT_TRUE(value == Smi::FromInt(160));
+
+ job->ResetOnMainThread();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
+}
+
+TEST_F(CompilerDispatcherJobTest, CompileFailureToPrepare) {
+ std::string raw_script("() { var a = ");
+ for (int i = 0; i < 100000; i++) {
+ raw_script += "'x' + ";
+ }
+ raw_script += " 'x'; }";
+ ScriptResource script(raw_script.c_str(), strlen(raw_script.c_str()));
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), CreateSharedFunctionInfo(i_isolate(), &script), 100));
+
+ job->PrepareToParseOnMainThread();
+ job->Parse();
+ job->FinalizeParsingOnMainThread();
+ ASSERT_FALSE(job->PrepareToCompileOnMainThread());
+ ASSERT_TRUE(job->status() == CompileJobStatus::kFailed);
+ ASSERT_TRUE(i_isolate()->has_pending_exception());
+
+ i_isolate()->clear_pending_exception();
+ job->ResetOnMainThread();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
+}
+
+TEST_F(CompilerDispatcherJobTest, CompileFailureToFinalize) {
+ std::string raw_script("() { var a = ");
+ for (int i = 0; i < 1000; i++) {
+ raw_script += "'x' + ";
+ }
+ raw_script += " 'x'; }";
+ ScriptResource script(raw_script.c_str(), strlen(raw_script.c_str()));
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), CreateSharedFunctionInfo(i_isolate(), &script), 50));
+
+ job->PrepareToParseOnMainThread();
+ job->Parse();
+ job->FinalizeParsingOnMainThread();
+ job->PrepareToCompileOnMainThread();
+ job->Compile();
+ ASSERT_FALSE(job->FinalizeCompilingOnMainThread());
+ ASSERT_TRUE(job->status() == CompileJobStatus::kFailed);
+ ASSERT_TRUE(i_isolate()->has_pending_exception());
+
+ i_isolate()->clear_pending_exception();
+ job->ResetOnMainThread();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
+}
+
+class CompileTask : public Task {
+ public:
+ CompileTask(CompilerDispatcherJob* job, base::Semaphore* semaphore)
+ : job_(job), semaphore_(semaphore) {}
+ ~CompileTask() override {}
+
+ void Run() override {
+ job_->Compile();
+ semaphore_->Signal();
+ }
+
+ private:
+ CompilerDispatcherJob* job_;
+ base::Semaphore* semaphore_;
+ DISALLOW_COPY_AND_ASSIGN(CompileTask);
+};
+
+TEST_F(IgnitionCompilerDispatcherJobTest, CompileOnBackgroundThread) {
+ const char* raw_script =
+ "(a, b) {\n"
+ " var c = a + b;\n"
+ " function bar() { return b }\n"
+ " var d = { foo: 100, bar : bar() }\n"
+ " return bar;"
+ "}";
+ ScriptResource script(raw_script, strlen(raw_script));
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), CreateSharedFunctionInfo(i_isolate(), &script), 100));
+
+ job->PrepareToParseOnMainThread();
+ job->Parse();
+ job->FinalizeParsingOnMainThread();
+ job->PrepareToCompileOnMainThread();
+ ASSERT_TRUE(job->can_compile_on_background_thread());
+
+ base::Semaphore semaphore(0);
+ CompileTask* background_task = new CompileTask(job.get(), &semaphore);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(background_task,
+ Platform::kShortRunningTask);
+ semaphore.Wait();
+ ASSERT_TRUE(job->FinalizeCompilingOnMainThread());
+ ASSERT_TRUE(job->status() == CompileJobStatus::kDone);
+
+ job->ResetOnMainThread();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index 6317d91fa9..746624a691 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -2079,10 +2079,6 @@ TEST_P(InstructionSelectorFlagSettingTest, CmpZeroOnlyUserInBasicBlock) {
const FlagSettingInst inst = GetParam();
// Binop with additional users, but in a different basic block.
TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroRightInstructions) {
- // We don't optimise this case at the moment.
- if (cmp.flags_condition == kEqual || cmp.flags_condition == kNotEqual) {
- continue;
- }
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
RawMachineLabel a, b;
@@ -2108,10 +2104,6 @@ TEST_P(InstructionSelectorFlagSettingTest, ShiftedOperand) {
const FlagSettingInst inst = GetParam();
// Like the test above, but with a shifted input to the binary operator.
TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroRightInstructions) {
- // We don't optimise this case at the moment.
- if (cmp.flags_condition == kEqual || cmp.flags_condition == kNotEqual) {
- continue;
- }
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
RawMachineLabel a, b;
@@ -2158,8 +2150,7 @@ TEST_P(InstructionSelectorFlagSettingTest, UsersInSameBasicBlock) {
EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
EXPECT_NE(kFlags_branch, s[0]->flags_mode());
EXPECT_EQ(kArmMul, s[1]->arch_opcode());
- EXPECT_EQ(cmp.flags_condition == kEqual ? kArmTst : kArmCmp,
- s[2]->arch_opcode());
+ EXPECT_EQ(kArmCmp, s[2]->arch_opcode());
EXPECT_EQ(kFlags_branch, s[2]->flags_mode());
EXPECT_EQ(cmp.flags_condition, s[2]->flags_condition());
}
@@ -3059,10 +3050,11 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArmTst, s[0]->arch_opcode());
- EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(kEqual, s[0]->flags_condition());
@@ -3072,10 +3064,11 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArmTst, s[0]->arch_opcode());
- EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+ EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(kEqual, s[0]->flags_condition());
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 6ca5e5e684..7892c4bbb9 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -402,7 +402,6 @@ TEST_P(InstructionSelectorLogicalTest, Parameter) {
TEST_P(InstructionSelectorLogicalTest, Immediate) {
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
- // TODO(all): Add support for testing 64-bit immediates.
if (type == MachineType::Int32()) {
// Immediate on the right.
TRACED_FOREACH(int32_t, imm, kLogical32Immediates) {
@@ -429,6 +428,32 @@ TEST_P(InstructionSelectorLogicalTest, Immediate) {
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(1U, s[0]->OutputCount());
}
+ } else if (type == MachineType::Int64()) {
+ // Immediate on the right.
+ TRACED_FOREACH(int64_t, imm, kLogical64Immediates) {
+ StreamBuilder m(this, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int64Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+
+ // Immediate on the left; all logical ops should commute.
+ TRACED_FOREACH(int64_t, imm, kLogical64Immediates) {
+ StreamBuilder m(this, type, type);
+ m.Return((m.*dpi.constructor)(m.Int64Constant(imm), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
}
}
@@ -1067,7 +1092,8 @@ TEST_F(InstructionSelectorTest, SubBranchWithImmediateOnRight) {
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ((imm == 0) ? kArm64CompareAndBranch32 : kArm64Cmp32,
+ s[0]->arch_opcode());
EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
}
@@ -3142,11 +3168,20 @@ const IntegerCmp kIntegerCmpInstructions[] = {
kUnsignedLessThanOrEqual,
kUnsignedGreaterThanOrEqual}};
+const IntegerCmp kIntegerCmpEqualityInstructions[] = {
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
+ MachineType::Int32()},
+ kEqual,
+ kEqual},
+ {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kArm64Cmp32,
+ MachineType::Int32()},
+ kNotEqual,
+ kNotEqual}};
} // namespace
TEST_F(InstructionSelectorTest, Word32CompareNegateWithWord32Shift) {
- TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpEqualityInstructions) {
TRACED_FOREACH(Shift, shift, kShiftInstructions) {
// Test 32-bit operations. Ignore ROR shifts, as compare-negate does not
// support them.
@@ -3183,19 +3218,16 @@ TEST_F(InstructionSelectorTest, CmpWithImmediateOnLeft) {
// kEqual and kNotEqual trigger the cbz/cbnz optimization, which
// is tested elsewhere.
if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
+ // For signed less than or equal to zero, we generate TBNZ.
+ if (cmp.cond == kSignedLessThanOrEqual && imm == 0) continue;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
- RawMachineLabel a, b;
- m.Branch((m.*cmp.mi.constructor)(m.Int32Constant(imm), p0), &a, &b);
- m.Bind(&a);
- m.Return(m.Int32Constant(1));
- m.Bind(&b);
- m.Return(m.Int32Constant(0));
+ m.Return((m.*cmp.mi.constructor)(m.Int32Constant(imm), p0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
ASSERT_LE(2U, s[0]->InputCount());
- EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
}
@@ -3203,24 +3235,19 @@ TEST_F(InstructionSelectorTest, CmpWithImmediateOnLeft) {
}
TEST_F(InstructionSelectorTest, CmnWithImmediateOnLeft) {
- TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpEqualityInstructions) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
// kEqual and kNotEqual trigger the cbz/cbnz optimization, which
// is tested elsewhere.
if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
- RawMachineLabel a, b;
- m.Branch((m.*cmp.mi.constructor)(m.Int32Constant(imm), sub), &a, &b);
- m.Bind(&a);
- m.Return(m.Int32Constant(1));
- m.Bind(&b);
- m.Return(m.Int32Constant(0));
+ m.Return((m.*cmp.mi.constructor)(m.Int32Constant(imm), sub));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
ASSERT_LE(2U, s[0]->InputCount());
- EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(cmp.cond, s[0]->flags_condition());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
}
@@ -3244,7 +3271,7 @@ TEST_F(InstructionSelectorTest, CmpSignedExtendByteOnLeft) {
}
TEST_F(InstructionSelectorTest, CmnSignedExtendByteOnLeft) {
- TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpEqualityInstructions) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
@@ -3294,7 +3321,7 @@ TEST_F(InstructionSelectorTest, CmpShiftByImmediateOnLeft) {
}
TEST_F(InstructionSelectorTest, CmnShiftByImmediateOnLeft) {
- TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpEqualityInstructions) {
TRACED_FOREACH(Shift, shift, kShiftInstructions) {
// Only test relevant shifted operands.
if (shift.mi.machine_type != MachineType::Int32()) continue;
@@ -3408,21 +3435,15 @@ TEST_P(InstructionSelectorFlagSettingTest, CmpZeroRight) {
TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
- RawMachineLabel a, b;
Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), m.Parameter(1));
- Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
- m.Branch(comp, &a, &b);
- m.Bind(&a);
- m.Return(m.Int32Constant(1));
- m.Bind(&b);
- m.Return(m.Int32Constant(0));
+ m.Return((m.*cmp.mi.constructor)(binop, m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
- EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(cmp.cond, s[0]->flags_condition());
}
}
@@ -3433,21 +3454,15 @@ TEST_P(InstructionSelectorFlagSettingTest, CmpZeroLeft) {
TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroLeftInstructions) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
- RawMachineLabel a, b;
Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), m.Parameter(1));
- Node* comp = (m.*cmp.mi.constructor)(m.Int32Constant(0), binop);
- m.Branch(comp, &a, &b);
- m.Bind(&a);
- m.Return(m.Int32Constant(1));
- m.Bind(&b);
- m.Return(m.Int32Constant(0));
+ m.Return((m.*cmp.mi.constructor)(m.Int32Constant(0), binop));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
- EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(cmp.cond, s[0]->flags_condition());
}
}
@@ -3456,25 +3471,23 @@ TEST_P(InstructionSelectorFlagSettingTest, CmpZeroOnlyUserInBasicBlock) {
const FlagSettingInst inst = GetParam();
// Binop with additional users, but in a different basic block.
TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
- // For kEqual and kNotEqual, we generate a cbz or cbnz.
- if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
RawMachineLabel a, b;
Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), m.Parameter(1));
Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
- m.Branch(comp, &a, &b);
+ m.Branch(m.Parameter(0), &a, &b);
m.Bind(&a);
m.Return(binop);
m.Bind(&b);
- m.Return(m.Int32Constant(0));
+ m.Return(comp);
Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ ASSERT_EQ(2U, s.size()); // Flag-setting instruction and branch.
+ ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(inst.mi.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
- EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(cmp.cond, s[0]->flags_condition());
}
}
@@ -3483,8 +3496,6 @@ TEST_P(InstructionSelectorFlagSettingTest, ShiftedOperand) {
const FlagSettingInst inst = GetParam();
// Like the test above, but with a shifted input to the binary operator.
TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
- // For kEqual and kNotEqual, we generate a cbz or cbnz.
- if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
RawMachineLabel a, b;
@@ -3492,20 +3503,20 @@ TEST_P(InstructionSelectorFlagSettingTest, ShiftedOperand) {
Node* shift = m.Word32Shl(m.Parameter(1), imm);
Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), shift);
Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
- m.Branch(comp, &a, &b);
+ m.Branch(m.Parameter(0), &a, &b);
m.Bind(&a);
m.Return(binop);
m.Bind(&b);
- m.Return(m.Int32Constant(0));
+ m.Return(comp);
Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- ASSERT_EQ(5U, s[0]->InputCount()); // The labels are also inputs.
+ ASSERT_EQ(2U, s.size()); // Flag-setting instruction and branch.
+ ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(inst.mi.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(5, s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
- EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(cmp.cond, s[0]->flags_condition());
}
}
@@ -3521,19 +3532,18 @@ TEST_P(InstructionSelectorFlagSettingTest, UsersInSameBasicBlock) {
Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), m.Parameter(1));
Node* mul = m.Int32Mul(m.Parameter(0), binop);
Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
- m.Branch(comp, &a, &b);
+ m.Branch(m.Parameter(0), &a, &b);
m.Bind(&a);
m.Return(mul);
m.Bind(&b);
- m.Return(m.Int32Constant(0));
+ m.Return(comp);
Stream s = m.Build();
- ASSERT_EQ(3U, s.size());
+ ASSERT_EQ(4U, s.size()); // Includes the compare and branch instruction.
EXPECT_EQ(inst.mi.arch_opcode, s[0]->arch_opcode());
- EXPECT_NE(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
EXPECT_EQ(kArm64Mul32, s[1]->arch_opcode());
- EXPECT_EQ(cmp.cond == kEqual ? kArm64CompareAndBranch32 : kArm64Cmp32,
- s[2]->arch_opcode());
- EXPECT_EQ(kFlags_branch, s[2]->flags_mode());
+ EXPECT_EQ(kArm64Cmp32, s[2]->arch_opcode());
+ EXPECT_EQ(kFlags_set, s[2]->flags_mode());
EXPECT_EQ(cmp.cond, s[2]->flags_condition());
}
}
@@ -3543,23 +3553,18 @@ TEST_P(InstructionSelectorFlagSettingTest, CommuteImmediate) {
// Immediate on left hand side of the binary operator.
TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
- RawMachineLabel a, b;
// 3 can be an immediate on both arithmetic and logical instructions.
Node* imm = m.Int32Constant(3);
Node* binop = (m.*inst.mi.constructor)(imm, m.Parameter(0));
Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
- m.Branch(comp, &a, &b);
- m.Bind(&a);
- m.Return(m.Int32Constant(1));
- m.Bind(&b);
- m.Return(m.Int32Constant(0));
+ m.Return(comp);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(3, s.ToInt32(s[0]->InputAt(1)));
- EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(cmp.cond, s[0]->flags_condition());
}
}
@@ -3606,23 +3611,18 @@ TEST_F(InstructionSelectorTest, TstInvalidImmediate) {
// Make sure we do not generate an invalid immediate for TST.
TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
- RawMachineLabel a, b;
// 5 is not a valid constant for TST.
Node* imm = m.Int32Constant(5);
Node* binop = m.Word32And(imm, m.Parameter(0));
Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
- m.Branch(comp, &a, &b);
- m.Bind(&a);
- m.Return(m.Int32Constant(1));
- m.Bind(&b);
- m.Return(m.Int32Constant(0));
+ m.Return(comp);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
EXPECT_NE(InstructionOperand::IMMEDIATE, s[0]->InputAt(0)->kind());
EXPECT_NE(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
EXPECT_EQ(cmp.cond, s[0]->flags_condition());
}
}
@@ -4311,6 +4311,74 @@ TEST_F(InstructionSelectorTest, LoadAndShiftRight) {
}
}
+TEST_F(InstructionSelectorTest, CompareAgainstZero32) {
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const param = m.Parameter(0);
+ RawMachineLabel a, b;
+ m.Branch((m.*cmp.mi.constructor)(param, m.Int32Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->InputAt(0)));
+ if (cmp.cond == kNegative || cmp.cond == kPositiveOrZero) {
+ EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ((cmp.cond == kNegative) ? kNotEqual : kEqual,
+ s[0]->flags_condition());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(31, s.ToInt32(s[0]->InputAt(1)));
+ } else {
+ EXPECT_EQ(kArm64CompareAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+ }
+}
+
+TEST_F(InstructionSelectorTest, CompareFloat64HighLessThanZero64) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64());
+ Node* const param = m.Parameter(0);
+ Node* const high = m.Float64ExtractHighWord32(param);
+ RawMachineLabel a, b;
+ m.Branch(m.Int32LessThan(high, m.Int32Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64U64MoveFloat64, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64TestAndBranch, s[1]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[1]->flags_condition());
+ EXPECT_EQ(4U, s[1]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[1]->InputAt(1)->kind());
+ EXPECT_EQ(63, s.ToInt32(s[1]->InputAt(1)));
+}
+
+TEST_F(InstructionSelectorTest, CompareFloat64HighGreaterThanOrEqualZero64) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64());
+ Node* const param = m.Parameter(0);
+ Node* const high = m.Float64ExtractHighWord32(param);
+ RawMachineLabel a, b;
+ m.Branch(m.Int32GreaterThanOrEqual(high, m.Int32Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64U64MoveFloat64, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64TestAndBranch, s[1]->arch_opcode());
+ EXPECT_EQ(kEqual, s[1]->flags_condition());
+ EXPECT_EQ(4U, s[1]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[1]->InputAt(1)->kind());
+ EXPECT_EQ(63, s.ToInt32(s[1]->InputAt(1)));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
index fcd702c428..9486d1fe6e 100644
--- a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
@@ -15,7 +15,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-class BranchEliminationTest : public TypedGraphTest {
+class BranchEliminationTest : public GraphTest {
public:
BranchEliminationTest()
: machine_(zone(), MachineType::PointerRepresentation(),
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index f51a54d074..d284772395 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -158,6 +158,26 @@ TEST_F(CommonOperatorReducerTest, BranchWithBooleanNot) {
}
}
+TEST_F(CommonOperatorReducerTest, BranchWithSelect) {
+ Node* const value = Parameter(0);
+ TRACED_FOREACH(BranchHint, hint, kBranchHints) {
+ Node* const control = graph()->start();
+ Node* const branch = graph()->NewNode(
+ common()->Branch(hint),
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ value, FalseConstant(), TrueConstant()),
+ control);
+ Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Reduction const r = Reduce(branch);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(branch, r.replacement());
+ EXPECT_THAT(branch, IsBranch(value, control));
+ EXPECT_THAT(if_false, IsIfTrue(branch));
+ EXPECT_THAT(if_true, IsIfFalse(branch));
+ EXPECT_EQ(NegateBranchHint(hint), BranchHintOf(branch->op()));
+ }
+}
// -----------------------------------------------------------------------------
// Merge
diff --git a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
index a87f760c82..6534e90ccc 100644
--- a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/bit-vector.h"
#include "src/compiler/control-equivalence.h"
+#include "src/bit-vector.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/source-position.h"
-#include "src/zone-containers.h"
+#include "src/zone/zone-containers.h"
#include "test/unittests/compiler/graph-unittest.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
index 71a8696d09..0a12ea371a 100644
--- a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
@@ -21,10 +21,10 @@ namespace compiler {
using testing::Capture;
-class EffectControlLinearizerTest : public TypedGraphTest {
+class EffectControlLinearizerTest : public GraphTest {
public:
EffectControlLinearizerTest()
- : TypedGraphTest(3),
+ : GraphTest(3),
machine_(zone()),
javascript_(zone()),
simplified_(zone()),
diff --git a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
index 990b813947..3a233d6872 100644
--- a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
@@ -2,22 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/bit-vector.h"
#include "src/compiler/escape-analysis.h"
+#include "src/bit-vector.h"
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/types.h"
-#include "src/zone-containers.h"
+#include "src/compiler/types.h"
+#include "src/zone/zone-containers.h"
#include "test/unittests/compiler/graph-unittest.h"
namespace v8 {
namespace internal {
namespace compiler {
-class EscapeAnalysisTest : public GraphTest {
+class EscapeAnalysisTest : public TypedGraphTest {
public:
EscapeAnalysisTest()
: simplified_(zone()),
@@ -468,8 +468,7 @@ TEST_F(EscapeAnalysisTest, DeoptReplacement) {
ASSERT_EQ(object1, NodeProperties::GetValueInput(object_state, 0));
}
-
-TEST_F(EscapeAnalysisTest, DeoptReplacementIdentity) {
+TEST_F(EscapeAnalysisTest, DISABLED_DeoptReplacementIdentity) {
Node* object1 = Constant(1);
BeginRegion();
Node* allocation = Allocate(Constant(kPointerSize * 2));
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index d2953159fc..18ccaaaea5 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -41,7 +41,8 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
SourcePositionTable source_position_table(graph());
InstructionSelector selector(test_->zone(), node_count, &linkage, &sequence,
schedule, &source_position_table, nullptr,
- source_position_mode, features);
+ source_position_mode, features,
+ InstructionSelector::kDisableScheduling);
selector.SelectInstructions();
if (FLAG_trace_turbo) {
OFStream out(stdout);
@@ -244,19 +245,13 @@ TARGET_TEST_F(InstructionSelectorTest, FinishRegion) {
m.AddNode(m.common()->FinishRegion(), param, m.graph()->start());
m.Return(finish);
Stream s = m.Build(kAllInstructions);
- ASSERT_EQ(4U, s.size());
+ ASSERT_EQ(3U, s.size());
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
ASSERT_TRUE(s[0]->Output()->IsUnallocated());
+ EXPECT_EQ(kArchRet, s[1]->arch_opcode());
EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kArchNop, s[1]->arch_opcode());
- ASSERT_EQ(1U, s[1]->InputCount());
- ASSERT_TRUE(s[1]->InputAt(0)->IsUnallocated());
EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[1]->InputAt(0)));
- ASSERT_EQ(1U, s[1]->OutputCount());
- ASSERT_TRUE(s[1]->Output()->IsUnallocated());
- EXPECT_TRUE(UnallocatedOperand::cast(s[1]->Output())->HasSameAsInputPolicy());
- EXPECT_EQ(s.ToVreg(finish), s.ToVreg(s[1]->Output()));
EXPECT_TRUE(s.IsReference(finish));
}
diff --git a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
index ed20e64194..48debc368c 100644
--- a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -38,6 +38,15 @@ class JSBuiltinReducerTest : public TypedGraphTest {
return reducer.Reduce(node);
}
+ Node* GlobalFunction(const char* name) {
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ Object::GetProperty(
+ isolate()->global_object(),
+ isolate()->factory()->NewStringFromAsciiChecked(name))
+ .ToHandleChecked());
+ return HeapConstant(f);
+ }
+
Node* MathFunction(const char* name) {
Handle<Object> m =
JSObject::GetProperty(isolate()->global_object(),
@@ -101,6 +110,91 @@ Type* const kNumberTypes[] = {
// -----------------------------------------------------------------------------
+// isFinite
+
+TEST_F(JSBuiltinReducerTest, GlobalIsFiniteWithNumber) {
+ Node* function = GlobalFunction("isFinite");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberEqual(IsNumberSubtract(p0, p0),
+ IsNumberSubtract(p0, p0)));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, GlobalIsFiniteWithPlainPrimitive) {
+ Node* function = GlobalFunction("isFinite");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberEqual(IsNumberSubtract(IsPlainPrimitiveToNumber(p0),
+ IsPlainPrimitiveToNumber(p0)),
+ IsNumberSubtract(IsPlainPrimitiveToNumber(p0),
+ IsPlainPrimitiveToNumber(p0))));
+}
+
+// -----------------------------------------------------------------------------
+// isNaN
+
+TEST_F(JSBuiltinReducerTest, GlobalIsNaNWithNumber) {
+ Node* function = GlobalFunction("isNaN");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsBooleanNot(IsNumberEqual(p0, p0)));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, GlobalIsNaNWithPlainPrimitive) {
+ Node* function = GlobalFunction("isNaN");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsBooleanNot(IsNumberEqual(IsPlainPrimitiveToNumber(p0),
+ IsPlainPrimitiveToNumber(p0))));
+}
+
+// -----------------------------------------------------------------------------
// Math.abs
TEST_F(JSBuiltinReducerTest, MathAbsWithNumber) {
@@ -1315,6 +1409,97 @@ TEST_F(JSBuiltinReducerTest, MathTruncWithPlainPrimitive) {
}
// -----------------------------------------------------------------------------
+// Number.isFinite
+
+TEST_F(JSBuiltinReducerTest, NumberIsFiniteWithNumber) {
+ Node* function = NumberFunction("isFinite");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberEqual(IsNumberSubtract(p0, p0),
+ IsNumberSubtract(p0, p0)));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Number.isInteger
+
+TEST_F(JSBuiltinReducerTest, NumberIsIntegerWithNumber) {
+ Node* function = NumberFunction("isInteger");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberEqual(IsNumberSubtract(p0, IsNumberTrunc(p0)),
+ IsNumberConstant(0.0)));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Number.isNaN
+
+TEST_F(JSBuiltinReducerTest, NumberIsNaNWithNumber) {
+ Node* function = NumberFunction("isNaN");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsBooleanNot(IsNumberEqual(p0, p0)));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Number.isSafeInteger
+
+TEST_F(JSBuiltinReducerTest, NumberIsSafeIntegerWithIntegral32) {
+ Node* function = NumberFunction("isSafeInteger");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kIntegral32Types) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTrueConstant());
+ }
+}
+
+// -----------------------------------------------------------------------------
// Number.parseInt
TEST_F(JSBuiltinReducerTest, NumberParseIntWithIntegral32) {
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 9c001e9eb2..ebb1633401 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -40,7 +40,8 @@ class JSCreateLoweringTest : public TypedGraphTest {
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
JSCreateLowering reducer(&graph_reducer, &deps_, &jsgraph,
- MaybeHandle<LiteralsArray>(), zone());
+ MaybeHandle<LiteralsArray>(),
+ MaybeHandle<Context>(), zone());
return reducer.Reduce(node);
}
@@ -174,14 +175,15 @@ TEST_F(JSCreateLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
// JSCreateWithContext
TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
+ Handle<ScopeInfo> scope_info(factory()->NewScopeInfo(1));
Node* const object = Parameter(Type::Receiver());
Node* const closure = Parameter(Type::Function());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
Node* const control = graph()->start();
Reduction r =
- Reduce(graph()->NewNode(javascript()->CreateWithContext(), object,
- closure, context, effect, control));
+ Reduce(graph()->NewNode(javascript()->CreateWithContext(scope_info),
+ object, closure, context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
@@ -195,14 +197,15 @@ TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
TEST_F(JSCreateLoweringTest, JSCreateCatchContext) {
Handle<String> name = factory()->length_string();
+ Handle<ScopeInfo> scope_info(factory()->NewScopeInfo(1));
Node* const exception = Parameter(Type::Receiver());
Node* const closure = Parameter(Type::Function());
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction r =
- Reduce(graph()->NewNode(javascript()->CreateCatchContext(name), exception,
- closure, context, effect, control));
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->CreateCatchContext(name, scope_info),
+ exception, closure, context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
diff --git a/deps/v8/test/unittests/compiler/js-operator-unittest.cc b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
index 3b83d691f1..853249785e 100644
--- a/deps/v8/test/unittests/compiler/js-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
@@ -46,7 +46,6 @@ const SharedOperator kSharedOperators[] = {
SHARED(ToObject, Operator::kFoldable, 1, 1, 1, 1, 1, 1, 2),
SHARED(Create, Operator::kEliminatable, 2, 1, 1, 0, 1, 1, 0),
SHARED(TypeOf, Operator::kPure, 1, 0, 0, 0, 1, 0, 0),
- SHARED(CreateWithContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
#undef SHARED
};
diff --git a/deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc b/deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc
deleted file mode 100644
index dece25def1..0000000000
--- a/deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc
+++ /dev/null
@@ -1,336 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler.h"
-
-#include "src/compiler/access-builder.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/js-type-feedback.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/operator-properties.h"
-
-#include "test/unittests/compiler/compiler-test-utils.h"
-#include "test/unittests/compiler/graph-unittest.h"
-#include "test/unittests/compiler/node-test-utils.h"
-#include "testing/gmock-support.h"
-
-using testing::Capture;
-
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class JSTypeFeedbackTest : public TypedGraphTest {
- public:
- JSTypeFeedbackTest()
- : TypedGraphTest(3),
- javascript_(zone()),
- dependencies_(isolate(), zone()) {}
- ~JSTypeFeedbackTest() override { dependencies_.Rollback(); }
-
- protected:
- Reduction Reduce(Node* node,
- JSTypeFeedbackSpecializer::DeoptimizationMode mode) {
- Handle<GlobalObject> global_object(
- isolate()->native_context()->global_object(), isolate());
-
- MachineOperatorBuilder machine(zone());
- SimplifiedOperatorBuilder simplified(zone());
- JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
- &machine);
- JSTypeFeedbackTable table(zone());
- // TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
- JSTypeFeedbackSpecializer reducer(&graph_reducer, &jsgraph, &table, nullptr,
- global_object, mode, &dependencies_);
- return reducer.Reduce(node);
- }
-
- Node* EmptyFrameState() {
- MachineOperatorBuilder machine(zone());
- JSGraph jsgraph(isolate(), graph(), common(), javascript(), nullptr,
- &machine);
- return jsgraph.EmptyFrameState();
- }
-
- JSOperatorBuilder* javascript() { return &javascript_; }
-
- void SetGlobalProperty(const char* string, int value) {
- SetGlobalProperty(string, Handle<Smi>(Smi::FromInt(value), isolate()));
- }
-
- void SetGlobalProperty(const char* string, double value) {
- SetGlobalProperty(string, isolate()->factory()->NewNumber(value));
- }
-
- void SetGlobalProperty(const char* string, Handle<Object> value) {
- Handle<JSObject> global(isolate()->context()->global_object(), isolate());
- Handle<String> name =
- isolate()->factory()->NewStringFromAsciiChecked(string);
- MaybeHandle<Object> result =
- JSReceiver::SetProperty(global, name, value, SLOPPY);
- result.Assert();
- }
-
- Node* ReturnLoadNamedFromGlobal(
- const char* string, Node* effect, Node* control,
- JSTypeFeedbackSpecializer::DeoptimizationMode mode) {
- VectorSlotPair feedback;
- Node* vector = UndefinedConstant();
- Node* context = UndefinedConstant();
-
- Handle<Name> name = isolate()->factory()->InternalizeUtf8String(string);
- const Operator* op = javascript()->LoadGlobal(name, feedback);
- Node* load = graph()->NewNode(op, vector, context, EmptyFrameState(),
- EmptyFrameState(), effect, control);
- Node* if_success = graph()->NewNode(common()->IfSuccess(), load);
- return graph()->NewNode(common()->Return(), load, load, if_success);
- }
-
- CompilationDependencies* dependencies() { return &dependencies_; }
-
- private:
- JSOperatorBuilder javascript_;
- CompilationDependencies dependencies_;
-};
-
-
-TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstSmi) {
- const int kValue = 111;
- const char* kName = "banana";
- SetGlobalProperty(kName, kValue);
-
- Node* ret = ReturnLoadNamedFromGlobal(
- kName, graph()->start(), graph()->start(),
- JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
- graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
-
- Reduction r = Reduce(ret->InputAt(0),
- JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
- EXPECT_FALSE(r.Changed());
- EXPECT_TRUE(dependencies()->IsEmpty());
-}
-
-
-TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstSmiWithDeoptimization) {
- const int kValue = 111;
- const char* kName = "banana";
- SetGlobalProperty(kName, kValue);
-
- Node* ret = ReturnLoadNamedFromGlobal(
- kName, graph()->start(), graph()->start(),
- JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
- graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
-
- Reduction r = Reduce(ret->InputAt(0),
- JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
-
- // Check LoadNamed(global) => HeapConstant[kValue]
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(kValue));
-
- EXPECT_THAT(ret, IsReturn(IsNumberConstant(kValue), graph()->start(),
- graph()->start()));
- EXPECT_THAT(graph()->end(), IsEnd(ret));
-
- EXPECT_FALSE(dependencies()->IsEmpty());
- dependencies()->Rollback();
-}
-
-
-TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstNumber) {
- const double kValue = -11.25;
- const char* kName = "kiwi";
- SetGlobalProperty(kName, kValue);
-
- Node* ret = ReturnLoadNamedFromGlobal(
- kName, graph()->start(), graph()->start(),
- JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
- graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
-
- Reduction r = Reduce(ret->InputAt(0),
- JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
-
- EXPECT_FALSE(r.Changed());
- EXPECT_TRUE(dependencies()->IsEmpty());
-}
-
-
-TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstNumberWithDeoptimization) {
- const double kValue = -11.25;
- const char* kName = "kiwi";
- SetGlobalProperty(kName, kValue);
-
- Node* ret = ReturnLoadNamedFromGlobal(
- kName, graph()->start(), graph()->start(),
- JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
- graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
-
- Reduction r = Reduce(ret->InputAt(0),
- JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
-
- // Check LoadNamed(global) => HeapConstant[kValue]
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(kValue));
-
- EXPECT_THAT(ret, IsReturn(IsNumberConstant(kValue), graph()->start(),
- graph()->start()));
- EXPECT_THAT(graph()->end(), IsEnd(ret));
-
- EXPECT_FALSE(dependencies()->IsEmpty());
-}
-
-
-TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstString) {
- Handle<HeapObject> kValue = isolate()->factory()->undefined_string();
- const char* kName = "mango";
- SetGlobalProperty(kName, kValue);
-
- Node* ret = ReturnLoadNamedFromGlobal(
- kName, graph()->start(), graph()->start(),
- JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
- graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
-
- Reduction r = Reduce(ret->InputAt(0),
- JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
- ASSERT_FALSE(r.Changed());
- EXPECT_TRUE(dependencies()->IsEmpty());
-}
-
-
-TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstStringWithDeoptimization) {
- Handle<HeapObject> kValue = isolate()->factory()->undefined_string();
- const char* kName = "mango";
- SetGlobalProperty(kName, kValue);
-
- Node* ret = ReturnLoadNamedFromGlobal(
- kName, graph()->start(), graph()->start(),
- JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
- graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
-
- Reduction r = Reduce(ret->InputAt(0),
- JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
-
- // Check LoadNamed(global) => HeapConstant[kValue]
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsHeapConstant(kValue));
-
- EXPECT_THAT(ret, IsReturn(IsHeapConstant(kValue), graph()->start(),
- graph()->start()));
- EXPECT_THAT(graph()->end(), IsEnd(ret));
-
- EXPECT_FALSE(dependencies()->IsEmpty());
- dependencies()->Rollback();
-}
-
-
-TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalPropertyCellSmi) {
- const char* kName = "melon";
- SetGlobalProperty(kName, 123);
- SetGlobalProperty(kName, 124);
-
- Node* ret = ReturnLoadNamedFromGlobal(
- kName, graph()->start(), graph()->start(),
- JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
- graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
-
- Reduction r = Reduce(ret->InputAt(0),
- JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
- ASSERT_FALSE(r.Changed());
- EXPECT_TRUE(dependencies()->IsEmpty());
-}
-
-
-TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalPropertyCellSmiWithDeoptimization) {
- const char* kName = "melon";
- SetGlobalProperty(kName, 123);
- SetGlobalProperty(kName, 124);
-
- Node* ret = ReturnLoadNamedFromGlobal(
- kName, graph()->start(), graph()->start(),
- JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
- graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
-
- Reduction r = Reduce(ret->InputAt(0),
- JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
-
- // Check LoadNamed(global) => LoadField[PropertyCell::value](cell)
- ASSERT_TRUE(r.Changed());
- FieldAccess access = AccessBuilder::ForPropertyCellValue();
- Capture<Node*> cell_capture;
- Matcher<Node*> load_field_match = IsLoadField(
- access, CaptureEq(&cell_capture), graph()->start(), graph()->start());
- EXPECT_THAT(r.replacement(), load_field_match);
-
- HeapObjectMatcher cell(cell_capture.value());
- EXPECT_TRUE(cell.HasValue());
- EXPECT_TRUE(cell.Value()->IsPropertyCell());
-
- EXPECT_THAT(ret,
- IsReturn(load_field_match, load_field_match, graph()->start()));
- EXPECT_THAT(graph()->end(), IsEnd(ret));
-
- EXPECT_FALSE(dependencies()->IsEmpty());
- dependencies()->Rollback();
-}
-
-
-TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalPropertyCellString) {
- const char* kName = "pineapple";
- SetGlobalProperty(kName, isolate()->factory()->undefined_string());
- SetGlobalProperty(kName, isolate()->factory()->undefined_value());
-
- Node* ret = ReturnLoadNamedFromGlobal(
- kName, graph()->start(), graph()->start(),
- JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
- graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
-
- Reduction r = Reduce(ret->InputAt(0),
- JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
- ASSERT_FALSE(r.Changed());
- EXPECT_TRUE(dependencies()->IsEmpty());
-}
-
-
-TEST_F(JSTypeFeedbackTest,
- JSLoadNamedGlobalPropertyCellStringWithDeoptimization) {
- const char* kName = "pineapple";
- SetGlobalProperty(kName, isolate()->factory()->undefined_string());
- SetGlobalProperty(kName, isolate()->factory()->undefined_value());
-
- Node* ret = ReturnLoadNamedFromGlobal(
- kName, graph()->start(), graph()->start(),
- JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
- graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
-
- Reduction r = Reduce(ret->InputAt(0),
- JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
-
- // Check LoadNamed(global) => LoadField[PropertyCell::value](cell)
- ASSERT_TRUE(r.Changed());
- FieldAccess access = AccessBuilder::ForPropertyCellValue();
- Capture<Node*> cell_capture;
- Matcher<Node*> load_field_match = IsLoadField(
- access, CaptureEq(&cell_capture), graph()->start(), graph()->start());
- EXPECT_THAT(r.replacement(), load_field_match);
-
- HeapObjectMatcher cell(cell_capture.value());
- EXPECT_TRUE(cell.HasValue());
- EXPECT_TRUE(cell.Value()->IsPropertyCell());
-
- EXPECT_THAT(ret,
- IsReturn(load_field_match, load_field_match, graph()->start()));
- EXPECT_THAT(graph()->end(), IsEnd(ret));
-
- EXPECT_FALSE(dependencies()->IsEmpty());
- dependencies()->Rollback();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 72c582525e..ec1ff19880 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -33,36 +33,8 @@ const ExternalArrayType kExternalArrayTypes[] = {
kExternalInt16Array, kExternalUint32Array, kExternalInt32Array,
kExternalFloat32Array, kExternalFloat64Array};
-
-const double kFloat64Values[] = {
- -V8_INFINITY, -4.23878e+275, -5.82632e+265, -6.60355e+220, -6.26172e+212,
- -2.56222e+211, -4.82408e+201, -1.84106e+157, -1.63662e+127, -1.55772e+100,
- -1.67813e+72, -2.3382e+55, -3.179e+30, -1.441e+09, -1.0647e+09,
- -7.99361e+08, -5.77375e+08, -2.20984e+08, -32757, -13171, -9970, -3984,
- -107, -105, -92, -77, -61, -0.000208163, -1.86685e-06, -1.17296e-10,
- -9.26358e-11, -5.08004e-60, -1.74753e-65, -1.06561e-71, -5.67879e-79,
- -5.78459e-130, -2.90989e-171, -7.15489e-243, -3.76242e-252, -1.05639e-263,
- -4.40497e-267, -2.19666e-273, -4.9998e-276, -5.59821e-278, -2.03855e-282,
- -5.99335e-283, -7.17554e-284, -3.11744e-309, -0.0, 0.0, 2.22507e-308,
- 1.30127e-270, 7.62898e-260, 4.00313e-249, 3.16829e-233, 1.85244e-228,
- 2.03544e-129, 1.35126e-110, 1.01182e-106, 5.26333e-94, 1.35292e-90,
- 2.85394e-83, 1.78323e-77, 5.4967e-57, 1.03207e-25, 4.57401e-25, 1.58738e-05,
- 2, 125, 2310, 9636, 14802, 17168, 28945, 29305, 4.81336e+07, 1.41207e+08,
- 4.65962e+08, 1.40499e+09, 2.12648e+09, 8.80006e+30, 1.4446e+45, 1.12164e+54,
- 2.48188e+89, 6.71121e+102, 3.074e+112, 4.9699e+152, 5.58383e+166,
- 4.30654e+172, 7.08824e+185, 9.6586e+214, 2.028e+223, 6.63277e+243,
- 1.56192e+261, 1.23202e+269, 5.72883e+289, 8.5798e+290, 1.40256e+294,
- 1.79769e+308, V8_INFINITY};
-
-
const size_t kIndices[] = {0, 1, 42, 100, 1024};
-
-const double kIntegerValues[] = {-V8_INFINITY, INT_MIN, -1000.0, -42.0,
- -1.0, 0.0, 1.0, 42.0,
- 1000.0, INT_MAX, UINT_MAX, V8_INFINITY};
-
-
Type* const kJSTypes[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
Type::Number(), Type::String(), Type::Object()};
@@ -112,100 +84,6 @@ class JSTypedLoweringTest : public TypedGraphTest {
// -----------------------------------------------------------------------------
-// Constant propagation
-
-
-TEST_F(JSTypedLoweringTest, ParameterWithMinusZero) {
- {
- Reduction r = Reduce(
- Parameter(Type::Constant(factory()->minus_zero_value(), zone())));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(-0.0));
- }
- {
- Reduction r = Reduce(Parameter(Type::MinusZero()));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(-0.0));
- }
- {
- Reduction r = Reduce(Parameter(
- Type::Union(Type::MinusZero(),
- Type::Constant(factory()->NewNumber(0), zone()), zone())));
- EXPECT_FALSE(r.Changed());
- }
-}
-
-
-TEST_F(JSTypedLoweringTest, ParameterWithNull) {
- Handle<HeapObject> null = factory()->null_value();
- {
- Reduction r = Reduce(Parameter(Type::Constant(null, zone())));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsHeapConstant(null));
- }
- {
- Reduction r = Reduce(Parameter(Type::Null()));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsHeapConstant(null));
- }
-}
-
-
-TEST_F(JSTypedLoweringTest, ParameterWithNaN) {
- const double kNaNs[] = {-std::numeric_limits<double>::quiet_NaN(),
- std::numeric_limits<double>::quiet_NaN(),
- std::numeric_limits<double>::signaling_NaN()};
- TRACED_FOREACH(double, nan, kNaNs) {
- Handle<Object> constant = factory()->NewNumber(nan);
- Reduction r = Reduce(Parameter(Type::Constant(constant, zone())));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(IsNaN()));
- }
- {
- Reduction r =
- Reduce(Parameter(Type::Constant(factory()->nan_value(), zone())));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(IsNaN()));
- }
- {
- Reduction r = Reduce(Parameter(Type::NaN()));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(IsNaN()));
- }
-}
-
-
-TEST_F(JSTypedLoweringTest, ParameterWithPlainNumber) {
- TRACED_FOREACH(double, value, kFloat64Values) {
- Handle<Object> constant = factory()->NewNumber(value);
- Reduction r = Reduce(Parameter(Type::Constant(constant, zone())));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(value));
- }
- TRACED_FOREACH(double, value, kIntegerValues) {
- Reduction r = Reduce(Parameter(Type::Range(value, value, zone())));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(value));
- }
-}
-
-
-TEST_F(JSTypedLoweringTest, ParameterWithUndefined) {
- Handle<HeapObject> undefined = factory()->undefined_value();
- {
- Reduction r = Reduce(Parameter(Type::Undefined()));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
- }
- {
- Reduction r = Reduce(Parameter(Type::Constant(undefined, zone())));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
- }
-}
-
-
-// -----------------------------------------------------------------------------
// JSToBoolean
@@ -219,60 +97,6 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithBoolean) {
}
-TEST_F(JSTypedLoweringTest, JSToBooleanWithFalsish) {
- Node* input = Parameter(
- Type::Union(
- Type::MinusZero(),
- Type::Union(
- Type::NaN(),
- Type::Union(
- Type::Null(),
- Type::Union(
- Type::Undefined(),
- Type::Union(
- Type::Undetectable(),
- Type::Union(
- Type::Constant(factory()->false_value(), zone()),
- Type::Range(0.0, 0.0, zone()), zone()),
- zone()),
- zone()),
- zone()),
- zone()),
- zone()),
- 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFalseConstant());
-}
-
-
-TEST_F(JSTypedLoweringTest, JSToBooleanWithTruish) {
- Node* input = Parameter(
- Type::Union(
- Type::Constant(factory()->true_value(), zone()),
- Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone()),
- zone()),
- 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsTrueConstant());
-}
-
-
-TEST_F(JSTypedLoweringTest, JSToBooleanWithNonZeroPlainNumber) {
- Node* input = Parameter(Type::Range(1, V8_INFINITY, zone()), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsTrueConstant());
-}
-
-
TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumber) {
Node* input = Parameter(Type::OrderedNumber(), 0);
Node* context = Parameter(Type::Any(), 1);
@@ -289,24 +113,9 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithNumber) {
Reduction r = Reduce(graph()->NewNode(
javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsNumberLessThan(IsNumberConstant(0.0), IsNumberAbs(input)));
+ EXPECT_THAT(r.replacement(), IsNumberToBoolean(input));
}
-TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
- Node* input = Parameter(Type::String(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsNumberLessThan(IsNumberConstant(0.0),
- IsLoadField(AccessBuilder::ForStringLength(), input,
- graph()->start(), graph()->start())));
-}
-
-
TEST_F(JSTypedLoweringTest, JSToBooleanWithAny) {
Node* input = Parameter(Type::Any(), 0);
Node* context = Parameter(Type::Any(), 1);
@@ -1022,68 +831,6 @@ TEST_F(JSTypedLoweringTest, JSSubtractSmis) {
}
// -----------------------------------------------------------------------------
-// JSInstanceOf
-// Test that instanceOf is reduced if and only if the right-hand side is a
-// function constant. Functional correctness is ensured elsewhere.
-
-TEST_F(JSTypedLoweringTest, JSInstanceOfSpecializationWithoutSmiCheck) {
- Node* const context = Parameter(Type::Any());
- Node* const frame_state = EmptyFrameState();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
-
- // Reduce if left-hand side is known to be an object.
- Node* instanceOf =
- graph()->NewNode(javascript()->InstanceOf(), Parameter(Type::Object(), 0),
- HeapConstant(isolate()->object_function()), context,
- frame_state, effect, control);
- Node* dummy = graph()->NewNode(javascript()->ToObject(), instanceOf, context,
- frame_state, effect, control);
- Reduction r = Reduce(instanceOf);
- ASSERT_TRUE(r.Changed());
- ASSERT_EQ(r.replacement(), dummy->InputAt(0));
- ASSERT_NE(instanceOf, dummy->InputAt(0));
-}
-
-
-TEST_F(JSTypedLoweringTest, JSInstanceOfSpecializationWithSmiCheck) {
- Node* const context = Parameter(Type::Any());
- Node* const frame_state = EmptyFrameState();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
-
- // Reduce if left-hand side could be a Smi.
- Node* instanceOf =
- graph()->NewNode(javascript()->InstanceOf(), Parameter(Type::Any(), 0),
- HeapConstant(isolate()->object_function()), context,
- frame_state, effect, control);
- Node* dummy = graph()->NewNode(javascript()->ToObject(), instanceOf, context,
- frame_state, effect, control);
- Reduction r = Reduce(instanceOf);
- ASSERT_TRUE(r.Changed());
- ASSERT_EQ(r.replacement(), dummy->InputAt(0));
- ASSERT_NE(instanceOf, dummy->InputAt(0));
-}
-
-
-TEST_F(JSTypedLoweringTest, JSInstanceOfNoSpecialization) {
- Node* const context = Parameter(Type::Any());
- Node* const frame_state = EmptyFrameState();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
-
- // Do not reduce if right-hand side is not a function constant.
- Node* instanceOf = graph()->NewNode(
- javascript()->InstanceOf(), Parameter(Type::Any(), 0),
- Parameter(Type::Any()), context, frame_state, effect, control);
- Node* dummy = graph()->NewNode(javascript()->ToObject(), instanceOf, context,
- frame_state, effect, control);
- Reduction r = Reduce(instanceOf);
- ASSERT_FALSE(r.Changed());
- ASSERT_EQ(instanceOf, dummy->InputAt(0));
-}
-
-// -----------------------------------------------------------------------------
// JSBitwiseAnd
TEST_F(JSTypedLoweringTest, JSBitwiseAndWithSignedSmallHint) {
diff --git a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
index ada99b5a7f..81393941bb 100644
--- a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
@@ -213,6 +213,269 @@ TEST_F(LoadEliminationTest, StoreFieldAndStoreElementAndLoadField) {
EXPECT_EQ(value, r.replacement());
}
+TEST_F(LoadEliminationTest, LoadElementOnTrueBranchOfDiamond) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ Node* check = Parameter(Type::Boolean(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = graph()->NewNode(simplified()->LoadElement(access), object,
+ index, effect, if_true);
+ load_elimination.Reduce(etrue);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ load_elimination.Reduce(effect);
+
+ Node* load = graph()->NewNode(simplified()->LoadElement(access), object,
+ index, effect, control);
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
+}
+
+TEST_F(LoadEliminationTest, LoadElementOnFalseBranchOfDiamond) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ Node* check = Parameter(Type::Boolean(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = graph()->NewNode(simplified()->LoadElement(access), object,
+ index, effect, if_false);
+ load_elimination.Reduce(efalse);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ load_elimination.Reduce(effect);
+
+ Node* load = graph()->NewNode(simplified()->LoadElement(access), object,
+ index, effect, control);
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
+}
+
+TEST_F(LoadEliminationTest, LoadFieldOnFalseBranchOfDiamond) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* check = Parameter(Type::Boolean(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ FieldAccess const access = {kTaggedBase,
+ kPointerSize,
+ MaybeHandle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = graph()->NewNode(simplified()->LoadField(access), object,
+ effect, if_false);
+ load_elimination.Reduce(efalse);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ load_elimination.Reduce(effect);
+
+ Node* load = graph()->NewNode(simplified()->LoadField(access), object, effect,
+ control);
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
+}
+
+TEST_F(LoadEliminationTest, LoadFieldOnTrueBranchOfDiamond) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* check = Parameter(Type::Boolean(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ FieldAccess const access = {kTaggedBase,
+ kPointerSize,
+ MaybeHandle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = graph()->NewNode(simplified()->LoadField(access), object,
+ effect, if_true);
+ load_elimination.Reduce(etrue);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ load_elimination.Reduce(effect);
+
+ Node* load = graph()->NewNode(simplified()->LoadField(access), object, effect,
+ control);
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
+}
+
+TEST_F(LoadEliminationTest, LoadFieldWithTypeMismatch) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* value = Parameter(Type::Signed32(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ FieldAccess const access = {kTaggedBase,
+ kPointerSize,
+ MaybeHandle<Name>(),
+ Type::Unsigned31(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* store = effect = graph()->NewNode(simplified()->StoreField(access),
+ object, value, effect, control);
+ load_elimination.Reduce(effect);
+
+ Node* load = graph()->NewNode(simplified()->LoadField(access), object, effect,
+ control);
+ EXPECT_CALL(editor,
+ ReplaceWithValue(load, IsTypeGuard(value, control), store, _));
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTypeGuard(value, control));
+}
+
+TEST_F(LoadEliminationTest, LoadElementWithTypeMismatch) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ Node* value = Parameter(Type::Signed32(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ ElementAccess const access = {kTaggedBase, kPointerSize, Type::Unsigned31(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* store = effect =
+ graph()->NewNode(simplified()->StoreElement(access), object, index, value,
+ effect, control);
+ load_elimination.Reduce(effect);
+
+ Node* load = graph()->NewNode(simplified()->LoadElement(access), object,
+ index, effect, control);
+ EXPECT_CALL(editor,
+ ReplaceWithValue(load, IsTypeGuard(value, control), store, _));
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTypeGuard(value, control));
+}
+
+TEST_F(LoadEliminationTest, AliasAnalysisForFinishRegion) {
+ Node* value0 = Parameter(Type::Signed32(), 0);
+ Node* value1 = Parameter(Type::Signed32(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ FieldAccess const access = {kTaggedBase,
+ kPointerSize,
+ MaybeHandle<Name>(),
+ Type::Signed32(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(effect);
+
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ load_elimination.Reduce(effect);
+
+ Node* object0 = effect =
+ graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Constant(16), effect, control);
+ load_elimination.Reduce(effect);
+
+ Node* region0 = effect =
+ graph()->NewNode(common()->FinishRegion(), object0, effect);
+ load_elimination.Reduce(effect);
+
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ load_elimination.Reduce(effect);
+
+ Node* object1 = effect =
+ graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Constant(16), effect, control);
+ load_elimination.Reduce(effect);
+
+ Node* region1 = effect =
+ graph()->NewNode(common()->FinishRegion(), object1, effect);
+ load_elimination.Reduce(effect);
+
+ effect = graph()->NewNode(simplified()->StoreField(access), region0, value0,
+ effect, control);
+ load_elimination.Reduce(effect);
+
+ effect = graph()->NewNode(simplified()->StoreField(access), region1, value1,
+ effect, control);
+ load_elimination.Reduce(effect);
+
+ Node* load = graph()->NewNode(simplified()->LoadField(access), region0,
+ effect, control);
+ EXPECT_CALL(editor, ReplaceWithValue(load, value0, effect, _));
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(value0, r.replacement());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index ed426be5d8..1d29d9733f 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -23,10 +23,10 @@ namespace v8 {
namespace internal {
namespace compiler {
-class MachineOperatorReducerTest : public TypedGraphTest {
+class MachineOperatorReducerTest : public GraphTest {
public:
explicit MachineOperatorReducerTest(int num_parameters = 2)
- : TypedGraphTest(num_parameters), machine_(zone()) {}
+ : GraphTest(num_parameters), machine_(zone()) {}
protected:
Reduction Reduce(Node* node) {
@@ -729,25 +729,52 @@ TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithParameters) {
EXPECT_EQ(reduction2.replacement(), node2);
EXPECT_THAT(reduction2.replacement(), IsWord32Ror(value, sub));
- // Testing rotate right.
- Node* shl_r = graph()->NewNode(machine()->Word32Shl(), value, sub);
- Node* shr_r = graph()->NewNode(machine()->Word32Shr(), value, shift);
-
- // (x << (32 - y)) | (x >>> y) => x ror y
- Node* node3 = graph()->NewNode(machine()->Word32Or(), shl_r, shr_r);
+ // (x << y) ^ (x >>> (32 - y)) => x ror (32 - y)
+ Node* node3 = graph()->NewNode(machine()->Word32Xor(), shl_l, shr_l);
Reduction reduction3 = Reduce(node3);
EXPECT_TRUE(reduction3.Changed());
EXPECT_EQ(reduction3.replacement(), node3);
- EXPECT_THAT(reduction3.replacement(), IsWord32Ror(value, shift));
+ EXPECT_THAT(reduction3.replacement(), IsWord32Ror(value, sub));
- // (x >>> y) | (x << (32 - y)) => x ror y
- Node* node4 = graph()->NewNode(machine()->Word32Or(), shr_r, shl_r);
+ // (x >>> (32 - y)) ^ (x << y) => x ror (32 - y)
+ Node* node4 = graph()->NewNode(machine()->Word32Xor(), shr_l, shl_l);
Reduction reduction4 = Reduce(node4);
EXPECT_TRUE(reduction4.Changed());
EXPECT_EQ(reduction4.replacement(), node4);
- EXPECT_THAT(reduction4.replacement(), IsWord32Ror(value, shift));
-}
+ EXPECT_THAT(reduction4.replacement(), IsWord32Ror(value, sub));
+
+ // Testing rotate right.
+ Node* shl_r = graph()->NewNode(machine()->Word32Shl(), value, sub);
+ Node* shr_r = graph()->NewNode(machine()->Word32Shr(), value, shift);
+
+ // (x << (32 - y)) | (x >>> y) => x ror y
+ Node* node5 = graph()->NewNode(machine()->Word32Or(), shl_r, shr_r);
+ Reduction reduction5 = Reduce(node5);
+ EXPECT_TRUE(reduction5.Changed());
+ EXPECT_EQ(reduction5.replacement(), node5);
+ EXPECT_THAT(reduction5.replacement(), IsWord32Ror(value, shift));
+ // (x >>> y) | (x << (32 - y)) => x ror y
+ Node* node6 = graph()->NewNode(machine()->Word32Or(), shr_r, shl_r);
+ Reduction reduction6 = Reduce(node6);
+ EXPECT_TRUE(reduction6.Changed());
+ EXPECT_EQ(reduction6.replacement(), node6);
+ EXPECT_THAT(reduction6.replacement(), IsWord32Ror(value, shift));
+
+ // (x << (32 - y)) ^ (x >>> y) => x ror y
+ Node* node7 = graph()->NewNode(machine()->Word32Xor(), shl_r, shr_r);
+ Reduction reduction7 = Reduce(node7);
+ EXPECT_TRUE(reduction7.Changed());
+ EXPECT_EQ(reduction7.replacement(), node7);
+ EXPECT_THAT(reduction7.replacement(), IsWord32Ror(value, shift));
+
+ // (x >>> y) ^ (x << (32 - y)) => x ror y
+ Node* node8 = graph()->NewNode(machine()->Word32Xor(), shr_r, shl_r);
+ Reduction reduction8 = Reduce(node8);
+ EXPECT_TRUE(reduction8.Changed());
+ EXPECT_EQ(reduction8.replacement(), node8);
+ EXPECT_THAT(reduction8.replacement(), IsWord32Ror(value, shift));
+}
TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithConstant) {
Node* value = Parameter(0);
@@ -1587,6 +1614,48 @@ TEST_F(MachineOperatorReducerTest, Float32SubMinusZeroMinusX) {
}
}
+TEST_F(MachineOperatorReducerTest, Float64MulWithTwo) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction r = Reduce(
+ graph()->NewNode(machine()->Float64Mul(), Float64Constant(2.0), p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Add(p0, p0));
+ }
+ {
+ Reduction r = Reduce(
+ graph()->NewNode(machine()->Float64Mul(), p0, Float64Constant(2.0)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Add(p0, p0));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Div
+
+TEST_F(MachineOperatorReducerTest, Float64DivWithMinusOne) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction r = Reduce(
+ graph()->NewNode(machine()->Float64Div(), p0, Float64Constant(-1.0)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Neg(p0));
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Float64DivWithPowerOfTwo) {
+ Node* const p0 = Parameter(0);
+ TRACED_FORRANGE(uint64_t, exponent, 1, 0x7fe) {
+ Double divisor = Double(exponent << Double::kPhysicalSignificandSize);
+ if (divisor.value() == 1.0) continue; // Skip x / 1.0 => x.
+ Reduction r = Reduce(graph()->NewNode(machine()->Float64Div(), p0,
+ Float64Constant(divisor.value())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Mul(p0, IsFloat64Constant(1.0 / divisor.value())));
+ }
+}
+
// -----------------------------------------------------------------------------
// Float64Acos
@@ -1772,6 +1841,37 @@ TEST_F(MachineOperatorReducerTest, Float64Log1pWithConstant) {
}
// -----------------------------------------------------------------------------
+// Float64Pow
+
+TEST_F(MachineOperatorReducerTest, Float64PowWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ TRACED_FOREACH(double, y, kFloat64Values) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Float64Pow(), Float64Constant(x), Float64Constant(y)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(Pow(x, y))));
+ }
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Float64PowWithZeroExponent) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Float64Pow(), p0, Float64Constant(-0.0)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Constant(1.0));
+ }
+ {
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Float64Pow(), p0, Float64Constant(0.0)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Constant(1.0));
+ }
+}
+
+// -----------------------------------------------------------------------------
// Float64Sin
TEST_F(MachineOperatorReducerTest, Float64SinWithConstant) {
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index 7b5c667261..dc14b85361 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -408,6 +408,36 @@ TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
}
}
+TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsSeb, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsSeh, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
// ----------------------------------------------------------------------------
// Logical instructions.
@@ -1149,6 +1179,219 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
+ if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
+ return;
+ }
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float32Add(m.Float32Mul(p0, p1), p2);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ if (IsMipsArchVariant(kMips32r2)) {
+ EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ EXPECT_EQ(kMipsMaddfS, s[0]->arch_opcode());
+ }
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ if (IsMipsArchVariant(kMips32r2)) {
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ }
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float32Add(p0, m.Float32Mul(p1, p2));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ if (IsMipsArchVariant(kMips32r2)) {
+ EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ EXPECT_EQ(kMipsMaddfS, s[0]->arch_opcode());
+ }
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ if (IsMipsArchVariant(kMips32r2)) {
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ }
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
+ if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
+ return;
+ }
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float64Add(m.Float64Mul(p0, p1), p2);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ if (IsMipsArchVariant(kMips32r2)) {
+ EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ EXPECT_EQ(kMipsMaddfD, s[0]->arch_opcode());
+ }
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ if (IsMipsArchVariant(kMips32r2)) {
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ }
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float64Add(p0, m.Float64Mul(p1, p2));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ if (IsMipsArchVariant(kMips32r2)) {
+ EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ EXPECT_EQ(kMipsMaddfD, s[0]->arch_opcode());
+ }
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ if (IsMipsArchVariant(kMips32r2)) {
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ }
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* n = nullptr;
+
+ if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
+ return;
+ }
+
+ if (IsMipsArchVariant(kMips32r2)) {
+ n = m.Float32Sub(m.Float32Mul(p1, p2), p0);
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ n = m.Float32Sub(p0, m.Float32Mul(p1, p2));
+ }
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ if (IsMipsArchVariant(kMips32r2)) {
+ EXPECT_EQ(kMipsMsubS, s[0]->arch_opcode());
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ EXPECT_EQ(kMipsMsubfS, s[0]->arch_opcode());
+ }
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ if (IsMipsArchVariant(kMips32r2)) {
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ }
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* n = nullptr;
+
+ if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
+ return;
+ }
+
+ if (IsMipsArchVariant(kMips32r2)) {
+ n = m.Float64Sub(m.Float64Mul(p1, p2), p0);
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ n = m.Float64Sub(p0, m.Float64Mul(p1, p2));
+ }
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ if (IsMipsArchVariant(kMips32r2)) {
+ EXPECT_EQ(kMipsMsubD, s[0]->arch_opcode());
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ EXPECT_EQ(kMipsMsubfD, s[0]->arch_opcode());
+ }
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ if (IsMipsArchVariant(kMips32r2)) {
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ }
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
TEST_F(InstructionSelectorTest, Float64Max) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index c82cb9fe4f..be77126688 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -719,6 +719,51 @@ TEST_F(InstructionSelectorTest, Word64ShlWithWord64And) {
}
}
+TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Seb, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Seh, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(32)), m.Int32Constant(32));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Shl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
// ----------------------------------------------------------------------------
// MUL/DIV instructions.
@@ -1491,6 +1536,203 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float32Add(m.Float32Mul(p0, p1), p2);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ if (kArchVariant == kMips64r2) {
+ EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
+ } else if (kArchVariant == kMips64r6) {
+ EXPECT_EQ(kMips64MaddfS, s[0]->arch_opcode());
+ }
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ if (kArchVariant == kMips64r2) {
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ } else if (kArchVariant == kMips64r6) {
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ }
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float32Add(p0, m.Float32Mul(p1, p2));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ if (kArchVariant == kMips64r2) {
+ EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
+ } else if (kArchVariant == kMips64r6) {
+ EXPECT_EQ(kMips64MaddfS, s[0]->arch_opcode());
+ }
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ if (kArchVariant == kMips64r2) {
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ } else if (kArchVariant == kMips64r6) {
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ }
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float64Add(m.Float64Mul(p0, p1), p2);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ if (kArchVariant == kMips64r2) {
+ EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
+ } else if (kArchVariant == kMips64r6) {
+ EXPECT_EQ(kMips64MaddfD, s[0]->arch_opcode());
+ }
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ if (kArchVariant == kMips64r2) {
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ } else if (kArchVariant == kMips64r6) {
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ }
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float64Add(p0, m.Float64Mul(p1, p2));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ if (kArchVariant == kMips64r2) {
+ EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
+ } else if (kArchVariant == kMips64r6) {
+ EXPECT_EQ(kMips64MaddfD, s[0]->arch_opcode());
+ }
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ if (kArchVariant == kMips64r2) {
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ } else if (kArchVariant == kMips64r6) {
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ }
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* n;
+ if (kArchVariant == kMips64r2) {
+ n = m.Float32Sub(m.Float32Mul(p1, p2), p0);
+ } else if (kArchVariant == kMips64r6) {
+ n = m.Float32Sub(p0, m.Float32Mul(p1, p2));
+ }
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ if (kArchVariant == kMips64r2) {
+ EXPECT_EQ(kMips64MsubS, s[0]->arch_opcode());
+ } else if (kArchVariant == kMips64r6) {
+ EXPECT_EQ(kMips64MsubfS, s[0]->arch_opcode());
+ }
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ if (kArchVariant == kMips64r2) {
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ } else if (kArchVariant == kMips64r6) {
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ }
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* n;
+ if (kArchVariant == kMips64r2) {
+ n = m.Float64Sub(m.Float64Mul(p1, p2), p0);
+ } else if (kArchVariant == kMips64r6) {
+ n = m.Float64Sub(p0, m.Float64Mul(p1, p2));
+ }
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ if (kArchVariant == kMips64r2) {
+ EXPECT_EQ(kMips64MsubD, s[0]->arch_opcode());
+ } else if (kArchVariant == kMips64r6) {
+ EXPECT_EQ(kMips64MsubfD, s[0]->arch_opcode());
+ }
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ if (kArchVariant == kMips64r2) {
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ } else if (kArchVariant == kMips64r6) {
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ }
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
TEST_F(InstructionSelectorTest, Float64Max) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 5620b8bec1..3a5b2c3aeb 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -406,6 +406,35 @@ class IsTerminateMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
+class IsTypeGuardMatcher final : public NodeMatcher {
+ public:
+ IsTypeGuardMatcher(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kTypeGuard),
+ value_matcher_(value_matcher),
+ control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value (";
+ value_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value", value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
template <typename T>
class IsConstantMatcher final : public NodeMatcher {
@@ -1714,6 +1743,10 @@ Matcher<Node*> IsTerminate(const Matcher<Node*>& effect_matcher,
return MakeMatcher(new IsTerminateMatcher(effect_matcher, control_matcher));
}
+Matcher<Node*> IsTypeGuard(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsTypeGuardMatcher(value_matcher, control_matcher));
+}
Matcher<Node*> IsExternalConstant(
const Matcher<ExternalReference>& value_matcher) {
@@ -2274,7 +2307,9 @@ IS_BINOP_MATCHER(Float32LessThan)
IS_BINOP_MATCHER(Float32LessThanOrEqual)
IS_BINOP_MATCHER(Float64Max)
IS_BINOP_MATCHER(Float64Min)
+IS_BINOP_MATCHER(Float64Add)
IS_BINOP_MATCHER(Float64Sub)
+IS_BINOP_MATCHER(Float64Mul)
IS_BINOP_MATCHER(Float64InsertLowWord32)
IS_BINOP_MATCHER(Float64InsertHighWord32)
#undef IS_BINOP_MATCHER
@@ -2285,6 +2320,9 @@ IS_BINOP_MATCHER(Float64InsertHighWord32)
return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
}
IS_UNOP_MATCHER(BooleanNot)
+IS_UNOP_MATCHER(BitcastTaggedToWord)
+IS_UNOP_MATCHER(BitcastWordToTagged)
+IS_UNOP_MATCHER(BitcastWordToTaggedSigned)
IS_UNOP_MATCHER(TruncateFloat64ToWord32)
IS_UNOP_MATCHER(ChangeFloat64ToInt32)
IS_UNOP_MATCHER(ChangeFloat64ToUint32)
@@ -2332,6 +2370,7 @@ IS_UNOP_MATCHER(NumberSqrt)
IS_UNOP_MATCHER(NumberTan)
IS_UNOP_MATCHER(NumberTanh)
IS_UNOP_MATCHER(NumberTrunc)
+IS_UNOP_MATCHER(NumberToBoolean)
IS_UNOP_MATCHER(NumberToInt32)
IS_UNOP_MATCHER(NumberToUint32)
IS_UNOP_MATCHER(PlainPrimitiveToNumber)
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 2a24803380..3afe2adf14 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -83,6 +83,8 @@ Matcher<Node*> IsReturn2(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsTerminate(const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTypeGuard(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsExternalConstant(
const Matcher<ExternalReference>& value_matcher);
Matcher<Node*> IsHeapConstant(Handle<HeapObject> value);
@@ -384,6 +386,9 @@ Matcher<Node*> IsInt64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsJSAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsBitcastTaggedToWord(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsBitcastWordToTagged(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsBitcastWordToTaggedSigned(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateFloat64ToWord32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeFloat64ToUint32(const Matcher<Node*>& input_matcher);
@@ -405,8 +410,12 @@ Matcher<Node*> IsFloat64Max(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Min(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat64Add(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat64Mul(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Abs(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64Neg(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
@@ -425,6 +434,7 @@ Matcher<Node*> IsToNumber(const Matcher<Node*>& base_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsLoadContext(const Matcher<ContextAccess>& access_matcher,
const Matcher<Node*>& context_matcher);
+Matcher<Node*> IsNumberToBoolean(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsNumberToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsNumberToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsParameter(const Matcher<int> index_matcher);
diff --git a/deps/v8/test/unittests/compiler/opcodes-unittest.cc b/deps/v8/test/unittests/compiler/opcodes-unittest.cc
index 3bb65c2e13..a0e67ecb27 100644
--- a/deps/v8/test/unittests/compiler/opcodes-unittest.cc
+++ b/deps/v8/test/unittests/compiler/opcodes-unittest.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/opcodes.h"
-#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gtest-support.h"
namespace v8 {
namespace internal {
@@ -81,65 +81,60 @@ bool IsComparisonOpcode(IrOpcode::Value opcode) {
const IrOpcode::Value kInvalidOpcode = static_cast<IrOpcode::Value>(123456789);
-} // namespace
+char const* const kMnemonics[] = {
+#define OPCODE(Opcode) #Opcode,
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+};
+
+const IrOpcode::Value kOpcodes[] = {
+#define OPCODE(Opcode) IrOpcode::k##Opcode,
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+};
+} // namespace
TEST(IrOpcodeTest, IsCommonOpcode) {
EXPECT_FALSE(IrOpcode::IsCommonOpcode(kInvalidOpcode));
-#define OPCODE(Opcode) \
- EXPECT_EQ(IsCommonOpcode(IrOpcode::k##Opcode), \
- IrOpcode::IsCommonOpcode(IrOpcode::k##Opcode));
- ALL_OP_LIST(OPCODE)
-#undef OPCODE
+ TRACED_FOREACH(IrOpcode::Value, opcode, kOpcodes) {
+ EXPECT_EQ(IsCommonOpcode(opcode), IrOpcode::IsCommonOpcode(opcode));
+ }
}
-
TEST(IrOpcodeTest, IsControlOpcode) {
EXPECT_FALSE(IrOpcode::IsControlOpcode(kInvalidOpcode));
-#define OPCODE(Opcode) \
- EXPECT_EQ(IsControlOpcode(IrOpcode::k##Opcode), \
- IrOpcode::IsControlOpcode(IrOpcode::k##Opcode));
- ALL_OP_LIST(OPCODE)
-#undef OPCODE
+ TRACED_FOREACH(IrOpcode::Value, opcode, kOpcodes) {
+ EXPECT_EQ(IsControlOpcode(opcode), IrOpcode::IsControlOpcode(opcode));
+ }
}
-
TEST(IrOpcodeTest, IsJsOpcode) {
EXPECT_FALSE(IrOpcode::IsJsOpcode(kInvalidOpcode));
-#define OPCODE(Opcode) \
- EXPECT_EQ(IsJsOpcode(IrOpcode::k##Opcode), \
- IrOpcode::IsJsOpcode(IrOpcode::k##Opcode));
- ALL_OP_LIST(OPCODE)
-#undef OPCODE
+ TRACED_FOREACH(IrOpcode::Value, opcode, kOpcodes) {
+ EXPECT_EQ(IsJsOpcode(opcode), IrOpcode::IsJsOpcode(opcode));
+ }
}
-
TEST(IrOpcodeTest, IsConstantOpcode) {
EXPECT_FALSE(IrOpcode::IsConstantOpcode(kInvalidOpcode));
-#define OPCODE(Opcode) \
- EXPECT_EQ(IsConstantOpcode(IrOpcode::k##Opcode), \
- IrOpcode::IsConstantOpcode(IrOpcode::k##Opcode));
- ALL_OP_LIST(OPCODE)
-#undef OPCODE
+ TRACED_FOREACH(IrOpcode::Value, opcode, kOpcodes) {
+ EXPECT_EQ(IsConstantOpcode(opcode), IrOpcode::IsConstantOpcode(opcode));
+ }
}
-
TEST(IrOpcodeTest, IsComparisonOpcode) {
EXPECT_FALSE(IrOpcode::IsComparisonOpcode(kInvalidOpcode));
-#define OPCODE(Opcode) \
- EXPECT_EQ(IsComparisonOpcode(IrOpcode::k##Opcode), \
- IrOpcode::IsComparisonOpcode(IrOpcode::k##Opcode));
- ALL_OP_LIST(OPCODE)
-#undef OPCODE
+ TRACED_FOREACH(IrOpcode::Value, opcode, kOpcodes) {
+ EXPECT_EQ(IsComparisonOpcode(opcode), IrOpcode::IsComparisonOpcode(opcode));
+ }
}
-
TEST(IrOpcodeTest, Mnemonic) {
EXPECT_STREQ("UnknownOpcode", IrOpcode::Mnemonic(kInvalidOpcode));
-#define OPCODE(Opcode) \
- EXPECT_STREQ(#Opcode, IrOpcode::Mnemonic(IrOpcode::k##Opcode));
- ALL_OP_LIST(OPCODE)
-#undef OPCODE
+ TRACED_FOREACH(IrOpcode::Value, opcode, kOpcodes) {
+ EXPECT_STREQ(kMnemonics[opcode], IrOpcode::Mnemonic(opcode));
+ }
}
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index b21a148718..6f37609f3a 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/simplified-operator.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
-#include "src/compiler/simplified-operator.h"
#include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/types.h"
#include "src/conversions-inl.h"
-#include "src/types.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
@@ -97,10 +97,6 @@ const double kNaNs[] = {-std::numeric_limits<double>::quiet_NaN(),
bit_cast<double>(V8_UINT64_C(0x7FFFFFFFFFFFFFFF)),
bit_cast<double>(V8_UINT64_C(0xFFFFFFFFFFFFFFFF))};
-const CheckForMinusZeroMode kCheckForMinusZeroModes[] = {
- CheckForMinusZeroMode::kDontCheckForMinusZero,
- CheckForMinusZeroMode::kCheckForMinusZero};
-
} // namespace
@@ -191,13 +187,11 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToBitWithChangeBitToTagged) {
// ChangeFloat64ToTagged
TEST_F(SimplifiedOperatorReducerTest, ChangeFloat64ToTaggedWithConstant) {
- TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
- TRACED_FOREACH(double, n, kFloat64Values) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeFloat64ToTagged(mode), Float64Constant(n)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsNumberConstant(BitEq(n)));
- }
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeFloat64ToTagged(), Float64Constant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsNumberConstant(BitEq(n)));
}
}
@@ -222,13 +216,11 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeInt32ToTaggedWithConstant) {
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToFloat64WithChangeFloat64ToTagged) {
Node* param0 = Parameter(0);
- TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToFloat64(),
- graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_EQ(param0, reduction.replacement());
- }
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToFloat64(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
}
TEST_F(SimplifiedOperatorReducerTest,
@@ -279,13 +271,11 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant) {
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToInt32WithChangeFloat64ToTagged) {
Node* param0 = Parameter(0);
- TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToInt32(),
- graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToInt32(param0));
- }
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToInt32(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToInt32(param0));
}
TEST_F(SimplifiedOperatorReducerTest,
@@ -305,13 +295,11 @@ TEST_F(SimplifiedOperatorReducerTest,
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToUint32WithChangeFloat64ToTagged) {
Node* param0 = Parameter(0);
- TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToUint32(),
- graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToUint32(param0));
- }
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToUint32(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToUint32(param0));
}
TEST_F(SimplifiedOperatorReducerTest,
@@ -331,13 +319,11 @@ TEST_F(SimplifiedOperatorReducerTest,
TEST_F(SimplifiedOperatorReducerTest,
TruncateTaggedToWord3WithChangeFloat64ToTagged) {
Node* param0 = Parameter(0);
- TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->TruncateTaggedToWord32(),
- graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsTruncateFloat64ToWord32(param0));
- }
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->TruncateTaggedToWord32(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsTruncateFloat64ToWord32(param0));
}
TEST_F(SimplifiedOperatorReducerTest, TruncateTaggedToWord32WithConstant) {
@@ -350,20 +336,20 @@ TEST_F(SimplifiedOperatorReducerTest, TruncateTaggedToWord32WithConstant) {
}
// -----------------------------------------------------------------------------
-// CheckTaggedPointer
+// CheckHeapObject
-TEST_F(SimplifiedOperatorReducerTest, CheckTaggedPointerWithChangeBitToTagged) {
+TEST_F(SimplifiedOperatorReducerTest, CheckHeapObjectWithChangeBitToTagged) {
Node* param0 = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* value = graph()->NewNode(simplified()->ChangeBitToTagged(), param0);
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->CheckTaggedPointer(), value, effect, control));
+ Reduction reduction = Reduce(graph()->NewNode(simplified()->CheckHeapObject(),
+ value, effect, control));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(value, reduction.replacement());
}
-TEST_F(SimplifiedOperatorReducerTest, CheckTaggedPointerWithHeapConstant) {
+TEST_F(SimplifiedOperatorReducerTest, CheckHeapObjectWithHeapConstant) {
Node* effect = graph()->start();
Node* control = graph()->start();
Handle<HeapObject> kHeapObjects[] = {
@@ -372,34 +358,57 @@ TEST_F(SimplifiedOperatorReducerTest, CheckTaggedPointerWithHeapConstant) {
TRACED_FOREACH(Handle<HeapObject>, object, kHeapObjects) {
Node* value = HeapConstant(object);
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->CheckTaggedPointer(), value, effect, control));
+ simplified()->CheckHeapObject(), value, effect, control));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(value, reduction.replacement());
}
}
+TEST_F(SimplifiedOperatorReducerTest, CheckHeapObjectWithCheckHeapObject) {
+ Node* param0 = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ param0, effect, control);
+ Reduction reduction = Reduce(graph()->NewNode(simplified()->CheckHeapObject(),
+ value, effect, control));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
// -----------------------------------------------------------------------------
-// CheckTaggedSigned
+// CheckSmi
-TEST_F(SimplifiedOperatorReducerTest,
- CheckTaggedSignedWithChangeInt31ToTaggedSigned) {
+TEST_F(SimplifiedOperatorReducerTest, CheckSmiWithChangeInt31ToTaggedSigned) {
Node* param0 = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* value =
graph()->NewNode(simplified()->ChangeInt31ToTaggedSigned(), param0);
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->CheckTaggedSigned(), value, effect, control));
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->CheckSmi(), value, effect, control));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(value, reduction.replacement());
}
-TEST_F(SimplifiedOperatorReducerTest, CheckTaggedSignedWithNumberConstant) {
+TEST_F(SimplifiedOperatorReducerTest, CheckSmiWithNumberConstant) {
Node* effect = graph()->start();
Node* control = graph()->start();
Node* value = NumberConstant(1.0);
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->CheckTaggedSigned(), value, effect, control));
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->CheckSmi(), value, effect, control));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+TEST_F(SimplifiedOperatorReducerTest, CheckSmiWithCheckSmi) {
+ Node* param0 = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* value = effect =
+ graph()->NewNode(simplified()->CheckSmi(), param0, effect, control);
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->CheckSmi(), value, effect, control));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(value, reduction.replacement());
}
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
index febd76a528..d32dcaec12 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/simplified-operator.h"
#include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
#include "src/compiler/operator-properties.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/types.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/types.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -63,6 +63,8 @@ const PureOperator kPureOperators[] = {
PURE(ChangeTaggedToBit, Operator::kNoProperties, 1),
PURE(ChangeBitToTagged, Operator::kNoProperties, 1),
PURE(TruncateTaggedToWord32, Operator::kNoProperties, 1),
+ PURE(TruncateTaggedToFloat64, Operator::kNoProperties, 1),
+ PURE(TruncateTaggedToBit, Operator::kNoProperties, 1),
PURE(ObjectIsNumber, Operator::kNoProperties, 1),
PURE(ObjectIsReceiver, Operator::kNoProperties, 1),
PURE(ObjectIsSmi, Operator::kNoProperties, 1)
diff --git a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
new file mode 100644
index 0000000000..d73c72d4e0
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
@@ -0,0 +1,226 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/typed-optimization.h"
+#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/isolate-inl.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::IsNaN;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+const double kFloat64Values[] = {
+ -V8_INFINITY, -4.23878e+275, -5.82632e+265, -6.60355e+220,
+ -6.26172e+212, -2.56222e+211, -4.82408e+201, -1.84106e+157,
+ -1.63662e+127, -1.55772e+100, -1.67813e+72, -2.3382e+55,
+ -3.179e+30, -1.441e+09, -1.0647e+09, -7.99361e+08,
+ -5.77375e+08, -2.20984e+08, -32757, -13171,
+ -9970, -3984, -107, -105,
+ -92, -77, -61, -0.000208163,
+ -1.86685e-06, -1.17296e-10, -9.26358e-11, -5.08004e-60,
+ -1.74753e-65, -1.06561e-71, -5.67879e-79, -5.78459e-130,
+ -2.90989e-171, -7.15489e-243, -3.76242e-252, -1.05639e-263,
+ -4.40497e-267, -2.19666e-273, -4.9998e-276, -5.59821e-278,
+ -2.03855e-282, -5.99335e-283, -7.17554e-284, -3.11744e-309,
+ -0.0, 0.0, 2.22507e-308, 1.30127e-270,
+ 7.62898e-260, 4.00313e-249, 3.16829e-233, 1.85244e-228,
+ 2.03544e-129, 1.35126e-110, 1.01182e-106, 5.26333e-94,
+ 1.35292e-90, 2.85394e-83, 1.78323e-77, 5.4967e-57,
+ 1.03207e-25, 4.57401e-25, 1.58738e-05, 2,
+ 125, 2310, 9636, 14802,
+ 17168, 28945, 29305, 4.81336e+07,
+ 1.41207e+08, 4.65962e+08, 1.40499e+09, 2.12648e+09,
+ 8.80006e+30, 1.4446e+45, 1.12164e+54, 2.48188e+89,
+ 6.71121e+102, 3.074e+112, 4.9699e+152, 5.58383e+166,
+ 4.30654e+172, 7.08824e+185, 9.6586e+214, 2.028e+223,
+ 6.63277e+243, 1.56192e+261, 1.23202e+269, 5.72883e+289,
+ 8.5798e+290, 1.40256e+294, 1.79769e+308, V8_INFINITY};
+
+const double kIntegerValues[] = {-V8_INFINITY, INT_MIN, -1000.0, -42.0,
+ -1.0, 0.0, 1.0, 42.0,
+ 1000.0, INT_MAX, UINT_MAX, V8_INFINITY};
+
+} // namespace
+
+class TypedOptimizationTest : public TypedGraphTest {
+ public:
+ TypedOptimizationTest()
+ : TypedGraphTest(3), javascript_(zone()), deps_(isolate(), zone()) {}
+ ~TypedOptimizationTest() override {}
+
+ protected:
+ Reduction Reduce(Node* node) {
+ MachineOperatorBuilder machine(zone());
+ SimplifiedOperatorBuilder simplified(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
+ &machine);
+ // TODO(titzer): mock the GraphReducer here for better unit testing.
+ GraphReducer graph_reducer(zone(), graph());
+ TypedOptimization reducer(&graph_reducer, &deps_,
+ TypedOptimization::kDeoptimizationEnabled,
+ &jsgraph);
+ return reducer.Reduce(node);
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+ JSOperatorBuilder javascript_;
+ CompilationDependencies deps_;
+};
+
+TEST_F(TypedOptimizationTest, ParameterWithMinusZero) {
+ {
+ Reduction r = Reduce(
+ Parameter(Type::Constant(factory()->minus_zero_value(), zone())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(-0.0));
+ }
+ {
+ Reduction r = Reduce(Parameter(Type::MinusZero()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(-0.0));
+ }
+ {
+ Reduction r = Reduce(Parameter(
+ Type::Union(Type::MinusZero(),
+ Type::Constant(factory()->NewNumber(0), zone()), zone())));
+ EXPECT_FALSE(r.Changed());
+ }
+}
+
+TEST_F(TypedOptimizationTest, ParameterWithNull) {
+ Handle<HeapObject> null = factory()->null_value();
+ {
+ Reduction r = Reduce(Parameter(Type::Constant(null, zone())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsHeapConstant(null));
+ }
+ {
+ Reduction r = Reduce(Parameter(Type::Null()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsHeapConstant(null));
+ }
+}
+
+TEST_F(TypedOptimizationTest, ParameterWithNaN) {
+ const double kNaNs[] = {-std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::signaling_NaN()};
+ TRACED_FOREACH(double, nan, kNaNs) {
+ Handle<Object> constant = factory()->NewNumber(nan);
+ Reduction r = Reduce(Parameter(Type::Constant(constant, zone())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(IsNaN()));
+ }
+ {
+ Reduction r =
+ Reduce(Parameter(Type::Constant(factory()->nan_value(), zone())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(IsNaN()));
+ }
+ {
+ Reduction r = Reduce(Parameter(Type::NaN()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(IsNaN()));
+ }
+}
+
+TEST_F(TypedOptimizationTest, ParameterWithPlainNumber) {
+ TRACED_FOREACH(double, value, kFloat64Values) {
+ Handle<Object> constant = factory()->NewNumber(value);
+ Reduction r = Reduce(Parameter(Type::Constant(constant, zone())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(value));
+ }
+ TRACED_FOREACH(double, value, kIntegerValues) {
+ Reduction r = Reduce(Parameter(Type::Range(value, value, zone())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(value));
+ }
+}
+
+TEST_F(TypedOptimizationTest, ParameterWithUndefined) {
+ Handle<HeapObject> undefined = factory()->undefined_value();
+ {
+ Reduction r = Reduce(Parameter(Type::Undefined()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
+ }
+ {
+ Reduction r = Reduce(Parameter(Type::Constant(undefined, zone())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
+ }
+}
+
+TEST_F(TypedOptimizationTest, JSToBooleanWithFalsish) {
+ Node* input = Parameter(
+ Type::Union(
+ Type::MinusZero(),
+ Type::Union(
+ Type::NaN(),
+ Type::Union(
+ Type::Null(),
+ Type::Union(
+ Type::Undefined(),
+ Type::Union(
+ Type::Undetectable(),
+ Type::Union(
+ Type::Constant(factory()->false_value(), zone()),
+ Type::Range(0.0, 0.0, zone()), zone()),
+ zone()),
+ zone()),
+ zone()),
+ zone()),
+ zone()),
+ 0);
+ Node* context = Parameter(Type::Any(), 1);
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFalseConstant());
+}
+
+TEST_F(TypedOptimizationTest, JSToBooleanWithTruish) {
+ Node* input = Parameter(
+ Type::Union(
+ Type::Constant(factory()->true_value(), zone()),
+ Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone()),
+ zone()),
+ 0);
+ Node* context = Parameter(Type::Any(), 1);
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTrueConstant());
+}
+
+TEST_F(TypedOptimizationTest, JSToBooleanWithNonZeroPlainNumber) {
+ Node* input = Parameter(Type::Range(1, V8_INFINITY, zone()), 0);
+ Node* context = Parameter(Type::Any(), 1);
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTrueConstant());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/zone-pool-unittest.cc b/deps/v8/test/unittests/compiler/zone-pool-unittest.cc
index 47f1cc5c75..5bbdbfd45d 100644
--- a/deps/v8/test/unittests/compiler/zone-pool-unittest.cc
+++ b/deps/v8/test/unittests/compiler/zone-pool-unittest.cc
@@ -38,7 +38,7 @@ class ZonePoolTest : public TestWithIsolate {
}
private:
- base::AccountingAllocator allocator_;
+ v8::internal::AccountingAllocator allocator_;
ZonePool zone_pool_;
base::RandomNumberGenerator rng;
};
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index 84e4d973e2..677da0eb0b 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -160,7 +160,8 @@ TEST_F(GCTracerTest, RegularScope) {
EXPECT_DOUBLE_EQ(0.0, tracer->current_.scopes[GCTracer::Scope::MC_MARK]);
// Sample not added because it's not within a started tracer.
tracer->AddScopeSample(GCTracer::Scope::MC_MARK, 100);
- tracer->Start(MARK_COMPACTOR, "gc unittest", "collector unittest");
+ tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector unittest");
tracer->AddScopeSample(GCTracer::Scope::MC_MARK, 100);
tracer->Stop(MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(100.0, tracer->current_.scopes[GCTracer::Scope::MC_MARK]);
@@ -174,7 +175,8 @@ TEST_F(GCTracerTest, IncrementalScope) {
0.0, tracer->current_.scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]);
// Sample is added because its ScopeId is listed as incremental sample.
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
- tracer->Start(MARK_COMPACTOR, "gc unittest", "collector unittest");
+ tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector unittest");
// Switch to incremental MC to enable writing back incremental scopes.
tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
@@ -189,7 +191,12 @@ TEST_F(GCTracerTest, IncrementalMarkingDetails) {
// Round 1.
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 50);
- tracer->Start(MARK_COMPACTOR, "gc unittest", "collector unittest");
+ // Scavenger has no impact on incremental marking details.
+ tracer->Start(SCAVENGER, GarbageCollectionReason::kTesting,
+ "collector unittest");
+ tracer->Stop(SCAVENGER);
+ tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector unittest");
// Switch to incremental MC to enable writing back incremental scopes.
tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
@@ -208,12 +215,13 @@ TEST_F(GCTracerTest, IncrementalMarkingDetails) {
150,
tracer->current_
.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
- .cumulative_duration);
+ .duration);
- // Round 2. Cumulative numbers should add up, others should be reset.
+ // Round 2. Numbers should be reset.
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 13);
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 15);
- tracer->Start(MARK_COMPACTOR, "gc unittest", "collector unittest");
+ tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector unittest");
// Switch to incremental MC to enable writing back incremental scopes.
tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 122);
@@ -229,10 +237,61 @@ TEST_F(GCTracerTest, IncrementalMarkingDetails) {
.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
.steps);
EXPECT_DOUBLE_EQ(
- 300,
+ 150,
tracer->current_
.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
- .cumulative_duration);
+ .duration);
+}
+
+TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ // Round 1.
+ // 1000000 bytes in 100ms.
+ tracer->AddIncrementalMarkingStep(100, 1000000);
+ EXPECT_EQ(1000000 / 100,
+ tracer->IncrementalMarkingSpeedInBytesPerMillisecond());
+ // 1000000 bytes in 100ms.
+ tracer->AddIncrementalMarkingStep(100, 1000000);
+ EXPECT_EQ(1000000 / 100,
+ tracer->IncrementalMarkingSpeedInBytesPerMillisecond());
+ // Scavenger has no impact on incremental marking details.
+ tracer->Start(SCAVENGER, GarbageCollectionReason::kTesting,
+ "collector unittest");
+ tracer->Stop(SCAVENGER);
+ // 1000000 bytes in 100ms.
+ tracer->AddIncrementalMarkingStep(100, 1000000);
+ EXPECT_EQ(300, tracer->incremental_marking_duration_);
+ EXPECT_EQ(3000000, tracer->incremental_marking_bytes_);
+ EXPECT_EQ(1000000 / 100,
+ tracer->IncrementalMarkingSpeedInBytesPerMillisecond());
+ tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector unittest");
+ // Switch to incremental MC.
+ tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
+ // 1000000 bytes in 100ms.
+ tracer->AddIncrementalMarkingStep(100, 1000000);
+ EXPECT_EQ(400, tracer->incremental_marking_duration_);
+ EXPECT_EQ(4000000, tracer->incremental_marking_bytes_);
+ tracer->Stop(MARK_COMPACTOR);
+ EXPECT_EQ(400, tracer->current_.incremental_marking_duration);
+ EXPECT_EQ(4000000, tracer->current_.incremental_marking_bytes);
+ EXPECT_EQ(0, tracer->incremental_marking_duration_);
+ EXPECT_EQ(0, tracer->incremental_marking_bytes_);
+ EXPECT_EQ(1000000 / 100,
+ tracer->IncrementalMarkingSpeedInBytesPerMillisecond());
+
+ // Round 2.
+ tracer->AddIncrementalMarkingStep(2000, 1000);
+ tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ "collector unittest");
+ // Switch to incremental MC.
+ tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
+ tracer->Stop(MARK_COMPACTOR);
+ EXPECT_DOUBLE_EQ((4000000.0 / 400 + 1000.0 / 2000) / 2,
+ static_cast<double>(
+ tracer->IncrementalMarkingSpeedInBytesPerMillisecond()));
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/slot-set-unittest.cc b/deps/v8/test/unittests/heap/slot-set-unittest.cc
index cfb1f1f9d2..65b7925310 100644
--- a/deps/v8/test/unittests/heap/slot-set-unittest.cc
+++ b/deps/v8/test/unittests/heap/slot-set-unittest.cc
@@ -52,14 +52,16 @@ TEST(SlotSet, Iterate) {
}
}
- set.Iterate([](Address slot_address) {
- uintptr_t intaddr = reinterpret_cast<uintptr_t>(slot_address);
- if (intaddr % 3 == 0) {
- return KEEP_SLOT;
- } else {
- return REMOVE_SLOT;
- }
- });
+ set.Iterate(
+ [](Address slot_address) {
+ uintptr_t intaddr = reinterpret_cast<uintptr_t>(slot_address);
+ if (intaddr % 3 == 0) {
+ return KEEP_SLOT;
+ } else {
+ return REMOVE_SLOT;
+ }
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
if (i % 21 == 0) {
@@ -147,29 +149,34 @@ TEST(TypedSlotSet, Iterate) {
uint32_t j = 0;
for (uint32_t i = 0; i < TypedSlotSet::kMaxOffset;
i += kDelta, j += kHostDelta) {
- SlotType type = static_cast<SlotType>(i % NUMBER_OF_SLOT_TYPES);
+ SlotType type = static_cast<SlotType>(i % CLEARED_SLOT);
set.Insert(type, j, i);
++added;
}
int iterated = 0;
- set.Iterate([&iterated, kDelta, kHostDelta](SlotType type, Address host_addr,
- Address addr) {
- uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
- uint32_t j = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(host_addr));
- EXPECT_EQ(i % NUMBER_OF_SLOT_TYPES, static_cast<uint32_t>(type));
- EXPECT_EQ(0, i % kDelta);
- EXPECT_EQ(0, j % kHostDelta);
- ++iterated;
- return i % 2 == 0 ? KEEP_SLOT : REMOVE_SLOT;
- });
+ set.Iterate(
+ [&iterated, kDelta, kHostDelta](SlotType type, Address host_addr,
+ Address addr) {
+ uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
+ uint32_t j =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(host_addr));
+ EXPECT_EQ(i % CLEARED_SLOT, static_cast<uint32_t>(type));
+ EXPECT_EQ(0, i % kDelta);
+ EXPECT_EQ(0, j % kHostDelta);
+ ++iterated;
+ return i % 2 == 0 ? KEEP_SLOT : REMOVE_SLOT;
+ },
+ TypedSlotSet::KEEP_EMPTY_CHUNKS);
EXPECT_EQ(added, iterated);
iterated = 0;
- set.Iterate([&iterated](SlotType type, Address host_addr, Address addr) {
- uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
- EXPECT_EQ(0, i % 2);
- ++iterated;
- return KEEP_SLOT;
- });
+ set.Iterate(
+ [&iterated](SlotType type, Address host_addr, Address addr) {
+ uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
+ EXPECT_EQ(0, i % 2);
+ ++iterated;
+ return KEEP_SLOT;
+ },
+ TypedSlotSet::KEEP_EMPTY_CHUNKS);
EXPECT_EQ(added / 2, iterated);
}
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index fffc97f54d..4507d63eb1 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -33,6 +33,8 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
Register reg(0);
Register other(reg.index() + 1);
Register wide(128);
+ RegisterList reg_list;
+ RegisterList pair(0, 2), triple(0, 3);
// Emit argument creation operations.
builder.CreateArguments(CreateArgumentsType::kMappedArguments)
@@ -43,8 +45,9 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.LoadLiteral(Smi::FromInt(0))
.StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(8))
- .CompareOperation(Token::Value::NE, reg) // Prevent peephole optimization
- // LdaSmi, Star -> LdrSmi.
+ .CompareOperation(Token::Value::NE, reg,
+ 1) // Prevent peephole optimization
+ // LdaSmi, Star -> LdrSmi.
.StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(10000000))
.StoreAccumulatorInRegister(reg)
@@ -82,8 +85,8 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit context operations.
builder.PushContext(reg)
.PopContext(reg)
- .LoadContextSlot(reg, 1)
- .StoreContextSlot(reg, 1);
+ .LoadContextSlot(reg, 1, 0)
+ .StoreContextSlot(reg, 1, 0);
// Emit load / store property operations.
builder.LoadNamedProperty(reg, name, 0)
@@ -99,14 +102,22 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreLookupSlot(name, LanguageMode::SLOPPY)
.StoreLookupSlot(name, LanguageMode::STRICT);
+ // Emit load / store lookup slots with context fast paths.
+ builder.LoadLookupContextSlot(name, TypeofMode::NOT_INSIDE_TYPEOF, 1, 0)
+ .LoadLookupContextSlot(name, TypeofMode::INSIDE_TYPEOF, 1, 0);
+
+ // Emit load / store lookup slots with global fast paths.
+ builder.LoadLookupGlobalSlot(name, TypeofMode::NOT_INSIDE_TYPEOF, 1, 0)
+ .LoadLookupGlobalSlot(name, TypeofMode::INSIDE_TYPEOF, 1, 0);
+
// Emit closure operations.
builder.CreateClosure(0, NOT_TENURED);
// Emit create context operation.
builder.CreateBlockContext(factory->NewScopeInfo(1));
- builder.CreateCatchContext(reg, name);
+ builder.CreateCatchContext(reg, name, factory->NewScopeInfo(1));
builder.CreateFunctionContext(1);
- builder.CreateWithContext(reg);
+ builder.CreateWithContext(reg, factory->NewScopeInfo(1));
// Emit literal creation operations.
builder.CreateRegExpLiteral(factory->NewStringFromStaticChars("a"), 0, 0)
@@ -114,16 +125,11 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CreateObjectLiteral(factory->NewFixedArray(1), 0, 0, reg);
// Call operations.
- builder.Call(reg, other, 0, 1)
- .Call(reg, wide, 0, 1)
- .TailCall(reg, other, 0, 1)
- .TailCall(reg, wide, 0, 1)
- .CallRuntime(Runtime::kIsArray, reg, 1)
- .CallRuntime(Runtime::kIsArray, wide, 1)
- .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, reg, 1, other)
- .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, wide, 1, other)
- .CallJSRuntime(Context::SPREAD_ITERABLE_INDEX, reg, 1)
- .CallJSRuntime(Context::SPREAD_ITERABLE_INDEX, wide, 1);
+ builder.Call(reg, reg_list, 1)
+ .Call(reg, reg_list, 1, TailCallMode::kAllow)
+ .CallRuntime(Runtime::kIsArray, reg)
+ .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, reg_list, pair)
+ .CallJSRuntime(Context::SPREAD_ITERABLE_INDEX, reg_list);
// Emit binary operator invocations.
builder.BinaryOperation(Token::Value::ADD, reg, 1)
@@ -170,36 +176,37 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.Delete(reg, LanguageMode::SLOPPY).Delete(reg, LanguageMode::STRICT);
// Emit new.
- builder.New(reg, reg, 0);
- builder.New(wide, wide, 0);
+ builder.New(reg, reg_list, 1);
// Emit test operator invocations.
- builder.CompareOperation(Token::Value::EQ, reg)
- .CompareOperation(Token::Value::NE, reg)
- .CompareOperation(Token::Value::EQ_STRICT, reg)
- .CompareOperation(Token::Value::LT, reg)
- .CompareOperation(Token::Value::GT, reg)
- .CompareOperation(Token::Value::LTE, reg)
- .CompareOperation(Token::Value::GTE, reg)
- .CompareOperation(Token::Value::INSTANCEOF, reg)
- .CompareOperation(Token::Value::IN, reg);
-
- // Emit cast operator invocations.
- builder.CastAccumulatorToNumber(reg)
- .CastAccumulatorToJSObject(reg)
- .CastAccumulatorToName(reg);
-
- // Emit control flow. Return must be the last instruction.
- BytecodeLabel start;
- builder.Bind(&start);
+ builder.CompareOperation(Token::Value::EQ, reg, 1)
+ .CompareOperation(Token::Value::NE, reg, 2)
+ .CompareOperation(Token::Value::EQ_STRICT, reg, 3)
+ .CompareOperation(Token::Value::LT, reg, 4)
+ .CompareOperation(Token::Value::GT, reg, 5)
+ .CompareOperation(Token::Value::LTE, reg, 6)
+ .CompareOperation(Token::Value::GTE, reg, 7)
+ .CompareOperation(Token::Value::INSTANCEOF, reg, 8)
+ .CompareOperation(Token::Value::IN, reg, 9);
+
+ // Emit conversion operator invocations.
+ builder.ConvertAccumulatorToNumber(reg)
+ .ConvertAccumulatorToObject(reg)
+ .ConvertAccumulatorToName(reg);
+
+ // Short jumps with Imm8 operands
{
- // Short jumps with Imm8 operands
- BytecodeLabel after_jump;
- builder.Jump(&start)
- .Bind(&after_jump)
- .JumpIfNull(&start)
- .JumpIfUndefined(&start)
- .JumpIfNotHole(&start);
+ BytecodeLabel start, after_jump1, after_jump2, after_jump3, after_jump4;
+ builder.Bind(&start)
+ .Jump(&after_jump1)
+ .Bind(&after_jump1)
+ .JumpIfNull(&after_jump2)
+ .Bind(&after_jump2)
+ .JumpIfUndefined(&after_jump3)
+ .Bind(&after_jump3)
+ .JumpIfNotHole(&after_jump4)
+ .Bind(&after_jump4)
+ .JumpLoop(&start, 0);
}
// Longer jumps with constant operands
@@ -223,48 +230,31 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Perform an operation that returns boolean value to
// generate JumpIfTrue/False
- builder.CompareOperation(Token::Value::EQ, reg)
- .JumpIfTrue(&start)
- .CompareOperation(Token::Value::EQ, reg)
- .JumpIfFalse(&start);
+ {
+ BytecodeLabel after_jump1, after_jump2;
+ builder.CompareOperation(Token::Value::EQ, reg, 1)
+ .JumpIfTrue(&after_jump1)
+ .Bind(&after_jump1)
+ .CompareOperation(Token::Value::EQ, reg, 2)
+ .JumpIfFalse(&after_jump2)
+ .Bind(&after_jump2);
+ }
+
// Perform an operation that returns a non-boolean operation to
// generate JumpIfToBooleanTrue/False.
- builder.BinaryOperation(Token::Value::ADD, reg, 1)
- .JumpIfTrue(&start)
- .BinaryOperation(Token::Value::ADD, reg, 2)
- .JumpIfFalse(&start);
- // Insert dummy ops to force longer jumps
- for (int i = 0; i < 128; i++) {
- builder.LoadTrue();
- }
- // Longer jumps requiring Constant operand
{
- BytecodeLabel after_jump;
- builder.Jump(&start)
- .Bind(&after_jump)
- .JumpIfNull(&start)
- .JumpIfUndefined(&start)
- .JumpIfNotHole(&start);
- // Perform an operation that returns boolean value to
- // generate JumpIfTrue/False
- builder.CompareOperation(Token::Value::EQ, reg)
- .JumpIfTrue(&start)
- .CompareOperation(Token::Value::EQ, reg)
- .JumpIfFalse(&start);
- // Perform an operation that returns a non-boolean operation to
- // generate JumpIfToBooleanTrue/False.
+ BytecodeLabel after_jump1, after_jump2;
builder.BinaryOperation(Token::Value::ADD, reg, 1)
- .JumpIfTrue(&start)
+ .JumpIfTrue(&after_jump1)
+ .Bind(&after_jump1)
.BinaryOperation(Token::Value::ADD, reg, 2)
- .JumpIfFalse(&start);
+ .JumpIfFalse(&after_jump2)
+ .Bind(&after_jump2);
}
// Emit stack check bytecode.
builder.StackCheck(0);
- // Emit an OSR poll bytecode.
- builder.OsrPoll(1);
-
// Emit throw and re-throw in it's own basic block so that the rest of the
// code isn't omitted due to being dead.
BytecodeLabel after_throw;
@@ -272,13 +262,9 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
BytecodeLabel after_rethrow;
builder.ReThrow().Bind(&after_rethrow);
- builder.ForInPrepare(reg, reg)
- .ForInDone(reg, reg)
- .ForInNext(reg, reg, reg, 1)
- .ForInStep(reg);
- builder.ForInPrepare(reg, wide)
- .ForInDone(reg, other)
- .ForInNext(wide, wide, wide, 1024)
+ builder.ForInPrepare(reg, triple)
+ .ForInContinue(reg, reg)
+ .ForInNext(reg, reg, pair, 1)
.ForInStep(reg);
// Wide constant pool loads
@@ -308,7 +294,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreKeyedProperty(reg, reg, 2056, LanguageMode::STRICT);
// Emit wide context operations.
- builder.LoadContextSlot(reg, 1024).StoreContextSlot(reg, 1024);
+ builder.LoadContextSlot(reg, 1024, 0).StoreContextSlot(reg, 1024, 0);
// Emit wide load / store lookup slots.
builder.LoadLookupSlot(wide_name, TypeofMode::NOT_INSIDE_TYPEOF)
@@ -322,7 +308,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreAccumulatorInRegister(reg)
.LoadKeyedProperty(reg, 0)
.StoreAccumulatorInRegister(reg)
- .LoadContextSlot(reg, 1)
+ .LoadContextSlot(reg, 1, 0)
.StoreAccumulatorInRegister(reg)
.LoadGlobal(0, TypeofMode::NOT_INSIDE_TYPEOF)
.StoreAccumulatorInRegister(reg)
@@ -338,48 +324,33 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CreateArrayLiteral(factory->NewFixedArray(2), 0, 0)
.CreateObjectLiteral(factory->NewFixedArray(2), 0, 0, reg);
- // Longer jumps requiring ConstantWide operand
- {
- BytecodeLabel after_jump;
- builder.Jump(&start)
- .Bind(&after_jump)
- .JumpIfNull(&start)
- .JumpIfUndefined(&start)
- .JumpIfNotHole(&start);
- }
-
- // Perform an operation that returns boolean value to
- // generate JumpIfTrue/False
- builder.CompareOperation(Token::Value::EQ, reg)
- .JumpIfTrue(&start)
- .CompareOperation(Token::Value::EQ, reg)
- .JumpIfFalse(&start);
-
- // Perform an operation that returns a non-boolean operation to
- // generate JumpIfToBooleanTrue/False.
- builder.BinaryOperation(Token::Value::ADD, reg, 1)
- .JumpIfTrue(&start)
- .BinaryOperation(Token::Value::ADD, reg, 2)
- .JumpIfFalse(&start);
-
// Emit generator operations
builder.SuspendGenerator(reg)
.ResumeGenerator(reg);
// Intrinsics handled by the interpreter.
- builder.CallRuntime(Runtime::kInlineIsArray, reg, 1)
- .CallRuntime(Runtime::kInlineIsArray, wide, 1);
+ builder.CallRuntime(Runtime::kInlineIsArray, reg_list);
+ // Emit debugger bytecode.
builder.Debugger();
+
+ // Insert dummy ops to force longer jumps.
+ for (int i = 0; i < 128; i++) {
+ builder.LoadTrue();
+ }
+
+ // Bind labels for long jumps at the very end.
for (size_t i = 0; i < arraysize(end); i++) {
builder.Bind(&end[i]);
}
+
+ // Return must be the last instruction.
builder.Return();
// Generate BytecodeArray.
Handle<BytecodeArray> the_array = builder.ToBytecodeArray(isolate());
CHECK_EQ(the_array->frame_size(),
- builder.fixed_and_temporary_register_count() * kPointerSize);
+ builder.total_register_count() * kPointerSize);
// Build scorecard of bytecodes encountered in the BytecodeArray.
std::vector<int> scorecard(Bytecodes::ToByte(Bytecode::kLast) + 1);
@@ -448,21 +419,18 @@ TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
for (int contexts = 0; contexts < 4; contexts++) {
for (int temps = 0; temps < 3; temps++) {
BytecodeArrayBuilder builder(isolate(), zone(), 0, contexts, locals);
- BytecodeRegisterAllocator temporaries(
- zone(), builder.temporary_register_allocator());
+ BytecodeRegisterAllocator* allocator(builder.register_allocator());
for (int i = 0; i < locals + contexts; i++) {
builder.LoadLiteral(Smi::FromInt(0));
builder.StoreAccumulatorInRegister(Register(i));
}
for (int i = 0; i < temps; i++) {
+ Register temp = allocator->NewRegister();
builder.LoadLiteral(Smi::FromInt(0));
- builder.StoreAccumulatorInRegister(temporaries.NewRegister());
- }
- if (temps > 0) {
+ builder.StoreAccumulatorInRegister(temp);
// Ensure temporaries are used so not optimized away by the
// register optimizer.
- builder.New(Register(locals + contexts), Register(locals + contexts),
- static_cast<size_t>(temps));
+ builder.ConvertAccumulatorToName(temp);
}
builder.Return();
@@ -498,30 +466,6 @@ TEST_F(BytecodeArrayBuilderTest, Parameters) {
}
-TEST_F(BytecodeArrayBuilderTest, RegisterType) {
- CanonicalHandleScope canonical(isolate());
- BytecodeArrayBuilder builder(isolate(), zone(), 10, 0, 3);
- BytecodeRegisterAllocator register_allocator(
- zone(), builder.temporary_register_allocator());
- Register temp0 = register_allocator.NewRegister();
- Register param0(builder.Parameter(0));
- Register param9(builder.Parameter(9));
- Register temp1 = register_allocator.NewRegister();
- Register reg0(0);
- Register reg1(1);
- Register reg2(2);
- Register temp2 = register_allocator.NewRegister();
- CHECK_EQ(builder.RegisterIsParameterOrLocal(temp0), false);
- CHECK_EQ(builder.RegisterIsParameterOrLocal(temp1), false);
- CHECK_EQ(builder.RegisterIsParameterOrLocal(temp2), false);
- CHECK_EQ(builder.RegisterIsParameterOrLocal(param0), true);
- CHECK_EQ(builder.RegisterIsParameterOrLocal(param9), true);
- CHECK_EQ(builder.RegisterIsParameterOrLocal(reg0), true);
- CHECK_EQ(builder.RegisterIsParameterOrLocal(reg1), true);
- CHECK_EQ(builder.RegisterIsParameterOrLocal(reg2), true);
-}
-
-
TEST_F(BytecodeArrayBuilderTest, Constants) {
CanonicalHandleScope canonical(isolate());
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
@@ -563,9 +507,9 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
builder.Jump(&near0)
.Bind(&after_jump0)
- .CompareOperation(Token::Value::EQ, reg)
+ .CompareOperation(Token::Value::EQ, reg, 1)
.JumpIfTrue(&near1)
- .CompareOperation(Token::Value::EQ, reg)
+ .CompareOperation(Token::Value::EQ, reg, 2)
.JumpIfFalse(&near2)
.BinaryOperation(Token::Value::ADD, reg, 1)
.JumpIfTrue(&near3)
@@ -578,26 +522,26 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
.Bind(&near4)
.Jump(&far0)
.Bind(&after_jump1)
- .CompareOperation(Token::Value::EQ, reg)
+ .CompareOperation(Token::Value::EQ, reg, 3)
.JumpIfTrue(&far1)
- .CompareOperation(Token::Value::EQ, reg)
+ .CompareOperation(Token::Value::EQ, reg, 4)
.JumpIfFalse(&far2)
.BinaryOperation(Token::Value::ADD, reg, 3)
.JumpIfTrue(&far3)
.BinaryOperation(Token::Value::ADD, reg, 4)
.JumpIfFalse(&far4);
- for (int i = 0; i < kFarJumpDistance - 20; i++) {
+ for (int i = 0; i < kFarJumpDistance - 22; i++) {
builder.Debugger();
}
builder.Bind(&far0).Bind(&far1).Bind(&far2).Bind(&far3).Bind(&far4);
builder.Return();
Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
- DCHECK_EQ(array->length(), 40 + kFarJumpDistance - 20 + 1);
+ DCHECK_EQ(array->length(), 44 + kFarJumpDistance - 22 + 1);
BytecodeArrayIterator iterator(array);
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
- CHECK_EQ(iterator.GetImmediateOperand(0), 20);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 22);
iterator.Advance();
// Ignore compare operation.
@@ -605,7 +549,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.current_bytecode(),
PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
- CHECK_EQ(iterator.GetImmediateOperand(0), 16);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 17);
iterator.Advance();
// Ignore compare operation.
@@ -641,7 +585,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.current_bytecode(),
PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrueConstant));
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
- Smi::FromInt(kFarJumpDistance - 4));
+ Smi::FromInt(kFarJumpDistance - 5));
iterator.Advance();
// Ignore compare operation.
@@ -650,7 +594,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.current_bytecode(),
PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalseConstant));
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
- Smi::FromInt(kFarJumpDistance - 8));
+ Smi::FromInt(kFarJumpDistance - 10));
iterator.Advance();
// Ignore add operation.
@@ -658,7 +602,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrueConstant);
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
- Smi::FromInt(kFarJumpDistance - 13));
+ Smi::FromInt(kFarJumpDistance - 15));
iterator.Advance();
// Ignore add operation.
@@ -667,7 +611,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.current_bytecode(),
Bytecode::kJumpIfToBooleanFalseConstant);
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
- Smi::FromInt(kFarJumpDistance - 18));
+ Smi::FromInt(kFarJumpDistance - 20));
iterator.Advance();
}
@@ -678,24 +622,11 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
Register reg(0);
- BytecodeLabel label0, label1, label2, label3, label4;
- builder.Bind(&label0)
- .Jump(&label0)
- .Bind(&label1)
- .CompareOperation(Token::Value::EQ, reg)
- .JumpIfTrue(&label1)
- .Bind(&label2)
- .CompareOperation(Token::Value::EQ, reg)
- .JumpIfFalse(&label2)
- .Bind(&label3)
- .BinaryOperation(Token::Value::ADD, reg, 1)
- .JumpIfTrue(&label3)
- .Bind(&label4)
- .BinaryOperation(Token::Value::ADD, reg, 2)
- .JumpIfFalse(&label4);
- for (int i = 0; i < 62; i++) {
+ BytecodeLabel label0;
+ builder.Bind(&label0).JumpLoop(&label0, 0);
+ for (int i = 0; i < 42; i++) {
BytecodeLabel after_jump;
- builder.Jump(&label4).Bind(&after_jump);
+ builder.JumpLoop(&label0, 0).Bind(&after_jump);
}
// Add padding to force wide backwards jumps.
@@ -703,51 +634,21 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
builder.Debugger();
}
- builder.BinaryOperation(Token::Value::ADD, reg, 1).JumpIfFalse(&label4);
- builder.BinaryOperation(Token::Value::ADD, reg, 2).JumpIfTrue(&label3);
- builder.CompareOperation(Token::Value::EQ, reg).JumpIfFalse(&label2);
- builder.CompareOperation(Token::Value::EQ, reg).JumpIfTrue(&label1);
- builder.Jump(&label0);
+ builder.JumpLoop(&label0, 0);
BytecodeLabel end;
builder.Bind(&end);
builder.Return();
Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
BytecodeArrayIterator iterator(array);
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
CHECK_EQ(iterator.GetImmediateOperand(0), 0);
iterator.Advance();
- // Ignore compare operation.
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(),
- PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetImmediateOperand(0), -2);
- iterator.Advance();
- // Ignore compare operation.
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(),
- PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetImmediateOperand(0), -2);
- iterator.Advance();
- // Ignore binary operation.
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrue);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetImmediateOperand(0), -3);
- iterator.Advance();
- // Ignore binary operation.
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanFalse);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetImmediateOperand(0), -3);
- iterator.Advance();
- for (int i = 0; i < 62; i++) {
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ for (int i = 0; i < 42; i++) {
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- // offset of 5 (3 for binary operation and 2 for jump)
- CHECK_EQ(iterator.GetImmediateOperand(0), -i * 2 - 5);
+ // offset of 3 (because kJumpLoop takes two immediate operands)
+ CHECK_EQ(iterator.GetImmediateOperand(0), -i * 3 - 3);
iterator.Advance();
}
// Check padding to force wide backwards jumps.
@@ -755,35 +656,9 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
iterator.Advance();
}
- // Ignore binary operation.
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanFalse);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetImmediateOperand(0), -389);
- iterator.Advance();
- // Ignore binary operation.
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrue);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetImmediateOperand(0), -401);
- iterator.Advance();
- // Ignore compare operation.
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(),
- PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetImmediateOperand(0), -411);
- iterator.Advance();
- // Ignore compare operation.
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(),
- PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetImmediateOperand(0), -421);
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
- CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetImmediateOperand(0), -427);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -386);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
iterator.Advance();
@@ -801,9 +676,9 @@ TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
builder.Jump(&label)
.Bind(&label)
- .Jump(&label)
+ .JumpLoop(&label, 0)
.Bind(&after_jump0)
- .Jump(&label)
+ .JumpLoop(&label, 0)
.Bind(&after_jump1)
.Return();
@@ -812,11 +687,11 @@ TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
CHECK_EQ(iterator.GetImmediateOperand(0), 2);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
CHECK_EQ(iterator.GetImmediateOperand(0), 0);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
- CHECK_EQ(iterator.GetImmediateOperand(0), -2);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -3);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
iterator.Advance();
@@ -833,9 +708,9 @@ TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
BytecodeLabel label, after_jump0, after_jump1;
builder.Jump(&label)
.Bind(&label)
- .Jump(&label)
+ .JumpLoop(&label, 0)
.Bind(&after_jump0)
- .Jump(&label)
+ .JumpLoop(&label, 0)
.Bind(&after_jump1);
}
builder.Return();
@@ -846,11 +721,11 @@ TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
CHECK_EQ(iterator.GetImmediateOperand(0), 2);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
CHECK_EQ(iterator.GetImmediateOperand(0), 0);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
- CHECK_EQ(iterator.GetImmediateOperand(0), -2);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -3);
iterator.Advance();
}
CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index b844180dc0..07ecefb529 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -31,6 +31,8 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
Smi* smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
+ RegisterList pair(0, 2);
+ RegisterList triple(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
Handle<String> name = factory->NewStringFromStaticChars("abc");
int name_index = 2;
@@ -54,9 +56,9 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
.LoadNamedProperty(reg_1, name, feedback_slot)
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
- .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, 1, reg_0)
- .ForInPrepare(reg_0, reg_0)
- .CallRuntime(Runtime::kLoadIC_Miss, reg_0, 1)
+ .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
+ .ForInPrepare(reg_0, triple)
+ .CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.LoadGlobal(0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
.Return();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index 9681612ac4..0bb0f9757a 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -28,86 +28,78 @@ class BytecodeArrayWriterUnittest : public TestWithIsolateAndZone {
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS) {}
~BytecodeArrayWriterUnittest() override {}
- void Write(BytecodeNode* node, const BytecodeSourceInfo& info);
- void Write(Bytecode bytecode,
- const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ void Write(Bytecode bytecode, BytecodeSourceInfo info = BytecodeSourceInfo());
void Write(Bytecode bytecode, uint32_t operand0,
- const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ BytecodeSourceInfo info = BytecodeSourceInfo());
void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-
- const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ BytecodeSourceInfo info = BytecodeSourceInfo());
void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2,
- const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ uint32_t operand2, BytecodeSourceInfo info = BytecodeSourceInfo());
void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2, uint32_t operand3,
- const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ BytecodeSourceInfo info = BytecodeSourceInfo());
void WriteJump(Bytecode bytecode, BytecodeLabel* label,
-
- const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ BytecodeSourceInfo info = BytecodeSourceInfo());
+ void WriteJumpLoop(Bytecode bytecode, BytecodeLabel* label, int depth,
+ BytecodeSourceInfo info = BytecodeSourceInfo());
BytecodeArrayWriter* writer() { return &bytecode_array_writer_; }
ZoneVector<unsigned char>* bytecodes() { return writer()->bytecodes(); }
SourcePositionTableBuilder* source_position_table_builder() {
return writer()->source_position_table_builder();
}
- int max_register_count() { return writer()->max_register_count(); }
private:
ConstantArrayBuilder constant_array_builder_;
BytecodeArrayWriter bytecode_array_writer_;
};
-void BytecodeArrayWriterUnittest::Write(BytecodeNode* node,
- const BytecodeSourceInfo& info) {
- if (info.is_valid()) {
- node->source_info().Clone(info);
- }
- writer()->Write(node);
-}
-
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode,
- const BytecodeSourceInfo& info) {
- BytecodeNode node(bytecode);
- Write(&node, info);
+ BytecodeSourceInfo info) {
+ BytecodeNode node(bytecode, &info);
+ writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
- const BytecodeSourceInfo& info) {
- BytecodeNode node(bytecode, operand0);
- Write(&node, info);
+ BytecodeSourceInfo info) {
+ BytecodeNode node(bytecode, operand0, &info);
+ writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1,
- const BytecodeSourceInfo& info) {
- BytecodeNode node(bytecode, operand0, operand1);
- Write(&node, info);
+ BytecodeSourceInfo info) {
+ BytecodeNode node(bytecode, operand0, operand1, &info);
+ writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2,
- const BytecodeSourceInfo& info) {
- BytecodeNode node(bytecode, operand0, operand1, operand2);
- Write(&node, info);
+ BytecodeSourceInfo info) {
+ BytecodeNode node(bytecode, operand0, operand1, operand2, &info);
+ writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2,
uint32_t operand3,
- const BytecodeSourceInfo& info) {
- BytecodeNode node(bytecode, operand0, operand1, operand2, operand3);
- Write(&node, info);
+ BytecodeSourceInfo info) {
+ BytecodeNode node(bytecode, operand0, operand1, operand2, operand3, &info);
+ writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::WriteJump(Bytecode bytecode,
BytecodeLabel* label,
- const BytecodeSourceInfo& info) {
- BytecodeNode node(bytecode, 0);
- if (info.is_valid()) {
- node.source_info().Clone(info);
- }
+ BytecodeSourceInfo info) {
+ BytecodeNode node(bytecode, 0, &info);
+ writer()->WriteJump(&node, label);
+}
+
+void BytecodeArrayWriterUnittest::WriteJumpLoop(Bytecode bytecode,
+ BytecodeLabel* label, int depth,
+ BytecodeSourceInfo info) {
+ BytecodeNode node(bytecode, 0, depth, &info);
writer()->WriteJump(&node, label);
}
@@ -116,19 +108,15 @@ TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
Write(Bytecode::kStackCheck, {10, false});
CHECK_EQ(bytecodes()->size(), 1);
- CHECK_EQ(max_register_count(), 0);
Write(Bytecode::kLdaSmi, 127, {55, true});
CHECK_EQ(bytecodes()->size(), 3);
- CHECK_EQ(max_register_count(), 0);
Write(Bytecode::kLdar, Register(200).ToOperand());
CHECK_EQ(bytecodes()->size(), 7);
- CHECK_EQ(max_register_count(), 201);
Write(Bytecode::kReturn, {70, true});
CHECK_EQ(bytecodes()->size(), 8);
- CHECK_EQ(max_register_count(), 201);
static const uint8_t bytes[] = {B(StackCheck), B(LdaSmi), U8(127), B(Wide),
B(Ldar), R16(200), B(Return)};
@@ -160,15 +148,15 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
// clang-format off
/* 0 30 E> */ B(StackCheck),
/* 1 42 S> */ B(LdaConstant), U8(0),
- /* 3 42 E> */ B(Star), R8(1),
- /* 5 68 S> */ B(JumpIfUndefined), U8(38),
- /* 7 */ B(JumpIfNull), U8(36),
+ /* 3 42 E> */ B(Add), R8(1), U8(1),
+ /* 5 68 S> */ B(JumpIfUndefined), U8(39),
+ /* 7 */ B(JumpIfNull), U8(37),
/* 9 */ B(ToObject), R8(3),
/* 11 */ B(ForInPrepare), R8(3), R8(4),
/* 14 */ B(LdaZero),
/* 15 */ B(Star), R8(7),
- /* 17 63 S> */ B(ForInDone), R8(7), R8(6),
- /* 20 */ B(JumpIfTrue), U8(23),
+ /* 17 63 S> */ B(ForInContinue), R8(7), R8(6),
+ /* 20 */ B(JumpIfFalse), U8(24),
/* 22 */ B(ForInNext), R8(3), R8(7), R8(4), U8(1),
/* 27 */ B(JumpIfUndefined), U8(10),
/* 29 */ B(Star), R8(0),
@@ -178,38 +166,31 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
/* 36 85 S> */ B(Return),
/* 37 */ B(ForInStep), R8(7),
/* 39 */ B(Star), R8(7),
- /* 41 */ B(Jump), U8(-24),
- /* 43 */ B(LdaUndefined),
- /* 44 85 S> */ B(Return),
+ /* 41 */ B(JumpLoop), U8(-24), U8(0),
+ /* 44 */ B(LdaUndefined),
+ /* 45 85 S> */ B(Return),
// clang-format on
};
static const PositionTableEntry expected_positions[] = {
- {0, 30, false}, {1, 42, true}, {3, 42, false}, {5, 68, true},
- {17, 63, true}, {31, 54, false}, {36, 85, true}, {44, 85, true}};
+ {0, 30, false}, {1, 42, true}, {3, 42, false}, {6, 68, true},
+ {18, 63, true}, {32, 54, false}, {37, 85, true}, {46, 85, true}};
BytecodeLabel back_jump, jump_for_in, jump_end_1, jump_end_2, jump_end_3;
#define R(i) static_cast<uint32_t>(Register(i).ToOperand())
Write(Bytecode::kStackCheck, {30, false});
Write(Bytecode::kLdaConstant, U8(0), {42, true});
- CHECK_EQ(max_register_count(), 0);
- Write(Bytecode::kStar, R(1), {42, false});
- CHECK_EQ(max_register_count(), 2);
+ Write(Bytecode::kAdd, R(1), U8(1), {42, false});
WriteJump(Bytecode::kJumpIfUndefined, &jump_end_1, {68, true});
WriteJump(Bytecode::kJumpIfNull, &jump_end_2);
Write(Bytecode::kToObject, R(3));
- CHECK_EQ(max_register_count(), 4);
Write(Bytecode::kForInPrepare, R(3), R(4));
- CHECK_EQ(max_register_count(), 7);
Write(Bytecode::kLdaZero);
- CHECK_EQ(max_register_count(), 7);
Write(Bytecode::kStar, R(7));
- CHECK_EQ(max_register_count(), 8);
writer()->BindLabel(&back_jump);
- Write(Bytecode::kForInDone, R(7), R(6), {63, true});
- CHECK_EQ(max_register_count(), 8);
- WriteJump(Bytecode::kJumpIfTrue, &jump_end_3);
+ Write(Bytecode::kForInContinue, R(7), R(6), {63, true});
+ WriteJump(Bytecode::kJumpIfFalse, &jump_end_3);
Write(Bytecode::kForInNext, R(3), R(7), R(4), U8(1));
WriteJump(Bytecode::kJumpIfUndefined, &jump_for_in);
Write(Bytecode::kStar, R(0));
@@ -220,13 +201,12 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
writer()->BindLabel(&jump_for_in);
Write(Bytecode::kForInStep, R(7));
Write(Bytecode::kStar, R(7));
- WriteJump(Bytecode::kJump, &back_jump);
+ WriteJumpLoop(Bytecode::kJumpLoop, &back_jump, 0);
writer()->BindLabel(&jump_end_1);
writer()->BindLabel(&jump_end_2);
writer()->BindLabel(&jump_end_3);
Write(Bytecode::kLdaUndefined);
Write(Bytecode::kReturn, {85, true});
- CHECK_EQ(max_register_count(), 8);
#undef R
CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
diff --git a/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc
index 2b2171bc78..4cb5e69f4e 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc
@@ -16,7 +16,8 @@ namespace interpreter {
class BytecodeDeadCodeOptimizerTest : public BytecodePipelineStage,
public TestWithIsolateAndZone {
public:
- BytecodeDeadCodeOptimizerTest() : dead_code_optimizer_(this) {}
+ BytecodeDeadCodeOptimizerTest()
+ : dead_code_optimizer_(this), last_written_(Bytecode::kIllegal) {}
~BytecodeDeadCodeOptimizerTest() override {}
void Write(BytecodeNode* node) override {
@@ -56,7 +57,7 @@ TEST_F(BytecodeDeadCodeOptimizerTest, LiveCodeKept) {
CHECK_EQ(add, last_written());
BytecodeLabel target;
- BytecodeNode jump(Bytecode::kJump, 0);
+ BytecodeNode jump(Bytecode::kJump, 0, nullptr);
optimizer()->WriteJump(&jump, &target);
CHECK_EQ(write_count(), 2);
CHECK_EQ(jump, last_written());
@@ -100,7 +101,7 @@ TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterReThrowEliminated) {
TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterJumpEliminated) {
BytecodeLabel target;
- BytecodeNode jump(Bytecode::kJump, 0);
+ BytecodeNode jump(Bytecode::kJump, 0, nullptr);
optimizer()->WriteJump(&jump, &target);
CHECK_EQ(write_count(), 1);
CHECK_EQ(jump, last_written());
@@ -118,7 +119,7 @@ TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeStillDeadAfterConditinalJump) {
CHECK_EQ(ret, last_written());
BytecodeLabel target;
- BytecodeNode jump(Bytecode::kJumpIfTrue, 0);
+ BytecodeNode jump(Bytecode::kJumpIfTrue, 0, nullptr);
optimizer()->WriteJump(&jump, &target);
CHECK_EQ(write_count(), 1);
CHECK_EQ(ret, last_written());
diff --git a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
index 7d61f6a1b3..1b0af73e05 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
@@ -37,10 +37,18 @@ TEST(BytecodeDecoder, DecodeBytecodeAndOperands) {
"LdaSmi.ExtraWide [-100000]"},
{{B(Star), R8(5)}, 2, 0, " Star r5"},
{{B(Wide), B(Star), R16(136)}, 4, 0, " Star.Wide r136"},
- {{B(Wide), B(Call), R16(134), R16(135), U16(2), U16(177)},
+ {{B(Wide), B(Call), R16(134), R16(135), U16(10), U16(177)},
10,
0,
- "Call.Wide r134, r135, #2, [177]"},
+ "Call.Wide r134, r135-r144, [177]"},
+ {{B(ForInPrepare), R8(10), R8(11)},
+ 3,
+ 0,
+ " ForInPrepare r10, r11-r13"},
+ {{B(CallRuntime), U16(134), R8(0), U8(0)},
+ 5,
+ 0,
+ " CallRuntime [134], r0-r0"},
{{B(Ldar),
static_cast<uint8_t>(Register::FromParameterIndex(2, 3).ToOperand())},
2,
diff --git a/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc
index c23c89b433..d7beb47a01 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc
@@ -18,7 +18,8 @@ namespace interpreter {
class BytecodePeepholeOptimizerTest : public BytecodePipelineStage,
public TestWithIsolateAndZone {
public:
- BytecodePeepholeOptimizerTest() : peephole_optimizer_(this) {}
+ BytecodePeepholeOptimizerTest()
+ : peephole_optimizer_(this), last_written_(Bytecode::kIllegal) {}
~BytecodePeepholeOptimizerTest() override {}
void Reset() {
@@ -71,7 +72,7 @@ TEST_F(BytecodePeepholeOptimizerTest, FlushOnJump) {
CHECK_EQ(write_count(), 0);
BytecodeLabel target;
- BytecodeNode jump(Bytecode::kJump, 0);
+ BytecodeNode jump(Bytecode::kJump, 0, nullptr);
optimizer()->WriteJump(&jump, &target);
CHECK_EQ(write_count(), 2);
CHECK_EQ(jump, last_written());
@@ -103,8 +104,8 @@ TEST_F(BytecodePeepholeOptimizerTest, ElideEmptyNop) {
}
TEST_F(BytecodePeepholeOptimizerTest, ElideExpressionNop) {
- BytecodeNode nop(Bytecode::kNop);
- nop.source_info().MakeExpressionPosition(3);
+ BytecodeSourceInfo source_info(3, false);
+ BytecodeNode nop(Bytecode::kNop, &source_info);
optimizer()->Write(&nop);
BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
optimizer()->Write(&add);
@@ -114,11 +115,11 @@ TEST_F(BytecodePeepholeOptimizerTest, ElideExpressionNop) {
}
TEST_F(BytecodePeepholeOptimizerTest, KeepStatementNop) {
- BytecodeNode nop(Bytecode::kNop);
- nop.source_info().MakeStatementPosition(3);
+ BytecodeSourceInfo source_info(3, true);
+ BytecodeNode nop(Bytecode::kNop, &source_info);
optimizer()->Write(&nop);
- BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
- add.source_info().MakeExpressionPosition(3);
+ source_info.MakeExpressionPosition(3);
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1, &source_info);
optimizer()->Write(&add);
Flush();
CHECK_EQ(write_count(), 2);
@@ -204,8 +205,8 @@ TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRx) {
TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatement) {
BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
- BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand());
- second.source_info().MakeStatementPosition(0);
+ BytecodeSourceInfo source_info(3, true);
+ BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand(), &source_info);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
@@ -220,9 +221,9 @@ TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatement) {
TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatementStarRy) {
BytecodeLabel label;
BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
- BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand());
+ BytecodeSourceInfo source_info(0, true);
+ BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand(), &source_info);
BytecodeNode third(Bytecode::kStar, Register(3).ToOperand());
- second.source_info().MakeStatementPosition(0);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
@@ -277,8 +278,8 @@ TEST_F(BytecodePeepholeOptimizerTest, LdaTrueLdaFalse) {
}
TEST_F(BytecodePeepholeOptimizerTest, LdaTrueStatementLdaFalse) {
- BytecodeNode first(Bytecode::kLdaTrue);
- first.source_info().MakeExpressionPosition(3);
+ BytecodeSourceInfo source_info(3, true);
+ BytecodeNode first(Bytecode::kLdaTrue, &source_info);
BytecodeNode second(Bytecode::kLdaFalse);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
@@ -287,13 +288,13 @@ TEST_F(BytecodePeepholeOptimizerTest, LdaTrueStatementLdaFalse) {
Flush();
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), second);
- CHECK(second.source_info().is_expression());
+ CHECK(second.source_info().is_statement());
CHECK_EQ(second.source_info().source_position(), 3);
}
TEST_F(BytecodePeepholeOptimizerTest, NopStackCheck) {
BytecodeNode first(Bytecode::kNop);
- BytecodeNode second(Bytecode::kStackCheck);
+ BytecodeNode second(Bytecode::kStackCheck, nullptr);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
@@ -304,8 +305,8 @@ TEST_F(BytecodePeepholeOptimizerTest, NopStackCheck) {
}
TEST_F(BytecodePeepholeOptimizerTest, NopStatementStackCheck) {
- BytecodeNode first(Bytecode::kNop);
- first.source_info().MakeExpressionPosition(3);
+ BytecodeSourceInfo source_info(3, true);
+ BytecodeNode first(Bytecode::kNop, &source_info);
BytecodeNode second(Bytecode::kStackCheck);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
@@ -313,9 +314,9 @@ TEST_F(BytecodePeepholeOptimizerTest, NopStatementStackCheck) {
CHECK_EQ(write_count(), 0);
Flush();
CHECK_EQ(write_count(), 1);
- second.source_info().MakeExpressionPosition(
- first.source_info().source_position());
- CHECK_EQ(last_written(), second);
+ BytecodeSourceInfo expected_source_info(3, true);
+ BytecodeNode expected(Bytecode::kStackCheck, &expected_source_info);
+ CHECK_EQ(last_written(), expected);
}
// Tests covering BytecodePeepholeOptimizer::UpdateLastAndCurrentBytecodes().
@@ -352,7 +353,8 @@ TEST_F(BytecodePeepholeOptimizerTest, MergeLdaKeyedPropertyStar) {
static_cast<uint32_t>(Register(1).ToOperand())};
const int expected_operand_count = static_cast<int>(arraysize(operands));
- BytecodeNode first(Bytecode::kLdaKeyedProperty, operands[0], operands[1]);
+ BytecodeNode first(Bytecode::kLdaKeyedProperty, operands[0], operands[1],
+ nullptr);
BytecodeNode second(Bytecode::kStar, operands[2]);
BytecodeNode third(Bytecode::kReturn);
optimizer()->Write(&first);
@@ -398,11 +400,13 @@ TEST_F(BytecodePeepholeOptimizerTest, MergeLdaGlobalStar) {
TEST_F(BytecodePeepholeOptimizerTest, MergeLdaContextSlotStar) {
const uint32_t operands[] = {
static_cast<uint32_t>(Register(200000).ToOperand()), 55005500,
+ static_cast<uint32_t>(Register(0).ToOperand()),
static_cast<uint32_t>(Register(1).ToOperand())};
const int expected_operand_count = static_cast<int>(arraysize(operands));
- BytecodeNode first(Bytecode::kLdaContextSlot, operands[0], operands[1]);
- BytecodeNode second(Bytecode::kStar, operands[2]);
+ BytecodeNode first(Bytecode::kLdaContextSlot, operands[0], operands[1],
+ operands[2]);
+ BytecodeNode second(Bytecode::kStar, operands[3]);
BytecodeNode third(Bytecode::kReturn);
optimizer()->Write(&first);
optimizer()->Write(&second);
@@ -455,8 +459,8 @@ TEST_F(BytecodePeepholeOptimizerTest, MergeLdaSmiWithBinaryOp) {
for (auto operator_replacement : operator_replacement_pairs) {
uint32_t imm_operand = 17;
- BytecodeNode first(Bytecode::kLdaSmi, imm_operand);
- first.source_info().Clone({3, true});
+ BytecodeSourceInfo source_info(3, true);
+ BytecodeNode first(Bytecode::kLdaSmi, imm_operand, &source_info);
uint32_t reg_operand = Register(0).ToOperand();
uint32_t idx_operand = 1;
BytecodeNode second(operator_replacement[0], reg_operand, idx_operand);
@@ -485,11 +489,11 @@ TEST_F(BytecodePeepholeOptimizerTest, NotMergingLdaSmiWithBinaryOp) {
for (auto operator_replacement : operator_replacement_pairs) {
uint32_t imm_operand = 17;
- BytecodeNode first(Bytecode::kLdaSmi, imm_operand);
- first.source_info().Clone({3, true});
+ BytecodeSourceInfo source_info(3, true);
+ BytecodeNode first(Bytecode::kLdaSmi, imm_operand, &source_info);
uint32_t reg_operand = Register(0).ToOperand();
- BytecodeNode second(operator_replacement[0], reg_operand, 1);
- second.source_info().Clone({4, true});
+ source_info.MakeStatementPosition(4);
+ BytecodeNode second(operator_replacement[0], reg_operand, 1, &source_info);
optimizer()->Write(&first);
optimizer()->Write(&second);
CHECK_EQ(last_written(), first);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc
index 663b7e54e5..4399dce6f9 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc
@@ -51,12 +51,6 @@ TEST(BytecodeSourceInfo, Operations) {
CHECK_EQ(y.is_statement(), true);
}
-TEST_F(BytecodeNodeTest, Constructor0) {
- BytecodeNode node;
- CHECK_EQ(node.bytecode(), Bytecode::kIllegal);
- CHECK(!node.source_info().is_valid());
-}
-
TEST_F(BytecodeNodeTest, Constructor1) {
BytecodeNode node(Bytecode::kLdaZero);
CHECK_EQ(node.bytecode(), Bytecode::kLdaZero);
@@ -119,21 +113,21 @@ TEST_F(BytecodeNodeTest, Equality) {
TEST_F(BytecodeNodeTest, EqualityWithSourceInfo) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeSourceInfo first_source_info(3, true);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3]);
- node.source_info().MakeStatementPosition(3);
+ operands[3], &first_source_info);
CHECK_EQ(node, node);
+ BytecodeSourceInfo second_source_info(3, true);
BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
- operands[2], operands[3]);
- other.source_info().MakeStatementPosition(3);
+ operands[2], operands[3], &second_source_info);
CHECK_EQ(node, other);
}
TEST_F(BytecodeNodeTest, NoEqualityWithDifferentSourceInfo) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeSourceInfo source_info(77, true);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3]);
- node.source_info().MakeStatementPosition(3);
+ operands[3], &source_info);
BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
operands[2], operands[3]);
CHECK_NE(node, other);
@@ -143,41 +137,39 @@ TEST_F(BytecodeNodeTest, Clone) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3]);
- BytecodeNode clone;
+ BytecodeNode clone(Bytecode::kIllegal);
clone.Clone(&node);
CHECK_EQ(clone, node);
}
TEST_F(BytecodeNodeTest, SetBytecode0) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
- BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3]);
BytecodeSourceInfo source_info(77, false);
- node.source_info().Clone(source_info);
- CHECK_EQ(node.source_info(), source_info);
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3], &source_info);
+ CHECK_EQ(node.source_info(), BytecodeSourceInfo(77, false));
- BytecodeNode clone;
+ BytecodeNode clone(Bytecode::kIllegal);
clone.Clone(&node);
clone.set_bytecode(Bytecode::kNop);
CHECK_EQ(clone.bytecode(), Bytecode::kNop);
CHECK_EQ(clone.operand_count(), 0);
- CHECK_EQ(clone.source_info(), source_info);
+ CHECK_EQ(clone.source_info(), BytecodeSourceInfo(77, false));
}
TEST_F(BytecodeNodeTest, SetBytecode1) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
- BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3]);
BytecodeSourceInfo source_info(77, false);
- node.source_info().Clone(source_info);
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3], &source_info);
- BytecodeNode clone;
+ BytecodeNode clone(Bytecode::kIllegal);
clone.Clone(&node);
clone.set_bytecode(Bytecode::kJump, 0x01aabbcc);
CHECK_EQ(clone.bytecode(), Bytecode::kJump);
CHECK_EQ(clone.operand_count(), 1);
CHECK_EQ(clone.operand(0), 0x01aabbcc);
- CHECK_EQ(clone.source_info(), source_info);
+ CHECK_EQ(clone.source_info(), BytecodeSourceInfo(77, false));
}
} // namespace interpreter
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
index d4dc111d69..f06e454cc9 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
@@ -12,199 +12,83 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class TemporaryRegisterAllocatorTest : public TestWithIsolateAndZone {
- public:
- TemporaryRegisterAllocatorTest() : allocator_(zone(), 0) {}
- ~TemporaryRegisterAllocatorTest() override {}
- TemporaryRegisterAllocator* allocator() { return &allocator_; }
-
- private:
- TemporaryRegisterAllocator allocator_;
-};
-
-TEST_F(TemporaryRegisterAllocatorTest, FirstAllocation) {
- CHECK_EQ(allocator()->allocation_count(), 0);
- int reg0_index = allocator()->BorrowTemporaryRegister();
- CHECK_EQ(reg0_index, 0);
- CHECK_EQ(allocator()->allocation_count(), 1);
- CHECK(allocator()->RegisterIsLive(Register(reg0_index)));
- allocator()->ReturnTemporaryRegister(reg0_index);
- CHECK(!allocator()->RegisterIsLive(Register(reg0_index)));
- CHECK_EQ(allocator()->allocation_count(), 1);
- CHECK(allocator()->first_temporary_register() == Register(0));
- CHECK(allocator()->last_temporary_register() == Register(0));
-}
-
-TEST_F(TemporaryRegisterAllocatorTest, SimpleAllocations) {
- for (int i = 0; i < 13; i++) {
- int reg_index = allocator()->BorrowTemporaryRegister();
- CHECK_EQ(reg_index, i);
- CHECK_EQ(allocator()->allocation_count(), i + 1);
- }
- for (int i = 0; i < 13; i++) {
- CHECK(allocator()->RegisterIsLive(Register(i)));
- allocator()->ReturnTemporaryRegister(i);
- CHECK(!allocator()->RegisterIsLive(Register(i)));
- int reg_index = allocator()->BorrowTemporaryRegister();
- CHECK_EQ(reg_index, i);
- CHECK_EQ(allocator()->allocation_count(), 13);
- }
- for (int i = 0; i < 13; i++) {
- CHECK(allocator()->RegisterIsLive(Register(i)));
- allocator()->ReturnTemporaryRegister(i);
- CHECK(!allocator()->RegisterIsLive(Register(i)));
- }
-}
-
-TEST_F(TemporaryRegisterAllocatorTest, SimpleRangeAllocation) {
- static const int kRunLength = 7;
- int start = allocator()->PrepareForConsecutiveTemporaryRegisters(kRunLength);
- CHECK(!allocator()->RegisterIsLive(Register(start)));
- for (int i = 0; i < kRunLength; i++) {
- CHECK(!allocator()->RegisterIsLive(Register(start + i)));
- allocator()->BorrowConsecutiveTemporaryRegister(start + i);
- CHECK(allocator()->RegisterIsLive(Register(start + i)));
- }
-}
-
-TEST_F(TemporaryRegisterAllocatorTest, RangeAllocationAbuttingFree) {
- static const int kFreeCount = 3;
- static const int kRunLength = 6;
-
- for (int i = 0; i < kFreeCount; i++) {
- int to_free = allocator()->BorrowTemporaryRegister();
- CHECK_EQ(to_free, i);
- }
- for (int i = 0; i < kFreeCount; i++) {
- allocator()->ReturnTemporaryRegister(i);
- }
-
- int start = allocator()->PrepareForConsecutiveTemporaryRegisters(kRunLength);
- CHECK(!allocator()->RegisterIsLive(Register(start)));
- for (int i = 0; i < kRunLength; i++) {
- CHECK(!allocator()->RegisterIsLive(Register(start + i)));
- allocator()->BorrowConsecutiveTemporaryRegister(start + i);
- CHECK(allocator()->RegisterIsLive(Register(start + i)));
- }
-}
-
-TEST_F(TemporaryRegisterAllocatorTest, RangeAllocationAbuttingHole) {
- static const int kPreAllocatedCount = 7;
- static const int kPreAllocatedFreeCount = 6;
- static const int kRunLength = 8;
-
- for (int i = 0; i < kPreAllocatedCount; i++) {
- int to_free = allocator()->BorrowTemporaryRegister();
- CHECK_EQ(to_free, i);
- }
- for (int i = 0; i < kPreAllocatedFreeCount; i++) {
- allocator()->ReturnTemporaryRegister(i);
- }
- int start = allocator()->PrepareForConsecutiveTemporaryRegisters(kRunLength);
- CHECK(!allocator()->RegisterIsLive(Register(start)));
- CHECK_EQ(start, kPreAllocatedCount);
- for (int i = 0; i < kRunLength; i++) {
- CHECK(!allocator()->RegisterIsLive(Register(start + i)));
- allocator()->BorrowConsecutiveTemporaryRegister(start + i);
- CHECK(allocator()->RegisterIsLive(Register(start + i)));
- }
-}
-
-TEST_F(TemporaryRegisterAllocatorTest, RangeAllocationAvailableInTemporaries) {
- static const int kNotRunLength = 13;
- static const int kRunLength = 8;
-
- // Allocate big batch
- for (int i = 0; i < kNotRunLength * 2 + kRunLength; i++) {
- int allocated = allocator()->BorrowTemporaryRegister();
- CHECK_EQ(allocated, i);
- }
- // Free every other register either side of target.
- for (int i = 0; i < kNotRunLength; i++) {
- if ((i & 2) == 1) {
- allocator()->ReturnTemporaryRegister(i);
- allocator()->ReturnTemporaryRegister(kNotRunLength + kRunLength + i);
- }
- }
- // Free all registers for target.
- for (int i = kNotRunLength; i < kNotRunLength + kRunLength; i++) {
- allocator()->ReturnTemporaryRegister(i);
- }
-
- int start = allocator()->PrepareForConsecutiveTemporaryRegisters(kRunLength);
- CHECK_EQ(start, kNotRunLength);
- for (int i = 0; i < kRunLength; i++) {
- CHECK(!allocator()->RegisterIsLive(Register(start + i)));
- allocator()->BorrowConsecutiveTemporaryRegister(start + i);
- CHECK(allocator()->RegisterIsLive(Register(start + i)));
- }
-}
-
-TEST_F(TemporaryRegisterAllocatorTest, NotInRange) {
- for (int i = 0; i < 10; i++) {
- int reg = allocator()->BorrowTemporaryRegisterNotInRange(2, 5);
- CHECK(reg == i || (reg > 2 && reg == i + 4));
- }
- for (int i = 0; i < 10; i++) {
- if (i < 2) {
- allocator()->ReturnTemporaryRegister(i);
- } else {
- allocator()->ReturnTemporaryRegister(i + 4);
- }
- }
- int reg0 = allocator()->BorrowTemporaryRegisterNotInRange(0, 3);
- CHECK_EQ(reg0, 4);
- int reg1 = allocator()->BorrowTemporaryRegisterNotInRange(3, 10);
- CHECK_EQ(reg1, 2);
- int reg2 = allocator()->BorrowTemporaryRegisterNotInRange(2, 6);
- CHECK_EQ(reg2, 1);
- allocator()->ReturnTemporaryRegister(reg0);
- allocator()->ReturnTemporaryRegister(reg1);
- allocator()->ReturnTemporaryRegister(reg2);
-}
-
class BytecodeRegisterAllocatorTest : public TestWithIsolateAndZone {
public:
- BytecodeRegisterAllocatorTest() {}
+ BytecodeRegisterAllocatorTest() : allocator_(0) {}
~BytecodeRegisterAllocatorTest() override {}
-};
-
-TEST_F(BytecodeRegisterAllocatorTest, TemporariesRecycled) {
- BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
- int first;
- {
- BytecodeRegisterAllocator allocator(zone(),
- builder.temporary_register_allocator());
- first = allocator.NewRegister().index();
- allocator.NewRegister();
- allocator.NewRegister();
- allocator.NewRegister();
- }
+ BytecodeRegisterAllocator* allocator() { return &allocator_; }
- int second;
- {
- BytecodeRegisterAllocator allocator(zone(),
- builder.temporary_register_allocator());
- second = allocator.NewRegister().index();
- }
+ private:
+ BytecodeRegisterAllocator allocator_;
+};
- CHECK_EQ(first, second);
+TEST_F(BytecodeRegisterAllocatorTest, SimpleAllocations) {
+ CHECK_EQ(allocator()->maximum_register_count(), 0);
+ Register reg0 = allocator()->NewRegister();
+ CHECK_EQ(reg0.index(), 0);
+ CHECK_EQ(allocator()->maximum_register_count(), 1);
+ CHECK_EQ(allocator()->next_register_index(), 1);
+ CHECK(allocator()->RegisterIsLive(reg0));
+
+ allocator()->ReleaseRegisters(0);
+ CHECK(!allocator()->RegisterIsLive(reg0));
+ CHECK_EQ(allocator()->maximum_register_count(), 1);
+ CHECK_EQ(allocator()->next_register_index(), 0);
+
+ reg0 = allocator()->NewRegister();
+ Register reg1 = allocator()->NewRegister();
+ CHECK_EQ(reg0.index(), 0);
+ CHECK_EQ(reg1.index(), 1);
+ CHECK(allocator()->RegisterIsLive(reg0));
+ CHECK(allocator()->RegisterIsLive(reg1));
+ CHECK_EQ(allocator()->maximum_register_count(), 2);
+ CHECK_EQ(allocator()->next_register_index(), 2);
+
+ allocator()->ReleaseRegisters(1);
+ CHECK(allocator()->RegisterIsLive(reg0));
+ CHECK(!allocator()->RegisterIsLive(reg1));
+ CHECK_EQ(allocator()->maximum_register_count(), 2);
+ CHECK_EQ(allocator()->next_register_index(), 1);
}
-TEST_F(BytecodeRegisterAllocatorTest, ConsecutiveRegisters) {
- BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
- BytecodeRegisterAllocator allocator(zone(),
- builder.temporary_register_allocator());
- allocator.PrepareForConsecutiveAllocations(4);
- Register reg0 = allocator.NextConsecutiveRegister();
- Register other = allocator.NewRegister();
- Register reg1 = allocator.NextConsecutiveRegister();
- Register reg2 = allocator.NextConsecutiveRegister();
- Register reg3 = allocator.NextConsecutiveRegister();
- USE(other);
-
- CHECK(Register::AreContiguous(reg0, reg1, reg2, reg3));
+TEST_F(BytecodeRegisterAllocatorTest, RegisterListAllocations) {
+ CHECK_EQ(allocator()->maximum_register_count(), 0);
+ RegisterList reg_list = allocator()->NewRegisterList(3);
+ CHECK_EQ(reg_list.first_register().index(), 0);
+ CHECK_EQ(reg_list.register_count(), 3);
+ CHECK_EQ(reg_list[0].index(), 0);
+ CHECK_EQ(reg_list[1].index(), 1);
+ CHECK_EQ(reg_list[2].index(), 2);
+ CHECK_EQ(allocator()->maximum_register_count(), 3);
+ CHECK_EQ(allocator()->next_register_index(), 3);
+ CHECK(allocator()->RegisterIsLive(reg_list[2]));
+
+ Register reg = allocator()->NewRegister();
+ RegisterList reg_list_2 = allocator()->NewRegisterList(2);
+ CHECK_EQ(reg.index(), 3);
+ CHECK_EQ(reg_list_2.first_register().index(), 4);
+ CHECK_EQ(reg_list_2.register_count(), 2);
+ CHECK_EQ(reg_list_2[0].index(), 4);
+ CHECK_EQ(reg_list_2[1].index(), 5);
+ CHECK_EQ(allocator()->maximum_register_count(), 6);
+ CHECK_EQ(allocator()->next_register_index(), 6);
+ CHECK(allocator()->RegisterIsLive(reg));
+ CHECK(allocator()->RegisterIsLive(reg_list_2[1]));
+
+ allocator()->ReleaseRegisters(reg.index());
+ CHECK(!allocator()->RegisterIsLive(reg));
+ CHECK(!allocator()->RegisterIsLive(reg_list_2[0]));
+ CHECK(!allocator()->RegisterIsLive(reg_list_2[1]));
+ CHECK(allocator()->RegisterIsLive(reg_list[2]));
+ CHECK_EQ(allocator()->maximum_register_count(), 6);
+ CHECK_EQ(allocator()->next_register_index(), 3);
+
+ RegisterList empty_reg_list = allocator()->NewRegisterList(0);
+ CHECK_EQ(empty_reg_list.first_register().index(), 0);
+ CHECK_EQ(empty_reg_list.register_count(), 0);
+ CHECK_EQ(allocator()->maximum_register_count(), 6);
+ CHECK_EQ(allocator()->next_register_index(), 3);
}
} // namespace interpreter
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
index ca69026fda..ae7c159563 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
@@ -22,10 +22,10 @@ class BytecodeRegisterOptimizerTest : public BytecodePipelineStage,
~BytecodeRegisterOptimizerTest() override { delete register_allocator_; }
void Initialize(int number_of_parameters, int number_of_locals) {
- register_allocator_ =
- new TemporaryRegisterAllocator(zone(), number_of_locals);
- register_optimizer_ = new (zone()) BytecodeRegisterOptimizer(
- zone(), register_allocator_, number_of_parameters, this);
+ register_allocator_ = new BytecodeRegisterAllocator(number_of_locals);
+ register_optimizer_ = new (zone())
+ BytecodeRegisterOptimizer(zone(), register_allocator_, number_of_locals,
+ number_of_parameters, this);
}
void Write(BytecodeNode* node) override { output_.push_back(*node); }
@@ -40,15 +40,13 @@ class BytecodeRegisterOptimizerTest : public BytecodePipelineStage,
return Handle<BytecodeArray>();
}
- TemporaryRegisterAllocator* allocator() { return register_allocator_; }
+ BytecodeRegisterAllocator* allocator() { return register_allocator_; }
BytecodeRegisterOptimizer* optimizer() { return register_optimizer_; }
- Register NewTemporary() {
- return Register(allocator()->BorrowTemporaryRegister());
- }
+ Register NewTemporary() { return allocator()->NewRegister(); }
- void KillTemporary(Register reg) {
- allocator()->ReturnTemporaryRegister(reg.index());
+ void ReleaseTemporaries(Register reg) {
+ allocator()->ReleaseRegisters(reg.index());
}
size_t write_count() const { return output_.size(); }
@@ -56,7 +54,7 @@ class BytecodeRegisterOptimizerTest : public BytecodePipelineStage,
const std::vector<BytecodeNode>* output() { return &output_; }
private:
- TemporaryRegisterAllocator* register_allocator_;
+ BytecodeRegisterAllocator* register_allocator_;
BytecodeRegisterOptimizer* register_optimizer_;
std::vector<BytecodeNode> output_;
@@ -74,8 +72,8 @@ TEST_F(BytecodeRegisterOptimizerTest, WriteNop) {
TEST_F(BytecodeRegisterOptimizerTest, WriteNopExpression) {
Initialize(1, 1);
- BytecodeNode node(Bytecode::kNop);
- node.source_info().MakeExpressionPosition(3);
+ BytecodeSourceInfo source_info(3, false);
+ BytecodeNode node(Bytecode::kNop, &source_info);
optimizer()->Write(&node);
CHECK_EQ(write_count(), 1);
CHECK_EQ(node, last_written());
@@ -83,8 +81,8 @@ TEST_F(BytecodeRegisterOptimizerTest, WriteNopExpression) {
TEST_F(BytecodeRegisterOptimizerTest, WriteNopStatement) {
Initialize(1, 1);
+ BytecodeSourceInfo source_info(3, true);
BytecodeNode node(Bytecode::kNop);
- node.source_info().MakeStatementPosition(3);
optimizer()->Write(&node);
CHECK_EQ(write_count(), 1);
CHECK_EQ(node, last_written());
@@ -97,7 +95,7 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForJump) {
optimizer()->Write(&node);
CHECK_EQ(write_count(), 0);
BytecodeLabel label;
- BytecodeNode jump(Bytecode::kJump, 0);
+ BytecodeNode jump(Bytecode::kJump, 0, nullptr);
optimizer()->WriteJump(&jump, &label);
CHECK_EQ(write_count(), 2);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
@@ -130,7 +128,7 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
BytecodeNode node1(Bytecode::kStar, NewTemporary().ToOperand());
optimizer()->Write(&node1);
CHECK_EQ(write_count(), 0);
- KillTemporary(temp);
+ ReleaseTemporaries(temp);
CHECK_EQ(write_count(), 0);
BytecodeNode node2(Bytecode::kReturn);
optimizer()->Write(&node2);
@@ -140,6 +138,61 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kReturn);
}
+TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) {
+ Initialize(3, 1);
+ BytecodeNode node0(Bytecode::kLdaSmi, 3);
+ optimizer()->Write(&node0);
+ CHECK_EQ(write_count(), 1);
+ Register temp0 = NewTemporary();
+ Register temp1 = NewTemporary();
+ BytecodeNode node1(Bytecode::kStar, temp1.ToOperand());
+ optimizer()->Write(&node1);
+ CHECK_EQ(write_count(), 1);
+ BytecodeNode node2(Bytecode::kLdaSmi, 1);
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 3);
+ BytecodeNode node3(Bytecode::kMov, temp1.ToOperand(), temp0.ToOperand());
+ optimizer()->Write(&node3);
+ CHECK_EQ(write_count(), 3);
+ ReleaseTemporaries(temp1);
+ CHECK_EQ(write_count(), 3);
+ BytecodeNode node4(Bytecode::kLdar, temp0.ToOperand());
+ optimizer()->Write(&node4);
+ CHECK_EQ(write_count(), 3);
+ BytecodeNode node5(Bytecode::kReturn);
+ optimizer()->Write(&node5);
+ CHECK_EQ(write_count(), 5);
+ CHECK_EQ(output()->at(3).bytecode(), Bytecode::kLdar);
+ CHECK_EQ(output()->at(3).operand(0), temp1.ToOperand());
+ CHECK_EQ(output()->at(4).bytecode(), Bytecode::kReturn);
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterNotFlushed) {
+ Initialize(3, 1);
+ BytecodeNode node0(Bytecode::kLdaSmi, 3);
+ optimizer()->Write(&node0);
+ CHECK_EQ(write_count(), 1);
+ Register temp0 = NewTemporary();
+ Register temp1 = NewTemporary();
+ BytecodeNode node1(Bytecode::kStar, temp0.ToOperand());
+ optimizer()->Write(&node1);
+ CHECK_EQ(write_count(), 1);
+ BytecodeNode node2(Bytecode::kStar, temp1.ToOperand());
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 1);
+ ReleaseTemporaries(temp1);
+ BytecodeLabel label;
+ BytecodeNode jump(Bytecode::kJump, 0, nullptr);
+ optimizer()->WriteJump(&jump, &label);
+ BytecodeNode node3(Bytecode::kReturn);
+ optimizer()->Write(&node3);
+ CHECK_EQ(write_count(), 4);
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kStar);
+ CHECK_EQ(output()->at(1).operand(0), temp0.ToOperand());
+ CHECK_EQ(output()->at(2).bytecode(), Bytecode::kJump);
+ CHECK_EQ(output()->at(3).bytecode(), Bytecode::kReturn);
+}
+
TEST_F(BytecodeRegisterOptimizerTest, StoresToLocalsImmediate) {
Initialize(3, 1);
Register parameter = Register::FromParameterIndex(1, 3);
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
index 0e68e188c7..47c7abb772 100644
--- a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -161,18 +161,47 @@ TEST(Bytecodes, PrefixMappings) {
}
}
-TEST(Bytecodes, SizesForSignedOperands) {
- CHECK(Bytecodes::SizeForSignedOperand(0) == OperandSize::kByte);
- CHECK(Bytecodes::SizeForSignedOperand(kMaxInt8) == OperandSize::kByte);
- CHECK(Bytecodes::SizeForSignedOperand(kMinInt8) == OperandSize::kByte);
- CHECK(Bytecodes::SizeForSignedOperand(kMaxInt8 + 1) == OperandSize::kShort);
- CHECK(Bytecodes::SizeForSignedOperand(kMinInt8 - 1) == OperandSize::kShort);
- CHECK(Bytecodes::SizeForSignedOperand(kMaxInt16) == OperandSize::kShort);
- CHECK(Bytecodes::SizeForSignedOperand(kMinInt16) == OperandSize::kShort);
- CHECK(Bytecodes::SizeForSignedOperand(kMaxInt16 + 1) == OperandSize::kQuad);
- CHECK(Bytecodes::SizeForSignedOperand(kMinInt16 - 1) == OperandSize::kQuad);
- CHECK(Bytecodes::SizeForSignedOperand(kMaxInt) == OperandSize::kQuad);
- CHECK(Bytecodes::SizeForSignedOperand(kMinInt) == OperandSize::kQuad);
+TEST(Bytecodes, ScaleForSignedOperand) {
+ CHECK(Bytecodes::ScaleForSignedOperand(0) == OperandScale::kSingle);
+ CHECK(Bytecodes::ScaleForSignedOperand(kMaxInt8) == OperandScale::kSingle);
+ CHECK(Bytecodes::ScaleForSignedOperand(kMinInt8) == OperandScale::kSingle);
+ CHECK(Bytecodes::ScaleForSignedOperand(kMaxInt8 + 1) ==
+ OperandScale::kDouble);
+ CHECK(Bytecodes::ScaleForSignedOperand(kMinInt8 - 1) ==
+ OperandScale::kDouble);
+ CHECK(Bytecodes::ScaleForSignedOperand(kMaxInt16) == OperandScale::kDouble);
+ CHECK(Bytecodes::ScaleForSignedOperand(kMinInt16) == OperandScale::kDouble);
+ CHECK(Bytecodes::ScaleForSignedOperand(kMaxInt16 + 1) ==
+ OperandScale::kQuadruple);
+ CHECK(Bytecodes::ScaleForSignedOperand(kMinInt16 - 1) ==
+ OperandScale::kQuadruple);
+ CHECK(Bytecodes::ScaleForSignedOperand(kMaxInt) == OperandScale::kQuadruple);
+ CHECK(Bytecodes::ScaleForSignedOperand(kMinInt) == OperandScale::kQuadruple);
+}
+
+TEST(Bytecodes, ScaleForUnsignedOperands) {
+ // int overloads
+ CHECK(Bytecodes::ScaleForUnsignedOperand(0) == OperandScale::kSingle);
+ CHECK(Bytecodes::ScaleForUnsignedOperand(kMaxUInt8) == OperandScale::kSingle);
+ CHECK(Bytecodes::ScaleForUnsignedOperand(kMaxUInt8 + 1) ==
+ OperandScale::kDouble);
+ CHECK(Bytecodes::ScaleForUnsignedOperand(kMaxUInt16) ==
+ OperandScale::kDouble);
+ CHECK(Bytecodes::ScaleForUnsignedOperand(kMaxUInt16 + 1) ==
+ OperandScale::kQuadruple);
+ // size_t overloads
+ CHECK(Bytecodes::ScaleForUnsignedOperand(static_cast<size_t>(0)) ==
+ OperandScale::kSingle);
+ CHECK(Bytecodes::ScaleForUnsignedOperand(static_cast<size_t>(kMaxUInt8)) ==
+ OperandScale::kSingle);
+ CHECK(Bytecodes::ScaleForUnsignedOperand(
+ static_cast<size_t>(kMaxUInt8 + 1)) == OperandScale::kDouble);
+ CHECK(Bytecodes::ScaleForUnsignedOperand(static_cast<size_t>(kMaxUInt16)) ==
+ OperandScale::kDouble);
+ CHECK(Bytecodes::ScaleForUnsignedOperand(
+ static_cast<size_t>(kMaxUInt16 + 1)) == OperandScale::kQuadruple);
+ CHECK(Bytecodes::ScaleForUnsignedOperand(static_cast<size_t>(kMaxUInt32)) ==
+ OperandScale::kQuadruple);
}
TEST(Bytecodes, SizesForUnsignedOperands) {
@@ -236,14 +265,6 @@ TEST(AccumulatorUse, SampleBytecodes) {
AccumulatorUse::kReadWrite);
}
-TEST(AccumulatorUse, AccumulatorUseToString) {
- std::set<std::string> names;
- names.insert(Bytecodes::AccumulatorUseToString(AccumulatorUse::kNone));
- names.insert(Bytecodes::AccumulatorUseToString(AccumulatorUse::kRead));
- names.insert(Bytecodes::AccumulatorUseToString(AccumulatorUse::kWrite));
- names.insert(Bytecodes::AccumulatorUseToString(AccumulatorUse::kReadWrite));
- CHECK_EQ(names.size(), 4);
-}
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index ff6f14df21..53afb35a12 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -421,12 +421,16 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
EXPECT_THAT(m.BytecodeOperandIdx(i),
m.IsUnsignedOperand(offset, operand_size));
break;
+ case interpreter::OperandType::kUImm:
+ EXPECT_THAT(m.BytecodeOperandUImm(i),
+ m.IsUnsignedOperand(offset, operand_size));
+ break;
case interpreter::OperandType::kImm: {
EXPECT_THAT(m.BytecodeOperandImm(i),
m.IsSignedOperand(offset, operand_size));
break;
}
- case interpreter::OperandType::kMaybeReg:
+ case interpreter::OperandType::kRegList:
case interpreter::OperandType::kReg:
case interpreter::OperandType::kRegOut:
case interpreter::OperandType::kRegOutPair:
@@ -537,9 +541,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
EXPECT_THAT(m.SmiTag(value),
IsIntPtrConstant(static_cast<intptr_t>(44)
<< (kSmiShiftSize + kSmiTagSize)));
- EXPECT_THAT(
- m.SmiUntag(value),
- IsWordSar(value, IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
+ EXPECT_THAT(m.SmiUntag(value),
+ IsWordSar(IsBitcastTaggedToWord(value),
+ IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
}
}
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index c5788e2478..984d63ce2a 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -8,7 +8,8 @@
#include "include/v8.h"
#include "src/base/macros.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/zone.h"
+#include "src/zone/accounting-allocator.h"
+#include "src/zone/zone.h"
#include "testing/gtest-support.h"
namespace v8 {
@@ -103,7 +104,7 @@ class TestWithZone : public virtual ::testing::Test {
Zone* zone() { return &zone_; }
private:
- base::AccountingAllocator allocator_;
+ v8::internal::AccountingAllocator allocator_;
Zone zone_;
DISALLOW_COPY_AND_ASSIGN(TestWithZone);
@@ -118,7 +119,7 @@ class TestWithIsolateAndZone : public virtual TestWithIsolate {
Zone* zone() { return &zone_; }
private:
- base::AccountingAllocator allocator_;
+ v8::internal::AccountingAllocator allocator_;
Zone zone_;
DISALLOW_COPY_AND_ASSIGN(TestWithIsolateAndZone);
diff --git a/deps/v8/test/unittests/unicode-unittest.cc b/deps/v8/test/unittests/unicode-unittest.cc
new file mode 100644
index 0000000000..67edfb7331
--- /dev/null
+++ b/deps/v8/test/unittests/unicode-unittest.cc
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <string>
+
+#include "src/unicode-decoder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+using Utf8Decoder = unibrow::Utf8Decoder<512>;
+
+void Decode(Utf8Decoder* decoder, const std::string& str) {
+ // Put the string in its own buffer on the heap to make sure that
+ // AddressSanitizer's heap-buffer-overflow logic can see what's going on.
+ std::unique_ptr<char[]> buffer(new char[str.length()]);
+ memcpy(buffer.get(), str.data(), str.length());
+ decoder->Reset(buffer.get(), str.length());
+}
+
+} // namespace
+
+TEST(UnicodeTest, ReadOffEndOfUtf8String) {
+ Utf8Decoder decoder;
+
+ // Not enough continuation bytes before string ends.
+ Decode(&decoder, "\xE0");
+ Decode(&decoder, "\xED");
+ Decode(&decoder, "\xF0");
+ Decode(&decoder, "\xF4");
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index 0ea8b9a43d..e2b9f26347 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -54,6 +54,7 @@
'compiler/js-typed-lowering-unittest.cc',
'compiler/linkage-tail-call-unittest.cc',
'compiler/liveness-analyzer-unittest.cc',
+ 'compiler/live-range-builder.h',
'compiler/live-range-unittest.cc',
'compiler/load-elimination-unittest.cc',
'compiler/loop-peeling-unittest.cc',
@@ -75,6 +76,7 @@
'compiler/simplified-operator-unittest.cc',
'compiler/state-values-utils-unittest.cc',
'compiler/tail-call-optimization-unittest.cc',
+ 'compiler/typed-optimization-unittest.cc',
'compiler/typer-unittest.cc',
'compiler/value-numbering-reducer-unittest.cc',
'compiler/zone-pool-unittest.cc',
@@ -92,6 +94,7 @@
'interpreter/bytecode-pipeline-unittest.cc',
'interpreter/bytecode-register-allocator-unittest.cc',
'interpreter/bytecode-register-optimizer-unittest.cc',
+ 'interpreter/bytecode-utils.h',
'interpreter/constant-array-builder-unittest.cc',
'interpreter/interpreter-assembler-unittest.cc',
'interpreter/interpreter-assembler-unittest.h',
@@ -112,17 +115,18 @@
'source-position-table-unittest.cc',
'test-utils.h',
'test-utils.cc',
+ 'unicode-unittest.cc',
'value-serializer-unittest.cc',
'wasm/asm-types-unittest.cc',
'wasm/ast-decoder-unittest.cc',
'wasm/control-transfer-unittest.cc',
'wasm/decoder-unittest.cc',
- 'wasm/encoder-unittest.cc',
'wasm/leb-helper-unittest.cc',
'wasm/loop-assignment-analysis-unittest.cc',
'wasm/module-decoder-unittest.cc',
'wasm/switch-logic-unittest.cc',
'wasm/wasm-macro-gen-unittest.cc',
+ 'wasm/wasm-module-builder-unittest.cc',
],
'unittests_sources_arm': [ ### gcmole(arch:arm) ###
'compiler/arm/instruction-selector-arm-unittest.cc',
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index ee135ba5e8..71c17f6b6b 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -3,15 +3,6 @@
# found in the LICENSE file.
[
-['byteorder == big', {
- # TODO(mips-team): Fix Wasm for big-endian.
- 'WasmModuleVerifyTest*': [SKIP],
- 'WasmFunctionVerifyTest*': [SKIP],
- 'WasmDecoderTest.TableSwitch*': [SKIP],
- 'WasmDecoderTest.AllLoadMemCombinations': [SKIP],
- 'AstDecoderTest.AllLoadMemCombinations': [SKIP],
- 'AstDecoderTest.AllStoreMemCombinations': [SKIP],
-}], # 'byteorder == big'
['arch == x87', {
'Ieee754.Expm1': [SKIP],
'Ieee754.Cos': [SKIP],
@@ -19,4 +10,9 @@
'Ieee754.Acosh': [SKIP],
'Ieee754.Asinh': [SKIP],
}], # 'arch == x87'
+
+['variant == asm_wasm', {
+ '*': [SKIP],
+}], # variant == asm_wasm
+
]
diff --git a/deps/v8/test/unittests/value-serializer-unittest.cc b/deps/v8/test/unittests/value-serializer-unittest.cc
index f4ed15b644..d88d60a3e6 100644
--- a/deps/v8/test/unittests/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/value-serializer-unittest.cc
@@ -11,16 +11,48 @@
#include "src/api.h"
#include "src/base/build_config.h"
#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace {
+using ::testing::_;
+using ::testing::Invoke;
+
class ValueSerializerTest : public TestWithIsolate {
protected:
ValueSerializerTest()
: serialization_context_(Context::New(isolate())),
- deserialization_context_(Context::New(isolate())) {}
+ deserialization_context_(Context::New(isolate())) {
+ // Create a host object type that can be tested through
+ // serialization/deserialization delegates below.
+ Local<FunctionTemplate> function_template = v8::FunctionTemplate::New(
+ isolate(), [](const FunctionCallbackInfo<Value>& args) {
+ args.Holder()->SetInternalField(0, args[0]);
+ args.Holder()->SetInternalField(1, args[1]);
+ });
+ function_template->InstanceTemplate()->SetInternalFieldCount(2);
+ function_template->InstanceTemplate()->SetAccessor(
+ StringFromUtf8("value"),
+ [](Local<String> property, const PropertyCallbackInfo<Value>& args) {
+ args.GetReturnValue().Set(args.Holder()->GetInternalField(0));
+ });
+ function_template->InstanceTemplate()->SetAccessor(
+ StringFromUtf8("value2"),
+ [](Local<String> property, const PropertyCallbackInfo<Value>& args) {
+ args.GetReturnValue().Set(args.Holder()->GetInternalField(1));
+ });
+ for (Local<Context> context :
+ {serialization_context_, deserialization_context_}) {
+ context->Global()
+ ->CreateDataProperty(
+ context, StringFromUtf8("ExampleHostObject"),
+ function_template->GetFunction(context).ToLocalChecked())
+ .ToChecked();
+ }
+ host_object_constructor_template_ = function_template;
+ }
const Local<Context>& serialization_context() {
return serialization_context_;
@@ -29,6 +61,14 @@ class ValueSerializerTest : public TestWithIsolate {
return deserialization_context_;
}
+ // Overridden in more specific fixtures.
+ virtual ValueSerializer::Delegate* GetSerializerDelegate() { return nullptr; }
+ virtual void BeforeEncode(ValueSerializer*) {}
+ virtual ValueDeserializer::Delegate* GetDeserializerDelegate() {
+ return nullptr;
+ }
+ virtual void BeforeDecode(ValueDeserializer*) {}
+
template <typename InputFunctor, typename OutputFunctor>
void RoundTripTest(const InputFunctor& input_functor,
const OutputFunctor& output_functor) {
@@ -46,20 +86,30 @@ class ValueSerializerTest : public TestWithIsolate {
output_functor);
}
+ // Variant which uses JSON.parse/stringify to check the result.
+ void RoundTripJSON(const char* source) {
+ RoundTripTest(
+ [this, source]() {
+ return JSON::Parse(serialization_context_, StringFromUtf8(source))
+ .ToLocalChecked();
+ },
+ [this, source](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_EQ(source, Utf8Value(JSON::Stringify(deserialization_context_,
+ value.As<Object>())
+ .ToLocalChecked()));
+ });
+ }
+
Maybe<std::vector<uint8_t>> DoEncode(Local<Value> value) {
- // This approximates what the API implementation would do.
- // TODO(jbroman): Use the public API once it exists.
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate());
- i::HandleScope handle_scope(internal_isolate);
- i::ValueSerializer serializer(internal_isolate);
+ Local<Context> context = serialization_context();
+ ValueSerializer serializer(isolate(), GetSerializerDelegate());
+ BeforeEncode(&serializer);
serializer.WriteHeader();
- if (serializer.WriteObject(Utils::OpenHandle(*value)).FromMaybe(false)) {
- return Just(serializer.ReleaseBuffer());
- }
- if (internal_isolate->has_pending_exception()) {
- internal_isolate->OptionalRescheduleException(true);
+ if (!serializer.WriteValue(context, value).FromMaybe(false)) {
+ return Nothing<std::vector<uint8_t>>();
}
- return Nothing<std::vector<uint8_t>>();
+ return Just(serializer.ReleaseBuffer());
}
template <typename InputFunctor, typename EncodedDataFunctor>
@@ -90,24 +140,23 @@ class ValueSerializerTest : public TestWithIsolate {
template <typename OutputFunctor>
void DecodeTest(const std::vector<uint8_t>& data,
const OutputFunctor& output_functor) {
- Context::Scope scope(deserialization_context());
+ Local<Context> context = deserialization_context();
+ Context::Scope scope(context);
TryCatch try_catch(isolate());
- // TODO(jbroman): Use the public API once it exists.
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate());
- i::HandleScope handle_scope(internal_isolate);
- i::ValueDeserializer deserializer(
- internal_isolate,
- i::Vector<const uint8_t>(&data[0], static_cast<int>(data.size())));
- ASSERT_TRUE(deserializer.ReadHeader().FromMaybe(false));
+ ValueDeserializer deserializer(isolate(), &data[0],
+ static_cast<int>(data.size()),
+ GetDeserializerDelegate());
+ deserializer.SetSupportsLegacyWireFormat(true);
+ BeforeDecode(&deserializer);
+ ASSERT_TRUE(deserializer.ReadHeader(context).FromMaybe(false));
Local<Value> result;
- ASSERT_TRUE(ToLocal<Value>(deserializer.ReadObject(), &result));
+ ASSERT_TRUE(deserializer.ReadValue(context).ToLocal(&result));
ASSERT_FALSE(result.IsEmpty());
ASSERT_FALSE(try_catch.HasCaught());
- ASSERT_TRUE(deserialization_context()
- ->Global()
- ->CreateDataProperty(deserialization_context_,
- StringFromUtf8("result"), result)
- .FromMaybe(false));
+ ASSERT_TRUE(
+ context->Global()
+ ->CreateDataProperty(context, StringFromUtf8("result"), result)
+ .FromMaybe(false));
output_functor(result);
ASSERT_FALSE(try_catch.HasCaught());
}
@@ -115,43 +164,45 @@ class ValueSerializerTest : public TestWithIsolate {
template <typename OutputFunctor>
void DecodeTestForVersion0(const std::vector<uint8_t>& data,
const OutputFunctor& output_functor) {
- Context::Scope scope(deserialization_context());
+ Local<Context> context = deserialization_context();
+ Context::Scope scope(context);
TryCatch try_catch(isolate());
- // TODO(jbroman): Use the public API once it exists.
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate());
- i::HandleScope handle_scope(internal_isolate);
- i::ValueDeserializer deserializer(
- internal_isolate,
- i::Vector<const uint8_t>(&data[0], static_cast<int>(data.size())));
- // TODO(jbroman): Enable legacy support.
- ASSERT_TRUE(deserializer.ReadHeader().FromMaybe(false));
- // TODO(jbroman): Check version 0.
+ ValueDeserializer deserializer(isolate(), &data[0],
+ static_cast<int>(data.size()),
+ GetDeserializerDelegate());
+ deserializer.SetSupportsLegacyWireFormat(true);
+ BeforeDecode(&deserializer);
+ ASSERT_TRUE(deserializer.ReadHeader(context).FromMaybe(false));
+ ASSERT_EQ(0, deserializer.GetWireFormatVersion());
Local<Value> result;
- ASSERT_TRUE(ToLocal<Value>(
- deserializer.ReadObjectUsingEntireBufferForLegacyFormat(), &result));
+ ASSERT_TRUE(deserializer.ReadValue(context).ToLocal(&result));
ASSERT_FALSE(result.IsEmpty());
ASSERT_FALSE(try_catch.HasCaught());
- ASSERT_TRUE(deserialization_context()
- ->Global()
- ->CreateDataProperty(deserialization_context_,
- StringFromUtf8("result"), result)
- .FromMaybe(false));
+ ASSERT_TRUE(
+ context->Global()
+ ->CreateDataProperty(context, StringFromUtf8("result"), result)
+ .FromMaybe(false));
output_functor(result);
ASSERT_FALSE(try_catch.HasCaught());
}
void InvalidDecodeTest(const std::vector<uint8_t>& data) {
- Context::Scope scope(deserialization_context());
+ Local<Context> context = deserialization_context();
+ Context::Scope scope(context);
TryCatch try_catch(isolate());
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate());
- i::HandleScope handle_scope(internal_isolate);
- i::ValueDeserializer deserializer(
- internal_isolate,
- i::Vector<const uint8_t>(&data[0], static_cast<int>(data.size())));
- Maybe<bool> header_result = deserializer.ReadHeader();
- if (header_result.IsNothing()) return;
+ ValueDeserializer deserializer(isolate(), &data[0],
+ static_cast<int>(data.size()),
+ GetDeserializerDelegate());
+ deserializer.SetSupportsLegacyWireFormat(true);
+ BeforeDecode(&deserializer);
+ Maybe<bool> header_result = deserializer.ReadHeader(context);
+ if (header_result.IsNothing()) {
+ EXPECT_TRUE(try_catch.HasCaught());
+ return;
+ }
ASSERT_TRUE(header_result.ToChecked());
- ASSERT_TRUE(deserializer.ReadObject().is_null());
+ ASSERT_TRUE(deserializer.ReadValue(context).IsEmpty());
+ EXPECT_TRUE(try_catch.HasCaught());
}
Local<Value> EvaluateScriptForInput(const char* utf8_source) {
@@ -179,9 +230,18 @@ class ValueSerializerTest : public TestWithIsolate {
return std::string(*utf8, utf8.length());
}
+ Local<Object> NewHostObject(Local<Context> context, int argc,
+ Local<Value> argv[]) {
+ return host_object_constructor_template_->GetFunction(context)
+ .ToLocalChecked()
+ ->NewInstance(context, argc, argv)
+ .ToLocalChecked();
+ }
+
private:
Local<Context> serialization_context_;
Local<Context> deserialization_context_;
+ Local<FunctionTemplate> host_object_constructor_template_;
DISALLOW_COPY_AND_ASSIGN(ValueSerializerTest);
};
@@ -659,6 +719,31 @@ TEST_F(ValueSerializerTest, RoundTripTrickyGetters) {
});
}
+TEST_F(ValueSerializerTest, RoundTripDictionaryObjectForTransitions) {
+ // A case which should run on the fast path, and should reach all of the
+ // different cases:
+ // 1. no known transition (first time creating this kind of object)
+ // 2. expected transitions match to end
+ // 3. transition partially matches, but falls back due to new property 'w'
+ // 4. transition to 'z' is now a full transition (needs to be looked up)
+ // 5. same for 'w'
+ // 6. new property after complex transition succeeded
+ // 7. new property after complex transition failed (due to new property)
+ RoundTripJSON(
+ "[{\"x\":1,\"y\":2,\"z\":3}"
+ ",{\"x\":4,\"y\":5,\"z\":6}"
+ ",{\"x\":5,\"y\":6,\"w\":7}"
+ ",{\"x\":6,\"y\":7,\"z\":8}"
+ ",{\"x\":0,\"y\":0,\"w\":0}"
+ ",{\"x\":3,\"y\":1,\"w\":4,\"z\":1}"
+ ",{\"x\":5,\"y\":9,\"k\":2,\"z\":6}]");
+ // A simpler case that uses two-byte strings.
+ RoundTripJSON(
+ "[{\"\xF0\x9F\x91\x8A\":1,\"\xF0\x9F\x91\x8B\":2}"
+ ",{\"\xF0\x9F\x91\x8A\":3,\"\xF0\x9F\x91\x8C\":4}"
+ ",{\"\xF0\x9F\x91\x8A\":5,\"\xF0\x9F\x91\x9B\":6}]");
+}
+
TEST_F(ValueSerializerTest, DecodeDictionaryObjectVersion0) {
// Empty object.
DecodeTestForVersion0(
@@ -950,6 +1035,19 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === 1"));
EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(2)"));
});
+ // The same is true if the length is shortened, but there are still items
+ // remaining.
+ RoundTripTest(
+ "(() => {"
+ " var x = [1, { get a() { x.length = 3; }}, 3, 4];"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(4, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[2] === 3"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(3)"));
+ });
// Same for sparse arrays.
RoundTripTest(
"(() => {"
@@ -963,6 +1061,18 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === 1"));
EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(2)"));
});
+ RoundTripTest(
+ "(() => {"
+ " var x = [1, { get a() { x.length = 3; }}, 3, 4];"
+ " x.length = 1000;"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[2] === 3"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(3)"));
+ });
// If a getter makes a property non-enumerable, it should still be enumerated
// as enumeration happens once before getters are invoked.
RoundTripTest(
@@ -1364,5 +1474,889 @@ TEST_F(ValueSerializerTest, DecodeRegExp) {
});
}
+TEST_F(ValueSerializerTest, RoundTripMap) {
+ RoundTripTest(
+ "(() => { var m = new Map(); m.set(42, 'foo'); return m; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsMap());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Map.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.get(42) === 'foo'"));
+ });
+ RoundTripTest("(() => { var m = new Map(); m.set(m, m); return m; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsMap());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.get(result) === result"));
+ });
+ // Iteration order must be preserved.
+ RoundTripTest(
+ "(() => {"
+ " var m = new Map();"
+ " m.set(1, 0); m.set('a', 0); m.set(3, 0); m.set(2, 0);"
+ " return m;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsMap());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Array.from(result.keys()).toString() === '1,a,3,2'"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeMap) {
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x3b, 0x3f, 0x01, 0x49, 0x54, 0x3f, 0x01, 0x53,
+ 0x03, 0x66, 0x6f, 0x6f, 0x3a, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsMap());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Map.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.get(42) === 'foo'"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3b, 0x3f, 0x01, 0x5e, 0x00, 0x3f, 0x01,
+ 0x5e, 0x00, 0x3a, 0x02, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsMap());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.get(result) === result"));
+ });
+ // Iteration order must be preserved.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3b, 0x3f, 0x01, 0x49, 0x02, 0x3f,
+ 0x01, 0x49, 0x00, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01,
+ 0x49, 0x00, 0x3f, 0x01, 0x49, 0x06, 0x3f, 0x01, 0x49, 0x00,
+ 0x3f, 0x01, 0x49, 0x04, 0x3f, 0x01, 0x49, 0x00, 0x3a, 0x08},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsMap());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Array.from(result.keys()).toString() === '1,a,3,2'"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripMapWithTrickyGetters) {
+ // Even if an entry is removed or reassigned, the original key/value pair is
+ // used.
+ RoundTripTest(
+ "(() => {"
+ " var m = new Map();"
+ " m.set(0, { get a() {"
+ " m.delete(1); m.set(2, 'baz'); m.set(3, 'quux');"
+ " }});"
+ " m.set(1, 'foo');"
+ " m.set(2, 'bar');"
+ " return m;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsMap());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Array.from(result.keys()).toString() === '0,1,2'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.get(1) === 'foo'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.get(2) === 'bar'"));
+ });
+ // However, deeper modifications of objects yet to be serialized still apply.
+ RoundTripTest(
+ "(() => {"
+ " var m = new Map();"
+ " var key = { get a() { value.foo = 'bar'; } };"
+ " var value = { get a() { key.baz = 'quux'; } };"
+ " m.set(key, value);"
+ " return m;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsMap());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "!('baz' in Array.from(result.keys())[0])"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Array.from(result.values())[0].foo === 'bar'"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripSet) {
+ RoundTripTest(
+ "(() => { var s = new Set(); s.add(42); s.add('foo'); return s; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsSet());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Set.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 2"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.has(42)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.has('foo')"));
+ });
+ RoundTripTest(
+ "(() => { var s = new Set(); s.add(s); return s; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsSet());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.has(result)"));
+ });
+ // Iteration order must be preserved.
+ RoundTripTest(
+ "(() => {"
+ " var s = new Set();"
+ " s.add(1); s.add('a'); s.add(3); s.add(2);"
+ " return s;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsSet());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Array.from(result.keys()).toString() === '1,a,3,2'"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeSet) {
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x27, 0x3f, 0x01, 0x49, 0x54, 0x3f, 0x01,
+ 0x53, 0x03, 0x66, 0x6f, 0x6f, 0x2c, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsSet());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Set.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 2"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.has(42)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.has('foo')"));
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x27, 0x3f, 0x01, 0x5e, 0x00, 0x2c, 0x01, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsSet());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.has(result)"));
+ });
+ // Iteration order must be preserved.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x27, 0x3f, 0x01, 0x49, 0x02, 0x3f, 0x01, 0x53,
+ 0x01, 0x61, 0x3f, 0x01, 0x49, 0x06, 0x3f, 0x01, 0x49, 0x04, 0x2c, 0x04},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsSet());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Array.from(result.keys()).toString() === '1,a,3,2'"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripSetWithTrickyGetters) {
+ // Even if an element is added or removed during serialization, the original
+ // set of elements is used.
+ RoundTripTest(
+ "(() => {"
+ " var s = new Set();"
+ " s.add({ get a() { s.delete(1); s.add(2); } });"
+ " s.add(1);"
+ " return s;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsSet());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Array.from(result.keys()).toString() === '[object Object],1'"));
+ });
+ // However, deeper modifications of objects yet to be serialized still apply.
+ RoundTripTest(
+ "(() => {"
+ " var s = new Set();"
+ " var first = { get a() { second.foo = 'bar'; } };"
+ " var second = { get a() { first.baz = 'quux'; } };"
+ " s.add(first);"
+ " s.add(second);"
+ " return s;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsSet());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "!('baz' in Array.from(result.keys())[0])"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Array.from(result.keys())[1].foo === 'bar'"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripArrayBuffer) {
+ RoundTripTest("new ArrayBuffer()", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(0u, ArrayBuffer::Cast(*value)->ByteLength());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === ArrayBuffer.prototype"));
+ });
+ RoundTripTest("new Uint8Array([0, 128, 255]).buffer",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(3u, ArrayBuffer::Cast(*value)->ByteLength());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "new Uint8Array(result).toString() === '0,128,255'"));
+ });
+ RoundTripTest(
+ "({ a: new ArrayBuffer(), get b() { return this.a; }})",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.a instanceof ArrayBuffer"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeArrayBuffer) {
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x42, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(0u, ArrayBuffer::Cast(*value)->ByteLength());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === ArrayBuffer.prototype"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x42, 0x03, 0x00, 0x80, 0xff, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(3u, ArrayBuffer::Cast(*value)->ByteLength());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "new Uint8Array(result).toString() === '0,128,255'"));
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01,
+ 0x61, 0x3f, 0x01, 0x42, 0x00, 0x3f, 0x02, 0x53, 0x01,
+ 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.a instanceof ArrayBuffer"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeInvalidArrayBuffer) {
+ InvalidDecodeTest({0xff, 0x09, 0x42, 0xff, 0xff, 0x00});
+}
+
+// Includes an ArrayBuffer wrapper marked for transfer from the serialization
+// context to the deserialization context.
+class ValueSerializerTestWithArrayBufferTransfer : public ValueSerializerTest {
+ protected:
+ static const size_t kTestByteLength = 4;
+
+ ValueSerializerTestWithArrayBufferTransfer() {
+ {
+ Context::Scope scope(serialization_context());
+ input_buffer_ = ArrayBuffer::New(isolate(), nullptr, 0);
+ input_buffer_->Neuter();
+ }
+ {
+ Context::Scope scope(deserialization_context());
+ output_buffer_ = ArrayBuffer::New(isolate(), kTestByteLength);
+ const uint8_t data[kTestByteLength] = {0x00, 0x01, 0x80, 0xff};
+ memcpy(output_buffer_->GetContents().Data(), data, kTestByteLength);
+ }
+ }
+
+ const Local<ArrayBuffer>& input_buffer() { return input_buffer_; }
+ const Local<ArrayBuffer>& output_buffer() { return output_buffer_; }
+
+ void BeforeEncode(ValueSerializer* serializer) override {
+ serializer->TransferArrayBuffer(0, input_buffer_);
+ }
+
+ void BeforeDecode(ValueDeserializer* deserializer) override {
+ deserializer->TransferArrayBuffer(0, output_buffer_);
+ }
+
+ private:
+ Local<ArrayBuffer> input_buffer_;
+ Local<ArrayBuffer> output_buffer_;
+};
+
+TEST_F(ValueSerializerTestWithArrayBufferTransfer,
+ RoundTripArrayBufferTransfer) {
+ RoundTripTest([this]() { return input_buffer(); },
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(output_buffer(), value);
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "new Uint8Array(result).toString() === '0,1,128,255'"));
+ });
+ RoundTripTest(
+ [this]() {
+ Local<Object> object = Object::New(isolate());
+ EXPECT_TRUE(object
+ ->CreateDataProperty(serialization_context(),
+ StringFromUtf8("a"),
+ input_buffer())
+ .FromMaybe(false));
+ EXPECT_TRUE(object
+ ->CreateDataProperty(serialization_context(),
+ StringFromUtf8("b"),
+ input_buffer())
+ .FromMaybe(false));
+ return object;
+ },
+ [this](Local<Value> value) {
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.a instanceof ArrayBuffer"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "new Uint8Array(result.a).toString() === '0,1,128,255'"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripTypedArray) {
+// Check that the right type comes out the other side for every kind of typed
+// array.
+#define TYPED_ARRAY_ROUND_TRIP_TEST(Type, type, TYPE, ctype, size) \
+ RoundTripTest("new " #Type "Array(2)", [this](Local<Value> value) { \
+ ASSERT_TRUE(value->Is##Type##Array()); \
+ EXPECT_EQ(2 * size, TypedArray::Cast(*value)->ByteLength()); \
+ EXPECT_EQ(2, TypedArray::Cast(*value)->Length()); \
+ EXPECT_TRUE(EvaluateScriptForResultBool( \
+ "Object.getPrototypeOf(result) === " #Type "Array.prototype")); \
+ });
+ TYPED_ARRAYS(TYPED_ARRAY_ROUND_TRIP_TEST)
+#undef TYPED_ARRAY_CASE
+
+ // Check that values of various kinds are suitably preserved.
+ RoundTripTest("new Uint8Array([1, 128, 255])", [this](Local<Value> value) {
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.toString() === '1,128,255'"));
+ });
+ RoundTripTest("new Int16Array([0, 256, -32768])", [this](Local<Value> value) {
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.toString() === '0,256,-32768'"));
+ });
+ RoundTripTest("new Float32Array([0, -0.5, NaN, Infinity])",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '0,-0.5,NaN,Infinity'"));
+ });
+
+ // Array buffer views sharing a buffer should do so on the other side.
+ // Similarly, multiple references to the same typed array should be resolved.
+ RoundTripTest(
+ "(() => {"
+ " var buffer = new ArrayBuffer(32);"
+ " return {"
+ " u8: new Uint8Array(buffer),"
+ " get u8_2() { return this.u8; },"
+ " f32: new Float32Array(buffer, 4, 5),"
+ " b: buffer,"
+ " };"
+ "})()",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.u8 instanceof Uint8Array"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.u8 === result.u8_2"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.f32 instanceof Float32Array"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.u8.buffer === result.f32.buffer"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.f32.byteOffset === 4"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.f32.length === 5"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeTypedArray) {
+ // Check that the right type comes out the other side for every kind of typed
+ // array.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x02, 0x00, 0x00, 0x56,
+ 0x42, 0x00, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsUint8Array());
+ EXPECT_EQ(2, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Uint8Array.prototype"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x02, 0x00, 0x00, 0x56,
+ 0x62, 0x00, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt8Array());
+ EXPECT_EQ(2, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Int8Array.prototype"));
+ });
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x56, 0x57, 0x00, 0x04},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsUint16Array());
+ EXPECT_EQ(4, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Uint16Array.prototype"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x56, 0x77, 0x00, 0x04},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt16Array());
+ EXPECT_EQ(4, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Int16Array.prototype"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x44, 0x00, 0x08},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsUint32Array());
+ EXPECT_EQ(8, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Uint32Array.prototype"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x64, 0x00, 0x08},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32Array());
+ EXPECT_EQ(8, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Int32Array.prototype"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x66, 0x00, 0x08},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsFloat32Array());
+ EXPECT_EQ(8, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Float32Array.prototype"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x56, 0x46, 0x00, 0x10},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsFloat64Array());
+ EXPECT_EQ(16, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Float64Array.prototype"));
+ });
+#endif // V8_TARGET_LITTLE_ENDIAN
+
+ // Check that values of various kinds are suitably preserved.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x03, 0x01, 0x80, 0xff,
+ 0x56, 0x42, 0x00, 0x03, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '1,128,255'"));
+ });
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x06, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x80, 0x56, 0x77, 0x00, 0x06},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '0,256,-32768'"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbf, 0x00, 0x00, 0xc0, 0x7f,
+ 0x00, 0x00, 0x80, 0x7f, 0x56, 0x66, 0x00, 0x10},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '0,-0.5,NaN,Infinity'"));
+ });
+#endif // V8_TARGET_LITTLE_ENDIAN
+
+ // Array buffer views sharing a buffer should do so on the other side.
+ // Similarly, multiple references to the same typed array should be resolved.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x02, 0x75, 0x38, 0x3f,
+ 0x01, 0x3f, 0x01, 0x42, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x56, 0x42, 0x00, 0x20, 0x3f, 0x03, 0x53, 0x04, 0x75, 0x38, 0x5f,
+ 0x32, 0x3f, 0x03, 0x5e, 0x02, 0x3f, 0x03, 0x53, 0x03, 0x66, 0x33, 0x32,
+ 0x3f, 0x03, 0x3f, 0x03, 0x5e, 0x01, 0x56, 0x66, 0x04, 0x14, 0x3f, 0x04,
+ 0x53, 0x01, 0x62, 0x3f, 0x04, 0x5e, 0x01, 0x7b, 0x04, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.u8 instanceof Uint8Array"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.u8 === result.u8_2"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.f32 instanceof Float32Array"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.u8.buffer === result.f32.buffer"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.f32.byteOffset === 4"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.f32.length === 5"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeInvalidTypedArray) {
+ // Byte offset out of range.
+ InvalidDecodeTest(
+ {0xff, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0x42, 0x03, 0x01});
+ // Byte offset in range, offset + length out of range.
+ InvalidDecodeTest(
+ {0xff, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0x42, 0x01, 0x03});
+ // Byte offset not divisible by element size.
+ InvalidDecodeTest(
+ {0xff, 0x09, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00, 0x56, 0x77, 0x01, 0x02});
+ // Byte length not divisible by element size.
+ InvalidDecodeTest(
+ {0xff, 0x09, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00, 0x56, 0x77, 0x02, 0x01});
+}
+
+TEST_F(ValueSerializerTest, RoundTripDataView) {
+ RoundTripTest("new DataView(new ArrayBuffer(4), 1, 2)",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDataView());
+ EXPECT_EQ(1, DataView::Cast(*value)->ByteOffset());
+ EXPECT_EQ(2, DataView::Cast(*value)->ByteLength());
+ EXPECT_EQ(4, DataView::Cast(*value)->Buffer()->ByteLength());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === DataView.prototype"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeDataView) {
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x3f, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x56, 0x3f, 0x01, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDataView());
+ EXPECT_EQ(1, DataView::Cast(*value)->ByteOffset());
+ EXPECT_EQ(2, DataView::Cast(*value)->ByteLength());
+ EXPECT_EQ(4, DataView::Cast(*value)->Buffer()->ByteLength());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === DataView.prototype"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeInvalidDataView) {
+ // Byte offset out of range.
+ InvalidDecodeTest(
+ {0xff, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0x3f, 0x03, 0x01});
+ // Byte offset in range, offset + length out of range.
+ InvalidDecodeTest(
+ {0xff, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0x3f, 0x01, 0x03});
+}
+
+class ValueSerializerTestWithSharedArrayBufferTransfer
+ : public ValueSerializerTest {
+ protected:
+ static const size_t kTestByteLength = 4;
+
+ ValueSerializerTestWithSharedArrayBufferTransfer() {
+ const uint8_t data[kTestByteLength] = {0x00, 0x01, 0x80, 0xff};
+ memcpy(data_, data, kTestByteLength);
+ {
+ Context::Scope scope(serialization_context());
+ input_buffer_ =
+ SharedArrayBuffer::New(isolate(), &data_, kTestByteLength);
+ }
+ {
+ Context::Scope scope(deserialization_context());
+ output_buffer_ =
+ SharedArrayBuffer::New(isolate(), &data_, kTestByteLength);
+ }
+ }
+
+ const Local<SharedArrayBuffer>& input_buffer() { return input_buffer_; }
+ const Local<SharedArrayBuffer>& output_buffer() { return output_buffer_; }
+
+ void BeforeEncode(ValueSerializer* serializer) override {
+ serializer->TransferSharedArrayBuffer(0, input_buffer_);
+ }
+
+ void BeforeDecode(ValueDeserializer* deserializer) override {
+ deserializer->TransferSharedArrayBuffer(0, output_buffer_);
+ }
+
+ static void SetUpTestCase() {
+ flag_was_enabled_ = i::FLAG_harmony_sharedarraybuffer;
+ i::FLAG_harmony_sharedarraybuffer = true;
+ ValueSerializerTest::SetUpTestCase();
+ }
+
+ static void TearDownTestCase() {
+ ValueSerializerTest::TearDownTestCase();
+ i::FLAG_harmony_sharedarraybuffer = flag_was_enabled_;
+ flag_was_enabled_ = false;
+ }
+
+ private:
+ static bool flag_was_enabled_;
+ uint8_t data_[kTestByteLength];
+ Local<SharedArrayBuffer> input_buffer_;
+ Local<SharedArrayBuffer> output_buffer_;
+};
+
+bool ValueSerializerTestWithSharedArrayBufferTransfer::flag_was_enabled_ =
+ false;
+
+TEST_F(ValueSerializerTestWithSharedArrayBufferTransfer,
+ RoundTripSharedArrayBufferTransfer) {
+ RoundTripTest([this]() { return input_buffer(); },
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsSharedArrayBuffer());
+ EXPECT_EQ(output_buffer(), value);
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "new Uint8Array(result).toString() === '0,1,128,255'"));
+ });
+ RoundTripTest(
+ [this]() {
+ Local<Object> object = Object::New(isolate());
+ EXPECT_TRUE(object
+ ->CreateDataProperty(serialization_context(),
+ StringFromUtf8("a"),
+ input_buffer())
+ .FromMaybe(false));
+ EXPECT_TRUE(object
+ ->CreateDataProperty(serialization_context(),
+ StringFromUtf8("b"),
+ input_buffer())
+ .FromMaybe(false));
+ return object;
+ },
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.a instanceof SharedArrayBuffer"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "new Uint8Array(result.a).toString() === '0,1,128,255'"));
+ });
+}
+
+TEST_F(ValueSerializerTestWithSharedArrayBufferTransfer,
+ SharedArrayBufferMustBeTransferred) {
+ // A SharedArrayBuffer which was not marked for transfer should fail encoding.
+ InvalidEncodeTest("new SharedArrayBuffer(32)");
+}
+
+TEST_F(ValueSerializerTest, UnsupportedHostObject) {
+ InvalidEncodeTest("new ExampleHostObject()");
+ InvalidEncodeTest("({ a: new ExampleHostObject() })");
+}
+
+class ValueSerializerTestWithHostObject : public ValueSerializerTest {
+ protected:
+ ValueSerializerTestWithHostObject() : serializer_delegate_(this) {}
+
+ static const uint8_t kExampleHostObjectTag;
+
+ void WriteExampleHostObjectTag() {
+ serializer_->WriteRawBytes(&kExampleHostObjectTag, 1);
+ }
+
+ bool ReadExampleHostObjectTag() {
+ const void* tag;
+ return deserializer_->ReadRawBytes(1, &tag) &&
+ *reinterpret_cast<const uint8_t*>(tag) == kExampleHostObjectTag;
+ }
+
+// GMock doesn't use the "override" keyword.
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winconsistent-missing-override"
+#endif
+
+ class SerializerDelegate : public ValueSerializer::Delegate {
+ public:
+ explicit SerializerDelegate(ValueSerializerTestWithHostObject* test)
+ : test_(test) {}
+ MOCK_METHOD2(WriteHostObject,
+ Maybe<bool>(Isolate* isolate, Local<Object> object));
+ void ThrowDataCloneError(Local<String> message) override {
+ test_->isolate()->ThrowException(Exception::Error(message));
+ }
+
+ private:
+ ValueSerializerTestWithHostObject* test_;
+ };
+
+ class DeserializerDelegate : public ValueDeserializer::Delegate {
+ public:
+ MOCK_METHOD1(ReadHostObject, MaybeLocal<Object>(Isolate* isolate));
+ };
+
+#if __clang__
+#pragma clang diagnostic pop
+#endif
+
+ ValueSerializer::Delegate* GetSerializerDelegate() override {
+ return &serializer_delegate_;
+ }
+ void BeforeEncode(ValueSerializer* serializer) override {
+ serializer_ = serializer;
+ }
+ ValueDeserializer::Delegate* GetDeserializerDelegate() override {
+ return &deserializer_delegate_;
+ }
+ void BeforeDecode(ValueDeserializer* deserializer) override {
+ deserializer_ = deserializer;
+ }
+
+ SerializerDelegate serializer_delegate_;
+ DeserializerDelegate deserializer_delegate_;
+ ValueSerializer* serializer_;
+ ValueDeserializer* deserializer_;
+
+ friend class SerializerDelegate;
+ friend class DeserializerDelegate;
+};
+
+// This is a tag that's not used in V8.
+const uint8_t ValueSerializerTestWithHostObject::kExampleHostObjectTag = '+';
+
+TEST_F(ValueSerializerTestWithHostObject, RoundTripUint32) {
+ // The host can serialize data as uint32_t.
+ EXPECT_CALL(serializer_delegate_, WriteHostObject(isolate(), _))
+ .WillRepeatedly(Invoke([this](Isolate*, Local<Object> object) {
+ uint32_t value = 0;
+ EXPECT_TRUE(object->GetInternalField(0)
+ ->Uint32Value(serialization_context())
+ .To(&value));
+ WriteExampleHostObjectTag();
+ serializer_->WriteUint32(value);
+ return Just(true);
+ }));
+ EXPECT_CALL(deserializer_delegate_, ReadHostObject(isolate()))
+ .WillRepeatedly(Invoke([this](Isolate*) {
+ EXPECT_TRUE(ReadExampleHostObjectTag());
+ uint32_t value = 0;
+ EXPECT_TRUE(deserializer_->ReadUint32(&value));
+ Local<Value> argv[] = {Integer::NewFromUnsigned(isolate(), value)};
+ return NewHostObject(deserialization_context(), arraysize(argv), argv);
+ }));
+ RoundTripTest("new ExampleHostObject(42)", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ ASSERT_TRUE(Object::Cast(*value)->InternalFieldCount());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === ExampleHostObject.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.value === 42"));
+ });
+ RoundTripTest(
+ "new ExampleHostObject(0xCAFECAFE)", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.value === 0xCAFECAFE"));
+ });
+}
+
+TEST_F(ValueSerializerTestWithHostObject, RoundTripUint64) {
+ // The host can serialize data as uint64_t.
+ EXPECT_CALL(serializer_delegate_, WriteHostObject(isolate(), _))
+ .WillRepeatedly(Invoke([this](Isolate*, Local<Object> object) {
+ uint32_t value = 0, value2 = 0;
+ EXPECT_TRUE(object->GetInternalField(0)
+ ->Uint32Value(serialization_context())
+ .To(&value));
+ EXPECT_TRUE(object->GetInternalField(1)
+ ->Uint32Value(serialization_context())
+ .To(&value2));
+ WriteExampleHostObjectTag();
+ serializer_->WriteUint64((static_cast<uint64_t>(value) << 32) | value2);
+ return Just(true);
+ }));
+ EXPECT_CALL(deserializer_delegate_, ReadHostObject(isolate()))
+ .WillRepeatedly(Invoke([this](Isolate*) {
+ EXPECT_TRUE(ReadExampleHostObjectTag());
+ uint64_t value_packed;
+ EXPECT_TRUE(deserializer_->ReadUint64(&value_packed));
+ Local<Value> argv[] = {
+ Integer::NewFromUnsigned(isolate(),
+ static_cast<uint32_t>(value_packed >> 32)),
+ Integer::NewFromUnsigned(isolate(),
+ static_cast<uint32_t>(value_packed))};
+ return NewHostObject(deserialization_context(), arraysize(argv), argv);
+ }));
+ RoundTripTest("new ExampleHostObject(42, 0)", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ ASSERT_TRUE(Object::Cast(*value)->InternalFieldCount());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === ExampleHostObject.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.value === 42"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.value2 === 0"));
+ });
+ RoundTripTest(
+ "new ExampleHostObject(0xFFFFFFFF, 0x12345678)",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.value === 0xFFFFFFFF"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.value2 === 0x12345678"));
+ });
+}
+
+TEST_F(ValueSerializerTestWithHostObject, RoundTripDouble) {
+ // The host can serialize data as double.
+ EXPECT_CALL(serializer_delegate_, WriteHostObject(isolate(), _))
+ .WillRepeatedly(Invoke([this](Isolate*, Local<Object> object) {
+ double value = 0;
+ EXPECT_TRUE(object->GetInternalField(0)
+ ->NumberValue(serialization_context())
+ .To(&value));
+ WriteExampleHostObjectTag();
+ serializer_->WriteDouble(value);
+ return Just(true);
+ }));
+ EXPECT_CALL(deserializer_delegate_, ReadHostObject(isolate()))
+ .WillRepeatedly(Invoke([this](Isolate*) {
+ EXPECT_TRUE(ReadExampleHostObjectTag());
+ double value = 0;
+ EXPECT_TRUE(deserializer_->ReadDouble(&value));
+ Local<Value> argv[] = {Number::New(isolate(), value)};
+ return NewHostObject(deserialization_context(), arraysize(argv), argv);
+ }));
+ RoundTripTest("new ExampleHostObject(-3.5)", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ ASSERT_TRUE(Object::Cast(*value)->InternalFieldCount());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === ExampleHostObject.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.value === -3.5"));
+ });
+ RoundTripTest("new ExampleHostObject(NaN)", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("Number.isNaN(result.value)"));
+ });
+ RoundTripTest("new ExampleHostObject(Infinity)", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.value === Infinity"));
+ });
+ RoundTripTest("new ExampleHostObject(-0)", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("1/result.value === -Infinity"));
+ });
+}
+
+TEST_F(ValueSerializerTestWithHostObject, RoundTripRawBytes) {
+ // The host can serialize arbitrary raw bytes.
+ const struct {
+ uint64_t u64;
+ uint32_t u32;
+ char str[12];
+ } sample_data = {0x1234567812345678, 0x87654321, "Hello world"};
+ EXPECT_CALL(serializer_delegate_, WriteHostObject(isolate(), _))
+ .WillRepeatedly(
+ Invoke([this, &sample_data](Isolate*, Local<Object> object) {
+ WriteExampleHostObjectTag();
+ serializer_->WriteRawBytes(&sample_data, sizeof(sample_data));
+ return Just(true);
+ }));
+ EXPECT_CALL(deserializer_delegate_, ReadHostObject(isolate()))
+ .WillRepeatedly(Invoke([this, &sample_data](Isolate*) {
+ EXPECT_TRUE(ReadExampleHostObjectTag());
+ const void* copied_data = nullptr;
+ EXPECT_TRUE(
+ deserializer_->ReadRawBytes(sizeof(sample_data), &copied_data));
+ if (copied_data) {
+ EXPECT_EQ(0, memcmp(&sample_data, copied_data, sizeof(sample_data)));
+ }
+ return NewHostObject(deserialization_context(), 0, nullptr);
+ }));
+ RoundTripTest("new ExampleHostObject()", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ ASSERT_TRUE(Object::Cast(*value)->InternalFieldCount());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === ExampleHostObject.prototype"));
+ });
+}
+
+TEST_F(ValueSerializerTestWithHostObject, RoundTripSameObject) {
+ // If the same object exists in two places, the delegate should be invoked
+ // only once, and the objects should be the same (by reference equality) on
+ // the other side.
+ EXPECT_CALL(serializer_delegate_, WriteHostObject(isolate(), _))
+ .WillOnce(Invoke([this](Isolate*, Local<Object> object) {
+ WriteExampleHostObjectTag();
+ return Just(true);
+ }));
+ EXPECT_CALL(deserializer_delegate_, ReadHostObject(isolate()))
+ .WillOnce(Invoke([this](Isolate*) {
+ EXPECT_TRUE(ReadExampleHostObjectTag());
+ return NewHostObject(deserialization_context(), 0, nullptr);
+ }));
+ RoundTripTest(
+ "({ a: new ExampleHostObject(), get b() { return this.a; }})",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.a instanceof ExampleHostObject"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
} // namespace
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
index 7311f063a0..cbaf6201c6 100644
--- a/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
@@ -6,25 +6,30 @@
#include "src/v8.h"
-#include "test/cctest/wasm/test-signatures.h"
+#include "test/common/wasm/test-signatures.h"
#include "src/objects.h"
#include "src/wasm/ast-decoder.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
namespace wasm {
-#define B1(a) kExprBlock, a, kExprEnd
-#define B2(a, b) kExprBlock, a, b, kExprEnd
-#define B3(a, b, c) kExprBlock, a, b, c, kExprEnd
+#define B1(a) WASM_BLOCK(a)
+#define B2(a, b) WASM_BLOCK(a, b)
+#define B3(a, b, c) WASM_BLOCK(a, b, c)
+
+#define WASM_IF_OP kExprIf, kLocalVoid
+#define WASM_LOOP_OP kExprLoop, kLocalVoid
static const byte kCodeGetLocal0[] = {kExprGetLocal, 0};
static const byte kCodeGetLocal1[] = {kExprGetLocal, 1};
static const byte kCodeSetLocal0[] = {WASM_SET_LOCAL(0, WASM_ZERO)};
+static const byte kCodeTeeLocal0[] = {WASM_TEE_LOCAL(0, WASM_ZERO)};
static const LocalType kLocalTypes[] = {kAstI32, kAstI64, kAstF32, kAstF64};
static const MachineType machineTypes[] = {
@@ -40,36 +45,56 @@ static const WasmOpcode kInt32BinopOpcodes[] = {
kExprI32LeS, kExprI32LtU, kExprI32LeU};
#define WASM_BRV_IF_ZERO(depth, val) \
- val, WASM_ZERO, kExprBrIf, ARITY_1, static_cast<byte>(depth)
+ val, WASM_ZERO, kExprBrIf, static_cast<byte>(depth)
+
+#define EXPECT_VERIFIES_C(sig, x) \
+ Verify(kSuccess, sigs.sig(), x, x + arraysize(x))
-#define EXPECT_VERIFIES(env, x) Verify(kSuccess, env, x, x + arraysize(x))
+#define EXPECT_FAILURE_C(sig, x) Verify(kError, sigs.sig(), x, x + arraysize(x))
-#define EXPECT_FAILURE(env, x) Verify(kError, env, x, x + arraysize(x))
+#define EXPECT_VERIFIES_SC(sig, x) Verify(kSuccess, sig, x, x + arraysize(x))
-#define EXPECT_VERIFIES_INLINE(env, ...) \
+#define EXPECT_FAILURE_SC(sig, x) Verify(kError, sig, x, x + arraysize(x))
+
+#define EXPECT_VERIFIES_S(env, ...) \
do { \
static byte code[] = {__VA_ARGS__}; \
Verify(kSuccess, env, code, code + arraysize(code)); \
} while (false)
-#define EXPECT_FAILURE_INLINE(env, ...) \
+#define EXPECT_FAILURE_S(env, ...) \
do { \
static byte code[] = {__VA_ARGS__}; \
Verify(kError, env, code, code + arraysize(code)); \
} while (false)
-#define VERIFY(...) \
+#define EXPECT_VERIFIES(sig, ...) \
do { \
static const byte code[] = {__VA_ARGS__}; \
- Verify(kSuccess, sigs.v_i(), code, code + sizeof(code)); \
+ Verify(kSuccess, sigs.sig(), code, code + sizeof(code)); \
} while (false)
+#define EXPECT_FAILURE(sig, ...) \
+ do { \
+ static const byte code[] = {__VA_ARGS__}; \
+ Verify(kError, sigs.sig(), code, code + sizeof(code)); \
+ } while (false)
+
+static bool old_eh_flag;
+
class AstDecoderTest : public TestWithZone {
public:
typedef std::pair<uint32_t, LocalType> LocalsDecl;
AstDecoderTest() : module(nullptr), local_decls(zone()) {}
+ static void SetUpTestCase() { old_eh_flag = FLAG_wasm_eh_prototype; }
+
+ static void TearDownTestCase() {
+ // Reset the wasm_eh_prototype flag
+ FLAG_wasm_eh_prototype = old_eh_flag;
+ }
+
TestSignatures sigs;
ModuleEnv* module;
LocalDeclEncoder local_decls;
@@ -103,14 +128,14 @@ class AstDecoderTest : public TestWithZone {
if (result.error_pt) str << ", pt = +" << pt;
}
}
- FATAL(str.str().c_str());
+ EXPECT_TRUE(false) << str.str().c_str();
}
}
void TestBinop(WasmOpcode opcode, FunctionSig* success) {
// op(local[0], local[1])
byte code[] = {WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
- EXPECT_VERIFIES(success, code);
+ EXPECT_VERIFIES_SC(success, code);
// Try all combinations of return and parameter types.
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
@@ -122,7 +147,7 @@ class AstDecoderTest : public TestWithZone {
types[2] != success->GetParam(1)) {
// Test signature mismatch.
FunctionSig sig(1, 2, types);
- EXPECT_FAILURE(&sig, code);
+ EXPECT_FAILURE_SC(&sig, code);
}
}
}
@@ -139,7 +164,7 @@ class AstDecoderTest : public TestWithZone {
{
LocalType types[] = {ret_type, param_type};
FunctionSig sig(1, 1, types);
- EXPECT_VERIFIES(&sig, code);
+ EXPECT_VERIFIES_SC(&sig, code);
}
// Try all combinations of return and parameter types.
@@ -149,7 +174,7 @@ class AstDecoderTest : public TestWithZone {
if (types[0] != ret_type || types[1] != param_type) {
// Test signature mismatch.
FunctionSig sig(1, 1, types);
- EXPECT_FAILURE(&sig, code);
+ EXPECT_FAILURE_SC(&sig, code);
}
}
}
@@ -160,7 +185,7 @@ TEST_F(AstDecoderTest, Int8Const) {
byte code[] = {kExprI8Const, 0};
for (int i = -128; i < 128; i++) {
code[1] = static_cast<byte>(i);
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES_C(i_i, code);
}
}
@@ -172,13 +197,16 @@ TEST_F(AstDecoderTest, EmptyFunction) {
TEST_F(AstDecoderTest, IncompleteIf1) {
byte code[] = {kExprIf};
- EXPECT_FAILURE(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
}
TEST_F(AstDecoderTest, Int8Const_fallthru) {
- byte code[] = {kExprI8Const, 0, kExprI8Const, 1};
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES(i_i, WASM_I32V_1(0));
+}
+
+TEST_F(AstDecoderTest, Int8Const_fallthru2) {
+ EXPECT_FAILURE(i_i, WASM_I32V_1(0), WASM_I32V_1(1));
}
TEST_F(AstDecoderTest, Int32Const) {
@@ -186,20 +214,15 @@ TEST_F(AstDecoderTest, Int32Const) {
for (int32_t i = kMinInt; i < kMaxInt - kInc; i = i + kInc) {
// TODO(binji): expand test for other sized int32s; 1 through 5 bytes.
byte code[] = {WASM_I32V(i)};
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES_C(i_i, code);
}
}
-TEST_F(AstDecoderTest, Int8Const_fallthru2) {
- byte code[] = {WASM_I8(0), WASM_I32V_4(0x1122334)};
- EXPECT_VERIFIES(sigs.i_i(), code);
-}
-
TEST_F(AstDecoderTest, Int64Const) {
const int kInc = 4498211;
for (int32_t i = kMinInt; i < kMaxInt - kInc; i = i + kInc) {
byte code[] = {WASM_I64V((static_cast<int64_t>(i) << 32) | i)};
- EXPECT_VERIFIES(sigs.l_l(), code);
+ EXPECT_VERIFIES_C(l_l, code);
}
}
@@ -207,8 +230,8 @@ TEST_F(AstDecoderTest, Float32Const) {
byte code[] = {kExprF32Const, 0, 0, 0, 0};
float* ptr = reinterpret_cast<float*>(code + 1);
for (int i = 0; i < 30; i++) {
- *ptr = i * -7.75f;
- EXPECT_VERIFIES(sigs.f_ff(), code);
+ WriteLittleEndianValue<float>(ptr, i * -7.75f);
+ EXPECT_VERIFIES_C(f_ff, code);
}
}
@@ -216,8 +239,8 @@ TEST_F(AstDecoderTest, Float64Const) {
byte code[] = {kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0};
double* ptr = reinterpret_cast<double*>(code + 1);
for (int i = 0; i < 30; i++) {
- *ptr = i * 33.45;
- EXPECT_VERIFIES(sigs.d_dd(), code);
+ WriteLittleEndianValue<double>(ptr, i * 33.45);
+ EXPECT_VERIFIES_C(d_dd, code);
}
}
@@ -230,19 +253,24 @@ TEST_F(AstDecoderTest, Int32Const_off_end) {
}
TEST_F(AstDecoderTest, GetLocal0_param) {
- EXPECT_VERIFIES(sigs.i_i(), kCodeGetLocal0);
+ EXPECT_VERIFIES_C(i_i, kCodeGetLocal0);
}
TEST_F(AstDecoderTest, GetLocal0_local) {
AddLocals(kAstI32, 1);
- EXPECT_VERIFIES(sigs.i_v(), kCodeGetLocal0);
+ EXPECT_VERIFIES_C(i_v, kCodeGetLocal0);
+}
+
+TEST_F(AstDecoderTest, TooManyLocals) {
+ AddLocals(kAstI32, 4034986500);
+ EXPECT_FAILURE_C(i_v, kCodeGetLocal0);
}
TEST_F(AstDecoderTest, GetLocal0_param_n) {
FunctionSig* array[] = {sigs.i_i(), sigs.i_ii(), sigs.i_iii()};
for (size_t i = 0; i < arraysize(array); i++) {
- EXPECT_VERIFIES(array[i], kCodeGetLocal0);
+ EXPECT_VERIFIES_SC(array[i], kCodeGetLocal0);
}
}
@@ -251,540 +279,618 @@ TEST_F(AstDecoderTest, GetLocalN_local) {
AddLocals(kAstI32, 1);
for (byte j = 0; j < i; j++) {
byte code[] = {kExprGetLocal, j};
- EXPECT_VERIFIES(sigs.i_v(), code);
+ EXPECT_VERIFIES_C(i_v, code);
}
}
}
TEST_F(AstDecoderTest, GetLocal0_fail_no_params) {
- EXPECT_FAILURE(sigs.i_v(), kCodeGetLocal0);
+ EXPECT_FAILURE_C(i_v, kCodeGetLocal0);
}
TEST_F(AstDecoderTest, GetLocal1_fail_no_locals) {
- EXPECT_FAILURE(sigs.i_i(), kCodeGetLocal1);
+ EXPECT_FAILURE_C(i_i, kCodeGetLocal1);
}
TEST_F(AstDecoderTest, GetLocal_off_end) {
static const byte code[] = {kExprGetLocal};
- EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE_C(i_i, code);
+}
+
+TEST_F(AstDecoderTest, NumLocalBelowLimit) {
+ AddLocals(kAstI32, kMaxNumWasmLocals - 1);
+ EXPECT_VERIFIES(v_v, WASM_NOP);
+}
+
+TEST_F(AstDecoderTest, NumLocalAtLimit) {
+ AddLocals(kAstI32, kMaxNumWasmLocals);
+ EXPECT_VERIFIES(v_v, WASM_NOP);
+}
+
+TEST_F(AstDecoderTest, NumLocalAboveLimit) {
+ AddLocals(kAstI32, kMaxNumWasmLocals + 1);
+ EXPECT_FAILURE(v_v, WASM_NOP);
}
TEST_F(AstDecoderTest, GetLocal_varint) {
- const int kMaxLocals = 8000000;
+ const int kMaxLocals = kMaxNumWasmLocals;
AddLocals(kAstI32, kMaxLocals);
- for (int index = 0; index < kMaxLocals; index = index * 11 + 5) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_1(index));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_2(index));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_3(index));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_4(index));
- }
+ EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_1(66));
+ EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_2(7777));
+ EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_3(888888));
+ EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_4(3999999));
+
+ EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_5(kMaxLocals - 1));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_5(kMaxLocals - 1));
+ EXPECT_FAILURE(i_i, kExprGetLocal, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
- EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_4(kMaxLocals - 1));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), kExprGetLocal, U32V_4(kMaxLocals));
- EXPECT_FAILURE_INLINE(sigs.i_i(), kExprGetLocal, U32V_4(kMaxLocals + 1));
+ EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_4(kMaxLocals - 1));
+ EXPECT_VERIFIES(i_i, kExprGetLocal, U32V_4(kMaxLocals));
+ EXPECT_FAILURE(i_i, kExprGetLocal, U32V_4(kMaxLocals + 1));
- EXPECT_FAILURE_INLINE(sigs.i_v(), kExprGetLocal, U32V_4(kMaxLocals));
- EXPECT_FAILURE_INLINE(sigs.i_v(), kExprGetLocal, U32V_4(kMaxLocals + 1));
+ EXPECT_FAILURE(i_v, kExprGetLocal, U32V_4(kMaxLocals));
+ EXPECT_FAILURE(i_v, kExprGetLocal, U32V_4(kMaxLocals + 1));
}
TEST_F(AstDecoderTest, Binops_off_end) {
byte code1[] = {0}; // [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
code1[0] = kInt32BinopOpcodes[i];
- EXPECT_FAILURE(sigs.i_i(), code1);
+ EXPECT_FAILURE_C(i_i, code1);
}
byte code3[] = {kExprGetLocal, 0, 0}; // [expr] [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
code3[2] = kInt32BinopOpcodes[i];
- EXPECT_FAILURE(sigs.i_i(), code3);
+ EXPECT_FAILURE_C(i_i, code3);
}
byte code4[] = {kExprGetLocal, 0, 0, 0}; // [expr] [opcode] [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
code4[2] = kInt32BinopOpcodes[i];
code4[3] = kInt32BinopOpcodes[i];
- EXPECT_FAILURE(sigs.i_i(), code4);
+ EXPECT_FAILURE_C(i_i, code4);
}
}
TEST_F(AstDecoderTest, BinopsAcrossBlock1) {
static const byte code[] = {WASM_ZERO, kExprBlock, WASM_ZERO, kExprI32Add,
kExprEnd};
- EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE_C(i_i, code);
}
TEST_F(AstDecoderTest, BinopsAcrossBlock2) {
static const byte code[] = {WASM_ZERO, WASM_ZERO, kExprBlock, kExprI32Add,
kExprEnd};
- EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE_C(i_i, code);
}
TEST_F(AstDecoderTest, BinopsAcrossBlock3) {
static const byte code[] = {WASM_ZERO, WASM_ZERO, kExprIf, kExprI32Add,
kExprElse, kExprI32Add, kExprEnd};
- EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE_C(i_i, code);
}
TEST_F(AstDecoderTest, Nop) {
static const byte code[] = {kExprNop};
- EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_VERIFIES_C(v_v, code);
+}
+
+TEST_F(AstDecoderTest, SetLocal0_void) {
+ EXPECT_FAILURE(i_i, WASM_SET_LOCAL(0, WASM_ZERO));
}
TEST_F(AstDecoderTest, SetLocal0_param) {
- EXPECT_VERIFIES(sigs.i_i(), kCodeSetLocal0);
- EXPECT_FAILURE(sigs.f_ff(), kCodeSetLocal0);
- EXPECT_FAILURE(sigs.d_dd(), kCodeSetLocal0);
+ EXPECT_FAILURE_C(i_i, kCodeSetLocal0);
+ EXPECT_FAILURE_C(f_ff, kCodeSetLocal0);
+ EXPECT_FAILURE_C(d_dd, kCodeSetLocal0);
+}
+
+TEST_F(AstDecoderTest, TeeLocal0_param) {
+ EXPECT_VERIFIES_C(i_i, kCodeTeeLocal0);
+ EXPECT_FAILURE_C(f_ff, kCodeTeeLocal0);
+ EXPECT_FAILURE_C(d_dd, kCodeTeeLocal0);
}
TEST_F(AstDecoderTest, SetLocal0_local) {
- EXPECT_FAILURE(sigs.i_v(), kCodeSetLocal0);
+ EXPECT_FAILURE_C(i_v, kCodeSetLocal0);
+ EXPECT_FAILURE_C(v_v, kCodeSetLocal0);
+ AddLocals(kAstI32, 1);
+ EXPECT_FAILURE_C(i_v, kCodeSetLocal0);
+ EXPECT_VERIFIES_C(v_v, kCodeSetLocal0);
+}
+
+TEST_F(AstDecoderTest, TeeLocal0_local) {
+ EXPECT_FAILURE_C(i_v, kCodeTeeLocal0);
AddLocals(kAstI32, 1);
- EXPECT_VERIFIES(sigs.i_v(), kCodeSetLocal0);
+ EXPECT_VERIFIES_C(i_v, kCodeTeeLocal0);
}
-TEST_F(AstDecoderTest, SetLocalN_local) {
+TEST_F(AstDecoderTest, TeeLocalN_local) {
for (byte i = 1; i < 8; i++) {
AddLocals(kAstI32, 1);
for (byte j = 0; j < i; j++) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_SET_LOCAL(j, WASM_I8(i)));
+ EXPECT_FAILURE(v_v, WASM_TEE_LOCAL(j, WASM_I8(i)));
+ EXPECT_VERIFIES(i_i, WASM_TEE_LOCAL(j, WASM_I8(i)));
}
}
}
TEST_F(AstDecoderTest, BlockN) {
const int kMaxSize = 200;
- byte buffer[kMaxSize + 2];
+ byte buffer[kMaxSize + 3];
for (int i = 0; i <= kMaxSize; i++) {
memset(buffer, kExprNop, sizeof(buffer));
buffer[0] = kExprBlock;
- buffer[i + 1] = kExprEnd;
- Verify(kSuccess, sigs.v_i(), buffer, buffer + i + 2);
+ buffer[1] = kLocalVoid;
+ buffer[i + 2] = kExprEnd;
+ Verify(kSuccess, sigs.v_i(), buffer, buffer + i + 3);
}
}
+#define WASM_EMPTY_BLOCK kExprBlock, kLocalVoid, kExprEnd
+
TEST_F(AstDecoderTest, Block0) {
- static const byte code[] = {kExprBlock, kExprEnd};
- EXPECT_VERIFIES(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
+ static const byte code[] = {WASM_EMPTY_BLOCK};
+ EXPECT_VERIFIES_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
}
TEST_F(AstDecoderTest, Block0_fallthru1) {
- static const byte code[] = {kExprBlock, kExprBlock, kExprEnd, kExprEnd};
- EXPECT_VERIFIES(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
+ static const byte code[] = {WASM_BLOCK(WASM_EMPTY_BLOCK)};
+ EXPECT_VERIFIES_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
}
TEST_F(AstDecoderTest, Block0Block0) {
- static const byte code[] = {kExprBlock, kExprEnd, kExprBlock, kExprEnd};
- EXPECT_VERIFIES(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
+ static const byte code[] = {WASM_EMPTY_BLOCK, WASM_EMPTY_BLOCK};
+ EXPECT_VERIFIES_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
+}
+
+TEST_F(AstDecoderTest, Block0_end) {
+ EXPECT_VERIFIES(v_v, WASM_EMPTY_BLOCK, kExprEnd);
}
TEST_F(AstDecoderTest, Block0_end_end) {
- static const byte code[] = {kExprBlock, kExprEnd, kExprEnd};
- EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(v_v, WASM_EMPTY_BLOCK, kExprEnd, kExprEnd);
}
TEST_F(AstDecoderTest, Block1) {
- byte code[] = {B1(WASM_SET_LOCAL(0, WASM_ZERO))};
- EXPECT_VERIFIES(sigs.i_i(), code);
- EXPECT_VERIFIES(sigs.v_i(), code);
- EXPECT_FAILURE(sigs.d_dd(), code);
+ byte code[] = {WASM_BLOCK_I(WASM_GET_LOCAL(0))};
+ EXPECT_VERIFIES_C(i_i, code);
+ EXPECT_FAILURE_C(v_i, code);
+ EXPECT_FAILURE_C(d_dd, code);
+ EXPECT_FAILURE_C(i_f, code);
+ EXPECT_FAILURE_C(i_d, code);
}
TEST_F(AstDecoderTest, Block1_i) {
- byte code[] = {B1(WASM_ZERO)};
- EXPECT_VERIFIES(sigs.i_i(), code);
- EXPECT_FAILURE(sigs.f_ff(), code);
- EXPECT_FAILURE(sigs.d_dd(), code);
- EXPECT_FAILURE(sigs.l_ll(), code);
+ byte code[] = {WASM_BLOCK_I(WASM_ZERO)};
+ EXPECT_VERIFIES_C(i_i, code);
+ EXPECT_FAILURE_C(f_ff, code);
+ EXPECT_FAILURE_C(d_dd, code);
+ EXPECT_FAILURE_C(l_ll, code);
}
TEST_F(AstDecoderTest, Block1_f) {
- byte code[] = {B1(WASM_F32(0))};
- EXPECT_FAILURE(sigs.i_i(), code);
- EXPECT_VERIFIES(sigs.f_ff(), code);
- EXPECT_FAILURE(sigs.d_dd(), code);
- EXPECT_FAILURE(sigs.l_ll(), code);
+ byte code[] = {WASM_BLOCK_F(WASM_F32(0))};
+ EXPECT_FAILURE_C(i_i, code);
+ EXPECT_VERIFIES_C(f_ff, code);
+ EXPECT_FAILURE_C(d_dd, code);
+ EXPECT_FAILURE_C(l_ll, code);
}
TEST_F(AstDecoderTest, Block1_continue) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BR(0)));
- EXPECT_FAILURE_INLINE(sigs.v_v(), B1(WASM_BR(1)));
- EXPECT_FAILURE_INLINE(sigs.v_v(), B1(WASM_BR(2)));
- EXPECT_FAILURE_INLINE(sigs.v_v(), B1(WASM_BR(3)));
+ EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_BR(0)));
}
TEST_F(AstDecoderTest, Block1_br) {
- EXPECT_FAILURE_INLINE(sigs.v_v(), kExprBlock, kExprBr, ARITY_1, DEPTH_0,
- kExprEnd);
- EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprBlock, kExprBr, ARITY_0, DEPTH_0,
- kExprEnd);
+ EXPECT_VERIFIES(v_v, B1(WASM_BR(0)));
+ EXPECT_VERIFIES(v_v, B1(WASM_BR(1)));
+ EXPECT_FAILURE(v_v, B1(WASM_BR(2)));
}
TEST_F(AstDecoderTest, Block2_br) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_NOP, WASM_BR(0)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_BR(0), WASM_NOP));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_BR(0), WASM_BR(0)));
+ EXPECT_VERIFIES(v_v, B2(WASM_NOP, WASM_BR(0)));
+ EXPECT_VERIFIES(v_v, B2(WASM_BR(0), WASM_NOP));
+ EXPECT_VERIFIES(v_v, B2(WASM_BR(0), WASM_BR(0)));
}
TEST_F(AstDecoderTest, Block2) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(),
- B2(WASM_NOP, WASM_SET_LOCAL(0, WASM_ZERO)));
- EXPECT_FAILURE_INLINE(sigs.i_i(), B2(WASM_SET_LOCAL(0, WASM_ZERO), WASM_NOP));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), B2(WASM_SET_LOCAL(0, WASM_ZERO),
- WASM_SET_LOCAL(0, WASM_ZERO)));
+ EXPECT_FAILURE(i_i, WASM_BLOCK(WASM_NOP, WASM_NOP));
+ EXPECT_FAILURE(i_i, WASM_BLOCK_I(WASM_NOP, WASM_NOP));
+ EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_NOP, WASM_ZERO));
+ EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_ZERO, WASM_NOP));
+ EXPECT_FAILURE(i_i, WASM_BLOCK_I(WASM_ZERO, WASM_ZERO));
}
TEST_F(AstDecoderTest, Block2b) {
- byte code[] = {B2(WASM_SET_LOCAL(0, WASM_ZERO), WASM_ZERO)};
- EXPECT_VERIFIES(sigs.i_i(), code);
- EXPECT_FAILURE(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.f_ff(), code);
+ byte code[] = {WASM_BLOCK_I(WASM_SET_LOCAL(0, WASM_ZERO), WASM_ZERO)};
+ EXPECT_VERIFIES_C(i_i, code);
+ EXPECT_FAILURE_C(v_v, code);
+ EXPECT_FAILURE_C(f_ff, code);
}
TEST_F(AstDecoderTest, Block2_fallthru) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(), B2(WASM_SET_LOCAL(0, WASM_ZERO),
- WASM_SET_LOCAL(0, WASM_ZERO)),
- WASM_I8(23));
+ EXPECT_VERIFIES(
+ i_i, B2(WASM_SET_LOCAL(0, WASM_ZERO), WASM_SET_LOCAL(0, WASM_ZERO)),
+ WASM_I8(23));
}
TEST_F(AstDecoderTest, Block3) {
- EXPECT_VERIFIES_INLINE(
- sigs.i_i(), B3(WASM_SET_LOCAL(0, WASM_ZERO), WASM_SET_LOCAL(0, WASM_ZERO),
- WASM_I8(11)));
+ EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO), WASM_I8(11)));
}
TEST_F(AstDecoderTest, Block5) {
- EXPECT_VERIFIES_INLINE(sigs.v_i(), B1(WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(v_i, WASM_BLOCK(WASM_ZERO));
+
+ EXPECT_FAILURE(v_i, WASM_BLOCK(WASM_ZERO, WASM_ZERO));
+
+ EXPECT_FAILURE(v_i, WASM_BLOCK(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+
+ EXPECT_FAILURE(v_i, WASM_BLOCK(WASM_ZERO, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+
+ EXPECT_FAILURE(
+ v_i, WASM_BLOCK(WASM_ZERO, WASM_ZERO, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+}
+
+TEST_F(AstDecoderTest, BlockType) {
+ EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES(l_l, WASM_BLOCK_L(WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES(f_f, WASM_BLOCK_F(WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES(d_d, WASM_BLOCK_D(WASM_GET_LOCAL(0)));
+}
- EXPECT_VERIFIES_INLINE(sigs.v_i(), B2(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+TEST_F(AstDecoderTest, BlockType_fail) {
+ EXPECT_FAILURE(i_i, WASM_BLOCK_L(WASM_I64V_1(0)));
+ EXPECT_FAILURE(i_i, WASM_BLOCK_F(WASM_F32(0.0)));
+ EXPECT_FAILURE(i_i, WASM_BLOCK_D(WASM_F64(1.1)));
- EXPECT_VERIFIES_INLINE(
- sigs.v_i(), B3(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(l_l, WASM_BLOCK_I(WASM_ZERO));
+ EXPECT_FAILURE(l_l, WASM_BLOCK_F(WASM_F32(0.0)));
+ EXPECT_FAILURE(l_l, WASM_BLOCK_D(WASM_F64(1.1)));
- EXPECT_VERIFIES_INLINE(sigs.v_i(),
- WASM_BLOCK(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(f_ff, WASM_BLOCK_I(WASM_ZERO));
+ EXPECT_FAILURE(f_ff, WASM_BLOCK_L(WASM_I64V_1(0)));
+ EXPECT_FAILURE(f_ff, WASM_BLOCK_D(WASM_F64(1.1)));
- EXPECT_VERIFIES_INLINE(
- sigs.v_i(),
- WASM_BLOCK(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(d_dd, WASM_BLOCK_I(WASM_ZERO));
+ EXPECT_FAILURE(d_dd, WASM_BLOCK_L(WASM_I64V_1(0)));
+ EXPECT_FAILURE(d_dd, WASM_BLOCK_F(WASM_F32(0.0)));
}
TEST_F(AstDecoderTest, BlockF32) {
- static const byte code[] = {kExprBlock, kExprF32Const, 0, 0, 0, 0, kExprEnd};
- EXPECT_VERIFIES(sigs.f_ff(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
- EXPECT_FAILURE(sigs.d_dd(), code);
+ static const byte code[] = {WASM_BLOCK_F(kExprF32Const, 0, 0, 0, 0)};
+ EXPECT_VERIFIES_C(f_ff, code);
+ EXPECT_FAILURE_C(i_i, code);
+ EXPECT_FAILURE_C(d_dd, code);
}
TEST_F(AstDecoderTest, BlockN_off_end) {
- byte code[] = {kExprBlock, kExprNop, kExprNop, kExprNop, kExprNop, kExprEnd};
- EXPECT_VERIFIES(sigs.v_v(), code);
+ byte code[] = {WASM_BLOCK(kExprNop, kExprNop, kExprNop, kExprNop)};
+ EXPECT_VERIFIES_C(v_v, code);
for (size_t i = 1; i < arraysize(code); i++) {
Verify(kError, sigs.v_v(), code, code + i);
}
}
TEST_F(AstDecoderTest, Block2_continue) {
- static const byte code[] = {kExprBlock, kExprBr, ARITY_0,
- DEPTH_1, kExprNop, kExprEnd};
- EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_NOP, WASM_BR(0)));
+ EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_NOP, WASM_BR(1)));
+ EXPECT_FAILURE(v_v, WASM_LOOP(WASM_NOP, WASM_BR(2)));
}
-TEST_F(AstDecoderTest, NestedBlock_return) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(), B1(B1(WASM_RETURN1(WASM_ZERO))));
+TEST_F(AstDecoderTest, Block3_continue) {
+ EXPECT_VERIFIES(v_v, B1(WASM_LOOP(WASM_NOP, WASM_BR(0))));
+ EXPECT_VERIFIES(v_v, B1(WASM_LOOP(WASM_NOP, WASM_BR(1))));
+ EXPECT_VERIFIES(v_v, B1(WASM_LOOP(WASM_NOP, WASM_BR(2))));
+ EXPECT_FAILURE(v_v, B1(WASM_LOOP(WASM_NOP, WASM_BR(3))));
}
-TEST_F(AstDecoderTest, BlockBinop) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_I32_AND(B1(WASM_I8(1)), WASM_I8(2)));
+TEST_F(AstDecoderTest, NestedBlock_return) {
+ EXPECT_VERIFIES(i_i, B1(B1(WASM_RETURN1(WASM_ZERO))));
}
TEST_F(AstDecoderTest, BlockBrBinop) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(),
- WASM_I32_AND(B1(WASM_BRV(0, WASM_I8(1))), WASM_I8(2)));
+ EXPECT_VERIFIES(
+ i_i, WASM_I32_AND(WASM_BLOCK_I(WASM_BRV(0, WASM_I8(1))), WASM_I8(2)));
}
TEST_F(AstDecoderTest, If_empty1) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_ZERO, kExprIf, kExprEnd);
+ EXPECT_VERIFIES(v_v, WASM_ZERO, WASM_IF_OP, kExprEnd);
}
TEST_F(AstDecoderTest, If_empty2) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_ZERO, kExprIf, kExprElse, kExprEnd);
+ EXPECT_VERIFIES(v_v, WASM_ZERO, WASM_IF_OP, kExprElse, kExprEnd);
}
TEST_F(AstDecoderTest, If_empty3) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_ZERO, kExprIf, WASM_ZERO, kExprElse,
- kExprEnd);
+ EXPECT_VERIFIES(v_v, WASM_ZERO, WASM_IF_OP, WASM_NOP, kExprElse, kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_ZERO, WASM_IF_OP, WASM_ZERO, kExprElse, kExprEnd);
}
TEST_F(AstDecoderTest, If_empty4) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_ZERO, kExprIf, kExprElse, WASM_ZERO,
- kExprEnd);
+ EXPECT_VERIFIES(v_v, WASM_ZERO, WASM_IF_OP, kExprElse, WASM_NOP, kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_ZERO, WASM_IF_OP, kExprElse, WASM_ZERO, kExprEnd);
}
TEST_F(AstDecoderTest, If_empty_stack) {
byte code[] = {kExprIf};
- EXPECT_FAILURE(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
}
TEST_F(AstDecoderTest, If_incomplete1) {
byte code[] = {kExprI8Const, 0, kExprIf};
- EXPECT_FAILURE(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
}
TEST_F(AstDecoderTest, If_incomplete2) {
byte code[] = {kExprI8Const, 0, kExprIf, kExprNop};
- EXPECT_FAILURE(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
}
TEST_F(AstDecoderTest, If_else_else) {
- byte code[] = {kExprI8Const, 0, kExprIf, kExprElse, kExprElse, kExprEnd};
- EXPECT_FAILURE(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
+ byte code[] = {kExprI8Const, 0, WASM_IF_OP, kExprElse, kExprElse, kExprEnd};
+ EXPECT_FAILURE_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
}
TEST_F(AstDecoderTest, IfEmpty) {
- EXPECT_VERIFIES_INLINE(sigs.v_i(), kExprGetLocal, 0, kExprIf, kExprEnd);
+ EXPECT_VERIFIES(v_i, kExprGetLocal, 0, WASM_IF_OP, kExprEnd);
}
TEST_F(AstDecoderTest, IfSet) {
- EXPECT_VERIFIES_INLINE(
- sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(
- sigs.v_i(),
- WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO), WASM_NOP));
+ EXPECT_VERIFIES(v_i,
+ WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO)));
+ EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0),
+ WASM_SET_LOCAL(0, WASM_ZERO), WASM_NOP));
}
TEST_F(AstDecoderTest, IfElseEmpty) {
- EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_GET_LOCAL(0), kExprIf, kExprElse,
- kExprEnd);
- EXPECT_VERIFIES_INLINE(sigs.v_i(),
- WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
+ EXPECT_VERIFIES(v_i, WASM_GET_LOCAL(0), WASM_IF_OP, kExprElse, kExprEnd);
+ EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
}
TEST_F(AstDecoderTest, IfElseUnreachable1) {
- EXPECT_VERIFIES_INLINE(
- sigs.i_i(),
- WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_UNREACHABLE, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(
- sigs.i_i(),
- WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_UNREACHABLE));
+ EXPECT_VERIFIES(i_i, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_UNREACHABLE,
+ WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES(i_i, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_UNREACHABLE));
}
TEST_F(AstDecoderTest, IfElseUnreachable2) {
static const byte code[] = {
- WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_UNREACHABLE, WASM_GET_LOCAL(0))};
+ WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_UNREACHABLE, WASM_GET_LOCAL(0))};
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType types[] = {kAstI32, kLocalTypes[i]};
FunctionSig sig(1, 1, types);
if (kLocalTypes[i] == kAstI32) {
- EXPECT_VERIFIES(&sig, code);
+ EXPECT_VERIFIES_SC(&sig, code);
} else {
- EXPECT_FAILURE(&sig, code);
+ EXPECT_FAILURE_SC(&sig, code);
}
}
}
TEST_F(AstDecoderTest, IfBreak) {
- EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), WASM_BR(0)));
- EXPECT_FAILURE_INLINE(sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), WASM_BR(1)));
+ EXPECT_VERIFIES(v_i, WASM_IF(WASM_GET_LOCAL(0), WASM_BR(0)));
+ EXPECT_VERIFIES(v_i, WASM_IF(WASM_GET_LOCAL(0), WASM_BR(1)));
+ EXPECT_FAILURE(v_i, WASM_IF(WASM_GET_LOCAL(0), WASM_BR(2)));
}
TEST_F(AstDecoderTest, IfElseBreak) {
- EXPECT_VERIFIES_INLINE(sigs.v_i(),
- WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(0)));
- EXPECT_FAILURE_INLINE(sigs.v_i(),
- WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(1)));
+ EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(0)));
+ EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(1)));
+ EXPECT_FAILURE(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(2)));
}
TEST_F(AstDecoderTest, Block_else) {
byte code[] = {kExprI8Const, 0, kExprBlock, kExprElse, kExprEnd};
- EXPECT_FAILURE(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
}
TEST_F(AstDecoderTest, IfNop) {
- EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), WASM_NOP));
+ EXPECT_VERIFIES(v_i, WASM_IF(WASM_GET_LOCAL(0), WASM_NOP));
}
TEST_F(AstDecoderTest, IfNopElseNop) {
- EXPECT_VERIFIES_INLINE(sigs.v_i(),
- WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
+ EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
}
TEST_F(AstDecoderTest, If_end_end) {
- static const byte code[] = {kExprGetLocal, 0, kExprIf, kExprEnd, kExprEnd};
- EXPECT_FAILURE(sigs.v_i(), code);
+ static const byte code[] = {kExprGetLocal, 0, WASM_IF_OP, kExprEnd, kExprEnd};
+ EXPECT_VERIFIES_C(v_i, code);
}
-TEST_F(AstDecoderTest, If_falloff) {
+TEST_F(AstDecoderTest, If_end_end_end) {
+ static const byte code[] = {kExprGetLocal, 0, WASM_IF_OP,
+ kExprEnd, kExprEnd, kExprEnd};
+ EXPECT_FAILURE_C(v_i, code);
+}
+
+TEST_F(AstDecoderTest, If_falloff1) {
static const byte code[] = {kExprGetLocal, 0, kExprIf};
- EXPECT_FAILURE(sigs.v_i(), code);
+ EXPECT_FAILURE_C(v_i, code);
+}
+
+TEST_F(AstDecoderTest, If_falloff2) {
+ static const byte code[] = {kExprGetLocal, 0, WASM_IF_OP};
+ EXPECT_FAILURE_C(v_i, code);
}
TEST_F(AstDecoderTest, IfElse_falloff) {
- static const byte code[] = {kExprGetLocal, 0, kExprIf, kExprNop, kExprElse};
- EXPECT_FAILURE(sigs.v_i(), code);
+ static const byte code[] = {kExprGetLocal, 0, WASM_IF_OP, kExprNop,
+ kExprElse};
+ EXPECT_FAILURE_C(v_i, code);
}
TEST_F(AstDecoderTest, IfElseNop) {
- EXPECT_VERIFIES_INLINE(
- sigs.v_i(),
- WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO), WASM_NOP));
+ EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0),
+ WASM_SET_LOCAL(0, WASM_ZERO), WASM_NOP));
}
TEST_F(AstDecoderTest, IfBlock1) {
- EXPECT_VERIFIES_INLINE(
- sigs.v_i(), WASM_IF_ELSE(WASM_GET_LOCAL(0),
- B1(WASM_SET_LOCAL(0, WASM_ZERO)), WASM_NOP));
+ EXPECT_VERIFIES(
+ v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), B1(WASM_SET_LOCAL(0, WASM_ZERO)),
+ WASM_NOP));
}
TEST_F(AstDecoderTest, IfBlock1b) {
- EXPECT_VERIFIES_INLINE(
- sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), B1(WASM_SET_LOCAL(0, WASM_ZERO))));
+ EXPECT_VERIFIES(v_i,
+ WASM_IF(WASM_GET_LOCAL(0), B1(WASM_SET_LOCAL(0, WASM_ZERO))));
}
TEST_F(AstDecoderTest, IfBlock2a) {
- EXPECT_VERIFIES_INLINE(
- sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), B2(WASM_SET_LOCAL(0, WASM_ZERO),
+ EXPECT_VERIFIES(v_i,
+ WASM_IF(WASM_GET_LOCAL(0), B2(WASM_SET_LOCAL(0, WASM_ZERO),
WASM_SET_LOCAL(0, WASM_ZERO))));
}
TEST_F(AstDecoderTest, IfBlock2b) {
- EXPECT_VERIFIES_INLINE(
- sigs.v_i(),
- WASM_IF_ELSE(WASM_GET_LOCAL(0), B2(WASM_SET_LOCAL(0, WASM_ZERO),
- WASM_SET_LOCAL(0, WASM_ZERO)),
- WASM_NOP));
+ EXPECT_VERIFIES(
+ v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), B2(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO)),
+ WASM_NOP));
}
TEST_F(AstDecoderTest, IfElseSet) {
- EXPECT_VERIFIES_INLINE(
- sigs.v_i(), WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO),
+ EXPECT_VERIFIES(v_i,
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO),
WASM_SET_LOCAL(0, WASM_I8(1))));
}
-TEST_F(AstDecoderTest, Loop0) {
- static const byte code[] = {kExprLoop, kExprEnd};
- EXPECT_VERIFIES(sigs.v_v(), code);
-}
+TEST_F(AstDecoderTest, Loop0) { EXPECT_VERIFIES(v_v, WASM_LOOP_OP, kExprEnd); }
TEST_F(AstDecoderTest, Loop1) {
static const byte code[] = {WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO))};
- EXPECT_VERIFIES(sigs.v_i(), code);
- EXPECT_FAILURE(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.f_ff(), code);
+ EXPECT_VERIFIES_C(v_i, code);
+ EXPECT_FAILURE_C(v_v, code);
+ EXPECT_FAILURE_C(f_ff, code);
}
TEST_F(AstDecoderTest, Loop2) {
- EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO),
- WASM_SET_LOCAL(0, WASM_ZERO)));
+ EXPECT_VERIFIES(v_i, WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO)));
}
TEST_F(AstDecoderTest, Loop1_continue) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(0)));
+ EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_BR(0)));
}
TEST_F(AstDecoderTest, Loop1_break) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(1)));
+ EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_BR(1)));
}
TEST_F(AstDecoderTest, Loop2_continue) {
- EXPECT_VERIFIES_INLINE(sigs.v_i(),
- WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO), WASM_BR(0)));
+ EXPECT_VERIFIES(v_i, WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO), WASM_BR(0)));
}
TEST_F(AstDecoderTest, Loop2_break) {
- EXPECT_VERIFIES_INLINE(sigs.v_i(),
- WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO), WASM_BR(1)));
+ EXPECT_VERIFIES(v_i, WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO), WASM_BR(1)));
+}
+
+TEST_F(AstDecoderTest, InfiniteLoop) {
+ EXPECT_VERIFIES(i_i, WASM_LOOP(WASM_BR(0)));
+ EXPECT_VERIFIES(i_i, WASM_LOOP(WASM_BRV(1, WASM_ZERO)));
}
-TEST_F(AstDecoderTest, ExprLoop0) {
- static const byte code[] = {kExprLoop, kExprEnd};
- EXPECT_VERIFIES(sigs.v_v(), code);
+TEST_F(AstDecoderTest, Loop2_unreachable) {
+ EXPECT_VERIFIES(i_i, WASM_LOOP(WASM_BR(0), WASM_NOP));
}
-TEST_F(AstDecoderTest, ExprLoop1a) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(WASM_BRV(0, WASM_ZERO)));
+TEST_F(AstDecoderTest, LoopType) {
+ EXPECT_VERIFIES(i_i, WASM_LOOP_I(WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES(l_l, WASM_LOOP_L(WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES(f_f, WASM_LOOP_F(WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES(d_d, WASM_LOOP_D(WASM_GET_LOCAL(0)));
}
-TEST_F(AstDecoderTest, ExprLoop1b) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(WASM_BRV(1, WASM_ZERO)));
- EXPECT_FAILURE_INLINE(sigs.f_ff(), WASM_LOOP(WASM_BRV(1, WASM_ZERO)));
+TEST_F(AstDecoderTest, LoopType_void) {
+ EXPECT_FAILURE(v_v, WASM_LOOP_I(WASM_ZERO));
+ EXPECT_FAILURE(v_v, WASM_LOOP_L(WASM_I64V_1(0)));
+ EXPECT_FAILURE(v_v, WASM_LOOP_F(WASM_F32(0.0)));
+ EXPECT_FAILURE(v_v, WASM_LOOP_D(WASM_F64(1.1)));
}
-TEST_F(AstDecoderTest, ExprLoop2_unreachable) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(WASM_BR(0), WASM_NOP));
+TEST_F(AstDecoderTest, LoopType_fail) {
+ EXPECT_FAILURE(i_i, WASM_LOOP_L(WASM_I64V_1(0)));
+ EXPECT_FAILURE(i_i, WASM_LOOP_F(WASM_F32(0.0)));
+ EXPECT_FAILURE(i_i, WASM_LOOP_D(WASM_F64(1.1)));
+
+ EXPECT_FAILURE(l_l, WASM_LOOP_I(WASM_ZERO));
+ EXPECT_FAILURE(l_l, WASM_LOOP_F(WASM_F32(0.0)));
+ EXPECT_FAILURE(l_l, WASM_LOOP_D(WASM_F64(1.1)));
+
+ EXPECT_FAILURE(f_ff, WASM_LOOP_I(WASM_ZERO));
+ EXPECT_FAILURE(f_ff, WASM_LOOP_L(WASM_I64V_1(0)));
+ EXPECT_FAILURE(f_ff, WASM_LOOP_D(WASM_F64(1.1)));
+
+ EXPECT_FAILURE(d_dd, WASM_LOOP_I(WASM_ZERO));
+ EXPECT_FAILURE(d_dd, WASM_LOOP_L(WASM_I64V_1(0)));
+ EXPECT_FAILURE(d_dd, WASM_LOOP_F(WASM_F32(0.0)));
}
TEST_F(AstDecoderTest, ReturnVoid1) {
static const byte code[] = {kExprNop};
- EXPECT_VERIFIES(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
- EXPECT_FAILURE(sigs.i_f(), code);
+ EXPECT_VERIFIES_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
+ EXPECT_FAILURE_C(i_f, code);
}
TEST_F(AstDecoderTest, ReturnVoid2) {
- static const byte code[] = {kExprBlock, kExprBr, ARITY_0, DEPTH_0, kExprEnd};
- EXPECT_VERIFIES(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
- EXPECT_FAILURE(sigs.i_f(), code);
+ static const byte code[] = {WASM_BLOCK(WASM_BR(0))};
+ EXPECT_VERIFIES_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
+ EXPECT_FAILURE_C(i_f, code);
}
TEST_F(AstDecoderTest, ReturnVoid3) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprI8Const, 0);
- EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprI32Const, 0, 0, 0, 0);
- EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprI64Const, 0, 0, 0, 0, 0, 0, 0, 0);
- EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprF32Const, 0, 0, 0, 0);
- EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0);
+ EXPECT_FAILURE(v_v, kExprI8Const, 0);
+ EXPECT_FAILURE(v_v, kExprI32Const, 0);
+ EXPECT_FAILURE(v_v, kExprI64Const, 0);
+ EXPECT_FAILURE(v_v, kExprF32Const, 0, 0, 0, 0);
+ EXPECT_FAILURE(v_v, kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0);
- EXPECT_VERIFIES_INLINE(sigs.v_i(), kExprGetLocal, 0);
+ EXPECT_FAILURE(v_i, kExprGetLocal, 0);
}
TEST_F(AstDecoderTest, Unreachable1) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprUnreachable);
- EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprUnreachable, kExprUnreachable);
- EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_UNREACHABLE, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_BR(0), WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_UNREACHABLE, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(0), WASM_ZERO));
+ EXPECT_VERIFIES(v_v, kExprUnreachable);
+ EXPECT_VERIFIES(v_v, kExprUnreachable, kExprUnreachable);
+ EXPECT_VERIFIES(v_v, B2(WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES(v_v, B2(WASM_BR(0), WASM_ZERO));
+ EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_BR(0), WASM_ZERO));
}
TEST_F(AstDecoderTest, Unreachable_binop) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_I32_AND(WASM_ZERO, WASM_UNREACHABLE));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_I32_AND(WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES(i_i, WASM_I32_AND(WASM_ZERO, WASM_UNREACHABLE));
+ EXPECT_VERIFIES(i_i, WASM_I32_AND(WASM_UNREACHABLE, WASM_ZERO));
}
TEST_F(AstDecoderTest, Unreachable_select) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(),
- WASM_SELECT(WASM_UNREACHABLE, WASM_ZERO, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sigs.i_i(),
- WASM_SELECT(WASM_ZERO, WASM_UNREACHABLE, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sigs.i_i(),
- WASM_SELECT(WASM_ZERO, WASM_ZERO, WASM_UNREACHABLE));
+ EXPECT_VERIFIES(i_i, WASM_SELECT(WASM_UNREACHABLE, WASM_ZERO, WASM_ZERO));
+ EXPECT_VERIFIES(i_i, WASM_SELECT(WASM_ZERO, WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES(i_i, WASM_SELECT(WASM_ZERO, WASM_ZERO, WASM_UNREACHABLE));
}
TEST_F(AstDecoderTest, If1) {
- EXPECT_VERIFIES_INLINE(
- sigs.i_i(), WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_I8(9), WASM_I8(8)));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_I8(9),
- WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(
- sigs.i_i(),
- WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_I8(8)));
+ EXPECT_VERIFIES(i_i,
+ WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_I8(9), WASM_I8(8)));
+ EXPECT_VERIFIES(
+ i_i, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_I8(9), WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES(
+ i_i, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_I8(8)));
}
TEST_F(AstDecoderTest, If_off_end) {
@@ -798,55 +904,56 @@ TEST_F(AstDecoderTest, If_off_end) {
TEST_F(AstDecoderTest, If_type1) {
// float|double ? 1 : 2
static const byte kCode[] = {
- WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_I8(0), WASM_I8(2))};
- EXPECT_VERIFIES(sigs.i_i(), kCode);
- EXPECT_FAILURE(sigs.i_f(), kCode);
- EXPECT_FAILURE(sigs.i_d(), kCode);
+ WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_I8(0), WASM_I8(2))};
+ EXPECT_VERIFIES_C(i_i, kCode);
+ EXPECT_FAILURE_C(i_f, kCode);
+ EXPECT_FAILURE_C(i_d, kCode);
}
TEST_F(AstDecoderTest, If_type2) {
// 1 ? float|double : 2
static const byte kCode[] = {
- WASM_IF_ELSE(WASM_I8(1), WASM_GET_LOCAL(0), WASM_I8(1))};
- EXPECT_VERIFIES(sigs.i_i(), kCode);
- EXPECT_FAILURE(sigs.i_f(), kCode);
- EXPECT_FAILURE(sigs.i_d(), kCode);
+ WASM_IF_ELSE_I(WASM_I8(1), WASM_GET_LOCAL(0), WASM_I8(1))};
+ EXPECT_VERIFIES_C(i_i, kCode);
+ EXPECT_FAILURE_C(i_f, kCode);
+ EXPECT_FAILURE_C(i_d, kCode);
}
TEST_F(AstDecoderTest, If_type3) {
// stmt ? 0 : 1
- static const byte kCode[] = {WASM_IF_ELSE(WASM_NOP, WASM_I8(0), WASM_I8(1))};
- EXPECT_FAILURE(sigs.i_i(), kCode);
- EXPECT_FAILURE(sigs.i_f(), kCode);
- EXPECT_FAILURE(sigs.i_d(), kCode);
+ static const byte kCode[] = {
+ WASM_IF_ELSE_I(WASM_NOP, WASM_I8(0), WASM_I8(1))};
+ EXPECT_FAILURE_C(i_i, kCode);
+ EXPECT_FAILURE_C(i_f, kCode);
+ EXPECT_FAILURE_C(i_d, kCode);
}
TEST_F(AstDecoderTest, If_type4) {
// 0 ? stmt : 1
static const byte kCode[] = {
- WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_I8(1))};
- EXPECT_FAILURE(sigs.i_i(), kCode);
- EXPECT_FAILURE(sigs.i_f(), kCode);
- EXPECT_FAILURE(sigs.i_d(), kCode);
+ WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_NOP, WASM_I8(1))};
+ EXPECT_FAILURE_C(i_i, kCode);
+ EXPECT_FAILURE_C(i_f, kCode);
+ EXPECT_FAILURE_C(i_d, kCode);
}
TEST_F(AstDecoderTest, If_type5) {
// 0 ? 1 : stmt
- static const byte kCode[] = {WASM_IF_ELSE(WASM_ZERO, WASM_I8(1), WASM_NOP)};
- EXPECT_FAILURE(sigs.i_i(), kCode);
- EXPECT_FAILURE(sigs.i_f(), kCode);
- EXPECT_FAILURE(sigs.i_d(), kCode);
+ static const byte kCode[] = {WASM_IF_ELSE_I(WASM_ZERO, WASM_I8(1), WASM_NOP)};
+ EXPECT_FAILURE_C(i_i, kCode);
+ EXPECT_FAILURE_C(i_f, kCode);
+ EXPECT_FAILURE_C(i_d, kCode);
}
TEST_F(AstDecoderTest, Int64Local_param) {
- EXPECT_VERIFIES(sigs.l_l(), kCodeGetLocal0);
+ EXPECT_VERIFIES_C(l_l, kCodeGetLocal0);
}
TEST_F(AstDecoderTest, Int64Locals) {
for (byte i = 1; i < 8; i++) {
AddLocals(kAstI64, 1);
for (byte j = 0; j < i; j++) {
- EXPECT_VERIFIES_INLINE(sigs.l_v(), WASM_GET_LOCAL(j));
+ EXPECT_VERIFIES(l_v, WASM_GET_LOCAL(j));
}
}
}
@@ -908,132 +1015,120 @@ TEST_F(AstDecoderTest, TypeConversions) {
}
TEST_F(AstDecoderTest, MacrosStmt) {
- VERIFY(WASM_SET_LOCAL(0, WASM_I32V_3(87348)));
- VERIFY(WASM_STORE_MEM(MachineType::Int32(), WASM_I8(24), WASM_I8(40)));
- VERIFY(WASM_IF(WASM_GET_LOCAL(0), WASM_NOP));
- VERIFY(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
- VERIFY(WASM_NOP);
- VERIFY(B1(WASM_NOP));
- VERIFY(WASM_LOOP(WASM_NOP));
- VERIFY(WASM_LOOP(WASM_BREAK(0)));
- VERIFY(WASM_LOOP(WASM_CONTINUE(0)));
-}
-
-TEST_F(AstDecoderTest, MacrosBreak) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BREAK(0)));
-
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(WASM_BREAKV(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.l_l(), WASM_LOOP(WASM_BREAKV(0, WASM_I64V_1(0))));
- EXPECT_VERIFIES_INLINE(sigs.f_ff(), WASM_LOOP(WASM_BREAKV(0, WASM_F32(0.0))));
- EXPECT_VERIFIES_INLINE(sigs.d_dd(), WASM_LOOP(WASM_BREAKV(0, WASM_F64(0.0))));
+ EXPECT_VERIFIES(v_i, WASM_SET_LOCAL(0, WASM_I32V_3(87348)));
+ EXPECT_VERIFIES(
+ v_i, WASM_STORE_MEM(MachineType::Int32(), WASM_I8(24), WASM_I8(40)));
+ EXPECT_VERIFIES(v_i, WASM_IF(WASM_GET_LOCAL(0), WASM_NOP));
+ EXPECT_VERIFIES(v_i, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
+ EXPECT_VERIFIES(v_v, WASM_NOP);
+ EXPECT_VERIFIES(v_v, B1(WASM_NOP));
+ EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_NOP));
+ EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_BR(0)));
}
TEST_F(AstDecoderTest, MacrosContinue) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_CONTINUE(0)));
+ EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_CONTINUE(0)));
}
TEST_F(AstDecoderTest, MacrosVariadic) {
- VERIFY(B2(WASM_NOP, WASM_NOP));
- VERIFY(B3(WASM_NOP, WASM_NOP, WASM_NOP));
- VERIFY(WASM_LOOP(WASM_NOP, WASM_NOP));
- VERIFY(WASM_LOOP(WASM_NOP, WASM_NOP, WASM_NOP));
+ EXPECT_VERIFIES(v_v, B2(WASM_NOP, WASM_NOP));
+ EXPECT_VERIFIES(v_v, B3(WASM_NOP, WASM_NOP, WASM_NOP));
+ EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_NOP, WASM_NOP));
+ EXPECT_VERIFIES(v_v, WASM_LOOP(WASM_NOP, WASM_NOP, WASM_NOP));
}
TEST_F(AstDecoderTest, MacrosNestedBlocks) {
- VERIFY(B2(WASM_NOP, B2(WASM_NOP, WASM_NOP)));
- VERIFY(B3(WASM_NOP, // --
- B2(WASM_NOP, WASM_NOP), // --
- B2(WASM_NOP, WASM_NOP))); // --
- VERIFY(B1(B1(B2(WASM_NOP, WASM_NOP))));
+ EXPECT_VERIFIES(v_v, B2(WASM_NOP, B2(WASM_NOP, WASM_NOP)));
+ EXPECT_VERIFIES(v_v, B3(WASM_NOP, // --
+ B2(WASM_NOP, WASM_NOP), // --
+ B2(WASM_NOP, WASM_NOP))); // --
+ EXPECT_VERIFIES(v_v, B1(B1(B2(WASM_NOP, WASM_NOP))));
}
TEST_F(AstDecoderTest, MultipleReturn) {
static LocalType kIntTypes5[] = {kAstI32, kAstI32, kAstI32, kAstI32, kAstI32};
FunctionSig sig_ii_v(2, 0, kIntTypes5);
- EXPECT_VERIFIES_INLINE(&sig_ii_v, WASM_RETURNN(2, WASM_ZERO, WASM_ONE));
- EXPECT_FAILURE_INLINE(&sig_ii_v, WASM_RETURNN(1, WASM_ZERO));
+ EXPECT_VERIFIES_S(&sig_ii_v, WASM_RETURNN(2, WASM_ZERO, WASM_ONE));
+ EXPECT_FAILURE_S(&sig_ii_v, WASM_RETURNN(1, WASM_ZERO));
FunctionSig sig_iii_v(3, 0, kIntTypes5);
- EXPECT_VERIFIES_INLINE(&sig_iii_v,
- WASM_RETURNN(3, WASM_ZERO, WASM_ONE, WASM_I8(44)));
- EXPECT_FAILURE_INLINE(&sig_iii_v, WASM_RETURNN(2, WASM_ZERO, WASM_ONE));
+ EXPECT_VERIFIES_S(&sig_iii_v,
+ WASM_RETURNN(3, WASM_ZERO, WASM_ONE, WASM_I8(44)));
+ EXPECT_FAILURE_S(&sig_iii_v, WASM_RETURNN(2, WASM_ZERO, WASM_ONE));
}
TEST_F(AstDecoderTest, MultipleReturn_fallthru) {
static LocalType kIntTypes5[] = {kAstI32, kAstI32, kAstI32, kAstI32, kAstI32};
FunctionSig sig_ii_v(2, 0, kIntTypes5);
- EXPECT_VERIFIES_INLINE(&sig_ii_v, WASM_ZERO, WASM_ONE);
- EXPECT_FAILURE_INLINE(&sig_ii_v, WASM_ZERO);
+ EXPECT_VERIFIES_S(&sig_ii_v, WASM_ZERO, WASM_ONE);
+ EXPECT_FAILURE_S(&sig_ii_v, WASM_ZERO);
FunctionSig sig_iii_v(3, 0, kIntTypes5);
- EXPECT_VERIFIES_INLINE(&sig_iii_v, WASM_ZERO, WASM_ONE, WASM_I8(44));
- EXPECT_FAILURE_INLINE(&sig_iii_v, WASM_ZERO, WASM_ONE);
+ EXPECT_VERIFIES_S(&sig_iii_v, WASM_ZERO, WASM_ONE, WASM_I8(44));
+ EXPECT_FAILURE_S(&sig_iii_v, WASM_ZERO, WASM_ONE);
}
TEST_F(AstDecoderTest, MacrosInt32) {
- VERIFY(WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_I8(12)));
- VERIFY(WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(13)));
- VERIFY(WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_I8(14)));
- VERIFY(WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_I8(15)));
- VERIFY(WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_I8(16)));
- VERIFY(WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_I8(17)));
- VERIFY(WASM_I32_REMU(WASM_GET_LOCAL(0), WASM_I8(18)));
- VERIFY(WASM_I32_AND(WASM_GET_LOCAL(0), WASM_I8(19)));
- VERIFY(WASM_I32_IOR(WASM_GET_LOCAL(0), WASM_I8(20)));
- VERIFY(WASM_I32_XOR(WASM_GET_LOCAL(0), WASM_I8(21)));
- VERIFY(WASM_I32_SHL(WASM_GET_LOCAL(0), WASM_I8(22)));
- VERIFY(WASM_I32_SHR(WASM_GET_LOCAL(0), WASM_I8(23)));
- VERIFY(WASM_I32_SAR(WASM_GET_LOCAL(0), WASM_I8(24)));
- VERIFY(WASM_I32_ROR(WASM_GET_LOCAL(0), WASM_I8(24)));
- VERIFY(WASM_I32_ROL(WASM_GET_LOCAL(0), WASM_I8(24)));
- VERIFY(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(25)));
- VERIFY(WASM_I32_NE(WASM_GET_LOCAL(0), WASM_I8(25)));
-
- VERIFY(WASM_I32_LTS(WASM_GET_LOCAL(0), WASM_I8(26)));
- VERIFY(WASM_I32_LES(WASM_GET_LOCAL(0), WASM_I8(27)));
- VERIFY(WASM_I32_LTU(WASM_GET_LOCAL(0), WASM_I8(28)));
- VERIFY(WASM_I32_LEU(WASM_GET_LOCAL(0), WASM_I8(29)));
-
- VERIFY(WASM_I32_GTS(WASM_GET_LOCAL(0), WASM_I8(26)));
- VERIFY(WASM_I32_GES(WASM_GET_LOCAL(0), WASM_I8(27)));
- VERIFY(WASM_I32_GTU(WASM_GET_LOCAL(0), WASM_I8(28)));
- VERIFY(WASM_I32_GEU(WASM_GET_LOCAL(0), WASM_I8(29)));
+ EXPECT_VERIFIES(i_i, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_I8(12)));
+ EXPECT_VERIFIES(i_i, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(13)));
+ EXPECT_VERIFIES(i_i, WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_I8(14)));
+ EXPECT_VERIFIES(i_i, WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_I8(15)));
+ EXPECT_VERIFIES(i_i, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_I8(16)));
+ EXPECT_VERIFIES(i_i, WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_I8(17)));
+ EXPECT_VERIFIES(i_i, WASM_I32_REMU(WASM_GET_LOCAL(0), WASM_I8(18)));
+ EXPECT_VERIFIES(i_i, WASM_I32_AND(WASM_GET_LOCAL(0), WASM_I8(19)));
+ EXPECT_VERIFIES(i_i, WASM_I32_IOR(WASM_GET_LOCAL(0), WASM_I8(20)));
+ EXPECT_VERIFIES(i_i, WASM_I32_XOR(WASM_GET_LOCAL(0), WASM_I8(21)));
+ EXPECT_VERIFIES(i_i, WASM_I32_SHL(WASM_GET_LOCAL(0), WASM_I8(22)));
+ EXPECT_VERIFIES(i_i, WASM_I32_SHR(WASM_GET_LOCAL(0), WASM_I8(23)));
+ EXPECT_VERIFIES(i_i, WASM_I32_SAR(WASM_GET_LOCAL(0), WASM_I8(24)));
+ EXPECT_VERIFIES(i_i, WASM_I32_ROR(WASM_GET_LOCAL(0), WASM_I8(24)));
+ EXPECT_VERIFIES(i_i, WASM_I32_ROL(WASM_GET_LOCAL(0), WASM_I8(24)));
+ EXPECT_VERIFIES(i_i, WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(25)));
+ EXPECT_VERIFIES(i_i, WASM_I32_NE(WASM_GET_LOCAL(0), WASM_I8(25)));
+
+ EXPECT_VERIFIES(i_i, WASM_I32_LTS(WASM_GET_LOCAL(0), WASM_I8(26)));
+ EXPECT_VERIFIES(i_i, WASM_I32_LES(WASM_GET_LOCAL(0), WASM_I8(27)));
+ EXPECT_VERIFIES(i_i, WASM_I32_LTU(WASM_GET_LOCAL(0), WASM_I8(28)));
+ EXPECT_VERIFIES(i_i, WASM_I32_LEU(WASM_GET_LOCAL(0), WASM_I8(29)));
+
+ EXPECT_VERIFIES(i_i, WASM_I32_GTS(WASM_GET_LOCAL(0), WASM_I8(26)));
+ EXPECT_VERIFIES(i_i, WASM_I32_GES(WASM_GET_LOCAL(0), WASM_I8(27)));
+ EXPECT_VERIFIES(i_i, WASM_I32_GTU(WASM_GET_LOCAL(0), WASM_I8(28)));
+ EXPECT_VERIFIES(i_i, WASM_I32_GEU(WASM_GET_LOCAL(0), WASM_I8(29)));
}
TEST_F(AstDecoderTest, MacrosInt64) {
-#define VERIFY_L_LL(...) EXPECT_VERIFIES_INLINE(sigs.l_ll(), __VA_ARGS__)
-#define VERIFY_I_LL(...) EXPECT_VERIFIES_INLINE(sigs.i_ll(), __VA_ARGS__)
-
- VERIFY_L_LL(WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_I64V_1(12)));
- VERIFY_L_LL(WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_I64V_1(13)));
- VERIFY_L_LL(WASM_I64_MUL(WASM_GET_LOCAL(0), WASM_I64V_1(14)));
- VERIFY_L_LL(WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64V_1(15)));
- VERIFY_L_LL(WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64V_1(16)));
- VERIFY_L_LL(WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_I64V_1(17)));
- VERIFY_L_LL(WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_I64V_1(18)));
- VERIFY_L_LL(WASM_I64_AND(WASM_GET_LOCAL(0), WASM_I64V_1(19)));
- VERIFY_L_LL(WASM_I64_IOR(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
- VERIFY_L_LL(WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_I64V_1(21)));
-
- VERIFY_L_LL(WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(22)));
- VERIFY_L_LL(WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(23)));
- VERIFY_L_LL(WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(24)));
- VERIFY_L_LL(WASM_I64_ROR(WASM_GET_LOCAL(0), WASM_I64V_1(24)));
- VERIFY_L_LL(WASM_I64_ROL(WASM_GET_LOCAL(0), WASM_I64V_1(24)));
-
- VERIFY_I_LL(WASM_I64_LTS(WASM_GET_LOCAL(0), WASM_I64V_1(26)));
- VERIFY_I_LL(WASM_I64_LES(WASM_GET_LOCAL(0), WASM_I64V_1(27)));
- VERIFY_I_LL(WASM_I64_LTU(WASM_GET_LOCAL(0), WASM_I64V_1(28)));
- VERIFY_I_LL(WASM_I64_LEU(WASM_GET_LOCAL(0), WASM_I64V_1(29)));
-
- VERIFY_I_LL(WASM_I64_GTS(WASM_GET_LOCAL(0), WASM_I64V_1(26)));
- VERIFY_I_LL(WASM_I64_GES(WASM_GET_LOCAL(0), WASM_I64V_1(27)));
- VERIFY_I_LL(WASM_I64_GTU(WASM_GET_LOCAL(0), WASM_I64V_1(28)));
- VERIFY_I_LL(WASM_I64_GEU(WASM_GET_LOCAL(0), WASM_I64V_1(29)));
-
- VERIFY_I_LL(WASM_I64_EQ(WASM_GET_LOCAL(0), WASM_I64V_1(25)));
- VERIFY_I_LL(WASM_I64_NE(WASM_GET_LOCAL(0), WASM_I64V_1(25)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_I64V_1(12)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_I64V_1(13)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_MUL(WASM_GET_LOCAL(0), WASM_I64V_1(14)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64V_1(15)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64V_1(16)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_I64V_1(17)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_I64V_1(18)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_AND(WASM_GET_LOCAL(0), WASM_I64V_1(19)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_IOR(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_I64V_1(21)));
+
+ EXPECT_VERIFIES(l_ll, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(22)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(23)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(24)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_ROR(WASM_GET_LOCAL(0), WASM_I64V_1(24)));
+ EXPECT_VERIFIES(l_ll, WASM_I64_ROL(WASM_GET_LOCAL(0), WASM_I64V_1(24)));
+
+ EXPECT_VERIFIES(i_ll, WASM_I64_LTS(WASM_GET_LOCAL(0), WASM_I64V_1(26)));
+ EXPECT_VERIFIES(i_ll, WASM_I64_LES(WASM_GET_LOCAL(0), WASM_I64V_1(27)));
+ EXPECT_VERIFIES(i_ll, WASM_I64_LTU(WASM_GET_LOCAL(0), WASM_I64V_1(28)));
+ EXPECT_VERIFIES(i_ll, WASM_I64_LEU(WASM_GET_LOCAL(0), WASM_I64V_1(29)));
+
+ EXPECT_VERIFIES(i_ll, WASM_I64_GTS(WASM_GET_LOCAL(0), WASM_I64V_1(26)));
+ EXPECT_VERIFIES(i_ll, WASM_I64_GES(WASM_GET_LOCAL(0), WASM_I64V_1(27)));
+ EXPECT_VERIFIES(i_ll, WASM_I64_GTU(WASM_GET_LOCAL(0), WASM_I64V_1(28)));
+ EXPECT_VERIFIES(i_ll, WASM_I64_GEU(WASM_GET_LOCAL(0), WASM_I64V_1(29)));
+
+ EXPECT_VERIFIES(i_ll, WASM_I64_EQ(WASM_GET_LOCAL(0), WASM_I64V_1(25)));
+ EXPECT_VERIFIES(i_ll, WASM_I64_NE(WASM_GET_LOCAL(0), WASM_I64V_1(25)));
}
TEST_F(AstDecoderTest, AllSimpleExpressions) {
@@ -1055,21 +1150,49 @@ TEST_F(AstDecoderTest, AllSimpleExpressions) {
TEST_F(AstDecoderTest, MemorySize) {
byte code[] = {kExprMemorySize};
- EXPECT_VERIFIES(sigs.i_i(), code);
- EXPECT_FAILURE(sigs.f_ff(), code);
-}
-
-TEST_F(AstDecoderTest, GrowMemory) {
- byte code[] = {WASM_UNOP(kExprGrowMemory, WASM_GET_LOCAL(0))};
- EXPECT_VERIFIES(sigs.i_i(), code);
- EXPECT_FAILURE(sigs.i_d(), code);
+ EXPECT_VERIFIES_C(i_i, code);
+ EXPECT_FAILURE_C(f_ff, code);
}
TEST_F(AstDecoderTest, LoadMemOffset) {
for (int offset = 0; offset < 128; offset += 7) {
byte code[] = {kExprI8Const, 0, kExprI32LoadMem, ZERO_ALIGNMENT,
static_cast<byte>(offset)};
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES_C(i_i, code);
+ }
+}
+
+TEST_F(AstDecoderTest, LoadMemAlignment) {
+ struct {
+ WasmOpcode instruction;
+ uint32_t maximum_aligment;
+ } values[] = {
+ {kExprI32LoadMem8U, 0}, // --
+ {kExprI32LoadMem8S, 0}, // --
+ {kExprI32LoadMem16U, 1}, // --
+ {kExprI32LoadMem16S, 1}, // --
+ {kExprI64LoadMem8U, 0}, // --
+ {kExprI64LoadMem8S, 0}, // --
+ {kExprI64LoadMem16U, 1}, // --
+ {kExprI64LoadMem16S, 1}, // --
+ {kExprI64LoadMem32U, 2}, // --
+ {kExprI64LoadMem32S, 2}, // --
+ {kExprI32LoadMem, 2}, // --
+ {kExprI64LoadMem, 3}, // --
+ {kExprF32LoadMem, 2}, // --
+ {kExprF64LoadMem, 3}, // --
+ };
+
+ for (int i = 0; i < arraysize(values); i++) {
+ for (byte alignment = 0; alignment <= 4; alignment++) {
+ byte code[] = {WASM_ZERO, static_cast<byte>(values[i].instruction),
+ alignment, ZERO_OFFSET, WASM_DROP};
+ if (static_cast<uint32_t>(alignment) <= values[i].maximum_aligment) {
+ EXPECT_VERIFIES_C(v_i, code);
+ } else {
+ EXPECT_FAILURE_C(v_i, code);
+ }
+ }
}
}
@@ -1077,10 +1200,15 @@ TEST_F(AstDecoderTest, StoreMemOffset) {
for (int offset = 0; offset < 128; offset += 7) {
byte code[] = {WASM_STORE_MEM_OFFSET(MachineType::Int32(), offset,
WASM_ZERO, WASM_ZERO)};
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES_C(v_i, code);
}
}
+TEST_F(AstDecoderTest, StoreMemOffset_void) {
+ EXPECT_FAILURE(i_i, WASM_STORE_MEM_OFFSET(MachineType::Int32(), 0, WASM_ZERO,
+ WASM_ZERO));
+}
+
#define BYTE0(x) ((x)&0x7F)
#define BYTE1(x) ((x >> 7) & 0x7F)
#define BYTE2(x) ((x >> 14) & 0x7F)
@@ -1092,25 +1220,25 @@ TEST_F(AstDecoderTest, StoreMemOffset) {
#define VARINT4(x) BYTE0(x) | 0x80, BYTE1(x) | 0x80, BYTE2(x) | 0x80, BYTE3(x)
TEST_F(AstDecoderTest, LoadMemOffset_varint) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
- VARINT1(0x45));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
- VARINT2(0x3999));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
- VARINT3(0x344445));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
- VARINT4(0x36666667));
+ EXPECT_VERIFIES(i_i, WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT1(0x45));
+ EXPECT_VERIFIES(i_i, WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT2(0x3999));
+ EXPECT_VERIFIES(i_i, WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT3(0x344445));
+ EXPECT_VERIFIES(i_i, WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT4(0x36666667));
}
TEST_F(AstDecoderTest, StoreMemOffset_varint) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
- ZERO_ALIGNMENT, VARINT1(0x33));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
- ZERO_ALIGNMENT, VARINT2(0x1111));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
- ZERO_ALIGNMENT, VARINT3(0x222222));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
- ZERO_ALIGNMENT, VARINT4(0x44444444));
+ EXPECT_VERIFIES(v_i, WASM_ZERO, WASM_ZERO, kExprI32StoreMem, ZERO_ALIGNMENT,
+ VARINT1(0x33));
+ EXPECT_VERIFIES(v_i, WASM_ZERO, WASM_ZERO, kExprI32StoreMem, ZERO_ALIGNMENT,
+ VARINT2(0x1111));
+ EXPECT_VERIFIES(v_i, WASM_ZERO, WASM_ZERO, kExprI32StoreMem, ZERO_ALIGNMENT,
+ VARINT3(0x222222));
+ EXPECT_VERIFIES(v_i, WASM_ZERO, WASM_ZERO, kExprI32StoreMem, ZERO_ALIGNMENT,
+ VARINT4(0x44444444));
}
TEST_F(AstDecoderTest, AllLoadMemCombinations) {
@@ -1121,9 +1249,9 @@ TEST_F(AstDecoderTest, AllLoadMemCombinations) {
byte code[] = {WASM_LOAD_MEM(mem_type, WASM_ZERO)};
FunctionSig sig(1, 0, &local_type);
if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
- EXPECT_VERIFIES(&sig, code);
+ EXPECT_VERIFIES_SC(&sig, code);
} else {
- EXPECT_FAILURE(&sig, code);
+ EXPECT_FAILURE_SC(&sig, code);
}
}
}
@@ -1137,9 +1265,9 @@ TEST_F(AstDecoderTest, AllStoreMemCombinations) {
byte code[] = {WASM_STORE_MEM(mem_type, WASM_ZERO, WASM_GET_LOCAL(0))};
FunctionSig sig(0, 1, &local_type);
if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
- EXPECT_VERIFIES(&sig, code);
+ EXPECT_VERIFIES_SC(&sig, code);
} else {
- EXPECT_FAILURE(&sig, code);
+ EXPECT_FAILURE_SC(&sig, code);
}
}
}
@@ -1154,8 +1282,8 @@ class TestModuleEnv : public ModuleEnv {
instance = nullptr;
module = &mod;
}
- byte AddGlobal(LocalType type) {
- mod.globals.push_back({0, 0, type, 0, false});
+ byte AddGlobal(LocalType type, bool mutability = true) {
+ mod.globals.push_back({type, mutability, NO_INIT, 0, false, false});
CHECK(mod.globals.size() <= 127);
return static_cast<byte>(mod.globals.size() - 1);
}
@@ -1165,25 +1293,22 @@ class TestModuleEnv : public ModuleEnv {
return static_cast<byte>(mod.signatures.size() - 1);
}
byte AddFunction(FunctionSig* sig) {
- mod.functions.push_back({sig, // sig
- 0, // func_index
- 0, // sig_index
- 0, // name_offset
- 0, // name_length
- 0, // code_start_offset
- 0}); // code_end_offset
+ mod.functions.push_back({sig, // sig
+ 0, // func_index
+ 0, // sig_index
+ 0, // name_offset
+ 0, // name_length
+ 0, // code_start_offset
+ 0, // code_end_offset
+ false, // import
+ false}); // export
CHECK(mod.functions.size() <= 127);
return static_cast<byte>(mod.functions.size() - 1);
}
byte AddImport(FunctionSig* sig) {
- mod.import_table.push_back({sig, // sig
- 0, // sig_index
- 0, // module_name_offset
- 0, // module_name_length
- 0, // function_name_offset
- 0}); // function_name_length
- CHECK(mod.import_table.size() <= 127);
- return static_cast<byte>(mod.import_table.size() - 1);
+ byte result = AddFunction(sig);
+ mod.functions[result].imported = true;
+ return result;
}
private:
@@ -1200,9 +1325,9 @@ TEST_F(AstDecoderTest, SimpleCalls) {
module_env.AddFunction(sigs.i_i());
module_env.AddFunction(sigs.i_ii());
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION0(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_I8(27)));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION2(2, WASM_I8(37), WASM_I8(77)));
+ EXPECT_VERIFIES_S(sig, WASM_CALL_FUNCTION0(0));
+ EXPECT_VERIFIES_S(sig, WASM_CALL_FUNCTION(1, WASM_I8(27)));
+ EXPECT_VERIFIES_S(sig, WASM_CALL_FUNCTION(2, WASM_I8(37), WASM_I8(77)));
}
TEST_F(AstDecoderTest, CallsWithTooFewArguments) {
@@ -1214,9 +1339,9 @@ TEST_F(AstDecoderTest, CallsWithTooFewArguments) {
module_env.AddFunction(sigs.i_ii());
module_env.AddFunction(sigs.f_ff());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION0(0));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_ZERO));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(2, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION0(0));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(1, WASM_ZERO));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(2, WASM_GET_LOCAL(0)));
}
TEST_F(AstDecoderTest, CallsWithMismatchedSigs2) {
@@ -1226,9 +1351,9 @@ TEST_F(AstDecoderTest, CallsWithMismatchedSigs2) {
module_env.AddFunction(sigs.i_i());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_I64V_1(17)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_F32(17.1)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_F64(17.1)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(0, WASM_I64V_1(17)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(0, WASM_F32(17.1)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(0, WASM_F64(17.1)));
}
TEST_F(AstDecoderTest, CallsWithMismatchedSigs3) {
@@ -1238,15 +1363,59 @@ TEST_F(AstDecoderTest, CallsWithMismatchedSigs3) {
module_env.AddFunction(sigs.i_f());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_I8(17)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_I64V_1(27)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_F64(37.2)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(0, WASM_I8(17)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(0, WASM_I64V_1(27)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(0, WASM_F64(37.2)));
module_env.AddFunction(sigs.i_d());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_I8(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_I64V_1(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_F32(17.6)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(1, WASM_I8(16)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(1, WASM_I64V_1(16)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(1, WASM_F32(17.6)));
+}
+
+TEST_F(AstDecoderTest, MultiReturn) {
+ FLAG_wasm_mv_prototype = true;
+ LocalType storage[] = {kAstI32, kAstI32};
+ FunctionSig sig_ii_v(2, 0, storage);
+ FunctionSig sig_v_ii(0, 2, storage);
+ TestModuleEnv module_env;
+ module = &module_env;
+
+ module_env.AddFunction(&sig_v_ii);
+ module_env.AddFunction(&sig_ii_v);
+
+ EXPECT_VERIFIES_S(&sig_ii_v, WASM_CALL_FUNCTION0(1));
+ EXPECT_VERIFIES(v_v, WASM_CALL_FUNCTION0(1), WASM_DROP, WASM_DROP);
+ EXPECT_VERIFIES(v_v, WASM_CALL_FUNCTION0(1), kExprCallFunction, 0);
+}
+
+TEST_F(AstDecoderTest, MultiReturnType) {
+ FLAG_wasm_mv_prototype = true;
+ for (size_t a = 0; a < arraysize(kLocalTypes); a++) {
+ for (size_t b = 0; b < arraysize(kLocalTypes); b++) {
+ for (size_t c = 0; c < arraysize(kLocalTypes); c++) {
+ for (size_t d = 0; d < arraysize(kLocalTypes); d++) {
+ LocalType storage_ab[] = {kLocalTypes[a], kLocalTypes[b]};
+ FunctionSig sig_ab_v(2, 0, storage_ab);
+ LocalType storage_cd[] = {kLocalTypes[c], kLocalTypes[d]};
+ FunctionSig sig_cd_v(2, 0, storage_cd);
+
+ TestModuleEnv module_env;
+ module = &module_env;
+ module_env.AddFunction(&sig_cd_v);
+
+ EXPECT_VERIFIES_S(&sig_cd_v, WASM_CALL_FUNCTION0(0));
+
+ if (a == c && b == d) {
+ EXPECT_VERIFIES_S(&sig_ab_v, WASM_CALL_FUNCTION0(0));
+ } else {
+ EXPECT_FAILURE_S(&sig_ab_v, WASM_CALL_FUNCTION0(0));
+ }
+ }
+ }
+ }
+ }
}
TEST_F(AstDecoderTest, SimpleIndirectCalls) {
@@ -1258,9 +1427,9 @@ TEST_F(AstDecoderTest, SimpleIndirectCalls) {
byte f1 = module_env.AddSignature(sigs.i_i());
byte f2 = module_env.AddSignature(sigs.i_ii());
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I8(22)));
- EXPECT_VERIFIES_INLINE(
+ EXPECT_VERIFIES_S(sig, WASM_CALL_INDIRECT0(f0, WASM_ZERO));
+ EXPECT_VERIFIES_S(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I8(22)));
+ EXPECT_VERIFIES_S(
sig, WASM_CALL_INDIRECT2(f2, WASM_ZERO, WASM_I8(32), WASM_I8(72)));
}
@@ -1269,15 +1438,15 @@ TEST_F(AstDecoderTest, IndirectCallsOutOfBounds) {
TestModuleEnv module_env;
module = &module_env;
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT0(0, WASM_ZERO));
module_env.AddSignature(sigs.i_v());
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+ EXPECT_VERIFIES_S(sig, WASM_CALL_INDIRECT0(0, WASM_ZERO));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT1(1, WASM_ZERO, WASM_I8(22)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(1, WASM_ZERO, WASM_I8(22)));
module_env.AddSignature(sigs.i_i());
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT1(1, WASM_ZERO, WASM_I8(27)));
+ EXPECT_VERIFIES_S(sig, WASM_CALL_INDIRECT1(1, WASM_ZERO, WASM_I8(27)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT1(2, WASM_ZERO, WASM_I8(27)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(2, WASM_ZERO, WASM_I8(27)));
}
TEST_F(AstDecoderTest, IndirectCallsWithMismatchedSigs3) {
@@ -1287,23 +1456,19 @@ TEST_F(AstDecoderTest, IndirectCallsWithMismatchedSigs3) {
byte f0 = module_env.AddFunction(sigs.i_f());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_I8(17)));
- EXPECT_FAILURE_INLINE(sig,
- WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_I64V_1(27)));
- EXPECT_FAILURE_INLINE(sig,
- WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_F64(37.2)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_I8(17)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_I64V_1(27)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_F64(37.2)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_I8(17)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_I64V_1(27)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_F64(37.2)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT0(f0, WASM_I8(17)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT0(f0, WASM_I64V_1(27)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT0(f0, WASM_F64(37.2)));
byte f1 = module_env.AddFunction(sigs.i_d());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I8(16)));
- EXPECT_FAILURE_INLINE(sig,
- WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I64V_1(16)));
- EXPECT_FAILURE_INLINE(sig,
- WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_F32(17.6)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I8(16)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I64V_1(16)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_F32(17.6)));
}
TEST_F(AstDecoderTest, SimpleImportCalls) {
@@ -1315,9 +1480,9 @@ TEST_F(AstDecoderTest, SimpleImportCalls) {
byte f1 = module_env.AddImport(sigs.i_i());
byte f2 = module_env.AddImport(sigs.i_ii());
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT0(f0));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT1(f1, WASM_I8(22)));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT2(f2, WASM_I8(32), WASM_I8(72)));
+ EXPECT_VERIFIES_S(sig, WASM_CALL_FUNCTION0(f0));
+ EXPECT_VERIFIES_S(sig, WASM_CALL_FUNCTION(f1, WASM_I8(22)));
+ EXPECT_VERIFIES_S(sig, WASM_CALL_FUNCTION(f2, WASM_I8(32), WASM_I8(72)));
}
TEST_F(AstDecoderTest, ImportCallsWithMismatchedSigs3) {
@@ -1327,17 +1492,17 @@ TEST_F(AstDecoderTest, ImportCallsWithMismatchedSigs3) {
byte f0 = module_env.AddImport(sigs.i_f());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT0(f0));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f0, WASM_I8(17)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f0, WASM_I64V_1(27)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f0, WASM_F64(37.2)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION0(f0));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(f0, WASM_I8(17)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(f0, WASM_I64V_1(27)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(f0, WASM_F64(37.2)));
byte f1 = module_env.AddImport(sigs.i_d());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT0(f1));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f1, WASM_I8(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f1, WASM_I64V_1(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f1, WASM_F32(17.6)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION0(f1));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(f1, WASM_I8(16)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(f1, WASM_I64V_1(16)));
+ EXPECT_FAILURE_S(sig, WASM_CALL_FUNCTION(f1, WASM_F32(17.6)));
}
TEST_F(AstDecoderTest, Int32Globals) {
@@ -1347,8 +1512,21 @@ TEST_F(AstDecoderTest, Int32Globals) {
module_env.AddGlobal(kAstI32);
- EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_S(sig, WASM_GET_GLOBAL(0));
+ EXPECT_FAILURE_S(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_S(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)), WASM_ZERO);
+}
+
+TEST_F(AstDecoderTest, ImmutableGlobal) {
+ FunctionSig* sig = sigs.v_v();
+ TestModuleEnv module_env;
+ module = &module_env;
+
+ uint32_t g0 = module_env.AddGlobal(kAstI32, true);
+ uint32_t g1 = module_env.AddGlobal(kAstI32, false);
+
+ EXPECT_VERIFIES_S(sig, WASM_SET_GLOBAL(g0, WASM_ZERO));
+ EXPECT_FAILURE_S(sig, WASM_SET_GLOBAL(g1, WASM_ZERO));
}
TEST_F(AstDecoderTest, Int32Globals_fail) {
@@ -1361,15 +1539,15 @@ TEST_F(AstDecoderTest, Int32Globals_fail) {
module_env.AddGlobal(kAstF32);
module_env.AddGlobal(kAstF64);
- EXPECT_FAILURE_INLINE(sig, WASM_GET_GLOBAL(0));
- EXPECT_FAILURE_INLINE(sig, WASM_GET_GLOBAL(1));
- EXPECT_FAILURE_INLINE(sig, WASM_GET_GLOBAL(2));
- EXPECT_FAILURE_INLINE(sig, WASM_GET_GLOBAL(3));
+ EXPECT_FAILURE_S(sig, WASM_GET_GLOBAL(0));
+ EXPECT_FAILURE_S(sig, WASM_GET_GLOBAL(1));
+ EXPECT_FAILURE_S(sig, WASM_GET_GLOBAL(2));
+ EXPECT_FAILURE_S(sig, WASM_GET_GLOBAL(3));
- EXPECT_FAILURE_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(sig, WASM_SET_GLOBAL(1, WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(sig, WASM_SET_GLOBAL(2, WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(sig, WASM_SET_GLOBAL(3, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_S(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)), WASM_ZERO);
+ EXPECT_FAILURE_S(sig, WASM_SET_GLOBAL(1, WASM_GET_LOCAL(0)), WASM_ZERO);
+ EXPECT_FAILURE_S(sig, WASM_SET_GLOBAL(2, WASM_GET_LOCAL(0)), WASM_ZERO);
+ EXPECT_FAILURE_S(sig, WASM_SET_GLOBAL(3, WASM_GET_LOCAL(0)), WASM_ZERO);
}
TEST_F(AstDecoderTest, Int64Globals) {
@@ -1380,11 +1558,13 @@ TEST_F(AstDecoderTest, Int64Globals) {
module_env.AddGlobal(kAstI64);
module_env.AddGlobal(kAstI64);
- EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(1));
+ EXPECT_VERIFIES_S(sig, WASM_GET_GLOBAL(0));
+ EXPECT_VERIFIES_S(sig, WASM_GET_GLOBAL(1));
- EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(1, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_S(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0));
+ EXPECT_VERIFIES_S(sig, WASM_SET_GLOBAL(1, WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0));
}
TEST_F(AstDecoderTest, Float32Globals) {
@@ -1394,8 +1574,9 @@ TEST_F(AstDecoderTest, Float32Globals) {
module_env.AddGlobal(kAstF32);
- EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_S(sig, WASM_GET_GLOBAL(0));
+ EXPECT_VERIFIES_S(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0));
}
TEST_F(AstDecoderTest, Float64Globals) {
@@ -1405,8 +1586,9 @@ TEST_F(AstDecoderTest, Float64Globals) {
module_env.AddGlobal(kAstF64);
- EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_S(sig, WASM_GET_GLOBAL(0));
+ EXPECT_VERIFIES_S(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0));
}
TEST_F(AstDecoderTest, AllGetGlobalCombinations) {
@@ -1419,9 +1601,9 @@ TEST_F(AstDecoderTest, AllGetGlobalCombinations) {
module = &module_env;
module_env.AddGlobal(global_type);
if (local_type == global_type) {
- EXPECT_VERIFIES_INLINE(&sig, WASM_GET_GLOBAL(0));
+ EXPECT_VERIFIES_S(&sig, WASM_GET_GLOBAL(0));
} else {
- EXPECT_FAILURE_INLINE(&sig, WASM_GET_GLOBAL(0));
+ EXPECT_FAILURE_S(&sig, WASM_GET_GLOBAL(0));
}
}
}
@@ -1437,287 +1619,417 @@ TEST_F(AstDecoderTest, AllSetGlobalCombinations) {
module = &module_env;
module_env.AddGlobal(global_type);
if (local_type == global_type) {
- EXPECT_VERIFIES_INLINE(&sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_S(&sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
} else {
- EXPECT_FAILURE_INLINE(&sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_S(&sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
}
}
}
}
+TEST_F(AstDecoderTest, WasmGrowMemory) {
+ TestModuleEnv module_env;
+ module = &module_env;
+ module->origin = kWasmOrigin;
+
+ byte code[] = {WASM_UNOP(kExprGrowMemory, WASM_GET_LOCAL(0))};
+ EXPECT_VERIFIES_C(i_i, code);
+ EXPECT_FAILURE_C(i_d, code);
+}
+
+TEST_F(AstDecoderTest, AsmJsGrowMemory) {
+ TestModuleEnv module_env;
+ module = &module_env;
+ module->origin = kAsmJsOrigin;
+
+ byte code[] = {WASM_UNOP(kExprGrowMemory, WASM_GET_LOCAL(0))};
+ EXPECT_FAILURE_C(i_i, code);
+}
+
+TEST_F(AstDecoderTest, AsmJsBinOpsCheckOrigin) {
+ LocalType float32int32float32[] = {kAstF32, kAstI32, kAstF32};
+ FunctionSig sig_f_if(1, 2, float32int32float32);
+ LocalType float64int32float64[] = {kAstF64, kAstI32, kAstF64};
+ FunctionSig sig_d_id(1, 2, float64int32float64);
+ struct {
+ WasmOpcode op;
+ FunctionSig* sig;
+ } AsmJsBinOps[] = {
+ {kExprF64Atan2, sigs.d_dd()},
+ {kExprF64Pow, sigs.d_dd()},
+ {kExprF64Mod, sigs.d_dd()},
+ {kExprI32AsmjsDivS, sigs.i_ii()},
+ {kExprI32AsmjsDivU, sigs.i_ii()},
+ {kExprI32AsmjsRemS, sigs.i_ii()},
+ {kExprI32AsmjsRemU, sigs.i_ii()},
+ {kExprI32AsmjsStoreMem8, sigs.i_ii()},
+ {kExprI32AsmjsStoreMem16, sigs.i_ii()},
+ {kExprI32AsmjsStoreMem, sigs.i_ii()},
+ {kExprF32AsmjsStoreMem, &sig_f_if},
+ {kExprF64AsmjsStoreMem, &sig_d_id},
+ };
+
+ {
+ TestModuleEnv module_env;
+ module = &module_env;
+ module->origin = kAsmJsOrigin;
+ for (int i = 0; i < arraysize(AsmJsBinOps); i++) {
+ TestBinop(AsmJsBinOps[i].op, AsmJsBinOps[i].sig);
+ }
+ }
+
+ {
+ TestModuleEnv module_env;
+ module = &module_env;
+ module->origin = kWasmOrigin;
+ for (int i = 0; i < arraysize(AsmJsBinOps); i++) {
+ byte code[] = {
+ WASM_BINOP(AsmJsBinOps[i].op, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
+ EXPECT_FAILURE_SC(AsmJsBinOps[i].sig, code);
+ }
+ }
+}
+
+TEST_F(AstDecoderTest, AsmJsUnOpsCheckOrigin) {
+ LocalType float32int32[] = {kAstF32, kAstI32};
+ FunctionSig sig_f_i(1, 1, float32int32);
+ LocalType float64int32[] = {kAstF64, kAstI32};
+ FunctionSig sig_d_i(1, 1, float64int32);
+ struct {
+ WasmOpcode op;
+ FunctionSig* sig;
+ } AsmJsUnOps[] = {{kExprF64Acos, sigs.d_d()},
+ {kExprF64Asin, sigs.d_d()},
+ {kExprF64Atan, sigs.d_d()},
+ {kExprF64Cos, sigs.d_d()},
+ {kExprF64Sin, sigs.d_d()},
+ {kExprF64Tan, sigs.d_d()},
+ {kExprF64Exp, sigs.d_d()},
+ {kExprF64Log, sigs.d_d()},
+ {kExprI32AsmjsLoadMem8S, sigs.i_i()},
+ {kExprI32AsmjsLoadMem8U, sigs.i_i()},
+ {kExprI32AsmjsLoadMem16S, sigs.i_i()},
+ {kExprI32AsmjsLoadMem16U, sigs.i_i()},
+ {kExprI32AsmjsLoadMem, sigs.i_i()},
+ {kExprF32AsmjsLoadMem, &sig_f_i},
+ {kExprF64AsmjsLoadMem, &sig_d_i},
+ {kExprI32AsmjsSConvertF32, sigs.i_f()},
+ {kExprI32AsmjsUConvertF32, sigs.i_f()},
+ {kExprI32AsmjsSConvertF64, sigs.i_d()},
+ {kExprI32AsmjsUConvertF64, sigs.i_d()}};
+ {
+ TestModuleEnv module_env;
+ module = &module_env;
+ module->origin = kAsmJsOrigin;
+ for (int i = 0; i < arraysize(AsmJsUnOps); i++) {
+ TestUnop(AsmJsUnOps[i].op, AsmJsUnOps[i].sig);
+ }
+ }
+
+ {
+ TestModuleEnv module_env;
+ module = &module_env;
+ module->origin = kWasmOrigin;
+ for (int i = 0; i < arraysize(AsmJsUnOps); i++) {
+ byte code[] = {WASM_UNOP(AsmJsUnOps[i].op, WASM_GET_LOCAL(0))};
+ EXPECT_FAILURE_SC(AsmJsUnOps[i].sig, code);
+ }
+ }
+}
+
TEST_F(AstDecoderTest, BreakEnd) {
- EXPECT_VERIFIES_INLINE(sigs.i_i(),
- B1(WASM_I32_ADD(WASM_BRV(0, WASM_ZERO), WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.i_i(),
- B1(WASM_I32_ADD(WASM_ZERO, WASM_BRV(0, WASM_ZERO))));
+ EXPECT_VERIFIES(
+ i_i, WASM_BLOCK_I(WASM_I32_ADD(WASM_BRV(0, WASM_ZERO), WASM_ZERO)));
+ EXPECT_VERIFIES(
+ i_i, WASM_BLOCK_I(WASM_I32_ADD(WASM_ZERO, WASM_BRV(0, WASM_ZERO))));
}
TEST_F(AstDecoderTest, BreakIfBinop) {
- EXPECT_FAILURE_INLINE(
- sigs.i_i(), WASM_BLOCK(WASM_I32_ADD(WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO),
- WASM_ZERO)));
- EXPECT_FAILURE_INLINE(sigs.i_i(),
- WASM_BLOCK(WASM_I32_ADD(
- WASM_ZERO, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO))));
+ EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_I32_ADD(
+ WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO), WASM_ZERO)));
+ EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_I32_ADD(
+ WASM_ZERO, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO))));
+ EXPECT_VERIFIES_S(
+ sigs.f_ff(),
+ WASM_BLOCK_F(WASM_F32_ABS(WASM_BRV_IF(0, WASM_F32(0.0f), WASM_ZERO))));
+}
+
+TEST_F(AstDecoderTest, BreakIfBinop_fail) {
+ EXPECT_FAILURE_S(
+ sigs.f_ff(),
+ WASM_BLOCK_F(WASM_F32_ABS(WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO))));
+ EXPECT_FAILURE_S(
+ sigs.i_i(),
+ WASM_BLOCK_I(WASM_F32_ABS(WASM_BRV_IF(0, WASM_F32(0.0f), WASM_ZERO))));
}
TEST_F(AstDecoderTest, BreakNesting1) {
for (int i = 0; i < 5; i++) {
// (block[2] (loop[2] (if (get p) break[N]) (set p 1)) p)
- byte code[] = {WASM_BLOCK(
+ byte code[] = {WASM_BLOCK_I(
WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(i + 1, WASM_ZERO)),
WASM_SET_LOCAL(0, WASM_I8(1))),
- WASM_GET_LOCAL(0))};
+ WASM_ZERO)};
if (i < 3) {
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES_C(i_i, code);
} else {
- EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE_C(i_i, code);
}
}
}
TEST_F(AstDecoderTest, BreakNesting2) {
- AddLocals(kAstI32, 1);
- for (int i = 0; i < 5; i++) {
- // (block[2] (loop[2] (if 0 break[N]) (set p 1)) (return p)) (11)
- byte code[] = {B1(WASM_LOOP(WASM_IF(WASM_ZERO, WASM_BREAK(i + 1)),
- WASM_SET_LOCAL(0, WASM_I8(1)))),
- WASM_I8(11)};
- if (i < 2) {
- EXPECT_VERIFIES(sigs.v_v(), code);
+ for (int i = 0; i < 7; i++) {
+ byte code[] = {B1(WASM_LOOP(WASM_IF(WASM_ZERO, WASM_BR(i)), WASM_NOP))};
+ if (i <= 3) {
+ EXPECT_VERIFIES_C(v_v, code);
} else {
- EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE_C(v_v, code);
}
}
}
TEST_F(AstDecoderTest, BreakNesting3) {
- for (int i = 0; i < 5; i++) {
+ for (int i = 0; i < 7; i++) {
// (block[1] (loop[1] (block[1] (if 0 break[N])
byte code[] = {
- WASM_BLOCK(WASM_LOOP(B1(WASM_IF(WASM_ZERO, WASM_BREAK(i + 1)))))};
- if (i < 3) {
- EXPECT_VERIFIES(sigs.v_v(), code);
+ WASM_BLOCK(WASM_LOOP(B1(WASM_IF(WASM_ZERO, WASM_BR(i + 1)))))};
+ if (i < 4) {
+ EXPECT_VERIFIES_C(v_v, code);
} else {
- EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE_C(v_v, code);
}
}
}
TEST_F(AstDecoderTest, BreaksWithMultipleTypes) {
- EXPECT_FAILURE_INLINE(sigs.i_i(),
- B2(WASM_BRV_IF_ZERO(0, WASM_I8(7)), WASM_F32(7.7)));
+ EXPECT_FAILURE(i_i, B2(WASM_BRV_IF_ZERO(0, WASM_I8(7)), WASM_F32(7.7)));
- EXPECT_FAILURE_INLINE(sigs.i_i(), B2(WASM_BRV_IF_ZERO(0, WASM_I8(7)),
- WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
- EXPECT_FAILURE_INLINE(sigs.i_i(), B3(WASM_BRV_IF_ZERO(0, WASM_I8(8)),
- WASM_BRV_IF_ZERO(0, WASM_I8(0)),
- WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
- EXPECT_FAILURE_INLINE(sigs.i_i(), B3(WASM_BRV_IF_ZERO(0, WASM_I8(9)),
- WASM_BRV_IF_ZERO(0, WASM_F32(7.7)),
- WASM_BRV_IF_ZERO(0, WASM_I8(11))));
+ EXPECT_FAILURE(i_i, B2(WASM_BRV_IF_ZERO(0, WASM_I8(7)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
+ EXPECT_FAILURE(
+ i_i, B3(WASM_BRV_IF_ZERO(0, WASM_I8(8)), WASM_BRV_IF_ZERO(0, WASM_I8(0)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
+ EXPECT_FAILURE(i_i, B3(WASM_BRV_IF_ZERO(0, WASM_I8(9)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7)),
+ WASM_BRV_IF_ZERO(0, WASM_I8(11))));
}
TEST_F(AstDecoderTest, BreakNesting_6_levels) {
for (int mask = 0; mask < 64; mask++) {
for (int i = 0; i < 14; i++) {
- byte code[] = {
- kExprBlock, // --
- kExprBlock, // --
- kExprBlock, // --
- kExprBlock, // --
- kExprBlock, // --
- kExprBlock, // --
- kExprBr, ARITY_0, static_cast<byte>(i), // --
- kExprEnd, // --
- kExprEnd, // --
- kExprEnd, // --
- kExprEnd, // --
- kExprEnd, // --
- kExprEnd // --
- };
+ byte code[] = {WASM_BLOCK(WASM_BLOCK(
+ WASM_BLOCK(WASM_BLOCK(WASM_BLOCK(WASM_BLOCK(WASM_BR(i)))))))};
int depth = 6;
- for (int l = 0; l < 6; l++) {
- if (mask & (1 << l)) {
- code[l] = kExprLoop;
- depth++;
+ int m = mask;
+ for (size_t pos = 0; pos < sizeof(code) - 1; pos++) {
+ if (code[pos] != kExprBlock) continue;
+ if (m & 1) {
+ code[pos] = kExprLoop;
+ code[pos + 1] = kLocalVoid;
}
+ m >>= 1;
}
- if (i < depth) {
- EXPECT_VERIFIES(sigs.v_v(), code);
+ if (i <= depth) {
+ EXPECT_VERIFIES_C(v_v, code);
} else {
- EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE_C(v_v, code);
}
}
}
}
-TEST_F(AstDecoderTest, ExprBreak_TypeCheck) {
+TEST_F(AstDecoderTest, Break_TypeCheck) {
FunctionSig* sigarray[] = {sigs.i_i(), sigs.l_l(), sigs.f_ff(), sigs.d_dd()};
for (size_t i = 0; i < arraysize(sigarray); i++) {
FunctionSig* sig = sigarray[i];
// unify X and X => OK
- EXPECT_VERIFIES_INLINE(
- sig, B2(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
- WASM_GET_LOCAL(0)));
+ byte code[] = {WASM_BLOCK_T(
+ sig->GetReturn(), WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(0))};
+ EXPECT_VERIFIES_SC(sig, code);
}
// unify i32 and f32 => fail
- EXPECT_FAILURE_INLINE(
- sigs.i_i(),
- B2(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_ZERO)), WASM_F32(1.2)));
+ EXPECT_FAILURE(i_i, WASM_BLOCK_I(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_ZERO)),
+ WASM_F32(1.2)));
// unify f64 and f64 => OK
- EXPECT_VERIFIES_INLINE(
- sigs.d_dd(),
- B2(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))), WASM_F64(1.2)));
+ EXPECT_VERIFIES(
+ d_dd, WASM_BLOCK_D(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+ WASM_F64(1.2)));
+}
+
+TEST_F(AstDecoderTest, Break_TypeCheckAll1) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalType storage[] = {kLocalTypes[i], kLocalTypes[i], kLocalTypes[j]};
+ FunctionSig sig(1, 2, storage);
+ byte code[] = {WASM_BLOCK_T(
+ sig.GetReturn(), WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(1))};
+
+ if (i == j) {
+ EXPECT_VERIFIES_SC(&sig, code);
+ } else {
+ EXPECT_FAILURE_SC(&sig, code);
+ }
+ }
+ }
}
-TEST_F(AstDecoderTest, ExprBreak_TypeCheckAll) {
- byte code1[] = {WASM_BLOCK(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
- WASM_GET_LOCAL(1))};
- byte code2[] = {B2(WASM_IF(WASM_ZERO, WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))),
- WASM_GET_LOCAL(1))};
+TEST_F(AstDecoderTest, Break_TypeCheckAll2) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalType storage[] = {kLocalTypes[i], kLocalTypes[i], kLocalTypes[j]};
+ FunctionSig sig(1, 2, storage);
+ byte code[] = {WASM_IF_ELSE_T(sig.GetReturn(0), WASM_ZERO,
+ WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(1))};
+
+ if (i == j) {
+ EXPECT_VERIFIES_SC(&sig, code);
+ } else {
+ EXPECT_FAILURE_SC(&sig, code);
+ }
+ }
+ }
+}
+TEST_F(AstDecoderTest, Break_TypeCheckAll3) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
LocalType storage[] = {kLocalTypes[i], kLocalTypes[i], kLocalTypes[j]};
FunctionSig sig(1, 2, storage);
+ byte code[] = {WASM_IF_ELSE_T(sig.GetReturn(), WASM_ZERO,
+ WASM_GET_LOCAL(1),
+ WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0)))};
if (i == j) {
- EXPECT_VERIFIES(&sig, code1);
- EXPECT_VERIFIES(&sig, code2);
+ EXPECT_VERIFIES_SC(&sig, code);
} else {
- EXPECT_FAILURE(&sig, code1);
- EXPECT_FAILURE(&sig, code2);
+ EXPECT_FAILURE_SC(&sig, code);
}
}
}
}
-TEST_F(AstDecoderTest, ExprBr_Unify) {
+TEST_F(AstDecoderTest, Break_Unify) {
for (int which = 0; which < 2; which++) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType type = kLocalTypes[i];
LocalType storage[] = {kAstI32, kAstI32, type};
FunctionSig sig(1, 2, storage);
- byte code1[] = {B2(WASM_IF(WASM_ZERO, WASM_BRV(1, WASM_GET_LOCAL(which))),
- WASM_GET_LOCAL(which ^ 1))};
- byte code2[] = {
- WASM_LOOP(WASM_IF(WASM_ZERO, WASM_BRV(2, WASM_GET_LOCAL(which))),
- WASM_GET_LOCAL(which ^ 1))};
+ byte code1[] = {WASM_BLOCK_T(
+ type, WASM_IF(WASM_ZERO, WASM_BRV(1, WASM_GET_LOCAL(which))),
+ WASM_GET_LOCAL(which ^ 1))};
if (type == kAstI32) {
- EXPECT_VERIFIES(&sig, code1);
- EXPECT_VERIFIES(&sig, code2);
+ EXPECT_VERIFIES_SC(&sig, code1);
} else {
- EXPECT_FAILURE(&sig, code1);
- EXPECT_FAILURE(&sig, code2);
+ EXPECT_FAILURE_SC(&sig, code1);
}
}
}
}
-TEST_F(AstDecoderTest, ExprBrIf_cond_type) {
- byte code[] = {B1(WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)))};
+TEST_F(AstDecoderTest, BreakIf_cond_type) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
- LocalType types[] = {kLocalTypes[i], kLocalTypes[j]};
- FunctionSig sig(0, 2, types);
+ LocalType types[] = {kLocalTypes[i], kLocalTypes[i], kLocalTypes[j]};
+ FunctionSig sig(1, 2, types);
+ byte code[] = {WASM_BLOCK_T(
+ types[0], WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)))};
- if (types[1] == kAstI32) {
- EXPECT_VERIFIES(&sig, code);
+ if (types[2] == kAstI32) {
+ EXPECT_VERIFIES_SC(&sig, code);
} else {
- EXPECT_FAILURE(&sig, code);
+ EXPECT_FAILURE_SC(&sig, code);
}
}
}
}
-TEST_F(AstDecoderTest, ExprBrIf_val_type) {
- byte code[] = {B2(WASM_BRV_IF(0, WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
- WASM_GET_LOCAL(0))};
+TEST_F(AstDecoderTest, BreakIf_val_type) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
LocalType types[] = {kLocalTypes[i], kLocalTypes[i], kLocalTypes[j],
kAstI32};
FunctionSig sig(1, 3, types);
+ byte code[] = {WASM_BLOCK_T(
+ types[1], WASM_BRV_IF(0, WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
+ WASM_DROP, WASM_GET_LOCAL(0))};
if (i == j) {
- EXPECT_VERIFIES(&sig, code);
+ EXPECT_VERIFIES_SC(&sig, code);
} else {
- EXPECT_FAILURE(&sig, code);
+ EXPECT_FAILURE_SC(&sig, code);
}
}
}
}
-TEST_F(AstDecoderTest, ExprBrIf_Unify) {
+TEST_F(AstDecoderTest, BreakIf_Unify) {
for (int which = 0; which < 2; which++) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType type = kLocalTypes[i];
LocalType storage[] = {kAstI32, kAstI32, type};
FunctionSig sig(1, 2, storage);
-
- byte code1[] = {B2(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(which)),
- WASM_GET_LOCAL(which ^ 1))};
- byte code2[] = {WASM_LOOP(WASM_BRV_IF_ZERO(1, WASM_GET_LOCAL(which)),
- WASM_GET_LOCAL(which ^ 1))};
+ byte code[] = {WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(which)),
+ WASM_DROP, WASM_GET_LOCAL(which ^ 1))};
if (type == kAstI32) {
- EXPECT_VERIFIES(&sig, code1);
- EXPECT_VERIFIES(&sig, code2);
+ EXPECT_VERIFIES_SC(&sig, code);
} else {
- EXPECT_FAILURE(&sig, code1);
- EXPECT_FAILURE(&sig, code2);
+ EXPECT_FAILURE_SC(&sig, code);
}
}
}
}
TEST_F(AstDecoderTest, BrTable0) {
- static byte code[] = {kExprNop, kExprBrTable, 0, 0};
- EXPECT_FAILURE(sigs.v_v(), code);
+ static byte code[] = {kExprBrTable, 0, BR_TARGET(0)};
+ EXPECT_FAILURE_C(v_v, code);
}
TEST_F(AstDecoderTest, BrTable0b) {
- static byte code[] = {kExprNop, kExprI32Const, 11, kExprBrTable, 0, 0};
- EXPECT_FAILURE(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
+ static byte code[] = {kExprI32Const, 11, kExprBrTable, 0, BR_TARGET(0)};
+ EXPECT_VERIFIES_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
}
TEST_F(AstDecoderTest, BrTable0c) {
- static byte code[] = {kExprNop, kExprI32Const, 11, kExprBrTable, 0, 1, 0, 0};
- EXPECT_FAILURE(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
+ static byte code[] = {kExprI32Const, 11, kExprBrTable, 0, BR_TARGET(1)};
+ EXPECT_FAILURE_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
}
TEST_F(AstDecoderTest, BrTable1a) {
static byte code[] = {B1(WASM_BR_TABLE(WASM_I8(67), 0, BR_TARGET(0)))};
- EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_VERIFIES_C(v_v, code);
}
TEST_F(AstDecoderTest, BrTable1b) {
static byte code[] = {B1(WASM_BR_TABLE(WASM_ZERO, 0, BR_TARGET(0)))};
- EXPECT_VERIFIES(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
- EXPECT_FAILURE(sigs.f_ff(), code);
- EXPECT_FAILURE(sigs.d_dd(), code);
+ EXPECT_VERIFIES_C(v_v, code);
+ EXPECT_FAILURE_C(i_i, code);
+ EXPECT_FAILURE_C(f_ff, code);
+ EXPECT_FAILURE_C(d_dd, code);
}
TEST_F(AstDecoderTest, BrTable2a) {
static byte code[] = {
B1(WASM_BR_TABLE(WASM_I8(67), 1, BR_TARGET(0), BR_TARGET(0)))};
- EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_VERIFIES_C(v_v, code);
}
TEST_F(AstDecoderTest, BrTable2b) {
static byte code[] = {WASM_BLOCK(
WASM_BLOCK(WASM_BR_TABLE(WASM_I8(67), 1, BR_TARGET(0), BR_TARGET(1))))};
- EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_VERIFIES_C(v_v, code);
}
TEST_F(AstDecoderTest, BrTable_off_end) {
@@ -1730,63 +2042,74 @@ TEST_F(AstDecoderTest, BrTable_off_end) {
TEST_F(AstDecoderTest, BrTable_invalid_br1) {
for (int depth = 0; depth < 4; depth++) {
byte code[] = {B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
- if (depth == 0) {
- EXPECT_VERIFIES(sigs.v_i(), code);
+ if (depth <= 1) {
+ EXPECT_VERIFIES_C(v_i, code);
} else {
- EXPECT_FAILURE(sigs.v_i(), code);
+ EXPECT_FAILURE_C(v_i, code);
}
}
}
TEST_F(AstDecoderTest, BrTable_invalid_br2) {
- for (int depth = 0; depth < 4; depth++) {
+ for (int depth = 0; depth < 7; depth++) {
byte code[] = {
WASM_LOOP(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
- if (depth <= 1) {
- EXPECT_VERIFIES(sigs.v_i(), code);
+ if (depth < 2) {
+ EXPECT_VERIFIES_C(v_i, code);
} else {
- EXPECT_FAILURE(sigs.v_i(), code);
+ EXPECT_FAILURE_C(v_i, code);
}
}
}
-TEST_F(AstDecoderTest, ExprBreakNesting1) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BRV(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BR(0)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BR_IF(0, WASM_ZERO)));
+TEST_F(AstDecoderTest, Brv1) {
+ EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_BRV(0, WASM_ZERO)));
+ EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_LOOP(WASM_BRV(2, WASM_ZERO))));
+}
+
+TEST_F(AstDecoderTest, Brv1_type) {
+ EXPECT_VERIFIES(i_ii, WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0))));
+ EXPECT_VERIFIES(l_ll, WASM_BLOCK_L(WASM_BRV(0, WASM_GET_LOCAL(0))));
+ EXPECT_VERIFIES(f_ff, WASM_BLOCK_F(WASM_BRV(0, WASM_GET_LOCAL(0))));
+ EXPECT_VERIFIES(d_dd, WASM_BLOCK_D(WASM_BRV(0, WASM_GET_LOCAL(0))));
+}
+
+TEST_F(AstDecoderTest, Brv1_type_n) {
+ EXPECT_FAILURE(i_f, WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0))));
+ EXPECT_FAILURE(i_d, WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0))));
+}
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BRV(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(0)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(),
- WASM_LOOP(WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR_IF(0, WASM_ZERO)));
+TEST_F(AstDecoderTest, BrvIf1) {
+ EXPECT_VERIFIES(i_v, WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_ZERO)));
+}
+
+TEST_F(AstDecoderTest, BrvIf1_type) {
+ EXPECT_VERIFIES(i_i, WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))));
+ EXPECT_VERIFIES(l_l, WASM_BLOCK_L(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))));
+ EXPECT_VERIFIES(f_ff, WASM_BLOCK_F(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))));
+ EXPECT_VERIFIES(d_dd, WASM_BLOCK_D(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))));
+}
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BRV(1, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(1)));
+TEST_F(AstDecoderTest, BrvIf1_type_n) {
+ EXPECT_FAILURE(i_f, WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))));
+ EXPECT_FAILURE(i_d, WASM_BLOCK_I(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))));
}
TEST_F(AstDecoderTest, Select) {
- EXPECT_VERIFIES_INLINE(
- sigs.i_i(), WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sigs.f_ff(),
- WASM_SELECT(WASM_F32(0.0), WASM_F32(0.0), WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sigs.d_dd(),
- WASM_SELECT(WASM_F64(0.0), WASM_F64(0.0), WASM_ZERO));
- EXPECT_VERIFIES_INLINE(
- sigs.l_l(), WASM_SELECT(WASM_I64V_1(0), WASM_I64V_1(0), WASM_ZERO));
+ EXPECT_VERIFIES(i_i,
+ WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_ZERO));
+ EXPECT_VERIFIES(f_ff, WASM_SELECT(WASM_F32(0.0), WASM_F32(0.0), WASM_ZERO));
+ EXPECT_VERIFIES(d_dd, WASM_SELECT(WASM_F64(0.0), WASM_F64(0.0), WASM_ZERO));
+ EXPECT_VERIFIES(l_l, WASM_SELECT(WASM_I64V_1(0), WASM_I64V_1(0), WASM_ZERO));
}
TEST_F(AstDecoderTest, Select_fail1) {
- EXPECT_FAILURE_INLINE(
- sigs.i_i(),
- WASM_SELECT(WASM_F32(0.0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(
- sigs.i_i(),
- WASM_SELECT(WASM_GET_LOCAL(0), WASM_F32(0.0), WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(
- sigs.i_i(),
- WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_F32(0.0)));
+ EXPECT_FAILURE(
+ i_i, WASM_SELECT(WASM_F32(0.0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(
+ i_i, WASM_SELECT(WASM_GET_LOCAL(0), WASM_F32(0.0), WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(
+ i_i, WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_F32(0.0)));
}
TEST_F(AstDecoderTest, Select_fail2) {
@@ -1797,120 +2120,159 @@ TEST_F(AstDecoderTest, Select_fail2) {
LocalType types[] = {type, kAstI32, type};
FunctionSig sig(1, 2, types);
- EXPECT_VERIFIES_INLINE(
- &sig,
- WASM_SELECT(WASM_GET_LOCAL(1), WASM_GET_LOCAL(1), WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_S(&sig, WASM_SELECT(WASM_GET_LOCAL(1), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(
- &sig,
- WASM_SELECT(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_S(&sig, WASM_SELECT(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(
- &sig,
- WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_S(&sig, WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(
- &sig,
- WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ EXPECT_FAILURE_S(&sig, WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1)));
}
}
TEST_F(AstDecoderTest, Select_TypeCheck) {
- EXPECT_FAILURE_INLINE(
- sigs.i_i(),
- WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(
+ i_i, WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(
- sigs.i_i(),
- WASM_SELECT(WASM_GET_LOCAL(0), WASM_F64(0.25), WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE(
+ i_i, WASM_SELECT(WASM_GET_LOCAL(0), WASM_F64(0.25), WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(
- sigs.i_i(),
- WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_I64V_1(0)));
+ EXPECT_FAILURE(i_i,
+ WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_I64V_1(0)));
}
TEST_F(AstDecoderTest, Throw) {
FLAG_wasm_eh_prototype = true;
- EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_GET_LOCAL(0), kExprThrow);
+ EXPECT_VERIFIES(v_i, WASM_GET_LOCAL(0), kExprThrow);
- EXPECT_FAILURE_INLINE(sigs.i_d(), WASM_GET_LOCAL(0), kExprThrow,
- WASM_I32V(0));
- EXPECT_FAILURE_INLINE(sigs.i_f(), WASM_GET_LOCAL(0), kExprThrow,
- WASM_I32V(0));
- EXPECT_FAILURE_INLINE(sigs.l_l(), WASM_GET_LOCAL(0), kExprThrow,
- WASM_I64V(0));
+ EXPECT_FAILURE(i_d, WASM_GET_LOCAL(0), kExprThrow, WASM_I32V(0));
+ EXPECT_FAILURE(i_f, WASM_GET_LOCAL(0), kExprThrow, WASM_I32V(0));
+ EXPECT_FAILURE(l_l, WASM_GET_LOCAL(0), kExprThrow, WASM_I64V(0));
}
+#define WASM_TRY_OP kExprTry, kLocalVoid
+
#define WASM_CATCH(local) kExprCatch, static_cast<byte>(local)
+
TEST_F(AstDecoderTest, TryCatch) {
FLAG_wasm_eh_prototype = true;
- EXPECT_VERIFIES_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0), kExprEnd);
+ EXPECT_VERIFIES(v_i, WASM_TRY_OP, WASM_CATCH(0), kExprEnd);
// Missing catch.
- EXPECT_FAILURE_INLINE(sigs.v_v(), kExprTryCatch, kExprEnd);
+ EXPECT_FAILURE(v_v, WASM_TRY_OP, kExprEnd);
// Missing end.
- EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0));
+ EXPECT_FAILURE(v_i, WASM_TRY_OP, WASM_CATCH(0));
// Double catch.
- EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0), WASM_CATCH(0),
- kExprEnd);
-
- // Unexpected finally.
- EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0), kExprFinally,
- kExprEnd);
-}
-
-TEST_F(AstDecoderTest, TryFinally) {
- FLAG_wasm_eh_prototype = true;
- EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprTryFinally, kExprFinally, kExprEnd);
+ EXPECT_FAILURE(v_i, WASM_TRY_OP, WASM_CATCH(0), WASM_CATCH(0), kExprEnd);
+}
+
+TEST_F(AstDecoderTest, MultiValBlock1) {
+ FLAG_wasm_mv_prototype = true;
+ EXPECT_VERIFIES(i_ii, WASM_BLOCK_TT(kAstI32, kAstI32, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1)),
+ kExprI32Add);
+}
+
+TEST_F(AstDecoderTest, MultiValBlock2) {
+ FLAG_wasm_mv_prototype = true;
+ EXPECT_VERIFIES(i_ii, WASM_BLOCK_TT(kAstI32, kAstI32, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1)),
+ WASM_I32_ADD(WASM_NOP, WASM_NOP));
+}
+
+TEST_F(AstDecoderTest, MultiValBlockBr1) {
+ FLAG_wasm_mv_prototype = true;
+ EXPECT_FAILURE(i_ii,
+ WASM_BLOCK_TT(kAstI32, kAstI32, WASM_GET_LOCAL(0), WASM_BR(0)),
+ kExprI32Add);
+ EXPECT_VERIFIES(i_ii, WASM_BLOCK_TT(kAstI32, kAstI32, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), WASM_BR(0)),
+ kExprI32Add);
+}
+
+TEST_F(AstDecoderTest, MultiValIf1) {
+ FLAG_wasm_mv_prototype = true;
+ EXPECT_FAILURE(
+ i_ii, WASM_IF_ELSE_TT(kAstI32, kAstI32, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0)),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ kExprI32Add);
+ EXPECT_FAILURE(i_ii,
+ WASM_IF_ELSE_TT(kAstI32, kAstI32, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_SEQ(WASM_GET_LOCAL(1))),
+ kExprI32Add);
+ EXPECT_VERIFIES(
+ i_ii, WASM_IF_ELSE_TT(kAstI32, kAstI32, WASM_GET_LOCAL(0),
+ WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
+ kExprI32Add);
+}
+
+class BranchTableIteratorTest : public TestWithZone {
+ public:
+ BranchTableIteratorTest() : TestWithZone() {}
+ void CheckBrTableSize(const byte* start, const byte* end) {
+ Decoder decoder(start, end);
+ BranchTableOperand operand(&decoder, start);
+ BranchTableIterator iterator(&decoder, operand);
+ EXPECT_EQ(end - start - 1, iterator.length());
+ EXPECT_TRUE(decoder.ok());
+ }
+ void CheckBrTableError(const byte* start, const byte* end) {
+ Decoder decoder(start, end);
+ BranchTableOperand operand(&decoder, start);
+ BranchTableIterator iterator(&decoder, operand);
+ iterator.length();
+ EXPECT_FALSE(decoder.ok());
+ }
+};
- // Mising finally.
- EXPECT_FAILURE_INLINE(sigs.v_v(), kExprTryFinally, kExprEnd);
+#define CHECK_BR_TABLE_LENGTH(...) \
+ { \
+ static byte code[] = {kExprBrTable, __VA_ARGS__}; \
+ CheckBrTableSize(code, code + sizeof(code)); \
+ }
- // Missing end.
- EXPECT_FAILURE_INLINE(sigs.v_v(), kExprTryFinally, kExprFinally);
+#define CHECK_BR_TABLE_ERROR(...) \
+ { \
+ static byte code[] = {kExprBrTable, __VA_ARGS__}; \
+ CheckBrTableError(code, code + sizeof(code)); \
+ }
- // Double finally.
- EXPECT_FAILURE_INLINE(sigs.v_v(), kExprTryFinally, kExprFinally, kExprFinally,
- kExprEnd);
+TEST_F(BranchTableIteratorTest, count0) {
+ CHECK_BR_TABLE_LENGTH(0, U32V_1(1));
+ CHECK_BR_TABLE_LENGTH(0, U32V_2(200));
+ CHECK_BR_TABLE_LENGTH(0, U32V_3(30000));
+ CHECK_BR_TABLE_LENGTH(0, U32V_4(400000));
- // Unexpected catch.
- EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0), kExprFinally,
- kExprEnd);
+ CHECK_BR_TABLE_LENGTH(0, U32V_1(2));
+ CHECK_BR_TABLE_LENGTH(0, U32V_2(300));
+ CHECK_BR_TABLE_LENGTH(0, U32V_3(40000));
+ CHECK_BR_TABLE_LENGTH(0, U32V_4(500000));
}
-TEST_F(AstDecoderTest, TryCatchFinally) {
- FLAG_wasm_eh_prototype = true;
- EXPECT_VERIFIES_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
- kExprFinally, kExprEnd);
-
- // Missing catch.
- EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, kExprFinally,
- kExprEnd);
-
- // Double catch.
- EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
- WASM_CATCH(0), kExprFinally, kExprEnd);
-
- // Missing finally.
- EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
- kExprEnd);
+TEST_F(BranchTableIteratorTest, count1) {
+ CHECK_BR_TABLE_LENGTH(1, U32V_1(1), U32V_1(6));
+ CHECK_BR_TABLE_LENGTH(1, U32V_2(200), U32V_1(8));
+ CHECK_BR_TABLE_LENGTH(1, U32V_3(30000), U32V_1(9));
+ CHECK_BR_TABLE_LENGTH(1, U32V_4(400000), U32V_1(11));
- // Double finally.
- EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
- kExprFinally, kExprFinally, kExprEnd);
-
- // Finally before catch.
- EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, kExprFinally,
- WASM_CATCH(0), kExprEnd);
-
- // Missing both try and finally.
- EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, kExprEnd);
+ CHECK_BR_TABLE_LENGTH(1, U32V_1(2), U32V_2(6));
+ CHECK_BR_TABLE_LENGTH(1, U32V_2(300), U32V_2(7));
+ CHECK_BR_TABLE_LENGTH(1, U32V_3(40000), U32V_2(8));
+ CHECK_BR_TABLE_LENGTH(1, U32V_4(500000), U32V_2(9));
+}
- // Missing end.
- EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
- kExprFinally);
+TEST_F(BranchTableIteratorTest, error0) {
+ CHECK_BR_TABLE_ERROR(0);
+ CHECK_BR_TABLE_ERROR(1, U32V_1(33));
}
class WasmOpcodeLengthTest : public TestWithZone {
@@ -1932,20 +2294,17 @@ class WasmOpcodeLengthTest : public TestWithZone {
TEST_F(WasmOpcodeLengthTest, Statements) {
EXPECT_LENGTH(1, kExprNop);
- EXPECT_LENGTH(1, kExprBlock);
- EXPECT_LENGTH(1, kExprLoop);
- EXPECT_LENGTH(1, kExprIf);
+ EXPECT_LENGTH(2, kExprBlock);
+ EXPECT_LENGTH(2, kExprLoop);
+ EXPECT_LENGTH(2, kExprIf);
EXPECT_LENGTH(1, kExprElse);
EXPECT_LENGTH(1, kExprEnd);
EXPECT_LENGTH(1, kExprSelect);
- EXPECT_LENGTH(3, kExprBr);
- EXPECT_LENGTH(3, kExprBrIf);
+ EXPECT_LENGTH(2, kExprBr);
+ EXPECT_LENGTH(2, kExprBrIf);
EXPECT_LENGTH(1, kExprThrow);
- EXPECT_LENGTH(1, kExprTryCatch);
- EXPECT_LENGTH(1, kExprTryFinally);
- EXPECT_LENGTH(1, kExprTryCatchFinally);
+ EXPECT_LENGTH(2, kExprTry);
EXPECT_LENGTH(2, kExprCatch);
- EXPECT_LENGTH(1, kExprFinally);
}
TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
@@ -1956,14 +2315,8 @@ TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
EXPECT_LENGTH(2, kExprSetLocal);
EXPECT_LENGTH(2, kExprGetGlobal);
EXPECT_LENGTH(2, kExprSetGlobal);
- EXPECT_LENGTH(3, kExprCallFunction);
- EXPECT_LENGTH(3, kExprCallImport);
- EXPECT_LENGTH(3, kExprCallIndirect);
- EXPECT_LENGTH(1, kExprIf);
- EXPECT_LENGTH(1, kExprBlock);
- EXPECT_LENGTH(1, kExprLoop);
- EXPECT_LENGTH(3, kExprBr);
- EXPECT_LENGTH(3, kExprBrIf);
+ EXPECT_LENGTH(2, kExprCallFunction);
+ EXPECT_LENGTH(2, kExprCallIndirect);
}
TEST_F(WasmOpcodeLengthTest, I32Const) {
@@ -2147,256 +2500,11 @@ TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
EXPECT_LENGTH(1, kExprI64ReinterpretF64);
}
-class WasmOpcodeArityTest : public TestWithZone {
- public:
- WasmOpcodeArityTest() : TestWithZone() {}
-};
-
-#define EXPECT_ARITY(expected, ...) \
- { \
- static const byte code[] = {__VA_ARGS__}; \
- EXPECT_EQ(expected, OpcodeArity(code, code + sizeof(code))); \
- }
-
-TEST_F(WasmOpcodeArityTest, Control) {
- EXPECT_ARITY(0, kExprNop);
-
- EXPECT_ARITY(0, kExprBlock, 0);
- EXPECT_ARITY(0, kExprBlock, 1);
- EXPECT_ARITY(0, kExprBlock, 2);
- EXPECT_ARITY(0, kExprBlock, 5);
- EXPECT_ARITY(0, kExprBlock, 10);
-
- EXPECT_ARITY(0, kExprLoop, 0);
- EXPECT_ARITY(0, kExprLoop, 1);
- EXPECT_ARITY(0, kExprLoop, 2);
- EXPECT_ARITY(0, kExprLoop, 7);
- EXPECT_ARITY(0, kExprLoop, 11);
-
- EXPECT_ARITY(3, kExprSelect);
-
- EXPECT_ARITY(0, kExprBr);
- EXPECT_ARITY(1, kExprBrIf);
- EXPECT_ARITY(1, kExprBrTable);
-
- EXPECT_ARITY(1, kExprBr, ARITY_1);
- EXPECT_ARITY(2, kExprBrIf, ARITY_1);
- EXPECT_ARITY(2, kExprBrTable, ARITY_1);
-
- {
- EXPECT_ARITY(0, kExprReturn, ARITY_0);
- EXPECT_ARITY(1, kExprReturn, ARITY_1);
- }
-
- EXPECT_ARITY(0, kExprThrow);
- EXPECT_ARITY(0, kExprTryCatch);
- EXPECT_ARITY(0, kExprTryFinally);
- EXPECT_ARITY(0, kExprTryCatchFinally);
- EXPECT_ARITY(1, kExprCatch, 2);
- EXPECT_ARITY(0, kExprFinally);
-}
-
-TEST_F(WasmOpcodeArityTest, Misc) {
- EXPECT_ARITY(0, kExprI8Const);
- EXPECT_ARITY(0, kExprI32Const);
- EXPECT_ARITY(0, kExprF32Const);
- EXPECT_ARITY(0, kExprI64Const);
- EXPECT_ARITY(0, kExprF64Const);
- EXPECT_ARITY(0, kExprGetLocal);
- EXPECT_ARITY(1, kExprSetLocal);
- EXPECT_ARITY(0, kExprGetGlobal);
- EXPECT_ARITY(1, kExprSetGlobal);
-}
-
-TEST_F(WasmOpcodeArityTest, Calls) {
- {
- EXPECT_ARITY(2, kExprCallFunction, 2, 0);
- EXPECT_ARITY(2, kExprCallImport, 2, 0);
- EXPECT_ARITY(3, kExprCallIndirect, 2, 0);
-
- EXPECT_ARITY(1, kExprBr, ARITY_1);
- EXPECT_ARITY(2, kExprBrIf, ARITY_1);
- EXPECT_ARITY(2, kExprBrTable, ARITY_1);
-
- EXPECT_ARITY(0, kExprBr, ARITY_0);
- EXPECT_ARITY(1, kExprBrIf, ARITY_0);
- EXPECT_ARITY(1, kExprBrTable, ARITY_0);
- }
-
- {
- EXPECT_ARITY(1, kExprCallFunction, ARITY_1, 1);
- EXPECT_ARITY(1, kExprCallImport, ARITY_1, 1);
- EXPECT_ARITY(2, kExprCallIndirect, ARITY_1, 1);
-
- EXPECT_ARITY(1, kExprBr, ARITY_1);
- EXPECT_ARITY(2, kExprBrIf, ARITY_1);
- EXPECT_ARITY(2, kExprBrTable, ARITY_1);
-
- EXPECT_ARITY(0, kExprBr, ARITY_0);
- EXPECT_ARITY(1, kExprBrIf, ARITY_0);
- EXPECT_ARITY(1, kExprBrTable, ARITY_0);
- }
-}
-
-TEST_F(WasmOpcodeArityTest, LoadsAndStores) {
- EXPECT_ARITY(1, kExprI32LoadMem8S);
- EXPECT_ARITY(1, kExprI32LoadMem8U);
- EXPECT_ARITY(1, kExprI32LoadMem16S);
- EXPECT_ARITY(1, kExprI32LoadMem16U);
- EXPECT_ARITY(1, kExprI32LoadMem);
-
- EXPECT_ARITY(1, kExprI64LoadMem8S);
- EXPECT_ARITY(1, kExprI64LoadMem8U);
- EXPECT_ARITY(1, kExprI64LoadMem16S);
- EXPECT_ARITY(1, kExprI64LoadMem16U);
- EXPECT_ARITY(1, kExprI64LoadMem32S);
- EXPECT_ARITY(1, kExprI64LoadMem32U);
- EXPECT_ARITY(1, kExprI64LoadMem);
- EXPECT_ARITY(1, kExprF32LoadMem);
- EXPECT_ARITY(1, kExprF64LoadMem);
-
- EXPECT_ARITY(2, kExprI32StoreMem8);
- EXPECT_ARITY(2, kExprI32StoreMem16);
- EXPECT_ARITY(2, kExprI32StoreMem);
- EXPECT_ARITY(2, kExprI64StoreMem8);
- EXPECT_ARITY(2, kExprI64StoreMem16);
- EXPECT_ARITY(2, kExprI64StoreMem32);
- EXPECT_ARITY(2, kExprI64StoreMem);
- EXPECT_ARITY(2, kExprF32StoreMem);
- EXPECT_ARITY(2, kExprF64StoreMem);
-}
-
-TEST_F(WasmOpcodeArityTest, MiscMemExpressions) {
- EXPECT_ARITY(0, kExprMemorySize);
- EXPECT_ARITY(1, kExprGrowMemory);
-}
-
-TEST_F(WasmOpcodeArityTest, SimpleExpressions) {
- EXPECT_ARITY(2, kExprI32Add);
- EXPECT_ARITY(2, kExprI32Sub);
- EXPECT_ARITY(2, kExprI32Mul);
- EXPECT_ARITY(2, kExprI32DivS);
- EXPECT_ARITY(2, kExprI32DivU);
- EXPECT_ARITY(2, kExprI32RemS);
- EXPECT_ARITY(2, kExprI32RemU);
- EXPECT_ARITY(2, kExprI32And);
- EXPECT_ARITY(2, kExprI32Ior);
- EXPECT_ARITY(2, kExprI32Xor);
- EXPECT_ARITY(2, kExprI32Shl);
- EXPECT_ARITY(2, kExprI32ShrU);
- EXPECT_ARITY(2, kExprI32ShrS);
- EXPECT_ARITY(2, kExprI32Eq);
- EXPECT_ARITY(2, kExprI32Ne);
- EXPECT_ARITY(2, kExprI32LtS);
- EXPECT_ARITY(2, kExprI32LeS);
- EXPECT_ARITY(2, kExprI32LtU);
- EXPECT_ARITY(2, kExprI32LeU);
- EXPECT_ARITY(2, kExprI32GtS);
- EXPECT_ARITY(2, kExprI32GeS);
- EXPECT_ARITY(2, kExprI32GtU);
- EXPECT_ARITY(2, kExprI32GeU);
- EXPECT_ARITY(1, kExprI32Clz);
- EXPECT_ARITY(1, kExprI32Ctz);
- EXPECT_ARITY(1, kExprI32Popcnt);
- EXPECT_ARITY(1, kExprI32Eqz);
- EXPECT_ARITY(2, kExprI64Add);
- EXPECT_ARITY(2, kExprI64Sub);
- EXPECT_ARITY(2, kExprI64Mul);
- EXPECT_ARITY(2, kExprI64DivS);
- EXPECT_ARITY(2, kExprI64DivU);
- EXPECT_ARITY(2, kExprI64RemS);
- EXPECT_ARITY(2, kExprI64RemU);
- EXPECT_ARITY(2, kExprI64And);
- EXPECT_ARITY(2, kExprI64Ior);
- EXPECT_ARITY(2, kExprI64Xor);
- EXPECT_ARITY(2, kExprI64Shl);
- EXPECT_ARITY(2, kExprI64ShrU);
- EXPECT_ARITY(2, kExprI64ShrS);
- EXPECT_ARITY(2, kExprI64Eq);
- EXPECT_ARITY(2, kExprI64Ne);
- EXPECT_ARITY(2, kExprI64LtS);
- EXPECT_ARITY(2, kExprI64LeS);
- EXPECT_ARITY(2, kExprI64LtU);
- EXPECT_ARITY(2, kExprI64LeU);
- EXPECT_ARITY(2, kExprI64GtS);
- EXPECT_ARITY(2, kExprI64GeS);
- EXPECT_ARITY(2, kExprI64GtU);
- EXPECT_ARITY(2, kExprI64GeU);
- EXPECT_ARITY(1, kExprI64Clz);
- EXPECT_ARITY(1, kExprI64Ctz);
- EXPECT_ARITY(1, kExprI64Popcnt);
- EXPECT_ARITY(2, kExprF32Add);
- EXPECT_ARITY(2, kExprF32Sub);
- EXPECT_ARITY(2, kExprF32Mul);
- EXPECT_ARITY(2, kExprF32Div);
- EXPECT_ARITY(2, kExprF32Min);
- EXPECT_ARITY(2, kExprF32Max);
- EXPECT_ARITY(1, kExprF32Abs);
- EXPECT_ARITY(1, kExprF32Neg);
- EXPECT_ARITY(2, kExprF32CopySign);
- EXPECT_ARITY(1, kExprF32Ceil);
- EXPECT_ARITY(1, kExprF32Floor);
- EXPECT_ARITY(1, kExprF32Trunc);
- EXPECT_ARITY(1, kExprF32NearestInt);
- EXPECT_ARITY(1, kExprF32Sqrt);
- EXPECT_ARITY(2, kExprF32Eq);
- EXPECT_ARITY(2, kExprF32Ne);
- EXPECT_ARITY(2, kExprF32Lt);
- EXPECT_ARITY(2, kExprF32Le);
- EXPECT_ARITY(2, kExprF32Gt);
- EXPECT_ARITY(2, kExprF32Ge);
- EXPECT_ARITY(2, kExprF64Add);
- EXPECT_ARITY(2, kExprF64Sub);
- EXPECT_ARITY(2, kExprF64Mul);
- EXPECT_ARITY(2, kExprF64Div);
- EXPECT_ARITY(2, kExprF64Min);
- EXPECT_ARITY(2, kExprF64Max);
- EXPECT_ARITY(1, kExprF64Abs);
- EXPECT_ARITY(1, kExprF64Neg);
- EXPECT_ARITY(2, kExprF64CopySign);
- EXPECT_ARITY(1, kExprF64Ceil);
- EXPECT_ARITY(1, kExprF64Floor);
- EXPECT_ARITY(1, kExprF64Trunc);
- EXPECT_ARITY(1, kExprF64NearestInt);
- EXPECT_ARITY(1, kExprF64Sqrt);
- EXPECT_ARITY(2, kExprF64Eq);
- EXPECT_ARITY(2, kExprF64Ne);
- EXPECT_ARITY(2, kExprF64Lt);
- EXPECT_ARITY(2, kExprF64Le);
- EXPECT_ARITY(2, kExprF64Gt);
- EXPECT_ARITY(2, kExprF64Ge);
- EXPECT_ARITY(1, kExprI32SConvertF32);
- EXPECT_ARITY(1, kExprI32SConvertF64);
- EXPECT_ARITY(1, kExprI32UConvertF32);
- EXPECT_ARITY(1, kExprI32UConvertF64);
- EXPECT_ARITY(1, kExprI32ConvertI64);
- EXPECT_ARITY(1, kExprI64SConvertF32);
- EXPECT_ARITY(1, kExprI64SConvertF64);
- EXPECT_ARITY(1, kExprI64UConvertF32);
- EXPECT_ARITY(1, kExprI64UConvertF64);
- EXPECT_ARITY(1, kExprI64SConvertI32);
- EXPECT_ARITY(1, kExprI64UConvertI32);
- EXPECT_ARITY(1, kExprF32SConvertI32);
- EXPECT_ARITY(1, kExprF32UConvertI32);
- EXPECT_ARITY(1, kExprF32SConvertI64);
- EXPECT_ARITY(1, kExprF32UConvertI64);
- EXPECT_ARITY(1, kExprF32ConvertF64);
- EXPECT_ARITY(1, kExprF32ReinterpretI32);
- EXPECT_ARITY(1, kExprF64SConvertI32);
- EXPECT_ARITY(1, kExprF64UConvertI32);
- EXPECT_ARITY(1, kExprF64SConvertI64);
- EXPECT_ARITY(1, kExprF64UConvertI64);
- EXPECT_ARITY(1, kExprF64ConvertF32);
- EXPECT_ARITY(1, kExprF64ReinterpretI64);
- EXPECT_ARITY(1, kExprI32ReinterpretF32);
- EXPECT_ARITY(1, kExprI64ReinterpretF64);
-}
-
typedef ZoneVector<LocalType> LocalTypeMap;
class LocalDeclDecoderTest : public TestWithZone {
public:
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
size_t ExpectRun(LocalTypeMap map, size_t pos, LocalType expected,
size_t count) {
diff --git a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
index 2b67f12ef5..ab2d937758 100644
--- a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
+++ b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
@@ -24,30 +24,28 @@ namespace wasm {
#define B2(a, b) kExprBlock, a, b, kExprEnd
#define B3(a, b, c) kExprBlock, a, b, c, kExprEnd
-struct ExpectedTarget {
+#define TRANSFER_VOID 0
+#define TRANSFER_ONE 1
+
+struct ExpectedPcDelta {
pc_t pc;
- ControlTransfer expected;
+ pcdiff_t expected;
};
// For nicer error messages.
-class ControlTransferMatcher : public MatcherInterface<const ControlTransfer&> {
+class ControlTransferMatcher : public MatcherInterface<const pcdiff_t&> {
public:
- explicit ControlTransferMatcher(pc_t pc, const ControlTransfer& expected)
+ explicit ControlTransferMatcher(pc_t pc, const pcdiff_t& expected)
: pc_(pc), expected_(expected) {}
void DescribeTo(std::ostream* os) const override {
- *os << "@" << pc_ << " {pcdiff = " << expected_.pcdiff
- << ", spdiff = " << expected_.spdiff
- << ", action = " << expected_.action << "}";
+ *os << "@" << pc_ << " pcdiff = " << expected_;
}
- bool MatchAndExplain(const ControlTransfer& input,
+ bool MatchAndExplain(const pcdiff_t& input,
MatchResultListener* listener) const override {
- if (input.pcdiff != expected_.pcdiff || input.spdiff != expected_.spdiff ||
- input.action != expected_.action) {
- *listener << "@" << pc_ << " {pcdiff = " << input.pcdiff
- << ", spdiff = " << input.spdiff
- << ", action = " << input.action << "}";
+ if (input != expected_) {
+ *listener << "@" << pc_ << " pcdiff = " << input;
return false;
}
return true;
@@ -55,36 +53,43 @@ class ControlTransferMatcher : public MatcherInterface<const ControlTransfer&> {
private:
pc_t pc_;
- const ControlTransfer& expected_;
+ const pcdiff_t& expected_;
};
class ControlTransferTest : public TestWithZone {
public:
- void CheckControlTransfers(const byte* start, const byte* end,
- ExpectedTarget* expected_targets,
- size_t num_targets) {
+ void CheckPcDeltas(const byte* start, const byte* end,
+ ExpectedPcDelta* expected_deltas, size_t num_targets) {
ControlTransferMap map =
WasmInterpreter::ComputeControlTransfersForTesting(zone(), start, end);
// Check all control targets in the map.
for (size_t i = 0; i < num_targets; i++) {
- pc_t pc = expected_targets[i].pc;
+ pc_t pc = expected_deltas[i].pc;
auto it = map.find(pc);
if (it == map.end()) {
- printf("expected control target @ +%zu\n", pc);
- EXPECT_TRUE(false);
+ EXPECT_TRUE(false) << "expected control target @ " << pc;
} else {
- ControlTransfer& expected = expected_targets[i].expected;
- ControlTransfer& target = it->second;
+ pcdiff_t expected = expected_deltas[i].expected;
+ pcdiff_t& target = it->second;
EXPECT_THAT(target,
MakeMatcher(new ControlTransferMatcher(pc, expected)));
}
}
// Check there are no other control targets.
+ CheckNoOtherTargets<ExpectedPcDelta>(start, end, map, expected_deltas,
+ num_targets);
+ }
+
+ template <typename T>
+ void CheckNoOtherTargets(const byte* start, const byte* end,
+ ControlTransferMap& map, T* targets,
+ size_t num_targets) {
+ // Check there are no other control targets.
for (pc_t pc = 0; start + pc < end; pc++) {
bool found = false;
for (size_t i = 0; i < num_targets; i++) {
- if (expected_targets[i].pc == pc) {
+ if (targets[i].pc == pc) {
found = true;
break;
}
@@ -98,125 +103,128 @@ class ControlTransferTest : public TestWithZone {
}
};
-// Macro for simplifying tests below.
-#define EXPECT_TARGETS(...) \
- do { \
- ExpectedTarget pairs[] = {__VA_ARGS__}; \
- CheckControlTransfers(code, code + sizeof(code), pairs, arraysize(pairs)); \
+#define EXPECT_PC_DELTAS(...) \
+ do { \
+ ExpectedPcDelta pairs[] = {__VA_ARGS__}; \
+ CheckPcDeltas(code, code + sizeof(code), pairs, arraysize(pairs)); \
} while (false)
TEST_F(ControlTransferTest, SimpleIf) {
byte code[] = {
kExprI32Const, // @0
- 0, // +1
+ 0, // @1
kExprIf, // @2
- kExprEnd // @3
+ kLocalVoid, // @3
+ kExprEnd // @4
};
- EXPECT_TARGETS({2, {2, 0, ControlTransfer::kPushVoid}}, // --
- {3, {1, 0, ControlTransfer::kPushVoid}});
+ EXPECT_PC_DELTAS({2, 2});
}
TEST_F(ControlTransferTest, SimpleIf1) {
byte code[] = {
kExprI32Const, // @0
- 0, // +1
+ 0, // @1
kExprIf, // @2
- kExprNop, // @3
- kExprEnd // @4
+ kLocalVoid, // @3
+ kExprNop, // @4
+ kExprEnd // @5
};
- EXPECT_TARGETS({2, {3, 0, ControlTransfer::kPushVoid}}, // --
- {4, {1, 1, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({2, 3});
}
TEST_F(ControlTransferTest, SimpleIf2) {
byte code[] = {
kExprI32Const, // @0
- 0, // +1
+ 0, // @1
kExprIf, // @2
- kExprNop, // @3
+ kLocalVoid, // @3
kExprNop, // @4
- kExprEnd // @5
+ kExprNop, // @5
+ kExprEnd // @6
};
- EXPECT_TARGETS({2, {4, 0, ControlTransfer::kPushVoid}}, // --
- {5, {1, 2, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({2, 4});
}
TEST_F(ControlTransferTest, SimpleIfElse) {
byte code[] = {
kExprI32Const, // @0
- 0, // +1
+ 0, // @1
kExprIf, // @2
- kExprElse, // @3
- kExprEnd // @4
+ kLocalVoid, // @3
+ kExprElse, // @4
+ kExprEnd // @5
};
- EXPECT_TARGETS({2, {2, 0, ControlTransfer::kNoAction}}, // --
- {3, {2, 0, ControlTransfer::kPushVoid}}, // --
- {4, {1, 0, ControlTransfer::kPushVoid}});
+ EXPECT_PC_DELTAS({2, 3}, {4, 2});
+}
+
+TEST_F(ControlTransferTest, SimpleIfElse_v1) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // @1
+ kExprIf, // @2
+ kLocalVoid, // @3
+ kExprI8Const, // @4
+ 0, // @5
+ kExprElse, // @6
+ kExprI8Const, // @7
+ 0, // @8
+ kExprEnd // @9
+ };
+ EXPECT_PC_DELTAS({2, 5}, {6, 4});
}
TEST_F(ControlTransferTest, SimpleIfElse1) {
byte code[] = {
kExprI32Const, // @0
- 0, // +1
+ 0, // @1
kExprIf, // @2
- kExprNop, // @3
+ kLocalVoid, // @3
kExprElse, // @4
kExprNop, // @5
kExprEnd // @6
};
- EXPECT_TARGETS({2, {3, 0, ControlTransfer::kNoAction}}, // --
- {4, {3, 1, ControlTransfer::kPopAndRepush}}, // --
- {6, {1, 1, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({2, 3}, {4, 3});
}
TEST_F(ControlTransferTest, IfBr) {
byte code[] = {
kExprI32Const, // @0
- 0, // +1
+ 0, // @1
kExprIf, // @2
- kExprBr, // @3
- ARITY_0, // +1
- 0, // +1
+ kLocalVoid, // @3
+ kExprBr, // @4
+ 0, // @5
kExprEnd // @6
};
- EXPECT_TARGETS({2, {5, 0, ControlTransfer::kPushVoid}}, // --
- {3, {4, 0, ControlTransfer::kPushVoid}}, // --
- {6, {1, 1, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({2, 4}, {4, 3});
}
TEST_F(ControlTransferTest, IfBrElse) {
byte code[] = {
kExprI32Const, // @0
- 0, // +1
+ 0, // @1
kExprIf, // @2
- kExprBr, // @3
- ARITY_0, // +1
- 0, // +1
+ kLocalVoid, // @3
+ kExprBr, // @4
+ 0, // @5
kExprElse, // @6
kExprEnd // @7
};
- EXPECT_TARGETS({2, {5, 0, ControlTransfer::kNoAction}}, // --
- {3, {5, 0, ControlTransfer::kPushVoid}}, // --
- {6, {2, 1, ControlTransfer::kPopAndRepush}}, // --
- {7, {1, 0, ControlTransfer::kPushVoid}});
+ EXPECT_PC_DELTAS({2, 5}, {4, 4}, {6, 2});
}
TEST_F(ControlTransferTest, IfElseBr) {
byte code[] = {
kExprI32Const, // @0
- 0, // +1
+ 0, // @1
kExprIf, // @2
- kExprNop, // @3
+ kLocalVoid, // @3
kExprElse, // @4
kExprBr, // @5
- ARITY_0, // +1
- 0, // +1
- kExprEnd // @8
+ 0, // @6
+ kExprEnd // @7
};
- EXPECT_TARGETS({2, {3, 0, ControlTransfer::kNoAction}}, // --
- {4, {5, 1, ControlTransfer::kPopAndRepush}}, // --
- {5, {4, 0, ControlTransfer::kPushVoid}}, // --
- {8, {1, 1, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({2, 3}, {4, 4}, {5, 3});
}
TEST_F(ControlTransferTest, BlockEmpty) {
@@ -224,177 +232,233 @@ TEST_F(ControlTransferTest, BlockEmpty) {
kExprBlock, // @0
kExprEnd // @1
};
- EXPECT_TARGETS({1, {1, 0, ControlTransfer::kPushVoid}});
+ CheckPcDeltas(code, code + sizeof(code), nullptr, 0);
}
TEST_F(ControlTransferTest, Br0) {
byte code[] = {
kExprBlock, // @0
- kExprBr, // @1
- ARITY_0, // +1
- 0, // +1
+ kLocalVoid, // @1
+ kExprBr, // @2
+ 0, // @3
kExprEnd // @4
};
- EXPECT_TARGETS({1, {4, 0, ControlTransfer::kPushVoid}},
- {4, {1, 1, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({2, 3});
}
TEST_F(ControlTransferTest, Br1) {
byte code[] = {
kExprBlock, // @0
- kExprNop, // @1
- kExprBr, // @2
- ARITY_0, // +1
- 0, // +1
+ kLocalVoid, // @1
+ kExprNop, // @2
+ kExprBr, // @3
+ 0, // @4
kExprEnd // @5
};
- EXPECT_TARGETS({2, {4, 1, ControlTransfer::kPopAndRepush}}, // --
- {5, {1, 2, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({3, 3});
+}
+
+TEST_F(ControlTransferTest, Br_v1a) {
+ byte code[] = {
+ kExprBlock, // @0
+ kLocalVoid, // @1
+ kExprI8Const, // @2
+ 0, // @3
+ kExprBr, // @4
+ 0, // @5
+ kExprEnd // @6
+ };
+ EXPECT_PC_DELTAS({4, 3});
+}
+
+TEST_F(ControlTransferTest, Br_v1b) {
+ byte code[] = {
+ kExprBlock, // @0
+ kLocalVoid, // @1
+ kExprI8Const, // @2
+ 0, // @3
+ kExprBr, // @4
+ 0, // @5
+ kExprEnd // @6
+ };
+ EXPECT_PC_DELTAS({4, 3});
+}
+
+TEST_F(ControlTransferTest, Br_v1c) {
+ byte code[] = {
+ kExprI8Const, // @0
+ 0, // @1
+ kExprBlock, // @2
+ kLocalVoid, // @3
+ kExprBr, // @4
+ 0, // @5
+ kExprEnd // @6
+ };
+ EXPECT_PC_DELTAS({4, 3});
}
TEST_F(ControlTransferTest, Br2) {
byte code[] = {
kExprBlock, // @0
- kExprNop, // @1
+ kLocalVoid, // @1
kExprNop, // @2
- kExprBr, // @3
- ARITY_0, // +1
- 0, // +1
+ kExprNop, // @3
+ kExprBr, // @4
+ 0, // @5
kExprEnd // @6
};
- EXPECT_TARGETS({3, {4, 2, ControlTransfer::kPopAndRepush}}, // --
- {6, {1, 3, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({4, 3});
}
TEST_F(ControlTransferTest, Br0b) {
byte code[] = {
kExprBlock, // @0
- kExprBr, // @1
- ARITY_0, // +1
- 0, // +1
+ kLocalVoid, // @1
+ kExprBr, // @2
+ 0, // @3
kExprNop, // @4
kExprEnd // @5
};
- EXPECT_TARGETS({1, {5, 0, ControlTransfer::kPushVoid}}, // --
- {5, {1, 2, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({2, 4});
}
TEST_F(ControlTransferTest, Br0c) {
byte code[] = {
kExprBlock, // @0
- kExprBr, // @1
- ARITY_0, // +1
- 0, // +1
+ kLocalVoid, // @1
+ kExprBr, // @2
+ 0, // @3
kExprNop, // @4
kExprNop, // @5
kExprEnd // @6
};
- EXPECT_TARGETS({1, {6, 0, ControlTransfer::kPushVoid}}, // --
- {6, {1, 3, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({2, 5});
}
TEST_F(ControlTransferTest, SimpleLoop1) {
byte code[] = {
- kExprLoop, // @0
- kExprBr, // @1
- ARITY_0, // +1
- 0, // +1
- kExprEnd // @4
+ kExprLoop, // @0
+ kLocalVoid, // @1
+ kExprBr, // @2
+ 0, // @3
+ kExprEnd // @4
};
- EXPECT_TARGETS({1, {-1, 0, ControlTransfer::kNoAction}}, // --
- {4, {1, 1, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({2, -2});
}
TEST_F(ControlTransferTest, SimpleLoop2) {
byte code[] = {
- kExprLoop, // @0
- kExprNop, // @1
- kExprBr, // @2
- ARITY_0, // +1
- 0, // +1
- kExprEnd // @5
+ kExprLoop, // @0
+ kLocalVoid, // @1
+ kExprNop, // @2
+ kExprBr, // @3
+ 0, // @4
+ kExprEnd // @5
};
- EXPECT_TARGETS({2, {-2, 1, ControlTransfer::kNoAction}}, // --
- {5, {1, 2, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({3, -3});
}
TEST_F(ControlTransferTest, SimpleLoopExit1) {
byte code[] = {
- kExprLoop, // @0
- kExprBr, // @1
- ARITY_0, // +1
- 1, // +1
- kExprEnd // @4
+ kExprLoop, // @0
+ kLocalVoid, // @1
+ kExprBr, // @2
+ 1, // @3
+ kExprEnd // @4
};
- EXPECT_TARGETS({1, {4, 0, ControlTransfer::kPushVoid}}, // --
- {4, {1, 1, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({2, 3});
}
TEST_F(ControlTransferTest, SimpleLoopExit2) {
byte code[] = {
- kExprLoop, // @0
- kExprNop, // @1
- kExprBr, // @2
- ARITY_0, // +1
- 1, // +1
- kExprEnd // @5
+ kExprLoop, // @0
+ kLocalVoid, // @1
+ kExprNop, // @2
+ kExprBr, // @3
+ 1, // @4
+ kExprEnd // @5
};
- EXPECT_TARGETS({2, {4, 1, ControlTransfer::kPopAndRepush}}, // --
- {5, {1, 2, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({3, 3});
}
TEST_F(ControlTransferTest, BrTable0) {
byte code[] = {
kExprBlock, // @0
- kExprI8Const, // @1
- 0, // +1
- kExprBrTable, // @3
- ARITY_0, // +1
- 0, // +1
- U32_LE(0), // +4
- kExprEnd // @10
+ kLocalVoid, // @1
+ kExprI8Const, // @2
+ 0, // @3
+ kExprBrTable, // @4
+ 0, // @5
+ U32V_1(0), // @6
+ kExprEnd // @7
};
- EXPECT_TARGETS({3, {8, 0, ControlTransfer::kPushVoid}}, // --
- {10, {1, 1, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({4, 4});
}
-TEST_F(ControlTransferTest, BrTable1) {
+TEST_F(ControlTransferTest, BrTable0_v1a) {
byte code[] = {
kExprBlock, // @0
- kExprI8Const, // @1
- 0, // +1
- kExprBrTable, // @3
- ARITY_0, // +1
- 1, // +1
- U32_LE(0), // +4
- U32_LE(0), // +4
- kExprEnd // @14
+ kLocalVoid, // @1
+ kExprI8Const, // @2
+ 0, // @3
+ kExprI8Const, // @4
+ 0, // @5
+ kExprBrTable, // @6
+ 0, // @7
+ U32V_1(0), // @8
+ kExprEnd // @9
};
- EXPECT_TARGETS({3, {12, 0, ControlTransfer::kPushVoid}}, // --
- {4, {11, 0, ControlTransfer::kPushVoid}}, // --
- {14, {1, 1, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({6, 4});
}
-TEST_F(ControlTransferTest, BrTable2) {
+TEST_F(ControlTransferTest, BrTable0_v1b) {
+ byte code[] = {
+ kExprBlock, // @0
+ kLocalVoid, // @1
+ kExprI8Const, // @2
+ 0, // @3
+ kExprI8Const, // @4
+ 0, // @5
+ kExprBrTable, // @6
+ 0, // @7
+ U32V_1(0), // @8
+ kExprEnd // @9
+ };
+ EXPECT_PC_DELTAS({6, 4});
+}
+
+TEST_F(ControlTransferTest, BrTable1) {
byte code[] = {
kExprBlock, // @0
- kExprBlock, // @1
+ kLocalVoid, // @1
kExprI8Const, // @2
- 0, // +1
+ 0, // @3
kExprBrTable, // @4
- ARITY_0, // +1
- 2, // +1
- U32_LE(0), // +4
- U32_LE(0), // +4
- U32_LE(1), // +4
- kExprEnd, // @19
- kExprEnd // @19
+ 1, // @5
+ U32V_1(0), // @6
+ U32V_1(0), // @7
+ kExprEnd // @8
+ };
+ EXPECT_PC_DELTAS({4, 5}, {5, 4});
+}
+
+TEST_F(ControlTransferTest, BrTable2) {
+ byte code[] = {
+ kExprBlock, // @0
+ kLocalVoid, // @1
+ kExprBlock, // @2
+ kLocalVoid, // @3
+ kExprI8Const, // @4
+ 0, // @5
+ kExprBrTable, // @6
+ 2, // @7
+ U32V_1(0), // @8
+ U32V_1(0), // @9
+ U32V_1(1), // @10
+ kExprEnd, // @11
+ kExprEnd // @12
};
- EXPECT_TARGETS({4, {16, 0, ControlTransfer::kPushVoid}}, // --
- {5, {15, 0, ControlTransfer::kPushVoid}}, // --
- {6, {15, 0, ControlTransfer::kPushVoid}}, // --
- {19, {1, 1, ControlTransfer::kPopAndRepush}}, // --
- {20, {1, 1, ControlTransfer::kPopAndRepush}});
+ EXPECT_PC_DELTAS({6, 6}, {7, 5}, {8, 5});
}
} // namespace wasm
diff --git a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
index 919ce8e234..cb452445bf 100644
--- a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
+++ b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
@@ -6,7 +6,7 @@
#include "src/v8.h"
-#include "test/cctest/wasm/test-signatures.h"
+#include "test/common/wasm/test-signatures.h"
#include "src/bit-vector.h"
#include "src/objects.h"
@@ -39,7 +39,7 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Empty0) {
}
TEST_F(WasmLoopAssignmentAnalyzerTest, Empty1) {
- byte code[] = {kExprLoop, 0};
+ byte code[] = {kExprLoop, kLocalVoid, 0};
for (int i = 0; i < 5; i++) {
BitVector* assigned = Analyze(code, code + arraysize(code));
for (int j = 0; j < assigned->length(); j++) {
@@ -60,6 +60,17 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, One) {
}
}
+TEST_F(WasmLoopAssignmentAnalyzerTest, TeeOne) {
+ num_locals = 5;
+ for (int i = 0; i < 5; i++) {
+ byte code[] = {WASM_LOOP(WASM_TEE_LOCAL(i, WASM_ZERO))};
+ BitVector* assigned = Analyze(code, code + arraysize(code));
+ for (int j = 0; j < assigned->length(); j++) {
+ CHECK_EQ(j == i, assigned->Contains(j));
+ }
+ }
+}
+
TEST_F(WasmLoopAssignmentAnalyzerTest, OneBeyond) {
num_locals = 5;
for (int i = 0; i < 5; i++) {
@@ -98,24 +109,10 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, NestedIf) {
}
}
-static byte LEBByte(uint32_t val, byte which) {
- byte b = (val >> (which * 7)) & 0x7F;
- if (val >> ((which + 1) * 7)) b |= 0x80;
- return b;
-}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, BigLocal) {
num_locals = 65000;
for (int i = 13; i < 65000; i = static_cast<int>(i * 1.5)) {
- byte code[] = {kExprLoop,
- 1,
- kExprSetLocal,
- LEBByte(i, 0),
- LEBByte(i, 1),
- LEBByte(i, 2),
- 11,
- 12,
- 13};
+ byte code[] = {WASM_LOOP(WASM_I8(11), kExprSetLocal, U32V_3(i))};
BitVector* assigned = Analyze(code, code + arraysize(code));
for (int j = 0; j < assigned->length(); j++) {
@@ -172,7 +169,7 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Loop2) {
WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO, WASM_GET_LOCAL(kSum)),
WASM_GET_LOCAL(kIter))};
- BitVector* assigned = Analyze(code + 1, code + arraysize(code));
+ BitVector* assigned = Analyze(code + 2, code + arraysize(code));
for (int j = 0; j < assigned->length(); j++) {
bool expected = j == kIter || j == kSum;
CHECK_EQ(expected, assigned->Contains(j));
@@ -180,13 +177,21 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Loop2) {
}
TEST_F(WasmLoopAssignmentAnalyzerTest, Malformed) {
- byte code[] = {kExprLoop, kExprF32Neg, kExprBrTable, 0x0e, 'h', 'e',
- 'l', 'l', 'o', ',', ' ', 'w',
- 'o', 'r', 'l', 'd', '!'};
+ byte code[] = {kExprLoop, kLocalVoid, kExprF32Neg, kExprBrTable, 0x0e, 'h',
+ 'e', 'l', 'l', 'o', ',', ' ',
+ 'w', 'o', 'r', 'l', 'd', '!'};
BitVector* assigned = Analyze(code, code + arraysize(code));
CHECK_NULL(assigned);
}
+TEST_F(WasmLoopAssignmentAnalyzerTest, regress_642867) {
+ static const byte code[] = {
+ WASM_LOOP(WASM_ZERO, kExprSetLocal, 0xfa, 0xff, 0xff, 0xff,
+ 0x0f)}; // local index LEB128 0xfffffffa
+ // Just make sure that the analysis does not crash.
+ Analyze(code, code + arraysize(code));
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 5c9c47ba00..42798ca81b 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -14,7 +14,16 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define EMPTY_FUNCTION(sig_index) 0, SIG_INDEX(sig_index), U16_LE(0)
+#define WASM_INIT_EXPR_I32V_1(val) WASM_I32V_1(val), kExprEnd
+#define WASM_INIT_EXPR_I32V_2(val) WASM_I32V_2(val), kExprEnd
+#define WASM_INIT_EXPR_I32V_3(val) WASM_I32V_3(val), kExprEnd
+#define WASM_INIT_EXPR_I32V_4(val) WASM_I32V_4(val), kExprEnd
+#define WASM_INIT_EXPR_I32V_5(val) WASM_I32V_5(val), kExprEnd
+#define WASM_INIT_EXPR_F32(val) WASM_F32(val), kExprEnd
+#define WASM_INIT_EXPR_I64(val) WASM_I64(val), kExprEnd
+#define WASM_INIT_EXPR_F64(val) WASM_F64(val), kExprEnd
+#define WASM_INIT_EXPR_GLOBAL(index) WASM_GET_GLOBAL(index), kExprEnd
+
#define SIZEOF_EMPTY_FUNCTION ((size_t)5)
#define EMPTY_BODY 0
#define SIZEOF_EMPTY_BODY ((size_t)1)
@@ -23,64 +32,64 @@ namespace wasm {
#define SIG_ENTRY_i_i SIG_ENTRY_x_x(kLocalI32, kLocalI32)
-#define UNKNOWN_EMPTY_SECTION_NAME 1, '\0'
-#define UNKNOWN_SECTION_NAME 4, 'l', 'u', 'l', 'z'
+#define UNKNOWN_SECTION(size) 0, U32V_1(size + 5), 4, 'l', 'u', 'l', 'z'
-#define SECTION(NAME, EXTRA_SIZE) WASM_SECTION_##NAME, U32V_1(EXTRA_SIZE)
+#define SECTION(name, size) k##name##SectionCode, U32V_1(size)
#define SIGNATURES_SECTION(count, ...) \
- SECTION(SIGNATURES, 1 + 3 * (count)), U32V_1(count), __VA_ARGS__
+ SECTION(Type, 1 + 3 * (count)), U32V_1(count), __VA_ARGS__
#define FUNCTION_SIGNATURES_SECTION(count, ...) \
- SECTION(FUNCTION_SIGNATURES, 1 + (count)), U32V_1(count), __VA_ARGS__
+ SECTION(Function, 1 + (count)), U32V_1(count), __VA_ARGS__
#define FOO_STRING 3, 'f', 'o', 'o'
#define NO_LOCAL_NAMES 0
-#define EMPTY_SIGNATURES_SECTION SECTION(SIGNATURES, 1), 0
-#define EMPTY_FUNCTION_SIGNATURES_SECTION SECTION(FUNCTION_SIGNATURES, 1), 0
-#define EMPTY_FUNCTION_BODIES_SECTION SECTION(FUNCTION_BODIES, 1), 0
-#define EMPTY_NAMES_SECTION SECTION(NAMES, 1), 0
+#define EMPTY_SIGNATURES_SECTION SECTION(Type, 1), 0
+#define EMPTY_FUNCTION_SIGNATURES_SECTION SECTION(Function, 1), 0
+#define EMPTY_FUNCTION_BODIES_SECTION SECTION(Code, 1), 0
+#define SECTION_NAMES(size) \
+ kUnknownSectionCode, U32V_1(size + 5), 4, 'n', 'a', 'm', 'e'
+#define EMPTY_NAMES_SECTION SECTION_NAMES(1), 0
#define X1(...) __VA_ARGS__
#define X2(...) __VA_ARGS__, __VA_ARGS__
#define X3(...) __VA_ARGS__, __VA_ARGS__, __VA_ARGS__
#define X4(...) __VA_ARGS__, __VA_ARGS__, __VA_ARGS__, __VA_ARGS__
-#define ONE_EMPTY_FUNCTION WASM_SECTION_FUNCTION_SIGNATURES, 1 + 1 * 1, 1, X1(0)
+#define ONE_EMPTY_FUNCTION SECTION(Function, 1 + 1 * 1), 1, X1(0)
-#define TWO_EMPTY_FUNCTIONS \
- WASM_SECTION_FUNCTION_SIGNATURES, 1 + 2 * 1, 2, X2(0)
+#define TWO_EMPTY_FUNCTIONS SECTION(Function, 1 + 2 * 1), 2, X2(0)
-#define THREE_EMPTY_FUNCTIONS \
- WASM_SECTION_FUNCTION_SIGNATURES, 1 + 3 * 1, 3, X3(0)
+#define THREE_EMPTY_FUNCTIONS SECTION(Function, 1 + 3 * 1), 3, X3(0)
-#define FOUR_EMPTY_FUNCTIONS \
- WASM_SECTION_FUNCTION_SIGNATURES, 1 + 4 * 1, 4, X4(0)
+#define FOUR_EMPTY_FUNCTIONS SECTION(Function, 1 + 4 * 1), 4, X4(0)
-#define ONE_EMPTY_BODY \
- WASM_SECTION_FUNCTION_BODIES, 1 + 1 * (1 + SIZEOF_EMPTY_BODY), 1, \
- X1(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+#define ONE_EMPTY_BODY \
+ SECTION(Code, 1 + 1 * (1 + SIZEOF_EMPTY_BODY)) \
+ , 1, X1(SIZEOF_EMPTY_BODY, EMPTY_BODY)
-#define TWO_EMPTY_BODIES \
- WASM_SECTION_FUNCTION_BODIES, 1 + 2 * (1 + SIZEOF_EMPTY_BODY), 2, \
- X2(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+#define TWO_EMPTY_BODIES \
+ SECTION(Code, 1 + 2 * (1 + SIZEOF_EMPTY_BODY)) \
+ , 2, X2(SIZEOF_EMPTY_BODY, EMPTY_BODY)
-#define THREE_EMPTY_BODIES \
- WASM_SECTION_FUNCTION_BODIES, 1 + 3 * (1 + SIZEOF_EMPTY_BODY), 3, \
- X3(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+#define THREE_EMPTY_BODIES \
+ SECTION(Code, 1 + 3 * (1 + SIZEOF_EMPTY_BODY)) \
+ , 3, X3(SIZEOF_EMPTY_BODY, EMPTY_BODY)
-#define FOUR_EMPTY_BODIES \
- WASM_SECTION_FUNCTION_BODIES, 1 + 4 * (1 + SIZEOF_EMPTY_BODY), 4, \
- X4(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+#define FOUR_EMPTY_BODIES \
+ SECTION(Code, 1 + 4 * (1 + SIZEOF_EMPTY_BODY)) \
+ , 4, X4(SIZEOF_EMPTY_BODY, EMPTY_BODY)
#define SIGNATURES_SECTION_VOID_VOID \
- SECTION(SIGNATURES, 1 + SIZEOF_SIG_ENTRY_v_v), 1, SIG_ENTRY_v_v
+ SECTION(Type, 1 + SIZEOF_SIG_ENTRY_v_v), 1, SIG_ENTRY_v_v
+
+#define LINEAR_MEMORY_INDEX_0 0
-#define EXPECT_VERIFIES(data) \
- do { \
- ModuleResult result = DecodeModule(data, data + arraysize(data)); \
- EXPECT_TRUE(result.ok()); \
- if (result.val) delete result.val; \
+#define EXPECT_VERIFIES(data) \
+ do { \
+ ModuleResult result = DecodeModule(data, data + sizeof(data)); \
+ EXPECT_TRUE(result.ok()); \
+ if (result.val) delete result.val; \
} while (false)
#define EXPECT_FAILURE_LEN(data, length) \
@@ -149,8 +158,7 @@ class WasmModuleVerifyTest : public TestWithIsolateAndZone {
TEST_F(WasmModuleVerifyTest, WrongMagic) {
for (uint32_t x = 1; x; x <<= 1) {
- const byte data[] = {U32_LE(kWasmMagic ^ x), U32_LE(kWasmVersion),
- SECTION(END, 0)};
+ const byte data[] = {U32_LE(kWasmMagic ^ x), U32_LE(kWasmVersion)};
ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
EXPECT_FALSE(result.ok());
if (result.val) delete result.val;
@@ -159,8 +167,7 @@ TEST_F(WasmModuleVerifyTest, WrongMagic) {
TEST_F(WasmModuleVerifyTest, WrongVersion) {
for (uint32_t x = 1; x; x <<= 1) {
- const byte data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion ^ x),
- SECTION(END, 0)};
+ const byte data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion ^ x)};
ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
EXPECT_FALSE(result.ok());
if (result.val) delete result.val;
@@ -168,23 +175,23 @@ TEST_F(WasmModuleVerifyTest, WrongVersion) {
}
TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
- static const byte data[] = {SECTION(END, 0)};
- EXPECT_VERIFIES(data);
+ ModuleResult result = DecodeModule(nullptr, 0);
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, OneGlobal) {
static const byte data[] = {
- SECTION(GLOBALS, 5), // --
+ SECTION(Global, 6), // --
1,
- NAME_LENGTH(1),
- 'g', // name
- kLocalI32, // local type
- 0, // exported
+ kLocalI32, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_I32V_1(13) // init
};
{
// Should decode to exactly one global.
- ModuleResult result = DecodeModule(data, data + arraysize(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
EXPECT_EQ(1, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
@@ -192,10 +199,11 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
const WasmGlobal* global = &result.val->globals.back();
- EXPECT_EQ(1, global->name_length);
EXPECT_EQ(kAstI32, global->type);
EXPECT_EQ(0, global->offset);
- EXPECT_FALSE(global->exported);
+ EXPECT_FALSE(global->mutability);
+ EXPECT_EQ(WasmInitExpr::kI32Const, global->init.kind);
+ EXPECT_EQ(13, global->init.val.i32_const);
if (result.val) delete result.val;
}
@@ -205,25 +213,38 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
TEST_F(WasmModuleVerifyTest, Global_invalid_type) {
static const byte data[] = {
- SECTION(GLOBALS, 5), // --
+ SECTION(Global, 6), // --
1,
- NAME_LENGTH(1),
- 'g', // name
- 64, // invalid memory type
- 0, // exported
+ 64, // invalid memory type
+ 1, // mutable
+ WASM_INIT_EXPR_I32V_1(33), // init
+ };
+
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+}
+
+TEST_F(WasmModuleVerifyTest, Global_invalid_type2) {
+ static const byte data[] = {
+ SECTION(Global, 6), // --
+ 1,
+ kLocalVoid, // invalid memory type
+ 1, // mutable
+ WASM_INIT_EXPR_I32V_1(33), // init
};
- ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_FALSE(result.ok());
if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
static const byte data[] = {
- SECTION(GLOBALS, 1), // --
- 0, // declare 0 globals
+ SECTION(Global, 1), // --
+ 0, // declare 0 globals
};
- ModuleResult result = DecodeModule(data, data + arraysize(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
if (result.val) delete result.val;
}
@@ -244,15 +265,15 @@ static void AppendUint32v(std::vector<byte>& buffer, uint32_t val) {
TEST_F(WasmModuleVerifyTest, NGlobals) {
static const byte data[] = {
- NO_NAME, // name length
- kLocalF32, // memory type
- 0, // exported
+ kLocalF32, // memory type
+ 0, // immutable
+ WASM_INIT_EXPR_F32(7.7), // init
};
for (uint32_t i = 0; i < 1000000; i = i * 13 + 1) {
std::vector<byte> buffer;
size_t size = SizeOfVarInt(i) + i * sizeof(data);
- const byte globals[] = {WASM_SECTION_GLOBALS, U32V_5(size)};
+ const byte globals[] = {kGlobalSectionCode, U32V_5(size)};
for (size_t g = 0; g != sizeof(globals); ++g) {
buffer.push_back(globals[g]);
}
@@ -267,62 +288,48 @@ TEST_F(WasmModuleVerifyTest, NGlobals) {
}
}
-TEST_F(WasmModuleVerifyTest, GlobalWithInvalidNameOffset) {
- static const byte data[] = {
- SECTION(GLOBALS, 7),
- 1, // declare one global
- NO_NAME, // name offset
- 33, // memory type
- 0, // exported
- };
-
- EXPECT_FAILURE(data);
-}
-
TEST_F(WasmModuleVerifyTest, GlobalWithInvalidMemoryType) {
- static const byte data[] = {
- SECTION(GLOBALS, 7),
- 1, // declare one global
- NO_NAME, // name offset
- 33, // memory type
- 0, // exported
- };
+ static const byte data[] = {SECTION(Global, 7),
+ 33, // memory type
+ 0, // exported
+ WASM_INIT_EXPR_I32V_1(1)};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, TwoGlobals) {
static const byte data[] = {
- SECTION(GLOBALS, 7),
+ SECTION(Global, 21),
2,
- NO_NAME, // #0: name length
kLocalF32, // type
- 0, // exported
- NO_NAME, // #1: name length
+ 0, // immutable
+ WASM_INIT_EXPR_F32(22.0),
kLocalF64, // type
- 1, // exported
+ 1, // mutable
+ WASM_INIT_EXPR_F64(23.0),
};
{
// Should decode to exactly two globals.
- ModuleResult result = DecodeModule(data, data + arraysize(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
EXPECT_EQ(2, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(0, result.val->data_segments.size());
const WasmGlobal* g0 = &result.val->globals[0];
- const WasmGlobal* g1 = &result.val->globals[1];
- EXPECT_EQ(0, g0->name_length);
EXPECT_EQ(kAstF32, g0->type);
EXPECT_EQ(0, g0->offset);
- EXPECT_FALSE(g0->exported);
+ EXPECT_FALSE(g0->mutability);
+ EXPECT_EQ(WasmInitExpr::kF32Const, g0->init.kind);
+
+ const WasmGlobal* g1 = &result.val->globals[1];
- EXPECT_EQ(0, g1->name_length);
EXPECT_EQ(kAstF64, g1->type);
EXPECT_EQ(8, g1->offset);
- EXPECT_TRUE(g1->exported);
+ EXPECT_TRUE(g1->mutability);
+ EXPECT_EQ(WasmInitExpr::kF64Const, g1->init.kind);
if (result.val) delete result.val;
}
@@ -337,23 +344,23 @@ TEST_F(WasmModuleVerifyTest, OneSignature) {
}
{
- static const byte data[] = {SECTION(SIGNATURES, 1 + SIZEOF_SIG_ENTRY_x_x),
- 1, SIG_ENTRY_i_i};
+ static const byte data[] = {SECTION(Type, 1 + SIZEOF_SIG_ENTRY_x_x), 1,
+ SIG_ENTRY_i_i};
EXPECT_VERIFIES(data);
}
}
TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
static const byte data[] = {
- SECTION(SIGNATURES, 1 + SIZEOF_SIG_ENTRY_v_v + SIZEOF_SIG_ENTRY_x_x +
- SIZEOF_SIG_ENTRY_x_xx), // --
+ SECTION(Type, 1 + SIZEOF_SIG_ENTRY_v_v + SIZEOF_SIG_ENTRY_x_x +
+ SIZEOF_SIG_ENTRY_x_xx), // --
3, // --
SIG_ENTRY_v_v, // void -> void
SIG_ENTRY_x_x(kLocalI32, kLocalF32), // f32 -> i32
SIG_ENTRY_x_xx(kLocalI32, kLocalF64, kLocalF64), // f64,f64 -> i32
};
- ModuleResult result = DecodeModule(data, data + arraysize(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
EXPECT_EQ(3, result.val->signatures.size());
if (result.val->signatures.size() == 3) {
@@ -371,16 +378,18 @@ TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
}
TEST_F(WasmModuleVerifyTest, OneDataSegment) {
- const byte kDataSegmentSourceOffset = 30;
+ const byte kDataSegmentSourceOffset = 24;
const byte data[] = {
- SECTION(MEMORY, 3),
+ SECTION(Memory, 4),
+ ENTRY_COUNT(1),
+ kResizableMaximumFlag,
28,
28,
- 1,
- SECTION(DATA_SEGMENTS, 8),
- 1,
- U32V_3(0x9bbaa), // dest addr
- U32V_1(3), // source size
+ SECTION(Data, 11),
+ ENTRY_COUNT(1),
+ LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_I32V_3(0x9bbaa), // dest addr
+ U32V_1(3), // source size
'a',
'b',
'c' // data bytes
@@ -388,7 +397,7 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
{
EXPECT_VERIFIES(data);
- ModuleResult result = DecodeModule(data, data + arraysize(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
EXPECT_EQ(0, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
@@ -396,36 +405,39 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
const WasmDataSegment* segment = &result.val->data_segments.back();
- EXPECT_EQ(0x9bbaa, segment->dest_addr);
+ EXPECT_EQ(WasmInitExpr::kI32Const, segment->dest_addr.kind);
+ EXPECT_EQ(0x9bbaa, segment->dest_addr.val.i32_const);
EXPECT_EQ(kDataSegmentSourceOffset, segment->source_offset);
EXPECT_EQ(3, segment->source_size);
- EXPECT_TRUE(segment->init);
if (result.val) delete result.val;
}
- EXPECT_OFF_END_FAILURE(data, 13, sizeof(data));
+ EXPECT_OFF_END_FAILURE(data, 14, sizeof(data));
}
TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
- const byte kDataSegment0SourceOffset = 30;
- const byte kDataSegment1SourceOffset = 30 + 8;
+ const byte kDataSegment0SourceOffset = 24;
+ const byte kDataSegment1SourceOffset = kDataSegment0SourceOffset + 11;
const byte data[] = {
- SECTION(MEMORY, 3),
+ SECTION(Memory, 4),
+ ENTRY_COUNT(1),
+ kResizableMaximumFlag,
28,
28,
- 1,
- SECTION(DATA_SEGMENTS, 23),
- 2, // segment count
- U32V_3(0x7ffee), // #0: dest addr
- U32V_1(4), // source size
+ SECTION(Data, 29),
+ ENTRY_COUNT(2), // segment count
+ LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_I32V_3(0x7ffee), // #0: dest addr
+ U32V_1(4), // source size
1,
2,
3,
- 4, // data bytes
- U32V_3(0x6ddcc), // #1: dest addr
- U32V_1(10), // source size
+ 4, // data bytes
+ LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_I32V_3(0x6ddcc), // #1: dest addr
+ U32V_1(10), // source size
1,
2,
3,
@@ -439,7 +451,7 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
};
{
- ModuleResult result = DecodeModule(data, data + arraysize(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
EXPECT_EQ(0, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
@@ -448,61 +460,79 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
const WasmDataSegment* s0 = &result.val->data_segments[0];
const WasmDataSegment* s1 = &result.val->data_segments[1];
- EXPECT_EQ(0x7ffee, s0->dest_addr);
+ EXPECT_EQ(WasmInitExpr::kI32Const, s0->dest_addr.kind);
+ EXPECT_EQ(0x7ffee, s0->dest_addr.val.i32_const);
EXPECT_EQ(kDataSegment0SourceOffset, s0->source_offset);
EXPECT_EQ(4, s0->source_size);
- EXPECT_TRUE(s0->init);
- EXPECT_EQ(0x6ddcc, s1->dest_addr);
+ EXPECT_EQ(WasmInitExpr::kI32Const, s1->dest_addr.kind);
+ EXPECT_EQ(0x6ddcc, s1->dest_addr.val.i32_const);
EXPECT_EQ(kDataSegment1SourceOffset, s1->source_offset);
EXPECT_EQ(10, s1->source_size);
- EXPECT_TRUE(s1->init);
if (result.val) delete result.val;
}
- EXPECT_OFF_END_FAILURE(data, 13, sizeof(data));
-}
-
-TEST_F(WasmModuleVerifyTest, DataSegmentWithInvalidDest) {
- const int source_size = 3;
-
- for (byte mem_pages = 1; mem_pages < 16; mem_pages++) {
- int mem_size = mem_pages * 0x10000; // 64k pages.
-
- for (int dest_addr = mem_size - source_size;
- dest_addr < mem_size + source_size; dest_addr++) {
- byte data[] = {SECTION(MEMORY, 3),
- mem_pages,
- mem_pages,
- 1,
- SECTION(DATA_SEGMENTS, 8),
- 1,
- U32V_3(dest_addr),
- U32V_1(source_size),
- 'a',
- 'b',
- 'c'};
-
- if (dest_addr <= (mem_size - source_size)) {
- EXPECT_VERIFIES(data);
- } else {
- EXPECT_FAILURE(data);
- }
- }
- }
+ EXPECT_OFF_END_FAILURE(data, 14, sizeof(data));
+}
+
+TEST_F(WasmModuleVerifyTest, DataSegment_wrong_init_type) {
+ const byte data[] = {
+ SECTION(Memory, 4),
+ ENTRY_COUNT(1),
+ kResizableMaximumFlag,
+ 28,
+ 28,
+ SECTION(Data, 11),
+ ENTRY_COUNT(1),
+ LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_F64(9.9), // dest addr
+ U32V_1(3), // source size
+ 'a',
+ 'b',
+ 'c' // data bytes
+ };
+
+ EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
static const byte data[] = {
- // sig#0 -------------------------------------------------------
+ // sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
- // funcs ------------------------------------------------------
+ // funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION,
- // indirect table ----------------------------------------------
- SECTION(FUNCTION_TABLE, 2), 1, U32V_1(0)};
+ // table declaration ---------------------------------------------------
+ SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeForm, 0, 1};
- ModuleResult result = DecodeModule(data, data + arraysize(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_OK(result);
+ if (result.ok()) {
+ EXPECT_EQ(1, result.val->signatures.size());
+ EXPECT_EQ(1, result.val->functions.size());
+ EXPECT_EQ(1, result.val->function_tables.size());
+ EXPECT_EQ(1, result.val->function_tables[0].values.size());
+ EXPECT_EQ(-1, result.val->function_tables[0].values[0]);
+ }
+ if (result.val) delete result.val;
+}
+
+TEST_F(WasmModuleVerifyTest, OneIndirectFunction_one_entry) {
+ static const byte data[] = {
+ // sig#0 ---------------------------------------------------------------
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs ---------------------------------------------------------------
+ ONE_EMPTY_FUNCTION,
+ // table declaration ---------------------------------------------------
+ SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeForm, 0, 1,
+ // elements ------------------------------------------------------------
+ SECTION(Element, 7),
+ 1, // entry count
+ TABLE_INDEX(0), WASM_INIT_EXPR_I32V_1(0),
+ 1, // elements count
+ FUNC_INDEX(0)};
+
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
if (result.ok()) {
EXPECT_EQ(1, result.val->signatures.size());
@@ -517,25 +547,30 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- SECTION(SIGNATURES, 1 + SIZEOF_SIG_ENTRY_v_v + SIZEOF_SIG_ENTRY_v_x),
+ SECTION(Type, 1 + SIZEOF_SIG_ENTRY_v_v + SIZEOF_SIG_ENTRY_v_x),
2, // --
SIG_ENTRY_v_v, // void -> void
SIG_ENTRY_v_x(kLocalI32), // void -> i32
// funcs ------------------------------------------------------
FOUR_EMPTY_FUNCTIONS,
- // indirect table ----------------------------------------------
- SECTION(FUNCTION_TABLE, 9), 8,
- U32V_1(0), // --
- U32V_1(1), // --
- U32V_1(2), // --
- U32V_1(3), // --
- U32V_1(0), // --
- U32V_1(1), // --
- U32V_1(2), // --
- U32V_1(3), // --
+ // table declaration -------------------------------------------
+ SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeForm, 0, 8,
+ // table elements ----------------------------------------------
+ SECTION(Element, 14),
+ 1, // entry count
+ TABLE_INDEX(0), WASM_INIT_EXPR_I32V_1(0),
+ 8, // elements count
+ FUNC_INDEX(0), // --
+ FUNC_INDEX(1), // --
+ FUNC_INDEX(2), // --
+ FUNC_INDEX(3), // --
+ FUNC_INDEX(0), // --
+ FUNC_INDEX(1), // --
+ FUNC_INDEX(2), // --
+ FUNC_INDEX(3), // --
FOUR_EMPTY_BODIES};
- ModuleResult result = DecodeModule(data, data + arraysize(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
if (result.ok()) {
EXPECT_EQ(2, result.val->signatures.size());
@@ -554,7 +589,7 @@ TEST_F(WasmModuleVerifyTest, IndirectFunctionNoFunctions) {
// sig#0 -------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
// indirect table ----------------------------------------------
- SECTION(FUNCTION_TABLE, 3), 1, 0, 0,
+ SECTION(Table, 4), ENTRY_COUNT(1), 1, 0, 0,
};
EXPECT_FAILURE(data);
@@ -567,7 +602,7 @@ TEST_F(WasmModuleVerifyTest, IndirectFunctionInvalidIndex) {
// functions ---------------------------------------------------
ONE_EMPTY_FUNCTION,
// indirect table ----------------------------------------------
- SECTION(FUNCTION_TABLE, 3), 1, 1, 0,
+ SECTION(Table, 4), ENTRY_COUNT(1), 1, 1, 0,
};
EXPECT_FAILURE(data);
@@ -577,10 +612,10 @@ class WasmSignatureDecodeTest : public TestWithZone {};
TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
static const byte data[] = {SIG_ENTRY_v_v};
- base::AccountingAllocator allocator;
+ v8::internal::AccountingAllocator allocator;
Zone zone(&allocator);
FunctionSig* sig =
- DecodeWasmSignatureForTesting(&zone, data, data + arraysize(data));
+ DecodeWasmSignatureForTesting(&zone, data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(0, sig->parameter_count());
@@ -592,7 +627,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
LocalTypePair ret_type = kLocalTypes[i];
const byte data[] = {SIG_ENTRY_x(ret_type.code)};
FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(0, sig->parameter_count());
@@ -606,7 +641,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
LocalTypePair param_type = kLocalTypes[i];
const byte data[] = {SIG_ENTRY_v_x(param_type.code)};
FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(1, sig->parameter_count());
@@ -622,7 +657,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
LocalTypePair param_type = kLocalTypes[j];
const byte data[] = {SIG_ENTRY_x_x(ret_type.code, param_type.code)};
FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(1, sig->parameter_count());
@@ -641,7 +676,7 @@ TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
const byte data[] = {
SIG_ENTRY_x_xx(kLocalI32, p0_type.code, p1_type.code)};
FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(2, sig->parameter_count());
@@ -672,7 +707,7 @@ TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalI32)};
data[i] = kInvalidType;
FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
}
@@ -680,21 +715,21 @@ TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
TEST_F(WasmSignatureDecodeTest, Fail_invalid_ret_type1) {
static const byte data[] = {SIG_ENTRY_x_x(kLocalVoid, kLocalI32)};
FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type1) {
static const byte data[] = {SIG_ENTRY_x_x(kLocalI32, kLocalVoid)};
FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type2) {
static const byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalVoid)};
FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
@@ -715,8 +750,8 @@ TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
kExprNop // body
};
- FunctionResult result = DecodeWasmFunction(isolate(), zone(), nullptr, data,
- data + arraysize(data));
+ FunctionResult result =
+ DecodeWasmFunction(isolate(), zone(), nullptr, data, data + sizeof(data));
EXPECT_OK(result);
if (result.val && result.ok()) {
@@ -725,7 +760,7 @@ TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
EXPECT_EQ(0, function->sig->return_count());
EXPECT_EQ(0, function->name_offset);
EXPECT_EQ(SIZEOF_SIG_ENTRY_v_v, function->code_start_offset);
- EXPECT_EQ(arraysize(data), function->code_end_offset);
+ EXPECT_EQ(sizeof(data), function->code_end_offset);
// TODO(titzer): verify encoding of local declarations
}
@@ -739,6 +774,7 @@ TEST_F(WasmModuleVerifyTest, SectionWithoutNameLength) {
TEST_F(WasmModuleVerifyTest, TheLoneliestOfValidModulesTheTrulyEmptyOne) {
const byte data[] = {
+ 0, // unknown section code.
0, // Empty section name.
// No section name, no content, nothing but sadness.
0, // No section content.
@@ -748,15 +784,14 @@ TEST_F(WasmModuleVerifyTest, TheLoneliestOfValidModulesTheTrulyEmptyOne) {
TEST_F(WasmModuleVerifyTest, OnlyUnknownSectionEmpty) {
const byte data[] = {
- UNKNOWN_SECTION_NAME, 0,
+ UNKNOWN_SECTION(0),
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, OnlyUnknownSectionNonEmpty) {
const byte data[] = {
- UNKNOWN_SECTION_NAME,
- 5, // section size
+ UNKNOWN_SECTION(5),
0xff,
0xff,
0xff,
@@ -771,9 +806,7 @@ TEST_F(WasmModuleVerifyTest, SignatureFollowedByEmptyUnknownSection) {
// signatures
SIGNATURES_SECTION_VOID_VOID,
// -----------------------------------------------------------
- UNKNOWN_SECTION_NAME,
- 0 // empty section
- };
+ UNKNOWN_SECTION(0)};
EXPECT_VERIFIES(data);
}
@@ -782,29 +815,14 @@ TEST_F(WasmModuleVerifyTest, SignatureFollowedByUnknownSection) {
// signatures
SIGNATURES_SECTION_VOID_VOID,
// -----------------------------------------------------------
- UNKNOWN_SECTION_NAME,
- 5, // section size
- 0xff, 0xff, 0xff, 0xff, 0xff,
- };
- EXPECT_VERIFIES(data);
-}
-
-TEST_F(WasmModuleVerifyTest, SignatureFollowedByUnknownSectionWithLongLEB) {
- const byte data[] = {
- // signatures
- SIGNATURES_SECTION_VOID_VOID,
- // -----------------------------------------------------------
- UNKNOWN_SECTION_NAME, 0x81, 0x80, 0x80, 0x80,
- 0x00, // section size: 1 but in a 5-byte LEB
- 0,
+ UNKNOWN_SECTION(5), 0xff, 0xff, 0xff, 0xff, 0xff,
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, UnknownSectionOverflow) {
static const byte data[] = {
- UNKNOWN_EMPTY_SECTION_NAME,
- 9, // section size
+ UNKNOWN_SECTION(9),
1,
2,
3,
@@ -821,49 +839,26 @@ TEST_F(WasmModuleVerifyTest, UnknownSectionOverflow) {
TEST_F(WasmModuleVerifyTest, UnknownSectionUnderflow) {
static const byte data[] = {
- UNKNOWN_EMPTY_SECTION_NAME,
- 0xff,
- 0xff,
- 0xff,
- 0xff,
- 0x0f, // Section size LEB128 0xffffffff
- 1,
- 2,
- 3,
- 4, // 4 byte section
- };
- EXPECT_FAILURE(data);
-}
-
-TEST_F(WasmModuleVerifyTest, UnknownSectionLoop) {
- // Would infinite loop decoding if wrapping and allowed.
- static const byte data[] = {
- UNKNOWN_EMPTY_SECTION_NAME,
+ UNKNOWN_SECTION(333),
1,
2,
3,
4, // 4 byte section
- 0xfa,
- 0xff,
- 0xff,
- 0xff,
- 0x0f, // Section size LEB128 0xfffffffa
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, UnknownSectionSkipped) {
static const byte data[] = {
- UNKNOWN_EMPTY_SECTION_NAME,
- 1, // section size
+ UNKNOWN_SECTION(1),
0, // one byte section
- SECTION(GLOBALS, 4),
+ SECTION(Global, 6),
1,
- 0, // name length
- kLocalI32, // memory type
- 0, // exported
+ kLocalI32, // memory type
+ 0, // exported
+ WASM_INIT_EXPR_I32V_1(33), // init
};
- ModuleResult result = DecodeModule(data, data + arraysize(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
EXPECT_EQ(1, result.val->globals.size());
@@ -872,42 +867,41 @@ TEST_F(WasmModuleVerifyTest, UnknownSectionSkipped) {
const WasmGlobal* global = &result.val->globals.back();
- EXPECT_EQ(0, global->name_length);
EXPECT_EQ(kAstI32, global->type);
EXPECT_EQ(0, global->offset);
- EXPECT_FALSE(global->exported);
if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ImportTable_empty) {
- static const byte data[] = {SECTION(SIGNATURES, 1), 0,
- SECTION(IMPORT_TABLE, 1), 0};
+ static const byte data[] = {SECTION(Type, 1), 0, SECTION(Import, 1), 0};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_nosigs1) {
- static const byte data[] = {SECTION(IMPORT_TABLE, 1), 0};
+ static const byte data[] = {SECTION(Import, 1), 0};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_nosigs2) {
static const byte data[] = {
- SECTION(IMPORT_TABLE, 6), 1, // sig table
- IMPORT_SIG_INDEX(0), // sig index
- NAME_LENGTH(1), 'm', // module name
- NAME_LENGTH(1), 'f', // function name
+ SECTION(Import, 6), 1, // sig table
+ NAME_LENGTH(1), 'm', // module name
+ NAME_LENGTH(1), 'f', // function name
+ kExternalFunction, // import kind
+ IMPORT_SIG_INDEX(0), // sig index
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_invalid_sig) {
static const byte data[] = {
- SECTION(SIGNATURES, 1), 0, // --
- SECTION(IMPORT_TABLE, 6), 1, // --
- IMPORT_SIG_INDEX(0), // sig index
- NAME_LENGTH(1), 'm', // module name
- NAME_LENGTH(1), 'f', // function name
+ SECTION(Type, 1), 0, // --
+ SECTION(Import, 6), 1, // --
+ NAME_LENGTH(1), 'm', // module name
+ NAME_LENGTH(1), 'f', // function name
+ kExternalFunction, // import kind
+ IMPORT_SIG_INDEX(0), // sig index
};
EXPECT_FAILURE(data);
}
@@ -916,13 +910,14 @@ TEST_F(WasmModuleVerifyTest, ImportTable_one_sig) {
static const byte data[] = {
// signatures
SIGNATURES_SECTION_VOID_VOID,
- SECTION(IMPORT_TABLE, 6),
- 1, // --
- IMPORT_SIG_INDEX(0), // sig index
+ SECTION(Import, 7),
+ 1, // --
NAME_LENGTH(1),
'm', // module name
NAME_LENGTH(1),
- 'f', // function name
+ 'f', // function name
+ kExternalFunction, // import kind
+ IMPORT_SIG_INDEX(0), // sig index
};
EXPECT_VERIFIES(data);
}
@@ -930,13 +925,14 @@ TEST_F(WasmModuleVerifyTest, ImportTable_one_sig) {
TEST_F(WasmModuleVerifyTest, ImportTable_invalid_module) {
static const byte data[] = {
// signatures
- SIGNATURES_SECTION_VOID_VOID,
- SECTION(IMPORT_TABLE, 6),
- 1, // --
- IMPORT_SIG_INDEX(0), // sig index
- NO_NAME, // module name
- NAME_LENGTH(1),
- 'f' // function name
+ SIGNATURES_SECTION_VOID_VOID, // --
+ SECTION(Import, 7), // --
+ 1, // --
+ NO_NAME, // module name
+ NAME_LENGTH(1), // --
+ 'f', // function name
+ kExternalFunction, // import kind
+ IMPORT_SIG_INDEX(0), // sig index
};
EXPECT_FAILURE(data);
}
@@ -945,26 +941,27 @@ TEST_F(WasmModuleVerifyTest, ImportTable_off_end) {
static const byte data[] = {
// signatures
SIGNATURES_SECTION_VOID_VOID,
- SECTION(IMPORT_TABLE, 6),
+ SECTION(Import, 6),
1,
- IMPORT_SIG_INDEX(0), // sig index
NAME_LENGTH(1),
'm', // module name
NAME_LENGTH(1),
- 'f', // function name
+ 'f', // function name
+ kExternalFunction, // import kind
+ IMPORT_SIG_INDEX(0), // sig index
};
EXPECT_OFF_END_FAILURE(data, 16, sizeof(data));
}
TEST_F(WasmModuleVerifyTest, ExportTable_empty1) {
- static const byte data[] = {// signatures
- SIGNATURES_SECTION_VOID_VOID, ONE_EMPTY_FUNCTION,
- SECTION(EXPORT_TABLE, 1),
- 0, // --
+ static const byte data[] = { // signatures
+ SIGNATURES_SECTION_VOID_VOID, // --
+ ONE_EMPTY_FUNCTION, SECTION(Export, 1), // --
+ 0, // --
ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + arraysize(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
EXPECT_EQ(1, result.val->functions.size());
@@ -975,34 +972,27 @@ TEST_F(WasmModuleVerifyTest, ExportTable_empty1) {
TEST_F(WasmModuleVerifyTest, ExportTable_empty2) {
static const byte data[] = {
- SECTION(SIGNATURES, 1), 0, SECTION(EXPORT_TABLE, 1), 0 // --
- };
- // TODO(titzer): current behavior treats empty functions section as missing.
- EXPECT_FAILURE(data);
-}
-
-TEST_F(WasmModuleVerifyTest, ExportTable_NoFunctions1) {
- static const byte data[] = {
- SECTION(SIGNATURES, 1), 0, SECTION(EXPORT_TABLE, 1), 0 // --
+ SECTION(Type, 1), 0, SECTION(Export, 1), 0 // --
};
- EXPECT_FAILURE(data);
+ EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, ExportTable_NoFunctions2) {
- static const byte data[] = {SECTION(EXPORT_TABLE, 1), 0};
- EXPECT_FAILURE(data);
+ static const byte data[] = {SECTION(Export, 1), 0};
+ EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, ExportTableOne) {
static const byte data[] = {// signatures
SIGNATURES_SECTION_VOID_VOID,
ONE_EMPTY_FUNCTION,
- SECTION(EXPORT_TABLE, 3),
- 1, // exports
- FUNC_INDEX(0), // --
- NO_NAME, // --
+ SECTION(Export, 4),
+ 1, // exports
+ NO_NAME, // --
+ kExternalFunction, // --
+ FUNC_INDEX(0), // --
ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + arraysize(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
EXPECT_EQ(1, result.val->functions.size());
@@ -1011,26 +1001,42 @@ TEST_F(WasmModuleVerifyTest, ExportTableOne) {
if (result.val) delete result.val;
}
+TEST_F(WasmModuleVerifyTest, ExportNameWithInvalidStringLength) {
+ static const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ ONE_EMPTY_FUNCTION,
+ SECTION(Export, 12),
+ 1, // exports
+ NAME_LENGTH(84), // invalid string length
+ 'e', // --
+ kExternalFunction, // --
+ FUNC_INDEX(0)};
+
+ EXPECT_FAILURE(data);
+}
+
TEST_F(WasmModuleVerifyTest, ExportTableTwo) {
static const byte data[] = {// signatures
SIGNATURES_SECTION_VOID_VOID,
ONE_EMPTY_FUNCTION,
- SECTION(EXPORT_TABLE, 12),
- 2, // exports
- FUNC_INDEX(0), // --
+ SECTION(Export, 14),
+ 2, // exports
NAME_LENGTH(4),
'n',
'a',
'm',
- 'e', // --
+ 'e', // --
+ kExternalFunction,
FUNC_INDEX(0), // --
NAME_LENGTH(3),
'n',
'o',
- 'm', // --
+ 'm', // --
+ kExternalFunction, // --
+ FUNC_INDEX(0), // --
ONE_EMPTY_BODY};
- ModuleResult result = DecodeModule(data, data + arraysize(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
EXPECT_EQ(1, result.val->functions.size());
@@ -1043,19 +1049,22 @@ TEST_F(WasmModuleVerifyTest, ExportTableThree) {
static const byte data[] = {// signatures
SIGNATURES_SECTION_VOID_VOID,
THREE_EMPTY_FUNCTIONS,
- SECTION(EXPORT_TABLE, 10),
- 3, // exports
+ SECTION(Export, 13),
+ 3, // exports
+ NAME_LENGTH(1),
+ 'a', // --
+ kExternalFunction,
FUNC_INDEX(0), // --
NAME_LENGTH(1),
- 'a', // --
+ 'b', // --
+ kExternalFunction,
FUNC_INDEX(1), // --
NAME_LENGTH(1),
- 'b', // --
- FUNC_INDEX(2), // --
- NAME_LENGTH(1),
'c', // --
+ kExternalFunction,
+ FUNC_INDEX(2), // --
THREE_EMPTY_BODIES};
- ModuleResult result = DecodeModule(data, data + arraysize(data));
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
EXPECT_EQ(3, result.val->functions.size());
@@ -1069,12 +1078,13 @@ TEST_F(WasmModuleVerifyTest, ExportTableThreeOne) {
const byte data[] = {// signatures
SIGNATURES_SECTION_VOID_VOID,
THREE_EMPTY_FUNCTIONS,
- SECTION(EXPORT_TABLE, 5),
- 1, // exports
- FUNC_INDEX(i), // --
+ SECTION(Export, 6),
+ 1, // exports
NAME_LENGTH(2),
'e',
'x', // --
+ kExternalFunction,
+ FUNC_INDEX(i), // --
THREE_EMPTY_BODIES};
if (i < 3) {
@@ -1090,10 +1100,11 @@ TEST_F(WasmModuleVerifyTest, ExportTableOne_off_end) {
// signatures
SIGNATURES_SECTION_VOID_VOID,
ONE_EMPTY_FUNCTION,
- SECTION(EXPORT_TABLE, 1 + 6),
- 1, // exports
+ SECTION(Export, 1 + 6),
+ 1, // exports
+ NO_NAME, // --
+ kExternalFunction,
FUNC_INDEX(0), // --
- NO_NAME // --
};
for (int length = 33; length < sizeof(data); length++) {
@@ -1105,9 +1116,9 @@ TEST_F(WasmModuleVerifyTest, ExportTableOne_off_end) {
TEST_F(WasmModuleVerifyTest, FunctionSignatures_empty) {
static const byte data[] = {
- SECTION(SIGNATURES, 1), 0, // --
- SECTION(FUNCTION_SIGNATURES, 1), 0 // --
- }; // --
+ SECTION(Type, 1), 0, // --
+ SECTION(Function, 1), 0 // --
+ }; // --
EXPECT_VERIFIES(data);
}
@@ -1119,6 +1130,15 @@ TEST_F(WasmModuleVerifyTest, FunctionSignatures_one) {
EXPECT_VERIFIES(data);
}
+TEST_F(WasmModuleVerifyTest, Regression_648070) {
+ static const byte data[] = {
+ SECTION(Type, 1), 0, // --
+ SECTION(Function, 5), // --
+ U32V_5(3500228624) // function count = 3500228624
+ }; // --
+ EXPECT_FAILURE(data);
+}
+
TEST_F(WasmModuleVerifyTest, FunctionBodies_empty) {
static const byte data[] = {
EMPTY_SIGNATURES_SECTION, // --
@@ -1130,40 +1150,40 @@ TEST_F(WasmModuleVerifyTest, FunctionBodies_empty) {
TEST_F(WasmModuleVerifyTest, FunctionBodies_one_empty) {
static const byte data[] = {
- SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
- FUNCTION_SIGNATURES_SECTION(1, 0), // --
- SECTION(FUNCTION_BODIES, 1 + SIZEOF_EMPTY_BODY), 1, EMPTY_BODY // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ SECTION(Code, 1 + SIZEOF_EMPTY_BODY), 1, EMPTY_BODY // --
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_one_nop) {
static const byte data[] = {
- SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
- FUNCTION_SIGNATURES_SECTION(1, 0), // --
- SECTION(FUNCTION_BODIES, 1 + SIZEOF_NOP_BODY), 1, NOP_BODY // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ SECTION(Code, 1 + SIZEOF_NOP_BODY), 1, NOP_BODY // --
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_count_mismatch1) {
static const byte data[] = {
- SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
- FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
- SECTION(FUNCTION_BODIES, 1 + SIZEOF_EMPTY_BODY), 1, // --
- EMPTY_BODY // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
+ SECTION(Code, 1 + SIZEOF_EMPTY_BODY), 1, // --
+ EMPTY_BODY // --
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_count_mismatch2) {
static const byte data[] = {
- SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
- FUNCTION_SIGNATURES_SECTION(1, 0), // --
- SECTION(FUNCTION_BODIES, 1 + 2 * SIZEOF_NOP_BODY), // --
- 2, // --
- NOP_BODY, // --
- NOP_BODY // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ SECTION(Code, 1 + 2 * SIZEOF_NOP_BODY), // --
+ ENTRY_COUNT(2), // --
+ NOP_BODY, // --
+ NOP_BODY // --
};
EXPECT_FAILURE(data);
}
@@ -1179,11 +1199,11 @@ TEST_F(WasmModuleVerifyTest, Names_one_empty) {
static const byte data[] = {
SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
FUNCTION_SIGNATURES_SECTION(1, 0), // --
- SECTION(FUNCTION_BODIES, 1 + SIZEOF_EMPTY_BODY),
- 1,
+ SECTION(Code, 1 + SIZEOF_EMPTY_BODY),
+ ENTRY_COUNT(1),
EMPTY_BODY, // --
- SECTION(NAMES, 1 + 5),
- 1,
+ SECTION_NAMES(1 + 5),
+ ENTRY_COUNT(1),
FOO_STRING,
NO_LOCAL_NAMES // --
};
@@ -1192,14 +1212,14 @@ TEST_F(WasmModuleVerifyTest, Names_one_empty) {
TEST_F(WasmModuleVerifyTest, Names_two_empty) {
static const byte data[] = {
- SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
- FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
- SECTION(FUNCTION_BODIES, 1 + 2 * SIZEOF_EMPTY_BODY), // --
- 2,
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
+ SECTION(Code, 1 + 2 * SIZEOF_EMPTY_BODY), // --
+ ENTRY_COUNT(2),
EMPTY_BODY,
EMPTY_BODY, // --
- SECTION(NAMES, 1 + 10),
- 2, // --
+ SECTION_NAMES(1 + 10),
+ ENTRY_COUNT(2), // --
FOO_STRING,
NO_LOCAL_NAMES, // --
FOO_STRING,
@@ -1208,6 +1228,66 @@ TEST_F(WasmModuleVerifyTest, Names_two_empty) {
EXPECT_VERIFIES(data);
}
+#define EXPECT_INIT_EXPR(Type, type, value, ...) \
+ { \
+ static const byte data[] = {__VA_ARGS__, kExprEnd}; \
+ WasmInitExpr expr = \
+ DecodeWasmInitExprForTesting(data, data + sizeof(data)); \
+ EXPECT_EQ(WasmInitExpr::k##Type##Const, expr.kind); \
+ EXPECT_EQ(value, expr.val.type##_const); \
+ }
+
+TEST_F(WasmModuleVerifyTest, InitExpr_i32) {
+ EXPECT_INIT_EXPR(I32, i32, 33, WASM_I32V_1(33));
+ EXPECT_INIT_EXPR(I32, i32, -21, WASM_I32V_1(-21));
+ EXPECT_INIT_EXPR(I32, i32, 437, WASM_I32V_2(437));
+ EXPECT_INIT_EXPR(I32, i32, 77777, WASM_I32V_3(77777));
+}
+
+TEST_F(WasmModuleVerifyTest, InitExpr_f32) {
+ EXPECT_INIT_EXPR(F32, f32, static_cast<float>(13.1), WASM_F32(13.1));
+ EXPECT_INIT_EXPR(F32, f32, static_cast<float>(-21.1), WASM_F32(-21.1));
+ EXPECT_INIT_EXPR(F32, f32, static_cast<float>(437.2), WASM_F32(437.2));
+ EXPECT_INIT_EXPR(F32, f32, static_cast<float>(77777.3), WASM_F32(77777.3));
+}
+
+TEST_F(WasmModuleVerifyTest, InitExpr_i64) {
+ EXPECT_INIT_EXPR(I64, i64, 33, WASM_I64V_1(33));
+ EXPECT_INIT_EXPR(I64, i64, -21, WASM_I64V_2(-21));
+ EXPECT_INIT_EXPR(I64, i64, 437, WASM_I64V_5(437));
+ EXPECT_INIT_EXPR(I64, i64, 77777, WASM_I64V_7(77777));
+}
+
+TEST_F(WasmModuleVerifyTest, InitExpr_f64) {
+ EXPECT_INIT_EXPR(F64, f64, 83.22, WASM_F64(83.22));
+ EXPECT_INIT_EXPR(F64, f64, -771.3, WASM_F64(-771.3));
+ EXPECT_INIT_EXPR(F64, f64, 43703.0, WASM_F64(43703.0));
+ EXPECT_INIT_EXPR(F64, f64, 77999.1, WASM_F64(77999.1));
+}
+
+#define EXPECT_INIT_EXPR_FAIL(...) \
+ { \
+ static const byte data[] = {__VA_ARGS__, kExprEnd}; \
+ WasmInitExpr expr = \
+ DecodeWasmInitExprForTesting(data, data + sizeof(data)); \
+ EXPECT_EQ(WasmInitExpr::kNone, expr.kind); \
+ }
+
+TEST_F(WasmModuleVerifyTest, InitExpr_illegal) {
+ EXPECT_INIT_EXPR_FAIL(WASM_I32V_1(0), WASM_I32V_1(0));
+ EXPECT_INIT_EXPR_FAIL(WASM_GET_LOCAL(0));
+ EXPECT_INIT_EXPR_FAIL(WASM_SET_LOCAL(0, WASM_I32V_1(0)));
+ EXPECT_INIT_EXPR_FAIL(WASM_I32_ADD(WASM_I32V_1(0), WASM_I32V_1(0)));
+ EXPECT_INIT_EXPR_FAIL(WASM_IF_ELSE(WASM_ZERO, WASM_ZERO, WASM_ZERO));
+}
+
+TEST_F(WasmModuleVerifyTest, InitExpr_global) {
+ static const byte data[] = {WASM_INIT_EXPR_GLOBAL(37)};
+ WasmInitExpr expr = DecodeWasmInitExprForTesting(data, data + sizeof(data));
+ EXPECT_EQ(WasmInitExpr::kGlobalIndex, expr.kind);
+ EXPECT_EQ(37, expr.val.global_index);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
index 2b782f5dc7..0399835d01 100644
--- a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
@@ -56,45 +56,44 @@ TEST_F(WasmMacroGenTest, Statements) {
EXPECT_SIZE(7, WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO, WASM_ZERO));
- EXPECT_SIZE(5, WASM_IF(WASM_ZERO, WASM_NOP));
+ EXPECT_SIZE(6, WASM_IF(WASM_ZERO, WASM_NOP));
- EXPECT_SIZE(7, WASM_IF_ELSE(WASM_ZERO, WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(8, WASM_IF_ELSE(WASM_ZERO, WASM_NOP, WASM_NOP));
EXPECT_SIZE(5, WASM_SELECT(WASM_ZERO, WASM_NOP, WASM_NOP));
- EXPECT_SIZE(3, WASM_BR(0));
- EXPECT_SIZE(5, WASM_BR_IF(0, WASM_ZERO));
+ EXPECT_SIZE(2, WASM_BR(0));
+ EXPECT_SIZE(4, WASM_BR_IF(0, WASM_ZERO));
- EXPECT_SIZE(3, WASM_BLOCK(WASM_NOP));
- EXPECT_SIZE(4, WASM_BLOCK(WASM_NOP, WASM_NOP));
- EXPECT_SIZE(5, WASM_BLOCK(WASM_NOP, WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(4, WASM_BLOCK(WASM_NOP));
+ EXPECT_SIZE(5, WASM_BLOCK(WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(6, WASM_BLOCK(WASM_NOP, WASM_NOP, WASM_NOP));
EXPECT_SIZE(5, WASM_INFINITE_LOOP);
- EXPECT_SIZE(3, WASM_LOOP(WASM_NOP));
- EXPECT_SIZE(4, WASM_LOOP(WASM_NOP, WASM_NOP));
- EXPECT_SIZE(5, WASM_LOOP(WASM_NOP, WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(4, WASM_LOOP(WASM_NOP));
+ EXPECT_SIZE(5, WASM_LOOP(WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(6, WASM_LOOP(WASM_NOP, WASM_NOP, WASM_NOP));
EXPECT_SIZE(5, WASM_LOOP(WASM_BR(0)));
EXPECT_SIZE(7, WASM_LOOP(WASM_BR_IF(0, WASM_ZERO)));
- EXPECT_SIZE(2, WASM_RETURN0);
- EXPECT_SIZE(4, WASM_RETURN1(WASM_ZERO));
+ EXPECT_SIZE(1, WASM_RETURN0);
+ EXPECT_SIZE(3, WASM_RETURN1(WASM_ZERO));
EXPECT_SIZE(1, WASM_UNREACHABLE);
}
TEST_F(WasmMacroGenTest, MacroStatements) {
- EXPECT_SIZE(10, WASM_WHILE(WASM_I8(0), WASM_NOP));
+ EXPECT_SIZE(11, WASM_WHILE(WASM_I8(0), WASM_NOP));
EXPECT_SIZE(7, WASM_INC_LOCAL(0));
EXPECT_SIZE(7, WASM_INC_LOCAL_BY(0, 3));
- EXPECT_SIZE(3, WASM_BREAK(0));
- EXPECT_SIZE(3, WASM_CONTINUE(0));
+ EXPECT_SIZE(2, WASM_CONTINUE(0));
}
TEST_F(WasmMacroGenTest, BrTable) {
- EXPECT_SIZE(9, WASM_BR_TABLE(WASM_ZERO, 1, BR_TARGET(1)));
- EXPECT_SIZE(11, WASM_BR_TABLEV(WASM_ZERO, WASM_ZERO, 1, BR_TARGET(1)));
+ EXPECT_SIZE(5, WASM_BR_TABLE(WASM_ZERO, 1, BR_TARGET(0)));
+ EXPECT_SIZE(6, WASM_BR_TABLE(WASM_ZERO, 2, BR_TARGET(0), BR_TARGET(0)));
}
TEST_F(WasmMacroGenTest, Expressions) {
@@ -110,43 +109,34 @@ TEST_F(WasmMacroGenTest, Expressions) {
EXPECT_SIZE(3, WASM_NOT(WASM_ZERO));
- EXPECT_SIZE(5, WASM_BRV(1, WASM_ZERO));
- EXPECT_SIZE(7, WASM_BRV_IF(1, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(4, WASM_BRV(1, WASM_ZERO));
+ EXPECT_SIZE(6, WASM_BRV_IF(1, WASM_ZERO, WASM_ZERO));
- EXPECT_SIZE(4, WASM_BLOCK(WASM_ZERO));
- EXPECT_SIZE(5, WASM_BLOCK(WASM_NOP, WASM_ZERO));
- EXPECT_SIZE(6, WASM_BLOCK(WASM_NOP, WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_BLOCK(WASM_ZERO));
+ EXPECT_SIZE(6, WASM_BLOCK(WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(7, WASM_BLOCK(WASM_NOP, WASM_NOP, WASM_ZERO));
- EXPECT_SIZE(4, WASM_LOOP(WASM_ZERO));
- EXPECT_SIZE(5, WASM_LOOP(WASM_NOP, WASM_ZERO));
- EXPECT_SIZE(6, WASM_LOOP(WASM_NOP, WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_LOOP(WASM_ZERO));
+ EXPECT_SIZE(6, WASM_LOOP(WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(7, WASM_LOOP(WASM_NOP, WASM_NOP, WASM_ZERO));
}
TEST_F(WasmMacroGenTest, CallFunction) {
- EXPECT_SIZE(3, WASM_CALL_FUNCTION0(0));
- EXPECT_SIZE(3, WASM_CALL_FUNCTION0(1));
- EXPECT_SIZE(3, WASM_CALL_FUNCTION0(11));
+ EXPECT_SIZE(2, WASM_CALL_FUNCTION0(0));
+ EXPECT_SIZE(2, WASM_CALL_FUNCTION0(1));
+ EXPECT_SIZE(2, WASM_CALL_FUNCTION0(11));
- EXPECT_SIZE(5, WASM_CALL_FUNCTION1(0, WASM_ZERO));
- EXPECT_SIZE(7, WASM_CALL_FUNCTION2(1, WASM_ZERO, WASM_ZERO));
-}
-
-TEST_F(WasmMacroGenTest, CallImport) {
- EXPECT_SIZE(3, WASM_CALL_IMPORT0(0));
- EXPECT_SIZE(3, WASM_CALL_IMPORT0(1));
- EXPECT_SIZE(3, WASM_CALL_IMPORT0(11));
-
- EXPECT_SIZE(5, WASM_CALL_IMPORT1(0, WASM_ZERO));
- EXPECT_SIZE(7, WASM_CALL_IMPORT2(1, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(4, WASM_CALL_FUNCTION(0, WASM_ZERO));
+ EXPECT_SIZE(6, WASM_CALL_FUNCTION(1, WASM_ZERO, WASM_ZERO));
}
TEST_F(WasmMacroGenTest, CallIndirect) {
- EXPECT_SIZE(5, WASM_CALL_INDIRECT0(0, WASM_ZERO));
- EXPECT_SIZE(5, WASM_CALL_INDIRECT0(1, WASM_ZERO));
- EXPECT_SIZE(5, WASM_CALL_INDIRECT0(11, WASM_ZERO));
+ EXPECT_SIZE(4, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+ EXPECT_SIZE(4, WASM_CALL_INDIRECT0(1, WASM_ZERO));
+ EXPECT_SIZE(4, WASM_CALL_INDIRECT0(11, WASM_ZERO));
- EXPECT_SIZE(7, WASM_CALL_INDIRECT1(0, WASM_ZERO, WASM_ZERO));
- EXPECT_SIZE(9, WASM_CALL_INDIRECT2(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(6, WASM_CALL_INDIRECT1(0, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(8, WASM_CALL_INDIRECT2(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
}
TEST_F(WasmMacroGenTest, Int32Ops) {
diff --git a/deps/v8/test/unittests/wasm/encoder-unittest.cc b/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc
index 47885e697d..50049d557e 100644
--- a/deps/v8/test/unittests/wasm/encoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc
@@ -7,15 +7,15 @@
#include "src/v8.h"
#include "src/wasm/ast-decoder.h"
-#include "src/wasm/encoder.h"
+#include "src/wasm/wasm-module-builder.h"
-#include "test/cctest/wasm/test-signatures.h"
+#include "test/common/wasm/test-signatures.h"
namespace v8 {
namespace internal {
namespace wasm {
-class EncoderTest : public TestWithZone {
+class WasmModuleBuilderTest : public TestWithZone {
protected:
void AddLocal(WasmFunctionBuilder* f, LocalType type) {
uint16_t index = f->AddLocal(type);
@@ -23,6 +23,14 @@ class EncoderTest : public TestWithZone {
}
};
+TEST_F(WasmModuleBuilderTest, Regression_647329) {
+ // Test crashed with asan.
+ ZoneBuffer buffer(zone());
+ const size_t kSize = ZoneBuffer::kInitialSize * 3 + 4096 + 100;
+ byte data[kSize];
+ buffer.write(data, kSize);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/webkit/fast/js/stack-overflow-arrity-catch-expected.txt b/deps/v8/test/webkit/fast/js/stack-overflow-arrity-catch-expected.txt
deleted file mode 100644
index 80df97e671..0000000000
--- a/deps/v8/test/webkit/fast/js/stack-overflow-arrity-catch-expected.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Test that if an arrity check causes a stack overflow, the exception goes to the right catch
-
-On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-
-
-PASS gotRightCatch is true
-PASS successfullyParsed is true
-
-TEST COMPLETE
-
diff --git a/deps/v8/test/webkit/fast/regex/lastIndex-expected.txt b/deps/v8/test/webkit/fast/regex/lastIndex-expected.txt
index 1e0959c3f9..acceee2dd6 100644
--- a/deps/v8/test/webkit/fast/regex/lastIndex-expected.txt
+++ b/deps/v8/test/webkit/fast/regex/lastIndex-expected.txt
@@ -44,8 +44,8 @@ PASS Object.defineProperty(Object.defineProperty(/x/, 'lastIndex', {writable:fal
PASS Object.defineProperty(Object.defineProperty(/x/, 'lastIndex', {writable:false}), 'lastIndex', {value:0}); true is true
PASS Object.defineProperty(/x/, 'lastIndex', {writable:false}).exec('') is null
PASS Object.defineProperty(/x/, 'lastIndex', {writable:false}).exec('x') is ["x"]
-FAIL Object.defineProperty(/x/g, 'lastIndex', {writable:false}).exec('') should throw an exception. Was null.
-FAIL Object.defineProperty(/x/g, 'lastIndex', {writable:false}).exec('x') should throw an exception. Was x.
+PASS Object.defineProperty(/x/g, 'lastIndex', {writable:false}).exec('') threw exception TypeError: Cannot assign to read only property 'lastIndex' of object '[object RegExp]'.
+PASS Object.defineProperty(/x/g, 'lastIndex', {writable:false}).exec('x') threw exception TypeError: Cannot assign to read only property 'lastIndex' of object '[object RegExp]'.
PASS var re = /x/; Object.freeze(re); Object.isFrozen(re); is true
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index 9e336a2f97..0437a858c6 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -130,4 +130,9 @@
}], # 'gcov_coverage'
##############################################################################
+['variant == asm_wasm', {
+ '*': [SKIP],
+}], # variant == asm_wasm
+
+##############################################################################
]
diff --git a/deps/v8/tools/callstats.html b/deps/v8/tools/callstats.html
index 76cc8c686d..cb2e0bea3e 100644
--- a/deps/v8/tools/callstats.html
+++ b/deps/v8/tools/callstats.html
@@ -247,8 +247,12 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
var selectedPage;
var baselineVersion;
var selectedEntry;
+
+ // Marker to programatically replace the defaultData.
+ var defaultData = /*default-data-start*/undefined/*default-data-end*/;
function initialize() {
+ // Initialize the stats table and toggle lists.
var original = $("column");
var view = document.createElement('div');
view.id = 'view';
@@ -303,6 +307,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
});
initializeToggleList(versions.versions, $('versionSelector'));
initializeToggleList(pages.values(), $('pageSelector'));
+ initializeToggleList(Group.groups.values(), $('groupSelector'));
initializeToggleContentVisibility();
}
@@ -317,7 +322,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
checkbox.type = 'checkbox';
checkbox.checked = item.enabled;
checkbox.item = item;
- checkbox.addEventListener('click', handleToggleVersionEnable);
+ checkbox.addEventListener('click', handleToggleVersionOrPageEnable);
li.appendChild(checkbox);
li.appendChild(document.createTextNode(item.name));
list.appendChild(li);
@@ -360,9 +365,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
});
if (changeSelectedEntry) {
showEntryDetail(selectedPage.getEntry(selectedEntry));
- } else {
- showImpactList(selectedPage);
}
+ showImpactList(selectedPage);
}
function showPageInColumn(page, columnIndex) {
@@ -536,7 +540,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
table = $('detailView').querySelector('.versionDetailTable');
tbody = document.createElement('tbody');
if (entry !== undefined) {
- $('detailView').querySelector('.versionDetail h3 span').innerHTML =
+ $('detailView').querySelector('.versionDetail h3 span').textContent =
entry.name + ' in ' + entry.page.name;
entries = versions.getPageVersions(entry.page).map(
(page) => {
@@ -571,7 +575,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
var version = entry.page.version;
var showDiff = version !== baselineVersion;
- $('detailView').querySelector('.pageDetail h3 span').innerHTML =
+ $('detailView').querySelector('.pageDetail h3 span').textContent =
version.name;
entries = version.pages.map((page) => {
if (!page.enabled) return;
@@ -597,24 +601,24 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
});
// show the total for all pages
var tds = table.querySelectorAll('tfoot td');
- tds[1].innerHTML = ms(entry.getTimeImpact(), showDiff);
+ tds[1].textContent = ms(entry.getTimeImpact(), showDiff);
// Only show the percentage total if we are in diff mode:
- tds[2].innerHTML = percent(entry.getTimePercentImpact(), showDiff);
- tds[3].innerHTML = '';
- tds[4].innerHTML = count(entry.getCountImpact(), showDiff);
+ tds[2].textContent = percent(entry.getTimePercentImpact(), showDiff);
+ tds[3].textContent = '';
+ tds[4].textContent = count(entry.getCountImpact(), showDiff);
table.replaceChild(tbody, table.querySelector('tbody'));
}
function showImpactList(page) {
var impactView = $('detailView').querySelector('.impactView');
- impactView.querySelector('h3 span').innerHTML = page.version.name;
+ impactView.querySelector('h3 span').textContent = page.version.name;
var table = impactView.querySelector('table');
var tbody = document.createElement('tbody');
var version = page.version;
var entries = version.allEntries();
if (selectedEntry !== undefined && selectedEntry.isGroup) {
- impactView.querySelector('h3 span').innerHTML += " " + selectedEntry.name;
+ impactView.querySelector('h3 span').textContent += " " + selectedEntry.name;
entries = entries.filter((entry) => {
return entry.name == selectedEntry.name ||
(entry.parent && entry.parent.name == selectedEntry.name)
@@ -662,7 +666,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
if (selectedGroup == undefined) {
selectedGroup = groups[0];
} else {
- groups = groups.filter(each => each.name != selectedGroup.name);
+ groups = groups.filter(each => each.enabled && each.name != selectedGroup.name);
groups.unshift(selectedGroup);
}
showPageGraph(groups, page);
@@ -806,7 +810,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
colors: groups.map(each => each.color)
};
var parentNode = $(id);
- parentNode.querySelector('h2>span, h3>span').innerHTML = title;
+ parentNode.querySelector('h2>span, h3>span').textContent = title;
var graphNode = parentNode.querySelector('.content');
var chart = graphNode.chart;
@@ -856,8 +860,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
function showPopover(entry) {
var popover = $('popover');
- popover.querySelector('td.name').innerHTML = entry.name;
- popover.querySelector('td.page').innerHTML = entry.page.name;
+ popover.querySelector('td.name').textContent = entry.name;
+ popover.querySelector('td.page').textContent = entry.page.name;
setPopoverDetail(popover, entry, '');
popover.querySelector('table').className = "";
if (baselineVersion !== undefined) {
@@ -870,32 +874,32 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
function setPopoverDetail(popover, entry, prefix) {
var node = (name) => popover.querySelector(prefix + name);
if (entry == undefined) {
- node('.version').innerHTML = baselineVersion.name;
- node('.time').innerHTML = '-';
- node('.timeVariance').innerHTML = '-';
- node('.percent').innerHTML = '-';
- node('.percentPerEntry').innerHTML = '-';
- node('.percentVariance').innerHTML = '-';
- node('.count').innerHTML = '-';
- node('.countVariance').innerHTML = '-';
- node('.timeImpact').innerHTML = '-';
- node('.timePercentImpact').innerHTML = '-';
+ node('.version').textContent = baselineVersion.name;
+ node('.time').textContent = '-';
+ node('.timeVariance').textContent = '-';
+ node('.percent').textContent = '-';
+ node('.percentPerEntry').textContent = '-';
+ node('.percentVariance').textContent = '-';
+ node('.count').textContent = '-';
+ node('.countVariance').textContent = '-';
+ node('.timeImpact').textContent = '-';
+ node('.timePercentImpact').textContent = '-';
} else {
- node('.version').innerHTML = entry.page.version.name;
- node('.time').innerHTML = ms(entry._time, false);
- node('.timeVariance').innerHTML
+ node('.version').textContent = entry.page.version.name;
+ node('.time').textContent = ms(entry._time, false);
+ node('.timeVariance').textContent
= percent(entry.timeVariancePercent, false);
- node('.percent').innerHTML = percent(entry.timePercent, false);
- node('.percentPerEntry').innerHTML
+ node('.percent').textContent = percent(entry.timePercent, false);
+ node('.percentPerEntry').textContent
= percent(entry.timePercentPerEntry, false);
- node('.percentVariance').innerHTML
+ node('.percentVariance').textContent
= percent(entry.timePercentVariancePercent, false);
- node('.count').innerHTML = count(entry._count, false);
- node('.countVariance').innerHTML
+ node('.count').textContent = count(entry._count, false);
+ node('.countVariance').textContent
= percent(entry.timeVariancePercent, false);
- node('.timeImpact').innerHTML
+ node('.timeImpact').textContent
= ms(entry.getTimeImpact(false), false);
- node('.timePercentImpact').innerHTML
+ node('.timePercentImpact').textContent
= percent(entry.getTimeImpactVariancePercent(false), false);
}
}
@@ -927,7 +931,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
function addCodeSearchButton(entry, node) {
if (entry.isGroup) return;
var button = document.createElement("div");
- button.innerHTML = '?'
+ button.textContent = '?'
button.className = "codeSearch"
button.addEventListener('click', handleCodeSearch);
node.appendChild(button);
@@ -936,7 +940,11 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
function td(tr, content, className) {
var td = document.createElement("td");
- td.innerHTML = content;
+ if (content[0] == '<') {
+ td.innerHTML = content;
+ } else {
+ td.textContent = content;
+ }
td.className = className
tr.appendChild(td);
return td
@@ -1002,7 +1010,25 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
// =========================================================================
// EventHandlers
function handleBodyLoad() {
- $('uploadInput').focus();
+ $('uploadInput').focus();
+ if (defaultData) {
+ handleLoadJSON(defaultData);
+ } else if (window.location.protocol !== 'file:') {
+ tryLoadDefaultResults();
+ }
+ }
+
+ function tryLoadDefaultResults() {
+ // Try to load a results.json file adjacent to this day.
+ var xhr = new XMLHttpRequest();
+ // The markers on the following line can be used to replace the url easily
+ // with scripts.
+ xhr.open('GET', /*results-url-start*/'results.json'/*results-url-end*/, true);
+ xhr.onreadystatechange = function(e) {
+ if(this.readyState !== XMLHttpRequest.DONE || this.status !== 200) return;
+ handleLoadText(this.responseText);
+ };
+ xhr.send();
}
function handleLoadFile() {
@@ -1011,14 +1037,23 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
var reader = new FileReader();
reader.onload = function(evt) {
- pages = new Pages();
- versions = Versions.fromJSON(JSON.parse(this.result));
- initialize()
- showPage(versions.versions[0].pages[0]);
+ handleLoadText(this.result);
}
reader.readAsText(file);
}
+ function handleLoadText(text) {
+ handleLoadJSON(JSON.parse(text));
+ }
+
+ function handleLoadJSON(json) {
+ pages = new Pages();
+ versions = Versions.fromJSON(json);
+ initialize()
+ showPage(versions.versions[0].pages[0]);
+ selectEntry(selectedPage.total);
+ }
+
function handleToggleGroup(event) {
var group = event.target.parentNode.parentNode.entry;
toggleGroup(selectedPage.get(group.name));
@@ -1097,7 +1132,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
showPopover(entry);
}
- function handleToggleVersionEnable(event) {
+ function handleToggleVersionOrPageEnable(event) {
var item = this.item ;
if (item === undefined) return;
item .enabled = this.checked;
@@ -1106,6 +1141,9 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
if (page === undefined || !page.version.enabled) {
page = versions.getEnabledPage(page.name);
}
+ if (!page.enabled) {
+ page = page.getNextPage();
+ }
showPage(page);
}
@@ -1190,13 +1228,17 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
return -1;
}
+ getNextPage(page) {
+ if (this.length == 0) return undefined;
+ return this.pages[(this.indexOf(page.name) + 1) % this.length];
+ }
get(name) {
var index = this.indexOf(name);
if (0 <= index) return this.pages[index];
return undefined
}
get length() {
- return this.versions.length
+ return this.pages.length
}
getEntry(entry) {
if (entry === undefined) return undefined;
@@ -1315,21 +1357,20 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
constructor(version, page) {
this.page = page;
this.page.add(this);
- this.total = new GroupedEntry('Total', /.*Total.*/, '#BBB');
+ this.total = Group.groups.get('total').entry();
this.total.isTotal = true;
- this.unclassified = new UnclassifiedEntry(this, "#000")
+ this.unclassified = new UnclassifiedEntry(this)
this.groups = [
this.total,
- new GroupedEntry('IC', /.*IC.*/, "#3366CC"),
- new GroupedEntry('Optimize',
- /StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*/, "#DC3912"),
- new GroupedEntry('Compile', /.*Compile.*/, "#FFAA00"),
- new GroupedEntry('Parse', /.*Parse.*/, "#FF6600"),
- new GroupedEntry('Callback', /.*Callback$/, "#109618"),
- new GroupedEntry('API', /.*API.*/, "#990099"),
- new GroupedEntry('GC', /GC|AllocateInTargetSpace/, "#0099C6"),
- new GroupedEntry('JavaScript', /JS_Execution/, "#DD4477"),
- new GroupedEntry('Runtime', /.*/, "#88BB00"),
+ Group.groups.get('ic').entry(),
+ Group.groups.get('optimize').entry(),
+ Group.groups.get('compile').entry(),
+ Group.groups.get('parse').entry(),
+ Group.groups.get('callback').entry(),
+ Group.groups.get('api').entry(),
+ Group.groups.get('gc').entry(),
+ Group.groups.get('javascript').entry(),
+ Group.groups.get('runtime').entry(),
this.unclassified
];
this.entryDict = new Map();
@@ -1400,6 +1441,9 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
});
return sum;
}
+ getNextPage() {
+ return this.version.getNextPage(this);
+ }
}
PageVersion.fromJSON = function(version, name, data) {
var page = new PageVersion(version, pages.get(name));
@@ -1496,16 +1540,43 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return new Entry(position, ...data);
}
-
- class GroupedEntry extends Entry {
+ class Group {
constructor(name, regexp, color) {
- super(0, 'Group-' + name, 0, 0, 0, 0, 0, 0);
+ this.name = name;
this.regexp = regexp;
this.color = color;
+ this.enabled = true;
+ }
+ entry() { return new GroupedEntry(this) };
+ }
+ Group.groups = new Map();
+ Group.add = function(name, group) {
+ this.groups.set(name, group);
+ }
+ Group.add('total', new Group('Total', /.*Total.*/, '#BBB'));
+ Group.add('ic', new Group('IC', /.*IC.*/, "#3366CC"));
+ Group.add('optimize', new Group('Optimize',
+ /StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*/, "#DC3912"));
+ Group.add('compile', new Group('Compile', /.*Compile.*/, "#FFAA00"));
+ Group.add('parse', new Group('Parse', /.*Parse.*/, "#FF6600"));
+ Group.add('callback', new Group('Callback', /.*Callback$/, "#109618"));
+ Group.add('api', new Group('API', /.*API.*/, "#990099"));
+ Group.add('gc', new Group('GC', /GC|AllocateInTargetSpace/, "#0099C6"));
+ Group.add('javascript', new Group('JavaScript', /JS_Execution/, "#DD4477"));
+ Group.add('runtime', new Group('Runtime', /.*/, "#88BB00"));
+ Group.add('unclassified', new Group('Unclassified', /.*/, "#000"));
+
+ class GroupedEntry extends Entry {
+ constructor(group) {
+ super(0, 'Group-' + group.name, 0, 0, 0, 0, 0, 0);
+ this.group = group;
this.entries = [];
}
+ get regexp() { return this.group.regexp }
+ get color() { return this.group.color }
+ get enabled() { return this.group.enabled }
add(entry) {
- if (!entry.name.match(this.regexp)) return false;
+ if (!this.regexp.test(entry.name)) return false;
this._time += entry.time;
this._count += entry.count;
// TODO: sum up variance
@@ -1567,8 +1638,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
class UnclassifiedEntry extends GroupedEntry {
- constructor(page, color) {
- super('Unclassified', undefined, color);
+ constructor(page) {
+ super(Group.groups.get('unclassified'));
this.page = page;
this._time = undefined;
this._count = undefined;
@@ -1630,14 +1701,21 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
</div>
<div id="versionSelector" class="inline toggleContentVisibility">
- <h2>Version Selector</h2>
+ <h2>Versions</h2>
<div class="content hidden">
<ul></ul>
</div>
</div>
<div id="pageSelector" class="inline toggleContentVisibility">
- <h2>Page Selector</h2>
+ <h2>Pages</h2>
+ <div class="content hidden">
+ <ul></ul>
+ </div>
+ </div>
+
+ <div id="groupSelector" class="inline toggleContentVisibility">
+ <h2>Groups</h2>
<div class="content hidden">
<ul></ul>
</div>
diff --git a/deps/v8/tools/callstats.py b/deps/v8/tools/callstats.py
index 6339392733..262f9a6ade 100755
--- a/deps/v8/tools/callstats.py
+++ b/deps/v8/tools/callstats.py
@@ -46,7 +46,7 @@ def print_command(cmd_args):
print " ".join(map(fix_for_printing, cmd_args))
-def start_replay_server(args, sites):
+def start_replay_server(args, sites, discard_output=True):
with tempfile.NamedTemporaryFile(prefix='callstats-inject-', suffix='.js',
mode='wt', delete=False) as f:
injection = f.name
@@ -65,8 +65,11 @@ def start_replay_server(args, sites):
]
print "=" * 80
print_command(cmd_args)
- with open(os.devnull, 'w') as null:
- server = subprocess.Popen(cmd_args, stdout=null, stderr=null)
+ if discard_output:
+ with open(os.devnull, 'w') as null:
+ server = subprocess.Popen(cmd_args, stdout=null, stderr=null)
+ else:
+ server = subprocess.Popen(cmd_args)
print "RUNNING REPLAY SERVER: %s with PID=%s" % (args.replay_bin, server.pid)
print "=" * 80
return {'process': server, 'injection': injection}
@@ -123,6 +126,31 @@ def generate_injection(f, sites, refreshes=0):
onLoad(window.location.href);
})();"""
+def get_chrome_flags(js_flags, user_data_dir):
+ return [
+ "--no-default-browser-check",
+ "--no-sandbox",
+ "--disable-translate",
+ "--enable-benchmarking",
+ "--js-flags={}".format(js_flags),
+ "--no-first-run",
+ "--user-data-dir={}".format(user_data_dir),
+ ]
+
+def get_chrome_replay_flags(args):
+ http_port = 4080 + args.port_offset
+ https_port = 4443 + args.port_offset
+ return [
+ "--host-resolver-rules=MAP *:80 localhost:%s, " \
+ "MAP *:443 localhost:%s, " \
+ "EXCLUDE localhost" % (
+ http_port, https_port),
+ "--ignore-certificate-errors",
+ "--disable-seccomp-sandbox",
+ "--disable-web-security",
+ "--reduce-security-for-testing",
+ "--allow-insecure-localhost",
+ ]
def run_site(site, domain, args, timeout=None):
print "="*80
@@ -149,32 +177,11 @@ def run_site(site, domain, args, timeout=None):
js_flags = "--runtime-call-stats"
if args.replay_wpr: js_flags += " --allow-natives-syntax"
if args.js_flags: js_flags += " " + args.js_flags
- chrome_flags = [
- "--no-default-browser-check",
- "--no-sandbox",
- "--disable-translate",
- "--js-flags={}".format(js_flags),
- "--no-first-run",
- "--user-data-dir={}".format(user_data_dir),
- ]
+ chrome_flags = get_chrome_flags(js_flags, user_data_dir)
if args.replay_wpr:
- http_port = 4080 + args.port_offset
- https_port = 4443 + args.port_offset
- chrome_flags += [
- "--host-resolver-rules=MAP *:80 localhost:%s, " \
- "MAP *:443 localhost:%s, " \
- "EXCLUDE localhost" % (
- http_port, https_port),
- "--ignore-certificate-errors",
- "--disable-seccomp-sandbox",
- "--disable-web-security",
- "--reduce-security-for-testing",
- "--allow-insecure-localhost",
- ]
+ chrome_flags += get_chrome_replay_flags(args)
else:
- chrome_flags += [
- "--single-process",
- ]
+ chrome_flags += [ "--single-process", ]
if args.chrome_flags:
chrome_flags += args.chrome_flags.split()
cmd_args = [
@@ -234,12 +241,15 @@ def read_sites_file(args):
sys.exit(1)
-def do_run(args):
+def read_sites(args):
# Determine the websites to benchmark.
if args.sites_file:
- sites = read_sites_file(args)
- else:
- sites = [{'url': site, 'timeout': args.timeout} for site in args.sites]
+ return read_sites_file(args)
+ return [{'url': site, 'timeout': args.timeout} for site in args.sites]
+
+def do_run(args):
+ sites = read_sites(args)
+ replay_server = start_replay_server(args, sites) if args.replay_wpr else None
# Disambiguate domains, if needed.
L = []
domains = {}
@@ -266,18 +276,37 @@ def do_run(args):
domains[domain] += 1
entry[2] = domains[domain]
L.append(entry)
- replay_server = start_replay_server(args, sites) if args.replay_wpr else None
try:
# Run them.
for site, domain, count, timeout in L:
if count is not None: domain = "{}%{}".format(domain, count)
- print site, domain, timeout
+ print(site, domain, timeout)
run_site(site, domain, args, timeout)
finally:
if replay_server:
stop_replay_server(replay_server)
+def do_run_replay_server(args):
+ sites = read_sites(args)
+ print("- " * 40)
+ print("Available URLs:")
+ for site in sites:
+ print(" "+site['url'])
+ print("- " * 40)
+ print("Launch chromium with the following commands for debugging:")
+ flags = get_chrome_flags("'--runtime-call-stats --allow-natives-syntax'",
+ "/var/tmp/`date +%s`")
+ flags += get_chrome_replay_flags(args)
+ print(" $CHROMIUM_DIR/out/Release/chomium " + (" ".join(flags)) + " <URL>")
+ print("- " * 40)
+ replay_server = start_replay_server(args, sites, discard_output=False)
+ try:
+ replay_server['process'].wait()
+ finally:
+ stop_replay_server(replay_server)
+
+
# Calculate statistics.
def statistics(data):
@@ -355,8 +384,15 @@ def read_stats(path, domain, args):
entries[group_name]['time'] += time
entries[group_name]['count'] += count
break
+ # Calculate the V8-Total (all groups except Callback)
+ total_v8 = { 'time': 0, 'count': 0 }
+ for group_name, regexp in groups:
+ if group_name == 'Group-Callback': continue
+ total_v8['time'] += entries[group_name]['time']
+ total_v8['count'] += entries[group_name]['count']
+ entries['Group-Total-V8'] = total_v8
# Append the sums as single entries to domain.
- for key in entries :
+ for key in entries:
if key not in domain: domain[key] = { 'time_list': [], 'count_list': [] }
domain[key]['time_list'].append(entries[key]['time'])
domain[key]['count_list'].append(entries[key]['count'])
@@ -527,7 +563,7 @@ def main():
subparsers = {}
# Command: run.
subparsers["run"] = subparser_adder.add_parser(
- "run", help="run --help")
+ "run", help="Replay websites and collect runtime stats data.")
subparsers["run"].set_defaults(
func=do_run, error=subparsers["run"].error)
subparsers["run"].add_argument(
@@ -537,37 +573,6 @@ def main():
"--js-flags", type=str, default="",
help="specify additional V8 flags")
subparsers["run"].add_argument(
- "--domain", type=str, default="",
- help="specify the output file domain name")
- subparsers["run"].add_argument(
- "--no-url", dest="print_url", action="store_false", default=True,
- help="do not include url in statistics file")
- subparsers["run"].add_argument(
- "-n", "--repeat", type=int, metavar="<num>",
- help="specify iterations for each website (default: once)")
- subparsers["run"].add_argument(
- "-k", "--refresh", type=int, metavar="<num>", default=0,
- help="specify refreshes for each iteration (default: 0)")
- subparsers["run"].add_argument(
- "--replay-wpr", type=str, metavar="<path>",
- help="use the specified web page replay (.wpr) archive")
- subparsers["run"].add_argument(
- "--replay-bin", type=str, metavar="<path>",
- help="specify the replay.py script typically located in " \
- "$CHROMIUM/src/third_party/webpagereplay/replay.py")
- subparsers["run"].add_argument(
- "-r", "--retries", type=int, metavar="<num>",
- help="specify retries if website is down (default: forever)")
- subparsers["run"].add_argument(
- "-f", "--sites-file", type=str, metavar="<path>",
- help="specify file containing benchmark websites")
- subparsers["run"].add_argument(
- "-t", "--timeout", type=int, metavar="<seconds>", default=60,
- help="specify seconds before chrome is killed")
- subparsers["run"].add_argument(
- "-p", "--port-offset", type=int, metavar="<offset>", default=0,
- help="specify the offset for the replay server's default ports")
- subparsers["run"].add_argument(
"-u", "--user-data-dir", type=str, metavar="<path>",
help="specify user data dir (default is temporary)")
subparsers["run"].add_argument(
@@ -575,14 +580,56 @@ def main():
default="/usr/bin/google-chrome",
help="specify chrome executable to use")
subparsers["run"].add_argument(
- "-l", "--log-stderr", type=str, metavar="<path>",
- help="specify where chrome's stderr should go (default: /dev/null)")
+ "-r", "--retries", type=int, metavar="<num>",
+ help="specify retries if website is down (default: forever)")
subparsers["run"].add_argument(
- "sites", type=str, metavar="<URL>", nargs="*",
- help="specify benchmark website")
+ "--no-url", dest="print_url", action="store_false", default=True,
+ help="do not include url in statistics file")
+ subparsers["run"].add_argument(
+ "--domain", type=str, default="",
+ help="specify the output file domain name")
+ subparsers["run"].add_argument(
+ "-n", "--repeat", type=int, metavar="<num>",
+ help="specify iterations for each website (default: once)")
+
+ def add_replay_args(subparser):
+ subparser.add_argument(
+ "-k", "--refresh", type=int, metavar="<num>", default=0,
+ help="specify refreshes for each iteration (default: 0)")
+ subparser.add_argument(
+ "--replay-wpr", type=str, metavar="<path>",
+ help="use the specified web page replay (.wpr) archive")
+ subparser.add_argument(
+ "--replay-bin", type=str, metavar="<path>",
+ help="specify the replay.py script typically located in " \
+ "$CHROMIUM/src/third_party/webpagereplay/replay.py")
+ subparser.add_argument(
+ "-f", "--sites-file", type=str, metavar="<path>",
+ help="specify file containing benchmark websites")
+ subparser.add_argument(
+ "-t", "--timeout", type=int, metavar="<seconds>", default=60,
+ help="specify seconds before chrome is killed")
+ subparser.add_argument(
+ "-p", "--port-offset", type=int, metavar="<offset>", default=0,
+ help="specify the offset for the replay server's default ports")
+ subparser.add_argument(
+ "-l", "--log-stderr", type=str, metavar="<path>",
+ help="specify where chrome's stderr should go (default: /dev/null)")
+ subparser.add_argument(
+ "sites", type=str, metavar="<URL>", nargs="*",
+ help="specify benchmark website")
+ add_replay_args(subparsers["run"])
+
+ # Command: replay-server
+ subparsers["replay"] = subparser_adder.add_parser(
+ "replay", help="Run the replay server for debugging purposes")
+ subparsers["replay"].set_defaults(
+ func=do_run_replay_server, error=subparsers["replay"].error)
+ add_replay_args(subparsers["replay"])
+
# Command: stats.
subparsers["stats"] = subparser_adder.add_parser(
- "stats", help="stats --help")
+ "stats", help="Analize the results file create by the 'run' command.")
subparsers["stats"].set_defaults(
func=do_stats, error=subparsers["stats"].error)
subparsers["stats"].add_argument(
@@ -599,11 +646,13 @@ def main():
help="specify log files to parse")
subparsers["stats"].add_argument(
"--aggregate", dest="aggregate", action="store_true", default=False,
- help="Create aggregated entries. Adds Group-* entries at the toplevel. " +
+ help="Create aggregated entries. Adds Group-* entries at the toplevel. " \
"Additionally creates a Total page with all entries.")
+
# Command: json.
subparsers["json"] = subparser_adder.add_parser(
- "json", help="json --help")
+ "json", help="Collect results file created by the 'run' command into" \
+ "a single json file.")
subparsers["json"].set_defaults(
func=do_json, error=subparsers["json"].error)
subparsers["json"].add_argument(
@@ -611,8 +660,9 @@ def main():
help="specify directories with log files to parse")
subparsers["json"].add_argument(
"--aggregate", dest="aggregate", action="store_true", default=False,
- help="Create aggregated entries. Adds Group-* entries at the toplevel. " +
+ help="Create aggregated entries. Adds Group-* entries at the toplevel. " \
"Additionally creates a Total page with all entries.")
+
# Command: help.
subparsers["help"] = subparser_adder.add_parser(
"help", help="help information")
@@ -622,6 +672,7 @@ def main():
subparsers["help"].add_argument(
"help_cmd", type=str, metavar="<command>", nargs="?",
help="command for which to display help")
+
# Execute the command.
args = parser.parse_args()
setattr(args, 'script_path', os.path.dirname(sys.argv[0]))
diff --git a/deps/v8/tools/dev/v8gen.py b/deps/v8/tools/dev/v8gen.py
index a63a42705b..f0fb74b709 100755
--- a/deps/v8/tools/dev/v8gen.py
+++ b/deps/v8/tools/dev/v8gen.py
@@ -6,31 +6,35 @@
"""Script to generate V8's gn arguments based on common developer defaults
or builder configurations.
-Goma is used by default if a goma folder is detected. The compiler proxy is
-assumed to run.
+Goma is used by default if detected. The compiler proxy is assumed to run.
-This script can be added to the PATH and be used on other v8 checkouts than
-the including one. It always runs for the checkout that nests the CWD.
+This script can be added to the PATH and be used on other checkouts. It always
+runs for the checkout nesting the CWD.
Configurations of this script live in infra/mb/mb_config.pyl.
+Available actions are: {gen,list}. Omitting the action defaults to "gen".
+
-------------------------------------------------------------------------------
Examples:
-# Generate the x64.release config in out.gn/x64.release.
-v8gen.py x64.release
+# Generate the ia32.release config in out.gn/ia32.release.
+v8gen.py ia32.release
-# Generate into out.gn/foo and disable goma auto-detect.
-v8gen.py -b x64.release foo --no-goma
+# Generate into out.gn/foo without goma auto-detect.
+v8gen.py gen -b ia32.release foo --no-goma
# Pass additional gn arguments after -- (don't use spaces within gn args).
-v8gen.py x64.optdebug -- v8_enable_slow_dchecks=true
+v8gen.py ia32.optdebug -- v8_enable_slow_dchecks=true
# Generate gn arguments of 'V8 Linux64 - builder' from 'client.v8'. To switch
# off goma usage here, the args.gn file must be edited manually.
v8gen.py -m client.v8 -b 'V8 Linux64 - builder'
+# Show available configurations.
+v8gen.py list
+
-------------------------------------------------------------------------------
"""
@@ -40,9 +44,15 @@ import re
import subprocess
import sys
+CONFIG = os.path.join('infra', 'mb', 'mb_config.pyl')
GOMA_DEFAULT = os.path.join(os.path.expanduser("~"), 'goma')
OUT_DIR = 'out.gn'
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.append(os.path.join(TOOLS_PATH, 'mb'))
+
+import mb
+
def _sanitize_nonalpha(text):
return re.sub(r'[^a-zA-Z0-9.]', '_', text)
@@ -57,30 +67,40 @@ class GenerateGnArgs(object):
self._gn_args = args[index + 1:]
def _parse_arguments(self, args):
- parser = argparse.ArgumentParser(
+ self.parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
- parser.add_argument(
+
+ def add_common_options(p):
+ p.add_argument(
+ '-m', '--master', default='developer_default',
+ help='config group or master from mb_config.pyl - default: '
+ 'developer_default')
+ p.add_argument(
+ '-v', '--verbosity', action='count',
+ help='print wrapped commands (use -vv to print output of wrapped '
+ 'commands)')
+
+ subps = self.parser.add_subparsers()
+
+ # Command: gen.
+ gen_cmd = subps.add_parser(
+ 'gen', help='generate a new set of build files (default)')
+ gen_cmd.set_defaults(func=self.cmd_gen)
+ add_common_options(gen_cmd)
+ gen_cmd.add_argument(
'outdir', nargs='?',
help='optional gn output directory')
- parser.add_argument(
+ gen_cmd.add_argument(
'-b', '--builder',
help='build configuration or builder name from mb_config.pyl, e.g. '
'x64.release')
- parser.add_argument(
- '-m', '--master', default='developer_default',
- help='config group or master from mb_config.pyl - default: '
- 'developer_default')
- parser.add_argument(
+ gen_cmd.add_argument(
'-p', '--pedantic', action='store_true',
help='run gn over command-line gn args to catch errors early')
- parser.add_argument(
- '-v', '--verbosity', action='count',
- help='print wrapped commands (use -vv to print output of wrapped '
- 'commands)')
- goma = parser.add_mutually_exclusive_group()
+ goma = gen_cmd.add_mutually_exclusive_group()
goma.add_argument(
'-g' , '--goma',
action='store_true', default=None, dest='goma',
@@ -91,27 +111,83 @@ class GenerateGnArgs(object):
help='don\'t use goma auto detection - goma might still be used if '
'specified as a gn arg')
- options = parser.parse_args(args)
+ # Command: list.
+ list_cmd = subps.add_parser(
+ 'list', help='list available configurations')
+ list_cmd.set_defaults(func=self.cmd_list)
+ add_common_options(list_cmd)
- if not options.outdir and not options.builder:
- parser.error('please specify either an output directory or '
- 'a builder/config name (-b), e.g. x64.release')
+ # Default to "gen" unless global help is requested.
+ if not args or args[0] not in subps.choices.keys() + ['-h', '--help']:
+ args = ['gen'] + args
- if not options.outdir:
+ return self.parser.parse_args(args)
+
+ def cmd_gen(self):
+ if not self._options.outdir and not self._options.builder:
+ self.parser.error('please specify either an output directory or '
+ 'a builder/config name (-b), e.g. x64.release')
+
+ if not self._options.outdir:
# Derive output directory from builder name.
- options.outdir = _sanitize_nonalpha(options.builder)
+ self._options.outdir = _sanitize_nonalpha(self._options.builder)
else:
# Also, if this should work on windows, we might need to use \ where
# outdir is used as path, while using / if it's used in a gn context.
- if options.outdir.startswith('/'):
- parser.error(
+ if self._options.outdir.startswith('/'):
+ self.parser.error(
'only output directories relative to %s are supported' % OUT_DIR)
- if not options.builder:
+ if not self._options.builder:
# Derive builder from output directory.
- options.builder = options.outdir
+ self._options.builder = self._options.outdir
+
+ # Check for builder/config in mb config.
+ if self._options.builder not in self._mbw.masters[self._options.master]:
+ print '%s does not exist in %s for %s' % (
+ self._options.builder, CONFIG, self._options.master)
+ return 1
- return options
+ # TODO(machenbach): Check if the requested configurations has switched to
+ # gn at all.
+
+ # The directories are separated with slashes in a gn context (platform
+ # independent).
+ gn_outdir = '/'.join([OUT_DIR, self._options.outdir])
+
+ # Call MB to generate the basic configuration.
+ self._call_cmd([
+ sys.executable,
+ '-u', os.path.join('tools', 'mb', 'mb.py'),
+ 'gen',
+ '-f', CONFIG,
+ '-m', self._options.master,
+ '-b', self._options.builder,
+ gn_outdir,
+ ])
+
+ # Handle extra gn arguments.
+ gn_args_path = os.path.join(OUT_DIR, self._options.outdir, 'args.gn')
+
+ # Append command-line args.
+ modified = self._append_gn_args(
+ 'command-line', gn_args_path, '\n'.join(self._gn_args))
+
+ # Append goma args.
+ # TODO(machenbach): We currently can't remove existing goma args from the
+ # original config. E.g. to build like a bot that uses goma, but switch
+ # goma off.
+ modified |= self._append_gn_args(
+ 'goma', gn_args_path, self._goma_args)
+
+ # Regenerate ninja files to check for errors in the additional gn args.
+ if modified and self._options.pedantic:
+ self._call_cmd(['gn', 'gen', gn_outdir])
+ return 0
+
+ def cmd_list(self):
+ print '\n'.join(sorted(self._mbw.masters[self._options.master]))
+ return 0
def verbose_print_1(self, text):
if self._options.verbosity >= 1:
@@ -189,6 +265,13 @@ class GenerateGnArgs(object):
f.write('\n# Additional %s args:\n' % type)
f.write(more_gn_args)
f.write('\n')
+
+ # Artificially increment modification time as our modifications happen too
+ # fast. This makes sure that gn is properly rebuilding the ninja files.
+ mtime = os.path.getmtime(gn_args_path) + 1
+ with open(gn_args_path, 'aw'):
+ os.utime(gn_args_path, (mtime, mtime))
+
return True
def main(self):
@@ -199,39 +282,21 @@ class GenerateGnArgs(object):
self.verbose_print_1('cd ' + workdir)
os.chdir(workdir)
- # The directories are separated with slashes in a gn context (platform
- # independent).
- gn_outdir = '/'.join([OUT_DIR, self._options.outdir])
-
- # Call MB to generate the basic configuration.
- self._call_cmd([
- sys.executable,
- '-u', os.path.join('tools', 'mb', 'mb.py'),
- 'gen',
- '-f', os.path.join('infra', 'mb', 'mb_config.pyl'),
- '-m', self._options.master,
- '-b', self._options.builder,
- gn_outdir,
- ])
+ # Initialize MB as a library.
+ self._mbw = mb.MetaBuildWrapper()
- # Handle extra gn arguments.
- gn_args_path = os.path.join(OUT_DIR, self._options.outdir, 'args.gn')
+ # TODO(machenbach): Factor out common methods independent of mb arguments.
+ self._mbw.ParseArgs(['lookup', '-f', CONFIG])
+ self._mbw.ReadConfigFile()
- # Append command-line args.
- modified = self._append_gn_args(
- 'command-line', gn_args_path, '\n'.join(self._gn_args))
+ if not self._options.master in self._mbw.masters:
+ print '%s not found in %s\n' % (self._options.master, CONFIG)
+ print 'Choose one of:\n%s\n' % (
+ '\n'.join(sorted(self._mbw.masters.keys())))
+ return 1
- # Append goma args.
- # TODO(machenbach): We currently can't remove existing goma args from the
- # original config. E.g. to build like a bot that uses goma, but switch
- # goma off.
- modified |= self._append_gn_args(
- 'goma', gn_args_path, self._goma_args)
+ return self._options.func()
- # Regenerate ninja files to check for errors in the additional gn args.
- if modified and self._options.pedantic:
- self._call_cmd(['gn', 'gen', gn_outdir])
- return 0
if __name__ == "__main__":
gen = GenerateGnArgs(sys.argv[1:])
diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua
index bdbdf36a41..42cb2e370b 100644
--- a/deps/v8/tools/gcmole/gcmole.lua
+++ b/deps/v8/tools/gcmole/gcmole.lua
@@ -183,6 +183,7 @@ end
-------------------------------------------------------------------------------
-- GYP file parsing
+-- TODO(machenbach): Remove this when deprecating gyp.
local function ParseGYPFile()
local result = {}
local gyp_files = {
@@ -209,6 +210,32 @@ local function ParseGYPFile()
return result
end
+local function ParseGNFile()
+ local result = {}
+ local gn_files = {
+ { "BUILD.gn", '"([^"]-%.cc)"', "" },
+ { "test/cctest/BUILD.gn", '"(test-[^"]-%.cc)"', "test/cctest/" }
+ }
+
+ for i = 1, #gn_files do
+ local filename = gn_files[i][1]
+ local pattern = gn_files[i][2]
+ local prefix = gn_files[i][3]
+ local gn_file = assert(io.open(filename), "failed to open GN file")
+ local gn = gn_file:read('*a')
+ for condition, sources in
+ gn:gmatch "### gcmole%((.-)%) ###(.-)%]" do
+ if result[condition] == nil then result[condition] = {} end
+ for file in sources:gmatch(pattern) do
+ table.insert(result[condition], prefix .. file)
+ end
+ end
+ gn_file:close()
+ end
+
+ return result
+end
+
local function EvaluateCondition(cond, props)
if cond == 'all' then return true end
@@ -230,13 +257,40 @@ local function BuildFileList(sources, props)
return list
end
-local sources = ParseGYPFile()
+
+local gyp_sources = ParseGYPFile()
+local gn_sources = ParseGNFile()
+
+-- TODO(machenbach): Remove this comparison logic when deprecating gyp.
+local function CompareSources(sources1, sources2, what)
+ for condition, files1 in pairs(sources1) do
+ local files2 = sources2[condition]
+ assert(
+ files2 ~= nil,
+ "Missing gcmole condition in " .. what .. ": " .. condition)
+
+ -- Turn into set for speed.
+ files2_set = {}
+ for i, file in pairs(files2) do files2_set[file] = true end
+
+ for i, file in pairs(files1) do
+ assert(
+ files2_set[file] ~= nil,
+ "Missing file " .. file .. " in " .. what .. " for condition " ..
+ condition)
+ end
+ end
+end
+
+CompareSources(gyp_sources, gn_sources, "GN")
+CompareSources(gn_sources, gyp_sources, "GYP")
+
local function FilesForArch(arch)
- return BuildFileList(sources, { os = 'linux',
- arch = arch,
- mode = 'debug',
- simulator = ''})
+ return BuildFileList(gn_sources, { os = 'linux',
+ arch = arch,
+ mode = 'debug',
+ simulator = ''})
end
local mtConfig = {}
diff --git a/deps/v8/tools/gcmole/run-gcmole.isolate b/deps/v8/tools/gcmole/run-gcmole.isolate
index caa4f993fc..0fba2a12c1 100644
--- a/deps/v8/tools/gcmole/run-gcmole.isolate
+++ b/deps/v8/tools/gcmole/run-gcmole.isolate
@@ -12,6 +12,7 @@
'parallel.py',
'run-gcmole.py',
# The following contains all relevant source and gyp files.
+ '../../BUILD.gn',
'../../base/',
'../../include/',
'../../src/',
diff --git a/deps/v8/tools/gen-inlining-tests.py b/deps/v8/tools/gen-inlining-tests.py
new file mode 100644
index 0000000000..1a377e61ed
--- /dev/null
+++ b/deps/v8/tools/gen-inlining-tests.py
@@ -0,0 +1,566 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from collections import namedtuple
+import textwrap
+import sys
+
+SHARD_FILENAME_TEMPLATE = "test/mjsunit/compiler/inline-exception-{shard}.js"
+# Generates 2 files. Found by trial and error.
+SHARD_SIZE = 97
+
+PREAMBLE = """
+
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo --no-always-opt
+
+// This test file was generated by tools/gen-inlining-tests.py .
+
+// Global variables
+var deopt = undefined; // either true or false
+var counter = 0;
+
+function resetState() {
+ counter = 0;
+}
+
+function warmUp(f) {
+ try {
+ f();
+ } catch (ex) {
+ // ok
+ }
+ try {
+ f();
+ } catch (ex) {
+ // ok
+ }
+}
+
+function resetOptAndAssertResultEquals(expected, f) {
+ warmUp(f);
+ resetState();
+ // %DebugPrint(f);
+ eval("'dont optimize this function itself please, but do optimize f'");
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(expected, f());
+}
+
+function resetOptAndAssertThrowsWith(expected, f) {
+ warmUp(f);
+ resetState();
+ // %DebugPrint(f);
+ eval("'dont optimize this function itself please, but do optimize f'");
+ %OptimizeFunctionOnNextCall(f);
+ try {
+ var result = f();
+ fail("resetOptAndAssertThrowsWith",
+ "exception: " + expected,
+ "result: " + result);
+ } catch (ex) {
+ assertEquals(expected, ex);
+ }
+}
+
+function increaseAndReturn15() {
+ if (deopt) %DeoptimizeFunction(f);
+ counter++;
+ return 15;
+}
+
+function increaseAndThrow42() {
+ if (deopt) %DeoptimizeFunction(f);
+ counter++;
+ throw 42;
+}
+
+function increaseAndReturn15_noopt_inner() {
+ if (deopt) %DeoptimizeFunction(f);
+ counter++;
+ return 15;
+}
+
+%NeverOptimizeFunction(increaseAndReturn15_noopt_inner);
+
+function increaseAndThrow42_noopt_inner() {
+ if (deopt) %DeoptimizeFunction(f);
+ counter++;
+ throw 42;
+}
+
+%NeverOptimizeFunction(increaseAndThrow42_noopt_inner);
+
+// Alternative 1
+
+function returnOrThrow(doReturn) {
+ if (doReturn) {
+ return increaseAndReturn15();
+ } else {
+ return increaseAndThrow42();
+ }
+}
+
+// Alternative 2
+
+function increaseAndReturn15_calls_noopt() {
+ return increaseAndReturn15_noopt_inner();
+}
+
+function increaseAndThrow42_calls_noopt() {
+ return increaseAndThrow42_noopt_inner();
+}
+
+// Alternative 3.
+// When passed either {increaseAndReturn15} or {increaseAndThrow42}, it acts
+// as the other one.
+function invertFunctionCall(f) {
+ var result;
+ try {
+ result = f();
+ } catch (ex) {
+ return ex - 27;
+ }
+ throw result + 27;
+}
+
+// Alternative 4: constructor
+function increaseAndStore15Constructor() {
+ if (deopt) %DeoptimizeFunction(f);
+ ++counter;
+ this.x = 15;
+}
+
+function increaseAndThrow42Constructor() {
+ if (deopt) %DeoptimizeFunction(f);
+ ++counter;
+ this.x = 42;
+ throw this.x;
+}
+
+// Alternative 5: property
+var magic = {};
+Object.defineProperty(magic, 'prop', {
+ get: function () {
+ if (deopt) %DeoptimizeFunction(f);
+ return 15 + 0 * ++counter;
+ },
+
+ set: function(x) {
+ // argument should be 37
+ if (deopt) %DeoptimizeFunction(f);
+ counter -= 36 - x; // increments counter
+ throw 42;
+ }
+})
+
+// Generate type feedback.
+
+assertEquals(15, increaseAndReturn15_calls_noopt());
+assertThrowsEquals(function() { return increaseAndThrow42_noopt_inner() }, 42);
+
+assertEquals(15, (new increaseAndStore15Constructor()).x);
+assertThrowsEquals(function() {
+ return (new increaseAndThrow42Constructor()).x;
+ },
+ 42);
+
+function runThisShard() {
+
+""".strip()
+
+def booltuples(n):
+ """booltuples(2) yields 4 tuples: (False, False), (False, True),
+ (True, False), (True, True)."""
+
+ assert isinstance(n, int)
+ if n <= 0:
+ yield ()
+ else:
+ for initial in booltuples(n-1):
+ yield initial + (False,)
+ yield initial + (True,)
+
+def fnname(flags):
+ assert len(FLAGLETTERS) == len(flags)
+
+ return "f_" + ''.join(
+ FLAGLETTERS[i] if b else '_'
+ for (i, b) in enumerate(flags))
+
+NUM_TESTS_PRINTED = 0
+NUM_TESTS_IN_SHARD = 0
+
+def printtest(flags):
+ """Print a test case. Takes a couple of boolean flags, on which the
+ printed Javascript code depends."""
+
+ assert all(isinstance(flag, bool) for flag in flags)
+
+ # The alternative flags are in reverse order so that if we take all possible
+ # tuples, ordered lexicographically from false to true, we get first the
+ # default, then alternative 1, then 2, etc.
+ (
+ alternativeFn5, # use alternative #5 for returning/throwing:
+ # return/throw using property
+ alternativeFn4, # use alternative #4 for returning/throwing:
+ # return/throw using constructor
+ alternativeFn3, # use alternative #3 for returning/throwing:
+ # return/throw indirectly, based on function argument
+ alternativeFn2, # use alternative #2 for returning/throwing:
+ # return/throw indirectly in unoptimized code,
+ # no branching
+ alternativeFn1, # use alternative #1 for returning/throwing:
+ # return/throw indirectly, based on boolean arg
+ tryThrows, # in try block, call throwing function
+ tryReturns, # in try block, call returning function
+ tryFirstReturns, # in try block, returning goes before throwing
+ tryResultToLocal, # in try block, result goes to local variable
+ doCatch, # include catch block
+ catchReturns, # in catch block, return
+ catchWithLocal, # in catch block, modify or return the local variable
+ catchThrows, # in catch block, throw
+ doFinally, # include finally block
+ finallyReturns, # in finally block, return local variable
+ finallyThrows, # in finally block, throw
+ endReturnLocal, # at very end, return variable local
+ deopt, # deopt inside inlined function
+ ) = flags
+
+ # BASIC RULES
+
+ # Only one alternative can be applied at any time.
+ if (alternativeFn1 + alternativeFn2 + alternativeFn3 + alternativeFn4
+ + alternativeFn5 > 1):
+ return
+
+ # In try, return or throw, or both.
+ if not (tryReturns or tryThrows): return
+
+ # Either doCatch or doFinally.
+ if not doCatch and not doFinally: return
+
+ # Catch flags only make sense when catching
+ if not doCatch and (catchReturns or catchWithLocal or catchThrows):
+ return
+
+ # Finally flags only make sense when finallying
+ if not doFinally and (finallyReturns or finallyThrows):
+ return
+
+ # tryFirstReturns is only relevant when both tryReturns and tryThrows are
+ # true.
+ if tryFirstReturns and not (tryReturns and tryThrows): return
+
+ # From the try and finally block, we can return or throw, but not both.
+ if catchReturns and catchThrows: return
+ if finallyReturns and finallyThrows: return
+
+ # If at the end we return the local, we need to have touched it.
+ if endReturnLocal and not (tryResultToLocal or catchWithLocal): return
+
+ # PRUNING
+
+ anyAlternative = any([alternativeFn1, alternativeFn2, alternativeFn3,
+ alternativeFn4, alternativeFn5])
+ specificAlternative = any([alternativeFn2, alternativeFn3])
+ rareAlternative = not specificAlternative
+
+ # If try returns and throws, then don't catchWithLocal, endReturnLocal, or
+ # deopt, or do any alternative.
+ if (tryReturns and tryThrows and
+ (catchWithLocal or endReturnLocal or deopt or anyAlternative)):
+ return
+ # We don't do any alternative if we do a finally.
+ if doFinally and anyAlternative: return
+ # We only use the local variable if we do alternative #2 or #3.
+ if ((tryResultToLocal or catchWithLocal or endReturnLocal) and
+ not specificAlternative):
+ return
+ # We don't need to test deopting into a finally.
+ if doFinally and deopt: return
+
+ # We're only interested in alternative #2 if we have endReturnLocal, no
+ # catchReturns, and no catchThrows, and deopt.
+ if (alternativeFn2 and
+ (not endReturnLocal or catchReturns or catchThrows or not deopt)):
+ return
+
+
+ # Flag check succeeded.
+
+ trueFlagNames = [name for (name, value) in flags._asdict().items() if value]
+ flagsMsgLine = " // Variant flags: [{}]".format(', '.join(trueFlagNames))
+ write(textwrap.fill(flagsMsgLine, subsequent_indent=' // '))
+ write("")
+
+ if not anyAlternative:
+ fragments = {
+ 'increaseAndReturn15': 'increaseAndReturn15()',
+ 'increaseAndThrow42': 'increaseAndThrow42()',
+ }
+ elif alternativeFn1:
+ fragments = {
+ 'increaseAndReturn15': 'returnOrThrow(true)',
+ 'increaseAndThrow42': 'returnOrThrow(false)',
+ }
+ elif alternativeFn2:
+ fragments = {
+ 'increaseAndReturn15': 'increaseAndReturn15_calls_noopt()',
+ 'increaseAndThrow42': 'increaseAndThrow42_calls_noopt()',
+ }
+ elif alternativeFn3:
+ fragments = {
+ 'increaseAndReturn15': 'invertFunctionCall(increaseAndThrow42)',
+ 'increaseAndThrow42': 'invertFunctionCall(increaseAndReturn15)',
+ }
+ elif alternativeFn4:
+ fragments = {
+ 'increaseAndReturn15': '(new increaseAndStore15Constructor()).x',
+ 'increaseAndThrow42': '(new increaseAndThrow42Constructor()).x',
+ }
+ else:
+ assert alternativeFn5
+ fragments = {
+ 'increaseAndReturn15': 'magic.prop /* returns 15 */',
+ 'increaseAndThrow42': '(magic.prop = 37 /* throws 42 */)',
+ }
+
+ # As we print code, we also maintain what the result should be. Variable
+ # {result} can be one of three things:
+ #
+ # - None, indicating returning JS null
+ # - ("return", n) with n an integer
+ # - ("throw", n), with n an integer
+
+ result = None
+ # We also maintain what the counter should be at the end.
+ # The counter is reset just before f is called.
+ counter = 0
+
+ write( " f = function {} () {{".format(fnname(flags)))
+ write( " var local = 888;")
+ write( " deopt = {};".format("true" if deopt else "false"))
+ local = 888
+ write( " try {")
+ write( " counter++;")
+ counter += 1
+ resultTo = "local +=" if tryResultToLocal else "return"
+ if tryReturns and not (tryThrows and not tryFirstReturns):
+ write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
+ if result == None:
+ counter += 1
+ if tryResultToLocal:
+ local += 19
+ else:
+ result = ("return", 19)
+ if tryThrows:
+ write( " {} 4 + {increaseAndThrow42};".format(resultTo, **fragments))
+ if result == None:
+ counter += 1
+ result = ("throw", 42)
+ if tryReturns and tryThrows and not tryFirstReturns:
+ write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
+ if result == None:
+ counter += 1
+ if tryResultToLocal:
+ local += 19
+ else:
+ result = ("return", 19)
+ write( " counter++;")
+ if result == None:
+ counter += 1
+
+ if doCatch:
+ write( " } catch (ex) {")
+ write( " counter++;")
+ if isinstance(result, tuple) and result[0] == 'throw':
+ counter += 1
+ if catchThrows:
+ write(" throw 2 + ex;")
+ if isinstance(result, tuple) and result[0] == "throw":
+ result = ('throw', 2 + result[1])
+ elif catchReturns and catchWithLocal:
+ write(" return 2 + local;")
+ if isinstance(result, tuple) and result[0] == "throw":
+ result = ('return', 2 + local)
+ elif catchReturns and not catchWithLocal:
+ write(" return 2 + ex;");
+ if isinstance(result, tuple) and result[0] == "throw":
+ result = ('return', 2 + result[1])
+ elif catchWithLocal:
+ write(" local += ex;");
+ if isinstance(result, tuple) and result[0] == "throw":
+ local += result[1]
+ result = None
+ counter += 1
+ else:
+ if isinstance(result, tuple) and result[0] == "throw":
+ result = None
+ counter += 1
+ write( " counter++;")
+
+ if doFinally:
+ write( " } finally {")
+ write( " counter++;")
+ counter += 1
+ if finallyThrows:
+ write(" throw 25;")
+ result = ('throw', 25)
+ elif finallyReturns:
+ write(" return 3 + local;")
+ result = ('return', 3 + local)
+ elif not finallyReturns and not finallyThrows:
+ write(" local += 2;")
+ local += 2
+ counter += 1
+ else: assert False # unreachable
+ write( " counter++;")
+
+ write( " }")
+ write( " counter++;")
+ if result == None:
+ counter += 1
+ if endReturnLocal:
+ write( " return 5 + local;")
+ if result == None:
+ result = ('return', 5 + local)
+ write( " }")
+
+ if result == None:
+ write( " resetOptAndAssertResultEquals(undefined, f);")
+ else:
+ tag, value = result
+ if tag == "return":
+ write( " resetOptAndAssertResultEquals({}, f);".format(value))
+ else:
+ assert tag == "throw"
+ write( " resetOptAndAssertThrowsWith({}, f);".format(value))
+
+ write( " assertEquals({}, counter);".format(counter))
+ write( "")
+
+ global NUM_TESTS_PRINTED, NUM_TESTS_IN_SHARD
+ NUM_TESTS_PRINTED += 1
+ NUM_TESTS_IN_SHARD += 1
+
+FILE = None # to be initialised to an open file
+SHARD_NUM = 1
+
+def write(*args):
+ return print(*args, file=FILE)
+
+
+
+def rotateshard():
+ global FILE, NUM_TESTS_IN_SHARD, SHARD_SIZE
+ if MODE != 'shard':
+ return
+ if FILE != None and NUM_TESTS_IN_SHARD < SHARD_SIZE:
+ return
+ if FILE != None:
+ finishshard()
+ assert FILE == None
+ FILE = open(SHARD_FILENAME_TEMPLATE.format(shard=SHARD_NUM), 'w')
+ write_shard_header()
+ NUM_TESTS_IN_SHARD = 0
+
+def finishshard():
+ global FILE, SHARD_NUM, MODE
+ assert FILE
+ write_shard_footer()
+ if MODE == 'shard':
+ print("Wrote shard {}.".format(SHARD_NUM))
+ FILE.close()
+ FILE = None
+ SHARD_NUM += 1
+
+
+def write_shard_header():
+ if MODE == 'shard':
+ write("// Shard {}.".format(SHARD_NUM))
+ write("")
+ write(PREAMBLE)
+ write("")
+
+def write_shard_footer():
+ write("}")
+ write("%NeverOptimizeFunction(runThisShard);")
+ write("")
+ write("// {} tests in this shard.".format(NUM_TESTS_IN_SHARD))
+ write("// {} tests up to here.".format(NUM_TESTS_PRINTED))
+ write("")
+ write("runThisShard();")
+
+FLAGLETTERS="54321trflcrltfrtld"
+
+flagtuple = namedtuple('flagtuple', (
+ "alternativeFn5",
+ "alternativeFn4",
+ "alternativeFn3",
+ "alternativeFn2",
+ "alternativeFn1",
+ "tryThrows",
+ "tryReturns",
+ "tryFirstReturns",
+ "tryResultToLocal",
+ "doCatch",
+ "catchReturns",
+ "catchWithLocal",
+ "catchThrows",
+ "doFinally",
+ "finallyReturns",
+ "finallyThrows",
+ "endReturnLocal",
+ "deopt"
+ ))
+
+emptyflags = flagtuple(*((False,) * len(flagtuple._fields)))
+f1 = emptyflags._replace(tryReturns=True, doCatch=True)
+
+# You can test function printtest with f1.
+
+allFlagCombinations = [
+ flagtuple(*bools)
+ for bools in booltuples(len(flagtuple._fields))
+]
+
+if __name__ == '__main__':
+ global MODE
+ if sys.argv[1:] == []:
+ MODE = 'stdout'
+ print("// Printing all shards together to stdout.")
+ print("")
+ write_shard_header()
+ FILE = sys.stdout
+ elif sys.argv[1:] == ['--shard-and-overwrite']:
+ MODE = 'shard'
+ else:
+ print("Usage:")
+ print("")
+ print(" python {}".format(sys.argv[0]))
+ print(" print all tests to standard output")
+ print(" python {} --shard-and-overwrite".format(sys.argv[0]))
+ print(" print all tests to {}".format(SHARD_FILENAME_TEMPLATE))
+
+ print("")
+ print(sys.argv[1:])
+ print("")
+ sys.exit(1)
+
+ rotateshard()
+
+ for flags in allFlagCombinations:
+ printtest(flags)
+ rotateshard()
+
+ finishshard()
+
+ if MODE == 'shard':
+ print("Total: {} tests.".format(NUM_TESTS_PRINTED))
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 1275bb5ed3..5fd39f3b85 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -168,8 +168,6 @@ consts_misc = [
'value': 'ScopeInfo::kStackLocalCount' },
{ 'name': 'scopeinfo_idx_ncontextlocals',
'value': 'ScopeInfo::kContextLocalCount' },
- { 'name': 'scopeinfo_idx_ncontextglobals',
- 'value': 'ScopeInfo::kContextGlobalCount' },
{ 'name': 'scopeinfo_idx_first_vars',
'value': 'ScopeInfo::kVariablePartIndex' },
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index ab8f3265a6..4525e7ef33 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -39,6 +39,7 @@ import mmap
import optparse
import os
import re
+import StringIO
import sys
import types
import urllib
@@ -1745,10 +1746,12 @@ class InspectionInfo(object):
frame_pointer = self.reader.ExceptionFP()
self.styles[frame_pointer] = "frame"
for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
- self.styles[slot] = "stackaddress"
+ # stack address
+ self.styles[slot] = "sa"
for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
maybe_address = self.reader.ReadUIntPtr(slot)
- self.styles[maybe_address] = "stackval"
+ # stack value
+ self.styles[maybe_address] = "sv"
if slot == frame_pointer:
self.styles[slot] = "frame"
frame_pointer = maybe_address
@@ -1760,7 +1763,7 @@ class InspectionInfo(object):
def get_style_class_string(self, address):
style = self.get_style_class(address)
if style != None:
- return " class=\"%s\" " % style
+ return " class=%s " % style
else:
return ""
@@ -1875,11 +1878,13 @@ WEB_HEADER = """
.dmptable {
border-collapse : collapse;
border-spacing : 0px;
+ table-layout: fixed;
}
.codedump {
border-collapse : collapse;
border-spacing : 0px;
+ table-layout: fixed;
}
.addrcomments {
@@ -1932,11 +1937,11 @@ input {
background-color : cyan;
}
-.stackaddress {
+.stackaddress, .sa {
background-color : LightGray;
}
-.stackval {
+.stackval, .sv {
background-color : LightCyan;
}
@@ -1944,16 +1949,17 @@ input {
background-color : cyan;
}
-.commentinput {
+.commentinput, .ci {
width : 20em;
}
-a.nodump:visited {
+/* a.nodump */
+a.nd:visited {
color : black;
text-decoration : none;
}
-a.nodump:link {
+a.nd:link {
color : black;
text-decoration : none;
}
@@ -1984,6 +1990,7 @@ function comment() {
send_comment(s.substring(index + address_len), event.srcElement.value);
}
}
+var c = comment;
function send_comment(address, comment) {
xmlhttp = new XMLHttpRequest();
@@ -2038,7 +2045,7 @@ function onpage(kind, address) {
<body>
<div class="header">
- <form class="navigation" action="search.html">
+ <form class="navigation" action=/search.html">
<a href="summary.html?%(query_dump)s">Context info</a>&nbsp;&nbsp;&nbsp;
<a href="info.html?%(query_dump)s">Dump info</a>&nbsp;&nbsp;&nbsp;
<a href="modules.html?%(query_dump)s">Modules</a>&nbsp;&nbsp;&nbsp;
@@ -2095,24 +2102,34 @@ class InspectionWebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
query_components = urlparse.parse_qs(parsedurl.query)
if parsedurl.path == "/dumps.html":
self.send_success_html_headers()
- self.server.output_dumps(self.wfile)
+ out_buffer = StringIO.StringIO()
+ self.server.output_dumps(out_buffer)
+ self.wfile.write(out_buffer.getvalue())
elif parsedurl.path == "/summary.html":
self.send_success_html_headers()
- self.formatter(query_components).output_summary(self.wfile)
+ out_buffer = StringIO.StringIO()
+ self.formatter(query_components).output_summary(out_buffer)
+ self.wfile.write(out_buffer.getvalue())
elif parsedurl.path == "/info.html":
self.send_success_html_headers()
- self.formatter(query_components).output_info(self.wfile)
+ out_buffer = StringIO.StringIO()
+ self.formatter(query_components).output_info(out_buffer)
+ self.wfile.write(out_buffer.getvalue())
elif parsedurl.path == "/modules.html":
self.send_success_html_headers()
- self.formatter(query_components).output_modules(self.wfile)
- elif parsedurl.path == "/search.html":
+ out_buffer = StringIO.StringIO()
+ self.formatter(query_components).output_modules(out_buffer)
+ self.wfile.write(out_buffer.getvalue())
+ elif parsedurl.path == "/search.html" or parsedurl.path == "/s":
address = query_components.get("val", [])
if len(address) != 1:
self.send_error(404, "Invalid params")
return
self.send_success_html_headers()
+ out_buffer = StringIO.StringIO()
self.formatter(query_components).output_search_res(
- self.wfile, address[0])
+ out_buffer, address[0])
+ self.wfile.write(out_buffer.getvalue())
elif parsedurl.path == "/disasm.html":
address = query_components.get("val", [])
exact = query_components.get("exact", ["on"])
@@ -2120,15 +2137,19 @@ class InspectionWebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
self.send_error(404, "Invalid params")
return
self.send_success_html_headers()
+ out_buffer = StringIO.StringIO()
self.formatter(query_components).output_disasm(
- self.wfile, address[0], exact[0])
+ out_buffer, address[0], exact[0])
+ self.wfile.write(out_buffer.getvalue())
elif parsedurl.path == "/data.html":
address = query_components.get("val", [])
datakind = query_components.get("type", ["address"])
if len(address) == 1 and len(datakind) == 1:
self.send_success_html_headers()
+ out_buffer = StringIO.StringIO()
self.formatter(query_components).output_data(
- self.wfile, address[0], datakind[0])
+ out_buffer, address[0], datakind[0])
+ self.wfile.write(out_buffer.getvalue())
else:
self.send_error(404,'Invalid params')
elif parsedurl.path == "/setdumpdesc":
@@ -2235,8 +2256,8 @@ class InspectionWebFormatter(object):
straddress = "0x" + self.reader.FormatIntPtr(maybeaddress)
style_class = ""
if not self.reader.IsValidAddress(maybeaddress):
- style_class = " class=\"nodump\""
- return ("<a %s href=\"search.html?%s&amp;val=%s\">%s</a>" %
+ style_class = "class=nd"
+ return ("<a %s href=s?%s&amp;val=%s>%s</a>" %
(style_class, self.encfilename, straddress, straddress))
def output_header(self, f):
@@ -2247,7 +2268,7 @@ class InspectionWebFormatter(object):
def output_footer(self, f):
f.write(WEB_FOOTER)
- MAX_CONTEXT_STACK = 4096
+ MAX_CONTEXT_STACK = 2048
def output_summary(self, f):
self.output_header(f)
@@ -2257,9 +2278,10 @@ class InspectionWebFormatter(object):
# Output stack
exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
- stack_bottom = exception_thread.stack.start + \
- min(exception_thread.stack.memory.data_size, self.MAX_CONTEXT_STACK)
stack_top = self.reader.ExceptionSP()
+ stack_bottom = min(exception_thread.stack.start + \
+ exception_thread.stack.memory.data_size,
+ stack_top + self.MAX_CONTEXT_STACK)
self.output_words(f, stack_top - 16, stack_bottom, stack_top, "Stack")
f.write('</div>')
@@ -2268,14 +2290,14 @@ class InspectionWebFormatter(object):
def output_info(self, f):
self.output_header(f)
- f.write("<h3>Dump info</h3>\n")
+ f.write("<h3>Dump info</h3>")
f.write("Description: ")
self.server.output_dump_desc_field(f, self.dumpfilename)
- f.write("<br>\n")
+ f.write("<br>")
f.write("Filename: ")
- f.write("<span class=\"code\">%s</span><br>\n" % (self.dumpfilename))
+ f.write("<span class=\"code\">%s</span><br>" % (self.dumpfilename))
dt = datetime.datetime.fromtimestamp(self.reader.header.time_date_stampt)
- f.write("Timestamp: %s<br>\n" % dt.strftime('%Y-%m-%d %H:%M:%S'))
+ f.write("Timestamp: %s<br>" % dt.strftime('%Y-%m-%d %H:%M:%S'))
self.output_context(f, InspectionWebFormatter.CONTEXT_FULL)
self.output_address_ranges(f)
self.output_footer(f)
@@ -2286,22 +2308,22 @@ class InspectionWebFormatter(object):
def print_region(_reader, start, size, _location):
regions[start] = size
self.reader.ForEachMemoryRegion(print_region)
- f.write("<h3>Available memory regions</h3>\n")
+ f.write("<h3>Available memory regions</h3>")
f.write('<div class="code">')
- f.write("<table class=\"regions\">\n")
+ f.write("<table class=\"regions\">")
f.write("<thead><tr>")
f.write("<th>Start address</th>")
f.write("<th>End address</th>")
f.write("<th>Number of bytes</th>")
- f.write("</tr></thead>\n")
+ f.write("</tr></thead>")
for start in sorted(regions):
size = regions[start]
f.write("<tr>")
f.write("<td>%s</td>" % self.format_address(start))
f.write("<td>&nbsp;%s</td>" % self.format_address(start + size))
f.write("<td>&nbsp;%d</td>" % size)
- f.write("</tr>\n")
- f.write("</table>\n")
+ f.write("</tr>")
+ f.write("</table>")
f.write('</div>')
return
@@ -2311,19 +2333,19 @@ class InspectionWebFormatter(object):
module.version_info.dwFileVersionLS)
product_version = GetVersionString(module.version_info.dwProductVersionMS,
module.version_info.dwProductVersionLS)
- f.write("<br>&nbsp;&nbsp;\n")
+ f.write("<br>&nbsp;&nbsp;")
f.write("base: %s" % self.reader.FormatIntPtr(module.base_of_image))
- f.write("<br>&nbsp;&nbsp;\n")
+ f.write("<br>&nbsp;&nbsp;")
f.write(" end: %s" % self.reader.FormatIntPtr(module.base_of_image +
module.size_of_image))
- f.write("<br>&nbsp;&nbsp;\n")
+ f.write("<br>&nbsp;&nbsp;")
f.write(" file version: %s" % file_version)
- f.write("<br>&nbsp;&nbsp;\n")
+ f.write("<br>&nbsp;&nbsp;")
f.write(" product version: %s" % product_version)
- f.write("<br>&nbsp;&nbsp;\n")
+ f.write("<br>&nbsp;&nbsp;")
time_date_stamp = datetime.datetime.fromtimestamp(module.time_date_stamp)
f.write(" timestamp: %s" % time_date_stamp)
- f.write("<br>\n");
+ f.write("<br>");
def output_modules(self, f):
self.output_header(f)
@@ -2337,16 +2359,16 @@ class InspectionWebFormatter(object):
def output_context(self, f, details):
exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
f.write("<h3>Exception context</h3>")
- f.write('<div class="code">\n')
+ f.write('<div class="code">')
f.write("Thread id: %d" % exception_thread.id)
- f.write("&nbsp;&nbsp; Exception code: %08X<br/>\n" %
+ f.write("&nbsp;&nbsp; Exception code: %08X<br/>" %
self.reader.exception.exception.code)
if details == InspectionWebFormatter.CONTEXT_FULL:
if self.reader.exception.exception.parameter_count > 0:
- f.write("&nbsp;&nbsp; Exception parameters: \n")
+ f.write("&nbsp;&nbsp; Exception parameters: ")
for i in xrange(0, self.reader.exception.exception.parameter_count):
f.write("%08x" % self.reader.exception.exception.information[i])
- f.write("<br><br>\n")
+ f.write("<br><br>")
for r in CONTEXT_FOR_ARCH[self.reader.arch]:
f.write(HTML_REG_FORMAT %
@@ -2357,7 +2379,7 @@ class InspectionWebFormatter(object):
else:
f.write("<b>eflags</b>: %s" %
bin(self.reader.exception_context.eflags)[2:])
- f.write('</div>\n')
+ f.write('</div>')
return
def align_down(self, a, size):
@@ -2394,7 +2416,7 @@ class InspectionWebFormatter(object):
highlight_address, desc):
region = self.reader.FindRegion(highlight_address)
if region is None:
- f.write("<h3>Address 0x%x not found in the dump.</h3>\n" %
+ f.write("<h3>Address 0x%x not found in the dump.</h3>" %
(highlight_address))
return
size = self.heap.PointerSize()
@@ -2415,10 +2437,10 @@ class InspectionWebFormatter(object):
(self.encfilename, highlight_address))
f.write("<h3>%s 0x%x - 0x%x, "
- "highlighting <a href=\"#highlight\">0x%x</a> %s</h3>\n" %
+ "highlighting <a href=\"#highlight\">0x%x</a> %s</h3>" %
(desc, start_address, end_address, highlight_address, expand))
f.write('<div class="code">')
- f.write("<table class=\"codedump\">\n")
+ f.write("<table class=codedump>")
for j in xrange(0, end_address - start_address, size):
slot = start_address + j
@@ -2440,33 +2462,31 @@ class InspectionWebFormatter(object):
if maybe_address:
heap_object = self.format_object(maybe_address)
- address_fmt = "%s&nbsp;</td>\n"
+ address_fmt = "%s&nbsp;</td>"
if slot == highlight_address:
- f.write("<tr class=\"highlight-line\">\n")
- address_fmt = "<a id=\"highlight\"></a>%s&nbsp;</td>\n"
+ f.write("<tr class=highlight-line>")
+ address_fmt = "<a id=highlight></a>%s&nbsp;</td>"
elif slot < highlight_address and highlight_address < slot + size:
- f.write("<tr class=\"inexact-highlight-line\">\n")
- address_fmt = "<a id=\"highlight\"></a>%s&nbsp;</td>\n"
+ f.write("<tr class=inexact-highlight-line>")
+ address_fmt = "<a id=highlight></a>%s&nbsp;</td>"
else:
- f.write("<tr>\n")
+ f.write("<tr>")
- f.write(" <td>")
+ f.write("<td>")
self.output_comment_box(f, "da-", slot)
- f.write("</td>\n")
- f.write(" ")
+ f.write("</td>")
self.td_from_address(f, slot)
f.write(address_fmt % self.format_address(slot))
- f.write(" ")
self.td_from_address(f, maybe_address)
- f.write(":&nbsp;%s&nbsp;</td>\n" % straddress)
- f.write(" <td>")
+ f.write(":&nbsp;%s&nbsp;</td>" % straddress)
+ f.write("<td>")
if maybe_address != None:
self.output_comment_box(
f, "sv-" + self.reader.FormatIntPtr(slot), maybe_address)
- f.write(" </td>\n")
- f.write(" <td>%s</td>\n" % (heap_object or ''))
- f.write("</tr>\n")
- f.write("</table>\n")
+ f.write("</td>")
+ f.write("<td>%s</td>" % (heap_object or ''))
+ f.write("</tr>")
+ f.write("</table>")
f.write("</div>")
return
@@ -2565,7 +2585,7 @@ class InspectionWebFormatter(object):
f.write("<h3>Disassembling 0x%x - 0x%x, highlighting 0x%x %s</h3>" %
(start_address, end_address, highlight_address, expand))
f.write('<div class="code">')
- f.write("<table class=\"codedump\">\n");
+ f.write("<table class=\"codedump\">");
for i in xrange(len(lines)):
line = lines[i]
next_address = count
@@ -2574,7 +2594,7 @@ class InspectionWebFormatter(object):
next_address = next_line[0]
self.format_disasm_line(
f, start_address, line, next_address, highlight_address)
- f.write("</table>\n")
+ f.write("</table>")
f.write("</div>")
return
@@ -2590,22 +2610,22 @@ class InspectionWebFormatter(object):
extra.append(cgi.escape(str(object_info)))
if len(extra) == 0:
return line
- return ("%s <span class=\"disasmcomment\">;; %s</span>" %
+ return ("%s <span class=disasmcomment>;; %s</span>" %
(line, ", ".join(extra)))
def format_disasm_line(
self, f, start, line, next_address, highlight_address):
line_address = start + line[0]
- address_fmt = " <td>%s</td>\n"
+ address_fmt = " <td>%s</td>"
if line_address == highlight_address:
- f.write("<tr class=\"highlight-line\">\n")
- address_fmt = " <td><a id=\"highlight\">%s</a></td>\n"
+ f.write("<tr class=highlight-line>")
+ address_fmt = " <td><a id=highlight>%s</a></td>"
elif (line_address < highlight_address and
highlight_address < next_address + start):
- f.write("<tr class=\"inexact-highlight-line\">\n")
- address_fmt = " <td><a id=\"highlight\">%s</a></td>\n"
+ f.write("<tr class=inexact-highlight-line>")
+ address_fmt = " <td><a id=highlight>%s</a></td>"
else:
- f.write("<tr>\n")
+ f.write("<tr>")
num_bytes = next_address - line[0]
stack_slot = self.heap.stack_map.get(line_address)
marker = ""
@@ -2630,22 +2650,26 @@ class InspectionWebFormatter(object):
code = self.annotate_disasm_addresses(code[op_offset:])
f.write(" <td>")
self.output_comment_box(f, "codel-", line_address)
- f.write("</td>\n")
+ f.write("</td>")
f.write(address_fmt % marker)
f.write(" ")
self.td_from_address(f, line_address)
- f.write("%s (+0x%x)</td>\n" %
- (self.format_address(line_address), line[0]))
- f.write(" <td>:&nbsp;%s&nbsp;</td>\n" % opcodes)
- f.write(" <td>%s</td>\n" % code)
- f.write("</tr>\n")
+ f.write(self.format_address(line_address))
+ f.write(" (+0x%x)</td>" % line[0])
+ f.write("<td>:&nbsp;%s&nbsp;</td>" % opcodes)
+ f.write("<td>%s</td>" % code)
+ f.write("</tr>")
def output_comment_box(self, f, prefix, address):
- f.write("<input type=\"text\" class=\"commentinput\" "
- "id=\"%s-address-0x%s\" onchange=\"comment()\" value=\"%s\">" %
+ comment = self.comments.get_comment(address)
+ value = ""
+ if comment:
+ value = " value=\"%s\"" % cgi.escape(comment)
+ f.write("<input type=text class=ci "
+ "id=%s-address-0x%s onchange=c()%s>" %
(prefix,
self.reader.FormatIntPtr(address),
- cgi.escape(self.comments.get_comment(address)) or ""))
+ value))
MAX_FOUND_RESULTS = 100
@@ -2655,27 +2679,27 @@ class InspectionWebFormatter(object):
if toomany:
f.write("(found %i results, displaying only first %i)" %
(len(results), self.MAX_FOUND_RESULTS))
- f.write(": \n")
+ f.write(": ")
results = sorted(results)
results = results[:min(len(results), self.MAX_FOUND_RESULTS)]
for address in results:
- f.write("<span %s>%s</span>\n" %
+ f.write("<span %s>%s</span>" %
(self.comments.get_style_class_string(address),
self.format_address(address)))
if toomany:
- f.write("...\n")
+ f.write("...")
def output_page_info(self, f, page_kind, page_address, my_page_address):
if my_page_address == page_address and page_address != 0:
- f.write("Marked first %s page.\n" % page_kind)
+ f.write("Marked first %s page." % page_kind)
else:
f.write("<span id=\"%spage\" style=\"display:none\">" % page_kind)
f.write("Marked first %s page." % page_kind)
f.write("</span>\n")
f.write("<button onclick=\"onpage('%spage', '0x%x')\">" %
(page_kind, my_page_address))
- f.write("Mark as first %s page</button>\n" % page_kind)
+ f.write("Mark as first %s page</button>" % page_kind)
return
def output_search_res(self, f, straddress):
@@ -2687,11 +2711,11 @@ class InspectionWebFormatter(object):
f.write("Comment: ")
self.output_comment_box(f, "search-", address)
- f.write("<br>\n")
+ f.write("<br>")
page_address = address & ~self.heap.PageAlignmentMask()
- f.write("Page info: \n")
+ f.write("Page info: ")
self.output_page_info(f, "old", self.padawan.known_first_old_page, \
page_address)
self.output_page_info(f, "map", self.padawan.known_first_map_page, \
@@ -2705,27 +2729,27 @@ class InspectionWebFormatter(object):
self.output_words(f, address - 8, address + 32, address, "Dump")
# Print as ASCII
- f.write("<hr>\n")
+ f.write("<hr>")
self.output_ascii(f, address, address + 256, address)
# Print as code
- f.write("<hr>\n")
+ f.write("<hr>")
self.output_disasm_range(f, address - 16, address + 16, address, True)
aligned_res, unaligned_res = self.reader.FindWordList(address)
if len(aligned_res) > 0:
- f.write("<h3>Occurrences of 0x%x at aligned addresses</h3>\n" %
+ f.write("<h3>Occurrences of 0x%x at aligned addresses</h3>" %
address)
self.output_find_results(f, aligned_res)
if len(unaligned_res) > 0:
- f.write("<h3>Occurrences of 0x%x at unaligned addresses</h3>\n" % \
+ f.write("<h3>Occurrences of 0x%x at unaligned addresses</h3>" % \
address)
self.output_find_results(f, unaligned_res)
if len(aligned_res) + len(unaligned_res) == 0:
- f.write("<h3>No occurences of 0x%x found in the dump</h3>\n" % address)
+ f.write("<h3>No occurences of 0x%x found in the dump</h3>" % address)
self.output_footer(f)
diff --git a/deps/v8/tools/mb/OWNERS b/deps/v8/tools/mb/OWNERS
new file mode 100644
index 0000000000..de5efcb8dc
--- /dev/null
+++ b/deps/v8/tools/mb/OWNERS
@@ -0,0 +1,3 @@
+brettw@chromium.org
+dpranke@chromium.org
+machenbach@chromium.org
diff --git a/deps/v8/tools/mb/PRESUBMIT.py b/deps/v8/tools/mb/PRESUBMIT.py
new file mode 100644
index 0000000000..6f5307c63e
--- /dev/null
+++ b/deps/v8/tools/mb/PRESUBMIT.py
@@ -0,0 +1,41 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+def _CommonChecks(input_api, output_api):
+ results = []
+
+ # Run Pylint over the files in the directory.
+ pylint_checks = input_api.canned_checks.GetPylint(input_api, output_api)
+ results.extend(input_api.RunTests(pylint_checks))
+
+ # Run the MB unittests.
+ results.extend(input_api.canned_checks.RunUnitTestsInDirectory(
+ input_api, output_api, '.', [ r'^.+_unittest\.py$']))
+
+ # Validate the format of the mb_config.pyl file.
+ cmd = [input_api.python_executable, 'mb.py', 'validate']
+ kwargs = {'cwd': input_api.PresubmitLocalPath()}
+ results.extend(input_api.RunTests([
+ input_api.Command(name='mb_validate',
+ cmd=cmd, kwargs=kwargs,
+ message=output_api.PresubmitError)]))
+
+ results.extend(
+ input_api.canned_checks.CheckLongLines(
+ input_api,
+ output_api,
+ maxlen=80,
+ source_file_filter=lambda x: 'mb_config.pyl' in x.LocalPath()))
+
+ return results
+
+
+def CheckChangeOnUpload(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
diff --git a/deps/v8/tools/mb/README.md b/deps/v8/tools/mb/README.md
new file mode 100644
index 0000000000..4e73a8e9fc
--- /dev/null
+++ b/deps/v8/tools/mb/README.md
@@ -0,0 +1,22 @@
+# MB - The Meta-Build wrapper
+
+MB is a simple wrapper intended to provide a uniform interface to either
+GYP or GN, such that users and bots can call one script and not need to
+worry about whether a given bot is meant to use GN or GYP.
+
+It supports two main functions:
+
+1. "gen" - the main `gyp_chromium` / `gn gen` invocation that generates the
+ Ninja files needed for the build.
+
+2. "analyze" - the step that takes a list of modified files and a list of
+ desired targets and reports which targets will need to be rebuilt.
+
+We also use MB as a forcing function to collect all of the different
+build configurations that we actually support for Chromium builds into
+one place, in `//tools/mb/mb_config.pyl`.
+
+For more information, see:
+
+* [The User Guide](docs/user_guide.md)
+* [The Design Spec](docs/design_spec.md)
diff --git a/deps/v8/tools/mb/docs/README.md b/deps/v8/tools/mb/docs/README.md
new file mode 100644
index 0000000000..f29007d9ed
--- /dev/null
+++ b/deps/v8/tools/mb/docs/README.md
@@ -0,0 +1,4 @@
+# The MB (Meta-Build wrapper) documentation
+
+* The [User Guide](user_guide.md)
+* The [Design Spec](design_spec.md)
diff --git a/deps/v8/tools/mb/docs/design_spec.md b/deps/v8/tools/mb/docs/design_spec.md
new file mode 100644
index 0000000000..33fda806e8
--- /dev/null
+++ b/deps/v8/tools/mb/docs/design_spec.md
@@ -0,0 +1,426 @@
+# The MB (Meta-Build wrapper) design spec
+
+[TOC]
+
+## Intro
+
+MB is intended to address two major aspects of the GYP -> GN transition
+for Chromium:
+
+1. "bot toggling" - make it so that we can easily flip a given bot
+ back and forth between GN and GYP.
+
+2. "bot configuration" - provide a single source of truth for all of
+ the different configurations (os/arch/`gyp_define` combinations) of
+ Chromium that are supported.
+
+MB must handle at least the `gen` and `analyze` steps on the bots, i.e.,
+we need to wrap both the `gyp_chromium` invocation to generate the
+Ninja files, and the `analyze` step that takes a list of modified files
+and a list of targets to build and returns which targets are affected by
+the files.
+
+For more information on how to actually use MB, see
+[the user guide](user_guide.md).
+
+## Design
+
+MB is intended to be as simple as possible, and to defer as much work as
+possible to GN or GYP. It should live as a very simple Python wrapper
+that offers little in the way of surprises.
+
+### Command line
+
+It is structured as a single binary that supports a list of subcommands:
+
+* `mb gen -c linux_rel_bot //out/Release`
+* `mb analyze -m tryserver.chromium.linux -b linux_rel /tmp/input.json /tmp/output.json`
+
+### Configurations
+
+`mb` will first look for a bot config file in a set of different locations
+(initially just in //ios/build/bots). Bot config files are JSON files that
+contain keys for 'GYP_DEFINES' (a list of strings that will be joined together
+with spaces and passed to GYP, or a dict that will be similarly converted),
+'gn_args' (a list of strings that will be joined together), and an
+'mb_type' field that says whether to use GN or GYP. Bot config files
+require the full list of settings to be given explicitly.
+
+If no matching bot config file is found, `mb` looks in the
+`//tools/mb/mb_config.pyl` config file to determine whether to use GYP or GN
+for a particular build directory, and what set of flags (`GYP_DEFINES` or `gn
+args`) to use.
+
+A config can either be specified directly (useful for testing) or by specifying
+the master name and builder name (useful on the bots so that they do not need
+to specify a config directly and can be hidden from the details).
+
+See the [user guide](user_guide.md#mb_config.pyl) for details.
+
+### Handling the analyze step
+
+The interface to `mb analyze` is described in the
+[user\_guide](user_guide.md#mb_analyze).
+
+The way analyze works can be subtle and complicated (see below).
+
+Since the interface basically mirrors the way the "analyze" step on the bots
+invokes `gyp_chromium` today, when the config is found to be a gyp config,
+the arguments are passed straight through.
+
+It implements the equivalent functionality in GN by calling `gn refs
+[list of files] --type=executable --all --as=output` and filtering the
+output to match the list of targets.
+
+## Analyze
+
+The goal of the `analyze` step is to speed up the cycle time of the try servers
+by only building and running the tests affected by the files in a patch, rather
+than everything that might be out of date. Doing this ends up being tricky.
+
+We start with the following requirements and observations:
+
+* In an ideal (un-resource-constrained) world, we would build and test
+ everything that a patch affected on every patch. This does not
+ necessarily mean that we would build 'all' on every patch (see below).
+
+* In the real world, however, we do not have an infinite number of machines,
+ and try jobs are not infinitely fast, so we need to balance the desire
+ to get maximum test coverage against the desire to have reasonable cycle
+ times, given the number of machines we have.
+
+* Also, since we run most try jobs against tip-of-tree Chromium, by
+ the time one job completes on the bot, new patches have probably landed,
+ rendering the build out of date.
+
+* This means that the next try job may have to do a build that is out of
+ date due to a combination of files affected by a given patch, and files
+ affected for unrelated reasons. We want to rebuild and test only the
+ targets affected by the patch, so that we don't blame or punish the
+ patch author for unrelated changes.
+
+So:
+
+1. We need a way to indicate which changed files we care about and which
+ we don't (the affected files of a patch).
+
+2. We need to know which tests we might potentially want to run, and how
+ those are mapped onto build targets. For some kinds of tests (like
+ GTest-based tests), the mapping is 1:1 - if you want to run base_unittests,
+ you need to build base_unittests. For others (like the telemetry and
+ layout tests), you might need to build several executables in order to
+ run the tests, and that mapping might best be captured by a *meta*
+ target (a GN group or a GYP 'none' target like `webkit_tests`) that
+ depends on the right list of files. Because the GN and GYP files know
+ nothing about test steps, we have to have some way of mapping back
+ and forth between test steps and build targets. That mapping
+ is *not* currently available to MB (or GN or GYP), and so we have to
+ enough information to make it possible for the caller to do the mapping.
+
+3. We might also want to know when test targets are affected by data files
+ that aren't compiled (python scripts, or the layout tests themselves).
+ There's no good way to do this in GYP, but GN supports this.
+
+4. We also want to ensure that particular targets still compile even if they
+ are not actually tested; consider testing the installers themselves, or
+ targets that don't yet have good test coverage. We might want to use meta
+ targets for this purpose as well.
+
+5. However, for some meta targets, we don't necessarily want to rebuild the
+ meta target itself, perhaps just the dependencies of the meta target that
+ are affected by the patch. For example, if you have a meta target like
+ `blink_tests` that might depend on ten different test binaries. If a patch
+ only affects one of them (say `wtf_unittests`), you don't want to
+ build `blink_tests`, because that might actually also build the other nine
+ targets. In other words, some meta targets are *prunable*.
+
+6. As noted above, in the ideal case we actually have enough resources and
+ things are fast enough that we can afford to build everything affected by a
+ patch, but listing every possible target explicitly would be painful. The
+ GYP and GN Ninja generators provide an 'all' target that captures (nearly,
+ see [crbug.com/503241](crbug.com/503241)) everything, but unfortunately
+ neither GN nor GYP actually represents 'all' as a meta target in the build
+ graph, so we will need to write code to handle that specially.
+
+7. In some cases, we will not be able to correctly analyze the build graph to
+ determine the impact of a patch, and need to bail out (e.g,. if you change a
+ build file itself, it may not be easy to tell how that affects the graph).
+ In that case we should simply build and run everything.
+
+The interaction between 2) and 5) means that we need to treat meta targets
+two different ways, and so we need to know which targets should be
+pruned in the sense of 5) and which targets should be returned unchanged
+so that we can map them back to the appropriate tests.
+
+So, we need three things as input:
+
+* `files`: the list of files in the patch
+* `test_targets`: the list of ninja targets which, if affected by a patch,
+ should be reported back so that we can map them back to the appropriate
+ tests to run. Any meta targets in this list should *not* be pruned.
+* `additional_compile_targets`: the list of ninja targets we wish to compile
+ *in addition to* the list in `test_targets`. Any meta targets
+ present in this list should be pruned (we don't need to return the
+ meta targets because they aren't mapped back to tests, and we don't want
+ to build them because we might build too much).
+
+We can then return two lists as output:
+
+* `compile_targets`, which is a list of pruned targets to be
+ passed to Ninja to build. It is acceptable to replace a list of
+ pruned targets by a meta target if it turns out that all of the
+ dependendencies of the target are affected by the patch (i.e.,
+ all ten binaries that blink_tests depends on), but doing so is
+ not required.
+* `test_targets`, which is a list of unpruned targets to be mapped
+ back to determine which tests to run.
+
+There may be substantial overlap between the two lists, but there is
+no guarantee that one is a subset of the other and the two cannot be
+used interchangeably or merged together without losing information and
+causing the wrong thing to happen.
+
+The implementation is responsible for recognizing 'all' as a magic string
+and mapping it onto the list of all root nodes in the build graph.
+
+There may be files listed in the input that don't actually exist in the build
+graph: this could be either the result of an error (the file should be in the
+build graph, but isn't), or perfectly fine (the file doesn't affect the build
+graph at all). We can't tell these two apart, so we should ignore missing
+files.
+
+There may be targets listed in the input that don't exist in the build
+graph; unlike missing files, this can only indicate a configuration error,
+and so we should return which targets are missing so the caller can
+treat this as an error, if so desired.
+
+Any of the three inputs may be an empty list:
+
+* It normally doesn't make sense to call analyze at all if no files
+ were modified, but in rare cases we can hit a race where we try to
+ test a patch after it has already been committed, in which case
+ the list of modified files is empty. We should return 'no dependency'
+ in that case.
+
+* Passing an empty list for one or the other of test_targets and
+ additional_compile_targets is perfectly sensible: in the former case,
+ it can indicate that you don't want to run any tests, and in the latter,
+ it can indicate that you don't want to do build anything else in
+ addition to the test targets.
+
+* It doesn't make sense to call analyze if you don't want to compile
+ anything at all, so passing [] for both test_targets and
+ additional_compile_targets should probably return an error.
+
+In the output case, an empty list indicates that there was nothing to
+build, or that there were no affected test targets as appropriate.
+
+Note that passing no arguments to Ninja is equivalent to passing
+`all` to Ninja (at least given how GN and GYP work); however, we
+don't want to take advantage of this in most cases because we don't
+actually want to build every out of date target, only the targets
+potentially affected by the files. One could try to indicate
+to analyze that we wanted to use no arguments instead of an empty
+list, but using the existing fields for this seems fragile and/or
+confusing, and adding a new field for this seems unwarranted at this time.
+
+There is an "error" field in case something goes wrong (like the
+empty file list case, above, or an internal error in MB/GYP/GN). The
+analyze code should also return an error code to the shell if appropriate
+to indicate that the command failed.
+
+In the case where build files themselves are modified and analyze may
+not be able to determine a correct answer (point 7 above, where we return
+"Found dependency (all)"), we should also return the `test_targets` unmodified
+and return the union of `test_targets` and `additional_compile_targets` for
+`compile_targets`, to avoid confusion.
+
+### Examples
+
+Continuing the example given above, suppose we have the following build
+graph:
+
+* `blink_tests` is a meta target that depends on `webkit_unit_tests`,
+ `wtf_unittests`, and `webkit_tests` and represents all of the targets
+ needed to fully test Blink. Each of those is a separate test step.
+* `webkit_tests` is also a meta target; it depends on `content_shell`
+ and `image_diff`.
+* `base_unittests` is a separate test binary.
+* `wtf_unittests` depends on `Assertions.cpp` and `AssertionsTest.cpp`.
+* `webkit_unit_tests` depends on `WebNode.cpp` and `WebNodeTest.cpp`.
+* `content_shell` depends on `WebNode.cpp` and `Assertions.cpp`.
+* `base_unittests` depends on `logging.cc` and `logging_unittest.cc`.
+
+#### Example 1
+
+We wish to run 'wtf_unittests' and 'webkit_tests' on a bot, but not
+compile any additional targets.
+
+If a patch touches WebNode.cpp, then analyze gets as input:
+
+ {
+ "files": ["WebNode.cpp"],
+ "test_targets": ["wtf_unittests", "webkit_tests"],
+ "additional_compile_targets": []
+ }
+
+and should return as output:
+
+ {
+ "status": "Found dependency",
+ "compile_targets": ["webkit_unit_tests"],
+ "test_targets": ["webkit_tests"]
+ }
+
+Note how `webkit_tests` was pruned in compile_targets but not in test_targets.
+
+#### Example 2
+
+Using the same patch as Example 1, assume we wish to run only `wtf_unittests`,
+but additionally build everything needed to test Blink (`blink_tests`):
+
+We pass as input:
+
+ {
+ "files": ["WebNode.cpp"],
+ "test_targets": ["wtf_unittests"],
+ "additional_compile_targets": ["blink_tests"]
+ }
+
+And should get as output:
+
+ {
+ "status": "Found dependency",
+ "compile_targets": ["webkit_unit_tests"],
+ "test_targets": []
+ }
+
+Here `blink_tests` was pruned in the output compile_targets, and
+test_targets was empty, since blink_tests was not listed in the input
+test_targets.
+
+#### Example 3
+
+Build everything, but do not run any tests.
+
+Input:
+
+ {
+ "files": ["WebNode.cpp"],
+ "test_targets": [],
+ "additional_compile_targets": ["all"]
+ }
+
+Output:
+
+ {
+ "status": "Found dependency",
+ "compile_targets": ["webkit_unit_tests", "content_shell"],
+ "test_targets": []
+ }
+
+#### Example 4
+
+Same as Example 2, but a build file was modified instead of a source file.
+
+Input:
+
+ {
+ "files": ["BUILD.gn"],
+ "test_targets": ["wtf_unittests"],
+ "additional_compile_targets": ["blink_tests"]
+ }
+
+Output:
+
+ {
+ "status": "Found dependency (all)",
+ "compile_targets": ["webkit_unit_tests", "wtf_unittests"],
+ "test_targets": ["wtf_unittests"]
+ }
+
+test_targets was returned unchanged, compile_targets was pruned.
+
+## Random Requirements and Rationale
+
+This section is collection of semi-organized notes on why MB is the way
+it is ...
+
+### in-tree or out-of-tree
+
+The first issue is whether or not this should exist as a script in
+Chromium at all; an alternative would be to simply change the bot
+configurations to know whether to use GYP or GN, and which flags to
+pass.
+
+That would certainly work, but experience over the past two years
+suggests a few things:
+
+ * we should push as much logic as we can into the source repositories
+ so that they can be versioned and changed atomically with changes to
+ the product code; having to coordinate changes between src/ and
+ build/ is at best annoying and can lead to weird errors.
+ * the infra team would really like to move to providing
+ product-independent services (i.e., not have to do one thing for
+ Chromium, another for NaCl, a third for V8, etc.).
+ * we found that during the SVN->GIT migration the ability to flip bot
+ configurations between the two via changes to a file in chromium
+ was very useful.
+
+All of this suggests that the interface between bots and Chromium should
+be a simple one, hiding as much of the chromium logic as possible.
+
+### Why not have MB be smarter about de-duping flags?
+
+This just adds complexity to the MB implementation, and duplicates logic
+that GYP and GN already have to support anyway; in particular, it might
+require MB to know how to parse GYP and GN values. The belief is that
+if MB does *not* do this, it will lead to fewer surprises.
+
+It will not be hard to change this if need be.
+
+### Integration w/ gclient runhooks
+
+On the bots, we will disable `gyp_chromium` as part of runhooks (using
+`GYP_CHROMIUM_NO_ACTION=1`), so that mb shows up as a separate step.
+
+At the moment, we expect most developers to either continue to use
+`gyp_chromium` in runhooks or to disable at as above if they have no
+use for GYP at all. We may revisit how this works once we encourage more
+people to use GN full-time (i.e., we might take `gyp_chromium` out of
+runhooks altogether).
+
+### Config per flag set or config per (os/arch/flag set)?
+
+Currently, mb_config.pyl does not specify the host_os, target_os, host_cpu, or
+target_cpu values for every config that Chromium runs on, it only specifies
+them for when the values need to be explicitly set on the command line.
+
+Instead, we have one config per unique combination of flags only.
+
+In other words, rather than having `linux_rel_bot`, `win_rel_bot`, and
+`mac_rel_bot`, we just have `rel_bot`.
+
+This design allows us to determine easily all of the different sets
+of flags that we need to support, but *not* which flags are used on which
+host/target combinations.
+
+It may be that we should really track the latter. Doing so is just a
+config file change, however.
+
+### Non-goals
+
+* MB is not intended to replace direct invocation of GN or GYP for
+ complicated build scenarios (aka ChromeOS), where multiple flags need
+ to be set to user-defined paths for specific toolchains (e.g., where
+ ChromeOS needs to specify specific board types and compilers).
+
+* MB is not intended at this time to be something developers use frequently,
+ or to add a lot of features to. We hope to be able to get rid of it once
+ the GYP->GN migration is done, and so we should not add things for
+ developers that can't easily be added to GN itself.
+
+* MB is not intended to replace the
+ [CR tool](https://code.google.com/p/chromium/wiki/CRUserManual). Not
+ only is it only intended to replace the gyp\_chromium part of `'gclient
+ runhooks'`, it is not really meant as a developer-facing tool.
diff --git a/deps/v8/tools/mb/docs/user_guide.md b/deps/v8/tools/mb/docs/user_guide.md
new file mode 100644
index 0000000000..9817553bf6
--- /dev/null
+++ b/deps/v8/tools/mb/docs/user_guide.md
@@ -0,0 +1,297 @@
+# The MB (Meta-Build wrapper) user guide
+
+[TOC]
+
+## Introduction
+
+`mb` is a simple python wrapper around the GYP and GN meta-build tools to
+be used as part of the GYP->GN migration.
+
+It is intended to be used by bots to make it easier to manage the configuration
+each bot builds (i.e., the configurations can be changed from chromium
+commits), and to consolidate the list of all of the various configurations
+that Chromium is built in.
+
+Ideally this tool will no longer be needed after the migration is complete.
+
+For more discussion of MB, see also [the design spec](design_spec.md).
+
+## MB subcommands
+
+### `mb analyze`
+
+`mb analyze` is reponsible for determining what targets are affected by
+a list of files (e.g., the list of files in a patch on a trybot):
+
+```
+mb analyze -c chromium_linux_rel //out/Release input.json output.json
+```
+
+Either the `-c/--config` flag or the `-m/--master` and `-b/--builder` flags
+must be specified so that `mb` can figure out which config to use.
+
+The first positional argument must be a GN-style "source-absolute" path
+to the build directory.
+
+The second positional argument is a (normal) path to a JSON file containing
+a single object with the following fields:
+
+ * `files`: an array of the modified filenames to check (as paths relative to
+ the checkout root).
+ * `test_targets`: an array of (ninja) build targets that needed to run the
+ tests we wish to run. An empty array will be treated as if there are
+ no tests that will be run.
+ * `additional_compile_targets`: an array of (ninja) build targets that
+ reflect the stuff we might want to build *in addition to* the list
+ passed in `test_targets`. Targets in this list will be treated
+ specially, in the following way: if a given target is a "meta"
+ (GN: group, GYP: none) target like 'blink_tests' or
+ 'chromium_builder_tests', or even the ninja-specific 'all' target,
+ then only the *dependencies* of the target that are affected by
+ the modified files will be rebuilt (not the target itself, which
+ might also cause unaffected dependencies to be rebuilt). An empty
+ list will be treated as if there are no additional targets to build.
+ Empty lists for both `test_targets` and `additional_compile_targets`
+ would cause no work to be done, so will result in an error.
+ * `targets`: a legacy field that resembled a union of `compile_targets`
+ and `test_targets`. Support for this field will be removed once the
+ bots have been updated to use compile_targets and test_targets instead.
+
+The third positional argument is a (normal) path to where mb will write
+the result, also as a JSON object. This object may contain the following
+fields:
+
+ * `error`: this should only be present if something failed.
+ * `compile_targets`: the list of ninja targets that should be passed
+ directly to the corresponding ninja / compile.py invocation. This
+ list may contain entries that are *not* listed in the input (see
+ the description of `additional_compile_targets` above and
+ [design_spec.md](the design spec) for how this works).
+ * `invalid_targets`: a list of any targets that were passed in
+ either of the input lists that weren't actually found in the graph.
+ * `test_targets`: the subset of the input `test_targets` that are
+ potentially out of date, indicating that the matching test steps
+ should be re-run.
+ * `targets`: a legacy field that indicates the subset of the input `targets`
+ that depend on the input `files`.
+ * `build_targets`: a legacy field that indicates the minimal subset of
+ targets needed to build all of `targets` that were affected.
+ * `status`: a field containing one of three strings:
+
+ * `"Found dependency"` (build the `compile_targets`)
+ * `"No dependency"` (i.e., no build needed)
+ * `"Found dependency (all)"` (`test_targets` is returned as-is;
+ `compile_targets` should contain the union of `test_targets` and
+ `additional_compile_targets`. In this case the targets do not
+ need to be pruned).
+
+See [design_spec.md](the design spec) for more details and examples; the
+differences can be subtle. We won't even go into how the `targets` and
+`build_targets` differ from each other or from `compile_targets` and
+`test_targets`.
+
+The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--master`,
+`-q/--quiet`, and `-v/--verbose` flags work as documented for `mb gen`.
+
+### `mb audit`
+
+`mb audit` is used to track the progress of the GYP->GN migration. You can
+use it to check a single master, or all the masters we care about. See
+`mb help audit` for more details (most people are not expected to care about
+this).
+
+### `mb gen`
+
+`mb gen` is responsible for generating the Ninja files by invoking either GYP
+or GN as appropriate. It takes arguments to specify a build config and
+a directory, then runs GYP or GN as appropriate:
+
+```
+% mb gen -m tryserver.chromium.linux -b linux_rel //out/Release
+% mb gen -c linux_rel_trybot //out/Release
+```
+
+Either the `-c/--config` flag or the `-m/--master` and `-b/--builder` flags
+must be specified so that `mb` can figure out which config to use. The
+`--phase` flag must also be used with builders that have multiple
+build/compile steps (and only with those builders).
+
+By default, MB will look for a bot config file under `//ios/build/bots` (see
+[design_spec.md](the design spec) for details of how the bot config files
+work). If no matching one is found, will then look in
+`//tools/mb/mb_config.pyl` to look up the config information, but you can
+specify a custom config file using the `-f/--config-file` flag.
+
+The path must be a GN-style "source-absolute" path (as above).
+
+You can pass the `-n/--dryrun` flag to mb gen to see what will happen without
+actually writing anything.
+
+You can pass the `-q/--quiet` flag to get mb to be silent unless there is an
+error, and pass the `-v/--verbose` flag to get mb to log all of the files
+that are read and written, and all the commands that are run.
+
+If the build config will use the Goma distributed-build system, you can pass
+the path to your Goma client in the `-g/--goma-dir` flag, and it will be
+incorporated into the appropriate flags for GYP or GN as needed.
+
+If gen ends up using GYP, the path must have a valid GYP configuration as the
+last component of the path (i.e., specify `//out/Release_x64`, not `//out`).
+The gyp script defaults to `//build/gyp_chromium`, but can be overridden with
+the `--gyp-script` flag, e.g. `--gyp-script=gypfiles/gyp_v8`.
+
+### `mb help`
+
+Produces help output on the other subcommands
+
+### `mb lookup`
+
+Prints what command will be run by `mb gen` (like `mb gen -n` but does
+not require you to specify a path).
+
+The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--master`,
+`--phase`, `-q/--quiet`, and `-v/--verbose` flags work as documented for
+`mb gen`.
+
+### `mb validate`
+
+Does internal checking to make sure the config file is syntactically
+valid and that all of the entries are used properly. It does not validate
+that the flags make sense, or that the builder names are legal or
+comprehensive, but it does complain about configs and mixins that aren't
+used.
+
+The `-f/--config-file` and `-q/--quiet` flags work as documented for
+`mb gen`.
+
+This is mostly useful as a presubmit check and for verifying changes to
+the config file.
+
+## Isolates and Swarming
+
+`mb gen` is also responsible for generating the `.isolate` and
+`.isolated.gen.json` files needed to run test executables through swarming
+in a GN build (in a GYP build, this is done as part of the compile step).
+
+If you wish to generate the isolate files, pass `mb gen` the
+`--swarming-targets-file` command line argument; that arg should be a path
+to a file containing a list of ninja build targets to compute the runtime
+dependencies for (on Windows, use the ninja target name, not the file, so
+`base_unittests`, not `base_unittests.exe`).
+
+MB will take this file, translate each build target to the matching GN
+label (e.g., `base_unittests` -> `//base:base_unittests`, write that list
+to a file called `runtime_deps` in the build directory, and pass that to
+`gn gen $BUILD ... --runtime-deps-list-file=$BUILD/runtime_deps`.
+
+Once GN has computed the lists of runtime dependencies, MB will then
+look up the command line for each target (currently this is hard-coded
+in [mb.py](https://code.google.com/p/chromium/codesearch?q=mb.py#chromium/src/tools/mb/mb.py&q=mb.py%20GetIsolateCommand&sq=package:chromium&type=cs)), and write out the
+matching `.isolate` and `.isolated.gen.json` files.
+
+## The `mb_config.pyl` config file
+
+The `mb_config.pyl` config file is intended to enumerate all of the
+supported build configurations for Chromium. Generally speaking, you
+should never need to (or want to) build a configuration that isn't
+listed here, and so by using the configs in this file you can avoid
+having to juggle long lists of GYP_DEFINES and gn args by hand.
+
+`mb_config.pyl` is structured as a file containing a single PYthon Literal
+expression: a dictionary with three main keys, `masters`, `configs` and
+`mixins`.
+
+The `masters` key contains a nested series of dicts containing mappings
+of master -> builder -> config . This allows us to isolate the buildbot
+recipes from the actual details of the configs. The config should either
+be a single string value representing a key in the `configs` dictionary,
+or a list of strings, each of which is a key in the `configs` dictionary;
+the latter case is for builders that do multiple compiles with different
+arguments in a single build, and must *only* be used for such builders
+(where a --phase argument must be supplied in each lookup or gen call).
+
+The `configs` key points to a dictionary of named build configurations.
+
+There should be an key in this dict for every supported configuration
+of Chromium, meaning every configuration we have a bot for, and every
+configuration commonly used by develpers but that we may not have a bot
+for.
+
+The value of each key is a list of "mixins" that will define what that
+build_config does. Each item in the list must be an entry in the dictionary
+value of the `mixins` key.
+
+Each mixin value is itself a dictionary that contains one or more of the
+following keys:
+
+ * `gyp_crosscompile`: a boolean; if true, GYP_CROSSCOMPILE=1 is set in
+ the environment and passed to GYP.
+ * `gyp_defines`: a string containing a list of GYP_DEFINES.
+ * `gn_args`: a string containing a list of values passed to gn --args.
+ * `mixins`: a list of other mixins that should be included.
+ * `type`: a string with either the value `gyp` or `gn`;
+ setting this indicates which meta-build tool to use.
+
+When `mb gen` or `mb analyze` executes, it takes a config name, looks it
+up in the 'configs' dict, and then does a left-to-right expansion of the
+mixins; gyp_defines and gn_args values are concatenated, and the type values
+override each other.
+
+For example, if you had:
+
+```
+{
+ 'configs`: {
+ 'linux_release_trybot': ['gyp_release', 'trybot'],
+ 'gn_shared_debug': None,
+ }
+ 'mixins': {
+ 'bot': {
+ 'gyp_defines': 'use_goma=1 dcheck_always_on=0',
+ 'gn_args': 'use_goma=true dcheck_always_on=false',
+ },
+ 'debug': {
+ 'gn_args': 'is_debug=true',
+ },
+ 'gn': {'type': 'gn'},
+ 'gyp_release': {
+ 'mixins': ['release'],
+ 'type': 'gyp',
+ },
+ 'release': {
+ 'gn_args': 'is_debug=false',
+ }
+ 'shared': {
+ 'gn_args': 'is_component_build=true',
+ 'gyp_defines': 'component=shared_library',
+ },
+ 'trybot': {
+ 'gyp_defines': 'dcheck_always_on=1',
+ 'gn_args': 'dcheck_always_on=true',
+ }
+ }
+}
+```
+
+and you ran `mb gen -c linux_release_trybot //out/Release`, it would
+translate into a call to `gyp_chromium -G Release` with `GYP_DEFINES` set to
+`"use_goma=true dcheck_always_on=false dcheck_always_on=true"`.
+
+(From that you can see that mb is intentionally dumb and does not
+attempt to de-dup the flags, it lets gyp do that).
+
+## Debugging MB
+
+By design, MB should be simple enough that very little can go wrong.
+
+The most obvious issue is that you might see different commands being
+run than you expect; running `'mb -v'` will print what it's doing and
+run the commands; `'mb -n'` will print what it will do but *not* run
+the commands.
+
+If you hit weirder things than that, add some print statements to the
+python script, send a question to gn-dev@chromium.org, or
+[file a bug](https://crbug.com/new) with the label
+'mb' and cc: dpranke@chromium.org.
+
+
diff --git a/deps/v8/tools/mb/mb b/deps/v8/tools/mb/mb
new file mode 100755
index 0000000000..d3a0cdf019
--- /dev/null
+++ b/deps/v8/tools/mb/mb
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+base_dir=$(dirname "$0")
+
+PYTHONDONTWRITEBYTECODE=1 exec python "$base_dir/mb.py" "$@"
diff --git a/deps/v8/tools/mb/mb.bat b/deps/v8/tools/mb/mb.bat
new file mode 100755
index 0000000000..a82770e714
--- /dev/null
+++ b/deps/v8/tools/mb/mb.bat
@@ -0,0 +1,6 @@
+@echo off
+setlocal
+:: This is required with cygwin only.
+PATH=%~dp0;%PATH%
+set PYTHONDONTWRITEBYTECODE=1
+call python "%~dp0mb.py" %*
diff --git a/deps/v8/tools/mb/mb.py b/deps/v8/tools/mb/mb.py
new file mode 100755
index 0000000000..536dc00fcb
--- /dev/null
+++ b/deps/v8/tools/mb/mb.py
@@ -0,0 +1,1500 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""MB - the Meta-Build wrapper around GYP and GN
+
+MB is a wrapper script for GYP and GN that can be used to generate build files
+for sets of canned configurations and analyze them.
+"""
+
+from __future__ import print_function
+
+import argparse
+import ast
+import errno
+import json
+import os
+import pipes
+import pprint
+import re
+import shutil
+import sys
+import subprocess
+import tempfile
+import traceback
+import urllib2
+
+from collections import OrderedDict
+
+CHROMIUM_SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
+ os.path.abspath(__file__))))
+sys.path = [os.path.join(CHROMIUM_SRC_DIR, 'build')] + sys.path
+
+import gn_helpers
+
+
+def main(args):
+ mbw = MetaBuildWrapper()
+ return mbw.Main(args)
+
+
+class MetaBuildWrapper(object):
+ def __init__(self):
+ self.chromium_src_dir = CHROMIUM_SRC_DIR
+ self.default_config = os.path.join(self.chromium_src_dir, 'infra', 'mb',
+ 'mb_config.pyl')
+ self.executable = sys.executable
+ self.platform = sys.platform
+ self.sep = os.sep
+ self.args = argparse.Namespace()
+ self.configs = {}
+ self.masters = {}
+ self.mixins = {}
+
+ def Main(self, args):
+ self.ParseArgs(args)
+ try:
+ ret = self.args.func()
+ if ret:
+ self.DumpInputFiles()
+ return ret
+ except KeyboardInterrupt:
+ self.Print('interrupted, exiting', stream=sys.stderr)
+ return 130
+ except Exception:
+ self.DumpInputFiles()
+ s = traceback.format_exc()
+ for l in s.splitlines():
+ self.Print(l)
+ return 1
+
+ def ParseArgs(self, argv):
+ def AddCommonOptions(subp):
+ subp.add_argument('-b', '--builder',
+ help='builder name to look up config from')
+ subp.add_argument('-m', '--master',
+ help='master name to look up config from')
+ subp.add_argument('-c', '--config',
+ help='configuration to analyze')
+ subp.add_argument('--phase', type=int,
+ help=('build phase for a given build '
+ '(int in [1, 2, ...))'))
+ subp.add_argument('-f', '--config-file', metavar='PATH',
+ default=self.default_config,
+ help='path to config file '
+ '(default is //tools/mb/mb_config.pyl)')
+ subp.add_argument('-g', '--goma-dir',
+ help='path to goma directory')
+ subp.add_argument('--gyp-script', metavar='PATH',
+ default=self.PathJoin('build', 'gyp_chromium'),
+ help='path to gyp script relative to project root '
+ '(default is %(default)s)')
+ subp.add_argument('--android-version-code',
+ help='Sets GN arg android_default_version_code and '
+ 'GYP_DEFINE app_manifest_version_code')
+ subp.add_argument('--android-version-name',
+ help='Sets GN arg android_default_version_name and '
+ 'GYP_DEFINE app_manifest_version_name')
+ subp.add_argument('-n', '--dryrun', action='store_true',
+ help='Do a dry run (i.e., do nothing, just print '
+ 'the commands that will run)')
+ subp.add_argument('-v', '--verbose', action='store_true',
+ help='verbose logging')
+
+ parser = argparse.ArgumentParser(prog='mb')
+ subps = parser.add_subparsers()
+
+ subp = subps.add_parser('analyze',
+ help='analyze whether changes to a set of files '
+ 'will cause a set of binaries to be rebuilt.')
+ AddCommonOptions(subp)
+ subp.add_argument('path', nargs=1,
+ help='path build was generated into.')
+ subp.add_argument('input_path', nargs=1,
+ help='path to a file containing the input arguments '
+ 'as a JSON object.')
+ subp.add_argument('output_path', nargs=1,
+ help='path to a file containing the output arguments '
+ 'as a JSON object.')
+ subp.set_defaults(func=self.CmdAnalyze)
+
+ subp = subps.add_parser('gen',
+ help='generate a new set of build files')
+ AddCommonOptions(subp)
+ subp.add_argument('--swarming-targets-file',
+ help='save runtime dependencies for targets listed '
+ 'in file.')
+ subp.add_argument('path', nargs=1,
+ help='path to generate build into')
+ subp.set_defaults(func=self.CmdGen)
+
+ subp = subps.add_parser('isolate',
+ help='generate the .isolate files for a given'
+ 'binary')
+ AddCommonOptions(subp)
+ subp.add_argument('path', nargs=1,
+ help='path build was generated into')
+ subp.add_argument('target', nargs=1,
+ help='ninja target to generate the isolate for')
+ subp.set_defaults(func=self.CmdIsolate)
+
+ subp = subps.add_parser('lookup',
+ help='look up the command for a given config or '
+ 'builder')
+ AddCommonOptions(subp)
+ subp.set_defaults(func=self.CmdLookup)
+
+ subp = subps.add_parser(
+ 'run',
+ help='build and run the isolated version of a '
+ 'binary',
+ formatter_class=argparse.RawDescriptionHelpFormatter)
+ subp.description = (
+ 'Build, isolate, and run the given binary with the command line\n'
+ 'listed in the isolate. You may pass extra arguments after the\n'
+ 'target; use "--" if the extra arguments need to include switches.\n'
+ '\n'
+ 'Examples:\n'
+ '\n'
+ ' % tools/mb/mb.py run -m chromium.linux -b "Linux Builder" \\\n'
+ ' //out/Default content_browsertests\n'
+ '\n'
+ ' % tools/mb/mb.py run out/Default content_browsertests\n'
+ '\n'
+ ' % tools/mb/mb.py run out/Default content_browsertests -- \\\n'
+ ' --test-launcher-retry-limit=0'
+ '\n'
+ )
+
+ AddCommonOptions(subp)
+ subp.add_argument('-j', '--jobs', dest='jobs', type=int,
+ help='Number of jobs to pass to ninja')
+ subp.add_argument('--no-build', dest='build', default=True,
+ action='store_false',
+ help='Do not build, just isolate and run')
+ subp.add_argument('path', nargs=1,
+ help=('path to generate build into (or use).'
+ ' This can be either a regular path or a '
+ 'GN-style source-relative path like '
+ '//out/Default.'))
+ subp.add_argument('target', nargs=1,
+ help='ninja target to build and run')
+ subp.add_argument('extra_args', nargs='*',
+ help=('extra args to pass to the isolate to run. Use '
+ '"--" as the first arg if you need to pass '
+ 'switches'))
+ subp.set_defaults(func=self.CmdRun)
+
+ subp = subps.add_parser('validate',
+ help='validate the config file')
+ subp.add_argument('-f', '--config-file', metavar='PATH',
+ default=self.default_config,
+ help='path to config file '
+ '(default is //infra/mb/mb_config.pyl)')
+ subp.set_defaults(func=self.CmdValidate)
+
+ subp = subps.add_parser('audit',
+ help='Audit the config file to track progress')
+ subp.add_argument('-f', '--config-file', metavar='PATH',
+ default=self.default_config,
+ help='path to config file '
+ '(default is //infra/mb/mb_config.pyl)')
+ subp.add_argument('-i', '--internal', action='store_true',
+ help='check internal masters also')
+ subp.add_argument('-m', '--master', action='append',
+ help='master to audit (default is all non-internal '
+ 'masters in file)')
+ subp.add_argument('-u', '--url-template', action='store',
+ default='https://build.chromium.org/p/'
+ '{master}/json/builders',
+ help='URL scheme for JSON APIs to buildbot '
+ '(default: %(default)s) ')
+ subp.add_argument('-c', '--check-compile', action='store_true',
+ help='check whether tbd and master-only bots actually'
+ ' do compiles')
+ subp.set_defaults(func=self.CmdAudit)
+
+ subp = subps.add_parser('help',
+ help='Get help on a subcommand.')
+ subp.add_argument(nargs='?', action='store', dest='subcommand',
+ help='The command to get help for.')
+ subp.set_defaults(func=self.CmdHelp)
+
+ self.args = parser.parse_args(argv)
+
+ def DumpInputFiles(self):
+
+ def DumpContentsOfFilePassedTo(arg_name, path):
+ if path and self.Exists(path):
+ self.Print("\n# To recreate the file passed to %s:" % arg_name)
+ self.Print("%% cat > %s <<EOF)" % path)
+ contents = self.ReadFile(path)
+ self.Print(contents)
+ self.Print("EOF\n%\n")
+
+ if getattr(self.args, 'input_path', None):
+ DumpContentsOfFilePassedTo(
+ 'argv[0] (input_path)', self.args.input_path[0])
+ if getattr(self.args, 'swarming_targets_file', None):
+ DumpContentsOfFilePassedTo(
+ '--swarming-targets-file', self.args.swarming_targets_file)
+
+ def CmdAnalyze(self):
+ vals = self.Lookup()
+ self.ClobberIfNeeded(vals)
+ if vals['type'] == 'gn':
+ return self.RunGNAnalyze(vals)
+ else:
+ return self.RunGYPAnalyze(vals)
+
+ def CmdGen(self):
+ vals = self.Lookup()
+ self.ClobberIfNeeded(vals)
+ if vals['type'] == 'gn':
+ return self.RunGNGen(vals)
+ else:
+ return self.RunGYPGen(vals)
+
+ def CmdHelp(self):
+ if self.args.subcommand:
+ self.ParseArgs([self.args.subcommand, '--help'])
+ else:
+ self.ParseArgs(['--help'])
+
+ def CmdIsolate(self):
+ vals = self.GetConfig()
+ if not vals:
+ return 1
+
+ if vals['type'] == 'gn':
+ return self.RunGNIsolate(vals)
+ else:
+ return self.Build('%s_run' % self.args.target[0])
+
+ def CmdLookup(self):
+ vals = self.Lookup()
+ if vals['type'] == 'gn':
+ cmd = self.GNCmd('gen', '_path_')
+ gn_args = self.GNArgs(vals)
+ self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
+ env = None
+ else:
+ cmd, env = self.GYPCmd('_path_', vals)
+
+ self.PrintCmd(cmd, env)
+ return 0
+
+ def CmdRun(self):
+ vals = self.GetConfig()
+ if not vals:
+ return 1
+
+ build_dir = self.args.path[0]
+ target = self.args.target[0]
+
+ if vals['type'] == 'gn':
+ if self.args.build:
+ ret = self.Build(target)
+ if ret:
+ return ret
+ ret = self.RunGNIsolate(vals)
+ if ret:
+ return ret
+ else:
+ ret = self.Build('%s_run' % target)
+ if ret:
+ return ret
+
+ cmd = [
+ self.executable,
+ self.PathJoin('tools', 'swarming_client', 'isolate.py'),
+ 'run',
+ '-s',
+ self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
+ ]
+ if self.args.extra_args:
+ cmd += ['--'] + self.args.extra_args
+
+ ret, _, _ = self.Run(cmd, force_verbose=False, buffer_output=False)
+
+ return ret
+
+ def CmdValidate(self, print_ok=True):
+ errs = []
+
+ # Read the file to make sure it parses.
+ self.ReadConfigFile()
+
+ # Build a list of all of the configs referenced by builders.
+ all_configs = {}
+ for master in self.masters:
+ for config in self.masters[master].values():
+ if isinstance(config, list):
+ for c in config:
+ all_configs[c] = master
+ else:
+ all_configs[config] = master
+
+ # Check that every referenced args file or config actually exists.
+ for config, loc in all_configs.items():
+ if config.startswith('//'):
+ if not self.Exists(self.ToAbsPath(config)):
+ errs.append('Unknown args file "%s" referenced from "%s".' %
+ (config, loc))
+ elif not config in self.configs:
+ errs.append('Unknown config "%s" referenced from "%s".' %
+ (config, loc))
+
+ # Check that every actual config is actually referenced.
+ for config in self.configs:
+ if not config in all_configs:
+ errs.append('Unused config "%s".' % config)
+
+ # Figure out the whole list of mixins, and check that every mixin
+ # listed by a config or another mixin actually exists.
+ referenced_mixins = set()
+ for config, mixins in self.configs.items():
+ for mixin in mixins:
+ if not mixin in self.mixins:
+ errs.append('Unknown mixin "%s" referenced by config "%s".' %
+ (mixin, config))
+ referenced_mixins.add(mixin)
+
+ for mixin in self.mixins:
+ for sub_mixin in self.mixins[mixin].get('mixins', []):
+ if not sub_mixin in self.mixins:
+ errs.append('Unknown mixin "%s" referenced by mixin "%s".' %
+ (sub_mixin, mixin))
+ referenced_mixins.add(sub_mixin)
+
+ # Check that every mixin defined is actually referenced somewhere.
+ for mixin in self.mixins:
+ if not mixin in referenced_mixins:
+ errs.append('Unreferenced mixin "%s".' % mixin)
+
+ if errs:
+ raise MBErr(('mb config file %s has problems:' % self.args.config_file) +
+ '\n ' + '\n '.join(errs))
+
+ if print_ok:
+ self.Print('mb config file %s looks ok.' % self.args.config_file)
+ return 0
+
+ def CmdAudit(self):
+ """Track the progress of the GYP->GN migration on the bots."""
+
+ # First, make sure the config file is okay, but don't print anything
+ # if it is (it will throw an error if it isn't).
+ self.CmdValidate(print_ok=False)
+
+ stats = OrderedDict()
+ STAT_MASTER_ONLY = 'Master only'
+ STAT_CONFIG_ONLY = 'Config only'
+ STAT_TBD = 'Still TBD'
+ STAT_GYP = 'Still GYP'
+ STAT_DONE = 'Done (on GN)'
+ stats[STAT_MASTER_ONLY] = 0
+ stats[STAT_CONFIG_ONLY] = 0
+ stats[STAT_TBD] = 0
+ stats[STAT_GYP] = 0
+ stats[STAT_DONE] = 0
+
+ def PrintBuilders(heading, builders, notes):
+ stats.setdefault(heading, 0)
+ stats[heading] += len(builders)
+ if builders:
+ self.Print(' %s:' % heading)
+ for builder in sorted(builders):
+ self.Print(' %s%s' % (builder, notes[builder]))
+
+ self.ReadConfigFile()
+
+ masters = self.args.master or self.masters
+ for master in sorted(masters):
+ url = self.args.url_template.replace('{master}', master)
+
+ self.Print('Auditing %s' % master)
+
+ MASTERS_TO_SKIP = (
+ 'client.skia',
+ 'client.v8.fyi',
+ 'tryserver.v8',
+ )
+ if master in MASTERS_TO_SKIP:
+ # Skip these bots because converting them is the responsibility of
+ # those teams and out of scope for the Chromium migration to GN.
+ self.Print(' Skipped (out of scope)')
+ self.Print('')
+ continue
+
+ INTERNAL_MASTERS = ('official.desktop', 'official.desktop.continuous',
+ 'internal.client.kitchensync')
+ if master in INTERNAL_MASTERS and not self.args.internal:
+ # Skip these because the servers aren't accessible by default ...
+ self.Print(' Skipped (internal)')
+ self.Print('')
+ continue
+
+ try:
+ # Fetch the /builders contents from the buildbot master. The
+ # keys of the dict are the builder names themselves.
+ json_contents = self.Fetch(url)
+ d = json.loads(json_contents)
+ except Exception as e:
+ self.Print(str(e))
+ return 1
+
+ config_builders = set(self.masters[master])
+ master_builders = set(d.keys())
+ both = master_builders & config_builders
+ master_only = master_builders - config_builders
+ config_only = config_builders - master_builders
+ tbd = set()
+ gyp = set()
+ done = set()
+ notes = {builder: '' for builder in config_builders | master_builders}
+
+ for builder in both:
+ config = self.masters[master][builder]
+ if config == 'tbd':
+ tbd.add(builder)
+ elif isinstance(config, list):
+ vals = self.FlattenConfig(config[0])
+ if vals['type'] == 'gyp':
+ gyp.add(builder)
+ else:
+ done.add(builder)
+ elif config.startswith('//'):
+ done.add(builder)
+ else:
+ vals = self.FlattenConfig(config)
+ if vals['type'] == 'gyp':
+ gyp.add(builder)
+ else:
+ done.add(builder)
+
+ if self.args.check_compile and (tbd or master_only):
+ either = tbd | master_only
+ for builder in either:
+ notes[builder] = ' (' + self.CheckCompile(master, builder) +')'
+
+ if master_only or config_only or tbd or gyp:
+ PrintBuilders(STAT_MASTER_ONLY, master_only, notes)
+ PrintBuilders(STAT_CONFIG_ONLY, config_only, notes)
+ PrintBuilders(STAT_TBD, tbd, notes)
+ PrintBuilders(STAT_GYP, gyp, notes)
+ else:
+ self.Print(' All GN!')
+
+ stats[STAT_DONE] += len(done)
+
+ self.Print('')
+
+ fmt = '{:<27} {:>4}'
+ self.Print(fmt.format('Totals', str(sum(int(v) for v in stats.values()))))
+ self.Print(fmt.format('-' * 27, '----'))
+ for stat, count in stats.items():
+ self.Print(fmt.format(stat, str(count)))
+
+ return 0
+
+ def GetConfig(self):
+ build_dir = self.args.path[0]
+
+ vals = {}
+ if self.args.builder or self.args.master or self.args.config:
+ vals = self.Lookup()
+ if vals['type'] == 'gn':
+ # Re-run gn gen in order to ensure the config is consistent with the
+ # build dir.
+ self.RunGNGen(vals)
+ return vals
+
+ mb_type_path = self.PathJoin(self.ToAbsPath(build_dir), 'mb_type')
+ if not self.Exists(mb_type_path):
+ toolchain_path = self.PathJoin(self.ToAbsPath(build_dir),
+ 'toolchain.ninja')
+ if not self.Exists(toolchain_path):
+ self.Print('Must either specify a path to an existing GN build dir '
+ 'or pass in a -m/-b pair or a -c flag to specify the '
+ 'configuration')
+ return {}
+ else:
+ mb_type = 'gn'
+ else:
+ mb_type = self.ReadFile(mb_type_path).strip()
+
+ if mb_type == 'gn':
+ vals = self.GNValsFromDir(build_dir)
+ else:
+ vals = {}
+ vals['type'] = mb_type
+
+ return vals
+
+ def GNValsFromDir(self, build_dir):
+ args_contents = ""
+ gn_args_path = self.PathJoin(self.ToAbsPath(build_dir), 'args.gn')
+ if self.Exists(gn_args_path):
+ args_contents = self.ReadFile(gn_args_path)
+ gn_args = []
+ for l in args_contents.splitlines():
+ fields = l.split(' ')
+ name = fields[0]
+ val = ' '.join(fields[2:])
+ gn_args.append('%s=%s' % (name, val))
+
+ return {
+ 'gn_args': ' '.join(gn_args),
+ 'type': 'gn',
+ }
+
+ def Lookup(self):
+ vals = self.ReadBotConfig()
+ if not vals:
+ self.ReadConfigFile()
+ config = self.ConfigFromArgs()
+ if config.startswith('//'):
+ if not self.Exists(self.ToAbsPath(config)):
+ raise MBErr('args file "%s" not found' % config)
+ vals = {
+ 'args_file': config,
+ 'cros_passthrough': False,
+ 'gn_args': '',
+ 'gyp_crosscompile': False,
+ 'gyp_defines': '',
+ 'type': 'gn',
+ }
+ else:
+ if not config in self.configs:
+ raise MBErr('Config "%s" not found in %s' %
+ (config, self.args.config_file))
+ vals = self.FlattenConfig(config)
+
+ # Do some basic sanity checking on the config so that we
+ # don't have to do this in every caller.
+ assert 'type' in vals, 'No meta-build type specified in the config'
+ assert vals['type'] in ('gn', 'gyp'), (
+ 'Unknown meta-build type "%s"' % vals['gn_args'])
+
+ return vals
+
+ def ReadBotConfig(self):
+ if not self.args.master or not self.args.builder:
+ return {}
+ path = self.PathJoin(self.chromium_src_dir, 'ios', 'build', 'bots',
+ self.args.master, self.args.builder + '.json')
+ if not self.Exists(path):
+ return {}
+
+ contents = json.loads(self.ReadFile(path))
+ gyp_vals = contents.get('GYP_DEFINES', {})
+ if isinstance(gyp_vals, dict):
+ gyp_defines = ' '.join('%s=%s' % (k, v) for k, v in gyp_vals.items())
+ else:
+ gyp_defines = ' '.join(gyp_vals)
+ gn_args = ' '.join(contents.get('gn_args', []))
+
+ return {
+ 'args_file': '',
+ 'cros_passthrough': False,
+ 'gn_args': gn_args,
+ 'gyp_crosscompile': False,
+ 'gyp_defines': gyp_defines,
+ 'type': contents.get('mb_type', ''),
+ }
+
+ def ReadConfigFile(self):
+ if not self.Exists(self.args.config_file):
+ raise MBErr('config file not found at %s' % self.args.config_file)
+
+ try:
+ contents = ast.literal_eval(self.ReadFile(self.args.config_file))
+ except SyntaxError as e:
+ raise MBErr('Failed to parse config file "%s": %s' %
+ (self.args.config_file, e))
+
+ self.configs = contents['configs']
+ self.masters = contents['masters']
+ self.mixins = contents['mixins']
+
+ def ConfigFromArgs(self):
+ if self.args.config:
+ if self.args.master or self.args.builder:
+ raise MBErr('Can not specific both -c/--config and -m/--master or '
+ '-b/--builder')
+
+ return self.args.config
+
+ if not self.args.master or not self.args.builder:
+ raise MBErr('Must specify either -c/--config or '
+ '(-m/--master and -b/--builder)')
+
+ if not self.args.master in self.masters:
+ raise MBErr('Master name "%s" not found in "%s"' %
+ (self.args.master, self.args.config_file))
+
+ if not self.args.builder in self.masters[self.args.master]:
+ raise MBErr('Builder name "%s" not found under masters[%s] in "%s"' %
+ (self.args.builder, self.args.master, self.args.config_file))
+
+ config = self.masters[self.args.master][self.args.builder]
+ if isinstance(config, list):
+ if self.args.phase is None:
+ raise MBErr('Must specify a build --phase for %s on %s' %
+ (self.args.builder, self.args.master))
+ phase = int(self.args.phase)
+ if phase < 1 or phase > len(config):
+ raise MBErr('Phase %d out of bounds for %s on %s' %
+ (phase, self.args.builder, self.args.master))
+ return config[phase-1]
+
+ if self.args.phase is not None:
+ raise MBErr('Must not specify a build --phase for %s on %s' %
+ (self.args.builder, self.args.master))
+ return config
+
+ def FlattenConfig(self, config):
+ mixins = self.configs[config]
+ vals = {
+ 'args_file': '',
+ 'cros_passthrough': False,
+ 'gn_args': [],
+ 'gyp_defines': '',
+ 'gyp_crosscompile': False,
+ 'type': None,
+ }
+
+ visited = []
+ self.FlattenMixins(mixins, vals, visited)
+ return vals
+
+ def FlattenMixins(self, mixins, vals, visited):
+ for m in mixins:
+ if m not in self.mixins:
+ raise MBErr('Unknown mixin "%s"' % m)
+
+ visited.append(m)
+
+ mixin_vals = self.mixins[m]
+
+ if 'cros_passthrough' in mixin_vals:
+ vals['cros_passthrough'] = mixin_vals['cros_passthrough']
+ if 'gn_args' in mixin_vals:
+ if vals['gn_args']:
+ vals['gn_args'] += ' ' + mixin_vals['gn_args']
+ else:
+ vals['gn_args'] = mixin_vals['gn_args']
+ if 'gyp_crosscompile' in mixin_vals:
+ vals['gyp_crosscompile'] = mixin_vals['gyp_crosscompile']
+ if 'gyp_defines' in mixin_vals:
+ if vals['gyp_defines']:
+ vals['gyp_defines'] += ' ' + mixin_vals['gyp_defines']
+ else:
+ vals['gyp_defines'] = mixin_vals['gyp_defines']
+ if 'type' in mixin_vals:
+ vals['type'] = mixin_vals['type']
+
+ if 'mixins' in mixin_vals:
+ self.FlattenMixins(mixin_vals['mixins'], vals, visited)
+ return vals
+
+ def ClobberIfNeeded(self, vals):
+ path = self.args.path[0]
+ build_dir = self.ToAbsPath(path)
+ mb_type_path = self.PathJoin(build_dir, 'mb_type')
+ needs_clobber = False
+ new_mb_type = vals['type']
+ if self.Exists(build_dir):
+ if self.Exists(mb_type_path):
+ old_mb_type = self.ReadFile(mb_type_path)
+ if old_mb_type != new_mb_type:
+ self.Print("Build type mismatch: was %s, will be %s, clobbering %s" %
+ (old_mb_type, new_mb_type, path))
+ needs_clobber = True
+ else:
+ # There is no 'mb_type' file in the build directory, so this probably
+ # means that the prior build(s) were not done through mb, and we
+ # have no idea if this was a GYP build or a GN build. Clobber it
+ # to be safe.
+ self.Print("%s/mb_type missing, clobbering to be safe" % path)
+ needs_clobber = True
+
+ if self.args.dryrun:
+ return
+
+ if needs_clobber:
+ self.RemoveDirectory(build_dir)
+
+ self.MaybeMakeDirectory(build_dir)
+ self.WriteFile(mb_type_path, new_mb_type)
+
+ def RunGNGen(self, vals):
+ build_dir = self.args.path[0]
+
+ cmd = self.GNCmd('gen', build_dir, '--check')
+ gn_args = self.GNArgs(vals)
+
+ # Since GN hasn't run yet, the build directory may not even exist.
+ self.MaybeMakeDirectory(self.ToAbsPath(build_dir))
+
+ gn_args_path = self.ToAbsPath(build_dir, 'args.gn')
+ self.WriteFile(gn_args_path, gn_args, force_verbose=True)
+
+ swarming_targets = []
+ if getattr(self.args, 'swarming_targets_file', None):
+ # We need GN to generate the list of runtime dependencies for
+ # the compile targets listed (one per line) in the file so
+ # we can run them via swarming. We use ninja_to_gn.pyl to convert
+ # the compile targets to the matching GN labels.
+ path = self.args.swarming_targets_file
+ if not self.Exists(path):
+ self.WriteFailureAndRaise('"%s" does not exist' % path,
+ output_path=None)
+ contents = self.ReadFile(path)
+ swarming_targets = set(contents.splitlines())
+ gn_isolate_map = ast.literal_eval(self.ReadFile(self.PathJoin(
+ self.chromium_src_dir, 'testing', 'buildbot', 'gn_isolate_map.pyl')))
+ gn_labels = []
+ err = ''
+ for target in swarming_targets:
+ target_name = self.GNTargetName(target)
+ if not target_name in gn_isolate_map:
+ err += ('test target "%s" not found\n' % target_name)
+ elif gn_isolate_map[target_name]['type'] == 'unknown':
+ err += ('test target "%s" type is unknown\n' % target_name)
+ else:
+ gn_labels.append(gn_isolate_map[target_name]['label'])
+
+ if err:
+ raise MBErr('Error: Failed to match swarming targets to %s:\n%s' %
+ ('//testing/buildbot/gn_isolate_map.pyl', err))
+
+ gn_runtime_deps_path = self.ToAbsPath(build_dir, 'runtime_deps')
+ self.WriteFile(gn_runtime_deps_path, '\n'.join(gn_labels) + '\n')
+ cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
+
+ ret, _, _ = self.Run(cmd)
+ if ret:
+ # If `gn gen` failed, we should exit early rather than trying to
+ # generate isolates. Run() will have already logged any error output.
+ self.Print('GN gen failed: %d' % ret)
+ return ret
+
+ android = 'target_os="android"' in vals['gn_args']
+ for target in swarming_targets:
+ if android:
+ # Android targets may be either android_apk or executable. The former
+ # will result in runtime_deps associated with the stamp file, while the
+ # latter will result in runtime_deps associated with the executable.
+ target_name = self.GNTargetName(target)
+ label = gn_isolate_map[target_name]['label']
+ runtime_deps_targets = [
+ target_name + '.runtime_deps',
+ 'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
+ elif gn_isolate_map[target]['type'] == 'gpu_browser_test':
+ if self.platform == 'win32':
+ runtime_deps_targets = ['browser_tests.exe.runtime_deps']
+ else:
+ runtime_deps_targets = ['browser_tests.runtime_deps']
+ elif (gn_isolate_map[target]['type'] == 'script' or
+ gn_isolate_map[target].get('label_type') == 'group'):
+ # For script targets, the build target is usually a group,
+ # for which gn generates the runtime_deps next to the stamp file
+ # for the label, which lives under the obj/ directory.
+ label = gn_isolate_map[target]['label']
+ runtime_deps_targets = [
+ 'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
+ elif self.platform == 'win32':
+ runtime_deps_targets = [target + '.exe.runtime_deps']
+ else:
+ runtime_deps_targets = [target + '.runtime_deps']
+
+ for r in runtime_deps_targets:
+ runtime_deps_path = self.ToAbsPath(build_dir, r)
+ if self.Exists(runtime_deps_path):
+ break
+ else:
+ raise MBErr('did not generate any of %s' %
+ ', '.join(runtime_deps_targets))
+
+ command, extra_files = self.GetIsolateCommand(target, vals,
+ gn_isolate_map)
+
+ runtime_deps = self.ReadFile(runtime_deps_path).splitlines()
+
+ self.WriteIsolateFiles(build_dir, command, target, runtime_deps,
+ extra_files)
+
+ return 0
+
+ def RunGNIsolate(self, vals):
+ gn_isolate_map = ast.literal_eval(self.ReadFile(self.PathJoin(
+ self.chromium_src_dir, 'testing', 'buildbot', 'gn_isolate_map.pyl')))
+
+ build_dir = self.args.path[0]
+ target = self.args.target[0]
+ target_name = self.GNTargetName(target)
+ command, extra_files = self.GetIsolateCommand(target, vals, gn_isolate_map)
+
+ label = gn_isolate_map[target_name]['label']
+ cmd = self.GNCmd('desc', build_dir, label, 'runtime_deps')
+ ret, out, _ = self.Call(cmd)
+ if ret:
+ if out:
+ self.Print(out)
+ return ret
+
+ runtime_deps = out.splitlines()
+
+ self.WriteIsolateFiles(build_dir, command, target, runtime_deps,
+ extra_files)
+
+ ret, _, _ = self.Run([
+ self.executable,
+ self.PathJoin('tools', 'swarming_client', 'isolate.py'),
+ 'check',
+ '-i',
+ self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
+ '-s',
+ self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target))],
+ buffer_output=False)
+
+ return ret
+
+ def WriteIsolateFiles(self, build_dir, command, target, runtime_deps,
+ extra_files):
+ isolate_path = self.ToAbsPath(build_dir, target + '.isolate')
+ self.WriteFile(isolate_path,
+ pprint.pformat({
+ 'variables': {
+ 'command': command,
+ 'files': sorted(runtime_deps + extra_files),
+ }
+ }) + '\n')
+
+ self.WriteJSON(
+ {
+ 'args': [
+ '--isolated',
+ self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
+ '--isolate',
+ self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
+ ],
+ 'dir': self.chromium_src_dir,
+ 'version': 1,
+ },
+ isolate_path + 'd.gen.json',
+ )
+
+ def GNCmd(self, subcommand, path, *args):
+ if self.platform == 'linux2':
+ subdir, exe = 'linux64', 'gn'
+ elif self.platform == 'darwin':
+ subdir, exe = 'mac', 'gn'
+ else:
+ subdir, exe = 'win', 'gn.exe'
+
+ gn_path = self.PathJoin(self.chromium_src_dir, 'buildtools', subdir, exe)
+
+ return [gn_path, subcommand, path] + list(args)
+
+ def GNArgs(self, vals):
+ if vals['cros_passthrough']:
+ if not 'GN_ARGS' in os.environ:
+ raise MBErr('MB is expecting GN_ARGS to be in the environment')
+ gn_args = os.environ['GN_ARGS']
+ if not re.search('target_os.*=.*"chromeos"', gn_args):
+ raise MBErr('GN_ARGS is missing target_os = "chromeos": (GN_ARGS=%s)' %
+ gn_args)
+ else:
+ gn_args = vals['gn_args']
+
+ if self.args.goma_dir:
+ gn_args += ' goma_dir="%s"' % self.args.goma_dir
+
+ android_version_code = self.args.android_version_code
+ if android_version_code:
+ gn_args += ' android_default_version_code="%s"' % android_version_code
+
+ android_version_name = self.args.android_version_name
+ if android_version_name:
+ gn_args += ' android_default_version_name="%s"' % android_version_name
+
+ # Canonicalize the arg string into a sorted, newline-separated list
+ # of key-value pairs, and de-dup the keys if need be so that only
+ # the last instance of each arg is listed.
+ gn_args = gn_helpers.ToGNString(gn_helpers.FromGNArgs(gn_args))
+
+ args_file = vals.get('args_file', None)
+ if args_file:
+ gn_args = ('import("%s")\n' % vals['args_file']) + gn_args
+ return gn_args
+
+ def RunGYPGen(self, vals):
+ path = self.args.path[0]
+
+ output_dir = self.ParseGYPConfigPath(path)
+ cmd, env = self.GYPCmd(output_dir, vals)
+ ret, _, _ = self.Run(cmd, env=env)
+ return ret
+
+ def RunGYPAnalyze(self, vals):
+ output_dir = self.ParseGYPConfigPath(self.args.path[0])
+ if self.args.verbose:
+ inp = self.ReadInputJSON(['files', 'test_targets',
+ 'additional_compile_targets'])
+ self.Print()
+ self.Print('analyze input:')
+ self.PrintJSON(inp)
+ self.Print()
+
+ cmd, env = self.GYPCmd(output_dir, vals)
+ cmd.extend(['-f', 'analyzer',
+ '-G', 'config_path=%s' % self.args.input_path[0],
+ '-G', 'analyzer_output_path=%s' % self.args.output_path[0]])
+ ret, _, _ = self.Run(cmd, env=env)
+ if not ret and self.args.verbose:
+ outp = json.loads(self.ReadFile(self.args.output_path[0]))
+ self.Print()
+ self.Print('analyze output:')
+ self.PrintJSON(outp)
+ self.Print()
+
+ return ret
+
+ def GetIsolateCommand(self, target, vals, gn_isolate_map):
+ android = 'target_os="android"' in vals['gn_args']
+
+ # This needs to mirror the settings in //build/config/ui.gni:
+ # use_x11 = is_linux && !use_ozone.
+ use_x11 = (self.platform == 'linux2' and
+ not android and
+ not 'use_ozone=true' in vals['gn_args'])
+
+ asan = 'is_asan=true' in vals['gn_args']
+ msan = 'is_msan=true' in vals['gn_args']
+ tsan = 'is_tsan=true' in vals['gn_args']
+
+ target_name = self.GNTargetName(target)
+ test_type = gn_isolate_map[target_name]['type']
+
+ executable = gn_isolate_map[target_name].get('executable', target_name)
+ executable_suffix = '.exe' if self.platform == 'win32' else ''
+
+ cmdline = []
+ extra_files = []
+
+ if android and test_type != "script":
+ logdog_command = [
+ '--logdog-bin-cmd', './../../bin/logdog_butler',
+ '--project', 'chromium',
+ '--service-account-json',
+ '/creds/service_accounts/service-account-luci-logdog-publisher.json',
+ '--prefix', 'android/swarming/logcats/${SWARMING_TASK_ID}',
+ '--source', '${ISOLATED_OUTDIR}/logcats',
+ '--name', 'unified_logcats',
+ ]
+ test_cmdline = [
+ self.PathJoin('bin', 'run_%s' % target_name),
+ '--logcat-output-file', '${ISOLATED_OUTDIR}/logcats',
+ '--target-devices-file', '${SWARMING_BOT_FILE}',
+ '-v'
+ ]
+ cmdline = (['./../../build/android/test_wrapper/logdog_wrapper.py']
+ + logdog_command + test_cmdline)
+ elif use_x11 and test_type == 'windowed_test_launcher':
+ extra_files = [
+ 'xdisplaycheck',
+ '../../testing/test_env.py',
+ '../../testing/xvfb.py',
+ ]
+ cmdline = [
+ '../../testing/xvfb.py',
+ '.',
+ './' + str(executable) + executable_suffix,
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=%d' % asan,
+ '--msan=%d' % msan,
+ '--tsan=%d' % tsan,
+ ]
+ elif test_type in ('windowed_test_launcher', 'console_test_launcher'):
+ extra_files = [
+ '../../testing/test_env.py'
+ ]
+ cmdline = [
+ '../../testing/test_env.py',
+ './' + str(executable) + executable_suffix,
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=%d' % asan,
+ '--msan=%d' % msan,
+ '--tsan=%d' % tsan,
+ ]
+ elif test_type == 'gpu_browser_test':
+ extra_files = [
+ '../../testing/test_env.py'
+ ]
+ gtest_filter = gn_isolate_map[target]['gtest_filter']
+ cmdline = [
+ '../../testing/test_env.py',
+ './browser_tests' + executable_suffix,
+ '--test-launcher-bot-mode',
+ '--enable-gpu',
+ '--test-launcher-jobs=1',
+ '--gtest_filter=%s' % gtest_filter,
+ ]
+ elif test_type == 'script':
+ extra_files = [
+ '../../testing/test_env.py'
+ ]
+ cmdline = [
+ '../../testing/test_env.py',
+ '../../' + self.ToSrcRelPath(gn_isolate_map[target]['script'])
+ ]
+ elif test_type in ('raw'):
+ extra_files = []
+ cmdline = [
+ './' + str(target) + executable_suffix,
+ ]
+
+ else:
+ self.WriteFailureAndRaise('No command line for %s found (test type %s).'
+ % (target, test_type), output_path=None)
+
+ cmdline += gn_isolate_map[target_name].get('args', [])
+
+ return cmdline, extra_files
+
+ def ToAbsPath(self, build_path, *comps):
+ return self.PathJoin(self.chromium_src_dir,
+ self.ToSrcRelPath(build_path),
+ *comps)
+
+ def ToSrcRelPath(self, path):
+ """Returns a relative path from the top of the repo."""
+ if path.startswith('//'):
+ return path[2:].replace('/', self.sep)
+ return self.RelPath(path, self.chromium_src_dir)
+
+ def ParseGYPConfigPath(self, path):
+ rpath = self.ToSrcRelPath(path)
+ output_dir, _, _ = rpath.rpartition(self.sep)
+ return output_dir
+
+ def GYPCmd(self, output_dir, vals):
+ if vals['cros_passthrough']:
+ if not 'GYP_DEFINES' in os.environ:
+ raise MBErr('MB is expecting GYP_DEFINES to be in the environment')
+ gyp_defines = os.environ['GYP_DEFINES']
+ if not 'chromeos=1' in gyp_defines:
+ raise MBErr('GYP_DEFINES is missing chromeos=1: (GYP_DEFINES=%s)' %
+ gyp_defines)
+ else:
+ gyp_defines = vals['gyp_defines']
+
+ goma_dir = self.args.goma_dir
+
+ # GYP uses shlex.split() to split the gyp defines into separate arguments,
+ # so we can support backslashes and and spaces in arguments by quoting
+ # them, even on Windows, where this normally wouldn't work.
+ if goma_dir and ('\\' in goma_dir or ' ' in goma_dir):
+ goma_dir = "'%s'" % goma_dir
+
+ if goma_dir:
+ gyp_defines += ' gomadir=%s' % goma_dir
+
+ android_version_code = self.args.android_version_code
+ if android_version_code:
+ gyp_defines += ' app_manifest_version_code=%s' % android_version_code
+
+ android_version_name = self.args.android_version_name
+ if android_version_name:
+ gyp_defines += ' app_manifest_version_name=%s' % android_version_name
+
+ cmd = [
+ self.executable,
+ self.args.gyp_script,
+ '-G',
+ 'output_dir=' + output_dir,
+ ]
+
+ # Ensure that we have an environment that only contains
+ # the exact values of the GYP variables we need.
+ env = os.environ.copy()
+
+ # This is a terrible hack to work around the fact that
+ # //tools/clang/scripts/update.py is invoked by GYP and GN but
+ # currently relies on an environment variable to figure out
+ # what revision to embed in the command line #defines.
+ # For GN, we've made this work via a gn arg that will cause update.py
+ # to get an additional command line arg, but getting that to work
+ # via GYP_DEFINES has proven difficult, so we rewrite the GYP_DEFINES
+ # to get rid of the arg and add the old var in, instead.
+ # See crbug.com/582737 for more on this. This can hopefully all
+ # go away with GYP.
+ m = re.search('llvm_force_head_revision=1\s*', gyp_defines)
+ if m:
+ env['LLVM_FORCE_HEAD_REVISION'] = '1'
+ gyp_defines = gyp_defines.replace(m.group(0), '')
+
+ # This is another terrible hack to work around the fact that
+ # GYP sets the link concurrency to use via the GYP_LINK_CONCURRENCY
+ # environment variable, and not via a proper GYP_DEFINE. See
+ # crbug.com/611491 for more on this.
+ m = re.search('gyp_link_concurrency=(\d+)(\s*)', gyp_defines)
+ if m:
+ env['GYP_LINK_CONCURRENCY'] = m.group(1)
+ gyp_defines = gyp_defines.replace(m.group(0), '')
+
+ env['GYP_GENERATORS'] = 'ninja'
+ if 'GYP_CHROMIUM_NO_ACTION' in env:
+ del env['GYP_CHROMIUM_NO_ACTION']
+ if 'GYP_CROSSCOMPILE' in env:
+ del env['GYP_CROSSCOMPILE']
+ env['GYP_DEFINES'] = gyp_defines
+ if vals['gyp_crosscompile']:
+ env['GYP_CROSSCOMPILE'] = '1'
+ return cmd, env
+
+ def RunGNAnalyze(self, vals):
+ # analyze runs before 'gn gen' now, so we need to run gn gen
+ # in order to ensure that we have a build directory.
+ ret = self.RunGNGen(vals)
+ if ret:
+ return ret
+
+ inp = self.ReadInputJSON(['files', 'test_targets',
+ 'additional_compile_targets'])
+ if self.args.verbose:
+ self.Print()
+ self.Print('analyze input:')
+ self.PrintJSON(inp)
+ self.Print()
+
+ # TODO(crbug.com/555273) - currently GN treats targets and
+ # additional_compile_targets identically since we can't tell the
+ # difference between a target that is a group in GN and one that isn't.
+ # We should eventually fix this and treat the two types differently.
+ targets = (set(inp['test_targets']) |
+ set(inp['additional_compile_targets']))
+
+ output_path = self.args.output_path[0]
+
+ # Bail out early if a GN file was modified, since 'gn refs' won't know
+ # what to do about it. Also, bail out early if 'all' was asked for,
+ # since we can't deal with it yet.
+ if (any(f.endswith('.gn') or f.endswith('.gni') for f in inp['files']) or
+ 'all' in targets):
+ self.WriteJSON({
+ 'status': 'Found dependency (all)',
+ 'compile_targets': sorted(targets),
+ 'test_targets': sorted(targets & set(inp['test_targets'])),
+ }, output_path)
+ return 0
+
+ # This shouldn't normally happen, but could due to unusual race conditions,
+ # like a try job that gets scheduled before a patch lands but runs after
+ # the patch has landed.
+ if not inp['files']:
+ self.Print('Warning: No files modified in patch, bailing out early.')
+ self.WriteJSON({
+ 'status': 'No dependency',
+ 'compile_targets': [],
+ 'test_targets': [],
+ }, output_path)
+ return 0
+
+ ret = 0
+ response_file = self.TempFile()
+ response_file.write('\n'.join(inp['files']) + '\n')
+ response_file.close()
+
+ matching_targets = set()
+ try:
+ cmd = self.GNCmd('refs',
+ self.args.path[0],
+ '@%s' % response_file.name,
+ '--all',
+ '--as=output')
+ ret, out, _ = self.Run(cmd, force_verbose=False)
+ if ret and not 'The input matches no targets' in out:
+ self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out),
+ output_path)
+ build_dir = self.ToSrcRelPath(self.args.path[0]) + self.sep
+ for output in out.splitlines():
+ build_output = output.replace(build_dir, '')
+ if build_output in targets:
+ matching_targets.add(build_output)
+
+ cmd = self.GNCmd('refs',
+ self.args.path[0],
+ '@%s' % response_file.name,
+ '--all')
+ ret, out, _ = self.Run(cmd, force_verbose=False)
+ if ret and not 'The input matches no targets' in out:
+ self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out),
+ output_path)
+ for label in out.splitlines():
+ build_target = label[2:]
+ # We want to accept 'chrome/android:chrome_public_apk' and
+ # just 'chrome_public_apk'. This may result in too many targets
+ # getting built, but we can adjust that later if need be.
+ for input_target in targets:
+ if (input_target == build_target or
+ build_target.endswith(':' + input_target)):
+ matching_targets.add(input_target)
+ finally:
+ self.RemoveFile(response_file.name)
+
+ if matching_targets:
+ self.WriteJSON({
+ 'status': 'Found dependency',
+ 'compile_targets': sorted(matching_targets),
+ 'test_targets': sorted(matching_targets &
+ set(inp['test_targets'])),
+ }, output_path)
+ else:
+ self.WriteJSON({
+ 'status': 'No dependency',
+ 'compile_targets': [],
+ 'test_targets': [],
+ }, output_path)
+
+ if self.args.verbose:
+ outp = json.loads(self.ReadFile(output_path))
+ self.Print()
+ self.Print('analyze output:')
+ self.PrintJSON(outp)
+ self.Print()
+
+ return 0
+
+ def ReadInputJSON(self, required_keys):
+ path = self.args.input_path[0]
+ output_path = self.args.output_path[0]
+ if not self.Exists(path):
+ self.WriteFailureAndRaise('"%s" does not exist' % path, output_path)
+
+ try:
+ inp = json.loads(self.ReadFile(path))
+ except Exception as e:
+ self.WriteFailureAndRaise('Failed to read JSON input from "%s": %s' %
+ (path, e), output_path)
+
+ for k in required_keys:
+ if not k in inp:
+ self.WriteFailureAndRaise('input file is missing a "%s" key' % k,
+ output_path)
+
+ return inp
+
+ def WriteFailureAndRaise(self, msg, output_path):
+ if output_path:
+ self.WriteJSON({'error': msg}, output_path, force_verbose=True)
+ raise MBErr(msg)
+
+ def WriteJSON(self, obj, path, force_verbose=False):
+ try:
+ self.WriteFile(path, json.dumps(obj, indent=2, sort_keys=True) + '\n',
+ force_verbose=force_verbose)
+ except Exception as e:
+ raise MBErr('Error %s writing to the output path "%s"' %
+ (e, path))
+
+ def CheckCompile(self, master, builder):
+ url_template = self.args.url_template + '/{builder}/builds/_all?as_text=1'
+ url = urllib2.quote(url_template.format(master=master, builder=builder),
+ safe=':/()?=')
+ try:
+ builds = json.loads(self.Fetch(url))
+ except Exception as e:
+ return str(e)
+ successes = sorted(
+ [int(x) for x in builds.keys() if "text" in builds[x] and
+ cmp(builds[x]["text"][:2], ["build", "successful"]) == 0],
+ reverse=True)
+ if not successes:
+ return "no successful builds"
+ build = builds[str(successes[0])]
+ step_names = set([step["name"] for step in build["steps"]])
+ compile_indicators = set(["compile", "compile (with patch)", "analyze"])
+ if compile_indicators & step_names:
+ return "compiles"
+ return "does not compile"
+
+ def PrintCmd(self, cmd, env):
+ if self.platform == 'win32':
+ env_prefix = 'set '
+ env_quoter = QuoteForSet
+ shell_quoter = QuoteForCmd
+ else:
+ env_prefix = ''
+ env_quoter = pipes.quote
+ shell_quoter = pipes.quote
+
+ def print_env(var):
+ if env and var in env:
+ self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var])))
+
+ print_env('GYP_CROSSCOMPILE')
+ print_env('GYP_DEFINES')
+ print_env('GYP_LINK_CONCURRENCY')
+ print_env('LLVM_FORCE_HEAD_REVISION')
+
+ if cmd[0] == self.executable:
+ cmd = ['python'] + cmd[1:]
+ self.Print(*[shell_quoter(arg) for arg in cmd])
+
+ def PrintJSON(self, obj):
+ self.Print(json.dumps(obj, indent=2, sort_keys=True))
+
+ def GNTargetName(self, target):
+ return target
+
+ def Build(self, target):
+ build_dir = self.ToSrcRelPath(self.args.path[0])
+ ninja_cmd = ['ninja', '-C', build_dir]
+ if self.args.jobs:
+ ninja_cmd.extend(['-j', '%d' % self.args.jobs])
+ ninja_cmd.append(target)
+ ret, _, _ = self.Run(ninja_cmd, force_verbose=False, buffer_output=False)
+ return ret
+
+ def Run(self, cmd, env=None, force_verbose=True, buffer_output=True):
+ # This function largely exists so it can be overridden for testing.
+ if self.args.dryrun or self.args.verbose or force_verbose:
+ self.PrintCmd(cmd, env)
+ if self.args.dryrun:
+ return 0, '', ''
+
+ ret, out, err = self.Call(cmd, env=env, buffer_output=buffer_output)
+ if self.args.verbose or force_verbose:
+ if ret:
+ self.Print(' -> returned %d' % ret)
+ if out:
+ self.Print(out, end='')
+ if err:
+ self.Print(err, end='', file=sys.stderr)
+ return ret, out, err
+
+ def Call(self, cmd, env=None, buffer_output=True):
+ if buffer_output:
+ p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ env=env)
+ out, err = p.communicate()
+ else:
+ p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
+ env=env)
+ p.wait()
+ out = err = ''
+ return p.returncode, out, err
+
+ def ExpandUser(self, path):
+ # This function largely exists so it can be overridden for testing.
+ return os.path.expanduser(path)
+
+ def Exists(self, path):
+ # This function largely exists so it can be overridden for testing.
+ return os.path.exists(path)
+
+ def Fetch(self, url):
+ # This function largely exists so it can be overridden for testing.
+ f = urllib2.urlopen(url)
+ contents = f.read()
+ f.close()
+ return contents
+
+ def MaybeMakeDirectory(self, path):
+ try:
+ os.makedirs(path)
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ def PathJoin(self, *comps):
+ # This function largely exists so it can be overriden for testing.
+ return os.path.join(*comps)
+
+ def Print(self, *args, **kwargs):
+ # This function largely exists so it can be overridden for testing.
+ print(*args, **kwargs)
+ if kwargs.get('stream', sys.stdout) == sys.stdout:
+ sys.stdout.flush()
+
+ def ReadFile(self, path):
+ # This function largely exists so it can be overriden for testing.
+ with open(path) as fp:
+ return fp.read()
+
+ def RelPath(self, path, start='.'):
+ # This function largely exists so it can be overriden for testing.
+ return os.path.relpath(path, start)
+
+ def RemoveFile(self, path):
+ # This function largely exists so it can be overriden for testing.
+ os.remove(path)
+
+ def RemoveDirectory(self, abs_path):
+ if self.platform == 'win32':
+ # In other places in chromium, we often have to retry this command
+ # because we're worried about other processes still holding on to
+ # file handles, but when MB is invoked, it will be early enough in the
+ # build that their should be no other processes to interfere. We
+ # can change this if need be.
+ self.Run(['cmd.exe', '/c', 'rmdir', '/q', '/s', abs_path])
+ else:
+ shutil.rmtree(abs_path, ignore_errors=True)
+
+ def TempFile(self, mode='w'):
+ # This function largely exists so it can be overriden for testing.
+ return tempfile.NamedTemporaryFile(mode=mode, delete=False)
+
+ def WriteFile(self, path, contents, force_verbose=False):
+ # This function largely exists so it can be overriden for testing.
+ if self.args.dryrun or self.args.verbose or force_verbose:
+ self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
+ with open(path, 'w') as fp:
+ return fp.write(contents)
+
+
+class MBErr(Exception):
+ pass
+
+
+# See http://goo.gl/l5NPDW and http://goo.gl/4Diozm for the painful
+# details of this next section, which handles escaping command lines
+# so that they can be copied and pasted into a cmd window.
+UNSAFE_FOR_SET = set('^<>&|')
+UNSAFE_FOR_CMD = UNSAFE_FOR_SET.union(set('()%'))
+ALL_META_CHARS = UNSAFE_FOR_CMD.union(set('"'))
+
+
+def QuoteForSet(arg):
+ if any(a in UNSAFE_FOR_SET for a in arg):
+ arg = ''.join('^' + a if a in UNSAFE_FOR_SET else a for a in arg)
+ return arg
+
+
+def QuoteForCmd(arg):
+ # First, escape the arg so that CommandLineToArgvW will parse it properly.
+ # From //tools/gyp/pylib/gyp/msvs_emulation.py:23.
+ if arg == '' or ' ' in arg or '"' in arg:
+ quote_re = re.compile(r'(\\*)"')
+ arg = '"%s"' % (quote_re.sub(lambda mo: 2 * mo.group(1) + '\\"', arg))
+
+ # Then check to see if the arg contains any metacharacters other than
+ # double quotes; if it does, quote everything (including the double
+ # quotes) for safety.
+ if any(a in UNSAFE_FOR_CMD for a in arg):
+ arg = ''.join('^' + a if a in ALL_META_CHARS else a for a in arg)
+ return arg
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/deps/v8/tools/mb/mb_unittest.py b/deps/v8/tools/mb/mb_unittest.py
new file mode 100755
index 0000000000..ac58c0284f
--- /dev/null
+++ b/deps/v8/tools/mb/mb_unittest.py
@@ -0,0 +1,572 @@
+#!/usr/bin/python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for mb.py."""
+
+import json
+import StringIO
+import os
+import sys
+import unittest
+
+import mb
+
+
+class FakeMBW(mb.MetaBuildWrapper):
+ def __init__(self, win32=False):
+ super(FakeMBW, self).__init__()
+
+ # Override vars for test portability.
+ if win32:
+ self.chromium_src_dir = 'c:\\fake_src'
+ self.default_config = 'c:\\fake_src\\tools\\mb\\mb_config.pyl'
+ self.platform = 'win32'
+ self.executable = 'c:\\python\\python.exe'
+ self.sep = '\\'
+ else:
+ self.chromium_src_dir = '/fake_src'
+ self.default_config = '/fake_src/tools/mb/mb_config.pyl'
+ self.executable = '/usr/bin/python'
+ self.platform = 'linux2'
+ self.sep = '/'
+
+ self.files = {}
+ self.calls = []
+ self.cmds = []
+ self.cross_compile = None
+ self.out = ''
+ self.err = ''
+ self.rmdirs = []
+
+ def ExpandUser(self, path):
+ return '$HOME/%s' % path
+
+ def Exists(self, path):
+ return self.files.get(path) is not None
+
+ def MaybeMakeDirectory(self, path):
+ self.files[path] = True
+
+ def PathJoin(self, *comps):
+ return self.sep.join(comps)
+
+ def ReadFile(self, path):
+ return self.files[path]
+
+ def WriteFile(self, path, contents, force_verbose=False):
+ if self.args.dryrun or self.args.verbose or force_verbose:
+ self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
+ self.files[path] = contents
+
+ def Call(self, cmd, env=None, buffer_output=True):
+ if env:
+ self.cross_compile = env.get('GYP_CROSSCOMPILE')
+ self.calls.append(cmd)
+ if self.cmds:
+ return self.cmds.pop(0)
+ return 0, '', ''
+
+ def Print(self, *args, **kwargs):
+ sep = kwargs.get('sep', ' ')
+ end = kwargs.get('end', '\n')
+ f = kwargs.get('file', sys.stdout)
+ if f == sys.stderr:
+ self.err += sep.join(args) + end
+ else:
+ self.out += sep.join(args) + end
+
+ def TempFile(self, mode='w'):
+ return FakeFile(self.files)
+
+ def RemoveFile(self, path):
+ del self.files[path]
+
+ def RemoveDirectory(self, path):
+ self.rmdirs.append(path)
+ files_to_delete = [f for f in self.files if f.startswith(path)]
+ for f in files_to_delete:
+ self.files[f] = None
+
+
+class FakeFile(object):
+ def __init__(self, files):
+ self.name = '/tmp/file'
+ self.buf = ''
+ self.files = files
+
+ def write(self, contents):
+ self.buf += contents
+
+ def close(self):
+ self.files[self.name] = self.buf
+
+
+TEST_CONFIG = """\
+{
+ 'masters': {
+ 'chromium': {},
+ 'fake_master': {
+ 'fake_builder': 'gyp_rel_bot',
+ 'fake_gn_builder': 'gn_rel_bot',
+ 'fake_gyp_crosscompile_builder': 'gyp_crosscompile',
+ 'fake_gn_debug_builder': 'gn_debug_goma',
+ 'fake_gyp_builder': 'gyp_debug',
+ 'fake_gn_args_bot': '//build/args/bots/fake_master/fake_gn_args_bot.gn',
+ 'fake_multi_phase': ['gn_phase_1', 'gn_phase_2'],
+ },
+ },
+ 'configs': {
+ 'gyp_rel_bot': ['gyp', 'rel', 'goma'],
+ 'gn_debug_goma': ['gn', 'debug', 'goma'],
+ 'gyp_debug': ['gyp', 'debug', 'fake_feature1'],
+ 'gn_rel_bot': ['gn', 'rel', 'goma'],
+ 'gyp_crosscompile': ['gyp', 'crosscompile'],
+ 'gn_phase_1': ['gn', 'phase_1'],
+ 'gn_phase_2': ['gn', 'phase_2'],
+ },
+ 'mixins': {
+ 'crosscompile': {
+ 'gyp_crosscompile': True,
+ },
+ 'fake_feature1': {
+ 'gn_args': 'enable_doom_melon=true',
+ 'gyp_defines': 'doom_melon=1',
+ },
+ 'gyp': {'type': 'gyp'},
+ 'gn': {'type': 'gn'},
+ 'goma': {
+ 'gn_args': 'use_goma=true',
+ 'gyp_defines': 'goma=1',
+ },
+ 'phase_1': {
+ 'gn_args': 'phase=1',
+ 'gyp_args': 'phase=1',
+ },
+ 'phase_2': {
+ 'gn_args': 'phase=2',
+ 'gyp_args': 'phase=2',
+ },
+ 'rel': {
+ 'gn_args': 'is_debug=false',
+ },
+ 'debug': {
+ 'gn_args': 'is_debug=true',
+ },
+ },
+}
+"""
+
+
+TEST_BAD_CONFIG = """\
+{
+ 'configs': {
+ 'gn_rel_bot_1': ['gn', 'rel', 'chrome_with_codecs'],
+ 'gn_rel_bot_2': ['gn', 'rel', 'bad_nested_config'],
+ },
+ 'masters': {
+ 'chromium': {
+ 'a': 'gn_rel_bot_1',
+ 'b': 'gn_rel_bot_2',
+ },
+ },
+ 'mixins': {
+ 'gn': {'type': 'gn'},
+ 'chrome_with_codecs': {
+ 'gn_args': 'proprietary_codecs=true',
+ },
+ 'bad_nested_config': {
+ 'mixins': ['chrome_with_codecs'],
+ },
+ 'rel': {
+ 'gn_args': 'is_debug=false',
+ },
+ },
+}
+"""
+
+
+GYP_HACKS_CONFIG = """\
+{
+ 'masters': {
+ 'chromium': {},
+ 'fake_master': {
+ 'fake_builder': 'fake_config',
+ },
+ },
+ 'configs': {
+ 'fake_config': ['fake_mixin'],
+ },
+ 'mixins': {
+ 'fake_mixin': {
+ 'type': 'gyp',
+ 'gn_args': '',
+ 'gyp_defines':
+ ('foo=bar llvm_force_head_revision=1 '
+ 'gyp_link_concurrency=1 baz=1'),
+ },
+ },
+}
+"""
+
+
+class UnitTest(unittest.TestCase):
+ def fake_mbw(self, files=None, win32=False):
+ mbw = FakeMBW(win32=win32)
+ mbw.files.setdefault(mbw.default_config, TEST_CONFIG)
+ mbw.files.setdefault(
+ mbw.ToAbsPath('//build/args/bots/fake_master/fake_gn_args_bot.gn'),
+ 'is_debug = false\n')
+ if files:
+ for path, contents in files.items():
+ mbw.files[path] = contents
+ return mbw
+
+ def check(self, args, mbw=None, files=None, out=None, err=None, ret=None):
+ if not mbw:
+ mbw = self.fake_mbw(files)
+
+ actual_ret = mbw.Main(args)
+
+ self.assertEqual(actual_ret, ret)
+ if out is not None:
+ self.assertEqual(mbw.out, out)
+ if err is not None:
+ self.assertEqual(mbw.err, err)
+ return mbw
+
+ def test_clobber(self):
+ files = {
+ '/fake_src/out/Debug': None,
+ '/fake_src/out/Debug/mb_type': None,
+ }
+ mbw = self.fake_mbw(files)
+
+ # The first time we run this, the build dir doesn't exist, so no clobber.
+ self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0)
+ self.assertEqual(mbw.rmdirs, [])
+ self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
+
+ # The second time we run this, the build dir exists and matches, so no
+ # clobber.
+ self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0)
+ self.assertEqual(mbw.rmdirs, [])
+ self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
+
+ # Now we switch build types; this should result in a clobber.
+ self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
+ self.assertEqual(mbw.rmdirs, ['/fake_src/out/Debug'])
+ self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
+
+ # Now we delete mb_type; this checks the case where the build dir
+ # exists but wasn't populated by mb; this should also result in a clobber.
+ del mbw.files['/fake_src/out/Debug/mb_type']
+ self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
+ self.assertEqual(mbw.rmdirs,
+ ['/fake_src/out/Debug', '/fake_src/out/Debug'])
+ self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
+
+ def test_gn_analyze(self):
+ files = {'/tmp/in.json': """{\
+ "files": ["foo/foo_unittest.cc"],
+ "test_targets": ["foo_unittests", "bar_unittests"],
+ "additional_compile_targets": []
+ }"""}
+
+ mbw = self.fake_mbw(files)
+ mbw.Call = lambda cmd, env=None, buffer_output=True: (
+ 0, 'out/Default/foo_unittests\n', '')
+
+ self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+ '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
+ out = json.loads(mbw.files['/tmp/out.json'])
+ self.assertEqual(out, {
+ 'status': 'Found dependency',
+ 'compile_targets': ['foo_unittests'],
+ 'test_targets': ['foo_unittests']
+ })
+
+ def test_gn_analyze_fails(self):
+ files = {'/tmp/in.json': """{\
+ "files": ["foo/foo_unittest.cc"],
+ "test_targets": ["foo_unittests", "bar_unittests"],
+ "additional_compile_targets": []
+ }"""}
+
+ mbw = self.fake_mbw(files)
+ mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
+
+ self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+ '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=1)
+
+ def test_gn_analyze_all(self):
+ files = {'/tmp/in.json': """{\
+ "files": ["foo/foo_unittest.cc"],
+ "test_targets": ["bar_unittests"],
+ "additional_compile_targets": ["all"]
+ }"""}
+ mbw = self.fake_mbw(files)
+ mbw.Call = lambda cmd, env=None, buffer_output=True: (
+ 0, 'out/Default/foo_unittests\n', '')
+ self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+ '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
+ out = json.loads(mbw.files['/tmp/out.json'])
+ self.assertEqual(out, {
+ 'status': 'Found dependency (all)',
+ 'compile_targets': ['all', 'bar_unittests'],
+ 'test_targets': ['bar_unittests'],
+ })
+
+ def test_gn_analyze_missing_file(self):
+ files = {'/tmp/in.json': """{\
+ "files": ["foo/foo_unittest.cc"],
+ "test_targets": ["bar_unittests"],
+ "additional_compile_targets": []
+ }"""}
+ mbw = self.fake_mbw(files)
+ mbw.cmds = [
+ (0, '', ''),
+ (1, 'The input matches no targets, configs, or files\n', ''),
+ (1, 'The input matches no targets, configs, or files\n', ''),
+ ]
+
+ self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+ '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
+ out = json.loads(mbw.files['/tmp/out.json'])
+ self.assertEqual(out, {
+ 'status': 'No dependency',
+ 'compile_targets': [],
+ 'test_targets': [],
+ })
+
+ def test_gn_gen(self):
+ mbw = self.fake_mbw()
+ self.check(['gen', '-c', 'gn_debug_goma', '//out/Default', '-g', '/goma'],
+ mbw=mbw, ret=0)
+ self.assertMultiLineEqual(mbw.files['/fake_src/out/Default/args.gn'],
+ ('goma_dir = "/goma"\n'
+ 'is_debug = true\n'
+ 'use_goma = true\n'))
+
+ # Make sure we log both what is written to args.gn and the command line.
+ self.assertIn('Writing """', mbw.out)
+ self.assertIn('/fake_src/buildtools/linux64/gn gen //out/Default --check',
+ mbw.out)
+
+ mbw = self.fake_mbw(win32=True)
+ self.check(['gen', '-c', 'gn_debug_goma', '-g', 'c:\\goma', '//out/Debug'],
+ mbw=mbw, ret=0)
+ self.assertMultiLineEqual(mbw.files['c:\\fake_src\\out\\Debug\\args.gn'],
+ ('goma_dir = "c:\\\\goma"\n'
+ 'is_debug = true\n'
+ 'use_goma = true\n'))
+ self.assertIn('c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug '
+ '--check\n', mbw.out)
+
+ mbw = self.fake_mbw()
+ self.check(['gen', '-m', 'fake_master', '-b', 'fake_gn_args_bot',
+ '//out/Debug'],
+ mbw=mbw, ret=0)
+ self.assertEqual(
+ mbw.files['/fake_src/out/Debug/args.gn'],
+ 'import("//build/args/bots/fake_master/fake_gn_args_bot.gn")\n')
+
+
+ def test_gn_gen_fails(self):
+ mbw = self.fake_mbw()
+ mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
+ self.check(['gen', '-c', 'gn_debug_goma', '//out/Default'], mbw=mbw, ret=1)
+
+ def test_gn_gen_swarming(self):
+ files = {
+ '/tmp/swarming_targets': 'base_unittests\n',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+ "{'base_unittests': {"
+ " 'label': '//base:base_unittests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"
+ ),
+ '/fake_src/out/Default/base_unittests.runtime_deps': (
+ "base_unittests\n"
+ ),
+ }
+ mbw = self.fake_mbw(files)
+ self.check(['gen',
+ '-c', 'gn_debug_goma',
+ '--swarming-targets-file', '/tmp/swarming_targets',
+ '//out/Default'], mbw=mbw, ret=0)
+ self.assertIn('/fake_src/out/Default/base_unittests.isolate',
+ mbw.files)
+ self.assertIn('/fake_src/out/Default/base_unittests.isolated.gen.json',
+ mbw.files)
+
+ def test_gn_isolate(self):
+ files = {
+ '/fake_src/out/Default/toolchain.ninja': "",
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+ "{'base_unittests': {"
+ " 'label': '//base:base_unittests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"
+ ),
+ '/fake_src/out/Default/base_unittests.runtime_deps': (
+ "base_unittests\n"
+ ),
+ }
+ self.check(['isolate', '-c', 'gn_debug_goma', '//out/Default',
+ 'base_unittests'], files=files, ret=0)
+
+ # test running isolate on an existing build_dir
+ files['/fake_src/out/Default/args.gn'] = 'is_debug = True\n'
+ self.check(['isolate', '//out/Default', 'base_unittests'],
+ files=files, ret=0)
+
+ files['/fake_src/out/Default/mb_type'] = 'gn\n'
+ self.check(['isolate', '//out/Default', 'base_unittests'],
+ files=files, ret=0)
+
+ def test_gn_run(self):
+ files = {
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+ "{'base_unittests': {"
+ " 'label': '//base:base_unittests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"
+ ),
+ '/fake_src/out/Default/base_unittests.runtime_deps': (
+ "base_unittests\n"
+ ),
+ }
+ self.check(['run', '-c', 'gn_debug_goma', '//out/Default',
+ 'base_unittests'], files=files, ret=0)
+
+ def test_gn_lookup(self):
+ self.check(['lookup', '-c', 'gn_debug_goma'], ret=0)
+
+ def test_gn_lookup_goma_dir_expansion(self):
+ self.check(['lookup', '-c', 'gn_rel_bot', '-g', '/foo'], ret=0,
+ out=('\n'
+ 'Writing """\\\n'
+ 'goma_dir = "/foo"\n'
+ 'is_debug = false\n'
+ 'use_goma = true\n'
+ '""" to _path_/args.gn.\n\n'
+ '/fake_src/buildtools/linux64/gn gen _path_\n'))
+
+ def test_gyp_analyze(self):
+ mbw = self.check(['analyze', '-c', 'gyp_rel_bot', '//out/Release',
+ '/tmp/in.json', '/tmp/out.json'], ret=0)
+ self.assertIn('analyzer', mbw.calls[0])
+
+ def test_gyp_crosscompile(self):
+ mbw = self.fake_mbw()
+ self.check(['gen', '-c', 'gyp_crosscompile', '//out/Release'],
+ mbw=mbw, ret=0)
+ self.assertTrue(mbw.cross_compile)
+
+ def test_gyp_gen(self):
+ self.check(['gen', '-c', 'gyp_rel_bot', '-g', '/goma', '//out/Release'],
+ ret=0,
+ out=("GYP_DEFINES='goma=1 gomadir=/goma'\n"
+ "python build/gyp_chromium -G output_dir=out\n"))
+
+ mbw = self.fake_mbw(win32=True)
+ self.check(['gen', '-c', 'gyp_rel_bot', '-g', 'c:\\goma', '//out/Release'],
+ mbw=mbw, ret=0,
+ out=("set GYP_DEFINES=goma=1 gomadir='c:\\goma'\n"
+ "python build\\gyp_chromium -G output_dir=out\n"))
+
+ def test_gyp_gen_fails(self):
+ mbw = self.fake_mbw()
+ mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
+ self.check(['gen', '-c', 'gyp_rel_bot', '//out/Release'], mbw=mbw, ret=1)
+
+ def test_gyp_lookup_goma_dir_expansion(self):
+ self.check(['lookup', '-c', 'gyp_rel_bot', '-g', '/foo'], ret=0,
+ out=("GYP_DEFINES='goma=1 gomadir=/foo'\n"
+ "python build/gyp_chromium -G output_dir=_path_\n"))
+
+ def test_help(self):
+ orig_stdout = sys.stdout
+ try:
+ sys.stdout = StringIO.StringIO()
+ self.assertRaises(SystemExit, self.check, ['-h'])
+ self.assertRaises(SystemExit, self.check, ['help'])
+ self.assertRaises(SystemExit, self.check, ['help', 'gen'])
+ finally:
+ sys.stdout = orig_stdout
+
+ def test_multiple_phases(self):
+ # Check that not passing a --phase to a multi-phase builder fails.
+ mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase'],
+ ret=1)
+ self.assertIn('Must specify a build --phase', mbw.out)
+
+ # Check that passing a --phase to a single-phase builder fails.
+ mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_gn_builder',
+ '--phase', '1'],
+ ret=1)
+ self.assertIn('Must not specify a build --phase', mbw.out)
+
+ # Check different ranges; 0 and 3 are out of bounds, 1 and 2 should work.
+ mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
+ '--phase', '0'], ret=1)
+ self.assertIn('Phase 0 out of bounds', mbw.out)
+
+ mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
+ '--phase', '1'], ret=0)
+ self.assertIn('phase = 1', mbw.out)
+
+ mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
+ '--phase', '2'], ret=0)
+ self.assertIn('phase = 2', mbw.out)
+
+ mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
+ '--phase', '3'], ret=1)
+ self.assertIn('Phase 3 out of bounds', mbw.out)
+
+ def test_validate(self):
+ mbw = self.fake_mbw()
+ self.check(['validate'], mbw=mbw, ret=0)
+
+ def test_gyp_env_hacks(self):
+ mbw = self.fake_mbw()
+ mbw.files[mbw.default_config] = GYP_HACKS_CONFIG
+ self.check(['lookup', '-c', 'fake_config'], mbw=mbw,
+ ret=0,
+ out=("GYP_DEFINES='foo=bar baz=1'\n"
+ "GYP_LINK_CONCURRENCY=1\n"
+ "LLVM_FORCE_HEAD_REVISION=1\n"
+ "python build/gyp_chromium -G output_dir=_path_\n"))
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+ def test_validate(self):
+ mbw = self.fake_mbw()
+ self.check(['validate'], mbw=mbw, ret=0)
+
+ def test_bad_validate(self):
+ mbw = self.fake_mbw()
+ mbw.files[mbw.default_config] = TEST_BAD_CONFIG
+ self.check(['validate'], mbw=mbw, ret=1)
+
+ def test_gyp_env_hacks(self):
+ mbw = self.fake_mbw()
+ mbw.files[mbw.default_config] = GYP_HACKS_CONFIG
+ self.check(['lookup', '-c', 'fake_config'], mbw=mbw,
+ ret=0,
+ out=("GYP_DEFINES='foo=bar baz=1'\n"
+ "GYP_LINK_CONCURRENCY=1\n"
+ "LLVM_FORCE_HEAD_REVISION=1\n"
+ "python build/gyp_chromium -G output_dir=_path_\n"))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/oom_dump/README b/deps/v8/tools/oom_dump/README
deleted file mode 100644
index 1d840b9a9c..0000000000
--- a/deps/v8/tools/oom_dump/README
+++ /dev/null
@@ -1,33 +0,0 @@
-oom_dump extracts useful information from Google Chrome OOM minidumps.
-
-To build one needs a google-breakpad checkout
-(http://code.google.com/p/google-breakpad/).
-
-First, one needs to build and install breakpad itself. For instructions
-check google-breakpad, but currently it's as easy as:
-
- ./configure
- make
- sudo make install
-
-(the catch: breakpad installs .so into /usr/local/lib, so you might
-need some additional tweaking to make it discoverable, for example,
-put a soft link into /usr/lib directory).
-
-Next step is to build v8. Note: you should build x64 version of v8,
-if you're on 64-bit platform, otherwise you would get a link error when
-building oom_dump. Also, if you are testing against an older version of chrome
-you should build the corresponding version of V8 to make sure that the type-id
-enum have the correct values.
-
-The last step is to build oom_dump itself. The following command should work:
-
- cd <v8 working copy>/tools/oom_dump
- scons BREAKPAD_DIR=<path to google-breakpad working copy>
-
-(Additionally you can control v8 working copy dir, but the default should work.)
-
-If everything goes fine, oom_dump <path to minidump> should print
-some useful information about the OOM crash.
-
-Note: currently only 32-bit Windows minidumps are supported.
diff --git a/deps/v8/tools/oom_dump/SConstruct b/deps/v8/tools/oom_dump/SConstruct
deleted file mode 100644
index f228c89076..0000000000
--- a/deps/v8/tools/oom_dump/SConstruct
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2010 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-vars = Variables('custom.py')
-vars.Add(PathVariable('BREAKPAD_DIR',
- 'Path to checkout of google-breakpad project',
- '~/google-breakpad',
- PathVariable.PathIsDir))
-vars.Add(PathVariable('V8_DIR',
- 'Path to checkout of v8 project',
- '../..',
- PathVariable.PathIsDir))
-
-env = Environment(variables = vars,
- CPPPATH = ['${BREAKPAD_DIR}/src', '${V8_DIR}/src'],
- LIBPATH = ['/usr/local/lib', '${V8_DIR}'])
-
-env.Program('oom_dump.cc', LIBS = ['breakpad', 'v8', 'pthread'])
diff --git a/deps/v8/tools/oom_dump/oom_dump.cc b/deps/v8/tools/oom_dump/oom_dump.cc
deleted file mode 100644
index 581e1914e2..0000000000
--- a/deps/v8/tools/oom_dump/oom_dump.cc
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <algorithm>
-
-#include <google_breakpad/processor/minidump.h>
-
-#include <v8.h>
-
-namespace {
-
-using google_breakpad::Minidump;
-using google_breakpad::MinidumpContext;
-using google_breakpad::MinidumpThread;
-using google_breakpad::MinidumpThreadList;
-using google_breakpad::MinidumpException;
-using google_breakpad::MinidumpMemoryRegion;
-
-const char* InstanceTypeToString(int type) {
- static char const* names[v8::internal::LAST_TYPE] = {0};
- if (names[v8::internal::STRING_TYPE] == NULL) {
- using namespace v8::internal;
-#define SET(type) names[type] = #type;
- INSTANCE_TYPE_LIST(SET)
-#undef SET
- }
- return names[type];
-}
-
-
-u_int32_t ReadPointedValue(MinidumpMemoryRegion* region,
- u_int64_t base,
- int offset) {
- u_int32_t ptr = 0;
- CHECK(region->GetMemoryAtAddress(base + 4 * offset, &ptr));
- u_int32_t value = 0;
- CHECK(region->GetMemoryAtAddress(ptr, &value));
- return value;
-}
-
-
-void ReadArray(MinidumpMemoryRegion* region,
- u_int64_t array_ptr,
- int size,
- int* output) {
- for (int i = 0; i < size; i++) {
- u_int32_t value;
- CHECK(region->GetMemoryAtAddress(array_ptr + 4 * i, &value));
- output[i] = value;
- }
-}
-
-
-u_int32_t ReadArrayFrom(MinidumpMemoryRegion* region,
- u_int64_t base,
- int offset,
- int size,
- int* output) {
- u_int32_t ptr = 0;
- CHECK(region->GetMemoryAtAddress(base + 4 * offset, &ptr));
- ReadArray(region, ptr, size, output);
-}
-
-
-double toM(int size) {
- return size / (1024. * 1024.);
-}
-
-
-class IndirectSorter {
- public:
- explicit IndirectSorter(int* a) : a_(a) { }
-
- bool operator() (int i0, int i1) {
- return a_[i0] > a_[i1];
- }
-
- private:
- int* a_;
-};
-
-
-void DumpHeapStats(const char *minidump_file) {
- Minidump minidump(minidump_file);
- CHECK(minidump.Read());
-
- MinidumpException *exception = minidump.GetException();
- CHECK(exception);
-
- MinidumpContext* crash_context = exception->GetContext();
- CHECK(crash_context);
-
- u_int32_t exception_thread_id = 0;
- CHECK(exception->GetThreadID(&exception_thread_id));
-
- MinidumpThreadList* thread_list = minidump.GetThreadList();
- CHECK(thread_list);
-
- MinidumpThread* exception_thread =
- thread_list->GetThreadByID(exception_thread_id);
- CHECK(exception_thread);
-
- // Currently only 32-bit Windows minidumps are supported.
- CHECK_EQ(MD_CONTEXT_X86, crash_context->GetContextCPU());
-
- const MDRawContextX86* contextX86 = crash_context->GetContextX86();
- CHECK(contextX86);
-
- const u_int32_t esp = contextX86->esp;
-
- MinidumpMemoryRegion* memory_region = exception_thread->GetMemory();
- CHECK(memory_region);
-
- const u_int64_t last = memory_region->GetBase() + memory_region->GetSize();
-
- u_int64_t heap_stats_addr = 0;
- for (u_int64_t addr = esp; addr < last; addr += 4) {
- u_int32_t value = 0;
- CHECK(memory_region->GetMemoryAtAddress(addr, &value));
- if (value >= esp && value < last) {
- u_int32_t value2 = 0;
- CHECK(memory_region->GetMemoryAtAddress(value, &value2));
- if (value2 == v8::internal::HeapStats::kStartMarker) {
- heap_stats_addr = addr;
- break;
- }
- }
- }
- CHECK(heap_stats_addr);
-
- // Read heap stats.
-
-#define READ_FIELD(offset) \
- ReadPointedValue(memory_region, heap_stats_addr, offset)
-
- CHECK(READ_FIELD(0) == v8::internal::HeapStats::kStartMarker);
- CHECK(READ_FIELD(24) == v8::internal::HeapStats::kEndMarker);
-
- const int new_space_size = READ_FIELD(1);
- const int new_space_capacity = READ_FIELD(2);
- const int old_space_size = READ_FIELD(3);
- const int old_space_capacity = READ_FIELD(4);
- const int code_space_size = READ_FIELD(5);
- const int code_space_capacity = READ_FIELD(6);
- const int map_space_size = READ_FIELD(7);
- const int map_space_capacity = READ_FIELD(8);
- const int cell_space_size = READ_FIELD(9);
- const int cell_space_capacity = READ_FIELD(10);
- const int lo_space_size = READ_FIELD(11);
- const int global_handle_count = READ_FIELD(12);
- const int weak_global_handle_count = READ_FIELD(13);
- const int pending_global_handle_count = READ_FIELD(14);
- const int near_death_global_handle_count = READ_FIELD(15);
- const int destroyed_global_handle_count = READ_FIELD(16);
- const int memory_allocator_size = READ_FIELD(17);
- const int memory_allocator_capacity = READ_FIELD(18);
- const int os_error = READ_FIELD(19);
-#undef READ_FIELD
-
- int objects_per_type[v8::internal::LAST_TYPE + 1] = {0};
- ReadArrayFrom(memory_region, heap_stats_addr, 21,
- v8::internal::LAST_TYPE + 1, objects_per_type);
-
- int size_per_type[v8::internal::LAST_TYPE + 1] = {0};
- ReadArrayFrom(memory_region, heap_stats_addr, 22, v8::internal::LAST_TYPE + 1,
- size_per_type);
-
- int js_global_objects =
- objects_per_type[v8::internal::JS_GLOBAL_OBJECT_TYPE];
- int js_builtins_objects =
- objects_per_type[v8::internal::JS_BUILTINS_OBJECT_TYPE];
- int js_global_proxies =
- objects_per_type[v8::internal::JS_GLOBAL_PROXY_TYPE];
-
- int indices[v8::internal::LAST_TYPE + 1];
- for (int i = 0; i <= v8::internal::LAST_TYPE; i++) {
- indices[i] = i;
- }
-
- std::stable_sort(indices, indices + sizeof(indices)/sizeof(indices[0]),
- IndirectSorter(size_per_type));
-
- int total_size = 0;
- for (int i = 0; i <= v8::internal::LAST_TYPE; i++) {
- total_size += size_per_type[i];
- }
-
- // Print heap stats.
-
- printf("exception thread ID: %" PRIu32 " (%#" PRIx32 ")\n",
- exception_thread_id, exception_thread_id);
- printf("heap stats address: %#" PRIx64 "\n", heap_stats_addr);
-#define PRINT_INT_STAT(stat) \
- printf("\t%-25s\t% 10d\n", #stat ":", stat);
-#define PRINT_MB_STAT(stat) \
- printf("\t%-25s\t% 10.3f MB\n", #stat ":", toM(stat));
- PRINT_MB_STAT(new_space_size);
- PRINT_MB_STAT(new_space_capacity);
- PRINT_MB_STAT(old_space_size);
- PRINT_MB_STAT(old_space_capacity);
- PRINT_MB_STAT(code_space_size);
- PRINT_MB_STAT(code_space_capacity);
- PRINT_MB_STAT(map_space_size);
- PRINT_MB_STAT(map_space_capacity);
- PRINT_MB_STAT(cell_space_size);
- PRINT_MB_STAT(cell_space_capacity);
- PRINT_MB_STAT(lo_space_size);
- PRINT_INT_STAT(global_handle_count);
- PRINT_INT_STAT(weak_global_handle_count);
- PRINT_INT_STAT(pending_global_handle_count);
- PRINT_INT_STAT(near_death_global_handle_count);
- PRINT_INT_STAT(destroyed_global_handle_count);
- PRINT_MB_STAT(memory_allocator_size);
- PRINT_MB_STAT(memory_allocator_capacity);
- PRINT_INT_STAT(os_error);
-#undef PRINT_STAT
-
- printf("\n");
-
- printf(
- "\tJS_GLOBAL_OBJECT_TYPE/JS_BUILTINS_OBJECT_TYPE/JS_GLOBAL_PROXY_TYPE: "
- "%d/%d/%d\n\n",
- js_global_objects, js_builtins_objects, js_global_proxies);
-
- int running_size = 0;
- for (int i = 0; i <= v8::internal::LAST_TYPE; i++) {
- int type = indices[i];
- const char* name = InstanceTypeToString(type);
- if (name == NULL) {
- // Unknown instance type. Check that there is no objects of that type.
- CHECK_EQ(0, objects_per_type[type]);
- CHECK_EQ(0, size_per_type[type]);
- continue;
- }
- int size = size_per_type[type];
- running_size += size;
- printf("\t%-37s% 9d% 11.3f MB% 10.3f%%% 10.3f%%\n",
- name, objects_per_type[type], toM(size),
- 100. * size / total_size, 100. * running_size / total_size);
- }
- printf("\t%-37s% 9d% 11.3f MB% 10.3f%%% 10.3f%%\n",
- "total", 0, toM(total_size), 100., 100.);
-}
-
-} // namespace
-
-int main(int argc, char **argv) {
- if (argc != 2) {
- fprintf(stderr, "usage: %s <minidump>\n", argv[0]);
- return 1;
- }
-
- DumpHeapStats(argv[1]);
-
- return 0;
-}
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index d503538fcb..3be9caf061 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -203,7 +203,7 @@ class CppLintProcessor(SourceFileProcessor):
def GetPathsToSearch(self):
return ['src', 'include', 'samples', join('test', 'cctest'),
- join('test', 'unittests')]
+ join('test', 'unittests'), join('test', 'inspector')]
def GetCpplintScript(self, prio_path):
for path in [prio_path] + os.environ["PATH"].split(os.pathsep):
@@ -295,13 +295,21 @@ class SourceProcessor(SourceFileProcessor):
IGNORE_COPYRIGHTS = ['box2d.js',
'cpplint.py',
+ 'check_injected_script_source.py',
'copy.js',
'corrections.js',
'crypto.js',
'daemon.py',
+ 'debugger-script.js',
'earley-boyer.js',
'fannkuch.js',
'fasta.js',
+ 'generate_protocol_externs.py',
+ 'injected-script.cc',
+ 'injected-script.h',
+ 'injected-script-source.js',
+ 'java-script-call-frame.cc',
+ 'java-script-call-frame.h',
'jsmin.py',
'libraries.cc',
'libraries-empty.cc',
@@ -311,10 +319,19 @@ class SourceProcessor(SourceFileProcessor):
'primes.js',
'raytrace.js',
'regexp-pcre.js',
+ 'rjsmin.py',
+ 'script-breakpoint.h',
'sqlite.js',
'sqlite-change-heap.js',
'sqlite-pointer-masking.js',
'sqlite-safe-heap.js',
+ 'v8-debugger-script.h',
+ 'v8-function-call.cc',
+ 'v8-function-call.h',
+ 'v8-inspector-impl.cc',
+ 'v8-inspector-impl.h',
+ 'v8-runtime-agent-impl.cc',
+ 'v8-runtime-agent-impl.h',
'gnuplot-4.6.3-emscripten.js',
'zlib.js']
IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
diff --git a/deps/v8/tools/run-perf.sh b/deps/v8/tools/run-perf.sh
index 03123fdbb8..83750936c8 100755
--- a/deps/v8/tools/run-perf.sh
+++ b/deps/v8/tools/run-perf.sh
@@ -45,9 +45,14 @@ if [ "$ACTUAL_KERNEL_MAP_RESTRICTION" -ne "0" ] ; then
echo 0 | sudo tee $KERNEL_MAP_CONFIG_FILE
fi
+# Extract the command being perfed, so that we can prepend arguments to the
+# arguments that the user supplied.
+COMMAND=$1
+shift 1
+
echo "Running..."
perf record -R \
-e $EVENT_TYPE \
-c $SAMPLE_EVERY_N_CYCLES \
--call-graph $CALL_GRAPH_METHOD \
- -i $@ --perf_basic_prof
+ -i "$COMMAND" --perf_basic_prof "$@"
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index de16463369..f248dff5cc 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -34,7 +34,7 @@ import json
import multiprocessing
import optparse
import os
-from os.path import join
+from os.path import getmtime, isdir, join
import platform
import random
import shlex
@@ -55,6 +55,8 @@ from testrunner.objects import context
# Base dir of the v8 checkout to be used as cwd.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+DEFAULT_OUT_GN = "out.gn"
+
ARCH_GUESS = utils.DefaultArch()
# Map of test name synonyms to lists of test suites. Should be ordered by
@@ -102,6 +104,7 @@ MORE_VARIANTS = [
"ignition",
"stress",
"turbofan_opt",
+ "asm_wasm",
]
EXHAUSTIVE_VARIANTS = VARIANTS + MORE_VARIANTS
@@ -294,6 +297,8 @@ def BuildOptions():
" \"%s\"" % ",".join(EXHAUSTIVE_VARIANTS))
result.add_option("--outdir", help="Base directory with compile output",
default="out")
+ result.add_option("--gn", help="Scan out.gn for the last built configuration",
+ default=False, action="store_true")
result.add_option("--predictable",
help="Compare output of several reruns of each test",
default=False, action="store_true")
@@ -427,6 +432,21 @@ def ProcessOptions(options):
# First try to auto-detect configurations based on the build if GN was
# used. This can't be overridden by cmd-line arguments.
options.auto_detect = False
+ if options.gn:
+ gn_out_dir = os.path.join(BASE_DIR, DEFAULT_OUT_GN)
+ latest_timestamp = -1
+ latest_config = None
+ for gn_config in os.listdir(gn_out_dir):
+ gn_config_dir = os.path.join(gn_out_dir, gn_config)
+ if not isdir(gn_config_dir):
+ continue
+ if os.path.getmtime(gn_config_dir) > latest_timestamp:
+ latest_timestamp = os.path.getmtime(gn_config_dir)
+ latest_config = gn_config
+ if latest_config:
+ print(">>> Latest GN build found is %s" % latest_config)
+ options.outdir = os.path.join(DEFAULT_OUT_GN, latest_config)
+
build_config_path = os.path.join(
BASE_DIR, options.outdir, "v8_build_config.json")
if os.path.exists(build_config_path):
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 11d2207427..f7fa19b20a 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -325,13 +325,22 @@ class GoogleTestSuite(TestSuite):
shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
if utils.IsWindows():
shell += ".exe"
- output = commands.Execute(context.command_prefix +
- [shell, "--gtest_list_tests"] +
- context.extra_flags)
- if output.exit_code != 0:
+
+ output = None
+ for i in xrange(3): # Try 3 times in case of errors.
+ output = commands.Execute(context.command_prefix +
+ [shell, "--gtest_list_tests"] +
+ context.extra_flags)
+ if output.exit_code == 0:
+ break
+ print "Test executable failed to list the tests (try %d).\n\nStdout:" % i
print output.stdout
+ print "\nStderr:"
print output.stderr
+ print "\nExit code: %d" % output.exit_code
+ else:
raise Exception("Test executable failed to list the tests.")
+
tests = []
test_case = ''
for line in output.stdout.splitlines():
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index b224e41d37..ea42bf5248 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -13,6 +13,7 @@ ALL_VARIANT_FLAGS = {
"ignition_staging": [["--ignition-staging"]],
"ignition_turbofan": [["--ignition-staging", "--turbo"]],
"preparser": [["--min-preparse-length=0"]],
+ "asm_wasm": [["--validate-asm"]],
}
# FAST_VARIANTS implies no --always-opt.
@@ -25,8 +26,9 @@ FAST_VARIANT_FLAGS = {
"ignition_staging": [["--ignition-staging"]],
"ignition_turbofan": [["--ignition-staging", "--turbo"]],
"preparser": [["--min-preparse-length=0"]],
+ "asm_wasm": [["--validate-asm"]],
}
ALL_VARIANTS = set(["default", "stress", "turbofan", "turbofan_opt",
"nocrankshaft", "ignition", "ignition_staging",
- "ignition_turbofan", "preparser"])
+ "ignition_turbofan", "preparser", "asm_wasm"])
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index e022ab45bf..05e240edb5 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -49,6 +49,7 @@ PUBLIC_BENCHMARKS = [
'simdjs',
'sunspider',
'sunspider-ignition',
+ 'unity',
'wasm',
]
diff --git a/deps/v8/tools/turbolizer/constants.js b/deps/v8/tools/turbolizer/constants.js
index f062fa2154..da92c45abc 100644
--- a/deps/v8/tools/turbolizer/constants.js
+++ b/deps/v8/tools/turbolizer/constants.js
@@ -20,5 +20,11 @@ var DISASSEMBLY_COLLAPSE_ID = 'disassembly-shrink';
var DISASSEMBLY_EXPAND_ID = 'disassembly-expand';
var COLLAPSE_PANE_BUTTON_VISIBLE = 'button-input';
var COLLAPSE_PANE_BUTTON_INVISIBLE = 'button-input-invisible';
-var PROF_HIGH = 5;
-var PROF_MED = 0.5;
+var UNICODE_BLOCK = '&#9611;';
+var PROF_COLS = [
+ { perc: 0, col: { r: 255, g: 255, b: 255 } },
+ { perc: 0.5, col: { r: 255, g: 255, b: 128 } },
+ { perc: 5, col: { r: 255, g: 128, b: 0 } },
+ { perc: 15, col: { r: 255, g: 0, b: 0 } },
+ { perc: 100, col: { r: 0, g: 0, b: 0 } }
+];
diff --git a/deps/v8/tools/turbolizer/disassembly-view.js b/deps/v8/tools/turbolizer/disassembly-view.js
index b704c77312..a2a534cd7f 100644
--- a/deps/v8/tools/turbolizer/disassembly-view.js
+++ b/deps/v8/tools/turbolizer/disassembly-view.js
@@ -159,6 +159,7 @@ class DisassemblyView extends TextView {
view.pos_start = -1;
view.addr_event_counts = null;
view.total_event_counts = null;
+ view.max_event_counts = null;
view.pos_lines = new Array();
// Comment lines for line 0 include sourcePosition already, only need to
// add sourcePosition for lines > 0.
@@ -181,15 +182,18 @@ class DisassemblyView extends TextView {
view.addr_event_counts = eventCounts;
view.total_event_counts = {};
- for (var ev_name in view.addr_event_counts) {
+ view.max_event_counts = {};
+ for (let ev_name in view.addr_event_counts) {
let keys = Object.keys(view.addr_event_counts[ev_name]);
let values = keys.map(key => view.addr_event_counts[ev_name][key]);
view.total_event_counts[ev_name] = values.reduce((a, b) => a + b);
+ view.max_event_counts[ev_name] = values.reduce((a, b) => Math.max(a, b));
}
}
else {
view.addr_event_counts = null;
view.total_event_counts = null;
+ view.max_event_counts = null;
}
}
@@ -198,6 +202,11 @@ class DisassemblyView extends TextView {
return num.toFixed(3).replace(/\.?0+$/, "") + "%";
}
+ // Interpolate between the given start and end values by a fraction of val/max.
+ interpolate(val, max, start, end) {
+ return start + (end - start) * (val / max);
+ }
+
processLine(line) {
let view = this;
let func = function(match, p1, p2, p3) {
@@ -214,30 +223,49 @@ class DisassemblyView extends TextView {
// Add profiling data per instruction if available.
if (view.total_event_counts) {
- let event_selector = document.getElementById('event-selector');
- if (event_selector.length !== 0) {
- let event = event_selector.value;
- let matches = /^(0x[0-9a-fA-F]+)\s+\d+\s+[0-9a-fA-F]+/.exec(line);
- if (matches) {
+ let matches = /^(0x[0-9a-fA-F]+)\s+\d+\s+[0-9a-fA-F]+/.exec(line);
+ if (matches) {
+ let newFragments = [];
+ for (let event in view.addr_event_counts) {
let count = view.addr_event_counts[event][matches[1]];
- let str = "";
- let css_cls = undefined;
+ let str = " ";
+ let css_cls = "prof";
if(count !== undefined) {
let perc = count / view.total_event_counts[event] * 100;
- str = "(" + view.humanize(perc) + ") ";
+ let col = { r: 255, g: 255, b: 255 };
+ for (let i = 0; i < PROF_COLS.length; i++) {
+ if (perc === PROF_COLS[i].perc) {
+ col = PROF_COLS[i].col;
+ break;
+ }
+ else if (perc > PROF_COLS[i].perc && perc < PROF_COLS[i + 1].perc) {
+ let col1 = PROF_COLS[i].col;
+ let col2 = PROF_COLS[i + 1].col;
+
+ let val = perc - PROF_COLS[i].perc;
+ let max = PROF_COLS[i + 1].perc - PROF_COLS[i].perc;
+
+ col.r = Math.round(view.interpolate(val, max, col1.r, col2.r));
+ col.g = Math.round(view.interpolate(val, max, col1.g, col2.g));
+ col.b = Math.round(view.interpolate(val, max, col1.b, col2.b));
+ break;
+ }
+ }
+
+ str = UNICODE_BLOCK;
+
+ let fragment = view.createFragment(str, css_cls);
+ fragment.title = event + ": " + view.humanize(perc) + " (" + count + ")";
+ fragment.style.color = "rgb(" + col.r + ", " + col.g + ", " + col.b + ")";
- css_cls = "prof-low";
- if(perc > PROF_HIGH)
- css_cls = "prof-high";
- else if(perc > PROF_MED)
- css_cls = "prof-med";
+ newFragments.push(fragment);
}
- // Pad extra spaces to keep alignment for all instructions.
- str = (" ".repeat(10) + str).slice(-10);
+ else
+ newFragments.push(view.createFragment(str, css_cls));
- fragments.splice(0, 0, view.createFragment(str, css_cls));
}
+ fragments = newFragments.concat(fragments);
}
}
return fragments;
diff --git a/deps/v8/tools/turbolizer/index.html b/deps/v8/tools/turbolizer/index.html
index 8dc21b7bdd..4066fd8010 100644
--- a/deps/v8/tools/turbolizer/index.html
+++ b/deps/v8/tools/turbolizer/index.html
@@ -1,6 +1,7 @@
<!DOCTYPE HTML>
<html>
<head>
+ <title>Turbolizer</title>
<link rel="stylesheet" href="turbo-visualizer.css" />
</head>
<body width="100%">
@@ -53,12 +54,9 @@
</text></svg></div>
</div>
<div id="right">
- <span id="disassembly-toolbox">
- <select id="event-selector"></select>
- </span>
<div id='disassembly'>
<pre id='disassembly-text-pre' class='prettyprint prettyprinted'>
- <ul id='disassembly-list' class='nolinenums noindent'>
+ <ul id='disassembly-list' class='nolinenums noindent'>
</ul>
</pre>
</div>
diff --git a/deps/v8/tools/turbolizer/text-view.js b/deps/v8/tools/turbolizer/text-view.js
index 70d2a252ae..6822500dde 100644
--- a/deps/v8/tools/turbolizer/text-view.js
+++ b/deps/v8/tools/turbolizer/text-view.js
@@ -120,7 +120,7 @@ class TextView extends View {
if (style != undefined) {
span.classList.add(style);
}
- span.innerText = text;
+ span.innerHTML = text;
return span;
}
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.css b/deps/v8/tools/turbolizer/turbo-visualizer.css
index 8e2bab282d..69a6ccabb5 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.css
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.css
@@ -326,16 +326,8 @@ span.linkable-text:hover {
display: none;
}
-.prof-low {
- color: #888;
-}
-
-.prof-med {
- color: #080;
-}
-
-.prof-high {
- color: #800;
+.prof {
+ cursor: default;
}
tspan {
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.js b/deps/v8/tools/turbolizer/turbo-visualizer.js
index b8d7762605..280caf01db 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.js
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.js
@@ -188,13 +188,6 @@ document.onload = (function(d3){
}
}
- var eventMenu = document.getElementById('event-selector');
- eventMenu.innerHTML = '';
- for (var event in jsonObj.eventCounts) {
- var optionElement = document.createElement("option");
- optionElement.text = event;
- eventMenu.add(optionElement, null);
- }
disassemblyView.initializePerfProfile(jsonObj.eventCounts);
disassemblyView.show(disassemblyPhase.data, null);
@@ -216,10 +209,6 @@ document.onload = (function(d3){
displayPhase(jsonObj.phases[selectMenu.selectedIndex]);
}
- eventMenu.onchange = function(item) {
- disassemblyView.show(disassemblyView.data, null);
- }
-
fitPanesToParents();
d3.select("#search-input").attr("value", window.sessionStorage.getItem("lastSearch") || "");
diff --git a/deps/v8/tools/update-wasm-fuzzers.sh b/deps/v8/tools/update-wasm-fuzzers.sh
new file mode 100755
index 0000000000..3652829c8d
--- /dev/null
+++ b/deps/v8/tools/update-wasm-fuzzers.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+TOOLS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+cd ${TOOLS_DIR}/..
+
+rm -rf test/fuzzer/wasm
+rm -rf test/fuzzer/wasm_asmjs
+
+make x64.debug -j
+
+mkdir -p test/fuzzer/wasm
+mkdir -p test/fuzzer/wasm_asmjs
+
+# asm.js
+./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
+ --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+ --dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/wasm/asm*
+./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
+ --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+ --dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/asm/*
+./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
+ --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+ --dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/regress/asm/*
+# WASM
+./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
+ --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+ --dump-wasm-module-path=./test/fuzzer/wasm/" unittests
+./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
+ --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+ --dump-wasm-module-path=./test/fuzzer/wasm/" mjsunit/wasm/*
+./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
+ --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+ --dump-wasm-module-path=./test/fuzzer/wasm/" \
+ $(cd test/; ls cctest/wasm/test-*.cc | \
+ sed -es/wasm\\///g | sed -es/[.]cc/\\/\\*/g)
+
+# Delete items over 20k.
+for x in $(find ./test/fuzzer/wasm/ -type f -size +20k)
+do
+ rm $x
+done
+for x in $(find ./test/fuzzer/wasm_asmjs/ -type f -size +20k)
+do
+ rm $x
+done
+
+# Upload changes.
+cd test/fuzzer
+upload_to_google_storage.py -a -b v8-wasm-fuzzer wasm
+upload_to_google_storage.py -a -b v8-wasm-asmjs-fuzzer wasm_asmjs
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 69d73c2037..0ff0cf32b0 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -89,7 +89,7 @@ INSTANCE_TYPES = {
163: "ALIASED_ARGUMENTS_ENTRY_TYPE",
164: "BOX_TYPE",
173: "PROTOTYPE_INFO_TYPE",
- 174: "SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE",
+ 174: "CONTEXT_EXTENSION_TYPE",
167: "FIXED_ARRAY_TYPE",
148: "FIXED_DOUBLE_ARRAY_TYPE",
168: "SHARED_FUNCTION_INFO_TYPE",
@@ -232,7 +232,7 @@ KNOWN_MAPS = {
0x09231: (165, "DebugInfoMap"),
0x0925d: (166, "BreakPointInfoMap"),
0x09289: (173, "PrototypeInfoMap"),
- 0x092b5: (174, "SloppyBlockWithEvalContextExtensionMap"),
+ 0x092b5: (174, "ContextExtensionMap"),
}
# List of known V8 objects.
diff --git a/deps/v8/tools/verify_source_deps.py b/deps/v8/tools/verify_source_deps.py
index 56e3156550..a3fdb2ec7c 100755
--- a/deps/v8/tools/verify_source_deps.py
+++ b/deps/v8/tools/verify_source_deps.py
@@ -8,45 +8,103 @@ Script to print potentially missing source dependencies based on the actual
.h and .cc files in the source tree and which files are included in the gyp
and gn files. The latter inclusion is overapproximated.
-TODO(machenbach): Gyp files in src will point to source files in src without a
-src/ prefix. For simplicity, all paths relative to src are stripped. But this
-tool won't be accurate for other sources in other directories (e.g. cctest).
+TODO(machenbach): If two source files with the same name exist, but only one
+is referenced from a gyp/gn file, we won't necessarily detect it.
"""
import itertools
import re
import os
+import subprocess
+import sys
V8_BASE = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-V8_SRC_BASE = os.path.join(V8_BASE, 'src')
-V8_INCLUDE_BASE = os.path.join(V8_BASE, 'include')
GYP_FILES = [
os.path.join(V8_BASE, 'src', 'd8.gyp'),
os.path.join(V8_BASE, 'src', 'v8.gyp'),
+ os.path.join(V8_BASE, 'src', 'inspector', 'inspector.gypi'),
os.path.join(V8_BASE, 'src', 'third_party', 'vtune', 'v8vtune.gyp'),
+ os.path.join(V8_BASE, 'samples', 'samples.gyp'),
os.path.join(V8_BASE, 'test', 'cctest', 'cctest.gyp'),
+ os.path.join(V8_BASE, 'test', 'fuzzer', 'fuzzer.gyp'),
os.path.join(V8_BASE, 'test', 'unittests', 'unittests.gyp'),
+ os.path.join(V8_BASE, 'test', 'inspector', 'inspector.gyp'),
+ os.path.join(V8_BASE, 'testing', 'gmock.gyp'),
+ os.path.join(V8_BASE, 'testing', 'gtest.gyp'),
os.path.join(V8_BASE, 'tools', 'parser-shell.gyp'),
]
+ALL_GYP_PREFIXES = [
+ '..',
+ 'common',
+ os.path.join('src', 'third_party', 'vtune'),
+ 'src',
+ 'samples',
+ 'testing',
+ 'tools',
+ os.path.join('test', 'cctest'),
+ os.path.join('test', 'common'),
+ os.path.join('test', 'fuzzer'),
+ os.path.join('test', 'unittests'),
+ os.path.join('test', 'inspector'),
+]
+
+GYP_UNSUPPORTED_FEATURES = [
+ 'gcmole',
+]
+
+GN_FILES = [
+ os.path.join(V8_BASE, 'BUILD.gn'),
+ os.path.join(V8_BASE, 'build', 'secondary', 'testing', 'gmock', 'BUILD.gn'),
+ os.path.join(V8_BASE, 'build', 'secondary', 'testing', 'gtest', 'BUILD.gn'),
+ os.path.join(V8_BASE, 'src', 'inspector', 'BUILD.gn'),
+ os.path.join(V8_BASE, 'test', 'cctest', 'BUILD.gn'),
+ os.path.join(V8_BASE, 'test', 'unittests', 'BUILD.gn'),
+ os.path.join(V8_BASE, 'test', 'inspector', 'BUILD.gn'),
+ os.path.join(V8_BASE, 'tools', 'BUILD.gn'),
+]
+
+GN_UNSUPPORTED_FEATURES = [
+ 'aix',
+ 'cygwin',
+ 'freebsd',
+ 'gcmole',
+ 'openbsd',
+ 'ppc',
+ 'qnx',
+ 'solaris',
+ 'vtune',
+ 'x87',
+]
-def path_no_prefix(path):
- if path.startswith('../'):
- return path_no_prefix(path[3:])
- elif path.startswith('src/'):
- return path_no_prefix(path[4:])
- else:
- return path
+ALL_GN_PREFIXES = [
+ '..',
+ os.path.join('src', 'inspector'),
+ 'src',
+ 'testing',
+ os.path.join('test', 'cctest'),
+ os.path.join('test', 'unittests'),
+ os.path.join('test', 'inspector'),
+]
+
+def pathsplit(path):
+ return re.split('[/\\\\]', path)
+def path_no_prefix(path, prefixes):
+ for prefix in prefixes:
+ if path.startswith(prefix + os.sep):
+ return path_no_prefix(path[len(prefix) + 1:], prefixes)
+ return path
-def isources(directory):
- for root, dirs, files in os.walk(directory):
- for f in files:
- if not (f.endswith('.h') or f.endswith('.cc')):
- continue
- yield path_no_prefix(os.path.relpath(os.path.join(root, f), V8_BASE))
+
+def isources(prefixes):
+ cmd = ['git', 'ls-tree', '-r', 'HEAD', '--full-name', '--name-only']
+ for f in subprocess.check_output(cmd, universal_newlines=True).split('\n'):
+ if not (f.endswith('.h') or f.endswith('.cc')):
+ continue
+ yield path_no_prefix(os.path.join(*pathsplit(f)), prefixes)
def iflatten(obj):
@@ -59,7 +117,7 @@ def iflatten(obj):
for i in iflatten(value):
yield i
elif isinstance(obj, basestring):
- yield path_no_prefix(obj)
+ yield path_no_prefix(os.path.join(*pathsplit(obj)), ALL_GYP_PREFIXES)
def iflatten_gyp_file(gyp_file):
@@ -80,27 +138,44 @@ def iflatten_gn_file(gn_file):
for line in f.read().splitlines():
match = re.match(r'.*"([^"]*)".*', line)
if match:
- yield path_no_prefix(match.group(1))
+ yield path_no_prefix(
+ os.path.join(*pathsplit(match.group(1))), ALL_GN_PREFIXES)
-def icheck_values(values, *source_dirs):
- for source_file in itertools.chain(
- *[isources(source_dir) for source_dir in source_dirs]
- ):
+def icheck_values(values, prefixes):
+ for source_file in isources(prefixes):
if source_file not in values:
yield source_file
-gyp_values = set(itertools.chain(
- *[iflatten_gyp_file(gyp_file) for gyp_file in GYP_FILES]
- ))
+def missing_gyp_files():
+ gyp_values = set(itertools.chain(
+ *[iflatten_gyp_file(gyp_file) for gyp_file in GYP_FILES]
+ ))
+ gyp_files = sorted(icheck_values(gyp_values, ALL_GYP_PREFIXES))
+ return filter(
+ lambda x: not any(i in x for i in GYP_UNSUPPORTED_FEATURES), gyp_files)
+
+
+def missing_gn_files():
+ gn_values = set(itertools.chain(
+ *[iflatten_gn_file(gn_file) for gn_file in GN_FILES]
+ ))
+
+ gn_files = sorted(icheck_values(gn_values, ALL_GN_PREFIXES))
+ return filter(
+ lambda x: not any(i in x for i in GN_UNSUPPORTED_FEATURES), gn_files)
+
-print "----------- Files not in gyp: ------------"
-for i in sorted(icheck_values(gyp_values, V8_SRC_BASE, V8_INCLUDE_BASE)):
- print i
+def main():
+ print "----------- Files not in gyp: ------------"
+ for i in missing_gyp_files():
+ print i
-gn_values = set(iflatten_gn_file(os.path.join(V8_BASE, 'BUILD.gn')))
+ print "\n----------- Files not in gn: -------------"
+ for i in missing_gn_files():
+ print i
+ return 0
-print "\n----------- Files not in gn: -------------"
-for i in sorted(icheck_values(gn_values, V8_SRC_BASE, V8_INCLUDE_BASE)):
- print i
+if '__main__' == __name__:
+ sys.exit(main())